From 43fb48e77061230c37c735eebb5f5bf84fa177d6 Mon Sep 17 00:00:00 2001
From: Kotaro Yoshimoto
Date: Wed, 26 Jun 2024 15:32:52 +0900
Subject: [PATCH 1/8] chore(ci): fix failure condition when modified files
 are empty

Signed-off-by: Kotaro Yoshimoto
---
 .github/workflows/cppcheck-differential.yaml | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/.github/workflows/cppcheck-differential.yaml b/.github/workflows/cppcheck-differential.yaml
index 99f8aa1311b5e..3da3f9636a800 100644
--- a/.github/workflows/cppcheck-differential.yaml
+++ b/.github/workflows/cppcheck-differential.yaml
@@ -31,21 +31,20 @@ jobs:
         run: git fetch origin ${{ github.base_ref }}
         shell: bash

-      - name: Get changed files
-        id: changed-files
+      - name: Get modified files
+        id: get-modified-files
         run: |
-          git diff --name-only "origin/${{ github.base_ref }}"...HEAD > changed_files.txt
-          cat changed_files.txt
+          echo "changed_files=$(git diff --name-only "origin/${{ github.base_ref }}"...HEAD | grep -E '\.(cpp|hpp)$' | tr '\n' ' ' || true)" >> $GITHUB_OUTPUT
         shell: bash
-
+
       - name: Run Cppcheck on changed files
         continue-on-error: true
         id: cppcheck
         run: |
           files=$(cat changed_files.txt | grep -E '\.(cpp|hpp)$' || true)
           if [ -n "$files" ]; then
-            echo "Running Cppcheck on changed files: $files"
-            cppcheck --enable=all --inconclusive --check-level=exhaustive --error-exitcode=1 --suppressions-list=.cppcheck_suppressions $files 2> cppcheck-report.txt
+            echo "Running Cppcheck on changed files: ${{ steps.get-modified-files.outputs.changed_files }}"
+            cppcheck --enable=all --inconclusive --check-level=exhaustive --error-exitcode=1 --suppressions-list=.cppcheck_suppressions ${{ steps.get-modified-files.outputs.changed_files }} 2> cppcheck-report.txt
           else
             echo "No C++ files changed."
            touch cppcheck-report.txt
@@ -63,5 +62,5 @@ jobs:
           path: cppcheck-report.txt

       - name: Fail the job if Cppcheck failed
-        if: steps.cppcheck.outcome == 'failure'
+        if: ${{ steps.get-modified-files.outputs.changed_files != '' && steps.cppcheck.outcome == 'failure' }}
         run: exit 1

From 7a28691e349628984d9ef7f33902cf17bba6c5c9 Mon Sep 17 00:00:00 2001
From: Ryuta Kambe
Date: Wed, 26 Jun 2024 15:35:29 +0900
Subject: [PATCH 2/8] fix(autoware_velocity_smoother): fix duplicateBreak
 warning (#7699)

Signed-off-by: Ryuta Kambe
---
 planning/autoware_velocity_smoother/src/node.cpp | 1 -
 1 file changed, 1 deletion(-)

diff --git a/planning/autoware_velocity_smoother/src/node.cpp b/planning/autoware_velocity_smoother/src/node.cpp
index aefbd9ef20f84..5c66f5b906bba 100644
--- a/planning/autoware_velocity_smoother/src/node.cpp
+++ b/planning/autoware_velocity_smoother/src/node.cpp
@@ -1013,7 +1013,6 @@ VelocitySmootherNode::AlgorithmType VelocitySmootherNode::getAlgorithmType(
   }

   throw std::domain_error("[VelocitySmootherNode] undesired algorithm is selected.");
-  return AlgorithmType::INVALID;
 }

 double VelocitySmootherNode::calcTravelDistance() const

From 39361430ace4cf1c255095d360576b67e52086f2 Mon Sep 17 00:00:00 2001
From: Kotaro Yoshimoto
Date: Wed, 26 Jun 2024 15:35:31 +0900
Subject: [PATCH 3/8] chore: delete unnecessary new line

Signed-off-by: Kotaro Yoshimoto
---
 .github/workflows/cppcheck-differential.yaml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/.github/workflows/cppcheck-differential.yaml b/.github/workflows/cppcheck-differential.yaml
index 3da3f9636a800..59254d85d3367 100644
--- a/.github/workflows/cppcheck-differential.yaml
+++ b/.github/workflows/cppcheck-differential.yaml
@@ -36,7 +36,6 @@ jobs:
         run: |
           echo "changed_files=$(git diff --name-only "origin/${{ github.base_ref }}"...HEAD | grep -E '\.(cpp|hpp)$' | tr '\n' ' ' || true)" >> $GITHUB_OUTPUT
         shell: bash
-
       - name: Run Cppcheck on changed files
         continue-on-error: true
         id: cppcheck

From 3b8bcb3d1f253cf1ed85928f771b2aa837b41414 Mon Sep 17 00:00:00 2001
From: Kotaro Yoshimoto
Date: Wed, 26 Jun 2024 06:41:50 +0000
Subject: [PATCH 4/8] chore: delete lidar_centerpoint_tvm package for test

Signed-off-by: GitHub
---
 perception/lidar_centerpoint_tvm/.gitignore   |   0
 .../lidar_centerpoint_tvm/CMakeLists.txt      | 100 -------
 perception/lidar_centerpoint_tvm/README.md    |  77 ------
 .../config/centerpoint.param.yaml             |  10 -
 .../inference_engine_tvm_config.hpp           |  61 -----
 ...processing_inference_engine_tvm_config.hpp |  60 -----
 .../inference_engine_tvm_config.hpp           |  56 ----
 .../centerpoint_config.hpp                    | 126 ---------
 .../lidar_centerpoint_tvm/centerpoint_tvm.hpp | 186 -------------
 .../include/lidar_centerpoint_tvm/node.hpp    |  71 -----
 .../postprocess/circle_nms.hpp                |  37 ---
 .../postprocess/generate_detected_boxes.hpp   |  39 ---
 .../preprocess/generate_features.hpp          |  38 ---
 .../preprocess/pointcloud_densification.hpp   |  94 -------
 .../preprocess/voxel_generator.hpp            |  77 ------
 .../lidar_centerpoint_tvm/ros_utils.hpp       |  47 ----
 .../single_inference_node.hpp                 |  66 -----
 .../include/lidar_centerpoint_tvm/utils.hpp   |  50 ----
 .../visibility_control.hpp                    |  36 ---
 .../launch/lidar_centerpoint_tvm.launch.xml   |  26 --
 ...inference_lidar_centerpoint_tvm.launch.xml |  33 ---
 .../lib/centerpoint_tvm.cpp                   | 246 ------------------
 .../lib/postprocess/circle_nms.cpp            |  65 -----
 .../postprocess/generate_detected_boxes.cpp   | 176 -------------
 .../lib/preprocess/generate_features.cpp      | 131 ----------
 .../preprocess/pointcloud_densification.cpp   | 115 --------
 .../lib/preprocess/voxel_generator.cpp        | 138 ----------
 .../lidar_centerpoint_tvm/lib/ros_utils.cpp   | 118 ---------
 .../lidar_centerpoint_tvm/lib/utils.cpp       |  41 ---
 perception/lidar_centerpoint_tvm/package.xml  |  32 ---
 .../scripts/lidar_centerpoint_visualizer.py   |  55 ----
 perception/lidar_centerpoint_tvm/src/node.cpp | 154 -----------
 .../src/single_inference_node.cpp             | 239 -----------------
 33 files changed, 2800 deletions(-)
 delete mode 100644 perception/lidar_centerpoint_tvm/.gitignore
 delete mode 100644 perception/lidar_centerpoint_tvm/CMakeLists.txt
 delete mode 100644 perception/lidar_centerpoint_tvm/README.md
 delete mode 100644 perception/lidar_centerpoint_tvm/config/centerpoint.param.yaml
 delete mode 100644 perception/lidar_centerpoint_tvm/data/models/centerpoint_backbone/inference_engine_tvm_config.hpp
 delete mode 100644 perception/lidar_centerpoint_tvm/data/models/centerpoint_backbone/preprocessing_inference_engine_tvm_config.hpp
 delete mode 100644 perception/lidar_centerpoint_tvm/data/models/centerpoint_encoder/inference_engine_tvm_config.hpp
 delete mode 100644 perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/centerpoint_config.hpp
 delete mode 100644 perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/centerpoint_tvm.hpp
 delete mode 100644 perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/node.hpp
 delete mode 100644 perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/postprocess/circle_nms.hpp
 delete mode 100644 perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/postprocess/generate_detected_boxes.hpp
 delete mode 100644 perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/preprocess/generate_features.hpp
 delete mode 100644 perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/preprocess/pointcloud_densification.hpp
 delete mode 100644 perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/preprocess/voxel_generator.hpp
 delete mode 100644 perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/ros_utils.hpp
 delete mode 100644 perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/single_inference_node.hpp
 delete mode 100644 perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/utils.hpp
 delete mode 100644 perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/visibility_control.hpp
 delete mode 100644 perception/lidar_centerpoint_tvm/launch/lidar_centerpoint_tvm.launch.xml
 delete mode 100644 perception/lidar_centerpoint_tvm/launch/single_inference_lidar_centerpoint_tvm.launch.xml
 delete mode 100644 perception/lidar_centerpoint_tvm/lib/centerpoint_tvm.cpp
 delete mode 100644 perception/lidar_centerpoint_tvm/lib/postprocess/circle_nms.cpp
 delete mode 100644 perception/lidar_centerpoint_tvm/lib/postprocess/generate_detected_boxes.cpp
 delete mode 100644 perception/lidar_centerpoint_tvm/lib/preprocess/generate_features.cpp
 delete mode 100644 perception/lidar_centerpoint_tvm/lib/preprocess/pointcloud_densification.cpp
 delete mode 100644 perception/lidar_centerpoint_tvm/lib/preprocess/voxel_generator.cpp
 delete mode 100644 perception/lidar_centerpoint_tvm/lib/ros_utils.cpp
 delete mode 100644 perception/lidar_centerpoint_tvm/lib/utils.cpp
 delete mode 100644 perception/lidar_centerpoint_tvm/package.xml
 delete mode 100755 perception/lidar_centerpoint_tvm/scripts/lidar_centerpoint_visualizer.py
 delete mode 100644 perception/lidar_centerpoint_tvm/src/node.cpp
 delete mode 100644 perception/lidar_centerpoint_tvm/src/single_inference_node.cpp
diff --git
a/perception/lidar_centerpoint_tvm/.gitignore b/perception/lidar_centerpoint_tvm/.gitignore deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/perception/lidar_centerpoint_tvm/CMakeLists.txt b/perception/lidar_centerpoint_tvm/CMakeLists.txt deleted file mode 100644 index e9da98d4ae3c4..0000000000000 --- a/perception/lidar_centerpoint_tvm/CMakeLists.txt +++ /dev/null @@ -1,100 +0,0 @@ -cmake_minimum_required(VERSION 3.14) -project(lidar_centerpoint_tvm) - -find_package(autoware_cmake REQUIRED) -autoware_package() - -set(tvm_runtime_DIR ${tvm_vendor_DIR}) -find_package(tvm_runtime CONFIG REQUIRED) - -# Gather neural network information. -set(${PROJECT_NAME}_BACKEND llvm CACHE STRING "${PROJECT_NAME} neural network backend") -set(MODEL_NAME_ENCODER centerpoint_encoder) - -# Get neural network. -set(NN_DEPENDENCY_ENCODER "") -get_neural_network(${MODEL_NAME_ENCODER} ${${PROJECT_NAME}_BACKEND} NN_DEPENDENCY_ENCODER) - -set(MODEL_NAME_BACKBONE centerpoint_backbone) - -# Get neural network. -set(NN_DEPENDENCY_BACKBONE "") -get_neural_network(${MODEL_NAME_BACKBONE} ${${PROJECT_NAME}_BACKEND} NN_DEPENDENCY_BACKBONE) - -if((NOT NN_DEPENDENCY_ENCODER STREQUAL "") AND (NOT NN_DEPENDENCY_BACKBONE STREQUAL "")) - ## centerpoint_tvm ## - ament_auto_add_library(${PROJECT_NAME} SHARED - data/models/${MODEL_NAME_ENCODER}/inference_engine_tvm_config.hpp - data/models/${MODEL_NAME_BACKBONE}/inference_engine_tvm_config.hpp - data/models/${MODEL_NAME_BACKBONE}/preprocessing_inference_engine_tvm_config.hpp - lib/centerpoint_tvm.cpp - lib/utils.cpp - lib/ros_utils.cpp - lib/preprocess/pointcloud_densification.cpp - lib/preprocess/voxel_generator.cpp - lib/preprocess/generate_features.cpp - lib/postprocess/circle_nms.cpp - lib/postprocess/generate_detected_boxes.cpp - ) - - add_dependencies(${PROJECT_NAME} ${NN_DEPENDENCY_ENCODER}) - add_dependencies(${PROJECT_NAME} ${NN_DEPENDENCY_BACKBONE}) - - target_compile_options(${PROJECT_NAME} PRIVATE "-Wno-sign-conversion" "-Wno-conversion") - - target_include_directories(${PROJECT_NAME} SYSTEM PUBLIC - "${tvm_vendor_INCLUDE_DIRS}" - ) - - target_link_libraries(${PROJECT_NAME} - ${tvm_runtime_LIBRARIES} - ) - - target_include_directories(${PROJECT_NAME} PRIVATE - data/models - ) - - ## node ## - ament_auto_add_library(lidar_centerpoint_tvm_component SHARED - src/node.cpp - ) - - target_link_libraries(lidar_centerpoint_tvm_component - ${PROJECT_NAME} - ) - - rclcpp_components_register_node(lidar_centerpoint_tvm_component - PLUGIN "autoware::perception::lidar_centerpoint_tvm::LidarCenterPointTVMNode" - EXECUTABLE lidar_centerpoint_tvm_node - ) - - ## single inference node ## - ament_auto_add_library(single_inference_lidar_centerpoint_tvm_component SHARED - src/single_inference_node.cpp - ) - - target_link_libraries(single_inference_lidar_centerpoint_tvm_component - ${tvm_runtime_LIBRARIES} - ) - - - rclcpp_components_register_node(single_inference_lidar_centerpoint_tvm_component - PLUGIN "autoware::perception::lidar_centerpoint_tvm::SingleInferenceLidarCenterPointNode" - EXECUTABLE single_inference_lidar_centerpoint_tvm_node - ) - - install(PROGRAMS - scripts/lidar_centerpoint_visualizer.py - DESTINATION lib/${PROJECT_NAME} - ) - - ament_export_dependencies(ament_cmake_python) - - ament_auto_package(INSTALL_TO_SHARE - launch - config - ) -else() - message(WARNING "Neural network not found, skipping package.") - ament_auto_package() -endif() diff --git a/perception/lidar_centerpoint_tvm/README.md b/perception/lidar_centerpoint_tvm/README.md deleted file 
mode 100644
index 26d588d4483ad..0000000000000
--- a/perception/lidar_centerpoint_tvm/README.md
+++ /dev/null
@@ -1,77 +0,0 @@
-# lidar_centerpoint_tvm
-
-## Design
-
-### Usage
-
-lidar_centerpoint_tvm is a package for detecting dynamic 3D objects using a TVM-compiled centerpoint module for different backends. To use this package, replace `lidar_centerpoint` with `lidar_centerpoint_tvm` in perception launch files (for example, in `lidar_based_detection.launch.xml` if lidar-based detection is chosen).
-
-#### Neural network
-
-This package will not build without a neural network for its inference.
-The network is provided by the `tvm_utility` package.
-See its design page for more information on how to enable downloading pre-compiled networks (by setting the `DOWNLOAD_ARTIFACTS` cmake variable), or how to handle user-compiled networks.
-
-#### Backend
-
-The backend used for the inference can be selected by setting the `lidar_centerpoint_tvm_BACKEND` cmake variable.
-The current available options are `llvm` for a CPU backend, and `vulkan` or `opencl` for a GPU backend.
-It defaults to `llvm`.
-
-### Inputs / Outputs
-
-### Input
-
-| Name                 | Type                            | Description      |
-| -------------------- | ------------------------------- | ---------------- |
-| `~/input/pointcloud` | `sensor_msgs::msg::PointCloud2` | input pointcloud |
-
-### Output
-
-| Name                       | Type                                             | Description          |
-| -------------------------- | ------------------------------------------------ | -------------------- |
-| `~/output/objects`         | `autoware_perception_msgs::msg::DetectedObjects` | detected objects     |
-| `debug/cyclic_time_ms`     | `tier4_debug_msgs::msg::Float64Stamped`          | cyclic time (ms)     |
-| `debug/processing_time_ms` | `tier4_debug_msgs::msg::Float64Stamped`          | processing time (ms) |
-
-## Parameters
-
-### Core Parameters
-
-| Name                            | Type   | Default Value | Description                                                  |
-| ------------------------------- | ------ | ------------- | ------------------------------------------------------------ |
-| `score_threshold`               | float  | `0.1`         | detected objects with score less than threshold are ignored  |
-| `densification_world_frame_id`  | string | `map`         | the world frame id to fuse multi-frame pointcloud            |
-| `densification_num_past_frames` | int    | `1`           | the number of past frames to fuse with the current frame     |
-
-### Bounding Box
-
-The lidar segmentation node establishes a bounding box for the detected obstacles.
-The `L-fit` method of fitting a bounding box to a cluster is used for that.
-
-### Limitation and Known Issue
-
-Due to an accuracy issue of the `centerpoint` model, `vulkan` cannot be used at the moment.
-As for the `llvm` backend, real-time performance cannot be achieved.
-
-### Scatter Implementation
-
-The scatter function can be implemented using either TVMScript or C++. For the C++ implementation, please refer to
-
-## Reference
-
-[1] Yin, Tianwei, Xingyi Zhou, and Philipp Krähenbühl. "Center-based 3d object detection and tracking." arXiv preprint arXiv:2006.11275 (2020).
-
-[2] Lang, Alex H., et al. "PointPillars: Fast encoders for object detection from point clouds." Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2019.
- -[3] - -[4] - -[5] - -## Related issues - - - -- #908: Run Lidar Centerpoint with TVM diff --git a/perception/lidar_centerpoint_tvm/config/centerpoint.param.yaml b/perception/lidar_centerpoint_tvm/config/centerpoint.param.yaml deleted file mode 100644 index 34dce71975985..0000000000000 --- a/perception/lidar_centerpoint_tvm/config/centerpoint.param.yaml +++ /dev/null @@ -1,10 +0,0 @@ -/**: - ros__parameters: - class_names: ["CAR", "PEDESTRIAN", "BICYCLE"] - rename_car_to_truck_and_bus: true - point_feature_size: 4 - max_voxel_size: 40000 - point_cloud_range: [-89.6, -89.6, -3.0, 89.6, 89.6, 5.0] - voxel_size: [0.32, 0.32, 8.0] - downsample_factor: 1 - encoder_in_feature_size: 9 diff --git a/perception/lidar_centerpoint_tvm/data/models/centerpoint_backbone/inference_engine_tvm_config.hpp b/perception/lidar_centerpoint_tvm/data/models/centerpoint_backbone/inference_engine_tvm_config.hpp deleted file mode 100644 index 5d832027c7591..0000000000000 --- a/perception/lidar_centerpoint_tvm/data/models/centerpoint_backbone/inference_engine_tvm_config.hpp +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2021 Arm Limited and Contributors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "tvm_utility/pipeline.hpp" - -#ifndef PERCEPTION__LIDAR_CENTERPOINT_TVM__DATA__MODELS__CENTERPOINT_BACKBONE__INFERENCE_ENGINE_TVM_CONFIG_HPP_ // NOLINT -#define PERCEPTION__LIDAR_CENTERPOINT_TVM__DATA__MODELS__CENTERPOINT_BACKBONE__INFERENCE_ENGINE_TVM_CONFIG_HPP_ // NOLINT - -namespace model_zoo -{ -namespace perception -{ -namespace lidar_obstacle_detection -{ -namespace centerpoint_backbone -{ -namespace onnx_centerpoint_backbone -{ - -static const tvm_utility::pipeline::InferenceEngineTVMConfig config{ - {3, 0, 0}, // modelzoo_version - - "centerpoint_backbone", // network_name - "llvm", // network_backend - - "./deploy_lib.so", // network_module_path - "./deploy_graph.json", // network_graph_path - "./deploy_param.params", // network_params_path - - // cspell: ignore DLCPU - kDLCPU, // tvm_device_type - 0, // tvm_device_id - - {{"spatial_features", kDLFloat, 32, 1, {1, 32, 560, 560}}}, // network_inputs - - {{"heatmap", kDLFloat, 32, 1, {1, 3, 560, 560}}, - {"reg", kDLFloat, 32, 1, {1, 2, 560, 560}}, - {"height", kDLFloat, 32, 1, {1, 1, 560, 560}}, - {"dim", kDLFloat, 32, 1, {1, 3, 560, 560}}, - {"rot", kDLFloat, 32, 1, {1, 2, 560, 560}}, - {"vel", kDLFloat, 32, 1, {1, 2, 560, 560}}} // network_outputs -}; - -} // namespace onnx_centerpoint_backbone -} // namespace centerpoint_backbone -} // namespace lidar_obstacle_detection -} // namespace perception -} // namespace model_zoo -// NOLINTNEXTLINE -#endif // PERCEPTION__LIDAR_CENTERPOINT_TVM__DATA__MODELS__CENTERPOINT_BACKBONE__INFERENCE_ENGINE_TVM_CONFIG_HPP_ diff --git a/perception/lidar_centerpoint_tvm/data/models/centerpoint_backbone/preprocessing_inference_engine_tvm_config.hpp b/perception/lidar_centerpoint_tvm/data/models/centerpoint_backbone/preprocessing_inference_engine_tvm_config.hpp deleted file mode 100644 index 
0cae27b49572c..0000000000000 --- a/perception/lidar_centerpoint_tvm/data/models/centerpoint_backbone/preprocessing_inference_engine_tvm_config.hpp +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2021 Arm Limited and Contributors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "tvm_utility/pipeline.hpp" - -#ifndef PERCEPTION__LIDAR_CENTERPOINT_TVM__DATA__MODELS__CENTERPOINT_BACKBONE__PREPROCESSING_INFERENCE_ENGINE_TVM_CONFIG_HPP_ // NOLINT -#define PERCEPTION__LIDAR_CENTERPOINT_TVM__DATA__MODELS__CENTERPOINT_BACKBONE__PREPROCESSING_INFERENCE_ENGINE_TVM_CONFIG_HPP_ // NOLINT - -namespace model_zoo -{ -namespace perception -{ -namespace lidar_obstacle_detection -{ -namespace centerpoint_backbone -{ -namespace onnx_centerpoint_backbone -{ -namespace preprocessing -{ - -static const tvm_utility::pipeline::InferenceEngineTVMConfig config{ - {3, 0, 0}, // modelzoo_version - - "centerpoint_backbone", // network_name - "llvm", // network_backend - - "./preprocess.so", // network_module_path - "./", // network_graph_path - "./", // network_params_path - - // cspell: ignore DLCPU - kDLCPU, // tvm_device_type - 0, // tvm_device_id - - {{"pillar_features", kDLFloat, 32, 1, {40000, 1, 32}}, - {"coords", kDLInt, 32, 1, {40000, 3}}}, // network_inputs - - {{"spatial_features", kDLFloat, 32, 1, {1, 32, 560, 560}}} // network_outputs -}; - -} // namespace preprocessing -} // namespace onnx_centerpoint_backbone -} // namespace centerpoint_backbone -} // namespace lidar_obstacle_detection -} // namespace perception -} // namespace model_zoo -// NOLINTNEXTLINE -#endif // PERCEPTION__LIDAR_CENTERPOINT_TVM__DATA__MODELS__CENTERPOINT_BACKBONE__PREPROCESSING_INFERENCE_ENGINE_TVM_CONFIG_HPP_ diff --git a/perception/lidar_centerpoint_tvm/data/models/centerpoint_encoder/inference_engine_tvm_config.hpp b/perception/lidar_centerpoint_tvm/data/models/centerpoint_encoder/inference_engine_tvm_config.hpp deleted file mode 100644 index 3e423dbe99877..0000000000000 --- a/perception/lidar_centerpoint_tvm/data/models/centerpoint_encoder/inference_engine_tvm_config.hpp +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2021 Arm Limited and Contributors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
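
[Editorial note] The preprocessing config deleted just above wires a standalone scatter stage between the encoder and the backbone: pillar features of shape {40000, 1, 32} plus {40000, 3} coordinates become a dense {1, 32, 560, 560} feature map. A minimal C++ sketch of what that stage computes, for illustration only: the helper name, plain `std::vector` buffers (instead of TVM arrays), and the (z, y, x) coordinate ordering are assumptions, since the real kernel lives in the compiled model artifact.

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical stand-in for the compiled "scatter" function: place each
// pillar's feature vector at its (y, x) cell of the bird's-eye-view grid.
// Unused pillar slots are assumed to carry negative coordinates.
void scatter_pillars(
  const std::vector<float> & pillar_features,   // [num_pillars, feature_size]
  const std::vector<int32_t> & coords,          // [num_pillars, 3] = (z, y, x)
  std::size_t num_pillars, std::size_t feature_size, std::size_t grid_y, std::size_t grid_x,
  std::vector<float> & spatial_features)        // [feature_size, grid_y, grid_x]
{
  spatial_features.assign(feature_size * grid_y * grid_x, 0.0f);
  for (std::size_t i = 0; i < num_pillars; ++i) {
    const int32_t y = coords[i * 3 + 1];
    const int32_t x = coords[i * 3 + 2];
    if (y < 0 || x < 0) continue;  // empty slot, nothing to scatter
    for (std::size_t c = 0; c < feature_size; ++c) {
      // channel-major layout, matching the {1, 32, 560, 560} network input
      spatial_features[(c * grid_y + y) * grid_x + x] = pillar_features[i * feature_size + c];
    }
  }
}
```

Keeping scatter as its own inference-engine stage lets the TVM-compiled encoder and backbone stay dense while only this sparse-to-dense hop runs outside the compiled graphs.
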
- -#include "tvm_utility/pipeline.hpp" - -#ifndef PERCEPTION__LIDAR_CENTERPOINT_TVM__DATA__MODELS__CENTERPOINT_ENCODER__INFERENCE_ENGINE_TVM_CONFIG_HPP_ // NOLINT -#define PERCEPTION__LIDAR_CENTERPOINT_TVM__DATA__MODELS__CENTERPOINT_ENCODER__INFERENCE_ENGINE_TVM_CONFIG_HPP_ // NOLINT - -namespace model_zoo -{ -namespace perception -{ -namespace lidar_obstacle_detection -{ -namespace centerpoint_encoder -{ -namespace onnx_centerpoint_encoder -{ - -static const tvm_utility::pipeline::InferenceEngineTVMConfig config{ - {3, 0, 0}, // modelzoo_version - - "centerpoint_encoder", // network_name - "llvm", // network_backend - - "./deploy_lib.so", // network_module_path - "./deploy_graph.json", // network_graph_path - "./deploy_param.params", // network_params_path - - // cspell: ignore DLCPU - kDLCPU, // tvm_device_type - 0, // tvm_device_id - - {{"input_features", kDLFloat, 32, 1, {40000, 32, 9}}}, // network_inputs - - {{"pillar_features", kDLFloat, 32, 1, {40000, 1, 32}}} // network_outputs -}; - -} // namespace onnx_centerpoint_encoder -} // namespace centerpoint_encoder -} // namespace lidar_obstacle_detection -} // namespace perception -} // namespace model_zoo -// NOLINTNEXTLINE -#endif // PERCEPTION__LIDAR_CENTERPOINT_TVM__DATA__MODELS__CENTERPOINT_ENCODER__INFERENCE_ENGINE_TVM_CONFIG_HPP_ diff --git a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/centerpoint_config.hpp b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/centerpoint_config.hpp deleted file mode 100644 index 5825decbb78d6..0000000000000 --- a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/centerpoint_config.hpp +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2021-2022 AutoCore Ltd., TIER IV, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#ifndef LIDAR_CENTERPOINT_TVM__CENTERPOINT_CONFIG_HPP_ -#define LIDAR_CENTERPOINT_TVM__CENTERPOINT_CONFIG_HPP_ - -#include -#include - -namespace autoware -{ -namespace perception -{ -namespace lidar_centerpoint_tvm -{ -class CenterPointConfig -{ -public: - explicit CenterPointConfig( - const std::size_t class_size, const float point_feature_size, const std::size_t max_voxel_size, - const std::vector & point_cloud_range, const std::vector & voxel_size, - const std::size_t downsample_factor, const std::size_t encoder_in_feature_size, - const float score_threshold, const float circle_nms_dist_threshold, - const float yaw_norm_threshold) - { - class_size_ = class_size; - point_feature_size_ = point_feature_size; - max_voxel_size_ = max_voxel_size; - if (point_cloud_range.size() == 6) { - range_min_x_ = static_cast(point_cloud_range[0]); - range_min_y_ = static_cast(point_cloud_range[1]); - range_min_z_ = static_cast(point_cloud_range[2]); - range_max_x_ = static_cast(point_cloud_range[3]); - range_max_y_ = static_cast(point_cloud_range[4]); - range_max_z_ = static_cast(point_cloud_range[5]); - } - if (voxel_size.size() == 3) { - voxel_size_x_ = static_cast(voxel_size[0]); - voxel_size_y_ = static_cast(voxel_size[1]); - voxel_size_z_ = static_cast(voxel_size[2]); - } - downsample_factor_ = downsample_factor; - encoder_in_feature_size_ = encoder_in_feature_size; - - if (score_threshold > 0 && score_threshold < 1) { - score_threshold_ = score_threshold; - } - - if (circle_nms_dist_threshold > 0) { - circle_nms_dist_threshold_ = circle_nms_dist_threshold; - } - - if (yaw_norm_threshold >= 0 && yaw_norm_threshold < 1) { - yaw_norm_threshold_ = yaw_norm_threshold; - } - - grid_size_x_ = static_cast((range_max_x_ - range_min_x_) / voxel_size_x_); - grid_size_y_ = static_cast((range_max_y_ - range_min_y_) / voxel_size_y_); - grid_size_z_ = static_cast((range_max_z_ - range_min_z_) / voxel_size_z_); - offset_x_ = range_min_x_ + voxel_size_x_ / 2; - offset_y_ = range_min_y_ + voxel_size_y_ / 2; - offset_z_ = range_min_z_ + voxel_size_z_ / 2; - down_grid_size_x_ = grid_size_x_ / downsample_factor_; - down_grid_size_y_ = grid_size_y_ / downsample_factor_; - }; - - // input params - std::size_t class_size_{3}; - const std::size_t point_dim_size_{3}; // x, y and z - std::size_t point_feature_size_{4}; // x, y, z and time lag - std::size_t max_point_in_voxel_size_{32}; - std::size_t max_voxel_size_{40000}; - float range_min_x_{-89.6f}; - float range_min_y_{-89.6f}; - float range_min_z_{-3.0f}; - float range_max_x_{89.6f}; - float range_max_y_{89.6f}; - float range_max_z_{5.0f}; - float voxel_size_x_{0.32f}; - float voxel_size_y_{0.32f}; - float voxel_size_z_{8.0f}; - - // network params - const std::size_t batch_size_{1}; - std::size_t downsample_factor_{2}; - std::size_t encoder_in_feature_size_{9}; - const std::size_t encoder_out_feature_size_{32}; - const std::size_t head_out_size_{6}; - const std::size_t head_out_offset_size_{2}; - const std::size_t head_out_z_size_{1}; - const std::size_t head_out_dim_size_{3}; - const std::size_t head_out_rot_size_{2}; - const std::size_t head_out_vel_size_{2}; - - // post-process params - float score_threshold_{0.35f}; - float circle_nms_dist_threshold_{1.5f}; - float yaw_norm_threshold_{0.0f}; - - // calculated params - std::size_t grid_size_x_ = (range_max_x_ - range_min_x_) / voxel_size_x_; - std::size_t grid_size_y_ = (range_max_y_ - range_min_y_) / voxel_size_y_; - std::size_t grid_size_z_ = (range_max_z_ - range_min_z_) / voxel_size_z_; - float offset_x_ = 
range_min_x_ + voxel_size_x_ / 2; - float offset_y_ = range_min_y_ + voxel_size_y_ / 2; - float offset_z_ = range_min_z_ + voxel_size_z_ / 2; - std::size_t down_grid_size_x_ = grid_size_x_ / downsample_factor_; - std::size_t down_grid_size_y_ = grid_size_y_ / downsample_factor_; -}; - -} // namespace lidar_centerpoint_tvm -} // namespace perception -} // namespace autoware - -#endif // LIDAR_CENTERPOINT_TVM__CENTERPOINT_CONFIG_HPP_ diff --git a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/centerpoint_tvm.hpp b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/centerpoint_tvm.hpp deleted file mode 100644 index 865ca7d4253bf..0000000000000 --- a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/centerpoint_tvm.hpp +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2021-2022 AutoCore Ltd., TIER IV, Inc., Arm Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#ifndef LIDAR_CENTERPOINT_TVM__CENTERPOINT_TVM_HPP_ -#define LIDAR_CENTERPOINT_TVM__CENTERPOINT_TVM_HPP_ - -#include -#include -#include -#include - -#include - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -namespace autoware -{ -namespace perception -{ -namespace lidar_centerpoint_tvm -{ -using tvm_utility::pipeline::TVMArrayContainer; -using tvm_utility::pipeline::TVMArrayContainerVector; - -struct MixedInputs -{ - // The number of non-empty voxel - std::size_t num_voxels; - // The voxel features (point info in each voxel) or pillar features. - std::vector features; - // The number of points in each voxel. - std::vector num_points_per_voxel; - // The index from voxel number to its 3D position - std::vector coords; -}; - -class LIDAR_CENTERPOINT_TVM_LOCAL TVMScatterIE : public tvm_utility::pipeline::InferenceEngine -{ -public: - explicit TVMScatterIE( - tvm_utility::pipeline::InferenceEngineTVMConfig config, const std::string & pkg_name, - const std::string & data_path, const std::string & function_name); - TVMArrayContainerVector schedule(const TVMArrayContainerVector & input); - void set_coords(TVMArrayContainer coords) { coords_ = coords; } - -private: - tvm_utility::pipeline::InferenceEngineTVMConfig config_; - TVMArrayContainer coords_; - TVMArrayContainerVector output_; - tvm::runtime::PackedFunc scatter_function; -}; - -class LIDAR_CENTERPOINT_TVM_LOCAL VoxelEncoderPreProcessor -: public tvm_utility::pipeline::PreProcessor -{ -public: - /// \brief Constructor. - /// \param[in] config The TVM configuration. - /// \param[in] config_mod The centerpoint model configuration. - explicit VoxelEncoderPreProcessor( - const tvm_utility::pipeline::InferenceEngineTVMConfig & config, - const CenterPointConfig & config_mod); - - /// \brief Convert the voxel_features to encoder_in_features (a TVM array). - /// \param[in] voxel_inputs The voxel features related input - /// \return A TVM array containing the encoder_in_features. - /// \throw std::runtime_error If the features are incorrectly configured. 
- TVMArrayContainerVector schedule(const MixedInputs & voxel_inputs); - -private: - const int64_t max_voxel_size; - const int64_t max_point_in_voxel_size; - const int64_t encoder_in_feature_size; - const int64_t input_datatype_bytes; - const CenterPointConfig config_detail; - std::vector encoder_in_features; - TVMArrayContainer output; -}; - -class LIDAR_CENTERPOINT_TVM_LOCAL BackboneNeckHeadPostProcessor -: public tvm_utility::pipeline::PostProcessor> -{ -public: - /// \brief Constructor. - /// \param[in] config The TVM configuration. - /// \param[in] config_mod The centerpoint model configuration. - explicit BackboneNeckHeadPostProcessor( - const tvm_utility::pipeline::InferenceEngineTVMConfig & config, - const CenterPointConfig & config_mod); - - /// \brief Copy the inference result. - /// \param[in] input The result of the inference engine. - /// \return The inferred data. - std::vector schedule(const TVMArrayContainerVector & input); - -private: - const int64_t output_datatype_bytes; - const CenterPointConfig config_detail; - std::vector head_out_heatmap; - std::vector head_out_offset; - std::vector head_out_z; - std::vector head_out_dim; - std::vector head_out_rot; - std::vector head_out_vel; -}; - -class LIDAR_CENTERPOINT_TVM_PUBLIC CenterPointTVM -{ -public: - /// \brief Constructor - /// \param[in] dense_param The densification parameter used to constructing vg_ptr. - /// \param[in] config The CenterPoint model configuration. - explicit CenterPointTVM( - const DensificationParam & densification_param, const CenterPointConfig & config, - const std::string & data_path); - - ~CenterPointTVM(); - - bool detect( - const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg, const tf2_ros::Buffer & tf_buffer, - std::vector & det_boxes3d); - -protected: - void initPtr(); - - virtual bool preprocess( - const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg, const tf2_ros::Buffer & tf_buffer); - - using VE_PrePT = VoxelEncoderPreProcessor; - using IET = tvm_utility::pipeline::InferenceEngineTVM; - using BNH_PostPT = BackboneNeckHeadPostProcessor; - using TSE = TVMScatterIE; - using TSP = tvm_utility::pipeline::TowStagePipeline; - - tvm_utility::pipeline::InferenceEngineTVMConfig config_ve; - tvm_utility::pipeline::InferenceEngineTVMConfig config_bnh; - - // Voxel Encoder Pipeline. - std::shared_ptr VE_PreP; - std::shared_ptr VE_IE; - - // Backbone Neck Head Pipeline. - std::shared_ptr BNH_IE; - std::shared_ptr BNH_PostP; - - std::shared_ptr scatter_ie; - std::shared_ptr> - TSP_pipeline; - - // Variables - std::unique_ptr vg_ptr_{nullptr}; - - CenterPointConfig config_; - std::size_t num_voxels_{0}; - std::shared_ptr> voxels_; - std::shared_ptr> coordinates_; - std::shared_ptr> num_points_per_voxel_; - TVMArrayContainer coords_tvm_; -}; - -} // namespace lidar_centerpoint_tvm -} // namespace perception -} // namespace autoware - -#endif // LIDAR_CENTERPOINT_TVM__CENTERPOINT_TVM_HPP_ diff --git a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/node.hpp b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/node.hpp deleted file mode 100644 index 88a43139908a7..0000000000000 --- a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/node.hpp +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2021-2022 AutoCore Ltd., TIER IV, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#ifndef LIDAR_CENTERPOINT_TVM__NODE_HPP_ -#define LIDAR_CENTERPOINT_TVM__NODE_HPP_ - -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include -#include -#include - -namespace autoware -{ -namespace perception -{ -namespace lidar_centerpoint_tvm -{ -class LIDAR_CENTERPOINT_TVM_PUBLIC LidarCenterPointTVMNode : public rclcpp::Node -{ -public: - explicit LidarCenterPointTVMNode(const rclcpp::NodeOptions & node_options); - -private: - void pointCloudCallback(const sensor_msgs::msg::PointCloud2::ConstSharedPtr input_pointcloud_msg); - - tf2_ros::Buffer tf_buffer_; - tf2_ros::TransformListener tf_listener_{tf_buffer_}; - - rclcpp::Subscription::SharedPtr pointcloud_sub_; - rclcpp::Publisher::SharedPtr objects_pub_; - - float score_threshold_{0.0}; - std::vector class_names_; - bool rename_car_to_truck_and_bus_{false}; - bool has_twist_{false}; - - std::unique_ptr detector_ptr_{nullptr}; - - // debugger - std::unique_ptr> stop_watch_ptr_{ - nullptr}; - std::unique_ptr debug_publisher_ptr_{nullptr}; -}; - -} // namespace lidar_centerpoint_tvm -} // namespace perception -} // namespace autoware - -#endif // LIDAR_CENTERPOINT_TVM__NODE_HPP_ diff --git a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/postprocess/circle_nms.hpp b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/postprocess/circle_nms.hpp deleted file mode 100644 index 3a0847f00bd21..0000000000000 --- a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/postprocess/circle_nms.hpp +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2022 AutoCore Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#ifndef LIDAR_CENTERPOINT_TVM__POSTPROCESS__CIRCLE_NMS_HPP_ -#define LIDAR_CENTERPOINT_TVM__POSTPROCESS__CIRCLE_NMS_HPP_ - -#include - -#include - -namespace autoware -{ -namespace perception -{ -namespace lidar_centerpoint_tvm -{ -// Non-maximum suppression (NMS) uses the distance on the xy plane instead of -// intersection over union (IoU) to suppress overlapped objects. 
-std::size_t circleNMS( - std::vector & boxes3d, const float dist_thresh, std::vector & keep_mask); - -} // namespace lidar_centerpoint_tvm -} // namespace perception -} // namespace autoware - -#endif // LIDAR_CENTERPOINT_TVM__POSTPROCESS__CIRCLE_NMS_HPP_ diff --git a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/postprocess/generate_detected_boxes.hpp b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/postprocess/generate_detected_boxes.hpp deleted file mode 100644 index ba91addc5e02e..0000000000000 --- a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/postprocess/generate_detected_boxes.hpp +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2022 AutoCore Ltd., TIER IV, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#ifndef LIDAR_CENTERPOINT_TVM__POSTPROCESS__GENERATE_DETECTED_BOXES_HPP_ -#define LIDAR_CENTERPOINT_TVM__POSTPROCESS__GENERATE_DETECTED_BOXES_HPP_ - -#include -#include - -#include - -namespace autoware -{ -namespace perception -{ -namespace lidar_centerpoint_tvm -{ -void generateDetectedBoxes3D( - const std::vector & out_heatmap, const std::vector & out_offset, - const std::vector & out_z, const std::vector & out_dim, - const std::vector & out_rot, const std::vector & out_vel, - const CenterPointConfig & config, std::vector & det_boxes3d); - -} // namespace lidar_centerpoint_tvm -} // namespace perception -} // namespace autoware - -#endif // LIDAR_CENTERPOINT_TVM__POSTPROCESS__GENERATE_DETECTED_BOXES_HPP_ diff --git a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/preprocess/generate_features.hpp b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/preprocess/generate_features.hpp deleted file mode 100644 index d799dc0bfac57..0000000000000 --- a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/preprocess/generate_features.hpp +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2021-2022 AutoCore Ltd., TIER IV, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
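
[Editorial note] `circleNMS`, declared a few lines up, suppresses overlapping detections by center distance on the xy plane rather than by IoU. A hedged sketch of the greedy algorithm (assuming the boxes arrive sorted by descending score; `Box3D` here is a pared-down stand-in for the package's struct):

```cpp
#include <cstddef>
#include <vector>

struct Box3D  // reduced stand-in for lidar_centerpoint_tvm::Box3D
{
  float score;
  float x;
  float y;
};

// Greedy circle NMS: keep a box unless its center lies within dist_thresh of
// an already-kept, higher-scored box. Returns the number of kept boxes.
std::size_t circleNMS(
  const std::vector<Box3D> & boxes3d, const float dist_thresh, std::vector<bool> & keep_mask)
{
  const float thresh2 = dist_thresh * dist_thresh;
  keep_mask.assign(boxes3d.size(), false);
  std::size_t num_kept = 0;
  for (std::size_t i = 0; i < boxes3d.size(); ++i) {  // boxes sorted by descending score
    bool suppressed = false;
    for (std::size_t j = 0; j < i; ++j) {
      if (!keep_mask[j]) continue;
      const float dx = boxes3d[i].x - boxes3d[j].x;
      const float dy = boxes3d[i].y - boxes3d[j].y;
      if (dx * dx + dy * dy < thresh2) {
        suppressed = true;
        break;
      }
    }
    if (!suppressed) {
      keep_mask[i] = true;
      ++num_kept;
    }
  }
  return num_kept;
}
```

The squared-distance comparison avoids a sqrt per pair; `circle_nms_dist_threshold_` (1.5 m by default in `CenterPointConfig`) would be passed as `dist_thresh`.
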
- -#ifndef LIDAR_CENTERPOINT_TVM__PREPROCESS__GENERATE_FEATURES_HPP_ -#define LIDAR_CENTERPOINT_TVM__PREPROCESS__GENERATE_FEATURES_HPP_ - -#include - -#include -#include - -namespace autoware -{ -namespace perception -{ -namespace lidar_centerpoint_tvm -{ -void generateFeatures( - const std::vector & voxel_features, const std::vector & voxel_num_points, - const std::vector & coords, const std::size_t num_voxels, - const CenterPointConfig & config, std::vector & features); - -} // namespace lidar_centerpoint_tvm -} // namespace perception -} // namespace autoware - -#endif // LIDAR_CENTERPOINT_TVM__PREPROCESS__GENERATE_FEATURES_HPP_ diff --git a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/preprocess/pointcloud_densification.hpp b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/preprocess/pointcloud_densification.hpp deleted file mode 100644 index 70ec420666a3f..0000000000000 --- a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/preprocess/pointcloud_densification.hpp +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2021-2022 AutoCore Ltd., TIER IV, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#ifndef LIDAR_CENTERPOINT_TVM__PREPROCESS__POINTCLOUD_DENSIFICATION_HPP_ -#define LIDAR_CENTERPOINT_TVM__PREPROCESS__POINTCLOUD_DENSIFICATION_HPP_ - -#include - -#include -#include -#ifdef ROS_DISTRO_GALACTIC -#include -#else -#include -#endif - -#include -#include -#include - -namespace autoware -{ -namespace perception -{ -namespace lidar_centerpoint_tvm -{ -class LIDAR_CENTERPOINT_TVM_LOCAL DensificationParam -{ -public: - DensificationParam(const std::string & world_frame_id, const uint32_t num_past_frames) - : world_frame_id_(std::move(world_frame_id)), - pointcloud_cache_size_(num_past_frames + /*current frame*/ 1) - { - } - - std::string world_frame_id() const { return world_frame_id_; } - uint32_t pointcloud_cache_size() const { return pointcloud_cache_size_; } - -private: - std::string world_frame_id_; - uint32_t pointcloud_cache_size_{1}; -}; - -struct PointCloudWithTransform -{ - sensor_msgs::msg::PointCloud2 pointcloud_msg; - Eigen::Affine3f affine_past2world; -}; - -class LIDAR_CENTERPOINT_TVM_LOCAL PointCloudDensification -{ -public: - explicit PointCloudDensification(const DensificationParam & param); - - bool enqueuePointCloud( - const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg, const tf2_ros::Buffer & tf_buffer); - - double getCurrentTimestamp() const { return current_timestamp_; } - Eigen::Affine3f getAffineWorldToCurrent() const { return affine_world2current_; } - std::list::iterator getPointCloudCacheIter() - { - return pointcloud_cache_.begin(); - } - bool isCacheEnd(std::list::iterator iter) - { - return iter == pointcloud_cache_.end(); - } - -private: - void enqueue(const sensor_msgs::msg::PointCloud2 & msg, const Eigen::Affine3f & affine); - void dequeue(); - - DensificationParam param_; - double current_timestamp_{0.0}; - Eigen::Affine3f affine_world2current_; - std::list pointcloud_cache_; -}; - -} // 
namespace lidar_centerpoint_tvm -} // namespace perception -} // namespace autoware - -#endif // LIDAR_CENTERPOINT_TVM__PREPROCESS__POINTCLOUD_DENSIFICATION_HPP_ diff --git a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/preprocess/voxel_generator.hpp b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/preprocess/voxel_generator.hpp deleted file mode 100644 index 7ef49841c680b..0000000000000 --- a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/preprocess/voxel_generator.hpp +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2021-2022 AutoCore Ltd., TIER IV, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#ifndef LIDAR_CENTERPOINT_TVM__PREPROCESS__VOXEL_GENERATOR_HPP_ -#define LIDAR_CENTERPOINT_TVM__PREPROCESS__VOXEL_GENERATOR_HPP_ - -#include -#include -#include - -#include - -#include -#include - -namespace autoware -{ -namespace perception -{ -namespace lidar_centerpoint_tvm -{ -class LIDAR_CENTERPOINT_TVM_LOCAL VoxelGeneratorTemplate -{ -public: - explicit VoxelGeneratorTemplate( - const DensificationParam & param, const CenterPointConfig & config); - - virtual std::size_t pointsToVoxels( - std::vector & voxels, std::vector & coordinates, - std::vector & num_points_per_voxel) = 0; - - bool enqueuePointCloud( - const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg, const tf2_ros::Buffer & tf_buffer); - -protected: - std::unique_ptr pd_ptr_{nullptr}; - - CenterPointConfig config_; - std::array range_; - std::array grid_size_; - std::array recip_voxel_size_; -}; - -class LIDAR_CENTERPOINT_TVM_LOCAL VoxelGenerator : public VoxelGeneratorTemplate -{ -public: - using VoxelGeneratorTemplate::VoxelGeneratorTemplate; - - /** - * @brief Traverse all the lidar points and put each point into the corresponding voxel. - * - * @param voxels To store point info in each voxel - * @param coordinates To store the index from voxel number to its 3D position - * @param num_points_per_voxel To store the number of points in each voxel - * @return The number of non-empty voxel - */ - std::size_t pointsToVoxels( - std::vector & voxels, std::vector & coordinates, - std::vector & num_points_per_voxel) override; -}; - -} // namespace lidar_centerpoint_tvm -} // namespace perception -} // namespace autoware - -#endif // LIDAR_CENTERPOINT_TVM__PREPROCESS__VOXEL_GENERATOR_HPP_ diff --git a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/ros_utils.hpp b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/ros_utils.hpp deleted file mode 100644 index a33430a1b09ba..0000000000000 --- a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/ros_utils.hpp +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2022 AutoCore Ltd., TIER IV, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#ifndef LIDAR_CENTERPOINT_TVM__ROS_UTILS_HPP_ -#define LIDAR_CENTERPOINT_TVM__ROS_UTILS_HPP_ - -#include - -#include -#include -#include -#include - -#include -#include - -namespace autoware -{ -namespace perception -{ -namespace lidar_centerpoint_tvm -{ -void box3DToDetectedObject( - const Box3D & box3d, const std::vector & class_names, - const bool rename_car_to_truck_and_bus, const bool has_twist, - autoware_perception_msgs::msg::DetectedObject & obj); - -uint8_t getSemanticType(const std::string & class_name); - -bool isCarLikeVehicleLabel(const uint8_t label); - -} // namespace lidar_centerpoint_tvm -} // namespace perception -} // namespace autoware - -#endif // LIDAR_CENTERPOINT_TVM__ROS_UTILS_HPP_ diff --git a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/single_inference_node.hpp b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/single_inference_node.hpp deleted file mode 100644 index 0a4fcb27ac202..0000000000000 --- a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/single_inference_node.hpp +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2022 TIER IV, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
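
[Editorial note] ros_utils.hpp above declares the mapping from CenterPoint class names to `autoware_perception_msgs` labels. A minimal sketch of how `getSemanticType` and `isCarLikeVehicleLabel` typically look; the exact label set is an assumption extrapolated from the declared class names (CAR, PEDESTRIAN, BICYCLE) and the message's standard constants:

```cpp
#include <cstdint>
#include <string>

#include <autoware_perception_msgs/msg/object_classification.hpp>

using Label = autoware_perception_msgs::msg::ObjectClassification;

// Map a class name from centerpoint.param.yaml to a semantic label.
uint8_t getSemanticType(const std::string & class_name)
{
  if (class_name == "CAR") return Label::CAR;
  if (class_name == "TRUCK") return Label::TRUCK;
  if (class_name == "BUS") return Label::BUS;
  if (class_name == "PEDESTRIAN") return Label::PEDESTRIAN;
  if (class_name == "BICYCLE") return Label::BICYCLE;
  return Label::UNKNOWN;
}

// Car-like labels share the rename_car_to_truck_and_bus size heuristic.
bool isCarLikeVehicleLabel(const uint8_t label)
{
  return label == Label::CAR || label == Label::TRUCK || label == Label::BUS;
}
```
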
- -#ifndef LIDAR_CENTERPOINT_TVM__SINGLE_INFERENCE_NODE_HPP_ -#define LIDAR_CENTERPOINT_TVM__SINGLE_INFERENCE_NODE_HPP_ - -#include -#include -#include -#include - -#include -#include -#include -#include - -#include -#include -#include - -namespace autoware -{ -namespace perception -{ -namespace lidar_centerpoint_tvm -{ -class SingleInferenceLidarCenterPointNode : public rclcpp::Node -{ -public: - explicit SingleInferenceLidarCenterPointNode(const rclcpp::NodeOptions & node_options); - -private: - void detect(const std::string & pcd_path, const std::string & detections_path); - std::vector getVertices( - const autoware_perception_msgs::msg::Shape & shape, const Eigen::Affine3d & pose) const; - void dumpDetectionsAsMesh( - const autoware_perception_msgs::msg::DetectedObjects & objects_msg, - const std::string & output_path) const; - - tf2_ros::Buffer tf_buffer_; - tf2_ros::TransformListener tf_listener_{tf_buffer_}; - - float score_threshold_{0.0}; - std::vector class_names_; - bool rename_car_to_truck_and_bus_{false}; - bool has_twist_{false}; - - std::unique_ptr detector_ptr_{nullptr}; -}; - -} // namespace lidar_centerpoint_tvm -} // namespace perception -} // namespace autoware - -#endif // LIDAR_CENTERPOINT_TVM__SINGLE_INFERENCE_NODE_HPP_ diff --git a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/utils.hpp b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/utils.hpp deleted file mode 100644 index 259deef53f189..0000000000000 --- a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/utils.hpp +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2022 AutoCore Ltd., TIER IV, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#ifndef LIDAR_CENTERPOINT_TVM__UTILS_HPP_ -#define LIDAR_CENTERPOINT_TVM__UTILS_HPP_ - -#include -#include - -namespace autoware -{ -namespace perception -{ -namespace lidar_centerpoint_tvm -{ -struct Box3D -{ - // initializer not allowed for __shared__ variable - int32_t label; - float score; - float x; - float y; - float z; - float length; - float width; - float height; - float yaw; - float vel_x; - float vel_y; -}; - -// cspell: ignore divup -std::size_t divup(const std::size_t a, const std::size_t b); - -} // namespace lidar_centerpoint_tvm -} // namespace perception -} // namespace autoware - -#endif // LIDAR_CENTERPOINT_TVM__UTILS_HPP_ diff --git a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/visibility_control.hpp b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/visibility_control.hpp deleted file mode 100644 index a4afc54afc5fb..0000000000000 --- a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/visibility_control.hpp +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2021-2022 Arm Ltd., AutoCore Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#ifndef LIDAR_CENTERPOINT_TVM__VISIBILITY_CONTROL_HPP_ -#define LIDAR_CENTERPOINT_TVM__VISIBILITY_CONTROL_HPP_ - -#if defined(__WIN32) -#if defined(LIDAR_CENTERPOINT_TVM_BUILDING_DLL) || defined(LIDAR_CENTERPOINT_TVM_EXPORTS) -#define LIDAR_CENTERPOINT_TVM_PUBLIC __declspec(dllexport) -#define LIDAR_CENTERPOINT_TVM__LOCAL -#else -#define LIDAR_CENTERPOINT_TVM_PUBLIC __declspec(dllimport) -#define LIDAR_CENTERPOINT_TVM__LOCAL -#endif -#elif defined(__linux__) -#define LIDAR_CENTERPOINT_TVM_PUBLIC __attribute__((visibility("default"))) -#define LIDAR_CENTERPOINT_TVM_LOCAL __attribute__((visibility("hidden"))) -#elif defined(__APPLE__) -#define LIDAR_CENTERPOINT_TVM_PUBLIC __attribute__((visibility("default"))) -#define LIDAR_CENTERPOINT_TVM_LOCAL __attribute__((visibility("hidden"))) -#else -#error "Unsupported Build Configuration" -#endif - -#endif // LIDAR_CENTERPOINT_TVM__VISIBILITY_CONTROL_HPP_ diff --git a/perception/lidar_centerpoint_tvm/launch/lidar_centerpoint_tvm.launch.xml b/perception/lidar_centerpoint_tvm/launch/lidar_centerpoint_tvm.launch.xml deleted file mode 100644 index 2bd6e3aa15c21..0000000000000 --- a/perception/lidar_centerpoint_tvm/launch/lidar_centerpoint_tvm.launch.xml +++ /dev/null @@ -1,26 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/perception/lidar_centerpoint_tvm/launch/single_inference_lidar_centerpoint_tvm.launch.xml b/perception/lidar_centerpoint_tvm/launch/single_inference_lidar_centerpoint_tvm.launch.xml deleted file mode 100644 index c2e0801fbd11c..0000000000000 --- a/perception/lidar_centerpoint_tvm/launch/single_inference_lidar_centerpoint_tvm.launch.xml +++ /dev/null @@ -1,33 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/perception/lidar_centerpoint_tvm/lib/centerpoint_tvm.cpp b/perception/lidar_centerpoint_tvm/lib/centerpoint_tvm.cpp deleted file mode 100644 index 91f727448b76a..0000000000000 --- a/perception/lidar_centerpoint_tvm/lib/centerpoint_tvm.cpp +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright 2021-2022 AutoCore Ltd., TIER IV, Inc., Arm Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
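
[Editorial note] Before the deleted implementation files begin: the `VoxelGenerator::pointsToVoxels` interface documented earlier buckets every lidar point into a fixed-capacity voxel grid. A simplified single-frame sketch of that bucketing, under stated assumptions: no densification, XYZI points, a (z, y, x) coordinate ordering, and the buffer layout implied by `max_point_in_voxel_size` in `CenterPointConfig`.

```cpp
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <unordered_map>
#include <vector>

struct VoxelGrid  // defaults mirroring centerpoint.param.yaml
{
  float range_min[3]{-89.6f, -89.6f, -3.0f};
  float range_max[3]{89.6f, 89.6f, 5.0f};
  float voxel_size[3]{0.32f, 0.32f, 8.0f};
  std::size_t grid_size[3]{560, 560, 1};
  std::size_t max_voxels{40000};
  std::size_t max_points_per_voxel{32};
  std::size_t point_features{4};  // x, y, z, plus intensity here (time lag in the real package)
};

// Bucket points into voxels; returns the number of non-empty voxels.
// voxels: [max_voxels, max_points_per_voxel, point_features], zero-filled
// coords: [max_voxels, 3] as (z, y, x) indices, -1 for unused slots
std::size_t pointsToVoxels(
  const std::vector<float> & points, const VoxelGrid & g, std::vector<float> & voxels,
  std::vector<int32_t> & coords, std::vector<int32_t> & num_points_per_voxel)
{
  voxels.assign(g.max_voxels * g.max_points_per_voxel * g.point_features, 0.0f);
  coords.assign(g.max_voxels * 3, -1);
  num_points_per_voxel.assign(g.max_voxels, 0);

  std::unordered_map<std::size_t, std::size_t> cell_to_voxel;  // flat cell id -> voxel slot
  std::size_t num_voxels = 0;

  for (std::size_t p = 0; p * g.point_features < points.size(); ++p) {
    std::size_t idx[3];
    bool in_range = true;
    for (int d = 0; d < 3; ++d) {
      const float v = points[p * g.point_features + d];
      if (v < g.range_min[d] || v >= g.range_max[d]) {
        in_range = false;
        break;
      }
      idx[d] = static_cast<std::size_t>(std::floor((v - g.range_min[d]) / g.voxel_size[d]));
    }
    if (!in_range) continue;  // points outside the detection range are dropped

    const std::size_t cell = (idx[2] * g.grid_size[1] + idx[1]) * g.grid_size[0] + idx[0];
    auto it = cell_to_voxel.find(cell);
    if (it == cell_to_voxel.end()) {
      if (num_voxels == g.max_voxels) continue;  // grid is full, drop the point
      it = cell_to_voxel.emplace(cell, num_voxels).first;
      coords[num_voxels * 3 + 0] = static_cast<int32_t>(idx[2]);  // z
      coords[num_voxels * 3 + 1] = static_cast<int32_t>(idx[1]);  // y
      coords[num_voxels * 3 + 2] = static_cast<int32_t>(idx[0]);  // x
      ++num_voxels;
    }
    const std::size_t v = it->second;
    int32_t & count = num_points_per_voxel[v];
    if (static_cast<std::size_t>(count) < g.max_points_per_voxel) {
      for (std::size_t f = 0; f < g.point_features; ++f) {
        voxels[(v * g.max_points_per_voxel + count) * g.point_features + f] =
          points[p * g.point_features + f];
      }
      ++count;
    }
  }
  return num_voxels;
}
```
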
- -#include "lidar_centerpoint_tvm/centerpoint_tvm.hpp" - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -auto config_en = model_zoo::perception::lidar_obstacle_detection::centerpoint_encoder:: - onnx_centerpoint_encoder::config; -auto config_bk = model_zoo::perception::lidar_obstacle_detection::centerpoint_backbone:: - onnx_centerpoint_backbone::config; -auto config_scatter = model_zoo::perception::lidar_obstacle_detection::centerpoint_backbone:: - onnx_centerpoint_backbone::preprocessing::config; - -namespace autoware -{ -namespace perception -{ -namespace lidar_centerpoint_tvm -{ - -TVMScatterIE::TVMScatterIE( - tvm_utility::pipeline::InferenceEngineTVMConfig config, const std::string & pkg_name, - const std::string & data_path, const std::string & function_name) -: config_(config) -{ - std::string network_prefix = data_path + "/" + pkg_name + "/models/" + config.network_name + "/"; - std::string network_module_path = network_prefix + config.network_module_path; - - std::ifstream module(network_module_path); - if (!module.good()) { - throw std::runtime_error("File " + network_module_path + " specified in config.hpp not found"); - } - module.close(); - tvm::runtime::Module runtime_mod = tvm::runtime::Module::LoadFromFile(network_module_path); - - scatter_function = runtime_mod.GetFunction(function_name); - - for (auto & output_config : config.network_outputs) { - output_.push_back(TVMArrayContainer( - output_config.node_shape, output_config.tvm_dtype_code, output_config.tvm_dtype_bits, - output_config.tvm_dtype_lanes, config.tvm_device_type, config.tvm_device_id)); - } -} - -TVMArrayContainerVector TVMScatterIE::schedule(const TVMArrayContainerVector & input) -{ - scatter_function(input[0].getArray(), coords_.getArray(), output_[0].getArray()); - - return output_; -} - -VoxelEncoderPreProcessor::VoxelEncoderPreProcessor( - const tvm_utility::pipeline::InferenceEngineTVMConfig & config, - const CenterPointConfig & config_mod) -: max_voxel_size(config.network_inputs[0].node_shape[0]), - max_point_in_voxel_size(config.network_inputs[0].node_shape[1]), - encoder_in_feature_size(config.network_inputs[0].node_shape[2]), - input_datatype_bytes(config.network_inputs[0].tvm_dtype_bits / 8), - config_detail(config_mod) -{ - encoder_in_features.resize(max_voxel_size * max_point_in_voxel_size * encoder_in_feature_size); - // Allocate input variable - std::vector shape_x{1, max_voxel_size, max_point_in_voxel_size, encoder_in_feature_size}; - TVMArrayContainer x{ - shape_x, - config.network_inputs[0].tvm_dtype_code, - config.network_inputs[0].tvm_dtype_bits, - config.network_inputs[0].tvm_dtype_lanes, - config.tvm_device_type, - config.tvm_device_id}; - output = x; -} - -TVMArrayContainerVector VoxelEncoderPreProcessor::schedule(const MixedInputs & voxel_inputs) -{ - // generate encoder_in_features from the voxels - generateFeatures( - voxel_inputs.features, voxel_inputs.num_points_per_voxel, voxel_inputs.coords, - voxel_inputs.num_voxels, config_detail, encoder_in_features); - - TVMArrayCopyFromBytes( - output.getArray(), encoder_in_features.data(), - max_voxel_size * max_point_in_voxel_size * encoder_in_feature_size * input_datatype_bytes); - - return {output}; -} - -BackboneNeckHeadPostProcessor::BackboneNeckHeadPostProcessor( - const tvm_utility::pipeline::InferenceEngineTVMConfig & config, - const CenterPointConfig & config_mod) -: output_datatype_bytes(config.network_outputs[0].tvm_dtype_bits / 8), config_detail(config_mod) -{ - 
head_out_heatmap.resize( - config.network_outputs[0].node_shape[1] * config.network_outputs[0].node_shape[2] * - config.network_outputs[0].node_shape[3]); - head_out_offset.resize( - config.network_outputs[1].node_shape[1] * config.network_outputs[1].node_shape[2] * - config.network_outputs[1].node_shape[3]); - head_out_z.resize( - config.network_outputs[2].node_shape[1] * config.network_outputs[2].node_shape[2] * - config.network_outputs[2].node_shape[3]); - head_out_dim.resize( - config.network_outputs[3].node_shape[1] * config.network_outputs[3].node_shape[2] * - config.network_outputs[3].node_shape[3]); - head_out_rot.resize( - config.network_outputs[4].node_shape[1] * config.network_outputs[4].node_shape[2] * - config.network_outputs[4].node_shape[3]); - head_out_vel.resize( - config.network_outputs[5].node_shape[1] * config.network_outputs[5].node_shape[2] * - config.network_outputs[5].node_shape[3]); -} - -std::vector BackboneNeckHeadPostProcessor::schedule(const TVMArrayContainerVector & input) -{ - TVMArrayCopyToBytes( - input[0].getArray(), head_out_heatmap.data(), head_out_heatmap.size() * output_datatype_bytes); - TVMArrayCopyToBytes( - input[1].getArray(), head_out_offset.data(), head_out_offset.size() * output_datatype_bytes); - TVMArrayCopyToBytes( - input[2].getArray(), head_out_z.data(), head_out_z.size() * output_datatype_bytes); - TVMArrayCopyToBytes( - input[3].getArray(), head_out_dim.data(), head_out_dim.size() * output_datatype_bytes); - TVMArrayCopyToBytes( - input[4].getArray(), head_out_rot.data(), head_out_rot.size() * output_datatype_bytes); - TVMArrayCopyToBytes( - input[5].getArray(), head_out_vel.data(), head_out_vel.size() * output_datatype_bytes); - - std::vector det_boxes3d; - - generateDetectedBoxes3D( - head_out_heatmap, head_out_offset, head_out_z, head_out_dim, head_out_rot, head_out_vel, - config_detail, det_boxes3d); - - return det_boxes3d; -} - -CenterPointTVM::CenterPointTVM( - const DensificationParam & densification_param, const CenterPointConfig & config, - const std::string & data_path) -: config_ve(config_en), - config_bnh(config_bk), - VE_PreP(std::make_shared(config_en, config)), - VE_IE(std::make_shared(config_en, "lidar_centerpoint_tvm", data_path)), - BNH_IE(std::make_shared(config_bk, "lidar_centerpoint_tvm", data_path)), - BNH_PostP(std::make_shared(config_bk, config)), - scatter_ie(std::make_shared(config_scatter, "lidar_centerpoint_tvm", data_path, "scatter")), - TSP_pipeline(std::make_shared(VE_PreP, VE_IE, scatter_ie, BNH_IE, BNH_PostP)), - config_(config) -{ - vg_ptr_ = std::make_unique(densification_param, config_); - initPtr(); -} - -CenterPointTVM::~CenterPointTVM() -{ -} - -void CenterPointTVM::initPtr() -{ - const auto voxels_size = - config_.max_voxel_size_ * config_.max_point_in_voxel_size_ * config_.point_feature_size_; - const auto coordinates_size = config_.max_voxel_size_ * config_.point_dim_size_; - - voxels_ = std::make_shared>(voxels_size); - coordinates_ = std::make_shared>(coordinates_size); - num_points_per_voxel_ = std::make_shared>(config_.max_voxel_size_); - std::vector shape_coords{ - config_scatter.network_inputs[1].node_shape[0], config_scatter.network_inputs[1].node_shape[1]}; - coords_tvm_ = TVMArrayContainer( - shape_coords, config_scatter.network_inputs[1].tvm_dtype_code, - config_scatter.network_inputs[1].tvm_dtype_bits, - config_scatter.network_inputs[1].tvm_dtype_lanes, config_scatter.tvm_device_type, - config_scatter.tvm_device_id); -} - -bool CenterPointTVM::detect( - const 
sensor_msgs::msg::PointCloud2 & input_pointcloud_msg, const tf2_ros::Buffer & tf_buffer, - std::vector & det_boxes3d) -{ - std::fill(voxels_->begin(), voxels_->end(), 0); - std::fill(coordinates_->begin(), coordinates_->end(), -1); - std::fill(num_points_per_voxel_->begin(), num_points_per_voxel_->end(), 0); - - if (!preprocess(input_pointcloud_msg, tf_buffer)) { - RCLCPP_WARN_STREAM( - rclcpp::get_logger("lidar_centerpoint"), "Fail to preprocess and skip to detect."); - return false; - } - - MixedInputs voxel_inputs{num_voxels_, *voxels_, *num_points_per_voxel_, *coordinates_}; - auto bnh_output = TSP_pipeline->schedule(voxel_inputs); - - det_boxes3d = bnh_output; - if (det_boxes3d.size() == 0) { - RCLCPP_WARN_STREAM(rclcpp::get_logger("lidar_centerpoint_tvm"), "No detected boxes."); - } - - return true; -} - -bool CenterPointTVM::preprocess( - const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg, const tf2_ros::Buffer & tf_buffer) -{ - bool is_success = vg_ptr_->enqueuePointCloud(input_pointcloud_msg, tf_buffer); - if (!is_success) { - return false; - } - num_voxels_ = vg_ptr_->pointsToVoxels(*voxels_, *coordinates_, *num_points_per_voxel_); - if (num_voxels_ == 0) { - return false; - } - - TVMArrayCopyFromBytes( - coords_tvm_.getArray(), coordinates_->data(), coordinates_->size() * sizeof(int32_t)); - - scatter_ie->set_coords(coords_tvm_); - - return true; -} - -} // namespace lidar_centerpoint_tvm -} // namespace perception -} // namespace autoware diff --git a/perception/lidar_centerpoint_tvm/lib/postprocess/circle_nms.cpp b/perception/lidar_centerpoint_tvm/lib/postprocess/circle_nms.cpp deleted file mode 100644 index 0ab51347be772..0000000000000 --- a/perception/lidar_centerpoint_tvm/lib/postprocess/circle_nms.cpp +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2022 AutoCore Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
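The `CenterPointTVM::detect()` / `preprocess()` pair removed above reduces to a short sequence: reset the voxel scratch buffers, densify and voxelize the cloud, copy the voxel coordinates into a TVM array for the scatter stage, run the five-stage pipeline, and return the decoded boxes. A minimal, self-contained C++ sketch of that control flow follows; `voxelize` and `run_pipeline` are hypothetical stand-ins for the real TVM pipeline stages, not the actual API.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

struct Box3D
{
  float x, y, z, score;
};

// Stand-in for VoxelGenerator::pointsToVoxels(): bins the (densified) cloud
// into pillars and reports how many voxels were produced.
std::size_t voxelize(
  std::vector<float> & voxels, std::vector<int32_t> & coords,
  std::vector<float> & num_points_per_voxel)
{
  (void)voxels;
  (void)coords;
  (void)num_points_per_voxel;
  return 1;  // pretend a single voxel was produced
}

// Stand-in for TSP_pipeline->schedule(): pre-process -> voxel encoder ->
// scatter -> backbone/neck/head -> post-process.
std::vector<Box3D> run_pipeline()
{
  return {{1.0f, 2.0f, 0.0f, 0.9f}};
}

int main()
{
  std::vector<float> voxels(4);
  std::vector<int32_t> coords(3);
  std::vector<float> num_points_per_voxel(1);

  // detect() first resets the scratch buffers.
  std::fill(voxels.begin(), voxels.end(), 0.0f);
  std::fill(coords.begin(), coords.end(), -1);
  std::fill(num_points_per_voxel.begin(), num_points_per_voxel.end(), 0.0f);

  // preprocess() bails out when densification or voxelization yields nothing.
  if (voxelize(voxels, coords, num_points_per_voxel) == 0) {
    return 1;
  }

  const auto boxes = run_pipeline();
  std::cout << boxes.size() << " box(es) detected" << std::endl;
  return 0;
}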
- -#include "lidar_centerpoint_tvm/postprocess/circle_nms.hpp" - -#include - -#include - -namespace autoware -{ -namespace perception -{ -namespace lidar_centerpoint_tvm -{ - -inline float dist2dPow(const Box3D & a, const Box3D & b) -{ - return powf(a.x - b.x, 2) + powf(a.y - b.y, 2); -} - -std::size_t circleNMS( - std::vector & boxes3d, const float dist_thresh, std::vector & keep_mask) -{ - // params: boxes3d, vector sorted by score from largest to smallest - const auto num_boxes3d = boxes3d.size(); - const float dist2d_pow_thresh = powf(dist_thresh, 2); - - // generate keep_mask - std::size_t num_to_keep = 0; - std::vector suppress(num_boxes3d); // suppress[i]=true mean i-th box should be suppressed - - // std::uint64_t * suppress_ptr = & suppress.front(); - for (std::size_t i = 0; i < num_boxes3d; i++) { - if (suppress[i]) { - keep_mask[i] = false; - } else { - keep_mask[i] = true; - num_to_keep++; - for (std::size_t j = i + 1; j < num_boxes3d; j++) { - // if the j-th box has been suppressed, continue - if (suppress[j]) continue; - // if the j-th box is in the circle of i-th, set j-th box to be suppressed - if (dist2dPow(boxes3d[i], boxes3d[j]) < dist2d_pow_thresh) suppress[j] = true; - } - } - } - - return num_to_keep; -} - -} // namespace lidar_centerpoint_tvm -} // namespace perception -} // namespace autoware diff --git a/perception/lidar_centerpoint_tvm/lib/postprocess/generate_detected_boxes.cpp b/perception/lidar_centerpoint_tvm/lib/postprocess/generate_detected_boxes.cpp deleted file mode 100644 index cbfe2d0c66669..0000000000000 --- a/perception/lidar_centerpoint_tvm/lib/postprocess/generate_detected_boxes.cpp +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2022 AutoCore Ltd., TIER IV, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "lidar_centerpoint_tvm/postprocess/generate_detected_boxes.hpp" - -#include - -#include -#include -#include -#include - -namespace -{ -const std::size_t THREAD_NUM_POST = 32; -} // namespace - -namespace autoware -{ -namespace perception -{ -namespace lidar_centerpoint_tvm -{ - -struct is_score_greater -{ - explicit is_score_greater(float t) : t_(t) {} - bool operator()(const Box3D & b) { return b.score > t_; } - -private: - float t_{0.0}; -}; - -struct is_kept -{ - bool operator()(const bool keep) { return keep; } -}; - -struct score_greater -{ - bool operator()(const Box3D & lb, const Box3D & rb) { return lb.score > rb.score; } -}; - -inline float sigmoid(float x) -{ - return 1.0f / (1.0f + expf(-x)); -} - -void generateBoxes3D_worker( - const std::vector & out_heatmap, const std::vector & out_offset, - const std::vector & out_z, const std::vector & out_dim, - const std::vector & out_rot, const std::vector & out_vel, - const CenterPointConfig & config, std::vector & boxes3d, std::size_t thread_idx, - std::size_t grids_per_thread) -{ - // generate boxes3d from the outputs of the network. 
- // shape of out_*: (N, DOWN_GRID_SIZE_Y, DOWN_GRID_SIZE_X) - // heatmap: N = class_size, offset: N = 2, z: N = 1, dim: N = 3, rot: N = 2, vel: N = 2 - for (std::size_t idx = 0; idx < grids_per_thread; idx++) { - std::size_t grid_idx = thread_idx * grids_per_thread + idx; - const auto down_grid_size = config.down_grid_size_y_ * config.down_grid_size_x_; - if (grid_idx >= down_grid_size) { - return; - } - - const auto yi = grid_idx / config.down_grid_size_x_; - const auto xi = grid_idx % config.down_grid_size_x_; - - int32_t label = -1; - float max_score = -1; - for (std::size_t ci = 0; ci < config.class_size_; ci++) { - float score = sigmoid(out_heatmap[down_grid_size * ci + grid_idx]); - if (score > max_score) { - label = ci; - max_score = score; - } - } - - const float offset_x = out_offset[down_grid_size * 0 + grid_idx]; - const float offset_y = out_offset[down_grid_size * 1 + grid_idx]; - const float x = - config.voxel_size_x_ * config.downsample_factor_ * (xi + offset_x) + config.range_min_x_; - const float y = - config.voxel_size_y_ * config.downsample_factor_ * (yi + offset_y) + config.range_min_y_; - const float z = out_z[grid_idx]; - const float w = out_dim[down_grid_size * 0 + grid_idx]; - const float l = out_dim[down_grid_size * 1 + grid_idx]; - const float h = out_dim[down_grid_size * 2 + grid_idx]; - const float yaw_sin = out_rot[down_grid_size * 0 + grid_idx]; - const float yaw_cos = out_rot[down_grid_size * 1 + grid_idx]; - const float yaw_norm = sqrtf(yaw_sin * yaw_sin + yaw_cos * yaw_cos); - const float vel_x = out_vel[down_grid_size * 0 + grid_idx]; - const float vel_y = out_vel[down_grid_size * 1 + grid_idx]; - - boxes3d[grid_idx].label = label; - boxes3d[grid_idx].score = yaw_norm >= config.yaw_norm_threshold_ ? max_score : 0.f; - boxes3d[grid_idx].x = x; - boxes3d[grid_idx].y = y; - boxes3d[grid_idx].z = z; - boxes3d[grid_idx].length = expf(l); - boxes3d[grid_idx].width = expf(w); - boxes3d[grid_idx].height = expf(h); - boxes3d[grid_idx].yaw = atan2f(yaw_sin, yaw_cos); - boxes3d[grid_idx].vel_x = vel_x; - boxes3d[grid_idx].vel_y = vel_y; - } -} - -// cspell: ignore divup -void generateDetectedBoxes3D( - const std::vector & out_heatmap, const std::vector & out_offset, - const std::vector & out_z, const std::vector & out_dim, - const std::vector & out_rot, const std::vector & out_vel, - const CenterPointConfig & config, std::vector & det_boxes3d) -{ - std::vector threadPool; - const auto down_grid_size = config.down_grid_size_y_ * config.down_grid_size_x_; - std::vector boxes3d(down_grid_size); - std::size_t grids_per_thread = divup(down_grid_size, THREAD_NUM_POST); - for (std::size_t idx = 0; idx < THREAD_NUM_POST; idx++) { - std::thread worker( - generateBoxes3D_worker, std::ref(out_heatmap), std::ref(out_offset), std::ref(out_z), - std::ref(out_dim), std::ref(out_rot), std::ref(out_vel), std::ref(config), std::ref(boxes3d), - idx, grids_per_thread); - threadPool.push_back(std::move(worker)); - } - for (std::size_t idx = 0; idx < THREAD_NUM_POST; idx++) { - threadPool[idx].join(); - } - - // suppress by score - const auto num_det_boxes3d = - std::count_if(boxes3d.begin(), boxes3d.end(), is_score_greater(config.score_threshold_)); - if (num_det_boxes3d == 0) { - // construct boxes3d failed - std::cerr << "lidar_centerpoint_tvm: construct boxes3d failed" << std::endl; - } - std::vector det_boxes3d_no_nms(num_det_boxes3d); - std::copy_if( - boxes3d.begin(), boxes3d.end(), det_boxes3d_no_nms.begin(), - is_score_greater(config.score_threshold_)); - - // sort by score - 
std::sort(det_boxes3d_no_nms.begin(), det_boxes3d_no_nms.end(), score_greater()); - - // suppress by NMS - std::vector final_keep_mask(num_det_boxes3d); - const auto num_final_det_boxes3d = - circleNMS(det_boxes3d_no_nms, config.circle_nms_dist_threshold_, final_keep_mask); - - det_boxes3d.resize(num_final_det_boxes3d); - std::size_t box_id = 0; - for (std::size_t idx = 0; idx < final_keep_mask.size(); idx++) { - if (final_keep_mask[idx]) { - det_boxes3d[box_id] = det_boxes3d_no_nms[idx]; - box_id++; - } - } - // std::copy_if(det_boxes3d_no_nms.begin(), det_boxes3d_no_nms.end(), final_keep_mask.begin(), - // det_boxes3d.begin(), is_kept()); -} - -} // namespace lidar_centerpoint_tvm -} // namespace perception -} // namespace autoware diff --git a/perception/lidar_centerpoint_tvm/lib/preprocess/generate_features.cpp b/perception/lidar_centerpoint_tvm/lib/preprocess/generate_features.cpp deleted file mode 100644 index 9b98adc2def4e..0000000000000 --- a/perception/lidar_centerpoint_tvm/lib/preprocess/generate_features.cpp +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2021-2022 AutoCore Ltd., TIER IV, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "lidar_centerpoint_tvm/preprocess/generate_features.hpp" - -#include - -#include - -namespace -{ -const std::size_t THREAD_NUM_VFE = 4; -} // namespace - -namespace autoware -{ -namespace perception -{ -namespace lidar_centerpoint_tvm -{ - -void generateFeatures_worker( - const std::vector & voxel_features, const std::vector & voxel_num_points, - const std::vector & coords, const std::size_t num_voxels, - const CenterPointConfig & config, std::vector & features, std::size_t thread_idx, - std::size_t pillars_per_thread) -{ - for (std::size_t idx = 0; idx < pillars_per_thread; idx++) { - std::size_t pillar_idx = thread_idx * pillars_per_thread + idx; - if (pillar_idx >= num_voxels) return; - - // voxel/pillar information - float points_sum[3] = {0.0, 0.0, 0.0}; // sum of x, y, z in the voxel - int32_t coordinate[3] = { - coords[pillar_idx * 3], coords[pillar_idx * 3 + 1], - coords[pillar_idx * 3 + 2]}; // 3D position(z,y,x) of the voxel - std::size_t points_count = voxel_num_points[pillar_idx]; // number of points in the voxel - - for (std::size_t i = 0; i < config.max_point_in_voxel_size_; i++) { - std::size_t point_idx = - pillar_idx * config.max_point_in_voxel_size_ * config.point_feature_size_ + - i * config.point_feature_size_; - for (std::size_t j = 0; j < config.point_feature_size_; j++) { - // point (x, y, z, intensity) - if (i < points_count && j < 3) points_sum[j] += voxel_features[point_idx + j]; - } - } - - // calculate voxel mean - float mean[3] = { - points_sum[0] / points_count, points_sum[1] / points_count, points_sum[2] / points_count}; - // calculate offset - float x_offset = coordinate[2] * config.voxel_size_x_ + config.offset_x_; - float y_offset = coordinate[1] * config.voxel_size_y_ + config.offset_y_; - // float z_offset = coordinate[0] * config.voxel_size_z_ + config.offset_z_; - - // build the 
encoder_in_features - for (std::size_t i = 0; i < config.max_point_in_voxel_size_; i++) { - // feature_idx - std::size_t feature_idx = - pillar_idx * config.max_point_in_voxel_size_ * config.encoder_in_feature_size_ + - i * config.encoder_in_feature_size_; - std::size_t point_idx = - pillar_idx * config.max_point_in_voxel_size_ * config.point_feature_size_ + - i * config.point_feature_size_; - if (i < points_count) { - features[feature_idx + 0] = voxel_features[point_idx + 0]; - features[feature_idx + 1] = voxel_features[point_idx + 1]; - features[feature_idx + 2] = voxel_features[point_idx + 2]; - features[feature_idx + 3] = voxel_features[point_idx + 3]; - // feature-mean - features[feature_idx + 4] = voxel_features[point_idx + 0] - mean[0]; - features[feature_idx + 5] = voxel_features[point_idx + 1] - mean[1]; - features[feature_idx + 6] = voxel_features[point_idx + 2] - mean[2]; - // feature-offset - features[feature_idx + 7] = voxel_features[point_idx + 0] - x_offset; - features[feature_idx + 8] = voxel_features[point_idx + 1] - y_offset; - - } else { - features[feature_idx + 0] = 0; - features[feature_idx + 1] = 0; - features[feature_idx + 2] = 0; - features[feature_idx + 3] = 0; - // feature-mean - features[feature_idx + 4] = 0; - features[feature_idx + 5] = 0; - features[feature_idx + 6] = 0; - // feature-offset - features[feature_idx + 7] = 0; - features[feature_idx + 8] = 0; - } - } - } -} - -// cspell: ignore divup -void generateFeatures( - const std::vector & voxel_features, const std::vector & voxel_num_points, - const std::vector & coords, const std::size_t num_voxels, - const CenterPointConfig & config, std::vector & features) -{ - // voxel_features (float): max_voxel_size*max_point_in_voxel_size*point_feature_size - // voxel_num_points (int): max_voxel_size - // coords (int): max_voxel_size*point_dim_size - std::vector threadPool; - std::size_t pillars_per_thread = divup(config.max_voxel_size_, THREAD_NUM_VFE); - for (std::size_t idx = 0; idx < THREAD_NUM_VFE; idx++) { - std::thread worker( - generateFeatures_worker, std::ref(voxel_features), std::ref(voxel_num_points), - std::ref(coords), num_voxels, std::ref(config), std::ref(features), idx, pillars_per_thread); - threadPool.push_back(std::move(worker)); - } - for (std::size_t idx = 0; idx < THREAD_NUM_VFE; idx++) { - threadPool[idx].join(); - } -} - -} // namespace lidar_centerpoint_tvm -} // namespace perception -} // namespace autoware diff --git a/perception/lidar_centerpoint_tvm/lib/preprocess/pointcloud_densification.cpp b/perception/lidar_centerpoint_tvm/lib/preprocess/pointcloud_densification.cpp deleted file mode 100644 index 5562e963d177b..0000000000000 --- a/perception/lidar_centerpoint_tvm/lib/preprocess/pointcloud_densification.cpp +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2021-2022 AutoCore Ltd., TIER IV, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
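generateFeatures_worker() above packs each point of a pillar into a 9-value encoder input: the raw point (x, y, z, plus a fourth feature, labelled intensity in the comments but filled with a time lag by the voxel generator), the point relative to the pillar mean, and the x/y position relative to the pillar center. A small sketch of that layout for a single point; all constants and values here are made up for illustration.

#include <array>
#include <cstdio>

int main()
{
  // Hypothetical grid configuration (stand-ins for config.voxel_size_* and
  // config.offset_*).
  const float voxel_size_x = 0.32f, voxel_size_y = 0.32f;
  const float offset_x = -89.2f, offset_y = -89.2f;

  const std::array<float, 4> point{1.0f, 2.0f, 0.5f, 0.8f};  // x, y, z, time lag
  const std::array<float, 3> mean{1.1f, 2.2f, 0.4f};         // per-pillar xyz mean
  const int coord_x = 280, coord_y = 285;                    // pillar grid indices

  const float x_offset = coord_x * voxel_size_x + offset_x;
  const float y_offset = coord_y * voxel_size_y + offset_y;

  // The 9 encoder-input features, matching indices 0..8 in the worker above:
  // raw point, point minus pillar mean, point minus pillar center (x/y only).
  const std::array<float, 9> feature{
    point[0], point[1], point[2], point[3],
    point[0] - mean[0], point[1] - mean[1], point[2] - mean[2],
    point[0] - x_offset, point[1] - y_offset};

  for (const float f : feature) std::printf("%.3f ", f);
  std::printf("\n");
  return 0;
}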
- -#include "lidar_centerpoint_tvm/preprocess/pointcloud_densification.hpp" - -#include - -#include - -#include -#ifdef ROS_DISTRO_GALACTIC -#include -#else -#include -#endif - -#include -#include - -namespace autoware -{ -namespace perception -{ -namespace lidar_centerpoint_tvm -{ - -boost::optional getTransform( - const tf2_ros::Buffer & tf_buffer, const std::string & target_frame_id, - const std::string & source_frame_id, const rclcpp::Time & time) -{ - try { - geometry_msgs::msg::TransformStamped transform_stamped; - transform_stamped = tf_buffer.lookupTransform( - target_frame_id, source_frame_id, time, rclcpp::Duration::from_seconds(0.5)); - return transform_stamped.transform; - } catch (tf2::TransformException & ex) { - RCLCPP_WARN_STREAM(rclcpp::get_logger("lidar_centerpoint_tvm"), ex.what()); - return boost::none; - } -} - -Eigen::Affine3f transformToEigen(const geometry_msgs::msg::Transform & t) -{ - Eigen::Affine3f a; - a.matrix() = tf2::transformToEigen(t).matrix().cast(); - return a; -} - -} // namespace lidar_centerpoint_tvm -} // namespace perception -} // namespace autoware - -namespace autoware -{ -namespace perception -{ -namespace lidar_centerpoint_tvm -{ - -PointCloudDensification::PointCloudDensification(const DensificationParam & param) : param_(param) -{ -} - -bool PointCloudDensification::enqueuePointCloud( - const sensor_msgs::msg::PointCloud2 & pointcloud_msg, const tf2_ros::Buffer & tf_buffer) -{ - const auto header = pointcloud_msg.header; - - if (param_.pointcloud_cache_size() > 1) { - auto transform_world2current = - getTransform(tf_buffer, header.frame_id, param_.world_frame_id(), header.stamp); - if (!transform_world2current) { - return false; - } - auto affine_world2current = transformToEigen(transform_world2current.get()); - - enqueue(pointcloud_msg, affine_world2current); - } else { - enqueue(pointcloud_msg, Eigen::Affine3f::Identity()); - } - dequeue(); - - return true; -} - -void PointCloudDensification::enqueue( - const sensor_msgs::msg::PointCloud2 & msg, const Eigen::Affine3f & affine_world2current) -{ - affine_world2current_ = affine_world2current; - current_timestamp_ = rclcpp::Time(msg.header.stamp).seconds(); - PointCloudWithTransform pointcloud = {msg, affine_world2current.inverse()}; - pointcloud_cache_.push_front(pointcloud); -} - -void PointCloudDensification::dequeue() -{ - if (pointcloud_cache_.size() > param_.pointcloud_cache_size()) { - pointcloud_cache_.pop_back(); - } -} - -} // namespace lidar_centerpoint_tvm -} // namespace perception -} // namespace autoware diff --git a/perception/lidar_centerpoint_tvm/lib/preprocess/voxel_generator.cpp b/perception/lidar_centerpoint_tvm/lib/preprocess/voxel_generator.cpp deleted file mode 100644 index 11ae8e7064b1c..0000000000000 --- a/perception/lidar_centerpoint_tvm/lib/preprocess/voxel_generator.cpp +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2021-2022 AutoCore Ltd., TIER IV, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
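The densification code above caches, for every enqueued cloud, the transform from that cloud's sensor frame to the world frame (`affine_past2world`, the inverse of the world-to-sensor transform at enqueue time), so a point from a past sweep can later be mapped into the current sensor frame by composing it with the newest world-to-sensor transform. A minimal sketch of that composition, assuming Eigen is available; the transform values are arbitrary.

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  // Hypothetical world -> sensor transforms at the past and current stamps.
  const Eigen::Affine3f world2past =
    Eigen::Translation3f(1.0f, 0.0f, 0.0f) *
    Eigen::AngleAxisf(0.1f, Eigen::Vector3f::UnitZ());
  const Eigen::Affine3f world2current =
    Eigen::Translation3f(2.0f, 0.5f, 0.0f) *
    Eigen::AngleAxisf(0.2f, Eigen::Vector3f::UnitZ());

  // enqueue() caches the inverse of world -> sensor at enqueue time ...
  const Eigen::Affine3f past2world = world2past.inverse();

  // ... and the voxel generator later composes it with the newest transform:
  // affine_past2current = getAffineWorldToCurrent() * affine_past2world.
  const Eigen::Affine3f past2current = world2current * past2world;

  const Eigen::Vector3f p_past(10.0f, -3.0f, 0.2f);
  std::cout << (past2current * p_past).transpose() << std::endl;
  return 0;
}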
- -#include "lidar_centerpoint_tvm/preprocess/voxel_generator.hpp" - -#include - -namespace autoware -{ -namespace perception -{ -namespace lidar_centerpoint_tvm -{ - -VoxelGeneratorTemplate::VoxelGeneratorTemplate( - const DensificationParam & param, const CenterPointConfig & config) -: config_(config) -{ - pd_ptr_ = std::make_unique(param); - range_[0] = config.range_min_x_; - range_[1] = config.range_min_y_; - range_[2] = config.range_min_z_; - range_[3] = config.range_max_x_; - range_[4] = config.range_max_y_; - range_[5] = config.range_max_z_; - grid_size_[0] = config.grid_size_x_; - grid_size_[1] = config.grid_size_y_; - grid_size_[2] = config.grid_size_z_; - recip_voxel_size_[0] = 1 / config.voxel_size_x_; - recip_voxel_size_[1] = 1 / config.voxel_size_y_; - recip_voxel_size_[2] = 1 / config.voxel_size_z_; -} - -bool VoxelGeneratorTemplate::enqueuePointCloud( - const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg, const tf2_ros::Buffer & tf_buffer) -{ - return pd_ptr_->enqueuePointCloud(input_pointcloud_msg, tf_buffer); -} - -std::size_t VoxelGenerator::pointsToVoxels( - std::vector & voxels, std::vector & coordinates, - std::vector & num_points_per_voxel) -{ - // voxels (float): (max_voxel_size * max_point_in_voxel_size * point_feature_size), point info in - // each voxel coordinates (int): (max_voxel_size * point_dim_size), the index from voxel to its 3D - // position num_points_per_voxel (float): (max_voxel_size), the number of points in each voxel - - const std::size_t grid_size = config_.grid_size_z_ * config_.grid_size_y_ * config_.grid_size_x_; - std::vector coord_to_voxel_idx( - grid_size, -1); // the index from 3D position to the voxel id - - std::size_t voxel_cnt = 0; // @return - std::vector point; - point.resize(config_.point_feature_size_); - std::vector coord_zyx; // record which grid the zyx coord of current point belong to - coord_zyx.resize(config_.point_dim_size_); - bool out_of_range; - std::size_t point_cnt; - int32_t c, coord_idx, voxel_idx; - Eigen::Vector3f point_current, point_past; - - for (auto pc_cache_iter = pd_ptr_->getPointCloudCacheIter(); !pd_ptr_->isCacheEnd(pc_cache_iter); - pc_cache_iter++) { - auto pc_msg = pc_cache_iter->pointcloud_msg; - auto affine_past2current = - pd_ptr_->getAffineWorldToCurrent() * pc_cache_iter->affine_past2world; - float time_lag = static_cast( - pd_ptr_->getCurrentTimestamp() - rclcpp::Time(pc_msg.header.stamp).seconds()); - - for (sensor_msgs::PointCloud2ConstIterator x_iter(pc_msg, "x"), y_iter(pc_msg, "y"), - z_iter(pc_msg, "z"); - x_iter != x_iter.end(); ++x_iter, ++y_iter, ++z_iter) { - point_past << *x_iter, *y_iter, *z_iter; - point_current = affine_past2current * point_past; - - point[0] = point_current.x(); - point[1] = point_current.y(); - point[2] = point_current.z(); - point[3] = time_lag; - - out_of_range = false; - for (std::size_t di = 0; di < config_.point_dim_size_; di++) { - c = static_cast((point[di] - range_[di]) * recip_voxel_size_[di]); - if (c < 0 || c >= grid_size_[di]) { - out_of_range = true; - break; - } - coord_zyx[config_.point_dim_size_ - di - 1] = c; - } - if (out_of_range) { - continue; - } - - coord_idx = coord_zyx[0] * config_.grid_size_y_ * config_.grid_size_x_ + - coord_zyx[1] * config_.grid_size_x_ + coord_zyx[2]; - voxel_idx = coord_to_voxel_idx[coord_idx]; - if (voxel_idx == -1) { - voxel_idx = voxel_cnt; - if (voxel_cnt >= config_.max_voxel_size_) { - continue; - } - - voxel_cnt++; - coord_to_voxel_idx[coord_idx] = voxel_idx; - for (std::size_t di = 0; di < 
config_.point_dim_size_; di++) { - coordinates[voxel_idx * config_.point_dim_size_ + di] = coord_zyx[di]; - } - } - - point_cnt = num_points_per_voxel[voxel_idx]; - if (point_cnt < config_.max_point_in_voxel_size_) { - for (std::size_t fi = 0; fi < config_.point_feature_size_; fi++) { - voxels - [voxel_idx * config_.max_point_in_voxel_size_ * config_.point_feature_size_ + - point_cnt * config_.point_feature_size_ + fi] = point[fi]; - } - num_points_per_voxel[voxel_idx]++; - } - } - } - - return voxel_cnt; -} - -} // namespace lidar_centerpoint_tvm -} // namespace perception -} // namespace autoware diff --git a/perception/lidar_centerpoint_tvm/lib/ros_utils.cpp b/perception/lidar_centerpoint_tvm/lib/ros_utils.cpp deleted file mode 100644 index 94639d71e66c0..0000000000000 --- a/perception/lidar_centerpoint_tvm/lib/ros_utils.cpp +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2022 AutoCore Ltd., TIER IV, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "lidar_centerpoint_tvm/ros_utils.hpp" - -#include -#include - -namespace autoware -{ -namespace perception -{ -namespace lidar_centerpoint_tvm -{ - -using Label = autoware_perception_msgs::msg::ObjectClassification; - -void box3DToDetectedObject( - const Box3D & box3d, const std::vector & class_names, - const bool rename_car_to_truck_and_bus, const bool has_twist, - autoware_perception_msgs::msg::DetectedObject & obj) -{ - // TODO(yukke42): the value of classification confidence of DNN, not probability. 
- obj.existence_probability = box3d.score; - - // classification - autoware_perception_msgs::msg::ObjectClassification classification; - classification.probability = 1.0f; - if (box3d.label >= 0 && static_cast(box3d.label) < class_names.size()) { - classification.label = getSemanticType(class_names[box3d.label]); - } else { - classification.label = Label::UNKNOWN; - RCLCPP_WARN_STREAM( - rclcpp::get_logger("lidar_centerpoint"), "Unexpected label: UNKNOWN is set."); - } - - float l = box3d.length; - float w = box3d.width; - if (classification.label == Label::CAR && rename_car_to_truck_and_bus) { - // Note: object size is referred from multi_object_tracker - if ((w * l > 2.2 * 5.5) && (w * l <= 2.5 * 7.9)) { - classification.label = Label::TRUCK; - } else if (w * l > 2.5 * 7.9) { - classification.label = Label::BUS; - } - } - - if (isCarLikeVehicleLabel(classification.label)) { - obj.kinematics.orientation_availability = - autoware_perception_msgs::msg::DetectedObjectKinematics::SIGN_UNKNOWN; - } - - obj.classification.emplace_back(classification); - - // pose and shape - // mmdet3d yaw format to ros yaw format - float yaw = -box3d.yaw - autoware::universe_utils::pi / 2; - obj.kinematics.pose_with_covariance.pose.position = - autoware::universe_utils::createPoint(box3d.x, box3d.y, box3d.z); - obj.kinematics.pose_with_covariance.pose.orientation = - autoware::universe_utils::createQuaternionFromYaw(yaw); - obj.shape.type = autoware_perception_msgs::msg::Shape::BOUNDING_BOX; - obj.shape.dimensions = - autoware::universe_utils::createTranslation(box3d.length, box3d.width, box3d.height); - - // twist - if (has_twist) { - float vel_x = box3d.vel_x; - float vel_y = box3d.vel_y; - geometry_msgs::msg::Twist twist; - twist.linear.x = std::sqrt(std::pow(vel_x, 2) + std::pow(vel_y, 2)); - twist.angular.z = 2 * (std::atan2(vel_y, vel_x) - yaw); - obj.kinematics.twist_with_covariance.twist = twist; - obj.kinematics.has_twist = has_twist; - } -} - -uint8_t getSemanticType(const std::string & class_name) -{ - if (class_name == "CAR") { - return Label::CAR; - } else if (class_name == "TRUCK") { - return Label::TRUCK; - } else if (class_name == "BUS") { - return Label::BUS; - } else if (class_name == "TRAILER") { - return Label::TRAILER; - } else if (class_name == "BICYCLE") { - return Label::BICYCLE; - } else if (class_name == "MOTORBIKE") { - return Label::MOTORCYCLE; - } else if (class_name == "PEDESTRIAN") { - return Label::PEDESTRIAN; - } else { - return Label::UNKNOWN; - } -} - -bool isCarLikeVehicleLabel(const uint8_t label) -{ - return label == Label::CAR || label == Label::TRUCK || label == Label::BUS || - label == Label::TRAILER; -} - -} // namespace lidar_centerpoint_tvm -} // namespace perception -} // namespace autoware diff --git a/perception/lidar_centerpoint_tvm/lib/utils.cpp b/perception/lidar_centerpoint_tvm/lib/utils.cpp deleted file mode 100644 index caf9cb84fa1c7..0000000000000 --- a/perception/lidar_centerpoint_tvm/lib/utils.cpp +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2022 AutoCore Ltd., TIER IV, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -#include "lidar_centerpoint_tvm/utils.hpp" - -#include - -namespace autoware -{ -namespace perception -{ -namespace lidar_centerpoint_tvm -{ - -// cspell: ignore divup -std::size_t divup(const std::size_t a, const std::size_t b) -{ - if (a == 0) { - throw std::runtime_error("A dividend of divup isn't positive."); - } - if (b == 0) { - throw std::runtime_error("A divisor of divup isn't positive."); - } - - return (a + b - 1) / b; -} - -} // namespace lidar_centerpoint_tvm -} // namespace perception -} // namespace autoware diff --git a/perception/lidar_centerpoint_tvm/package.xml b/perception/lidar_centerpoint_tvm/package.xml deleted file mode 100644 index c71a27e4a1677..0000000000000 --- a/perception/lidar_centerpoint_tvm/package.xml +++ /dev/null @@ -1,32 +0,0 @@ - - - - lidar_centerpoint_tvm - 0.1.0 - lidar_centerpoint_tvm - Liu Zhengfei - Xinyu Wang - Apache 2.0 - - ament_cmake_auto - autoware_cmake - - autoware_perception_msgs - autoware_universe_utils - pcl_ros - rclcpp - rclcpp_components - tf2_eigen - tf2_geometry_msgs - tf2_sensor_msgs - tvm_utility - tvm_vendor - - ament_cmake_gtest - ament_lint_auto - autoware_lint_common - - - ament_cmake - - diff --git a/perception/lidar_centerpoint_tvm/scripts/lidar_centerpoint_visualizer.py b/perception/lidar_centerpoint_tvm/scripts/lidar_centerpoint_visualizer.py deleted file mode 100755 index 5a1135379615f..0000000000000 --- a/perception/lidar_centerpoint_tvm/scripts/lidar_centerpoint_visualizer.py +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env python -# Copyright 2022 TIER IV, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
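The divup() helper deleted in lib/utils.cpp above is the ceiling division used to split grid cells and pillars across the fixed-size thread pools in the pre- and post-processing code. A quick compile-and-run check of its rounding behavior, with the function body reproduced from the deleted file:

#include <cassert>
#include <cstddef>
#include <stdexcept>

std::size_t divup(const std::size_t a, const std::size_t b)
{
  if (a == 0) {
    throw std::runtime_error("A dividend of divup isn't positive.");
  }
  if (b == 0) {
    throw std::runtime_error("A divisor of divup isn't positive.");
  }
  return (a + b - 1) / b;
}

int main()
{
  assert(divup(40000, 32) == 1250);  // exact split across 32 worker threads
  assert(divup(40001, 32) == 1251);  // a remainder costs one extra item per thread
  assert(divup(1, 32) == 1);         // never rounds down to zero work items
  return 0;
}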
- -import os -import time - -import open3d as o3d -import rclpy -from rclpy.node import Node - - -def main(args=None): - rclpy.init(args=args) - - node = Node("lidar_centerpoint_visualizer") - node.declare_parameter("pcd_path", rclpy.Parameter.Type.STRING) - node.declare_parameter("detections_path", rclpy.Parameter.Type.STRING) - - pcd_path = node.get_parameter("pcd_path").get_parameter_value().string_value - detections_path = node.get_parameter("detections_path").get_parameter_value().string_value - - while not os.path.exists(pcd_path) and not os.path.exists(detections_path): - time.sleep(1.0) - - if not rclpy.ok(): - rclpy.shutdown() - return - - mesh = o3d.io.read_triangle_mesh(detections_path) - pcd = o3d.io.read_point_cloud(pcd_path) - - mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0, origin=[0, 0, 0]) - - detection_lines = o3d.geometry.LineSet.create_from_triangle_mesh(mesh) - detection_lines.paint_uniform_color([1.0, 0.0, 1.0]) - - o3d.visualization.draw_geometries([mesh_frame, pcd, detection_lines]) - - rclpy.shutdown() - - -if __name__ == "__main__": - main() diff --git a/perception/lidar_centerpoint_tvm/src/node.cpp b/perception/lidar_centerpoint_tvm/src/node.cpp deleted file mode 100644 index 9cf82b1c6bcae..0000000000000 --- a/perception/lidar_centerpoint_tvm/src/node.cpp +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright 2021-2022 AutoCore Ltd., TIER IV, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include "lidar_centerpoint_tvm/node.hpp" - -#include -#include -#include -#include -#include - -#ifdef ROS_DISTRO_GALACTIC -#include -#else -#include -#endif - -#include -#include -#include - -namespace autoware -{ -namespace perception -{ -namespace lidar_centerpoint_tvm -{ - -LidarCenterPointTVMNode::LidarCenterPointTVMNode(const rclcpp::NodeOptions & node_options) -: Node("lidar_centerpoint_tvm", node_options), tf_buffer_(this->get_clock()) -{ - const float score_threshold = - static_cast(this->declare_parameter("score_threshold", 0.35)); - const float circle_nms_dist_threshold = - static_cast(this->declare_parameter("circle_nms_dist_threshold", 1.5)); - const float yaw_norm_threshold = - static_cast(this->declare_parameter("yaw_norm_threshold", 0.0)); - const std::string densification_world_frame_id = - this->declare_parameter("densification_world_frame_id", "map"); - const int32_t densification_num_past_frames = - this->declare_parameter("densification_num_past_frames", 1); - - class_names_ = this->declare_parameter>("class_names"); - rename_car_to_truck_and_bus_ = this->declare_parameter("rename_car_to_truck_and_bus", false); - has_twist_ = this->declare_parameter("has_twist", false); - - const std::size_t point_feature_size = - static_cast(this->declare_parameter("point_feature_size")); - const std::size_t max_voxel_size = - static_cast(this->declare_parameter("max_voxel_size")); - const auto point_cloud_range = this->declare_parameter>("point_cloud_range"); - const auto voxel_size = this->declare_parameter>("voxel_size"); - const std::size_t downsample_factor = - static_cast(this->declare_parameter("downsample_factor")); - const std::size_t encoder_in_feature_size = - static_cast(this->declare_parameter("encoder_in_feature_size")); - const auto data_path = this->declare_parameter("data_path"); - - DensificationParam densification_param( - densification_world_frame_id, densification_num_past_frames); - - if (point_cloud_range.size() != 6) { - RCLCPP_WARN_STREAM( - rclcpp::get_logger("lidar_centerpoint"), - "The size of point_cloud_range != 6: use the default parameters."); - } - if (voxel_size.size() != 3) { - RCLCPP_WARN_STREAM( - rclcpp::get_logger("lidar_centerpoint"), - "The size of voxel_size != 3: use the default parameters."); - } - CenterPointConfig config( - class_names_.size(), point_feature_size, max_voxel_size, point_cloud_range, voxel_size, - downsample_factor, encoder_in_feature_size, score_threshold, circle_nms_dist_threshold, - yaw_norm_threshold); - detector_ptr_ = std::make_unique(densification_param, config, data_path); - - pointcloud_sub_ = this->create_subscription( - "~/input/pointcloud", rclcpp::SensorDataQoS{}.keep_last(1), - std::bind(&LidarCenterPointTVMNode::pointCloudCallback, this, std::placeholders::_1)); - objects_pub_ = this->create_publisher( - "~/output/objects", rclcpp::QoS{1}); - - // initialize debug tool - { - using autoware::universe_utils::DebugPublisher; - using autoware::universe_utils::StopWatch; - stop_watch_ptr_ = std::make_unique>(); - debug_publisher_ptr_ = std::make_unique(this, "lidar_centerpoint_tvm"); - stop_watch_ptr_->tic("cyclic_time"); - stop_watch_ptr_->tic("processing_time"); - } -} - -void LidarCenterPointTVMNode::pointCloudCallback( - const sensor_msgs::msg::PointCloud2::ConstSharedPtr input_pointcloud_msg) -{ - const auto objects_sub_count = - objects_pub_->get_subscription_count() + objects_pub_->get_intra_process_subscription_count(); - if (objects_sub_count < 1) { - return; - } - - if (stop_watch_ptr_) { - 
stop_watch_ptr_->toc("processing_time", true); - } - - std::vector det_boxes3d; - bool is_success = detector_ptr_->detect(*input_pointcloud_msg, tf_buffer_, det_boxes3d); - if (!is_success) { - return; - } - - autoware_perception_msgs::msg::DetectedObjects output_msg; - output_msg.header = input_pointcloud_msg->header; - for (const auto & box3d : det_boxes3d) { - autoware_perception_msgs::msg::DetectedObject obj; - box3DToDetectedObject(box3d, class_names_, rename_car_to_truck_and_bus_, has_twist_, obj); - output_msg.objects.emplace_back(obj); - } - - if (objects_sub_count > 0) { - objects_pub_->publish(output_msg); - } - - // add processing time for debug - if (debug_publisher_ptr_ && stop_watch_ptr_) { - const double cyclic_time_ms = stop_watch_ptr_->toc("cyclic_time", true); - const double processing_time_ms = stop_watch_ptr_->toc("processing_time", true); - debug_publisher_ptr_->publish( - "debug/cyclic_time_ms", cyclic_time_ms); - debug_publisher_ptr_->publish( - "debug/processing_time_ms", processing_time_ms); - } -} - -} // namespace lidar_centerpoint_tvm -} // namespace perception -} // namespace autoware - -#include -RCLCPP_COMPONENTS_REGISTER_NODE( - autoware::perception::lidar_centerpoint_tvm::LidarCenterPointTVMNode) diff --git a/perception/lidar_centerpoint_tvm/src/single_inference_node.cpp b/perception/lidar_centerpoint_tvm/src/single_inference_node.cpp deleted file mode 100644 index 224664e51ea2d..0000000000000 --- a/perception/lidar_centerpoint_tvm/src/single_inference_node.cpp +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright 2022 TIER IV, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
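The callback above keeps two stopwatch laps: processing_time is restarted at callback entry and read at the end to measure one callback body, while cyclic_time measures the interval between consecutive callbacks (both restart on toc(..., true)). A minimal sketch of the same pattern using std::chrono in place of the autoware StopWatch; the sleep stands in for detect().

#include <chrono>
#include <iostream>
#include <thread>

int main()
{
  using clock = std::chrono::steady_clock;
  auto cycle_start = clock::now();

  for (int i = 0; i < 3; ++i) {  // pretend point cloud callbacks
    const auto processing_start = clock::now();
    std::this_thread::sleep_for(std::chrono::milliseconds(5));  // "detect()"

    const auto now = clock::now();
    const double processing_ms =
      std::chrono::duration<double, std::milli>(now - processing_start).count();
    const double cyclic_ms =
      std::chrono::duration<double, std::milli>(now - cycle_start).count();
    cycle_start = now;  // like toc(..., true), restart the cyclic lap

    std::cout << "processing " << processing_ms << " ms, cycle " << cyclic_ms
              << " ms" << std::endl;
  }
  return 0;
}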
- -#include "lidar_centerpoint_tvm/single_inference_node.hpp" - -#include -#include -#include -#include -#include - -#include - -#include -#include -#ifdef ROS_DISTRO_GALACTIC -#include -#else -#include -#endif - -#include -#include -#include - -namespace autoware -{ -namespace perception -{ -namespace lidar_centerpoint_tvm -{ - -SingleInferenceLidarCenterPointNode::SingleInferenceLidarCenterPointNode( - const rclcpp::NodeOptions & node_options) -: Node("lidar_center_point_tvm", node_options), tf_buffer_(this->get_clock()) -{ - const float score_threshold = - static_cast(this->declare_parameter("score_threshold", 0.35)); - const float circle_nms_dist_threshold = - static_cast(this->declare_parameter("circle_nms_dist_threshold", 1.5)); - const float yaw_norm_threshold = - static_cast(this->declare_parameter("yaw_norm_threshold", 0.0)); - const std::string densification_world_frame_id = - this->declare_parameter("densification_world_frame_id", "map"); - const int densification_num_past_frames = - this->declare_parameter("densification_num_past_frames", 0); - - class_names_ = this->declare_parameter>("class_names"); - has_twist_ = this->declare_parameter("has_twist", false); - const std::size_t point_feature_size = - static_cast(this->declare_parameter("point_feature_size")); - const std::size_t max_voxel_size = - static_cast(this->declare_parameter("max_voxel_size")); - const auto point_cloud_range = this->declare_parameter>("point_cloud_range"); - const auto voxel_size = this->declare_parameter>("voxel_size"); - const std::size_t downsample_factor = - static_cast(this->declare_parameter("downsample_factor")); - const std::size_t encoder_in_feature_size = - static_cast(this->declare_parameter("encoder_in_feature_size")); - const auto pcd_path = this->declare_parameter("pcd_path"); - const auto detections_path = this->declare_parameter("detections_path"); - const auto data_path = this->declare_parameter("data_path"); - DensificationParam densification_param( - densification_world_frame_id, densification_num_past_frames); - - if (point_cloud_range.size() != 6) { - RCLCPP_WARN_STREAM( - rclcpp::get_logger("single_inference_lidar_centerpoint"), - "The size of point_cloud_range != 6: use the default parameters."); - } - if (voxel_size.size() != 3) { - RCLCPP_WARN_STREAM( - rclcpp::get_logger("single_inference_lidar_centerpoint"), - "The size of voxel_size != 3: use the default parameters."); - } - CenterPointConfig config( - class_names_.size(), point_feature_size, max_voxel_size, point_cloud_range, voxel_size, - downsample_factor, encoder_in_feature_size, score_threshold, circle_nms_dist_threshold, - yaw_norm_threshold); - detector_ptr_ = std::make_unique(densification_param, config, data_path); - - detect(pcd_path, detections_path); - exit(0); -} - -std::vector SingleInferenceLidarCenterPointNode::getVertices( - const autoware_perception_msgs::msg::Shape & shape, const Eigen::Affine3d & pose) const -{ - std::vector vertices; - Eigen::Vector3d vertex; - - vertex.x() = -shape.dimensions.x / 2.0; - vertex.y() = -shape.dimensions.y / 2.0; - vertex.z() = shape.dimensions.z / 2.0; - vertices.push_back(pose * vertex); - - vertex.x() = -shape.dimensions.x / 2.0; - vertex.y() = shape.dimensions.y / 2.0; - vertex.z() = shape.dimensions.z / 2.0; - vertices.push_back(pose * vertex); - - vertex.x() = -shape.dimensions.x / 2.0; - vertex.y() = shape.dimensions.y / 2.0; - vertex.z() = -shape.dimensions.z / 2.0; - vertices.push_back(pose * vertex); - - vertex.x() = shape.dimensions.x / 2.0; - vertex.y() = 
shape.dimensions.y / 2.0; - vertex.z() = shape.dimensions.z / 2.0; - vertices.push_back(pose * vertex); - - vertex.x() = shape.dimensions.x / 2.0; - vertex.y() = shape.dimensions.y / 2.0; - vertex.z() = -shape.dimensions.z / 2.0; - vertices.push_back(pose * vertex); - - vertex.x() = shape.dimensions.x / 2.0; - vertex.y() = -shape.dimensions.y / 2.0; - vertex.z() = shape.dimensions.z / 2.0; - vertices.push_back(pose * vertex); - - vertex.x() = shape.dimensions.x / 2.0; - vertex.y() = -shape.dimensions.y / 2.0; - vertex.z() = -shape.dimensions.z / 2.0; - vertices.push_back(pose * vertex); - - vertex.x() = -shape.dimensions.x / 2.0; - vertex.y() = -shape.dimensions.y / 2.0; - vertex.z() = -shape.dimensions.z / 2.0; - vertices.push_back(pose * vertex); - - return vertices; -} - -void SingleInferenceLidarCenterPointNode::detect( - const std::string & pcd_path, const std::string & detections_path) -{ - sensor_msgs::msg::PointCloud2 msg; - pcl::PointCloud::Ptr pc_ptr(new pcl::PointCloud()); - - pcl::io::loadPCDFile(pcd_path, *pc_ptr); - pcl::toROSMsg(*pc_ptr, msg); - msg.header.frame_id = "lidar_frame"; - - std::vector det_boxes3d; - bool is_success = detector_ptr_->detect(msg, tf_buffer_, det_boxes3d); - if (!is_success) { - return; - } - - autoware_perception_msgs::msg::DetectedObjects output_msg; - output_msg.header = msg.header; - for (const auto & box3d : det_boxes3d) { - autoware_perception_msgs::msg::DetectedObject obj; - box3DToDetectedObject(box3d, class_names_, rename_car_to_truck_and_bus_, has_twist_, obj); - output_msg.objects.emplace_back(obj); - } - - dumpDetectionsAsMesh(output_msg, detections_path); - - RCLCPP_INFO( - rclcpp::get_logger("single_inference_lidar_centerpoint_tvm"), - "The detection results were saved as meshes in %s", detections_path.c_str()); -} - -void SingleInferenceLidarCenterPointNode::dumpDetectionsAsMesh( - const autoware_perception_msgs::msg::DetectedObjects & objects_msg, - const std::string & output_path) const -{ - std::ofstream ofs(output_path, std::ofstream::out); - std::stringstream vertices_stream; - std::stringstream faces_stream; - int index = 0; - int num_detections = static_cast(objects_msg.objects.size()); - - ofs << "ply" << std::endl; - ofs << "format ascii 1.0" << std::endl; - ofs << "comment created by lidar_centerpoint" << std::endl; - ofs << "element vertex " << 8 * num_detections << std::endl; - ofs << "property float x" << std::endl; - ofs << "property float y" << std::endl; - ofs << "property float z" << std::endl; - ofs << "element face " << 12 * num_detections << std::endl; - ofs << "property list uchar uint vertex_indices" << std::endl; - ofs << "end_header" << std::endl; - - auto streamFace = [&faces_stream](int v1, int v2, int v3) { - faces_stream << std::to_string(3) << " " << std::to_string(v1) << " " << std::to_string(v2) - << " " << std::to_string(v3) << std::endl; - }; - - for (const auto & object : objects_msg.objects) { - Eigen::Affine3d pose_affine; - tf2::fromMsg(object.kinematics.pose_with_covariance.pose, pose_affine); - - std::vector vertices = getVertices(object.shape, pose_affine); - - for (const auto & vertex : vertices) { - vertices_stream << vertex.x() << " " << vertex.y() << " " << vertex.z() << std::endl; - } - - streamFace(index + 1, index + 3, index + 4); - streamFace(index + 3, index + 5, index + 6); - streamFace(index + 0, index + 7, index + 5); - streamFace(index + 7, index + 2, index + 4); - streamFace(index + 5, index + 3, index + 1); - streamFace(index + 7, index + 0, index + 2); - streamFace(index + 2, 
index + 1, index + 4); - streamFace(index + 4, index + 3, index + 6); - streamFace(index + 5, index + 7, index + 6); - streamFace(index + 6, index + 7, index + 4); - streamFace(index + 0, index + 5, index + 1); - index += 8; - } - - ofs << vertices_stream.str(); - ofs << faces_stream.str(); - - ofs.close(); -} - -} // namespace lidar_centerpoint_tvm -} // namespace perception -} // namespace autoware - -#include -RCLCPP_COMPONENTS_REGISTER_NODE( - autoware::perception::lidar_centerpoint_tvm::SingleInferenceLidarCenterPointNode) From 6a8989890682a9e12e1fa5a48af2ea63a1ac899e Mon Sep 17 00:00:00 2001 From: Kotaro Yoshimoto Date: Wed, 26 Jun 2024 06:50:18 +0000 Subject: [PATCH 5/8] chore(ci): fix workflow conditions Signed-off-by: Kotaro Yoshimoto --- .github/workflows/cppcheck-differential.yaml | 22 +++++++++----------- 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/.github/workflows/cppcheck-differential.yaml b/.github/workflows/cppcheck-differential.yaml index 59254d85d3367..f837756d2dbc2 100644 --- a/.github/workflows/cppcheck-differential.yaml +++ b/.github/workflows/cppcheck-differential.yaml @@ -31,35 +31,33 @@ jobs: run: git fetch origin ${{ github.base_ref }} shell: bash - - name: Get modified files - id: get-modified-files + - name: Get changed files + id: get-changed-files run: | - echo "changed_files=$(git diff --name-only "origin/${{ github.base_ref }}"...HEAD | grep -E '\.(cpp|hpp)$' | tr '\n' ' ' || true)" >> $GITHUB_OUTPUT + echo "changed-files=$(git diff --name-only "origin/${{ github.base_ref }}"...HEAD | grep -E '\.(cpp|hpp)$' | tr '\n' ' ' || true)" >> $GITHUB_OUTPUT shell: bash + - name: Run Cppcheck on changed files + if: ${{ steps.get-changed-files.outputs.changed-files != '' }} continue-on-error: true id: cppcheck run: | - files=$(cat changed_files.txt | grep -E '\.(cpp|hpp)$' || true) - if [ -n "$files" ]; then - echo "Running Cppcheck on changed files: ${{ steps.get-modified-files.outputs.changed_files }}" - cppcheck --enable=all --inconclusive --check-level=exhaustive --error-exitcode=1 --suppressions-list=.cppcheck_suppressions ${{ steps.get-modified-files.outputs.changed_files }} 2> cppcheck-report.txt - else - echo "No C++ files changed." 
- touch cppcheck-report.txt - fi + echo "Running Cppcheck on changed files: ${{ steps.get-changed-files.outputs.changed-files }}" + cppcheck --enable=all --inconclusive --check-level=exhaustive --error-exitcode=1 --suppressions-list=.cppcheck_suppressions ${{ steps.get-changed-files.outputs.changed-files }} 2> cppcheck-report.txt shell: bash - name: Show cppcheck-report result + if: ${{ steps.get-changed-files.outputs.changed-files != '' }} run: | cat cppcheck-report.txt - name: Upload Cppcheck report + if: ${{ steps.get-changed-files.outputs.changed-files != '' }} uses: actions/upload-artifact@v2 with: name: cppcheck-report path: cppcheck-report.txt - name: Fail the job if Cppcheck failed - if: ${{ steps.get-modified-files.outputs.changed_files != '' && steps.cppcheck.outcome == 'failure' }} + if: ${{ steps.get-changed-files.outputs.changed-files != '' && steps.cppcheck.outcome == 'failure' }} run: exit 1 From 63d4da2c2b23bfed138ebd464e54985ed9f8ceef Mon Sep 17 00:00:00 2001 From: Kotaro Yoshimoto Date: Wed, 26 Jun 2024 07:01:26 +0000 Subject: [PATCH 6/8] chore(ci): filter un-existing files in cppcheck-differential Signed-off-by: Kotaro Yoshimoto --- .github/workflows/cppcheck-differential.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/cppcheck-differential.yaml b/.github/workflows/cppcheck-differential.yaml index f837756d2dbc2..af5f844ad535e 100644 --- a/.github/workflows/cppcheck-differential.yaml +++ b/.github/workflows/cppcheck-differential.yaml @@ -31,10 +31,10 @@ jobs: run: git fetch origin ${{ github.base_ref }} shell: bash - - name: Get changed files + - name: Get changed files (existing files only) id: get-changed-files run: | - echo "changed-files=$(git diff --name-only "origin/${{ github.base_ref }}"...HEAD | grep -E '\.(cpp|hpp)$' | tr '\n' ' ' || true)" >> $GITHUB_OUTPUT + echo "changed-files=$(git diff --name-only "origin/${{ github.base_ref }}"...HEAD | grep -E '\.(cpp|hpp)$' | while read -r file; do [ -e "$file" ] && echo -n "$file "; done)" >> $GITHUB_OUTPUT shell: bash - name: Run Cppcheck on changed files From 981afbd553f5e4a06370f65351f95e59bc3d11a2 Mon Sep 17 00:00:00 2001 From: Kotaro Yoshimoto Date: Wed, 26 Jun 2024 07:06:36 +0000 Subject: [PATCH 7/8] Revert "chore: delete lidar_centerpoint_tvm package for test" This reverts commit 3b8bcb3d1f253cf1ed85928f771b2aa837b41414. 
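The one-liner added in PATCH 6/8 above keeps only changed files that still exist in the checkout, so paths deleted by the PR (such as the lidar_centerpoint_tvm sources removed and restored in this series) are never handed to cppcheck. For illustration only, the same filtering idea expressed in C++17 with std::filesystem; the file list below is made up.

#include <filesystem>
#include <iostream>
#include <string>
#include <vector>

int main()
{
  const std::vector<std::string> changed = {
    "planning/autoware_velocity_smoother/src/node.cpp",  // still present
    "perception/lidar_centerpoint_tvm/lib/utils.cpp"};   // deleted by the PR

  std::string kept;
  for (const auto & f : changed) {
    // Mirrors the shell check: [ -e "$file" ] && echo -n "$file "
    if (std::filesystem::exists(f)) {
      kept += f + " ";
    }
  }
  std::cout << kept << std::endl;  // space-separated list passed to cppcheck
  return 0;
}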
--- perception/lidar_centerpoint_tvm/.gitignore | 0 .../lidar_centerpoint_tvm/CMakeLists.txt | 100 +++++++ perception/lidar_centerpoint_tvm/README.md | 77 ++++++ .../config/centerpoint.param.yaml | 10 + .../inference_engine_tvm_config.hpp | 61 +++++ ...processing_inference_engine_tvm_config.hpp | 60 +++++ .../inference_engine_tvm_config.hpp | 56 ++++ .../centerpoint_config.hpp | 126 +++++++++ .../lidar_centerpoint_tvm/centerpoint_tvm.hpp | 186 +++++++++++++ .../include/lidar_centerpoint_tvm/node.hpp | 71 +++++ .../postprocess/circle_nms.hpp | 37 +++ .../postprocess/generate_detected_boxes.hpp | 39 +++ .../preprocess/generate_features.hpp | 38 +++ .../preprocess/pointcloud_densification.hpp | 94 +++++++ .../preprocess/voxel_generator.hpp | 77 ++++++ .../lidar_centerpoint_tvm/ros_utils.hpp | 47 ++++ .../single_inference_node.hpp | 66 +++++ .../include/lidar_centerpoint_tvm/utils.hpp | 50 ++++ .../visibility_control.hpp | 36 +++ .../launch/lidar_centerpoint_tvm.launch.xml | 26 ++ ...inference_lidar_centerpoint_tvm.launch.xml | 33 +++ .../lib/centerpoint_tvm.cpp | 246 ++++++++++++++++++ .../lib/postprocess/circle_nms.cpp | 65 +++++ .../postprocess/generate_detected_boxes.cpp | 176 +++++++++++++ .../lib/preprocess/generate_features.cpp | 131 ++++++++++ .../preprocess/pointcloud_densification.cpp | 115 ++++++++ .../lib/preprocess/voxel_generator.cpp | 138 ++++++++++ .../lidar_centerpoint_tvm/lib/ros_utils.cpp | 118 +++++++++ .../lidar_centerpoint_tvm/lib/utils.cpp | 41 +++ perception/lidar_centerpoint_tvm/package.xml | 32 +++ .../scripts/lidar_centerpoint_visualizer.py | 55 ++++ perception/lidar_centerpoint_tvm/src/node.cpp | 154 +++++++++++ .../src/single_inference_node.cpp | 239 +++++++++++++++++ 33 files changed, 2800 insertions(+) create mode 100644 perception/lidar_centerpoint_tvm/.gitignore create mode 100644 perception/lidar_centerpoint_tvm/CMakeLists.txt create mode 100644 perception/lidar_centerpoint_tvm/README.md create mode 100644 perception/lidar_centerpoint_tvm/config/centerpoint.param.yaml create mode 100644 perception/lidar_centerpoint_tvm/data/models/centerpoint_backbone/inference_engine_tvm_config.hpp create mode 100644 perception/lidar_centerpoint_tvm/data/models/centerpoint_backbone/preprocessing_inference_engine_tvm_config.hpp create mode 100644 perception/lidar_centerpoint_tvm/data/models/centerpoint_encoder/inference_engine_tvm_config.hpp create mode 100644 perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/centerpoint_config.hpp create mode 100644 perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/centerpoint_tvm.hpp create mode 100644 perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/node.hpp create mode 100644 perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/postprocess/circle_nms.hpp create mode 100644 perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/postprocess/generate_detected_boxes.hpp create mode 100644 perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/preprocess/generate_features.hpp create mode 100644 perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/preprocess/pointcloud_densification.hpp create mode 100644 perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/preprocess/voxel_generator.hpp create mode 100644 perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/ros_utils.hpp create mode 100644 perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/single_inference_node.hpp create mode 100644 
perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/utils.hpp create mode 100644 perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/visibility_control.hpp create mode 100644 perception/lidar_centerpoint_tvm/launch/lidar_centerpoint_tvm.launch.xml create mode 100644 perception/lidar_centerpoint_tvm/launch/single_inference_lidar_centerpoint_tvm.launch.xml create mode 100644 perception/lidar_centerpoint_tvm/lib/centerpoint_tvm.cpp create mode 100644 perception/lidar_centerpoint_tvm/lib/postprocess/circle_nms.cpp create mode 100644 perception/lidar_centerpoint_tvm/lib/postprocess/generate_detected_boxes.cpp create mode 100644 perception/lidar_centerpoint_tvm/lib/preprocess/generate_features.cpp create mode 100644 perception/lidar_centerpoint_tvm/lib/preprocess/pointcloud_densification.cpp create mode 100644 perception/lidar_centerpoint_tvm/lib/preprocess/voxel_generator.cpp create mode 100644 perception/lidar_centerpoint_tvm/lib/ros_utils.cpp create mode 100644 perception/lidar_centerpoint_tvm/lib/utils.cpp create mode 100644 perception/lidar_centerpoint_tvm/package.xml create mode 100755 perception/lidar_centerpoint_tvm/scripts/lidar_centerpoint_visualizer.py create mode 100644 perception/lidar_centerpoint_tvm/src/node.cpp create mode 100644 perception/lidar_centerpoint_tvm/src/single_inference_node.cpp diff --git a/perception/lidar_centerpoint_tvm/.gitignore b/perception/lidar_centerpoint_tvm/.gitignore new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/perception/lidar_centerpoint_tvm/CMakeLists.txt b/perception/lidar_centerpoint_tvm/CMakeLists.txt new file mode 100644 index 0000000000000..e9da98d4ae3c4 --- /dev/null +++ b/perception/lidar_centerpoint_tvm/CMakeLists.txt @@ -0,0 +1,100 @@ +cmake_minimum_required(VERSION 3.14) +project(lidar_centerpoint_tvm) + +find_package(autoware_cmake REQUIRED) +autoware_package() + +set(tvm_runtime_DIR ${tvm_vendor_DIR}) +find_package(tvm_runtime CONFIG REQUIRED) + +# Gather neural network information. +set(${PROJECT_NAME}_BACKEND llvm CACHE STRING "${PROJECT_NAME} neural network backend") +set(MODEL_NAME_ENCODER centerpoint_encoder) + +# Get neural network. +set(NN_DEPENDENCY_ENCODER "") +get_neural_network(${MODEL_NAME_ENCODER} ${${PROJECT_NAME}_BACKEND} NN_DEPENDENCY_ENCODER) + +set(MODEL_NAME_BACKBONE centerpoint_backbone) + +# Get neural network. 
+set(NN_DEPENDENCY_BACKBONE "") +get_neural_network(${MODEL_NAME_BACKBONE} ${${PROJECT_NAME}_BACKEND} NN_DEPENDENCY_BACKBONE) + +if((NOT NN_DEPENDENCY_ENCODER STREQUAL "") AND (NOT NN_DEPENDENCY_BACKBONE STREQUAL "")) + ## centerpoint_tvm ## + ament_auto_add_library(${PROJECT_NAME} SHARED + data/models/${MODEL_NAME_ENCODER}/inference_engine_tvm_config.hpp + data/models/${MODEL_NAME_BACKBONE}/inference_engine_tvm_config.hpp + data/models/${MODEL_NAME_BACKBONE}/preprocessing_inference_engine_tvm_config.hpp + lib/centerpoint_tvm.cpp + lib/utils.cpp + lib/ros_utils.cpp + lib/preprocess/pointcloud_densification.cpp + lib/preprocess/voxel_generator.cpp + lib/preprocess/generate_features.cpp + lib/postprocess/circle_nms.cpp + lib/postprocess/generate_detected_boxes.cpp + ) + + add_dependencies(${PROJECT_NAME} ${NN_DEPENDENCY_ENCODER}) + add_dependencies(${PROJECT_NAME} ${NN_DEPENDENCY_BACKBONE}) + + target_compile_options(${PROJECT_NAME} PRIVATE "-Wno-sign-conversion" "-Wno-conversion") + + target_include_directories(${PROJECT_NAME} SYSTEM PUBLIC + "${tvm_vendor_INCLUDE_DIRS}" + ) + + target_link_libraries(${PROJECT_NAME} + ${tvm_runtime_LIBRARIES} + ) + + target_include_directories(${PROJECT_NAME} PRIVATE + data/models + ) + + ## node ## + ament_auto_add_library(lidar_centerpoint_tvm_component SHARED + src/node.cpp + ) + + target_link_libraries(lidar_centerpoint_tvm_component + ${PROJECT_NAME} + ) + + rclcpp_components_register_node(lidar_centerpoint_tvm_component + PLUGIN "autoware::perception::lidar_centerpoint_tvm::LidarCenterPointTVMNode" + EXECUTABLE lidar_centerpoint_tvm_node + ) + + ## single inference node ## + ament_auto_add_library(single_inference_lidar_centerpoint_tvm_component SHARED + src/single_inference_node.cpp + ) + + target_link_libraries(single_inference_lidar_centerpoint_tvm_component + ${tvm_runtime_LIBRARIES} + ) + + + rclcpp_components_register_node(single_inference_lidar_centerpoint_tvm_component + PLUGIN "autoware::perception::lidar_centerpoint_tvm::SingleInferenceLidarCenterPointNode" + EXECUTABLE single_inference_lidar_centerpoint_tvm_node + ) + + install(PROGRAMS + scripts/lidar_centerpoint_visualizer.py + DESTINATION lib/${PROJECT_NAME} + ) + + ament_export_dependencies(ament_cmake_python) + + ament_auto_package(INSTALL_TO_SHARE + launch + config + ) +else() + message(WARNING "Neural network not found, skipping package.") + ament_auto_package() +endif() diff --git a/perception/lidar_centerpoint_tvm/README.md b/perception/lidar_centerpoint_tvm/README.md new file mode 100644 index 0000000000000..26d588d4483ad --- /dev/null +++ b/perception/lidar_centerpoint_tvm/README.md @@ -0,0 +1,77 @@ +# lidar_centerpoint_tvm + +## Design + +### Usage + +lidar_centerpoint_tvm is a package for detecting dynamic 3D objects using a TVM-compiled CenterPoint module for different backends. To use this package, replace `lidar_centerpoint` with `lidar_centerpoint_tvm` in perception launch files (for example, `lidar_based_detection.launch.xml` if lidar-based detection is chosen). + +#### Neural network + +This package will not build without a neural network for its inference. +The network is provided by the `tvm_utility` package. +See its design page for more information on how to enable downloading pre-compiled networks (by setting the `DOWNLOAD_ARTIFACTS` CMake variable), or how to handle user-compiled networks. + +#### Backend + +The backend used for the inference can be selected by setting the `lidar_centerpoint_tvm_BACKEND` CMake variable.
+The currently available options are `llvm` for a CPU backend, and `vulkan` or `opencl` for a GPU backend. +It defaults to `llvm`. + +### Inputs / Outputs + +### Input + +| Name | Type | Description | | -------------------- | ------------------------------- | ---------------- | | `~/input/pointcloud` | `sensor_msgs::msg::PointCloud2` | input pointcloud | + +### Output + +| Name | Type | Description | | -------------------------- | ------------------------------------------------ | -------------------- | | `~/output/objects` | `autoware_perception_msgs::msg::DetectedObjects` | detected objects | | `debug/cyclic_time_ms` | `tier4_debug_msgs::msg::Float64Stamped` | cyclic time (ms) | | `debug/processing_time_ms` | `tier4_debug_msgs::msg::Float64Stamped` | processing time (ms) | + +## Parameters + +### Core Parameters + +| Name | Type | Default Value | Description | | ------------------------------- | ------ | ------------- | ----------------------------------------------------------- | | `score_threshold` | float | `0.1` | detected objects with score less than the threshold are ignored | | `densification_world_frame_id` | string | `map` | the world frame id to fuse multi-frame pointcloud | | `densification_num_past_frames` | int | `1` | the number of past frames to fuse with the current frame | + +### Bounding Box + +The lidar segmentation node establishes a bounding box for the detected obstacles. +The `L-fit` method of fitting a bounding box to a cluster is used for that. + +### Limitation and Known Issue + +Due to an accuracy issue of the `centerpoint` model, `vulkan` cannot be used at the moment. +As for the `llvm` backend, real-time performance cannot be achieved. + +### Scatter Implementation + +The scatter function can be implemented using either TVMScript or C++. For the C++ implementation, please refer to + +## Reference + +[1] Yin, Tianwei, Xingyi Zhou, and Philipp Krähenbühl. "Center-based 3d object detection and tracking." arXiv preprint arXiv:2006.11275 (2020). + +[2] Lang, Alex H., et al. "PointPillars: Fast encoders for object detection from point clouds." Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2019. + +[3] + +[4] + +[5] + +## Related issues + + + +- #908: Run Lidar Centerpoint with TVM diff --git a/perception/lidar_centerpoint_tvm/config/centerpoint.param.yaml b/perception/lidar_centerpoint_tvm/config/centerpoint.param.yaml new file mode 100644 index 0000000000000..34dce71975985 --- /dev/null +++ b/perception/lidar_centerpoint_tvm/config/centerpoint.param.yaml @@ -0,0 +1,10 @@ +/**: + ros__parameters: + class_names: ["CAR", "PEDESTRIAN", "BICYCLE"] + rename_car_to_truck_and_bus: true + point_feature_size: 4 + max_voxel_size: 40000 + point_cloud_range: [-89.6, -89.6, -3.0, 89.6, 89.6, 5.0] + voxel_size: [0.32, 0.32, 8.0] + downsample_factor: 1 + encoder_in_feature_size: 9 diff --git a/perception/lidar_centerpoint_tvm/data/models/centerpoint_backbone/inference_engine_tvm_config.hpp b/perception/lidar_centerpoint_tvm/data/models/centerpoint_backbone/inference_engine_tvm_config.hpp new file mode 100644 index 0000000000000..5d832027c7591 --- /dev/null +++ b/perception/lidar_centerpoint_tvm/data/models/centerpoint_backbone/inference_engine_tvm_config.hpp @@ -0,0 +1,61 @@ +// Copyright 2021 Arm Limited and Contributors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "tvm_utility/pipeline.hpp" + +#ifndef PERCEPTION__LIDAR_CENTERPOINT_TVM__DATA__MODELS__CENTERPOINT_BACKBONE__INFERENCE_ENGINE_TVM_CONFIG_HPP_ // NOLINT +#define PERCEPTION__LIDAR_CENTERPOINT_TVM__DATA__MODELS__CENTERPOINT_BACKBONE__INFERENCE_ENGINE_TVM_CONFIG_HPP_ // NOLINT + +namespace model_zoo +{ +namespace perception +{ +namespace lidar_obstacle_detection +{ +namespace centerpoint_backbone +{ +namespace onnx_centerpoint_backbone +{ + +static const tvm_utility::pipeline::InferenceEngineTVMConfig config{ + {3, 0, 0}, // modelzoo_version + + "centerpoint_backbone", // network_name + "llvm", // network_backend + + "./deploy_lib.so", // network_module_path + "./deploy_graph.json", // network_graph_path + "./deploy_param.params", // network_params_path + + // cspell: ignore DLCPU + kDLCPU, // tvm_device_type + 0, // tvm_device_id + + {{"spatial_features", kDLFloat, 32, 1, {1, 32, 560, 560}}}, // network_inputs + + {{"heatmap", kDLFloat, 32, 1, {1, 3, 560, 560}}, + {"reg", kDLFloat, 32, 1, {1, 2, 560, 560}}, + {"height", kDLFloat, 32, 1, {1, 1, 560, 560}}, + {"dim", kDLFloat, 32, 1, {1, 3, 560, 560}}, + {"rot", kDLFloat, 32, 1, {1, 2, 560, 560}}, + {"vel", kDLFloat, 32, 1, {1, 2, 560, 560}}} // network_outputs +}; + +} // namespace onnx_centerpoint_backbone +} // namespace centerpoint_backbone +} // namespace lidar_obstacle_detection +} // namespace perception +} // namespace model_zoo +// NOLINTNEXTLINE +#endif // PERCEPTION__LIDAR_CENTERPOINT_TVM__DATA__MODELS__CENTERPOINT_BACKBONE__INFERENCE_ENGINE_TVM_CONFIG_HPP_ diff --git a/perception/lidar_centerpoint_tvm/data/models/centerpoint_backbone/preprocessing_inference_engine_tvm_config.hpp b/perception/lidar_centerpoint_tvm/data/models/centerpoint_backbone/preprocessing_inference_engine_tvm_config.hpp new file mode 100644 index 0000000000000..0cae27b49572c --- /dev/null +++ b/perception/lidar_centerpoint_tvm/data/models/centerpoint_backbone/preprocessing_inference_engine_tvm_config.hpp @@ -0,0 +1,60 @@ +// Copyright 2021 Arm Limited and Contributors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
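For reference, a minimal sketch of how the tensor shapes declared in such a config can be consumed, assuming `network_outputs` and its `node_shape` member are standard containers (the patch itself only indexes them):

```cpp
// Sketch: element counts of the backbone outputs declared above,
// e.g. heatmap = 1 * 3 * 560 * 560 = 940800 floats.
#include <cstdint>
#include <iostream>

// Assumes data/models is on the include path, as set up in the CMakeLists.txt above.
#include <centerpoint_backbone/inference_engine_tvm_config.hpp>

int main()
{
  const auto & config = model_zoo::perception::lidar_obstacle_detection::centerpoint_backbone::
    onnx_centerpoint_backbone::config;
  for (const auto & output : config.network_outputs) {
    int64_t elements = 1;
    for (const int64_t dim : output.node_shape) {
      elements *= dim;
    }
    std::cout << elements << " elements" << std::endl;
  }
  return 0;
}
```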
+ +#include "tvm_utility/pipeline.hpp" + +#ifndef PERCEPTION__LIDAR_CENTERPOINT_TVM__DATA__MODELS__CENTERPOINT_BACKBONE__PREPROCESSING_INFERENCE_ENGINE_TVM_CONFIG_HPP_ // NOLINT +#define PERCEPTION__LIDAR_CENTERPOINT_TVM__DATA__MODELS__CENTERPOINT_BACKBONE__PREPROCESSING_INFERENCE_ENGINE_TVM_CONFIG_HPP_ // NOLINT + +namespace model_zoo +{ +namespace perception +{ +namespace lidar_obstacle_detection +{ +namespace centerpoint_backbone +{ +namespace onnx_centerpoint_backbone +{ +namespace preprocessing +{ + +static const tvm_utility::pipeline::InferenceEngineTVMConfig config{ + {3, 0, 0}, // modelzoo_version + + "centerpoint_backbone", // network_name + "llvm", // network_backend + + "./preprocess.so", // network_module_path + "./", // network_graph_path + "./", // network_params_path + + // cspell: ignore DLCPU + kDLCPU, // tvm_device_type + 0, // tvm_device_id + + {{"pillar_features", kDLFloat, 32, 1, {40000, 1, 32}}, + {"coords", kDLInt, 32, 1, {40000, 3}}}, // network_inputs + + {{"spatial_features", kDLFloat, 32, 1, {1, 32, 560, 560}}} // network_outputs +}; + +} // namespace preprocessing +} // namespace onnx_centerpoint_backbone +} // namespace centerpoint_backbone +} // namespace lidar_obstacle_detection +} // namespace perception +} // namespace model_zoo +// NOLINTNEXTLINE +#endif // PERCEPTION__LIDAR_CENTERPOINT_TVM__DATA__MODELS__CENTERPOINT_BACKBONE__PREPROCESSING_INFERENCE_ENGINE_TVM_CONFIG_HPP_ diff --git a/perception/lidar_centerpoint_tvm/data/models/centerpoint_encoder/inference_engine_tvm_config.hpp b/perception/lidar_centerpoint_tvm/data/models/centerpoint_encoder/inference_engine_tvm_config.hpp new file mode 100644 index 0000000000000..3e423dbe99877 --- /dev/null +++ b/perception/lidar_centerpoint_tvm/data/models/centerpoint_encoder/inference_engine_tvm_config.hpp @@ -0,0 +1,56 @@ +// Copyright 2021 Arm Limited and Contributors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "tvm_utility/pipeline.hpp" + +#ifndef PERCEPTION__LIDAR_CENTERPOINT_TVM__DATA__MODELS__CENTERPOINT_ENCODER__INFERENCE_ENGINE_TVM_CONFIG_HPP_ // NOLINT +#define PERCEPTION__LIDAR_CENTERPOINT_TVM__DATA__MODELS__CENTERPOINT_ENCODER__INFERENCE_ENGINE_TVM_CONFIG_HPP_ // NOLINT + +namespace model_zoo +{ +namespace perception +{ +namespace lidar_obstacle_detection +{ +namespace centerpoint_encoder +{ +namespace onnx_centerpoint_encoder +{ + +static const tvm_utility::pipeline::InferenceEngineTVMConfig config{ + {3, 0, 0}, // modelzoo_version + + "centerpoint_encoder", // network_name + "llvm", // network_backend + + "./deploy_lib.so", // network_module_path + "./deploy_graph.json", // network_graph_path + "./deploy_param.params", // network_params_path + + // cspell: ignore DLCPU + kDLCPU, // tvm_device_type + 0, // tvm_device_id + + {{"input_features", kDLFloat, 32, 1, {40000, 32, 9}}}, // network_inputs + + {{"pillar_features", kDLFloat, 32, 1, {40000, 1, 32}}} // network_outputs +}; + +} // namespace onnx_centerpoint_encoder +} // namespace centerpoint_encoder +} // namespace lidar_obstacle_detection +} // namespace perception +} // namespace model_zoo +// NOLINTNEXTLINE +#endif // PERCEPTION__LIDAR_CENTERPOINT_TVM__DATA__MODELS__CENTERPOINT_ENCODER__INFERENCE_ENGINE_TVM_CONFIG_HPP_ diff --git a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/centerpoint_config.hpp b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/centerpoint_config.hpp new file mode 100644 index 0000000000000..5825decbb78d6 --- /dev/null +++ b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/centerpoint_config.hpp @@ -0,0 +1,126 @@ +// Copyright 2021-2022 AutoCore Ltd., TIER IV, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef LIDAR_CENTERPOINT_TVM__CENTERPOINT_CONFIG_HPP_ +#define LIDAR_CENTERPOINT_TVM__CENTERPOINT_CONFIG_HPP_ + +#include +#include + +namespace autoware +{ +namespace perception +{ +namespace lidar_centerpoint_tvm +{ +class CenterPointConfig +{ +public: + explicit CenterPointConfig( + const std::size_t class_size, const float point_feature_size, const std::size_t max_voxel_size, + const std::vector & point_cloud_range, const std::vector & voxel_size, + const std::size_t downsample_factor, const std::size_t encoder_in_feature_size, + const float score_threshold, const float circle_nms_dist_threshold, + const float yaw_norm_threshold) + { + class_size_ = class_size; + point_feature_size_ = point_feature_size; + max_voxel_size_ = max_voxel_size; + if (point_cloud_range.size() == 6) { + range_min_x_ = static_cast(point_cloud_range[0]); + range_min_y_ = static_cast(point_cloud_range[1]); + range_min_z_ = static_cast(point_cloud_range[2]); + range_max_x_ = static_cast(point_cloud_range[3]); + range_max_y_ = static_cast(point_cloud_range[4]); + range_max_z_ = static_cast(point_cloud_range[5]); + } + if (voxel_size.size() == 3) { + voxel_size_x_ = static_cast(voxel_size[0]); + voxel_size_y_ = static_cast(voxel_size[1]); + voxel_size_z_ = static_cast(voxel_size[2]); + } + downsample_factor_ = downsample_factor; + encoder_in_feature_size_ = encoder_in_feature_size; + + if (score_threshold > 0 && score_threshold < 1) { + score_threshold_ = score_threshold; + } + + if (circle_nms_dist_threshold > 0) { + circle_nms_dist_threshold_ = circle_nms_dist_threshold; + } + + if (yaw_norm_threshold >= 0 && yaw_norm_threshold < 1) { + yaw_norm_threshold_ = yaw_norm_threshold; + } + + grid_size_x_ = static_cast((range_max_x_ - range_min_x_) / voxel_size_x_); + grid_size_y_ = static_cast((range_max_y_ - range_min_y_) / voxel_size_y_); + grid_size_z_ = static_cast((range_max_z_ - range_min_z_) / voxel_size_z_); + offset_x_ = range_min_x_ + voxel_size_x_ / 2; + offset_y_ = range_min_y_ + voxel_size_y_ / 2; + offset_z_ = range_min_z_ + voxel_size_z_ / 2; + down_grid_size_x_ = grid_size_x_ / downsample_factor_; + down_grid_size_y_ = grid_size_y_ / downsample_factor_; + }; + + // input params + std::size_t class_size_{3}; + const std::size_t point_dim_size_{3}; // x, y and z + std::size_t point_feature_size_{4}; // x, y, z and time lag + std::size_t max_point_in_voxel_size_{32}; + std::size_t max_voxel_size_{40000}; + float range_min_x_{-89.6f}; + float range_min_y_{-89.6f}; + float range_min_z_{-3.0f}; + float range_max_x_{89.6f}; + float range_max_y_{89.6f}; + float range_max_z_{5.0f}; + float voxel_size_x_{0.32f}; + float voxel_size_y_{0.32f}; + float voxel_size_z_{8.0f}; + + // network params + const std::size_t batch_size_{1}; + std::size_t downsample_factor_{2}; + std::size_t encoder_in_feature_size_{9}; + const std::size_t encoder_out_feature_size_{32}; + const std::size_t head_out_size_{6}; + const std::size_t head_out_offset_size_{2}; + const std::size_t head_out_z_size_{1}; + const std::size_t head_out_dim_size_{3}; + const std::size_t head_out_rot_size_{2}; + const std::size_t head_out_vel_size_{2}; + + // post-process params + float score_threshold_{0.35f}; + float circle_nms_dist_threshold_{1.5f}; + float yaw_norm_threshold_{0.0f}; + + // calculated params + std::size_t grid_size_x_ = (range_max_x_ - range_min_x_) / voxel_size_x_; + std::size_t grid_size_y_ = (range_max_y_ - range_min_y_) / voxel_size_y_; + std::size_t grid_size_z_ = (range_max_z_ - range_min_z_) / voxel_size_z_; + float offset_x_ = 
range_min_x_ + voxel_size_x_ / 2; + float offset_y_ = range_min_y_ + voxel_size_y_ / 2; + float offset_z_ = range_min_z_ + voxel_size_z_ / 2; + std::size_t down_grid_size_x_ = grid_size_x_ / downsample_factor_; + std::size_t down_grid_size_y_ = grid_size_y_ / downsample_factor_; +}; + +} // namespace lidar_centerpoint_tvm +} // namespace perception +} // namespace autoware + +#endif // LIDAR_CENTERPOINT_TVM__CENTERPOINT_CONFIG_HPP_ diff --git a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/centerpoint_tvm.hpp b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/centerpoint_tvm.hpp new file mode 100644 index 0000000000000..865ca7d4253bf --- /dev/null +++ b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/centerpoint_tvm.hpp @@ -0,0 +1,186 @@ +// Copyright 2021-2022 AutoCore Ltd., TIER IV, Inc., Arm Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef LIDAR_CENTERPOINT_TVM__CENTERPOINT_TVM_HPP_ +#define LIDAR_CENTERPOINT_TVM__CENTERPOINT_TVM_HPP_ + +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace autoware +{ +namespace perception +{ +namespace lidar_centerpoint_tvm +{ +using tvm_utility::pipeline::TVMArrayContainer; +using tvm_utility::pipeline::TVMArrayContainerVector; + +struct MixedInputs +{ + // The number of non-empty voxel + std::size_t num_voxels; + // The voxel features (point info in each voxel) or pillar features. + std::vector features; + // The number of points in each voxel. + std::vector num_points_per_voxel; + // The index from voxel number to its 3D position + std::vector coords; +}; + +class LIDAR_CENTERPOINT_TVM_LOCAL TVMScatterIE : public tvm_utility::pipeline::InferenceEngine +{ +public: + explicit TVMScatterIE( + tvm_utility::pipeline::InferenceEngineTVMConfig config, const std::string & pkg_name, + const std::string & data_path, const std::string & function_name); + TVMArrayContainerVector schedule(const TVMArrayContainerVector & input); + void set_coords(TVMArrayContainer coords) { coords_ = coords; } + +private: + tvm_utility::pipeline::InferenceEngineTVMConfig config_; + TVMArrayContainer coords_; + TVMArrayContainerVector output_; + tvm::runtime::PackedFunc scatter_function; +}; + +class LIDAR_CENTERPOINT_TVM_LOCAL VoxelEncoderPreProcessor +: public tvm_utility::pipeline::PreProcessor +{ +public: + /// \brief Constructor. + /// \param[in] config The TVM configuration. + /// \param[in] config_mod The centerpoint model configuration. + explicit VoxelEncoderPreProcessor( + const tvm_utility::pipeline::InferenceEngineTVMConfig & config, + const CenterPointConfig & config_mod); + + /// \brief Convert the voxel_features to encoder_in_features (a TVM array). + /// \param[in] voxel_inputs The voxel features related input + /// \return A TVM array containing the encoder_in_features. + /// \throw std::runtime_error If the features are incorrectly configured. 
+ TVMArrayContainerVector schedule(const MixedInputs & voxel_inputs); + +private: + const int64_t max_voxel_size; + const int64_t max_point_in_voxel_size; + const int64_t encoder_in_feature_size; + const int64_t input_datatype_bytes; + const CenterPointConfig config_detail; + std::vector encoder_in_features; + TVMArrayContainer output; +}; + +class LIDAR_CENTERPOINT_TVM_LOCAL BackboneNeckHeadPostProcessor +: public tvm_utility::pipeline::PostProcessor> +{ +public: + /// \brief Constructor. + /// \param[in] config The TVM configuration. + /// \param[in] config_mod The centerpoint model configuration. + explicit BackboneNeckHeadPostProcessor( + const tvm_utility::pipeline::InferenceEngineTVMConfig & config, + const CenterPointConfig & config_mod); + + /// \brief Copy the inference result. + /// \param[in] input The result of the inference engine. + /// \return The inferred data. + std::vector schedule(const TVMArrayContainerVector & input); + +private: + const int64_t output_datatype_bytes; + const CenterPointConfig config_detail; + std::vector head_out_heatmap; + std::vector head_out_offset; + std::vector head_out_z; + std::vector head_out_dim; + std::vector head_out_rot; + std::vector head_out_vel; +}; + +class LIDAR_CENTERPOINT_TVM_PUBLIC CenterPointTVM +{ +public: + /// \brief Constructor + /// \param[in] dense_param The densification parameter used to constructing vg_ptr. + /// \param[in] config The CenterPoint model configuration. + explicit CenterPointTVM( + const DensificationParam & densification_param, const CenterPointConfig & config, + const std::string & data_path); + + ~CenterPointTVM(); + + bool detect( + const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg, const tf2_ros::Buffer & tf_buffer, + std::vector & det_boxes3d); + +protected: + void initPtr(); + + virtual bool preprocess( + const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg, const tf2_ros::Buffer & tf_buffer); + + using VE_PrePT = VoxelEncoderPreProcessor; + using IET = tvm_utility::pipeline::InferenceEngineTVM; + using BNH_PostPT = BackboneNeckHeadPostProcessor; + using TSE = TVMScatterIE; + using TSP = tvm_utility::pipeline::TowStagePipeline; + + tvm_utility::pipeline::InferenceEngineTVMConfig config_ve; + tvm_utility::pipeline::InferenceEngineTVMConfig config_bnh; + + // Voxel Encoder Pipeline. + std::shared_ptr VE_PreP; + std::shared_ptr VE_IE; + + // Backbone Neck Head Pipeline. + std::shared_ptr BNH_IE; + std::shared_ptr BNH_PostP; + + std::shared_ptr scatter_ie; + std::shared_ptr> + TSP_pipeline; + + // Variables + std::unique_ptr vg_ptr_{nullptr}; + + CenterPointConfig config_; + std::size_t num_voxels_{0}; + std::shared_ptr> voxels_; + std::shared_ptr> coordinates_; + std::shared_ptr> num_points_per_voxel_; + TVMArrayContainer coords_tvm_; +}; + +} // namespace lidar_centerpoint_tvm +} // namespace perception +} // namespace autoware + +#endif // LIDAR_CENTERPOINT_TVM__CENTERPOINT_TVM_HPP_ diff --git a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/node.hpp b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/node.hpp new file mode 100644 index 0000000000000..88a43139908a7 --- /dev/null +++ b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/node.hpp @@ -0,0 +1,71 @@ +// Copyright 2021-2022 AutoCore Ltd., TIER IV, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef LIDAR_CENTERPOINT_TVM__NODE_HPP_ +#define LIDAR_CENTERPOINT_TVM__NODE_HPP_ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace autoware +{ +namespace perception +{ +namespace lidar_centerpoint_tvm +{ +class LIDAR_CENTERPOINT_TVM_PUBLIC LidarCenterPointTVMNode : public rclcpp::Node +{ +public: + explicit LidarCenterPointTVMNode(const rclcpp::NodeOptions & node_options); + +private: + void pointCloudCallback(const sensor_msgs::msg::PointCloud2::ConstSharedPtr input_pointcloud_msg); + + tf2_ros::Buffer tf_buffer_; + tf2_ros::TransformListener tf_listener_{tf_buffer_}; + + rclcpp::Subscription::SharedPtr pointcloud_sub_; + rclcpp::Publisher::SharedPtr objects_pub_; + + float score_threshold_{0.0}; + std::vector class_names_; + bool rename_car_to_truck_and_bus_{false}; + bool has_twist_{false}; + + std::unique_ptr detector_ptr_{nullptr}; + + // debugger + std::unique_ptr> stop_watch_ptr_{ + nullptr}; + std::unique_ptr debug_publisher_ptr_{nullptr}; +}; + +} // namespace lidar_centerpoint_tvm +} // namespace perception +} // namespace autoware + +#endif // LIDAR_CENTERPOINT_TVM__NODE_HPP_ diff --git a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/postprocess/circle_nms.hpp b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/postprocess/circle_nms.hpp new file mode 100644 index 0000000000000..3a0847f00bd21 --- /dev/null +++ b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/postprocess/circle_nms.hpp @@ -0,0 +1,37 @@ +// Copyright 2022 AutoCore Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef LIDAR_CENTERPOINT_TVM__POSTPROCESS__CIRCLE_NMS_HPP_ +#define LIDAR_CENTERPOINT_TVM__POSTPROCESS__CIRCLE_NMS_HPP_ + +#include + +#include + +namespace autoware +{ +namespace perception +{ +namespace lidar_centerpoint_tvm +{ +// Non-maximum suppression (NMS) uses the distance on the xy plane instead of +// intersection over union (IoU) to suppress overlapped objects. 
+std::size_t circleNMS( + std::vector & boxes3d, const float dist_thresh, std::vector & keep_mask); + +} // namespace lidar_centerpoint_tvm +} // namespace perception +} // namespace autoware + +#endif // LIDAR_CENTERPOINT_TVM__POSTPROCESS__CIRCLE_NMS_HPP_ diff --git a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/postprocess/generate_detected_boxes.hpp b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/postprocess/generate_detected_boxes.hpp new file mode 100644 index 0000000000000..ba91addc5e02e --- /dev/null +++ b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/postprocess/generate_detected_boxes.hpp @@ -0,0 +1,39 @@ +// Copyright 2022 AutoCore Ltd., TIER IV, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef LIDAR_CENTERPOINT_TVM__POSTPROCESS__GENERATE_DETECTED_BOXES_HPP_ +#define LIDAR_CENTERPOINT_TVM__POSTPROCESS__GENERATE_DETECTED_BOXES_HPP_ + +#include +#include + +#include + +namespace autoware +{ +namespace perception +{ +namespace lidar_centerpoint_tvm +{ +void generateDetectedBoxes3D( + const std::vector & out_heatmap, const std::vector & out_offset, + const std::vector & out_z, const std::vector & out_dim, + const std::vector & out_rot, const std::vector & out_vel, + const CenterPointConfig & config, std::vector & det_boxes3d); + +} // namespace lidar_centerpoint_tvm +} // namespace perception +} // namespace autoware + +#endif // LIDAR_CENTERPOINT_TVM__POSTPROCESS__GENERATE_DETECTED_BOXES_HPP_ diff --git a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/preprocess/generate_features.hpp b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/preprocess/generate_features.hpp new file mode 100644 index 0000000000000..d799dc0bfac57 --- /dev/null +++ b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/preprocess/generate_features.hpp @@ -0,0 +1,38 @@ +// Copyright 2021-2022 AutoCore Ltd., TIER IV, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
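A minimal sketch of how the `circleNMS` interface declared above is meant to be called. The `bool` mask element type is an assumption (the implementation later in this patch only indexes the mask), and the boxes must already be sorted by descending score, as that implementation requires:

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

#include "lidar_centerpoint_tvm/postprocess/circle_nms.hpp"
#include "lidar_centerpoint_tvm/utils.hpp"

namespace lct = autoware::perception::lidar_centerpoint_tvm;

std::vector<lct::Box3D> suppressOverlaps(std::vector<lct::Box3D> boxes, const float dist_thresh)
{
  // circleNMS expects the candidates sorted by score, largest first.
  std::sort(boxes.begin(), boxes.end(), [](const lct::Box3D & a, const lct::Box3D & b) {
    return a.score > b.score;
  });
  std::vector<bool> keep_mask(boxes.size());
  lct::circleNMS(boxes, dist_thresh, keep_mask);
  std::vector<lct::Box3D> kept;
  for (std::size_t i = 0; i < boxes.size(); ++i) {
    if (keep_mask[i]) {
      kept.push_back(boxes[i]);
    }
  }
  return kept;
}
```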
+ +#ifndef LIDAR_CENTERPOINT_TVM__PREPROCESS__GENERATE_FEATURES_HPP_ +#define LIDAR_CENTERPOINT_TVM__PREPROCESS__GENERATE_FEATURES_HPP_ + +#include + +#include +#include + +namespace autoware +{ +namespace perception +{ +namespace lidar_centerpoint_tvm +{ +void generateFeatures( + const std::vector & voxel_features, const std::vector & voxel_num_points, + const std::vector & coords, const std::size_t num_voxels, + const CenterPointConfig & config, std::vector & features); + +} // namespace lidar_centerpoint_tvm +} // namespace perception +} // namespace autoware + +#endif // LIDAR_CENTERPOINT_TVM__PREPROCESS__GENERATE_FEATURES_HPP_ diff --git a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/preprocess/pointcloud_densification.hpp b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/preprocess/pointcloud_densification.hpp new file mode 100644 index 0000000000000..70ec420666a3f --- /dev/null +++ b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/preprocess/pointcloud_densification.hpp @@ -0,0 +1,94 @@ +// Copyright 2021-2022 AutoCore Ltd., TIER IV, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef LIDAR_CENTERPOINT_TVM__PREPROCESS__POINTCLOUD_DENSIFICATION_HPP_ +#define LIDAR_CENTERPOINT_TVM__PREPROCESS__POINTCLOUD_DENSIFICATION_HPP_ + +#include + +#include +#include +#ifdef ROS_DISTRO_GALACTIC +#include +#else +#include +#endif + +#include +#include +#include + +namespace autoware +{ +namespace perception +{ +namespace lidar_centerpoint_tvm +{ +class LIDAR_CENTERPOINT_TVM_LOCAL DensificationParam +{ +public: + DensificationParam(const std::string & world_frame_id, const uint32_t num_past_frames) + : world_frame_id_(std::move(world_frame_id)), + pointcloud_cache_size_(num_past_frames + /*current frame*/ 1) + { + } + + std::string world_frame_id() const { return world_frame_id_; } + uint32_t pointcloud_cache_size() const { return pointcloud_cache_size_; } + +private: + std::string world_frame_id_; + uint32_t pointcloud_cache_size_{1}; +}; + +struct PointCloudWithTransform +{ + sensor_msgs::msg::PointCloud2 pointcloud_msg; + Eigen::Affine3f affine_past2world; +}; + +class LIDAR_CENTERPOINT_TVM_LOCAL PointCloudDensification +{ +public: + explicit PointCloudDensification(const DensificationParam & param); + + bool enqueuePointCloud( + const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg, const tf2_ros::Buffer & tf_buffer); + + double getCurrentTimestamp() const { return current_timestamp_; } + Eigen::Affine3f getAffineWorldToCurrent() const { return affine_world2current_; } + std::list::iterator getPointCloudCacheIter() + { + return pointcloud_cache_.begin(); + } + bool isCacheEnd(std::list::iterator iter) + { + return iter == pointcloud_cache_.end(); + } + +private: + void enqueue(const sensor_msgs::msg::PointCloud2 & msg, const Eigen::Affine3f & affine); + void dequeue(); + + DensificationParam param_; + double current_timestamp_{0.0}; + Eigen::Affine3f affine_world2current_; + std::list pointcloud_cache_; +}; + +} // 
namespace lidar_centerpoint_tvm +} // namespace perception +} // namespace autoware + +#endif // LIDAR_CENTERPOINT_TVM__PREPROCESS__POINTCLOUD_DENSIFICATION_HPP_ diff --git a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/preprocess/voxel_generator.hpp b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/preprocess/voxel_generator.hpp new file mode 100644 index 0000000000000..7ef49841c680b --- /dev/null +++ b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/preprocess/voxel_generator.hpp @@ -0,0 +1,77 @@ +// Copyright 2021-2022 AutoCore Ltd., TIER IV, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef LIDAR_CENTERPOINT_TVM__PREPROCESS__VOXEL_GENERATOR_HPP_ +#define LIDAR_CENTERPOINT_TVM__PREPROCESS__VOXEL_GENERATOR_HPP_ + +#include +#include +#include + +#include + +#include +#include + +namespace autoware +{ +namespace perception +{ +namespace lidar_centerpoint_tvm +{ +class LIDAR_CENTERPOINT_TVM_LOCAL VoxelGeneratorTemplate +{ +public: + explicit VoxelGeneratorTemplate( + const DensificationParam & param, const CenterPointConfig & config); + + virtual std::size_t pointsToVoxels( + std::vector & voxels, std::vector & coordinates, + std::vector & num_points_per_voxel) = 0; + + bool enqueuePointCloud( + const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg, const tf2_ros::Buffer & tf_buffer); + +protected: + std::unique_ptr pd_ptr_{nullptr}; + + CenterPointConfig config_; + std::array range_; + std::array grid_size_; + std::array recip_voxel_size_; +}; + +class LIDAR_CENTERPOINT_TVM_LOCAL VoxelGenerator : public VoxelGeneratorTemplate +{ +public: + using VoxelGeneratorTemplate::VoxelGeneratorTemplate; + + /** + * @brief Traverse all the lidar points and put each point into the corresponding voxel. + * + * @param voxels To store point info in each voxel + * @param coordinates To store the index from voxel number to its 3D position + * @param num_points_per_voxel To store the number of points in each voxel + * @return The number of non-empty voxel + */ + std::size_t pointsToVoxels( + std::vector & voxels, std::vector & coordinates, + std::vector & num_points_per_voxel) override; +}; + +} // namespace lidar_centerpoint_tvm +} // namespace perception +} // namespace autoware + +#endif // LIDAR_CENTERPOINT_TVM__PREPROCESS__VOXEL_GENERATOR_HPP_ diff --git a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/ros_utils.hpp b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/ros_utils.hpp new file mode 100644 index 0000000000000..a33430a1b09ba --- /dev/null +++ b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/ros_utils.hpp @@ -0,0 +1,47 @@ +// Copyright 2022 AutoCore Ltd., TIER IV, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef LIDAR_CENTERPOINT_TVM__ROS_UTILS_HPP_ +#define LIDAR_CENTERPOINT_TVM__ROS_UTILS_HPP_ + +#include + +#include +#include +#include +#include + +#include +#include + +namespace autoware +{ +namespace perception +{ +namespace lidar_centerpoint_tvm +{ +void box3DToDetectedObject( + const Box3D & box3d, const std::vector & class_names, + const bool rename_car_to_truck_and_bus, const bool has_twist, + autoware_perception_msgs::msg::DetectedObject & obj); + +uint8_t getSemanticType(const std::string & class_name); + +bool isCarLikeVehicleLabel(const uint8_t label); + +} // namespace lidar_centerpoint_tvm +} // namespace perception +} // namespace autoware + +#endif // LIDAR_CENTERPOINT_TVM__ROS_UTILS_HPP_ diff --git a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/single_inference_node.hpp b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/single_inference_node.hpp new file mode 100644 index 0000000000000..0a4fcb27ac202 --- /dev/null +++ b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/single_inference_node.hpp @@ -0,0 +1,66 @@ +// Copyright 2022 TIER IV, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
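A minimal sketch of converting one raw `Box3D` into a ROS message with the `box3DToDetectedObject` helper declared above, using the class list from `centerpoint.param.yaml`; the flag values are illustrative:

```cpp
#include <string>
#include <vector>

#include "lidar_centerpoint_tvm/ros_utils.hpp"
#include "lidar_centerpoint_tvm/utils.hpp"

#include <autoware_perception_msgs/msg/detected_object.hpp>

autoware_perception_msgs::msg::DetectedObject toDetectedObject(
  const autoware::perception::lidar_centerpoint_tvm::Box3D & box3d)
{
  const std::vector<std::string> class_names{"CAR", "PEDESTRIAN", "BICYCLE"};
  autoware_perception_msgs::msg::DetectedObject obj;
  autoware::perception::lidar_centerpoint_tvm::box3DToDetectedObject(
    box3d, class_names, /*rename_car_to_truck_and_bus=*/true, /*has_twist=*/false, obj);
  return obj;
}
```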
+ +#ifndef LIDAR_CENTERPOINT_TVM__SINGLE_INFERENCE_NODE_HPP_ +#define LIDAR_CENTERPOINT_TVM__SINGLE_INFERENCE_NODE_HPP_ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +namespace autoware +{ +namespace perception +{ +namespace lidar_centerpoint_tvm +{ +class SingleInferenceLidarCenterPointNode : public rclcpp::Node +{ +public: + explicit SingleInferenceLidarCenterPointNode(const rclcpp::NodeOptions & node_options); + +private: + void detect(const std::string & pcd_path, const std::string & detections_path); + std::vector getVertices( + const autoware_perception_msgs::msg::Shape & shape, const Eigen::Affine3d & pose) const; + void dumpDetectionsAsMesh( + const autoware_perception_msgs::msg::DetectedObjects & objects_msg, + const std::string & output_path) const; + + tf2_ros::Buffer tf_buffer_; + tf2_ros::TransformListener tf_listener_{tf_buffer_}; + + float score_threshold_{0.0}; + std::vector class_names_; + bool rename_car_to_truck_and_bus_{false}; + bool has_twist_{false}; + + std::unique_ptr detector_ptr_{nullptr}; +}; + +} // namespace lidar_centerpoint_tvm +} // namespace perception +} // namespace autoware + +#endif // LIDAR_CENTERPOINT_TVM__SINGLE_INFERENCE_NODE_HPP_ diff --git a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/utils.hpp b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/utils.hpp new file mode 100644 index 0000000000000..259deef53f189 --- /dev/null +++ b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/utils.hpp @@ -0,0 +1,50 @@ +// Copyright 2022 AutoCore Ltd., TIER IV, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef LIDAR_CENTERPOINT_TVM__UTILS_HPP_ +#define LIDAR_CENTERPOINT_TVM__UTILS_HPP_ + +#include +#include + +namespace autoware +{ +namespace perception +{ +namespace lidar_centerpoint_tvm +{ +struct Box3D +{ + // initializer not allowed for __shared__ variable + int32_t label; + float score; + float x; + float y; + float z; + float length; + float width; + float height; + float yaw; + float vel_x; + float vel_y; +}; + +// cspell: ignore divup +std::size_t divup(const std::size_t a, const std::size_t b); + +} // namespace lidar_centerpoint_tvm +} // namespace perception +} // namespace autoware + +#endif // LIDAR_CENTERPOINT_TVM__UTILS_HPP_ diff --git a/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/visibility_control.hpp b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/visibility_control.hpp new file mode 100644 index 0000000000000..a4afc54afc5fb --- /dev/null +++ b/perception/lidar_centerpoint_tvm/include/lidar_centerpoint_tvm/visibility_control.hpp @@ -0,0 +1,36 @@ +// Copyright 2021-2022 Arm Ltd., AutoCore Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef LIDAR_CENTERPOINT_TVM__VISIBILITY_CONTROL_HPP_ +#define LIDAR_CENTERPOINT_TVM__VISIBILITY_CONTROL_HPP_ + +#if defined(__WIN32) +#if defined(LIDAR_CENTERPOINT_TVM_BUILDING_DLL) || defined(LIDAR_CENTERPOINT_TVM_EXPORTS) +#define LIDAR_CENTERPOINT_TVM_PUBLIC __declspec(dllexport) +#define LIDAR_CENTERPOINT_TVM__LOCAL +#else +#define LIDAR_CENTERPOINT_TVM_PUBLIC __declspec(dllimport) +#define LIDAR_CENTERPOINT_TVM__LOCAL +#endif +#elif defined(__linux__) +#define LIDAR_CENTERPOINT_TVM_PUBLIC __attribute__((visibility("default"))) +#define LIDAR_CENTERPOINT_TVM_LOCAL __attribute__((visibility("hidden"))) +#elif defined(__APPLE__) +#define LIDAR_CENTERPOINT_TVM_PUBLIC __attribute__((visibility("default"))) +#define LIDAR_CENTERPOINT_TVM_LOCAL __attribute__((visibility("hidden"))) +#else +#error "Unsupported Build Configuration" +#endif + +#endif // LIDAR_CENTERPOINT_TVM__VISIBILITY_CONTROL_HPP_ diff --git a/perception/lidar_centerpoint_tvm/launch/lidar_centerpoint_tvm.launch.xml b/perception/lidar_centerpoint_tvm/launch/lidar_centerpoint_tvm.launch.xml new file mode 100644 index 0000000000000..2bd6e3aa15c21 --- /dev/null +++ b/perception/lidar_centerpoint_tvm/launch/lidar_centerpoint_tvm.launch.xml @@ -0,0 +1,26 @@ + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/perception/lidar_centerpoint_tvm/launch/single_inference_lidar_centerpoint_tvm.launch.xml b/perception/lidar_centerpoint_tvm/launch/single_inference_lidar_centerpoint_tvm.launch.xml new file mode 100644 index 0000000000000..c2e0801fbd11c --- /dev/null +++ b/perception/lidar_centerpoint_tvm/launch/single_inference_lidar_centerpoint_tvm.launch.xml @@ -0,0 +1,33 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/perception/lidar_centerpoint_tvm/lib/centerpoint_tvm.cpp b/perception/lidar_centerpoint_tvm/lib/centerpoint_tvm.cpp new file mode 100644 index 0000000000000..91f727448b76a --- /dev/null +++ b/perception/lidar_centerpoint_tvm/lib/centerpoint_tvm.cpp @@ -0,0 +1,246 @@ +// Copyright 2021-2022 AutoCore Ltd., TIER IV, Inc., Arm Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
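Before the implementation, a minimal sketch of the call sequence a node performs with `CenterPointTVM`; parameter values mirror the defaults shown earlier in this patch, and the data path is a placeholder:

```cpp
#include <string>
#include <vector>

#include "lidar_centerpoint_tvm/centerpoint_tvm.hpp"

#include <sensor_msgs/msg/point_cloud2.hpp>
#include <tf2_ros/buffer.h>

namespace lct = autoware::perception::lidar_centerpoint_tvm;

std::vector<lct::Box3D> detectOnce(
  const sensor_msgs::msg::PointCloud2 & cloud, const tf2_ros::Buffer & tf_buffer,
  const std::string & data_path)  // placeholder, resolved by the node in practice
{
  const lct::DensificationParam densification_param("map", /*num_past_frames=*/1);
  const std::vector<double> point_cloud_range{-89.6, -89.6, -3.0, 89.6, 89.6, 5.0};
  const std::vector<double> voxel_size{0.32, 0.32, 8.0};
  const lct::CenterPointConfig config(
    3, 4, 40000, point_cloud_range, voxel_size, 1, 9, 0.1f, 1.5f, 0.5f);
  lct::CenterPointTVM detector(densification_param, config, data_path);

  std::vector<lct::Box3D> det_boxes3d;
  // Returns false when preprocessing fails (e.g. TF lookup) or no voxel is filled.
  detector.detect(cloud, tf_buffer, det_boxes3d);
  return det_boxes3d;
}
```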
+ +#include "lidar_centerpoint_tvm/centerpoint_tvm.hpp" + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +auto config_en = model_zoo::perception::lidar_obstacle_detection::centerpoint_encoder:: + onnx_centerpoint_encoder::config; +auto config_bk = model_zoo::perception::lidar_obstacle_detection::centerpoint_backbone:: + onnx_centerpoint_backbone::config; +auto config_scatter = model_zoo::perception::lidar_obstacle_detection::centerpoint_backbone:: + onnx_centerpoint_backbone::preprocessing::config; + +namespace autoware +{ +namespace perception +{ +namespace lidar_centerpoint_tvm +{ + +TVMScatterIE::TVMScatterIE( + tvm_utility::pipeline::InferenceEngineTVMConfig config, const std::string & pkg_name, + const std::string & data_path, const std::string & function_name) +: config_(config) +{ + std::string network_prefix = data_path + "/" + pkg_name + "/models/" + config.network_name + "/"; + std::string network_module_path = network_prefix + config.network_module_path; + + std::ifstream module(network_module_path); + if (!module.good()) { + throw std::runtime_error("File " + network_module_path + " specified in config.hpp not found"); + } + module.close(); + tvm::runtime::Module runtime_mod = tvm::runtime::Module::LoadFromFile(network_module_path); + + scatter_function = runtime_mod.GetFunction(function_name); + + for (auto & output_config : config.network_outputs) { + output_.push_back(TVMArrayContainer( + output_config.node_shape, output_config.tvm_dtype_code, output_config.tvm_dtype_bits, + output_config.tvm_dtype_lanes, config.tvm_device_type, config.tvm_device_id)); + } +} + +TVMArrayContainerVector TVMScatterIE::schedule(const TVMArrayContainerVector & input) +{ + scatter_function(input[0].getArray(), coords_.getArray(), output_[0].getArray()); + + return output_; +} + +VoxelEncoderPreProcessor::VoxelEncoderPreProcessor( + const tvm_utility::pipeline::InferenceEngineTVMConfig & config, + const CenterPointConfig & config_mod) +: max_voxel_size(config.network_inputs[0].node_shape[0]), + max_point_in_voxel_size(config.network_inputs[0].node_shape[1]), + encoder_in_feature_size(config.network_inputs[0].node_shape[2]), + input_datatype_bytes(config.network_inputs[0].tvm_dtype_bits / 8), + config_detail(config_mod) +{ + encoder_in_features.resize(max_voxel_size * max_point_in_voxel_size * encoder_in_feature_size); + // Allocate input variable + std::vector shape_x{1, max_voxel_size, max_point_in_voxel_size, encoder_in_feature_size}; + TVMArrayContainer x{ + shape_x, + config.network_inputs[0].tvm_dtype_code, + config.network_inputs[0].tvm_dtype_bits, + config.network_inputs[0].tvm_dtype_lanes, + config.tvm_device_type, + config.tvm_device_id}; + output = x; +} + +TVMArrayContainerVector VoxelEncoderPreProcessor::schedule(const MixedInputs & voxel_inputs) +{ + // generate encoder_in_features from the voxels + generateFeatures( + voxel_inputs.features, voxel_inputs.num_points_per_voxel, voxel_inputs.coords, + voxel_inputs.num_voxels, config_detail, encoder_in_features); + + TVMArrayCopyFromBytes( + output.getArray(), encoder_in_features.data(), + max_voxel_size * max_point_in_voxel_size * encoder_in_feature_size * input_datatype_bytes); + + return {output}; +} + +BackboneNeckHeadPostProcessor::BackboneNeckHeadPostProcessor( + const tvm_utility::pipeline::InferenceEngineTVMConfig & config, + const CenterPointConfig & config_mod) +: output_datatype_bytes(config.network_outputs[0].tvm_dtype_bits / 8), config_detail(config_mod) +{ + 
head_out_heatmap.resize( + config.network_outputs[0].node_shape[1] * config.network_outputs[0].node_shape[2] * + config.network_outputs[0].node_shape[3]); + head_out_offset.resize( + config.network_outputs[1].node_shape[1] * config.network_outputs[1].node_shape[2] * + config.network_outputs[1].node_shape[3]); + head_out_z.resize( + config.network_outputs[2].node_shape[1] * config.network_outputs[2].node_shape[2] * + config.network_outputs[2].node_shape[3]); + head_out_dim.resize( + config.network_outputs[3].node_shape[1] * config.network_outputs[3].node_shape[2] * + config.network_outputs[3].node_shape[3]); + head_out_rot.resize( + config.network_outputs[4].node_shape[1] * config.network_outputs[4].node_shape[2] * + config.network_outputs[4].node_shape[3]); + head_out_vel.resize( + config.network_outputs[5].node_shape[1] * config.network_outputs[5].node_shape[2] * + config.network_outputs[5].node_shape[3]); +} + +std::vector BackboneNeckHeadPostProcessor::schedule(const TVMArrayContainerVector & input) +{ + TVMArrayCopyToBytes( + input[0].getArray(), head_out_heatmap.data(), head_out_heatmap.size() * output_datatype_bytes); + TVMArrayCopyToBytes( + input[1].getArray(), head_out_offset.data(), head_out_offset.size() * output_datatype_bytes); + TVMArrayCopyToBytes( + input[2].getArray(), head_out_z.data(), head_out_z.size() * output_datatype_bytes); + TVMArrayCopyToBytes( + input[3].getArray(), head_out_dim.data(), head_out_dim.size() * output_datatype_bytes); + TVMArrayCopyToBytes( + input[4].getArray(), head_out_rot.data(), head_out_rot.size() * output_datatype_bytes); + TVMArrayCopyToBytes( + input[5].getArray(), head_out_vel.data(), head_out_vel.size() * output_datatype_bytes); + + std::vector det_boxes3d; + + generateDetectedBoxes3D( + head_out_heatmap, head_out_offset, head_out_z, head_out_dim, head_out_rot, head_out_vel, + config_detail, det_boxes3d); + + return det_boxes3d; +} + +CenterPointTVM::CenterPointTVM( + const DensificationParam & densification_param, const CenterPointConfig & config, + const std::string & data_path) +: config_ve(config_en), + config_bnh(config_bk), + VE_PreP(std::make_shared(config_en, config)), + VE_IE(std::make_shared(config_en, "lidar_centerpoint_tvm", data_path)), + BNH_IE(std::make_shared(config_bk, "lidar_centerpoint_tvm", data_path)), + BNH_PostP(std::make_shared(config_bk, config)), + scatter_ie(std::make_shared(config_scatter, "lidar_centerpoint_tvm", data_path, "scatter")), + TSP_pipeline(std::make_shared(VE_PreP, VE_IE, scatter_ie, BNH_IE, BNH_PostP)), + config_(config) +{ + vg_ptr_ = std::make_unique(densification_param, config_); + initPtr(); +} + +CenterPointTVM::~CenterPointTVM() +{ +} + +void CenterPointTVM::initPtr() +{ + const auto voxels_size = + config_.max_voxel_size_ * config_.max_point_in_voxel_size_ * config_.point_feature_size_; + const auto coordinates_size = config_.max_voxel_size_ * config_.point_dim_size_; + + voxels_ = std::make_shared>(voxels_size); + coordinates_ = std::make_shared>(coordinates_size); + num_points_per_voxel_ = std::make_shared>(config_.max_voxel_size_); + std::vector shape_coords{ + config_scatter.network_inputs[1].node_shape[0], config_scatter.network_inputs[1].node_shape[1]}; + coords_tvm_ = TVMArrayContainer( + shape_coords, config_scatter.network_inputs[1].tvm_dtype_code, + config_scatter.network_inputs[1].tvm_dtype_bits, + config_scatter.network_inputs[1].tvm_dtype_lanes, config_scatter.tvm_device_type, + config_scatter.tvm_device_id); +} + +bool CenterPointTVM::detect( + const 
sensor_msgs::msg::PointCloud2 & input_pointcloud_msg, const tf2_ros::Buffer & tf_buffer, + std::vector & det_boxes3d) +{ + std::fill(voxels_->begin(), voxels_->end(), 0); + std::fill(coordinates_->begin(), coordinates_->end(), -1); + std::fill(num_points_per_voxel_->begin(), num_points_per_voxel_->end(), 0); + + if (!preprocess(input_pointcloud_msg, tf_buffer)) { + RCLCPP_WARN_STREAM( + rclcpp::get_logger("lidar_centerpoint"), "Fail to preprocess and skip to detect."); + return false; + } + + MixedInputs voxel_inputs{num_voxels_, *voxels_, *num_points_per_voxel_, *coordinates_}; + auto bnh_output = TSP_pipeline->schedule(voxel_inputs); + + det_boxes3d = bnh_output; + if (det_boxes3d.size() == 0) { + RCLCPP_WARN_STREAM(rclcpp::get_logger("lidar_centerpoint_tvm"), "No detected boxes."); + } + + return true; +} + +bool CenterPointTVM::preprocess( + const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg, const tf2_ros::Buffer & tf_buffer) +{ + bool is_success = vg_ptr_->enqueuePointCloud(input_pointcloud_msg, tf_buffer); + if (!is_success) { + return false; + } + num_voxels_ = vg_ptr_->pointsToVoxels(*voxels_, *coordinates_, *num_points_per_voxel_); + if (num_voxels_ == 0) { + return false; + } + + TVMArrayCopyFromBytes( + coords_tvm_.getArray(), coordinates_->data(), coordinates_->size() * sizeof(int32_t)); + + scatter_ie->set_coords(coords_tvm_); + + return true; +} + +} // namespace lidar_centerpoint_tvm +} // namespace perception +} // namespace autoware diff --git a/perception/lidar_centerpoint_tvm/lib/postprocess/circle_nms.cpp b/perception/lidar_centerpoint_tvm/lib/postprocess/circle_nms.cpp new file mode 100644 index 0000000000000..0ab51347be772 --- /dev/null +++ b/perception/lidar_centerpoint_tvm/lib/postprocess/circle_nms.cpp @@ -0,0 +1,65 @@ +// Copyright 2022 AutoCore Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
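A small worked example of the greedy rule the implementation below applies: a suppressed box never suppresses anything itself, so a box that is close only to an already-suppressed box survives. The `bool` mask element type is again an assumption:

```cpp
#include <cassert>
#include <vector>

#include "lidar_centerpoint_tvm/postprocess/circle_nms.hpp"
#include "lidar_centerpoint_tvm/utils.hpp"

int main()
{
  using autoware::perception::lidar_centerpoint_tvm::Box3D;
  using autoware::perception::lidar_centerpoint_tvm::circleNMS;
  Box3D a{};  a.score = 0.9f;  a.x = 0.0f;  // kept
  Box3D b{};  b.score = 0.8f;  b.x = 1.0f;  // 1 m from a -> suppressed
  Box3D c{};  c.score = 0.7f;  c.x = 2.0f;  // 2 m from a, 1 m from (suppressed) b -> kept
  std::vector<Box3D> boxes{a, b, c};       // already sorted by descending score
  std::vector<bool> keep_mask(boxes.size());
  assert(circleNMS(boxes, /*dist_thresh=*/1.5f, keep_mask) == 2);
  assert(keep_mask[0] && !keep_mask[1] && keep_mask[2]);
  return 0;
}
```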
+ +#include "lidar_centerpoint_tvm/postprocess/circle_nms.hpp" + +#include + +#include + +namespace autoware +{ +namespace perception +{ +namespace lidar_centerpoint_tvm +{ + +inline float dist2dPow(const Box3D & a, const Box3D & b) +{ + return powf(a.x - b.x, 2) + powf(a.y - b.y, 2); +} + +std::size_t circleNMS( + std::vector & boxes3d, const float dist_thresh, std::vector & keep_mask) +{ + // params: boxes3d, vector sorted by score from largest to smallest + const auto num_boxes3d = boxes3d.size(); + const float dist2d_pow_thresh = powf(dist_thresh, 2); + + // generate keep_mask + std::size_t num_to_keep = 0; + std::vector suppress(num_boxes3d); // suppress[i]=true mean i-th box should be suppressed + + // std::uint64_t * suppress_ptr = & suppress.front(); + for (std::size_t i = 0; i < num_boxes3d; i++) { + if (suppress[i]) { + keep_mask[i] = false; + } else { + keep_mask[i] = true; + num_to_keep++; + for (std::size_t j = i + 1; j < num_boxes3d; j++) { + // if the j-th box has been suppressed, continue + if (suppress[j]) continue; + // if the j-th box is in the circle of i-th, set j-th box to be suppressed + if (dist2dPow(boxes3d[i], boxes3d[j]) < dist2d_pow_thresh) suppress[j] = true; + } + } + } + + return num_to_keep; +} + +} // namespace lidar_centerpoint_tvm +} // namespace perception +} // namespace autoware diff --git a/perception/lidar_centerpoint_tvm/lib/postprocess/generate_detected_boxes.cpp b/perception/lidar_centerpoint_tvm/lib/postprocess/generate_detected_boxes.cpp new file mode 100644 index 0000000000000..cbfe2d0c66669 --- /dev/null +++ b/perception/lidar_centerpoint_tvm/lib/postprocess/generate_detected_boxes.cpp @@ -0,0 +1,176 @@ +// Copyright 2022 AutoCore Ltd., TIER IV, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "lidar_centerpoint_tvm/postprocess/generate_detected_boxes.hpp" + +#include + +#include +#include +#include +#include + +namespace +{ +const std::size_t THREAD_NUM_POST = 32; +} // namespace + +namespace autoware +{ +namespace perception +{ +namespace lidar_centerpoint_tvm +{ + +struct is_score_greater +{ + explicit is_score_greater(float t) : t_(t) {} + bool operator()(const Box3D & b) { return b.score > t_; } + +private: + float t_{0.0}; +}; + +struct is_kept +{ + bool operator()(const bool keep) { return keep; } +}; + +struct score_greater +{ + bool operator()(const Box3D & lb, const Box3D & rb) { return lb.score > rb.score; } +}; + +inline float sigmoid(float x) +{ + return 1.0f / (1.0f + expf(-x)); +} + +void generateBoxes3D_worker( + const std::vector & out_heatmap, const std::vector & out_offset, + const std::vector & out_z, const std::vector & out_dim, + const std::vector & out_rot, const std::vector & out_vel, + const CenterPointConfig & config, std::vector & boxes3d, std::size_t thread_idx, + std::size_t grids_per_thread) +{ + // generate boxes3d from the outputs of the network. 
diff --git a/perception/lidar_centerpoint_tvm/lib/postprocess/generate_detected_boxes.cpp b/perception/lidar_centerpoint_tvm/lib/postprocess/generate_detected_boxes.cpp
new file mode 100644
index 0000000000000..cbfe2d0c66669
--- /dev/null
+++ b/perception/lidar_centerpoint_tvm/lib/postprocess/generate_detected_boxes.cpp
@@ -0,0 +1,176 @@
+// Copyright 2022 AutoCore Ltd., TIER IV, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "lidar_centerpoint_tvm/postprocess/generate_detected_boxes.hpp"
+
+#include <lidar_centerpoint_tvm/utils.hpp>
+
+#include <algorithm>
+#include <cmath>
+#include <iostream>
+#include <thread>
+#include <vector>
+
+namespace
+{
+const std::size_t THREAD_NUM_POST = 32;
+}  // namespace
+
+namespace autoware
+{
+namespace perception
+{
+namespace lidar_centerpoint_tvm
+{
+
+struct is_score_greater
+{
+  explicit is_score_greater(float t) : t_(t) {}
+  bool operator()(const Box3D & b) { return b.score > t_; }
+
+private:
+  float t_{0.0};
+};
+
+struct is_kept
+{
+  bool operator()(const bool keep) { return keep; }
+};
+
+struct score_greater
+{
+  bool operator()(const Box3D & lb, const Box3D & rb) { return lb.score > rb.score; }
+};
+
+inline float sigmoid(float x)
+{
+  return 1.0f / (1.0f + expf(-x));
+}
+
+void generateBoxes3D_worker(
+  const std::vector<float> & out_heatmap, const std::vector<float> & out_offset,
+  const std::vector<float> & out_z, const std::vector<float> & out_dim,
+  const std::vector<float> & out_rot, const std::vector<float> & out_vel,
+  const CenterPointConfig & config, std::vector<Box3D> & boxes3d, std::size_t thread_idx,
+  std::size_t grids_per_thread)
+{
+  // generate boxes3d from the outputs of the network.
+  // shape of out_*: (N, DOWN_GRID_SIZE_Y, DOWN_GRID_SIZE_X)
+  // heatmap: N = class_size, offset: N = 2, z: N = 1, dim: N = 3, rot: N = 2, vel: N = 2
+  for (std::size_t idx = 0; idx < grids_per_thread; idx++) {
+    std::size_t grid_idx = thread_idx * grids_per_thread + idx;
+    const auto down_grid_size = config.down_grid_size_y_ * config.down_grid_size_x_;
+    if (grid_idx >= down_grid_size) {
+      return;
+    }
+
+    const auto yi = grid_idx / config.down_grid_size_x_;
+    const auto xi = grid_idx % config.down_grid_size_x_;
+
+    int32_t label = -1;
+    float max_score = -1;
+    for (std::size_t ci = 0; ci < config.class_size_; ci++) {
+      float score = sigmoid(out_heatmap[down_grid_size * ci + grid_idx]);
+      if (score > max_score) {
+        label = ci;
+        max_score = score;
+      }
+    }
+
+    const float offset_x = out_offset[down_grid_size * 0 + grid_idx];
+    const float offset_y = out_offset[down_grid_size * 1 + grid_idx];
+    const float x =
+      config.voxel_size_x_ * config.downsample_factor_ * (xi + offset_x) + config.range_min_x_;
+    const float y =
+      config.voxel_size_y_ * config.downsample_factor_ * (yi + offset_y) + config.range_min_y_;
+    const float z = out_z[grid_idx];
+    const float w = out_dim[down_grid_size * 0 + grid_idx];
+    const float l = out_dim[down_grid_size * 1 + grid_idx];
+    const float h = out_dim[down_grid_size * 2 + grid_idx];
+    const float yaw_sin = out_rot[down_grid_size * 0 + grid_idx];
+    const float yaw_cos = out_rot[down_grid_size * 1 + grid_idx];
+    const float yaw_norm = sqrtf(yaw_sin * yaw_sin + yaw_cos * yaw_cos);
+    const float vel_x = out_vel[down_grid_size * 0 + grid_idx];
+    const float vel_y = out_vel[down_grid_size * 1 + grid_idx];
+
+    boxes3d[grid_idx].label = label;
+    boxes3d[grid_idx].score = yaw_norm >= config.yaw_norm_threshold_ ? max_score : 0.f;
+    boxes3d[grid_idx].x = x;
+    boxes3d[grid_idx].y = y;
+    boxes3d[grid_idx].z = z;
+    boxes3d[grid_idx].length = expf(l);
+    boxes3d[grid_idx].width = expf(w);
+    boxes3d[grid_idx].height = expf(h);
+    boxes3d[grid_idx].yaw = atan2f(yaw_sin, yaw_cos);
+    boxes3d[grid_idx].vel_x = vel_x;
+    boxes3d[grid_idx].vel_y = vel_y;
+  }
+}
+
+// cspell: ignore divup
+void generateDetectedBoxes3D(
+  const std::vector<float> & out_heatmap, const std::vector<float> & out_offset,
+  const std::vector<float> & out_z, const std::vector<float> & out_dim,
+  const std::vector<float> & out_rot, const std::vector<float> & out_vel,
+  const CenterPointConfig & config, std::vector<Box3D> & det_boxes3d)
+{
+  std::vector<std::thread> threadPool;
+  const auto down_grid_size = config.down_grid_size_y_ * config.down_grid_size_x_;
+  std::vector<Box3D> boxes3d(down_grid_size);
+  std::size_t grids_per_thread = divup(down_grid_size, THREAD_NUM_POST);
+  for (std::size_t idx = 0; idx < THREAD_NUM_POST; idx++) {
+    std::thread worker(
+      generateBoxes3D_worker, std::ref(out_heatmap), std::ref(out_offset), std::ref(out_z),
+      std::ref(out_dim), std::ref(out_rot), std::ref(out_vel), std::ref(config), std::ref(boxes3d),
+      idx, grids_per_thread);
+    threadPool.push_back(std::move(worker));
+  }
+  for (std::size_t idx = 0; idx < THREAD_NUM_POST; idx++) {
+    threadPool[idx].join();
+  }
+
+  // suppress by score
+  const auto num_det_boxes3d =
+    std::count_if(boxes3d.begin(), boxes3d.end(), is_score_greater(config.score_threshold_));
+  if (num_det_boxes3d == 0) {
+    // construct boxes3d failed
+    std::cerr << "lidar_centerpoint_tvm: construct boxes3d failed" << std::endl;
+  }
+  std::vector<Box3D> det_boxes3d_no_nms(num_det_boxes3d);
+  std::copy_if(
+    boxes3d.begin(), boxes3d.end(), det_boxes3d_no_nms.begin(),
+    is_score_greater(config.score_threshold_));
+
+  // sort by score
+  std::sort(det_boxes3d_no_nms.begin(), det_boxes3d_no_nms.end(), score_greater());
+
+  // suppress by NMS
+  std::vector<bool> final_keep_mask(num_det_boxes3d);
+  const auto num_final_det_boxes3d =
+    circleNMS(det_boxes3d_no_nms, config.circle_nms_dist_threshold_, final_keep_mask);
+
+  det_boxes3d.resize(num_final_det_boxes3d);
+  std::size_t box_id = 0;
+  for (std::size_t idx = 0; idx < final_keep_mask.size(); idx++) {
+    if (final_keep_mask[idx]) {
+      det_boxes3d[box_id] = det_boxes3d_no_nms[idx];
+      box_id++;
+    }
+  }
+  // std::copy_if(det_boxes3d_no_nms.begin(), det_boxes3d_no_nms.end(), final_keep_mask.begin(),
+  //   det_boxes3d.begin(), is_kept());
+}
+
+}  // namespace lidar_centerpoint_tvm
+}  // namespace perception
+}  // namespace autoware
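A worked example of the per-cell decoding done in generateBoxes3D_worker above, for one made-up grid cell: sigmoid turns the heatmap logit into a score, sizes are exp-encoded, and yaw is recovered with atan2. All numbers are illustrative.

#include <cmath>
#include <cstdio>

int main()
{
  const float voxel_size_x = 0.32f, range_min_x = -89.6f;  // assumed config values
  const int downsample_factor = 1;
  const int xi = 300;            // grid column of this cell
  const float offset_x = 0.4f;   // sub-cell offset from the offset head
  const float heatmap_logit = 2.0f;
  const float dim_l_log = 1.5f;  // log-encoded length
  const float rot_sin = 0.5f, rot_cos = 0.866f;

  const float score = 1.0f / (1.0f + std::exp(-heatmap_logit));  // ~0.88
  const float x = voxel_size_x * downsample_factor * (xi + offset_x) + range_min_x;  // ~6.53 m
  const float length = std::exp(dim_l_log);                      // ~4.48 m
  const float yaw = std::atan2(rot_sin, rot_cos);                // ~0.52 rad (~30 deg)
  std::printf("score=%.2f x=%.2f length=%.2f yaw=%.2f\n", score, x, length, yaw);
}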
diff --git a/perception/lidar_centerpoint_tvm/lib/preprocess/generate_features.cpp b/perception/lidar_centerpoint_tvm/lib/preprocess/generate_features.cpp
new file mode 100644
index 0000000000000..9b98adc2def4e
--- /dev/null
+++ b/perception/lidar_centerpoint_tvm/lib/preprocess/generate_features.cpp
@@ -0,0 +1,131 @@
+// Copyright 2021-2022 AutoCore Ltd., TIER IV, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "lidar_centerpoint_tvm/preprocess/generate_features.hpp"
+
+#include <lidar_centerpoint_tvm/utils.hpp>
+
+#include <thread>
+
+namespace
+{
+const std::size_t THREAD_NUM_VFE = 4;
+}  // namespace
+
+namespace autoware
+{
+namespace perception
+{
+namespace lidar_centerpoint_tvm
+{
+
+void generateFeatures_worker(
+  const std::vector<float> & voxel_features, const std::vector<float> & voxel_num_points,
+  const std::vector<int32_t> & coords, const std::size_t num_voxels,
+  const CenterPointConfig & config, std::vector<float> & features, std::size_t thread_idx,
+  std::size_t pillars_per_thread)
+{
+  for (std::size_t idx = 0; idx < pillars_per_thread; idx++) {
+    std::size_t pillar_idx = thread_idx * pillars_per_thread + idx;
+    if (pillar_idx >= num_voxels) return;
+
+    // voxel/pillar information
+    float points_sum[3] = {0.0, 0.0, 0.0};  // sum of x, y, z in the voxel
+    int32_t coordinate[3] = {
+      coords[pillar_idx * 3], coords[pillar_idx * 3 + 1],
+      coords[pillar_idx * 3 + 2]};  // 3D position (z, y, x) of the voxel
+    std::size_t points_count = voxel_num_points[pillar_idx];  // number of points in the voxel
+
+    for (std::size_t i = 0; i < config.max_point_in_voxel_size_; i++) {
+      std::size_t point_idx =
+        pillar_idx * config.max_point_in_voxel_size_ * config.point_feature_size_ +
+        i * config.point_feature_size_;
+      for (std::size_t j = 0; j < config.point_feature_size_; j++) {
+        // point (x, y, z, intensity)
+        if (i < points_count && j < 3) points_sum[j] += voxel_features[point_idx + j];
+      }
+    }
+
+    // calculate voxel mean
+    float mean[3] = {
+      points_sum[0] / points_count, points_sum[1] / points_count, points_sum[2] / points_count};
+    // calculate offset
+    float x_offset = coordinate[2] * config.voxel_size_x_ + config.offset_x_;
+    float y_offset = coordinate[1] * config.voxel_size_y_ + config.offset_y_;
+    // float z_offset = coordinate[0] * config.voxel_size_z_ + config.offset_z_;
+
+    // build the encoder_in_features
+    for (std::size_t i = 0; i < config.max_point_in_voxel_size_; i++) {
+      // feature_idx
+      std::size_t feature_idx =
+        pillar_idx * config.max_point_in_voxel_size_ * config.encoder_in_feature_size_ +
+        i * config.encoder_in_feature_size_;
+      std::size_t point_idx =
+        pillar_idx * config.max_point_in_voxel_size_ * config.point_feature_size_ +
+        i * config.point_feature_size_;
+      if (i < points_count) {
+        features[feature_idx + 0] = voxel_features[point_idx + 0];
+        features[feature_idx + 1] = voxel_features[point_idx + 1];
+        features[feature_idx + 2] = voxel_features[point_idx + 2];
+        features[feature_idx + 3] = voxel_features[point_idx + 3];
+        // feature-mean
+        features[feature_idx + 4] = voxel_features[point_idx + 0] - mean[0];
+        features[feature_idx + 5] = voxel_features[point_idx + 1] - mean[1];
+        features[feature_idx + 6] = voxel_features[point_idx + 2] - mean[2];
+        // feature-offset
+        features[feature_idx + 7] = voxel_features[point_idx + 0] - x_offset;
+        features[feature_idx + 8] = voxel_features[point_idx + 1] - y_offset;
+      } else {
+        features[feature_idx + 0] = 0;
+        features[feature_idx + 1] = 0;
+        features[feature_idx + 2] = 0;
+        features[feature_idx + 3] = 0;
+        // feature-mean
+        features[feature_idx + 4] = 0;
+        features[feature_idx + 5] = 0;
+        features[feature_idx + 6] = 0;
+        // feature-offset
+        features[feature_idx + 7] = 0;
+        features[feature_idx + 8] = 0;
+      }
+    }
+  }
+}
+
+// cspell: ignore divup
+void generateFeatures(
+  const std::vector<float> & voxel_features, const std::vector<float> & voxel_num_points,
+  const std::vector<int32_t> & coords, const std::size_t num_voxels,
+  const CenterPointConfig & config, std::vector<float> & features)
+{
+  // voxel_features (float): max_voxel_size * max_point_in_voxel_size * point_feature_size
+  // voxel_num_points (float): max_voxel_size
+  // coords (int): max_voxel_size * point_dim_size
+  std::vector<std::thread> threadPool;
+  std::size_t pillars_per_thread = divup(config.max_voxel_size_, THREAD_NUM_VFE);
+  for (std::size_t idx = 0; idx < THREAD_NUM_VFE; idx++) {
+    std::thread worker(
+      generateFeatures_worker, std::ref(voxel_features), std::ref(voxel_num_points),
+      std::ref(coords), num_voxels, std::ref(config), std::ref(features), idx, pillars_per_thread);
+    threadPool.push_back(std::move(worker));
+  }
+  for (std::size_t idx = 0; idx < THREAD_NUM_VFE; idx++) {
+    threadPool[idx].join();
+  }
+}
+
+}  // namespace lidar_centerpoint_tvm
+}  // namespace perception
+}  // namespace autoware
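A toy walk-through of the 9-channel pillar encoding above for a single point: the raw (x, y, z, intensity) values, xyz minus the pillar mean, and xy minus the pillar center. All inputs here are invented, and the offsets are assumed to be range_min plus half a voxel.

#include <cstdio>

int main()
{
  const float voxel_size_x = 0.32f, voxel_size_y = 0.32f;
  const float offset_x = -89.44f, offset_y = -89.44f;   // range_min + voxel/2 (assumption)
  const float point[4] = {1.00f, 2.00f, 0.50f, 0.30f};  // x, y, z, intensity
  const float mean[3] = {1.10f, 1.90f, 0.45f};          // mean of the points in this pillar
  const int coord_x = 282, coord_y = 285;               // pillar grid indices (x, y)

  const float x_center = coord_x * voxel_size_x + offset_x;
  const float y_center = coord_y * voxel_size_y + offset_y;
  const float feature[9] = {
    point[0], point[1], point[2], point[3],              // channels 0-3: raw point
    point[0] - mean[0], point[1] - mean[1], point[2] - mean[2],  // 4-6: minus mean
    point[0] - x_center, point[1] - y_center};           // 7-8: minus pillar center
  for (int i = 0; i < 9; ++i) std::printf("f[%d]=%.3f\n", i, feature[i]);
}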
+ +#include "lidar_centerpoint_tvm/preprocess/pointcloud_densification.hpp" + +#include + +#include + +#include +#ifdef ROS_DISTRO_GALACTIC +#include +#else +#include +#endif + +#include +#include + +namespace autoware +{ +namespace perception +{ +namespace lidar_centerpoint_tvm +{ + +boost::optional getTransform( + const tf2_ros::Buffer & tf_buffer, const std::string & target_frame_id, + const std::string & source_frame_id, const rclcpp::Time & time) +{ + try { + geometry_msgs::msg::TransformStamped transform_stamped; + transform_stamped = tf_buffer.lookupTransform( + target_frame_id, source_frame_id, time, rclcpp::Duration::from_seconds(0.5)); + return transform_stamped.transform; + } catch (tf2::TransformException & ex) { + RCLCPP_WARN_STREAM(rclcpp::get_logger("lidar_centerpoint_tvm"), ex.what()); + return boost::none; + } +} + +Eigen::Affine3f transformToEigen(const geometry_msgs::msg::Transform & t) +{ + Eigen::Affine3f a; + a.matrix() = tf2::transformToEigen(t).matrix().cast(); + return a; +} + +} // namespace lidar_centerpoint_tvm +} // namespace perception +} // namespace autoware + +namespace autoware +{ +namespace perception +{ +namespace lidar_centerpoint_tvm +{ + +PointCloudDensification::PointCloudDensification(const DensificationParam & param) : param_(param) +{ +} + +bool PointCloudDensification::enqueuePointCloud( + const sensor_msgs::msg::PointCloud2 & pointcloud_msg, const tf2_ros::Buffer & tf_buffer) +{ + const auto header = pointcloud_msg.header; + + if (param_.pointcloud_cache_size() > 1) { + auto transform_world2current = + getTransform(tf_buffer, header.frame_id, param_.world_frame_id(), header.stamp); + if (!transform_world2current) { + return false; + } + auto affine_world2current = transformToEigen(transform_world2current.get()); + + enqueue(pointcloud_msg, affine_world2current); + } else { + enqueue(pointcloud_msg, Eigen::Affine3f::Identity()); + } + dequeue(); + + return true; +} + +void PointCloudDensification::enqueue( + const sensor_msgs::msg::PointCloud2 & msg, const Eigen::Affine3f & affine_world2current) +{ + affine_world2current_ = affine_world2current; + current_timestamp_ = rclcpp::Time(msg.header.stamp).seconds(); + PointCloudWithTransform pointcloud = {msg, affine_world2current.inverse()}; + pointcloud_cache_.push_front(pointcloud); +} + +void PointCloudDensification::dequeue() +{ + if (pointcloud_cache_.size() > param_.pointcloud_cache_size()) { + pointcloud_cache_.pop_back(); + } +} + +} // namespace lidar_centerpoint_tvm +} // namespace perception +} // namespace autoware diff --git a/perception/lidar_centerpoint_tvm/lib/preprocess/voxel_generator.cpp b/perception/lidar_centerpoint_tvm/lib/preprocess/voxel_generator.cpp new file mode 100644 index 0000000000000..11ae8e7064b1c --- /dev/null +++ b/perception/lidar_centerpoint_tvm/lib/preprocess/voxel_generator.cpp @@ -0,0 +1,138 @@ +// Copyright 2021-2022 AutoCore Ltd., TIER IV, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "lidar_centerpoint_tvm/preprocess/voxel_generator.hpp" + +#include + +namespace autoware +{ +namespace perception +{ +namespace lidar_centerpoint_tvm +{ + +VoxelGeneratorTemplate::VoxelGeneratorTemplate( + const DensificationParam & param, const CenterPointConfig & config) +: config_(config) +{ + pd_ptr_ = std::make_unique(param); + range_[0] = config.range_min_x_; + range_[1] = config.range_min_y_; + range_[2] = config.range_min_z_; + range_[3] = config.range_max_x_; + range_[4] = config.range_max_y_; + range_[5] = config.range_max_z_; + grid_size_[0] = config.grid_size_x_; + grid_size_[1] = config.grid_size_y_; + grid_size_[2] = config.grid_size_z_; + recip_voxel_size_[0] = 1 / config.voxel_size_x_; + recip_voxel_size_[1] = 1 / config.voxel_size_y_; + recip_voxel_size_[2] = 1 / config.voxel_size_z_; +} + +bool VoxelGeneratorTemplate::enqueuePointCloud( + const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg, const tf2_ros::Buffer & tf_buffer) +{ + return pd_ptr_->enqueuePointCloud(input_pointcloud_msg, tf_buffer); +} + +std::size_t VoxelGenerator::pointsToVoxels( + std::vector & voxels, std::vector & coordinates, + std::vector & num_points_per_voxel) +{ + // voxels (float): (max_voxel_size * max_point_in_voxel_size * point_feature_size), point info in + // each voxel coordinates (int): (max_voxel_size * point_dim_size), the index from voxel to its 3D + // position num_points_per_voxel (float): (max_voxel_size), the number of points in each voxel + + const std::size_t grid_size = config_.grid_size_z_ * config_.grid_size_y_ * config_.grid_size_x_; + std::vector coord_to_voxel_idx( + grid_size, -1); // the index from 3D position to the voxel id + + std::size_t voxel_cnt = 0; // @return + std::vector point; + point.resize(config_.point_feature_size_); + std::vector coord_zyx; // record which grid the zyx coord of current point belong to + coord_zyx.resize(config_.point_dim_size_); + bool out_of_range; + std::size_t point_cnt; + int32_t c, coord_idx, voxel_idx; + Eigen::Vector3f point_current, point_past; + + for (auto pc_cache_iter = pd_ptr_->getPointCloudCacheIter(); !pd_ptr_->isCacheEnd(pc_cache_iter); + pc_cache_iter++) { + auto pc_msg = pc_cache_iter->pointcloud_msg; + auto affine_past2current = + pd_ptr_->getAffineWorldToCurrent() * pc_cache_iter->affine_past2world; + float time_lag = static_cast( + pd_ptr_->getCurrentTimestamp() - rclcpp::Time(pc_msg.header.stamp).seconds()); + + for (sensor_msgs::PointCloud2ConstIterator x_iter(pc_msg, "x"), y_iter(pc_msg, "y"), + z_iter(pc_msg, "z"); + x_iter != x_iter.end(); ++x_iter, ++y_iter, ++z_iter) { + point_past << *x_iter, *y_iter, *z_iter; + point_current = affine_past2current * point_past; + + point[0] = point_current.x(); + point[1] = point_current.y(); + point[2] = point_current.z(); + point[3] = time_lag; + + out_of_range = false; + for (std::size_t di = 0; di < config_.point_dim_size_; di++) { + c = static_cast((point[di] - range_[di]) * recip_voxel_size_[di]); + if (c < 0 || c >= grid_size_[di]) { + out_of_range = true; + break; + } + coord_zyx[config_.point_dim_size_ - di - 1] = c; + } + if (out_of_range) { + continue; + } + + coord_idx = coord_zyx[0] * config_.grid_size_y_ * config_.grid_size_x_ + + coord_zyx[1] * config_.grid_size_x_ + coord_zyx[2]; + voxel_idx = coord_to_voxel_idx[coord_idx]; + if (voxel_idx == -1) { + voxel_idx = voxel_cnt; + if (voxel_cnt >= config_.max_voxel_size_) { + continue; + } + + voxel_cnt++; + coord_to_voxel_idx[coord_idx] = voxel_idx; + for (std::size_t di = 0; di < 
+        for (std::size_t di = 0; di < config_.point_dim_size_; di++) {
+          coordinates[voxel_idx * config_.point_dim_size_ + di] = coord_zyx[di];
+        }
+      }
+
+      point_cnt = num_points_per_voxel[voxel_idx];
+      if (point_cnt < config_.max_point_in_voxel_size_) {
+        for (std::size_t fi = 0; fi < config_.point_feature_size_; fi++) {
+          voxels
+            [voxel_idx * config_.max_point_in_voxel_size_ * config_.point_feature_size_ +
+             point_cnt * config_.point_feature_size_ + fi] = point[fi];
+        }
+        num_points_per_voxel[voxel_idx]++;
+      }
+    }
+  }
+
+  return voxel_cnt;
+}
+
+}  // namespace lidar_centerpoint_tvm
+}  // namespace perception
+}  // namespace autoware
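The arithmetic behind pointsToVoxels above, applied to one made-up point: the reciprocal voxel size turns the range offset into a (z, y, x) grid index, and the flattened index is z*ny*nx + y*nx + x. Grid and range values are illustrative, not the package defaults.

#include <cstdint>
#include <cstdio>

int main()
{
  const float range_min[3] = {-89.6f, -89.6f, -3.0f};  // x, y, z (assumed)
  const float recip_voxel[3] = {1.0f / 0.32f, 1.0f / 0.32f, 1.0f / 8.0f};
  const int32_t grid[3] = {560, 560, 1};               // nx, ny, nz (assumed)
  const float point[3] = {6.528f, -0.16f, 0.5f};

  const int32_t cx = static_cast<int32_t>((point[0] - range_min[0]) * recip_voxel[0]);  // 300
  const int32_t cy = static_cast<int32_t>((point[1] - range_min[1]) * recip_voxel[1]);  // 279
  const int32_t cz = static_cast<int32_t>((point[2] - range_min[2]) * recip_voxel[2]);  // 0
  const int32_t flat = cz * grid[1] * grid[0] + cy * grid[0] + cx;  // 156840
  std::printf("coord(z,y,x)=(%d,%d,%d) flat=%d\n", cz, cy, cx, flat);
}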
diff --git a/perception/lidar_centerpoint_tvm/lib/ros_utils.cpp b/perception/lidar_centerpoint_tvm/lib/ros_utils.cpp
new file mode 100644
index 0000000000000..94639d71e66c0
--- /dev/null
+++ b/perception/lidar_centerpoint_tvm/lib/ros_utils.cpp
@@ -0,0 +1,118 @@
+// Copyright 2022 AutoCore Ltd., TIER IV, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "lidar_centerpoint_tvm/ros_utils.hpp"
+
+#include <autoware/universe_utils/geometry/geometry.hpp>
+#include <autoware/universe_utils/math/constants.hpp>
+
+namespace autoware
+{
+namespace perception
+{
+namespace lidar_centerpoint_tvm
+{
+
+using Label = autoware_perception_msgs::msg::ObjectClassification;
+
+void box3DToDetectedObject(
+  const Box3D & box3d, const std::vector<std::string> & class_names,
+  const bool rename_car_to_truck_and_bus, const bool has_twist,
+  autoware_perception_msgs::msg::DetectedObject & obj)
+{
+  // TODO(yukke42): the score is the DNN's classification confidence, not a probability
+  obj.existence_probability = box3d.score;
+
+  // classification
+  autoware_perception_msgs::msg::ObjectClassification classification;
+  classification.probability = 1.0f;
+  if (box3d.label >= 0 && static_cast<std::size_t>(box3d.label) < class_names.size()) {
+    classification.label = getSemanticType(class_names[box3d.label]);
+  } else {
+    classification.label = Label::UNKNOWN;
+    RCLCPP_WARN_STREAM(
+      rclcpp::get_logger("lidar_centerpoint"), "Unexpected label: UNKNOWN is set.");
+  }
+
+  float l = box3d.length;
+  float w = box3d.width;
+  if (classification.label == Label::CAR && rename_car_to_truck_and_bus) {
+    // Note: object size is referred from multi_object_tracker
+    if ((w * l > 2.2 * 5.5) && (w * l <= 2.5 * 7.9)) {
+      classification.label = Label::TRUCK;
+    } else if (w * l > 2.5 * 7.9) {
+      classification.label = Label::BUS;
+    }
+  }
+
+  if (isCarLikeVehicleLabel(classification.label)) {
+    obj.kinematics.orientation_availability =
+      autoware_perception_msgs::msg::DetectedObjectKinematics::SIGN_UNKNOWN;
+  }
+
+  obj.classification.emplace_back(classification);
+
+  // pose and shape
+  // mmdet3d yaw format to ros yaw format
+  float yaw = -box3d.yaw - autoware::universe_utils::pi / 2;
+  obj.kinematics.pose_with_covariance.pose.position =
+    autoware::universe_utils::createPoint(box3d.x, box3d.y, box3d.z);
+  obj.kinematics.pose_with_covariance.pose.orientation =
+    autoware::universe_utils::createQuaternionFromYaw(yaw);
+  obj.shape.type = autoware_perception_msgs::msg::Shape::BOUNDING_BOX;
+  obj.shape.dimensions =
+    autoware::universe_utils::createTranslation(box3d.length, box3d.width, box3d.height);
+
+  // twist
+  if (has_twist) {
+    float vel_x = box3d.vel_x;
+    float vel_y = box3d.vel_y;
+    geometry_msgs::msg::Twist twist;
+    twist.linear.x = std::sqrt(std::pow(vel_x, 2) + std::pow(vel_y, 2));
+    twist.angular.z = 2 * (std::atan2(vel_y, vel_x) - yaw);
+    obj.kinematics.twist_with_covariance.twist = twist;
+    obj.kinematics.has_twist = has_twist;
+  }
+}
+
+uint8_t getSemanticType(const std::string & class_name)
+{
+  if (class_name == "CAR") {
+    return Label::CAR;
+  } else if (class_name == "TRUCK") {
+    return Label::TRUCK;
+  } else if (class_name == "BUS") {
+    return Label::BUS;
+  } else if (class_name == "TRAILER") {
+    return Label::TRAILER;
+  } else if (class_name == "BICYCLE") {
+    return Label::BICYCLE;
+  } else if (class_name == "MOTORBIKE") {
+    return Label::MOTORCYCLE;
+  } else if (class_name == "PEDESTRIAN") {
+    return Label::PEDESTRIAN;
+  } else {
+    return Label::UNKNOWN;
+  }
+}
+
+bool isCarLikeVehicleLabel(const uint8_t label)
+{
+  return label == Label::CAR || label == Label::TRUCK || label == Label::BUS ||
+         label == Label::TRAILER;
+}
+
+}  // namespace lidar_centerpoint_tvm
+}  // namespace perception
+}  // namespace autoware
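A small demonstration of the yaw-convention conversion used above (yaw_ros = -yaw_mmdet3d - pi/2) and the quaternion it produces for a pure rotation about +z. The quaternion math is written out with std only so the example stays dependency-free; createQuaternionFromYaw() reduces to the same two lines for a z-axis rotation.

#include <cmath>
#include <cstdio>

int main()
{
  const float pi = 3.14159265358979323846f;
  const float yaw_mmdet3d = 0.0f;  // facing +x in the mmdet3d convention
  const float yaw_ros = -yaw_mmdet3d - pi / 2.0f;

  const float qz = std::sin(yaw_ros / 2.0f);  // -0.707
  const float qw = std::cos(yaw_ros / 2.0f);  //  0.707
  std::printf("yaw=%.3f qz=%.3f qw=%.3f\n", yaw_ros, qz, qw);
}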
diff --git a/perception/lidar_centerpoint_tvm/lib/utils.cpp b/perception/lidar_centerpoint_tvm/lib/utils.cpp
new file mode 100644
index 0000000000000..caf9cb84fa1c7
--- /dev/null
+++ b/perception/lidar_centerpoint_tvm/lib/utils.cpp
@@ -0,0 +1,41 @@
+// Copyright 2022 AutoCore Ltd., TIER IV, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "lidar_centerpoint_tvm/utils.hpp"
+
+#include <stdexcept>
+
+namespace autoware
+{
+namespace perception
+{
+namespace lidar_centerpoint_tvm
+{
+
+// cspell: ignore divup
+std::size_t divup(const std::size_t a, const std::size_t b)
+{
+  if (a == 0) {
+    throw std::runtime_error("A dividend of divup isn't positive.");
+  }
+  if (b == 0) {
+    throw std::runtime_error("A divisor of divup isn't positive.");
+  }
+
+  return (a + b - 1) / b;
+}
+
+}  // namespace lidar_centerpoint_tvm
+}  // namespace perception
+}  // namespace autoware
diff --git a/perception/lidar_centerpoint_tvm/package.xml b/perception/lidar_centerpoint_tvm/package.xml
new file mode 100644
index 0000000000000..c71a27e4a1677
--- /dev/null
+++ b/perception/lidar_centerpoint_tvm/package.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0"?>
+<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
+<package format="3">
+  <name>lidar_centerpoint_tvm</name>
+  <version>0.1.0</version>
+  <description>lidar_centerpoint_tvm</description>
+  <maintainer>Liu Zhengfei</maintainer>
+  <maintainer>Xinyu Wang</maintainer>
+  <license>Apache 2.0</license>
+
+  <buildtool_depend>ament_cmake_auto</buildtool_depend>
+  <buildtool_depend>autoware_cmake</buildtool_depend>
+
+  <depend>autoware_perception_msgs</depend>
+  <depend>autoware_universe_utils</depend>
+  <depend>pcl_ros</depend>
+  <depend>rclcpp</depend>
+  <depend>rclcpp_components</depend>
+  <depend>tf2_eigen</depend>
+  <depend>tf2_geometry_msgs</depend>
+  <depend>tf2_sensor_msgs</depend>
+  <depend>tvm_utility</depend>
+  <depend>tvm_vendor</depend>
+
+  <test_depend>ament_cmake_gtest</test_depend>
+  <test_depend>ament_lint_auto</test_depend>
+  <test_depend>autoware_lint_common</test_depend>
+
+  <export>
+    <build_type>ament_cmake</build_type>
+  </export>
+</package>
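A minimal sketch of how the divup() helper above drives the fixed-size thread fan-out used by both worker launchers earlier in this patch: rounding the work size up means the last thread covers the remainder, and each worker bounds-checks its slice. The helper is re-implemented locally so the example compiles on its own.

#include <cstddef>
#include <thread>
#include <vector>

std::size_t divup_example(std::size_t a, std::size_t b) { return (a + b - 1) / b; }

void process_range(
  std::size_t thread_idx, std::size_t per_thread, std::size_t total, std::vector<int> & out)
{
  for (std::size_t k = 0; k < per_thread; ++k) {
    const std::size_t i = thread_idx * per_thread + k;
    if (i >= total) return;  // same guard as the workers above
    out[i] = static_cast<int>(i);
  }
}

int main()
{
  const std::size_t total = 10, num_threads = 4;
  const std::size_t per_thread = divup_example(total, num_threads);  // 3
  std::vector<int> out(total, -1);
  std::vector<std::thread> pool;
  for (std::size_t t = 0; t < num_threads; ++t) {
    pool.emplace_back(process_range, t, per_thread, total, std::ref(out));
  }
  for (auto & th : pool) th.join();
}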
diff --git a/perception/lidar_centerpoint_tvm/scripts/lidar_centerpoint_visualizer.py b/perception/lidar_centerpoint_tvm/scripts/lidar_centerpoint_visualizer.py
new file mode 100755
index 0000000000000..5a1135379615f
--- /dev/null
+++ b/perception/lidar_centerpoint_tvm/scripts/lidar_centerpoint_visualizer.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python
+# Copyright 2022 TIER IV, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import time
+
+import open3d as o3d
+import rclpy
+from rclpy.node import Node
+
+
+def main(args=None):
+    rclpy.init(args=args)
+
+    node = Node("lidar_centerpoint_visualizer")
+    node.declare_parameter("pcd_path", rclpy.Parameter.Type.STRING)
+    node.declare_parameter("detections_path", rclpy.Parameter.Type.STRING)
+
+    pcd_path = node.get_parameter("pcd_path").get_parameter_value().string_value
+    detections_path = node.get_parameter("detections_path").get_parameter_value().string_value
+
+    while not os.path.exists(pcd_path) and not os.path.exists(detections_path):
+        time.sleep(1.0)
+
+        if not rclpy.ok():
+            rclpy.shutdown()
+            return
+
+    mesh = o3d.io.read_triangle_mesh(detections_path)
+    pcd = o3d.io.read_point_cloud(pcd_path)
+
+    mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0, origin=[0, 0, 0])
+
+    detection_lines = o3d.geometry.LineSet.create_from_triangle_mesh(mesh)
+    detection_lines.paint_uniform_color([1.0, 0.0, 1.0])
+
+    o3d.visualization.draw_geometries([mesh_frame, pcd, detection_lines])
+
+    rclpy.shutdown()
+
+
+if __name__ == "__main__":
+    main()
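The mesh this script loads is the ASCII PLY written by the single-inference node later in this patch. As a reference for that format, a tiny C++ writer that emits one axis-aligned unit cube with the same header arithmetic (8 vertices and 12 triangles per box); the output path is illustrative.

#include <fstream>

int main()
{
  const int n = 1;  // one box
  std::ofstream ofs("/tmp/box.ply");
  ofs << "ply\nformat ascii 1.0\n";
  ofs << "element vertex " << 8 * n << "\n";
  ofs << "property float x\nproperty float y\nproperty float z\n";
  ofs << "element face " << 12 * n << "\n";
  ofs << "property list uchar uint vertex_indices\n";
  ofs << "end_header\n";
  for (int i = 0; i < 8; ++i) {  // corners of the unit cube, bit-coded
    ofs << (i & 1) << " " << ((i >> 1) & 1) << " " << ((i >> 2) & 1) << "\n";
  }
  // 12 triangles, two per cube face
  const int f[12][3] = {{0, 1, 3}, {0, 3, 2}, {4, 6, 7}, {4, 7, 5}, {0, 4, 5}, {0, 5, 1},
                        {2, 3, 7}, {2, 7, 6}, {0, 2, 6}, {0, 6, 4}, {1, 5, 7}, {1, 7, 3}};
  for (const auto & t : f) ofs << "3 " << t[0] << " " << t[1] << " " << t[2] << "\n";
}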
+ +#include "lidar_centerpoint_tvm/node.hpp" + +#include +#include +#include +#include +#include + +#ifdef ROS_DISTRO_GALACTIC +#include +#else +#include +#endif + +#include +#include +#include + +namespace autoware +{ +namespace perception +{ +namespace lidar_centerpoint_tvm +{ + +LidarCenterPointTVMNode::LidarCenterPointTVMNode(const rclcpp::NodeOptions & node_options) +: Node("lidar_centerpoint_tvm", node_options), tf_buffer_(this->get_clock()) +{ + const float score_threshold = + static_cast(this->declare_parameter("score_threshold", 0.35)); + const float circle_nms_dist_threshold = + static_cast(this->declare_parameter("circle_nms_dist_threshold", 1.5)); + const float yaw_norm_threshold = + static_cast(this->declare_parameter("yaw_norm_threshold", 0.0)); + const std::string densification_world_frame_id = + this->declare_parameter("densification_world_frame_id", "map"); + const int32_t densification_num_past_frames = + this->declare_parameter("densification_num_past_frames", 1); + + class_names_ = this->declare_parameter>("class_names"); + rename_car_to_truck_and_bus_ = this->declare_parameter("rename_car_to_truck_and_bus", false); + has_twist_ = this->declare_parameter("has_twist", false); + + const std::size_t point_feature_size = + static_cast(this->declare_parameter("point_feature_size")); + const std::size_t max_voxel_size = + static_cast(this->declare_parameter("max_voxel_size")); + const auto point_cloud_range = this->declare_parameter>("point_cloud_range"); + const auto voxel_size = this->declare_parameter>("voxel_size"); + const std::size_t downsample_factor = + static_cast(this->declare_parameter("downsample_factor")); + const std::size_t encoder_in_feature_size = + static_cast(this->declare_parameter("encoder_in_feature_size")); + const auto data_path = this->declare_parameter("data_path"); + + DensificationParam densification_param( + densification_world_frame_id, densification_num_past_frames); + + if (point_cloud_range.size() != 6) { + RCLCPP_WARN_STREAM( + rclcpp::get_logger("lidar_centerpoint"), + "The size of point_cloud_range != 6: use the default parameters."); + } + if (voxel_size.size() != 3) { + RCLCPP_WARN_STREAM( + rclcpp::get_logger("lidar_centerpoint"), + "The size of voxel_size != 3: use the default parameters."); + } + CenterPointConfig config( + class_names_.size(), point_feature_size, max_voxel_size, point_cloud_range, voxel_size, + downsample_factor, encoder_in_feature_size, score_threshold, circle_nms_dist_threshold, + yaw_norm_threshold); + detector_ptr_ = std::make_unique(densification_param, config, data_path); + + pointcloud_sub_ = this->create_subscription( + "~/input/pointcloud", rclcpp::SensorDataQoS{}.keep_last(1), + std::bind(&LidarCenterPointTVMNode::pointCloudCallback, this, std::placeholders::_1)); + objects_pub_ = this->create_publisher( + "~/output/objects", rclcpp::QoS{1}); + + // initialize debug tool + { + using autoware::universe_utils::DebugPublisher; + using autoware::universe_utils::StopWatch; + stop_watch_ptr_ = std::make_unique>(); + debug_publisher_ptr_ = std::make_unique(this, "lidar_centerpoint_tvm"); + stop_watch_ptr_->tic("cyclic_time"); + stop_watch_ptr_->tic("processing_time"); + } +} + +void LidarCenterPointTVMNode::pointCloudCallback( + const sensor_msgs::msg::PointCloud2::ConstSharedPtr input_pointcloud_msg) +{ + const auto objects_sub_count = + objects_pub_->get_subscription_count() + objects_pub_->get_intra_process_subscription_count(); + if (objects_sub_count < 1) { + return; + } + + if (stop_watch_ptr_) { + 
stop_watch_ptr_->toc("processing_time", true); + } + + std::vector det_boxes3d; + bool is_success = detector_ptr_->detect(*input_pointcloud_msg, tf_buffer_, det_boxes3d); + if (!is_success) { + return; + } + + autoware_perception_msgs::msg::DetectedObjects output_msg; + output_msg.header = input_pointcloud_msg->header; + for (const auto & box3d : det_boxes3d) { + autoware_perception_msgs::msg::DetectedObject obj; + box3DToDetectedObject(box3d, class_names_, rename_car_to_truck_and_bus_, has_twist_, obj); + output_msg.objects.emplace_back(obj); + } + + if (objects_sub_count > 0) { + objects_pub_->publish(output_msg); + } + + // add processing time for debug + if (debug_publisher_ptr_ && stop_watch_ptr_) { + const double cyclic_time_ms = stop_watch_ptr_->toc("cyclic_time", true); + const double processing_time_ms = stop_watch_ptr_->toc("processing_time", true); + debug_publisher_ptr_->publish( + "debug/cyclic_time_ms", cyclic_time_ms); + debug_publisher_ptr_->publish( + "debug/processing_time_ms", processing_time_ms); + } +} + +} // namespace lidar_centerpoint_tvm +} // namespace perception +} // namespace autoware + +#include +RCLCPP_COMPONENTS_REGISTER_NODE( + autoware::perception::lidar_centerpoint_tvm::LidarCenterPointTVMNode) diff --git a/perception/lidar_centerpoint_tvm/src/single_inference_node.cpp b/perception/lidar_centerpoint_tvm/src/single_inference_node.cpp new file mode 100644 index 0000000000000..224664e51ea2d --- /dev/null +++ b/perception/lidar_centerpoint_tvm/src/single_inference_node.cpp @@ -0,0 +1,239 @@ +// Copyright 2022 TIER IV, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "lidar_centerpoint_tvm/single_inference_node.hpp" + +#include +#include +#include +#include +#include + +#include + +#include +#include +#ifdef ROS_DISTRO_GALACTIC +#include +#else +#include +#endif + +#include +#include +#include + +namespace autoware +{ +namespace perception +{ +namespace lidar_centerpoint_tvm +{ + +SingleInferenceLidarCenterPointNode::SingleInferenceLidarCenterPointNode( + const rclcpp::NodeOptions & node_options) +: Node("lidar_center_point_tvm", node_options), tf_buffer_(this->get_clock()) +{ + const float score_threshold = + static_cast(this->declare_parameter("score_threshold", 0.35)); + const float circle_nms_dist_threshold = + static_cast(this->declare_parameter("circle_nms_dist_threshold", 1.5)); + const float yaw_norm_threshold = + static_cast(this->declare_parameter("yaw_norm_threshold", 0.0)); + const std::string densification_world_frame_id = + this->declare_parameter("densification_world_frame_id", "map"); + const int densification_num_past_frames = + this->declare_parameter("densification_num_past_frames", 0); + + class_names_ = this->declare_parameter>("class_names"); + has_twist_ = this->declare_parameter("has_twist", false); + const std::size_t point_feature_size = + static_cast(this->declare_parameter("point_feature_size")); + const std::size_t max_voxel_size = + static_cast(this->declare_parameter("max_voxel_size")); + const auto point_cloud_range = this->declare_parameter>("point_cloud_range"); + const auto voxel_size = this->declare_parameter>("voxel_size"); + const std::size_t downsample_factor = + static_cast(this->declare_parameter("downsample_factor")); + const std::size_t encoder_in_feature_size = + static_cast(this->declare_parameter("encoder_in_feature_size")); + const auto pcd_path = this->declare_parameter("pcd_path"); + const auto detections_path = this->declare_parameter("detections_path"); + const auto data_path = this->declare_parameter("data_path"); + DensificationParam densification_param( + densification_world_frame_id, densification_num_past_frames); + + if (point_cloud_range.size() != 6) { + RCLCPP_WARN_STREAM( + rclcpp::get_logger("single_inference_lidar_centerpoint"), + "The size of point_cloud_range != 6: use the default parameters."); + } + if (voxel_size.size() != 3) { + RCLCPP_WARN_STREAM( + rclcpp::get_logger("single_inference_lidar_centerpoint"), + "The size of voxel_size != 3: use the default parameters."); + } + CenterPointConfig config( + class_names_.size(), point_feature_size, max_voxel_size, point_cloud_range, voxel_size, + downsample_factor, encoder_in_feature_size, score_threshold, circle_nms_dist_threshold, + yaw_norm_threshold); + detector_ptr_ = std::make_unique(densification_param, config, data_path); + + detect(pcd_path, detections_path); + exit(0); +} + +std::vector SingleInferenceLidarCenterPointNode::getVertices( + const autoware_perception_msgs::msg::Shape & shape, const Eigen::Affine3d & pose) const +{ + std::vector vertices; + Eigen::Vector3d vertex; + + vertex.x() = -shape.dimensions.x / 2.0; + vertex.y() = -shape.dimensions.y / 2.0; + vertex.z() = shape.dimensions.z / 2.0; + vertices.push_back(pose * vertex); + + vertex.x() = -shape.dimensions.x / 2.0; + vertex.y() = shape.dimensions.y / 2.0; + vertex.z() = shape.dimensions.z / 2.0; + vertices.push_back(pose * vertex); + + vertex.x() = -shape.dimensions.x / 2.0; + vertex.y() = shape.dimensions.y / 2.0; + vertex.z() = -shape.dimensions.z / 2.0; + vertices.push_back(pose * vertex); + + vertex.x() = shape.dimensions.x / 2.0; + vertex.y() = 
+
+std::vector<Eigen::Vector3d> SingleInferenceLidarCenterPointNode::getVertices(
+  const autoware_perception_msgs::msg::Shape & shape, const Eigen::Affine3d & pose) const
+{
+  std::vector<Eigen::Vector3d> vertices;
+  Eigen::Vector3d vertex;
+
+  vertex.x() = -shape.dimensions.x / 2.0;
+  vertex.y() = -shape.dimensions.y / 2.0;
+  vertex.z() = shape.dimensions.z / 2.0;
+  vertices.push_back(pose * vertex);
+
+  vertex.x() = -shape.dimensions.x / 2.0;
+  vertex.y() = shape.dimensions.y / 2.0;
+  vertex.z() = shape.dimensions.z / 2.0;
+  vertices.push_back(pose * vertex);
+
+  vertex.x() = -shape.dimensions.x / 2.0;
+  vertex.y() = shape.dimensions.y / 2.0;
+  vertex.z() = -shape.dimensions.z / 2.0;
+  vertices.push_back(pose * vertex);
+
+  vertex.x() = shape.dimensions.x / 2.0;
+  vertex.y() = shape.dimensions.y / 2.0;
+  vertex.z() = shape.dimensions.z / 2.0;
+  vertices.push_back(pose * vertex);
+
+  vertex.x() = shape.dimensions.x / 2.0;
+  vertex.y() = shape.dimensions.y / 2.0;
+  vertex.z() = -shape.dimensions.z / 2.0;
+  vertices.push_back(pose * vertex);
+
+  vertex.x() = shape.dimensions.x / 2.0;
+  vertex.y() = -shape.dimensions.y / 2.0;
+  vertex.z() = shape.dimensions.z / 2.0;
+  vertices.push_back(pose * vertex);
+
+  vertex.x() = shape.dimensions.x / 2.0;
+  vertex.y() = -shape.dimensions.y / 2.0;
+  vertex.z() = -shape.dimensions.z / 2.0;
+  vertices.push_back(pose * vertex);
+
+  vertex.x() = -shape.dimensions.x / 2.0;
+  vertex.y() = -shape.dimensions.y / 2.0;
+  vertex.z() = -shape.dimensions.z / 2.0;
+  vertices.push_back(pose * vertex);
+
+  return vertices;
+}
+
+void SingleInferenceLidarCenterPointNode::detect(
+  const std::string & pcd_path, const std::string & detections_path)
+{
+  sensor_msgs::msg::PointCloud2 msg;
+  pcl::PointCloud<pcl::PointXYZ>::Ptr pc_ptr(new pcl::PointCloud<pcl::PointXYZ>());
+
+  pcl::io::loadPCDFile(pcd_path, *pc_ptr);
+  pcl::toROSMsg(*pc_ptr, msg);
+  msg.header.frame_id = "lidar_frame";
+
+  std::vector<Box3D> det_boxes3d;
+  bool is_success = detector_ptr_->detect(msg, tf_buffer_, det_boxes3d);
+  if (!is_success) {
+    return;
+  }
+
+  autoware_perception_msgs::msg::DetectedObjects output_msg;
+  output_msg.header = msg.header;
+  for (const auto & box3d : det_boxes3d) {
+    autoware_perception_msgs::msg::DetectedObject obj;
+    box3DToDetectedObject(box3d, class_names_, rename_car_to_truck_and_bus_, has_twist_, obj);
+    output_msg.objects.emplace_back(obj);
+  }
+
+  dumpDetectionsAsMesh(output_msg, detections_path);
+
+  RCLCPP_INFO(
+    rclcpp::get_logger("single_inference_lidar_centerpoint_tvm"),
+    "The detection results were saved as meshes in %s", detections_path.c_str());
+}
+
+void SingleInferenceLidarCenterPointNode::dumpDetectionsAsMesh(
+  const autoware_perception_msgs::msg::DetectedObjects & objects_msg,
+  const std::string & output_path) const
+{
+  std::ofstream ofs(output_path, std::ofstream::out);
+  std::stringstream vertices_stream;
+  std::stringstream faces_stream;
+  int index = 0;
+  int num_detections = static_cast<int>(objects_msg.objects.size());
+
+  ofs << "ply" << std::endl;
+  ofs << "format ascii 1.0" << std::endl;
+  ofs << "comment created by lidar_centerpoint" << std::endl;
+  ofs << "element vertex " << 8 * num_detections << std::endl;
+  ofs << "property float x" << std::endl;
+  ofs << "property float y" << std::endl;
+  ofs << "property float z" << std::endl;
+  ofs << "element face " << 12 * num_detections << std::endl;
+  ofs << "property list uchar uint vertex_indices" << std::endl;
+  ofs << "end_header" << std::endl;
+
+  auto streamFace = [&faces_stream](int v1, int v2, int v3) {
+    faces_stream << std::to_string(3) << " " << std::to_string(v1) << " " << std::to_string(v2)
+                 << " " << std::to_string(v3) << std::endl;
+  };
+
+  for (const auto & object : objects_msg.objects) {
+    Eigen::Affine3d pose_affine;
+    tf2::fromMsg(object.kinematics.pose_with_covariance.pose, pose_affine);
+
+    std::vector<Eigen::Vector3d> vertices = getVertices(object.shape, pose_affine);
+
+    for (const auto & vertex : vertices) {
+      vertices_stream << vertex.x() << " " << vertex.y() << " " << vertex.z() << std::endl;
+    }
+
+    streamFace(index + 1, index + 3, index + 4);
+    streamFace(index + 3, index + 5, index + 6);
+    streamFace(index + 0, index + 7, index + 5);
+    streamFace(index + 7, index + 2, index + 4);
+    streamFace(index + 5, index + 3, index + 1);
+    streamFace(index + 7, index + 0, index + 2);
+    streamFace(index + 2, index + 1, index + 4);
+    streamFace(index + 4, index + 3, index + 6);
+    streamFace(index + 5, index + 7, index + 6);
+    streamFace(index + 6, index + 7, index + 4);
+    streamFace(index + 0, index + 5, index + 1);
+    index += 8;
+  }
+
+  ofs << vertices_stream.str();
+  ofs << faces_stream.str();
+
+  ofs.close();
+}
+
+}  // namespace lidar_centerpoint_tvm
+}  // namespace perception
+}  // namespace autoware
+
+#include <rclcpp_components/register_node_macro.hpp>
+RCLCPP_COMPONENTS_REGISTER_NODE(
+  autoware::perception::lidar_centerpoint_tvm::SingleInferenceLidarCenterPointNode)

From e6b37c625c464fed71df9bf0a9e074d619eef865 Mon Sep 17 00:00:00 2001
From: Kotaro Yoshimoto
Date: Wed, 26 Jun 2024 07:39:22 +0000
Subject: [PATCH 8/8] chore: fix pre-commit errors

Signed-off-by: Kotaro Yoshimoto

---
 .github/workflows/cppcheck-differential.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/cppcheck-differential.yaml b/.github/workflows/cppcheck-differential.yaml
index af5f844ad535e..7fe37ea006596 100644
--- a/.github/workflows/cppcheck-differential.yaml
+++ b/.github/workflows/cppcheck-differential.yaml
@@ -42,8 +42,8 @@ jobs:
       continue-on-error: true
       id: cppcheck
       run: |
-        echo "Running Cppcheck on changed files: ${{ steps.get-changed-files.outputs.changed-files }}"
-        cppcheck --enable=all --inconclusive --check-level=exhaustive --error-exitcode=1 --suppressions-list=.cppcheck_suppressions ${{ steps.get-changed-files.outputs.changed-files }} 2> cppcheck-report.txt
+        echo "Running Cppcheck on changed files: ${{ steps.get-changed-files.outputs.changed-files }}"
+        cppcheck --enable=all --inconclusive --check-level=exhaustive --error-exitcode=1 --suppressions-list=.cppcheck_suppressions ${{ steps.get-changed-files.outputs.changed-files }} 2> cppcheck-report.txt
      shell: bash

    - name: Show cppcheck-report result