Revert "chore: delete lidar_centerpoint_tvm package for test"
This reverts commit 3b8bcb3.
HansRobo authored Jun 26, 2024
1 parent 63d4da2 commit 981afbd
Showing 33 changed files with 2,800 additions and 0 deletions.
100 changes: 100 additions & 0 deletions perception/lidar_centerpoint_tvm/CMakeLists.txt
@@ -0,0 +1,100 @@
cmake_minimum_required(VERSION 3.14)
project(lidar_centerpoint_tvm)

find_package(autoware_cmake REQUIRED)
autoware_package()

set(tvm_runtime_DIR ${tvm_vendor_DIR})
find_package(tvm_runtime CONFIG REQUIRED)

# Gather neural network information.
set(${PROJECT_NAME}_BACKEND llvm CACHE STRING "${PROJECT_NAME} neural network backend")
set(MODEL_NAME_ENCODER centerpoint_encoder)

# Get neural network.
set(NN_DEPENDENCY_ENCODER "")
get_neural_network(${MODEL_NAME_ENCODER} ${${PROJECT_NAME}_BACKEND} NN_DEPENDENCY_ENCODER)

set(MODEL_NAME_BACKBONE centerpoint_backbone)

# Get neural network.
set(NN_DEPENDENCY_BACKBONE "")
get_neural_network(${MODEL_NAME_BACKBONE} ${${PROJECT_NAME}_BACKEND} NN_DEPENDENCY_BACKBONE)

if((NOT NN_DEPENDENCY_ENCODER STREQUAL "") AND (NOT NN_DEPENDENCY_BACKBONE STREQUAL ""))
  ## centerpoint_tvm ##
  ament_auto_add_library(${PROJECT_NAME} SHARED
    data/models/${MODEL_NAME_ENCODER}/inference_engine_tvm_config.hpp
    data/models/${MODEL_NAME_BACKBONE}/inference_engine_tvm_config.hpp
    data/models/${MODEL_NAME_BACKBONE}/preprocessing_inference_engine_tvm_config.hpp
    lib/centerpoint_tvm.cpp
    lib/utils.cpp
    lib/ros_utils.cpp
    lib/preprocess/pointcloud_densification.cpp
    lib/preprocess/voxel_generator.cpp
    lib/preprocess/generate_features.cpp
    lib/postprocess/circle_nms.cpp
    lib/postprocess/generate_detected_boxes.cpp
  )

  add_dependencies(${PROJECT_NAME} ${NN_DEPENDENCY_ENCODER})
  add_dependencies(${PROJECT_NAME} ${NN_DEPENDENCY_BACKBONE})

  target_compile_options(${PROJECT_NAME} PRIVATE "-Wno-sign-conversion" "-Wno-conversion")

  target_include_directories(${PROJECT_NAME} SYSTEM PUBLIC
    "${tvm_vendor_INCLUDE_DIRS}"
  )

  target_link_libraries(${PROJECT_NAME}
    ${tvm_runtime_LIBRARIES}
  )

  target_include_directories(${PROJECT_NAME} PRIVATE
    data/models
  )

  ## node ##
  ament_auto_add_library(lidar_centerpoint_tvm_component SHARED
    src/node.cpp
  )

  target_link_libraries(lidar_centerpoint_tvm_component
    ${PROJECT_NAME}
  )

  rclcpp_components_register_node(lidar_centerpoint_tvm_component
    PLUGIN "autoware::perception::lidar_centerpoint_tvm::LidarCenterPointTVMNode"
    EXECUTABLE lidar_centerpoint_tvm_node
  )

  ## single inference node ##
  ament_auto_add_library(single_inference_lidar_centerpoint_tvm_component SHARED
    src/single_inference_node.cpp
  )

  target_link_libraries(single_inference_lidar_centerpoint_tvm_component
    ${tvm_runtime_LIBRARIES}
  )

  rclcpp_components_register_node(single_inference_lidar_centerpoint_tvm_component
    PLUGIN "autoware::perception::lidar_centerpoint_tvm::SingleInferenceLidarCenterPointNode"
    EXECUTABLE single_inference_lidar_centerpoint_tvm_node
  )

  install(PROGRAMS
    scripts/lidar_centerpoint_visualizer.py
    DESTINATION lib/${PROJECT_NAME}
  )

  ament_export_dependencies(ament_cmake_python)

  ament_auto_package(INSTALL_TO_SHARE
    launch
    config
  )
else()
  message(WARNING "Neural network not found, skipping package.")
  ament_auto_package()
endif()
77 changes: 77 additions & 0 deletions perception/lidar_centerpoint_tvm/README.md
@@ -0,0 +1,77 @@
# lidar_centerpoint_tvm

## Design

### Usage

lidar_centerpoint_tvm is a package for detecting dynamic 3D objects using a TVM-compiled CenterPoint module for different backends. To use this package, replace `lidar_centerpoint` with `lidar_centerpoint_tvm` in the perception launch files (for example, in `lidar_based_detection.launch.xml` if lidar-based detection is chosen).

#### Neural network

This package will not build without a neural network for its inference.
The network is provided by the `tvm_utility` package.
See its design page for more information on how to enable downloading pre-compiled networks (by setting the `DOWNLOAD_ARTIFACTS` cmake variable), or how to handle user-compiled networks.

#### Backend

The backend used for the inference can be selected by setting the `lidar_centerpoint_tvm_BACKEND` cmake variable.
The currently available options are `llvm` for a CPU backend, and `vulkan` or `opencl` for a GPU backend.
It defaults to `llvm`.

### Inputs / Outputs

#### Input

| Name | Type | Description |
| -------------------- | ------------------------------- | ---------------- |
| `~/input/pointcloud` | `sensor_msgs::msg::PointCloud2` | input pointcloud |

#### Output

| Name | Type | Description |
| -------------------------- | ------------------------------------------------ | -------------------- |
| `~/output/objects` | `autoware_perception_msgs::msg::DetectedObjects` | detected objects |
| `debug/cyclic_time_ms`     | `tier4_debug_msgs::msg::Float64Stamped`          | cyclic time (ms)     |
| `debug/processing_time_ms` | `tier4_debug_msgs::msg::Float64Stamped` | processing time (ms) |

## Parameters

### Core Parameters

| Name | Type | Default Value | Description |
| ------------------------------- | ------ | ------------- | ----------------------------------------------------------- |
| `score_threshold`               | float  | `0.1`         | detected objects with a score below this threshold are ignored |
| `densification_world_frame_id` | string | `map` | the world frame id to fuse multi-frame pointcloud |
| `densification_num_past_frames` | int | `1` | the number of past frames to fuse with the current frame |
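
As an illustration only, here is a minimal hypothetical sketch (the node and member names are invented here; see `src/node.cpp` for the actual implementation) of how these parameters and their defaults map onto `rclcpp` parameter declarations:

```cpp
#include <rclcpp/rclcpp.hpp>

#include <string>

// Hypothetical demo node illustrating the core parameters above.
class CenterPointParamsDemo : public rclcpp::Node
{
public:
  CenterPointParamsDemo() : rclcpp::Node("lidar_centerpoint_tvm")
  {
    // declare_parameter(name, default) registers the parameter and returns its value
    score_threshold_ = static_cast<float>(declare_parameter("score_threshold", 0.1));
    world_frame_id_ = declare_parameter("densification_world_frame_id", std::string("map"));
    num_past_frames_ = declare_parameter("densification_num_past_frames", 1);
  }

private:
  float score_threshold_{};
  std::string world_frame_id_{};
  int num_past_frames_{};
};
```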

### Bounding Box

The lidar segmentation node establishes a bounding box for each detected obstacle using the `L-fit` method, which fits a rectangle to the point cluster (a simplified sketch follows).
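
Below is a minimal, hedged sketch of such rectangle fitting via a heading search, assuming a simple area-minimization criterion; the actual `L-fit` implementation may use a different criterion (e.g. point closeness) and a more efficient search:

```cpp
// Hedged sketch: search candidate headings, project the cluster onto the
// heading/lateral axes, and keep the heading with the smallest projected area.
#include <algorithm>
#include <cmath>
#include <limits>
#include <vector>

struct Point2d { double x; double y; };
struct Box2d { double yaw; double length; double width; };  // pose omitted for brevity

Box2d fit_bounding_box(const std::vector<Point2d> & cluster)
{
  Box2d best{0.0, 0.0, 0.0};
  if (cluster.empty()) return best;
  const double pi = std::acos(-1.0);
  const double step = pi / 180.0;  // 1 degree search resolution
  double best_area = std::numeric_limits<double>::max();
  for (double yaw = 0.0; yaw < pi / 2.0; yaw += step) {
    const double c = std::cos(yaw);
    const double s = std::sin(yaw);
    double min_u = std::numeric_limits<double>::max(), max_u = -min_u;
    double min_v = min_u, max_v = -min_u;
    for (const auto & p : cluster) {
      const double u = c * p.x + s * p.y;   // projection onto heading axis
      const double v = -s * p.x + c * p.y;  // projection onto lateral axis
      min_u = std::min(min_u, u); max_u = std::max(max_u, u);
      min_v = std::min(min_v, v); max_v = std::max(max_v, v);
    }
    const double area = (max_u - min_u) * (max_v - min_v);
    if (area < best_area) {
      best_area = area;
      best = {yaw, max_u - min_u, max_v - min_v};
    }
  }
  return best;
}
```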

### Limitations and Known Issues

Due to an accuracy issue with the `centerpoint` model, the `vulkan` backend cannot be used at the moment.
As for the `llvm` backend, real-time performance cannot be achieved.

### Scatter Implementation

The scatter function can be implemented using either TVMScript or C++. For a C++ implementation, please refer to <https://github.com/angry-crab/autoware.universe/blob/c020419fe52e359287eccb1b77e93bdc1a681e24/perception/lidar_centerpoint_tvm/lib/network/scatter.cpp#L65>. A simplified sketch is shown below.
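
As an illustration only (the linked file is the authoritative implementation): the scatter step writes each pillar's features into a dense bird's-eye-view canvas at that pillar's voxel coordinates. Shapes follow the TVM configs in this package (`pillar_features` {40000, 1, 32}, `coords` {40000, 3}, `spatial_features` {1, 32, 560, 560}); the coords layout and the negative-coordinate padding convention are assumptions.

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Simplified scatter: copy per-pillar features into a zero-initialized
// BEV canvas of shape [feature_size, grid_h, grid_w].
void scatter(
  const std::vector<float> & pillar_features,  // [num_pillars * feature_size]
  const std::vector<int32_t> & coords,         // [num_pillars * 3]; last two entries assumed (y, x)
  std::size_t num_pillars, std::size_t feature_size,
  std::size_t grid_h, std::size_t grid_w,
  std::vector<float> & spatial_features)       // [feature_size * grid_h * grid_w]
{
  for (std::size_t p = 0; p < num_pillars; ++p) {
    const int32_t y = coords[p * 3 + 1];
    const int32_t x = coords[p * 3 + 2];
    if (x < 0 || y < 0) {
      continue;  // padded/empty pillar slots, assumed marked with negative coords
    }
    for (std::size_t f = 0; f < feature_size; ++f) {
      spatial_features[(f * grid_h + static_cast<std::size_t>(y)) * grid_w +
                       static_cast<std::size_t>(x)] = pillar_features[p * feature_size + f];
    }
  }
}
```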

## Reference

[1] Yin, Tianwei, Xingyi Zhou, and Philipp Krähenbühl. "Center-based 3d object detection and tracking." arXiv preprint arXiv:2006.11275 (2020).

[2] Lang, Alex H., et al. "PointPillars: Fast encoders for object detection from point clouds." Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2019.

[3] <https://github.com/tianweiy/CenterPoint>

[4] <https://github.com/Abraham423/CenterPoint>

[5] <https://github.com/open-mmlab/OpenPCDet>

## Related issues

<!-- Required -->

- #908: Run Lidar Centerpoint with TVM
10 changes: 10 additions & 0 deletions perception/lidar_centerpoint_tvm/config/centerpoint.param.yaml
@@ -0,0 +1,10 @@
/**:
  ros__parameters:
    class_names: ["CAR", "PEDESTRIAN", "BICYCLE"]
    rename_car_to_truck_and_bus: true
    point_feature_size: 4
    max_voxel_size: 40000
    point_cloud_range: [-89.6, -89.6, -3.0, 89.6, 89.6, 5.0]
    voxel_size: [0.32, 0.32, 8.0]
    downsample_factor: 1
    encoder_in_feature_size: 9
61 changes: 61 additions & 0 deletions perception/lidar_centerpoint_tvm/data/models/centerpoint_backbone/inference_engine_tvm_config.hpp
@@ -0,0 +1,61 @@
// Copyright 2021 Arm Limited and Contributors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "tvm_utility/pipeline.hpp"

#ifndef PERCEPTION__LIDAR_CENTERPOINT_TVM__DATA__MODELS__CENTERPOINT_BACKBONE__INFERENCE_ENGINE_TVM_CONFIG_HPP_ // NOLINT
#define PERCEPTION__LIDAR_CENTERPOINT_TVM__DATA__MODELS__CENTERPOINT_BACKBONE__INFERENCE_ENGINE_TVM_CONFIG_HPP_ // NOLINT

namespace model_zoo
{
namespace perception
{
namespace lidar_obstacle_detection
{
namespace centerpoint_backbone
{
namespace onnx_centerpoint_backbone
{

static const tvm_utility::pipeline::InferenceEngineTVMConfig config{
  {3, 0, 0},  // modelzoo_version

  "centerpoint_backbone",  // network_name
  "llvm",  // network_backend

  "./deploy_lib.so",  // network_module_path
  "./deploy_graph.json",  // network_graph_path
  "./deploy_param.params",  // network_params_path

  // cspell: ignore DLCPU
  kDLCPU,  // tvm_device_type
  0,  // tvm_device_id

  {{"spatial_features", kDLFloat, 32, 1, {1, 32, 560, 560}}},  // network_inputs

  {{"heatmap", kDLFloat, 32, 1, {1, 3, 560, 560}},
   {"reg", kDLFloat, 32, 1, {1, 2, 560, 560}},
   {"height", kDLFloat, 32, 1, {1, 1, 560, 560}},
   {"dim", kDLFloat, 32, 1, {1, 3, 560, 560}},
   {"rot", kDLFloat, 32, 1, {1, 2, 560, 560}},
   {"vel", kDLFloat, 32, 1, {1, 2, 560, 560}}}  // network_outputs
};

} // namespace onnx_centerpoint_backbone
} // namespace centerpoint_backbone
} // namespace lidar_obstacle_detection
} // namespace perception
} // namespace model_zoo
// NOLINTNEXTLINE
#endif // PERCEPTION__LIDAR_CENTERPOINT_TVM__DATA__MODELS__CENTERPOINT_BACKBONE__INFERENCE_ENGINE_TVM_CONFIG_HPP_
60 changes: 60 additions & 0 deletions perception/lidar_centerpoint_tvm/data/models/centerpoint_backbone/preprocessing_inference_engine_tvm_config.hpp
@@ -0,0 +1,60 @@
// Copyright 2021 Arm Limited and Contributors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "tvm_utility/pipeline.hpp"

#ifndef PERCEPTION__LIDAR_CENTERPOINT_TVM__DATA__MODELS__CENTERPOINT_BACKBONE__PREPROCESSING_INFERENCE_ENGINE_TVM_CONFIG_HPP_ // NOLINT
#define PERCEPTION__LIDAR_CENTERPOINT_TVM__DATA__MODELS__CENTERPOINT_BACKBONE__PREPROCESSING_INFERENCE_ENGINE_TVM_CONFIG_HPP_ // NOLINT

namespace model_zoo
{
namespace perception
{
namespace lidar_obstacle_detection
{
namespace centerpoint_backbone
{
namespace onnx_centerpoint_backbone
{
namespace preprocessing
{

static const tvm_utility::pipeline::InferenceEngineTVMConfig config{
  {3, 0, 0},  // modelzoo_version

  "centerpoint_backbone",  // network_name
  "llvm",  // network_backend

  "./preprocess.so",  // network_module_path
  "./",  // network_graph_path
  "./",  // network_params_path

  // cspell: ignore DLCPU
  kDLCPU,  // tvm_device_type
  0,  // tvm_device_id

  {{"pillar_features", kDLFloat, 32, 1, {40000, 1, 32}},
   {"coords", kDLInt, 32, 1, {40000, 3}}},  // network_inputs

  {{"spatial_features", kDLFloat, 32, 1, {1, 32, 560, 560}}}  // network_outputs
};

} // namespace preprocessing
} // namespace onnx_centerpoint_backbone
} // namespace centerpoint_backbone
} // namespace lidar_obstacle_detection
} // namespace perception
} // namespace model_zoo
// NOLINTNEXTLINE
#endif // PERCEPTION__LIDAR_CENTERPOINT_TVM__DATA__MODELS__CENTERPOINT_BACKBONE__PREPROCESSING_INFERENCE_ENGINE_TVM_CONFIG_HPP_
56 changes: 56 additions & 0 deletions perception/lidar_centerpoint_tvm/data/models/centerpoint_encoder/inference_engine_tvm_config.hpp
@@ -0,0 +1,56 @@
// Copyright 2021 Arm Limited and Contributors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "tvm_utility/pipeline.hpp"

#ifndef PERCEPTION__LIDAR_CENTERPOINT_TVM__DATA__MODELS__CENTERPOINT_ENCODER__INFERENCE_ENGINE_TVM_CONFIG_HPP_ // NOLINT
#define PERCEPTION__LIDAR_CENTERPOINT_TVM__DATA__MODELS__CENTERPOINT_ENCODER__INFERENCE_ENGINE_TVM_CONFIG_HPP_ // NOLINT

namespace model_zoo
{
namespace perception
{
namespace lidar_obstacle_detection
{
namespace centerpoint_encoder
{
namespace onnx_centerpoint_encoder
{

static const tvm_utility::pipeline::InferenceEngineTVMConfig config{
  {3, 0, 0},  // modelzoo_version

  "centerpoint_encoder",  // network_name
  "llvm",  // network_backend

  "./deploy_lib.so",  // network_module_path
  "./deploy_graph.json",  // network_graph_path
  "./deploy_param.params",  // network_params_path

  // cspell: ignore DLCPU
  kDLCPU,  // tvm_device_type
  0,  // tvm_device_id

  {{"input_features", kDLFloat, 32, 1, {40000, 32, 9}}},  // network_inputs

  {{"pillar_features", kDLFloat, 32, 1, {40000, 1, 32}}}  // network_outputs
};

} // namespace onnx_centerpoint_encoder
} // namespace centerpoint_encoder
} // namespace lidar_obstacle_detection
} // namespace perception
} // namespace model_zoo
// NOLINTNEXTLINE
#endif // PERCEPTION__LIDAR_CENTERPOINT_TVM__DATA__MODELS__CENTERPOINT_ENCODER__INFERENCE_ENGINE_TVM_CONFIG_HPP_
