I am using C++ with TensorRT 10 and CUDA 11.6, and I have created a custom plugin. However, when using it, I found that the parameter void* workspace in the function FpsamplePlugin::enqueue is null. Why is this happening?
At the same time, I ran the TensorRT sample code sample_non_zero_plugin, and there the void* workspace parameter is non-null. So I think the issue is with the definition of my plugin, but I've searched for a long time and couldn't resolve it.
class FpsamplePlugin : public nvinfer1::IPluginV3,
public nvinfer1::IPluginV3OneCore,
public nvinfer1::IPluginV3OneBuild,
public nvinfer1::IPluginV3OneRuntime {
You need to override getWorkspaceSize() from IPluginV3OneBuild and return the number of workspace bytes your plugin needs. The default implementation returns 0, so TensorRT allocates no workspace and passes a null pointer to enqueue.
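For example, a minimal sketch of such an override (the sizing logic here is purely illustrative and not taken from the original plugin):
// Declared on IPluginV3OneBuild; without this override the base-class default returns 0,
// and TensorRT then hands a null workspace pointer to enqueue.
size_t FpsamplePlugin::getWorkspaceSize( DynamicPluginTensorDesc const* inputs, int32_t nbInputs,
                                         DynamicPluginTensorDesc const* outputs, int32_t nbOutputs ) const noexcept {
    // Illustrative sizing only: one float per element of the first input's largest possible shape.
    // Replace with whatever scratch space the sampling kernel actually needs.
    int64_t numElems = 1;
    for ( int32_t i = 0; i < inputs[ 0 ].max.nbDims; ++i ) {
        numElems *= inputs[ 0 ].max.d[ i ];
    }
    return static_cast<size_t>( numElems ) * sizeof( float );
}
The builder queries this size, allocates the workspace, and the same pointer then arrives non-null in enqueue at runtime.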
OK, Thanks.
If I use:
class FpsamplePlugin : public nvinfer1::IPluginV3,
public nvinfer1::IPluginV3OneCore,
public nvinfer1::IPluginV3OneBuild,
public nvinfer1::IPluginV3OneRuntime
Description
########## header file:
#pragma once
#include <NvInfer.h>
#include <vector>
#include <NvInferPlugin.h>
#include <cuda_runtime_api.h>
using namespace nvinfer1;
namespace nvinfer1 {
class FpsamplePlugin : public nvinfer1::IPluginV3,
public nvinfer1::IPluginV3OneCore,
public nvinfer1::IPluginV3OneBuild,
public nvinfer1::IPluginV3OneRuntime {
public:
FpsamplePlugin( int32_t nsample );
private:
int32_t m_nsample;
PluginFieldCollection m_pfc;
std::vector<PluginField> m_pluginAttributes;
};
class FpsamplePluginCreator : public nvinfer1::IPluginCreatorV3One {
public:
FpsamplePluginCreator();
private:
PluginFieldCollection m_pfc;
std::vector<PluginField> m_pluginAttributes;
};
} // namespace nvinfer1
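Note that the class as posted does not declare a getWorkspaceSize() override, so the IPluginV3OneBuild default (which returns 0) is what the builder sees. If the override sketched above is adopted, the matching declaration would go into this class, along the lines of:
size_t getWorkspaceSize( DynamicPluginTensorDesc const* inputs, int32_t nbInputs,
                         DynamicPluginTensorDesc const* outputs, int32_t nbOutputs ) const noexcept override;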
########## cpp file:
#include "FpsamplePlugin.h"
#include <cassert>
#include <cstring>
#include "../cuda_impl/cuda_impl.h"
namespace {
char const* const FPSAMPLE_PLUGIN_VERSION{ "1" };
char const* const FPSAMPLE_PLUGIN_NAME{ "KDTreeFpsample" };
char const* const FPSAMPLE_PLUGIN_NAMESPACE{ "" };
} // namespace
nvinfer1::FpsamplePlugin::FpsamplePlugin( int32_t nsample )
: m_nsample( nsample ) {
m_pluginAttributes.clear();
}
IPluginCapability* FpsamplePlugin::getCapabilityInterface( PluginCapabilityType type ) noexcept {
try {
if ( type == PluginCapabilityType::kBUILD ) {
return static_cast<IPluginV3OneBuild*>( this );
}
if ( type == PluginCapabilityType::kRUNTIME ) {
return static_cast<IPluginV3OneRuntime*>( this );
}
assert( type == PluginCapabilityType::kCORE );
return static_cast<IPluginV3OneCore*>( this );
}
catch ( ... ) {
// log error
}
return nullptr;
}
IPluginV3* FpsamplePlugin::clone() noexcept {
return new FpsamplePlugin( m_nsample );
}
AsciiChar const* FpsamplePlugin::getPluginName() const noexcept {
return FPSAMPLE_PLUGIN_NAME;
}
AsciiChar const* FpsamplePlugin::getPluginVersion() const noexcept {
return FPSAMPLE_PLUGIN_VERSION;
}
AsciiChar const* FpsamplePlugin::getPluginNamespace() const noexcept {
return FPSAMPLE_PLUGIN_NAMESPACE;
}
int32_t FpsamplePlugin::configurePlugin( DynamicPluginTensorDesc const* in, int32_t nbInputs,
DynamicPluginTensorDesc const* out, int32_t nbOutputs ) noexcept {
return 0;
}
int32_t FpsamplePlugin::getOutputDataTypes( DataType* outputTypes, int32_t nbOutputs, const DataType* inputTypes,
int32_t nbInputs ) const noexcept {
// Ensure the numbers of inputs and outputs match expectations
assert( nbInputs == m_in_n );
assert( nbOutputs == m_out_n );
}
int32_t FpsamplePlugin::getOutputShapes( const DimsExprs* inputs, int32_t nbInputs, const DimsExprs* shapeInputs,
int32_t nbShapeInputs, DimsExprs* outputs, int32_t nbOutputs,
IExprBuilder& exprBuilder ) noexcept {
// Ensure the numbers of inputs and outputs match expectations
assert( nbInputs == m_in_n );
assert( nbOutputs == m_out_n );
}
bool FpsamplePlugin::supportsFormatCombination( int32_t pos, const DynamicPluginTensorDesc* inOut, int32_t nbInputs,
int32_t nbOutputs ) noexcept {
// Ensure pos is within the valid range
assert( pos < ( nbInputs + nbOutputs ) );
}
int32_t FpsamplePlugin::getNbOutputs() const noexcept {
return m_out_n;
}
int32_t FpsamplePlugin::onShapeChange( PluginTensorDesc const* in, int32_t nbInputs, PluginTensorDesc const* out,
int32_t nbOutputs ) noexcept {
return 0;
}
int32_t FpsamplePlugin::enqueue( PluginTensorDesc const* inputDesc, PluginTensorDesc const* outputDesc,
void const* const* inputs, void* const* outputs, void* workspace,
cudaStream_t stream ) noexcept {
}
IPluginV3* FpsamplePlugin::attachToContext( IPluginResourceContext* context ) noexcept {
return clone();
}
PluginFieldCollection const* FpsamplePlugin::getFieldsToSerialize() noexcept {
return &m_pfc;
}
/// /////////////////////////////////////////////////////
/// FpsamplePluginCreator
FpsamplePluginCreator::FpsamplePluginCreator() {
m_pluginAttributes.clear();
}
IPluginV3* FpsamplePluginCreator::createPlugin( AsciiChar const* name, PluginFieldCollection const* fc,
TensorRTPhase phase ) noexcept {
assert( fc->nbFields == 1 );
assert( fc->fields[ 0 ].type == PluginFieldType::kINT32 );
fc->fields[ 0 ].name;
}
PluginFieldCollection const* FpsamplePluginCreator::getFieldNames() noexcept {
return &m_pfc;
}
AsciiChar const* FpsamplePluginCreator::getPluginName() const noexcept {
return FPSAMPLE_PLUGIN_NAME;
}
AsciiChar const* FpsamplePluginCreator::getPluginVersion() const noexcept {
return FPSAMPLE_PLUGIN_VERSION;
}
AsciiChar const* FpsamplePluginCreator::getPluginNamespace() const noexcept {
return FPSAMPLE_PLUGIN_NAMESPACE;
}
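For completeness, once getWorkspaceSize() reports a non-zero size, the workspace pointer passed to enqueue is a valid device allocation of at least that many bytes. A rough sketch of how the currently empty enqueue could use it (the buffer layout and the commented-out launcher are hypothetical, not part of the original code):
int32_t FpsamplePlugin::enqueue( PluginTensorDesc const* inputDesc, PluginTensorDesc const* outputDesc,
                                 void const* const* inputs, void* const* outputs, void* workspace,
                                 cudaStream_t stream ) noexcept {
    // workspace is at least getWorkspaceSize() bytes; use it as temporary device scratch memory.
    float* scratch = static_cast<float*>( workspace );
    // Hypothetical launcher from cuda_impl.h, shown only to illustrate wiring the scratch buffer through:
    // kdtree_fpsample_launch( inputs[ 0 ], outputs[ 0 ], scratch, m_nsample, stream );
    return 0;
}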
Environment
TensorRT Version: TensorRT-10.6.0.26
NVIDIA GPU: NVIDIA GeForce RTX 4050 Laptop GPU
CUDA Version: CUDA 11.6
Operating System: Windows 11