Skip to content

Commit

Permalink
[onert/train] Support averagePool2D operation for training (#14149)
Browse files Browse the repository at this point in the history
This commit enables averagePool2D for training.

ONE-DCO-1.0-Signed-off-by: JuYoung Lee rsb98759@gmail.com
Co-authored-by: Jang Jiseob <ragmani0216@gmail.com>
  • Loading branch information
icodo98 and ragmani authored Oct 7, 2024
1 parent cb14d8c commit c82f8cb
Show file tree
Hide file tree
Showing 4 changed files with 85 additions and 1 deletion.
4 changes: 4 additions & 0 deletions runtime/onert/backend/train/KernelGenerator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,8 @@ ops::PoolType convertPoolType(ir::operation::Pool2D::PoolType type_ir)
// TODO Implement AVG PoolType
case ir::operation::Pool2D::PoolType::MAX:
return ops::PoolType::kMax;
case ir::operation::Pool2D::PoolType::AVG:
return ops::PoolType::kAvg;
default:
throw std::runtime_error("train KernelGenerator : Not supported operation yet");
}
Expand Down Expand Up @@ -496,6 +498,8 @@ void KernelGenerator::visit(const ir::train::operation::Pool2D &node)
{
case train::ops::PoolType::kMax:
return cpu::ops::PoolType::kMax;
case train::ops::PoolType::kAvg:
return cpu::ops::PoolType::kAvg;
default:
throw std::runtime_error("PoolLayer: Unsupported pool type yet");
}
Expand Down
78 changes: 78 additions & 0 deletions runtime/onert/backend/train/ops/PoolLayer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,8 @@
#include "../Tensor.h"

#include <cker/Utils.h>
#include <cker/operation/AveragePool.h>
#include <cker/train/operation/AveragePool.h>
#include <cker/train/operation/MaxPool.h>
#include <cker/train/operation/ReLU.h>

Expand Down Expand Up @@ -108,6 +110,77 @@ class MaxPool2D final : public TrainingKernelRegistry
}
};

// Training kernel for average pooling: runs cker's AveragePool on the forward
// pass and redistributes the incoming gradient across each pooling window on
// the backward pass, undoing the fused activation first when one is present.
class AveragePool2D final : public TrainingKernelRegistry
{
private:
  const ir::Activation _activation;  // fused activation applied after pooling
  const IPortableTensor *_output;    // forward output, consumed by activation backprop
  nnfw::cker::PoolParams _op_params; // stride/filter/padding + activation clamp range

  // Scratch tensor for the gradient after the activation backprop step;
  // allocated only when an activation is actually fused.
  std::unique_ptr<Tensor> _act_back_prop_output;
  std::unique_ptr<Tensor> _arg_avg_index;

public:
  // The right/bottom padding parameters are accepted for signature symmetry
  // with the other pool kernels but are unnamed: cker's PoolParams only
  // carries the left/top values.
  AveragePool2D(const uint32_t paddingLeft, const uint32_t, const uint32_t paddingTop,
                const uint32_t, const uint32_t strideWidth, const uint32_t strideHeight,
                const uint32_t kernelWidth, const uint32_t kernelHeight,
                const ir::Activation activation, const IPortableTensor *output)
    : _activation(activation), _output(output)
  {
    _op_params.stride_width = strideWidth;
    _op_params.stride_height = strideHeight;
    _op_params.filter_width = kernelWidth;
    _op_params.filter_height = kernelHeight;
    // PoolParams stores padding in 8 bits, so the values must fit.
    assert(paddingLeft < (1 << 8));
    assert(paddingTop < (1 << 8));
    _op_params.padding_values.width = static_cast<uint8_t>(paddingLeft);
    _op_params.padding_values.height = static_cast<uint8_t>(paddingTop);
    CalculateActivationRange<float>(activation, &_op_params.float_activation_min,
                                    &_op_params.float_activation_max);

    if (activation != ir::Activation::NONE)
    {
      _act_back_prop_output = std::make_unique<Tensor>(_output->get_info());
      _act_back_prop_output->setBuffer(std::make_shared<basic::Allocator>(_output->total_size()));
    }
  }

  ~AveragePool2D() = default;

public:
  // Average-pooling forward via cker.
  void forward(const IPortableTensor *in, IPortableTensor *out)
  {
    nnfw::cker::AveragePool<float>(_op_params, getShape(in), getBuffer<float>(in), getShape(out),
                                   getBuffer<float>(out));
  }

  void backward(const IPortableTensor *back_prop_out, IPortableTensor *back_prop_in)
  {
    // Step 1: undo the fused activation on the incoming gradient.
    try
    {
      back_prop_out =
        backpropActivation(_activation, _output, back_prop_out, _act_back_prop_output.get());
    }
    catch (const std::exception &e)
    {
      throw std::runtime_error{"train PoolLayer: " + std::string(e.what())};
    }
    assert(back_prop_out != nullptr);

    // Step 2: average-pooling backward — spread each output gradient over its window.
    nnfw::cker::train::AveragePool2DGrad(_op_params, getShape(back_prop_out),
                                         getBuffer<float>(back_prop_out), getShape(back_prop_in),
                                         getBuffer<float>(back_prop_in));
  }
};

} // namespace

PoolLayer::PoolLayer()
Expand Down Expand Up @@ -140,6 +213,11 @@ void PoolLayer::configureBackward(const uint32_t paddingLeft, const uint32_t pad
strideWidth, strideHeight, kernelWidth, kernelHeight,
activation, output);
break;
case PoolType::kAvg:
_kernel = std::make_unique<AveragePool2D>(paddingLeft, paddingRight, paddingTop,
paddingBottom, strideWidth, strideHeight,
kernelWidth, kernelHeight, activation, output);
break;
default:
throw std::runtime_error("PoolLayer: Unsupported pool type");
}
Expand Down
1 change: 1 addition & 0 deletions runtime/onert/backend/train/ops/PoolLayer.h
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,7 @@ class TrainingKernelRegistry
// Pool operation kinds the train backend's PoolLayer can execute.
enum class PoolType
{
  kMax, // max pooling
  kAvg, // average pooling
};

class PoolLayer : public ::onert::exec::train::ITrainableFunction, public cpu::ops::PoolLayer
Expand Down
3 changes: 2 additions & 1 deletion runtime/onert/core/src/ir/train/UseDefGenerator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -290,7 +290,8 @@ void UseDefGenerator::visit(const train::operation::Pad &node)

void UseDefGenerator::visit(const train::operation::Pool2D &node)
{
if (node.param().op_type != ir::operation::Pool2D::PoolType::MAX)
if (node.param().op_type != ir::operation::Pool2D::PoolType::MAX &&
node.param().op_type != ir::operation::Pool2D::PoolType::AVG)
{
throw std::runtime_error{"UseDefGenerator: Not yet supported pool type"};
}
Expand Down

0 comments on commit c82f8cb

Please sign in to comment.