Commit af7c540: Final cleanup
tomdol committed Jun 25, 2023 (parent: 13c7c6b)
Showing 2 changed files with 124 additions and 34 deletions.
@@ -93,23 +93,6 @@ T reduction_neutral_value(const Reduction reduction_type) {
}
}

-template <>
-char reduction_neutral_value<char>(const Reduction reduction_type) {
-switch (reduction_type) {
-case Reduction::SUM:
-case Reduction::MAX:
-return 0;
-case Reduction::PROD:
-case Reduction::MIN:
-return 1;
-default:
-OPENVINO_ASSERT(false, "Neutral value not available for this type of reduction");
-return 0;
-}
-}
-
-// todo: ^specialize for bool? min and max wont work like this
-
template <typename T>
std::function<T(const T, const T)> reduction_functor_for(const Reduction reduction_type) {
switch (reduction_type) {
@@ -138,12 +121,10 @@ std::function<char(const char, const char)> reduction_functor_for<char>(const Re
case Reduction::MAX:
return [](const char a, const char b) {
return a > b ? a : b;
-// return a || b;
};
case Reduction::MIN:
return [](const char a, const char b) {
return a < b ? a : b;
-// return a && b;
};
case Reduction::PROD:
return [](const char a, const char b) {
@@ -160,24 +141,21 @@ std::function<char(const char, const char)> reduction_functor_for<char>(const Re
}

template <typename T>
-T arithmetic_mean_int(const T accumulator, const int32_t N) {
+typename std::enable_if<std::is_floating_point<T>::value || std::is_class<T>::value, T>::type arithmetic_mean(
+const T accumulator,
+const int32_t N) {
+return accumulator / N;
+}
+
+template <typename T>
+typename std::enable_if<std::is_integral<T>::value, T>::type arithmetic_mean(const T accumulator, const int32_t N) {
const auto old_mode = std::fegetround();
std::fesetround(FE_DOWNWARD);
const T value = static_cast<T>(std::nearbyint(static_cast<double>(accumulator) / N));
std::fesetround(old_mode);
return value;
}

-template <typename T>
-T arithmetic_mean(const T accumulator, const int32_t N) {
-const auto et = element::from<T>();
-if (et.is_integral_number()) {
-return arithmetic_mean_int(accumulator, N);
-} else {
-return accumulator / N;
-}
-}
-
template <typename DataType, typename IndicesType>
void scatter_elem_update_with_reduction(const DataType* input_data,
const IndicesType* indices,
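The arithmetic_mean change above replaces the runtime element::from<T>().is_integral_number() dispatch with two std::enable_if overloads, so the integral versus floating-point behaviour is chosen at compile time. The following is a minimal standalone sketch of that pattern, not the OpenVINO header itself: the function is renamed to mean, and the std::is_class<T> part of the first overload's condition is dropped for brevity. It illustrates why the integral path sets FE_DOWNWARD before calling std::nearbyint.

#include <cfenv>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <type_traits>

// Floating-point accumulators: plain division.
template <typename T>
typename std::enable_if<std::is_floating_point<T>::value, T>::type mean(const T accumulator, const int32_t n) {
    return accumulator / n;
}

// Integral accumulators: divide in double precision and round toward negative
// infinity (std::nearbyint honours the current rounding mode, unlike std::round).
template <typename T>
typename std::enable_if<std::is_integral<T>::value, T>::type mean(const T accumulator, const int32_t n) {
    const auto old_mode = std::fegetround();
    std::fesetround(FE_DOWNWARD);
    const T value = static_cast<T>(std::nearbyint(static_cast<double>(accumulator) / n));
    std::fesetround(old_mode);
    return value;
}

int main() {
    std::cout << mean(7, 2) << '\n';    // 3
    std::cout << mean(-7, 2) << '\n';   // -4 (rounds toward negative infinity, not toward zero)
    std::cout << mean(7.0, 2) << '\n';  // 3.5
}

Because both overloads share one name, callers can use a single arithmetic_mean(accumulator, N) call for any element type, which is why the old arithmetic_mean_int helper and its runtime dispatcher can be deleted in this commit.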
src/core/tests/eval.cpp: 120 changes (116 additions, 4 deletions)
@@ -1654,8 +1654,8 @@ TEST(eval, evaluate_static_scatter_elements_update_boolean_sum_exclusive) {
auto fun = make_shared<Function>(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4});
auto result_tensor = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result_tensor},
-{make_host_tensor<element::Type_t::boolean>(data_shape, {1, 0, 1, 1, 0}),
-make_host_tensor<element::Type_t::i32>(indices_shape, {0, 1, 2, 4, 4, 0}),
+{make_host_tensor<element::Type_t::boolean>(data_shape, {1, 0, 1, 1, 0}),
+make_host_tensor<element::Type_t::i32>(indices_shape, {0, 1, 2, 4, 4, 0}),
make_host_tensor<element::Type_t::boolean>(indices_shape, {0, 1, 0, 1, 1, 1}),
make_host_tensor<element::Type_t::i64>({}, {0})}));
EXPECT_EQ(result_tensor->get_element_type(), element::boolean);
@@ -1710,8 +1710,8 @@ TEST(eval, evaluate_static_scatter_elements_update_boolean_prod_exclusive) {
auto fun = make_shared<Function>(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4});
auto result_tensor = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result_tensor},
-{make_host_tensor<element::Type_t::boolean>(data_shape, {1, 0, 1, 1, 0}),
-make_host_tensor<element::Type_t::i32>(indices_shape, {0, 1, 2, 4, 4, 0}),
+{make_host_tensor<element::Type_t::boolean>(data_shape, {1, 0, 1, 1, 0}),
+make_host_tensor<element::Type_t::i32>(indices_shape, {0, 1, 2, 4, 4, 0}),
make_host_tensor<element::Type_t::boolean>(indices_shape, {0, 0, 1, 1, 1, 1}),
make_host_tensor<element::Type_t::i64>({}, {0})}));
EXPECT_EQ(result_tensor->get_element_type(), element::boolean);
@@ -1721,6 +1721,118 @@ TEST(eval, evaluate_static_scatter_elements_update_boolean_prod_exclusive) {
ASSERT_EQ(cval, out);
}

TEST(eval, evaluate_static_scatter_elements_update_boolean_min) {
const Shape data_shape{6};
const Shape indices_shape{8};
auto arg1 = make_shared<op::Parameter>(element::boolean, data_shape);
auto arg2 = make_shared<op::Parameter>(element::i32, indices_shape);
auto arg3 = make_shared<op::Parameter>(element::boolean, indices_shape);
auto arg4 = make_shared<op::Parameter>(element::i64, Shape{});
auto scatter_elements_update =
make_shared<ov::op::v12::ScatterElementsUpdate>(arg1,
arg2,
arg3,
arg4,
ov::op::v12::ScatterElementsUpdate::Reduction::MIN,
true);
auto fun = make_shared<Function>(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4});
auto result_tensor = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result_tensor},
{make_host_tensor<element::Type_t::boolean>(data_shape, {1, 0, 0, 1, 1, 0}),
make_host_tensor<element::Type_t::i32>(indices_shape, {0, 1, 2, 3, 4, 4, 5, 5}),
make_host_tensor<element::Type_t::boolean>(indices_shape, {0, 0, 0, 1, 0, 1, 1, 0}),
make_host_tensor<element::Type_t::i64>({}, {0})}));
EXPECT_EQ(result_tensor->get_element_type(), element::boolean);
EXPECT_EQ(result_tensor->get_shape(), data_shape);
const auto cval = read_vector<char>(result_tensor);
const vector<char> out{0, 0, 0, 1, 0, 0};
ASSERT_EQ(cval, out);
}

TEST(eval, evaluate_static_scatter_elements_update_boolean_min_exclusive) {
const Shape data_shape{6};
const Shape indices_shape{8};
auto arg1 = make_shared<op::Parameter>(element::boolean, data_shape);
auto arg2 = make_shared<op::Parameter>(element::i32, indices_shape);
auto arg3 = make_shared<op::Parameter>(element::boolean, indices_shape);
auto arg4 = make_shared<op::Parameter>(element::i64, Shape{});
auto scatter_elements_update =
make_shared<ov::op::v12::ScatterElementsUpdate>(arg1,
arg2,
arg3,
arg4,
ov::op::v12::ScatterElementsUpdate::Reduction::MIN,
false);
auto fun = make_shared<Function>(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4});
auto result_tensor = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result_tensor},
{make_host_tensor<element::Type_t::boolean>(data_shape, {1, 0, 1, 0, 1, 0}),
make_host_tensor<element::Type_t::i32>(indices_shape, {0, 1, 2, 3, 4, 4, 5, 5}),
make_host_tensor<element::Type_t::boolean>(indices_shape, {0, 0, 1, 1, 0, 1, 1, 1}),
make_host_tensor<element::Type_t::i64>({}, {0})}));
EXPECT_EQ(result_tensor->get_element_type(), element::boolean);
EXPECT_EQ(result_tensor->get_shape(), data_shape);
const auto cval = read_vector<char>(result_tensor);
const vector<char> out{0, 0, 1, 1, 0, 1};
ASSERT_EQ(cval, out);
}

TEST(eval, evaluate_static_scatter_elements_update_boolean_max) {
const Shape data_shape{6};
const Shape indices_shape{8};
auto arg1 = make_shared<op::Parameter>(element::boolean, data_shape);
auto arg2 = make_shared<op::Parameter>(element::i32, indices_shape);
auto arg3 = make_shared<op::Parameter>(element::boolean, indices_shape);
auto arg4 = make_shared<op::Parameter>(element::i64, Shape{});
auto scatter_elements_update =
make_shared<ov::op::v12::ScatterElementsUpdate>(arg1,
arg2,
arg3,
arg4,
ov::op::v12::ScatterElementsUpdate::Reduction::MAX,
true);
auto fun = make_shared<Function>(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4});
auto result_tensor = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result_tensor},
{make_host_tensor<element::Type_t::boolean>(data_shape, {1, 0, 0, 1, 1, 0}),
make_host_tensor<element::Type_t::i32>(indices_shape, {0, 1, 2, 3, 4, 4, 5, 5}),
make_host_tensor<element::Type_t::boolean>(indices_shape, {0, 1, 0, 1, 0, 1, 0, 0}),
make_host_tensor<element::Type_t::i64>({}, {0})}));
EXPECT_EQ(result_tensor->get_element_type(), element::boolean);
EXPECT_EQ(result_tensor->get_shape(), data_shape);
const auto cval = read_vector<char>(result_tensor);
const vector<char> out{1, 1, 0, 1, 1, 0};
ASSERT_EQ(cval, out);
}

TEST(eval, evaluate_static_scatter_elements_update_boolean_max_exclusive) {
const Shape data_shape{6};
const Shape indices_shape{8};
auto arg1 = make_shared<op::Parameter>(element::boolean, data_shape);
auto arg2 = make_shared<op::Parameter>(element::i32, indices_shape);
auto arg3 = make_shared<op::Parameter>(element::boolean, indices_shape);
auto arg4 = make_shared<op::Parameter>(element::i64, Shape{});
auto scatter_elements_update =
make_shared<ov::op::v12::ScatterElementsUpdate>(arg1,
arg2,
arg3,
arg4,
ov::op::v12::ScatterElementsUpdate::Reduction::MAX,
false);
auto fun = make_shared<Function>(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4});
auto result_tensor = make_shared<HostTensor>();
ASSERT_TRUE(fun->evaluate({result_tensor},
{make_host_tensor<element::Type_t::boolean>(data_shape, {1, 0, 1, 0, 1, 0}),
make_host_tensor<element::Type_t::i32>(indices_shape, {0, 1, 2, 3, 4, 4, 5, 5}),
make_host_tensor<element::Type_t::boolean>(indices_shape, {0, 1, 1, 0, 0, 1, 0, 0}),
make_host_tensor<element::Type_t::i64>({}, {0})}));
EXPECT_EQ(result_tensor->get_element_type(), element::boolean);
EXPECT_EQ(result_tensor->get_shape(), data_shape);
const auto cval = read_vector<char>(result_tensor);
const vector<char> out{0, 1, 1, 0, 1, 0};
ASSERT_EQ(cval, out);
}

TEST(eval, topk_v1) {
Shape shape{2, 3, 2};
Shape rshape{2, 2, 2};
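The tests added above exercise the MIN and MAX reductions of ov::op::v12::ScatterElementsUpdate on boolean tensors, which map onto the char reduction functors shown in the first file (a > b and a < b on values 0 and 1, i.e. effectively logical OR and AND). As a sanity check, here is a hypothetical reference loop, not part of the test file, that reproduces the expected output of evaluate_static_scatter_elements_update_boolean_min; with use_init_val set to true the original data value takes part in the reduction.

#include <algorithm>
#include <cstddef>
#include <vector>

int main() {
    // Inputs copied from the boolean_min test above (axis 0, Reduction::MIN, use_init_val = true).
    std::vector<char> data{1, 0, 0, 1, 1, 0};
    const std::vector<int> indices{0, 1, 2, 3, 4, 4, 5, 5};
    const std::vector<char> updates{0, 0, 0, 1, 0, 1, 1, 0};

    // Each update is folded into the destination slot it targets; for 0/1 data,
    // MIN acts as a logical AND of the initial value and all routed updates.
    for (std::size_t i = 0; i < indices.size(); ++i) {
        data[indices[i]] = std::min(data[indices[i]], updates[i]);
    }
    // data is now {0, 0, 0, 1, 0, 0}, matching the expected vector in the test.
}

The *_exclusive variants pass use_init_val = false, so the destination value is left out and only the routed updates are combined, presumably seeded with the neutral value from reduction_neutral_value in the first file. In evaluate_static_scatter_elements_update_boolean_max_exclusive, for example, output[0] is the maximum over the single routed update updates[0] = 0 and therefore ends up 0 even though data[0] = 1.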
