Skip to content

Commit

Permalink
Merge pull request #706 from kopylovanat/atomic-ref-std
Browse files Browse the repository at this point in the history
Add tests for sycl::atomic_ref atomicity with respect to host code
  • Loading branch information
bader authored Jun 22, 2023
2 parents 863dcdf + f6ad2e2 commit 9d4f9f9
Show file tree
Hide file tree
Showing 3 changed files with 122 additions and 0 deletions.
47 changes: 47 additions & 0 deletions tests/atomic_ref_stress/atomic_ref_stress_atomic64.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -142,4 +142,51 @@ DISABLED_FOR_TEST_CASE(ComputeCpp, hipSYCL)
atomic_ref_stress_test::run_ordering<double>{}("double");
});

DISABLED_FOR_TEST_CASE(ComputeCpp, hipSYCL)
("sycl::atomic_ref atomicity with respect to atomic operations in host code. "
 "long long type",
 "[atomic_ref_stress]")({
#ifdef __cpp_lib_atomic_ref
  auto queue = once_per_unit::get_queue();
  const auto device = queue.get_device();
  // 64-bit atomics and host-accessible shared USM are both prerequisites
  // for concurrently incrementing the same location from host and device.
  if (!device.has(sycl::aspect::atomic64))
    SKIP(
        "Device does not support atomic64 operations. "
        "Skipping the test case.");
  if (!device.has(sycl::aspect::usm_atomic_shared_allocations))
    SKIP(
        "Device does not support usm_atomic_shared_allocations. "
        "Skipping the test case.");

  atomic_ref_stress_test::run_atomicity_with_host_code<long long>{}(
      "long long");
#else
  // std::atomic_ref is needed on the host side of the test.
  SKIP("std::atomic_ref is not available");
#endif
});

DISABLED_FOR_TEST_CASE(ComputeCpp, hipSYCL)
("sycl::atomic_ref atomicity with respect to atomic operations in host code. "
 "double type",
 "[atomic_ref_stress]")({
#ifdef __cpp_lib_atomic_ref
  auto queue = once_per_unit::get_queue();
  const auto device = queue.get_device();
  // double needs fp64 support in addition to 64-bit atomics and
  // host-accessible shared USM allocations.
  if (!device.has(sycl::aspect::atomic64))
    SKIP(
        "Device does not support atomic64 operations. "
        "Skipping the test case.");
  if (!device.has(sycl::aspect::fp64))
    SKIP(
        "Device does not support fp64 operations. "
        "Skipping the test case.");
  if (!device.has(sycl::aspect::usm_atomic_shared_allocations))
    SKIP(
        "Device does not support usm_atomic_shared_allocations. "
        "Skipping the test case.");

  atomic_ref_stress_test::run_atomicity_with_host_code<double>{}("double");
#else
  // std::atomic_ref is needed on the host side of the test.
  SKIP("std::atomic_ref is not available");
#endif
});

} // namespace atomic_ref_stress_test_atomic64
57 changes: 57 additions & 0 deletions tests/atomic_ref_stress/atomic_ref_stress_common.h
Original file line number Diff line number Diff line change
Expand Up @@ -245,7 +245,48 @@ class ordering {
CHECK(std::all_of(res.cbegin(), res.cend(), [=](T i) { return i; }));
}
};
#ifdef __cpp_lib_atomic_ref
/// Checks that sycl::atomic_ref operations performed by a kernel on a USM
/// shared allocation are atomic with respect to concurrent std::atomic_ref
/// operations performed on the same location by host code.
///
/// @tparam T             Value type under test (integral or floating point).
/// @tparam MemoryOrderT  value_pack element carrying a sycl::memory_order.
/// @tparam AddressSpaceT value_pack element carrying an address space.
template <typename T, typename MemoryOrderT, typename AddressSpaceT>
class atomicity_with_host_code {
  static constexpr sycl::memory_order MemoryOrder = MemoryOrderT::value;
  static constexpr sycl::access::address_space AddressSpace =
      AddressSpaceT::value;

 public:
  void operator()(const std::string& type_name,
                  const std::string& memory_order_name,
                  const std::string& memory_scope_name,
                  const std::string& address_space_name) {
    INFO(atomic_ref::tests::common::get_section_name(
        type_name, memory_order_name, memory_scope_name, address_space_name,
        "atomicity_with_host_code"));
    auto queue = once_per_unit::get_queue();
    if (!atomic_ref::tests::common::memory_order_is_supported(queue,
                                                              MemoryOrder))
      return;
    // Host/device coherence of atomics requires system-wide memory scope.
    if (!atomic_ref::tests::common::memory_scope_is_supported(
            queue, sycl::memory_scope::system))
      return;
    const size_t size =
        queue.get_device().get_info<sycl::info::device::max_work_group_size>();
    const size_t count = size * 2;
    // Shared USM so both host and device can operate on the same counter.
    T* pval = sycl::malloc_shared<T>(1, queue);
    *pval = 0;
    std::atomic_ref<T> a_host{*pval};
    auto event = queue.submit([&](sycl::handler& cgh) {
      cgh.parallel_for({size}, [=](auto i) {
        sycl::atomic_ref<T, MemoryOrder, sycl::memory_scope::system,
                         AddressSpace>
            a_dev{*pval};
        // Use += instead of ++: the floating-point specializations of both
        // sycl::atomic_ref and std::atomic_ref provide fetch_add/operator+=
        // but no operator++, so ++ would not compile for float/double.
        a_dev += T{1};
      });
    });
    // Increment from the host concurrently with the running kernel.
    // size_t index avoids a signed/unsigned comparison with `count`.
    for (size_t i = 0; i < count; i++) a_host += T{1};
    event.wait();
    // Device contributes `size` increments, host contributes `count`;
    // with full atomicity no increment may be lost.
    CHECK(*pval == size + count);
    // Release the USM allocation (previously leaked).
    sycl::free(pval, queue);
  }
};
#endif
template <typename T>
struct run_atomicity_device_scope {
void operator()(const std::string &type_name) {
Expand Down Expand Up @@ -322,6 +363,22 @@ struct run_ordering {
address_spaces, type_name);
}
};
#ifdef __cpp_lib_atomic_ref
/// Runs atomicity_with_host_code<T> for every combination of the supported
/// memory orders and address spaces.
template <typename T>
struct run_atomicity_with_host_code {
  void operator()(const std::string& type_name) {
    // Address spaces a USM shared pointer may be decorated with.
    const auto spaces = value_pack<
        sycl::access::address_space, sycl::access::address_space::global_space,
        sycl::access::address_space::generic_space>::generate_named();
    // Memory orders exercised by the stress test.
    const auto orders =
        value_pack<sycl::memory_order, sycl::memory_order::relaxed,
                   sycl::memory_order::acq_rel,
                   sycl::memory_order::seq_cst>::generate_named();

    for_all_combinations<atomicity_with_host_code, T>(orders, spaces,
                                                      type_name);
  }
};
#endif
} // namespace atomic_ref_stress_test
#endif // SYCL_CTS_ATOMIC_REF_STRESS_TEST_H
18 changes: 18 additions & 0 deletions tests/atomic_ref_stress/atomic_ref_stress_core.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -60,4 +60,22 @@ DISABLED_FOR_TEST_CASE(ComputeCpp, hipSYCL)
for_all_types<atomic_ref_stress_test::run_ordering>(type_pack);
});

DISABLED_FOR_TEST_CASE(ComputeCpp, hipSYCL)
("sycl::atomic_ref atomicity with respect to atomic operations in host code. "
 "core types",
 "[atomic_ref_stress]")({
#ifdef __cpp_lib_atomic_ref
  auto queue = once_per_unit::get_queue();
  // Host-visible shared USM is required so host and device can update the
  // same atomic location concurrently.
  if (!queue.get_device().has(sycl::aspect::usm_atomic_shared_allocations))
    SKIP(
        "Device does not support usm_atomic_shared_allocations. "
        "Skipping the test case.");
  const auto types = named_type_pack<int, float>::generate("int", "float");
  for_all_types<atomic_ref_stress_test::run_atomicity_with_host_code>(types);
#else
  // std::atomic_ref is needed on the host side of the test.
  SKIP("std::atomic_ref is not available");
#endif
});

} // namespace atomic_ref_stress_test_core

0 comments on commit 9d4f9f9

Please sign in to comment.