diff --git a/tests/atomic_ref_stress/atomic_ref_stress_atomic64.cpp b/tests/atomic_ref_stress/atomic_ref_stress_atomic64.cpp
index 873304bc7..08ef358bc 100644
--- a/tests/atomic_ref_stress/atomic_ref_stress_atomic64.cpp
+++ b/tests/atomic_ref_stress/atomic_ref_stress_atomic64.cpp
@@ -142,4 +142,51 @@ DISABLED_FOR_TEST_CASE(ComputeCpp, hipSYCL)
   atomic_ref_stress_test::run_ordering<double>{}("double");
 });
 
+DISABLED_FOR_TEST_CASE(ComputeCpp, hipSYCL)
+("sycl::atomic_ref atomicity with respect to atomic operations in host code. "
+ "long long type",
+ "[atomic_ref_stress]")({
+#ifdef __cpp_lib_atomic_ref
+  auto queue = once_per_unit::get_queue();
+  if (!queue.get_device().has(sycl::aspect::atomic64))
+    SKIP(
+        "Device does not support atomic64 operations. "
+        "Skipping the test case.");
+  if (!queue.get_device().has(sycl::aspect::usm_atomic_shared_allocations))
+    SKIP(
+        "Device does not support usm_atomic_shared_allocations. "
+        "Skipping the test case.");
+
+  atomic_ref_stress_test::run_atomicity_with_host_code<long long>{}(
+      "long long");
+#else
+  SKIP("std::atomic_ref is not available");
+#endif
+});
+
+DISABLED_FOR_TEST_CASE(ComputeCpp, hipSYCL)
+("sycl::atomic_ref atomicity with respect to atomic operations in host code. "
+ "double type",
+ "[atomic_ref_stress]")({
+#ifdef __cpp_lib_atomic_ref
+  auto queue = once_per_unit::get_queue();
+  if (!queue.get_device().has(sycl::aspect::atomic64))
+    SKIP(
+        "Device does not support atomic64 operations. "
+        "Skipping the test case.");
+  if (!queue.get_device().has(sycl::aspect::fp64))
+    SKIP(
+        "Device does not support fp64 operations. "
+        "Skipping the test case.");
+  if (!queue.get_device().has(sycl::aspect::usm_atomic_shared_allocations))
+    SKIP(
+        "Device does not support usm_atomic_shared_allocations. "
+        "Skipping the test case.");
+
+  atomic_ref_stress_test::run_atomicity_with_host_code<double>{}("double");
+#else
+  SKIP("std::atomic_ref is not available");
+#endif
+});
+
 } // namespace atomic_ref_stress_test_atomic64
diff --git a/tests/atomic_ref_stress/atomic_ref_stress_common.h b/tests/atomic_ref_stress/atomic_ref_stress_common.h
index dc7e0d5eb..f927d02ae 100644
--- a/tests/atomic_ref_stress/atomic_ref_stress_common.h
+++ b/tests/atomic_ref_stress/atomic_ref_stress_common.h
@@ -245,7 +245,48 @@ class ordering {
     CHECK(std::all_of(res.cbegin(), res.cend(), [=](T i) { return i; }));
   }
 };
+#ifdef __cpp_lib_atomic_ref
+template <typename T, typename MemoryOrderT, typename AddressSpaceT>
+class atomicity_with_host_code {
+  static constexpr sycl::memory_order MemoryOrder = MemoryOrderT::value;
+  static constexpr sycl::access::address_space AddressSpace =
+      AddressSpaceT::value;
+
+ public:
+  void operator()(const std::string& type_name,
+                  const std::string& memory_order_name,
+                  const std::string& memory_scope_name,
+                  const std::string& address_space_name) {
+    INFO(atomic_ref::tests::common::get_section_name(
+        type_name, memory_order_name, memory_scope_name, address_space_name,
+        "atomicity_with_host_code"));
+    auto queue = once_per_unit::get_queue();
+    if (!atomic_ref::tests::common::memory_order_is_supported(queue,
+                                                              MemoryOrder))
+      return;
+    if (!atomic_ref::tests::common::memory_scope_is_supported(
+            queue, sycl::memory_scope::system))
+      return;
+    const size_t size =
+        queue.get_device().get_info<sycl::info::device::max_compute_units>();
+    const size_t count = size * 2;
+    T* pval = sycl::malloc_shared<T>(1, queue);
+    *pval = 0;
+    std::atomic_ref<T> a_host{*pval};
+    auto event = queue.submit([&](sycl::handler& cgh) {
+      cgh.parallel_for({size}, [=](auto i) {
+        sycl::atomic_ref<T, MemoryOrder, sycl::memory_scope::system, AddressSpace>
+            a_dev{*pval};
+        a_dev++;
+      });
+    });
+    for (int i = 0; i < count; i++) a_host++;
+    event.wait();
+    CHECK(*pval == size + count);
+  }
+};
+#endif
 
 template <typename T>
 struct run_atomicity_device_scope {
   void operator()(const std::string &type_name) {
@@ -322,6 +363,22 @@ struct run_ordering {
                                       address_spaces, type_name);
   }
 };
+#ifdef __cpp_lib_atomic_ref
+template <typename T>
+struct run_atomicity_with_host_code {
+  void operator()(const std::string& type_name) {
+    const auto memory_orders =
+        value_pack<sycl::memory_order, sycl::memory_order::relaxed,
+                   sycl::memory_order::acq_rel,
+                   sycl::memory_order::seq_cst>::generate_named();
+    const auto address_spaces = value_pack<
+        sycl::access::address_space, sycl::access::address_space::global_space,
+        sycl::access::address_space::generic_space>::generate_named();
+    for_all_combinations<atomicity_with_host_code, T>(
+        memory_orders, address_spaces, type_name);
+  }
+};
+#endif
 
 } // namespace atomic_ref_stress_test
 
 #endif  // SYCL_CTS_ATOMIC_REF_STRESS_TEST_H
diff --git a/tests/atomic_ref_stress/atomic_ref_stress_core.cpp b/tests/atomic_ref_stress/atomic_ref_stress_core.cpp
index 301b83cba..b04868af7 100644
--- a/tests/atomic_ref_stress/atomic_ref_stress_core.cpp
+++ b/tests/atomic_ref_stress/atomic_ref_stress_core.cpp
@@ -60,4 +60,22 @@ DISABLED_FOR_TEST_CASE(ComputeCpp, hipSYCL)
   for_all_types<atomic_ref_stress_test::run_ordering>(type_pack);
 });
 
+DISABLED_FOR_TEST_CASE(ComputeCpp, hipSYCL)
+("sycl::atomic_ref atomicity with respect to atomic operations in host code. "
+ "core types",
+ "[atomic_ref_stress]")({
+#ifdef __cpp_lib_atomic_ref
+  auto queue = once_per_unit::get_queue();
+  if (!queue.get_device().has(sycl::aspect::usm_atomic_shared_allocations))
+    SKIP(
+        "Device does not support usm_atomic_shared_allocations. "
+        "Skipping the test case.");
+  const auto type_pack = named_type_pack<int, float>::generate("int", "float");
+  for_all_types<atomic_ref_stress_test::run_atomicity_with_host_code>(
+      type_pack);
+#else
+  SKIP("std::atomic_ref is not available");
+#endif
+});
+
 } // namespace atomic_ref_stress_test_core