diff --git a/source/common/unified_malloc_framework/include/umf/memory_pool.h b/source/common/unified_malloc_framework/include/umf/memory_pool.h
index 8c3c2241b5..8fb9e86d31 100644
--- a/source/common/unified_malloc_framework/include/umf/memory_pool.h
+++ b/source/common/unified_malloc_framework/include/umf/memory_pool.h
@@ -129,7 +129,8 @@ enum umf_result_t umfFree(void *ptr);
 enum umf_result_t umfPoolGetLastAllocationError(umf_memory_pool_handle_t hPool);
 
 ///
-/// \brief Retrieve memory pool associated with a given ptr.
+/// \brief Retrieve memory pool associated with a given ptr. Only memory allocated
+/// through a memory provider is tracked.
 /// \param ptr pointer to memory belonging to a memory pool
 /// \return Handle to a memory pool that contains ptr or NULL if pointer does not belong to any UMF pool.
 umf_memory_pool_handle_t umfPoolByPtr(const void *ptr);
diff --git a/test/unified_malloc_framework/common/pool.hpp b/test/unified_malloc_framework/common/pool.hpp
index 8eb35ad45c..e6e74afb22 100644
--- a/test/unified_malloc_framework/common/pool.hpp
+++ b/test/unified_malloc_framework/common/pool.hpp
@@ -31,6 +31,46 @@ auto wrapPoolUnique(umf_memory_pool_handle_t hPool) {
     return umf::pool_unique_handle_t(hPool, &umfPoolDestroy);
 }
 
+bool isReallocSupported(umf_memory_pool_handle_t hPool) {
+    static constexpr size_t allocSize = 1;
+    bool supported;
+    auto *ptr = umfPoolMalloc(hPool, allocSize);
+    auto *new_ptr = umfPoolRealloc(hPool, ptr, allocSize * 2);
+
+    if (new_ptr) {
+        supported = true;
+    } else if (umfPoolGetLastAllocationError(hPool) ==
+               UMF_RESULT_ERROR_NOT_SUPPORTED) {
+        supported = false;
+    } else {
+        throw std::runtime_error("realloc failed with unexpected error");
+    }
+
+    umfPoolFree(hPool, new_ptr);
+
+    return supported;
+}
+
+bool isCallocSupported(umf_memory_pool_handle_t hPool) {
+    static constexpr size_t num = 1;
+    static constexpr size_t size = sizeof(int);
+    bool supported;
+    auto *ptr = umfPoolCalloc(hPool, num, size);
+
+    if (ptr) {
+        supported = true;
+    } else if (umfPoolGetLastAllocationError(hPool) ==
+               UMF_RESULT_ERROR_NOT_SUPPORTED) {
+        supported = false;
+    } else {
+        throw std::runtime_error("calloc failed with unexpected error");
+    }
+
+    umfPoolFree(hPool, ptr);
+
+    return supported;
+}
+
 struct pool_base {
     umf_result_t initialize(umf_memory_provider_handle_t *, size_t) noexcept {
         return UMF_RESULT_SUCCESS;
@@ -97,6 +137,8 @@ struct proxy_pool : public pool_base {
     }
     void *realloc(void *ptr, size_t size) noexcept {
         // TODO: not supported
+        umf::getPoolLastStatusRef<proxy_pool>() =
+            UMF_RESULT_ERROR_NOT_SUPPORTED;
         return nullptr;
     }
     void *aligned_malloc(size_t size, size_t alignment) noexcept {
@@ -116,6 +158,9 @@
         EXPECT_EQ_NOEXCEPT(ret, UMF_RESULT_SUCCESS);
         return ret;
     }
+    enum umf_result_t get_last_allocation_error() {
+        return umf::getPoolLastStatusRef<proxy_pool>();
+    }
 
     umf_memory_provider_handle_t provider;
 };
diff --git a/test/unified_malloc_framework/memoryPool.hpp b/test/unified_malloc_framework/memoryPool.hpp
index ab9cc3661c..facaa7d73d 100644
--- a/test/unified_malloc_framework/memoryPool.hpp
+++ b/test/unified_malloc_framework/memoryPool.hpp
@@ -5,9 +5,12 @@
 #include "pool.hpp"
 
+#include <cstdlib>
 #include <cstring>
 #include <functional>
 #include <string>
+#include <thread>
+#include <vector>
 
 #ifndef UMF_TEST_MEMORY_POOL_OPS_HPP
 #define UMF_TEST_MEMORY_POOL_OPS_HPP
 
@@ -30,6 +33,7 @@
     }
 
     umf::pool_unique_handle_t pool;
+    static constexpr int NTHREADS = 5;
 };
 
 struct umfMultiPoolTest : umfPoolTest {
@@ -57,21 +61,43 @@ TEST_P(umfPoolTest, allocFree) {
     umfPoolFree(pool.get(), ptr);
 }
 
-TEST_P(umfPoolTest, pow2AlignedAlloc) {
-#ifdef _WIN32
-    // TODO: implement support for windows
-    GTEST_SKIP();
-#endif
+TEST_P(umfPoolTest, reallocFree) {
+    if (!umf_test::isReallocSupported(pool.get())) {
+        GTEST_SKIP();
+    }
+    static constexpr size_t allocSize = 64;
+    static constexpr size_t multiplier = 3;
+    auto *ptr = umfPoolMalloc(pool.get(), allocSize);
+    ASSERT_NE(ptr, nullptr);
+    auto *new_ptr = umfPoolRealloc(pool.get(), ptr, allocSize * multiplier);
+    ASSERT_NE(new_ptr, nullptr);
+    std::memset(new_ptr, 0, allocSize * multiplier);
+    umfPoolFree(pool.get(), new_ptr);
+}
 
+TEST_P(umfPoolTest, callocFree) {
+    if (!umf_test::isCallocSupported(pool.get())) {
+        GTEST_SKIP();
+    }
+    static constexpr size_t num = 10;
+    static constexpr size_t size = sizeof(int);
+    auto *ptr = umfPoolCalloc(pool.get(), num, size);
+    ASSERT_NE(ptr, nullptr);
+    for (size_t i = 0; i < num; ++i) {
+        ASSERT_EQ(((int *)ptr)[i], 0);
+    }
+    umfPoolFree(pool.get(), ptr);
+}
+
+void pow2AlignedAllocHelper(umf_memory_pool_handle_t pool) {
     static constexpr size_t maxAlignment = (1u << 22);
     static constexpr size_t numAllocs = 4;
-
     for (size_t alignment = 1; alignment <= maxAlignment; alignment <<= 1) {
        std::cout << alignment << std::endl;
        std::vector<void *> allocs;
 
        for (size_t alloc = 0; alloc < numAllocs; alloc++) {
-            auto *ptr = umfPoolAlignedMalloc(pool.get(), alignment, alignment);
+            auto *ptr = umfPoolAlignedMalloc(pool, alignment, alignment);
            ASSERT_NE(ptr, nullptr);
            ASSERT_TRUE(reinterpret_cast<uintptr_t>(ptr) % alignment == 0);
            std::memset(ptr, 0, alignment);
@@ -79,11 +105,152 @@ TEST_P(umfPoolTest, pow2AlignedAlloc) {
        }
 
        for (auto &ptr : allocs) {
-            umfPoolFree(pool.get(), ptr);
+            umfPoolFree(pool, ptr);
        }
     }
 }
 
+TEST_P(umfPoolTest, pow2AlignedAlloc) {
+#ifdef _WIN32
+    // TODO: implement support for windows
+    GTEST_SKIP();
+#endif
+    pow2AlignedAllocHelper(pool.get());
+}
+
+TEST_P(umfPoolTest, freeNullptr) {
+    void *ptr = nullptr;
+    auto ret = umfPoolFree(pool.get(), ptr);
+    ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
+}
+
+TEST_P(umfPoolTest, multiThreadedMallocFree) {
+    static constexpr size_t allocSize = 64;
+    auto poolMalloc = [](size_t allocSize, umf_memory_pool_handle_t pool) {
+        std::vector<void *> allocations;
+        for (size_t i = 0; i <= 10; ++i) {
+            allocations.emplace_back(umfPoolMalloc(pool, allocSize));
+            ASSERT_NE(allocations.back(), nullptr);
+        }
+
+        for (auto allocation : allocations) {
+            umfPoolFree(pool, allocation);
+        }
+    };
+
+    std::vector<std::thread> threads;
+    for (int i = 0; i < NTHREADS; i++) {
+        threads.emplace_back(poolMalloc, allocSize, pool.get());
+        ;
+    }
+
+    for (auto &thread : threads) {
+        thread.join();
+    }
+}
+
+TEST_P(umfPoolTest, multiThreadedpow2AlignedAlloc) {
+#ifdef _WIN32
+    // TODO: implement support for windows
+    GTEST_SKIP();
+#endif
+
+    auto poolpow2AlignedAlloc = [](umf_memory_pool_handle_t pool) {
+        pow2AlignedAllocHelper(pool);
+    };
+
+    std::vector<std::thread> threads;
+    for (int i = 0; i < NTHREADS; i++) {
+        threads.emplace_back(poolpow2AlignedAlloc, pool.get());
+    }
+
+    for (auto &thread : threads) {
+        thread.join();
+    }
+}
+
+TEST_P(umfPoolTest, multiThreadedReallocFree) {
+    if (!umf_test::isReallocSupported(pool.get())) {
+        GTEST_SKIP();
+    }
+    static constexpr size_t allocSize = 64;
+    static constexpr size_t multiplier = 3;
+    auto poolRealloc = [](size_t allocSize, size_t multiplier,
+                          umf_memory_pool_handle_t pool) {
+        std::vector<void *> allocations;
+        for (size_t i = 0; i <= 10; ++i) {
+            allocations.emplace_back(umfPoolMalloc(pool, allocSize));
+            ASSERT_NE(allocations.back(), nullptr);
+        }
+
+        for (auto allocation : allocations) {
+            auto *ptr =
+                umfPoolRealloc(pool, allocation, allocSize * multiplier);
+            umfPoolFree(pool, ptr);
+        }
+    };
+
+    std::vector<std::thread> threads;
+    for (int i = 0; i < NTHREADS; i++) {
+        threads.emplace_back(poolRealloc, allocSize, multiplier, pool.get());
+    }
+
+    for (auto &thread : threads) {
+        thread.join();
+    }
+}
+
+TEST_P(umfPoolTest, multiThreadedCallocFree) {
+    if (!umf_test::isCallocSupported(pool.get())) {
+        GTEST_SKIP();
+    }
+    static constexpr size_t num = 10;
+    auto poolCalloc = [](size_t num, size_t size,
+                         umf_memory_pool_handle_t pool) {
+        std::vector<void *> allocations;
+        for (size_t i = 0; i <= 10; ++i) {
+            allocations.emplace_back(umfPoolCalloc(pool, num, size));
+            ASSERT_NE(allocations.back(), nullptr);
+        }
+
+        for (auto allocation : allocations) {
+            umfPoolFree(pool, allocation);
+        }
+    };
+
+    std::vector<std::thread> threads;
+    for (int i = 0; i < NTHREADS; i++) {
+        threads.emplace_back(poolCalloc, num, sizeof(int), pool.get());
+    }
+
+    for (auto &thread : threads) {
+        thread.join();
+    }
+}
+
+TEST_P(umfPoolTest, multiThreadedMallocFreeRandomSizes) {
+    auto poolMalloc = [](size_t allocSize, umf_memory_pool_handle_t pool) {
+        std::vector<void *> allocations;
+        for (size_t i = 0; i <= 10; ++i) {
+            allocations.emplace_back(umfPoolMalloc(pool, allocSize));
+            ASSERT_NE(allocations.back(), nullptr);
+        }
+
+        for (auto allocation : allocations) {
+            umfPoolFree(pool, allocation);
+        }
+    };
+
+    std::vector<std::thread> threads;
+    for (int i = 0; i < NTHREADS; i++) {
+        threads.emplace_back(poolMalloc, rand() % 64 + 1, pool.get());
+    }
+
+    for (auto &thread : threads) {
+        thread.join();
+    }
+}
+
 // TODO: add similar tests for realloc/aligned_alloc, etc.
 // TODO: add multithreaded tests
 TEST_P(umfMultiPoolTest, memoryTracking) {
diff --git a/test/unified_malloc_framework/memoryPoolAPI.cpp b/test/unified_malloc_framework/memoryPoolAPI.cpp
index d7fbe8efe2..cfbe068719 100644
--- a/test/unified_malloc_framework/memoryPoolAPI.cpp
+++ b/test/unified_malloc_framework/memoryPoolAPI.cpp
@@ -82,7 +82,7 @@ TEST_F(test, memoryPoolTrace) {
     ASSERT_EQ(providerCalls.size(), provider_call_count);
 
     ret = umfPoolGetLastAllocationError(tracingPool.get());
-    ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
+    ASSERT_EQ(ret, UMF_RESULT_ERROR_NOT_SUPPORTED);
     ASSERT_EQ(poolCalls["get_last_native_error"], 1);
     ASSERT_EQ(poolCalls.size(), ++pool_call_count);