diff --git a/source/common/unified_malloc_framework/include/umf/memory_pool.h b/source/common/unified_malloc_framework/include/umf/memory_pool.h
index 294ce6a46c..3276bee0ec 100644
--- a/source/common/unified_malloc_framework/include/umf/memory_pool.h
+++ b/source/common/unified_malloc_framework/include/umf/memory_pool.h
@@ -123,7 +123,8 @@ void umfFree(void *ptr);
 enum umf_result_t umfPoolGetLastAllocationError(umf_memory_pool_handle_t hPool);
 
 ///
-/// \brief Retrieve memory pool associated with a given ptr.
+/// \brief Retrieve memory pool associated with a given ptr. Only memory allocated
+/// with the usage of a memory provider is being tracked.
/// \param ptr pointer to memory belonging to a memory pool
 /// \return Handle to a memory pool that contains ptr or NULL if pointer does not belong to any UMF pool.
 umf_memory_pool_handle_t umfPoolByPtr(const void *ptr);
diff --git a/test/unified_malloc_framework/common/pool.hpp b/test/unified_malloc_framework/common/pool.hpp
index 5be080ef33..729e7f2eb4 100644
--- a/test/unified_malloc_framework/common/pool.hpp
+++ b/test/unified_malloc_framework/common/pool.hpp
@@ -94,6 +94,8 @@ struct proxy_pool : public pool_base {
     }
     void *realloc(void *ptr, size_t size) noexcept {
         // TODO: not supported
+        umf::getPoolLastStatusRef<proxy_pool>() =
+            UMF_RESULT_ERROR_NOT_SUPPORTED;
         return nullptr;
     }
     void *aligned_malloc(size_t size, size_t alignment) noexcept {
@@ -112,6 +114,9 @@ struct proxy_pool : public pool_base {
         auto ret = umfMemoryProviderFree(provider, ptr, 0);
         EXPECT_EQ_NOEXCEPT(ret, UMF_RESULT_SUCCESS);
     }
+    enum umf_result_t get_last_allocation_error() {
+        return umf::getPoolLastStatusRef<proxy_pool>();
+    }
 
     umf_memory_provider_handle_t provider;
 };
diff --git a/test/unified_malloc_framework/memoryPool.hpp b/test/unified_malloc_framework/memoryPool.hpp
index ab9cc3661c..224d027228 100644
--- a/test/unified_malloc_framework/memoryPool.hpp
+++ b/test/unified_malloc_framework/memoryPool.hpp
@@ -5,9 +5,12 @@
 #include "pool.hpp"
 
+#include <array>
 #include <cstring>
 #include <functional>
 #include <random>
+#include <string>
+#include <thread>
 
 #ifndef UMF_TEST_MEMORY_POOL_OPS_HPP
 #define UMF_TEST_MEMORY_POOL_OPS_HPP
 
@@ -30,6 +33,7 @@ struct umfPoolTest : umf_test::test,
     }
 
     umf::pool_unique_handle_t pool;
+    static constexpr int NTHREADS = 5;
 };
 
 struct umfMultiPoolTest : umfPoolTest {
@@ -57,6 +61,33 @@ TEST_P(umfPoolTest, allocFree) {
     umfPoolFree(pool.get(), ptr);
 }
 
+TEST_P(umfPoolTest, reallocFree) {
+    static constexpr size_t allocSize = 64;
+    static constexpr size_t multiplier = 3;
+    auto *ptr = umfPoolMalloc(pool.get(), allocSize);
+    ASSERT_NE(ptr, nullptr);
+    auto *new_ptr = umfPoolRealloc(pool.get(), ptr, allocSize * multiplier);
+    umf_result_t ret = umfPoolGetLastAllocationError(pool.get());
+    if (ret == UMF_RESULT_ERROR_NOT_SUPPORTED) {
+        GTEST_SKIP();
+    }
+    ASSERT_NE(new_ptr, nullptr);
+    std::memset(new_ptr, 0, allocSize * multiplier);
+    umfPoolFree(pool.get(), new_ptr);
+}
+
+TEST_P(umfPoolTest, callocFree) {
+    static constexpr size_t num = 10;
+    static constexpr size_t size = sizeof(int);
+    auto *ptr = umfPoolCalloc(pool.get(), num, size);
+    umf_result_t ret = umfPoolGetLastAllocationError(pool.get());
+    if (ret == UMF_RESULT_ERROR_NOT_SUPPORTED) {
+        GTEST_SKIP();
+    }
+    ASSERT_NE(ptr, nullptr);
+    umfPoolFree(pool.get(), ptr);
+}
+
 TEST_P(umfPoolTest, pow2AlignedAlloc) {
 #ifdef _WIN32
     // TODO: implement support for windows
@@ -84,6 +115,208 @@ TEST_P(umfPoolTest, pow2AlignedAlloc) {
     }
 }
 
+TEST_P(umfPoolTest, freeNullptr) {
+    void *ptr = nullptr;
+    umfPoolFree(pool.get(), ptr);
+}
+
+TEST_P(umfPoolTest, allocOutOfMem) {
+    static constexpr size_t numProviders = 1;
+    size_t numProvidersRet = 0;
+    std::array<umf_memory_provider_handle_t, numProviders> retProviders;
+
+    auto ret = umfPoolGetMemoryProviders(pool.get(), numProviders,
+                                         retProviders.data(), &numProvidersRet);
+    ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
+    ASSERT_EQ(numProvidersRet, numProviders);
+
+    for (auto provider : retProviders) {
+        if (std::string(umfMemoryProviderGetName(provider)) ==
+            std::string("null")) {
+            GTEST_SKIP();
+        }
+    }
+
+    // test whether memory is kept in a pool accordingly to MaxSize
+    static constexpr size_t allocSize = 16;
+    // MaxSize equals 16 * 1024 * 1024;
+    static constexpr size_t maxAllocs = 1024 * 1024;
+
+    // allocate until oom
+    void *ptr = nullptr;
+    std::vector<void *> allocations;
+
+    for (size_t i = 0; i <= maxAllocs; ++i) {
+        allocations.emplace_back(umfPoolMalloc(pool.get(), allocSize));
+        ASSERT_NE(allocations.back(), nullptr);
+    }
+
+    ASSERT_EQ(umfPoolByPtr(allocations.back()), pool.get());
+
+    // free some memory
+    umfPoolFree(pool.get(), allocations.back());
+
+    allocations.pop_back();
+
+    ptr = umfPoolMalloc(pool.get(), allocSize);
+    ASSERT_NE(ptr, nullptr);
+
+    umfPoolFree(pool.get(), ptr);
+
+    for (auto allocation : allocations) {
+        umfPoolFree(pool.get(), allocation);
+    }
+}
+
+TEST_P(umfPoolTest, multiThreadedMallocFree) {
+    static constexpr size_t allocSize = 64;
+    auto poolMalloc = [](size_t allocSize, umf_memory_pool_handle_t pool) {
+        std::vector<void *> allocations;
+        for (size_t i = 0; i <= 10; ++i) {
+            allocations.emplace_back(umfPoolMalloc(pool, allocSize));
+            ASSERT_NE(allocations.back(), nullptr);
+        }
+
+        for (auto allocation : allocations) {
+            umfPoolFree(pool, allocation);
+        }
+    };
+
+    std::vector<std::thread> threads;
+    for (int i = 0; i < NTHREADS; i++) {
+        threads.push_back(std::thread(poolMalloc, allocSize, pool.get()));
+    }
+
+    for (auto &thread : threads) {
+        thread.join();
+    }
+}
+
+TEST_P(umfPoolTest, multiThreadedpow2AlignedAlloc) {
+#ifdef _WIN32
+    // TODO: implement support for windows
+    GTEST_SKIP();
+#endif
+
+    static constexpr size_t maxAlignment = (1u << 22);
+    static constexpr size_t numAllocs = 4;
+    auto poolPow2AlignedAlloc = [](size_t maxAlignment, size_t numAllocs,
+                                   umf_memory_pool_handle_t pool) {
+        for (size_t alignment = 1; alignment <= maxAlignment; alignment <<= 1) {
+            std::cout << alignment << std::endl;
+            std::vector<void *> allocs;
+
+            for (size_t alloc = 0; alloc < numAllocs; alloc++) {
+                auto *ptr = umfPoolAlignedMalloc(pool, alignment, alignment);
+                ASSERT_NE(ptr, nullptr);
+                ASSERT_TRUE(reinterpret_cast<uintptr_t>(ptr) % alignment == 0);
+                std::memset(ptr, 0, alignment);
+                allocs.push_back(ptr);
+            }
+
+            for (auto &ptr : allocs) {
+                umfPoolFree(pool, ptr);
+            }
+        }
+    };
+
+    std::vector<std::thread> threads;
+    for (int i = 0; i < NTHREADS; i++) {
+        threads.push_back(std::thread(poolPow2AlignedAlloc, maxAlignment,
+                                      numAllocs, pool.get()));
+    }
+
+    for (auto &thread : threads) {
+        thread.join();
+    }
+}
+
+TEST_P(umfPoolTest, multiThreadedReallocFree) {
+    static constexpr size_t allocSize = 64;
+    static constexpr size_t multiplier = 3;
+    auto poolRealloc = [](size_t allocSize, size_t multiplier,
+                          umf_memory_pool_handle_t pool) {
+        std::vector<void *> allocations;
+        for (size_t i = 0; i <= 10; ++i) {
+            allocations.emplace_back(umfPoolMalloc(pool, allocSize));
+            ASSERT_NE(allocations.back(), nullptr);
+        }
+
+        for (auto allocation : allocations) {
+            auto *ptr =
+                umfPoolRealloc(pool, allocation, allocSize * multiplier);
+            umf_result_t ret = umfPoolGetLastAllocationError(pool);
+            if (ret == UMF_RESULT_ERROR_NOT_SUPPORTED) {
+                GTEST_SKIP();
+            }
+            umfPoolFree(pool, ptr);
+        }
+    };
+
+    std::vector<std::thread> threads;
+    for (int i = 0; i < NTHREADS; i++) {
+        threads.push_back(
+            std::thread(poolRealloc, allocSize, multiplier, pool.get()));
+    }
+
+    for (auto &thread : threads) {
+        thread.join();
+    }
+}
+
+TEST_P(umfPoolTest, multiThreadedCallocFree) {
+    static constexpr size_t num = 10;
+    auto poolCalloc = [](size_t num, size_t size,
+                         umf_memory_pool_handle_t pool) {
+        std::vector<void *> allocations;
+        for (size_t i = 0; i <= 10; ++i) {
+            allocations.emplace_back(umfPoolCalloc(pool, num, size));
+            umf_result_t ret = umfPoolGetLastAllocationError(pool);
+            if (ret == UMF_RESULT_ERROR_NOT_SUPPORTED) {
+                GTEST_SKIP();
+            }
+            ASSERT_NE(allocations.back(), nullptr);
+        }
+
+        for (auto allocation : allocations) {
+            umfPoolFree(pool, allocation);
+        }
+    };
+
+    std::vector<std::thread> threads;
+    for (int i = 0; i < NTHREADS; i++) {
+        threads.push_back(
+            std::thread(poolCalloc, num, sizeof(int), pool.get()));
+    }
+
+    for (auto &thread : threads) {
+        thread.join();
+    }
+}
+
+TEST_P(umfPoolTest, multiThreadedMallocFreeRandomSizes) {
+    auto poolMalloc = [](size_t allocSize, umf_memory_pool_handle_t pool) {
+        std::vector<void *> allocations;
+        for (size_t i = 0; i <= 10; ++i) {
+            allocations.emplace_back(umfPoolMalloc(pool, allocSize));
+            ASSERT_NE(allocations.back(), nullptr);
+        }
+
+        for (auto allocation : allocations) {
+            umfPoolFree(pool, allocation);
+        }
+    };
+
+    std::vector<std::thread> threads;
+    for (int i = 0; i < NTHREADS; i++) {
+        threads.push_back(std::thread(poolMalloc, rand() % 64 + 1, pool.get()));
+    }
+
+    for (auto &thread : threads) {
+        thread.join();
+    }
+}
+
 // TODO: add similar tests for realloc/aligned_alloc, etc.
 // TODO: add multithreaded tests
 TEST_P(umfMultiPoolTest, memoryTracking) {
diff --git a/test/unified_malloc_framework/memoryPoolAPI.cpp b/test/unified_malloc_framework/memoryPoolAPI.cpp
index d7fbe8efe2..cfbe068719 100644
--- a/test/unified_malloc_framework/memoryPoolAPI.cpp
+++ b/test/unified_malloc_framework/memoryPoolAPI.cpp
@@ -82,7 +82,7 @@ TEST_F(test, memoryPoolTrace) {
     ASSERT_EQ(providerCalls.size(), provider_call_count);
 
     ret = umfPoolGetLastAllocationError(tracingPool.get());
-    ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
+    ASSERT_EQ(ret, UMF_RESULT_ERROR_NOT_SUPPORTED);
     ASSERT_EQ(poolCalls["get_last_native_error"], 1);
     ASSERT_EQ(poolCalls.size(), ++pool_call_count);