diff --git a/source/adapters/level_zero/context.cpp b/source/adapters/level_zero/context.cpp
index 2bd893b043..9f91af5ba4 100644
--- a/source/adapters/level_zero/context.cpp
+++ b/source/adapters/level_zero/context.cpp
@@ -36,7 +36,12 @@ UR_APIEXPORT ur_result_t UR_APICALL urContextCreate(
   ur_context_handle_t_ *Context =
       new ur_context_handle_t_(ZeContext, DeviceCount, Devices, true);
 
-  Context->initialize();
+  auto Ret = Context->initialize();
+  if (Ret) {
+    delete Context;
+    return Ret;
+  }
+
   *RetContext = reinterpret_cast<ur_context_handle_t>(Context);
   if (IndirectAccessTrackingEnabled) {
     std::scoped_lock<ur_shared_mutex> Lock(Platform->ContextsMutex);
@@ -178,111 +183,120 @@ UR_APIEXPORT ur_result_t UR_APICALL urContextSetExtendedDeleter(
   return UR_RESULT_ERROR_UNSUPPORTED_FEATURE;
 }
 
+// Template helper function for creating USM pools for given pool descriptor.
+template <typename P, typename... Args>
+std::pair<ur_result_t, umf::pool_unique_handle_t>
+createUMFPoolForDesc(usm::pool_descriptor &Desc, Args &&...args) {
+  umf_result_t UmfRet = UMF_RESULT_SUCCESS;
+  umf::provider_unique_handle_t MemProvider = nullptr;
+
+  switch (Desc.type) {
+  case UR_USM_TYPE_HOST: {
+    std::tie(UmfRet, MemProvider) =
+        umf::memoryProviderMakeUnique<L0HostMemoryProvider>(Desc.hContext,
+                                                            Desc.hDevice);
+    break;
+  }
+  case UR_USM_TYPE_DEVICE: {
+    std::tie(UmfRet, MemProvider) =
+        umf::memoryProviderMakeUnique<L0DeviceMemoryProvider>(Desc.hContext,
+                                                              Desc.hDevice);
+    break;
+  }
+  case UR_USM_TYPE_SHARED: {
+    if (Desc.deviceReadOnly) {
+      std::tie(UmfRet, MemProvider) =
+          umf::memoryProviderMakeUnique<L0SharedReadOnlyMemoryProvider>(
+              Desc.hContext, Desc.hDevice);
+    } else {
+      std::tie(UmfRet, MemProvider) =
+          umf::memoryProviderMakeUnique<L0SharedMemoryProvider>(Desc.hContext,
+                                                                Desc.hDevice);
+    }
+    break;
+  }
+  default:
+    UmfRet = UMF_RESULT_ERROR_INVALID_ARGUMENT;
+  }
+
+  if (UmfRet)
+    return std::pair<ur_result_t, umf::pool_unique_handle_t>{
+        umf::umf2urResult(UmfRet), nullptr};
+
+  umf::pool_unique_handle_t Pool = nullptr;
+  std::tie(UmfRet, Pool) =
+      umf::poolMakeUnique<P>({std::move(MemProvider)}, args...);
+
+  return std::pair<ur_result_t, umf::pool_unique_handle_t>{
+      umf::umf2urResult(UmfRet), std::move(Pool)};
+};
+
 ur_result_t
ur_context_handle_t_::initialize() { - // Helper lambda to create various USM allocators for a device. - // Note that the CCS devices and their respective subdevices share a - // common ze_device_handle and therefore, also share USM allocators. - auto createUSMAllocators = [this](ur_device_handle_t Device) { - auto MemProvider = umf::memoryProviderMakeUnique( - reinterpret_cast(this), Device) - .second; - DeviceMemPools.emplace( - std::piecewise_construct, std::make_tuple(Device->ZeDevice), - std::make_tuple(umf::poolMakeUnique( - {std::move(MemProvider)}, - DisjointPoolConfigInstance - .Configs[usm::DisjointPoolMemType::Device]) - .second)); - - MemProvider = umf::memoryProviderMakeUnique( - reinterpret_cast(this), Device) - .second; - SharedMemPools.emplace( - std::piecewise_construct, std::make_tuple(Device->ZeDevice), - std::make_tuple(umf::poolMakeUnique( - {std::move(MemProvider)}, - DisjointPoolConfigInstance - .Configs[usm::DisjointPoolMemType::Shared]) - .second)); - - MemProvider = umf::memoryProviderMakeUnique( - reinterpret_cast(this), Device) - .second; - SharedReadOnlyMemPools.emplace( - std::piecewise_construct, std::make_tuple(Device->ZeDevice), - std::make_tuple( - umf::poolMakeUnique( - {std::move(MemProvider)}, - DisjointPoolConfigInstance - .Configs[usm::DisjointPoolMemType::SharedReadOnly]) - .second)); - - MemProvider = umf::memoryProviderMakeUnique( - reinterpret_cast(this), Device) - .second; - DeviceMemProxyPools.emplace( - std::piecewise_construct, std::make_tuple(Device->ZeDevice), - std::make_tuple( - umf::poolMakeUnique({std::move(MemProvider)}) - .second)); - - MemProvider = umf::memoryProviderMakeUnique( - reinterpret_cast(this), Device) - .second; - SharedMemProxyPools.emplace( - std::piecewise_construct, std::make_tuple(Device->ZeDevice), - std::make_tuple( - umf::poolMakeUnique({std::move(MemProvider)}) - .second)); - - MemProvider = umf::memoryProviderMakeUnique( - reinterpret_cast(this), Device) - .second; - 
SharedReadOnlyMemProxyPools.emplace( - std::piecewise_construct, std::make_tuple(Device->ZeDevice), - std::make_tuple( - umf::poolMakeUnique({std::move(MemProvider)}) - .second)); - }; + auto Context = reinterpret_cast(this); + ur_result_t Ret; - // Recursive helper to call createUSMAllocators for all sub-devices - std::function createUSMAllocatorsRecursive; - createUSMAllocatorsRecursive = - [createUSMAllocators, - &createUSMAllocatorsRecursive](ur_device_handle_t Device) -> void { - createUSMAllocators(Device); - for (auto &SubDevice : Device->SubDevices) - createUSMAllocatorsRecursive(SubDevice); - }; + // Initialize pool managers. + std::tie(Ret, PoolManager) = + usm::pool_manager::create(); + if (Ret) { + urPrint("urContextCreate: unexpected internal error\n"); + return Ret; + } - // Create USM pool for each pair (device, context). - // - for (auto &Device : Devices) { - createUSMAllocatorsRecursive(Device); + std::tie(Ret, ProxyPoolManager) = + usm::pool_manager::create(); + if (Ret) { + urPrint("urContextCreate: unexpected internal error\n"); + return Ret; + } + + std::vector Descs; + // Create pool descriptor for every device and subdevice. + std::tie(Ret, Descs) = usm::pool_descriptor::create(nullptr, Context); + if (Ret) { + urPrint("urContextCreate: unexpected internal error\n"); + return Ret; } - // Create USM pool for host. Device and Shared USM allocations - // are device-specific. Host allocations are not device-dependent therefore - // we don't need a map with device as key. 
- auto MemProvider = umf::memoryProviderMakeUnique( - reinterpret_cast(this), nullptr) - .second; - HostMemPool = - umf::poolMakeUnique( - {std::move(MemProvider)}, - DisjointPoolConfigInstance.Configs[usm::DisjointPoolMemType::Host]) - .second; - - MemProvider = umf::memoryProviderMakeUnique( - reinterpret_cast(this), nullptr) - .second; - HostMemProxyPool = - umf::poolMakeUnique({std::move(MemProvider)}).second; - - // We may allocate memory to this root device so create allocators. - if (SingleRootDevice && - DeviceMemPools.find(SingleRootDevice->ZeDevice) == DeviceMemPools.end()) { - createUSMAllocators(SingleRootDevice); + + auto descTypeToDisjointPoolType = + [](usm::pool_descriptor &Desc) -> usm::DisjointPoolMemType { + switch (Desc.type) { + case UR_USM_TYPE_HOST: + return usm::DisjointPoolMemType::Host; + case UR_USM_TYPE_DEVICE: + return usm::DisjointPoolMemType::Device; + case UR_USM_TYPE_SHARED: + return (Desc.deviceReadOnly) ? usm::DisjointPoolMemType::SharedReadOnly + : usm::DisjointPoolMemType::Shared; + default: + // Should not be reached. + ur::unreachable(); + } + }; + + // Create USM pool for each pool descriptor and add it to pool manager. + for (auto &Desc : Descs) { + umf::pool_unique_handle_t Pool = nullptr; + auto PoolType = descTypeToDisjointPoolType(Desc); + + std::tie(Ret, Pool) = createUMFPoolForDesc( + Desc, DisjointPoolConfigInstance.Configs[PoolType]); + if (Ret) { + urPrint("urContextCreate: unexpected internal error\n"); + return Ret; + } + + PoolManager.addPool(Desc, Pool); + + umf::pool_unique_handle_t ProxyPool = nullptr; + std::tie(Ret, ProxyPool) = createUMFPoolForDesc(Desc); + if (Ret) { + urPrint("urContextCreate: unexpected internal error\n"); + return Ret; + } + + ProxyPoolManager.addPool(Desc, ProxyPool); } // Create the immediate command list to be used for initializations. 
diff --git a/source/adapters/level_zero/context.hpp b/source/adapters/level_zero/context.hpp
index 96935d470e..c6f292e5b4 100644
--- a/source/adapters/level_zero/context.hpp
+++ b/source/adapters/level_zero/context.hpp
@@ -26,6 +26,7 @@
 #include "queue.hpp"
 
 #include <umf_helpers.hpp>
+#include <ur_pool_manager.hpp>
 
 struct ur_context_handle_t_ : _ur_object {
   ur_context_handle_t_(ze_context_handle_t ZeContext, uint32_t NumDevices,
@@ -96,15 +97,8 @@ struct ur_context_handle_t_ : _ur_object {
 
   // Store USM pool for USM shared and device allocations. There is 1 memory
   // pool per each pair of (context, device) per each memory type.
-  std::unordered_map<ze_device_handle_t, umf::pool_unique_handle_t>
-      DeviceMemPools;
-  std::unordered_map<ze_device_handle_t, umf::pool_unique_handle_t>
-      SharedMemPools;
-  std::unordered_map<ze_device_handle_t, umf::pool_unique_handle_t>
-      SharedReadOnlyMemPools;
-
-  // Store the host memory pool. It does not depend on any device.
-  umf::pool_unique_handle_t HostMemPool;
+  usm::pool_manager<usm::pool_descriptor> PoolManager;
+  usm::pool_manager<usm::pool_descriptor> ProxyPoolManager;
 
   // Allocation-tracking proxy pools for direct allocations. No pooling used.
   std::unordered_map<ze_device_handle_t, umf::pool_unique_handle_t>
@@ -252,3 +246,8 @@ struct ur_context_handle_t_ : _ur_object {
 // mutex guarding the container with contexts because the context can be removed
 // from the list of tracked contexts.
 ur_result_t ContextReleaseHelper(ur_context_handle_t Context);
+
+// Template helper function for creating USM pools for given pool descriptor.
+template <typename P, typename... Args>
+std::pair<ur_result_t, umf::pool_unique_handle_t>
+createUMFPoolForDesc(usm::pool_descriptor &Desc, Args &&...args);
diff --git a/source/adapters/level_zero/device.cpp b/source/adapters/level_zero/device.cpp
index 0b8e12c67a..d986adc5e8 100644
--- a/source/adapters/level_zero/device.cpp
+++ b/source/adapters/level_zero/device.cpp
@@ -683,7 +683,7 @@ UR_APIEXPORT ur_result_t UR_APICALL urDeviceGetInfo(
         }
       }
     }
-    return ReturnValue(std::min(GlobalMemSize, FreeMemory));
+    return ReturnValue((std::min)(GlobalMemSize, FreeMemory));
  }
   case UR_DEVICE_INFO_MEMORY_CLOCK_RATE: {
     // If there are not any memory modules then return 0.
diff --git a/source/adapters/level_zero/usm.cpp b/source/adapters/level_zero/usm.cpp index 11245b5760..8b407e4d99 100644 --- a/source/adapters/level_zero/usm.cpp +++ b/source/adapters/level_zero/usm.cpp @@ -12,6 +12,7 @@ #include #include +#include "common.hpp" #include "context.hpp" #include "event.hpp" #include "usm.hpp" @@ -20,34 +21,6 @@ #include -ur_result_t umf2urResult(umf_result_t umfResult) { - if (umfResult == UMF_RESULT_SUCCESS) - return UR_RESULT_SUCCESS; - - switch (umfResult) { - case UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY: - return UR_RESULT_ERROR_OUT_OF_HOST_MEMORY; - case UMF_RESULT_ERROR_MEMORY_PROVIDER_SPECIFIC: { - auto hProvider = umfGetLastFailedMemoryProvider(); - if (hProvider == nullptr) - return UR_RESULT_ERROR_UNKNOWN; - - ur_result_t Err = UR_RESULT_ERROR_UNKNOWN; - umfMemoryProviderGetLastNativeError(hProvider, nullptr, - reinterpret_cast(&Err)); - return Err; - } - case UMF_RESULT_ERROR_INVALID_ARGUMENT: - return UR_RESULT_ERROR_INVALID_ARGUMENT; - case UMF_RESULT_ERROR_INVALID_ALIGNMENT: - return UR_RESULT_ERROR_UNSUPPORTED_ALIGNMENT; - case UMF_RESULT_ERROR_NOT_SUPPORTED: - return UR_RESULT_ERROR_UNSUPPORTED_FEATURE; - default: - return UR_RESULT_ERROR_UNKNOWN; - }; -} - usm::DisjointPoolAllConfigs InitializeDisjointPoolConfig() { const char *PoolUrTraceVal = std::getenv("UR_L0_USM_ALLOCATOR_TRACE"); const char *PoolPiTraceVal = @@ -335,23 +308,35 @@ UR_APIEXPORT ur_result_t UR_APICALL urUSMHostAlloc( // There is a single allocator for Host USM allocations, so we don't need to // find the allocator depending on context as we do for Shared and Device // allocations. 
- umf_memory_pool_handle_t hPoolInternal = nullptr; + std::optional hPoolInternalOpt = std::nullopt; + usm::pool_descriptor Desc = {nullptr, Context, nullptr, UR_USM_TYPE_HOST, + false}; if (!UseUSMAllocator || // L0 spec says that allocation fails if Alignment != 2^n, in order to // keep the same behavior for the allocator, just call L0 API directly and // return the error code. ((Align & (Align - 1)) != 0)) { - hPoolInternal = Context->HostMemProxyPool.get(); + hPoolInternalOpt = Context->ProxyPoolManager.getPool(Desc); } else if (Pool) { - hPoolInternal = Pool->HostMemPool.get(); + // Getting user-created pool requires 'poolHandle' field. + Desc.poolHandle = Pool; + hPoolInternalOpt = Pool->PoolManager.getPool(Desc); } else { - hPoolInternal = Context->HostMemPool.get(); + hPoolInternalOpt = Context->PoolManager.getPool(Desc); + } + + if (!hPoolInternalOpt.has_value()) { + // Internal error, every L0 context and usm pool should have Host, Device, + // Shared and SharedReadOnly UMF pools. + urPrint("urUSMHostAlloc: unexpected internal error\n"); + return UR_RESULT_ERROR_UNKNOWN; } + auto hPoolInternal = hPoolInternalOpt.value(); *RetMem = umfPoolAlignedMalloc(hPoolInternal, Size, Align); if (*RetMem == nullptr) { auto umfRet = umfPoolGetLastAllocationError(hPoolInternal); - return umf2urResult(umfRet); + return umf::umf2urResult(umfRet); } if (IndirectAccessTrackingEnabled) { @@ -406,31 +391,35 @@ UR_APIEXPORT ur_result_t UR_APICALL urUSMDeviceAlloc( ContextLock.lock(); } - umf_memory_pool_handle_t hPoolInternal = nullptr; + std::optional hPoolInternalOpt = std::nullopt; + usm::pool_descriptor Desc = {nullptr, Context, Device, UR_USM_TYPE_DEVICE, + false}; if (!UseUSMAllocator || // L0 spec says that allocation fails if Alignment != 2^n, in order to // keep the same behavior for the allocator, just call L0 API directly and // return the error code. 
((Alignment & (Alignment - 1)) != 0)) { - auto It = Context->DeviceMemProxyPools.find(Device->ZeDevice); - if (It == Context->DeviceMemProxyPools.end()) - return UR_RESULT_ERROR_INVALID_VALUE; - - hPoolInternal = It->second.get(); + hPoolInternalOpt = Context->ProxyPoolManager.getPool(Desc); } else if (Pool) { - hPoolInternal = Pool->DeviceMemPools[Device].get(); + // Getting user-created pool requires 'poolHandle' field. + Desc.poolHandle = Pool; + hPoolInternalOpt = Pool->PoolManager.getPool(Desc); } else { - auto It = Context->DeviceMemPools.find(Device->ZeDevice); - if (It == Context->DeviceMemPools.end()) - return UR_RESULT_ERROR_INVALID_VALUE; + hPoolInternalOpt = Context->PoolManager.getPool(Desc); + } - hPoolInternal = It->second.get(); + if (!hPoolInternalOpt.has_value()) { + // Internal error, every L0 context and usm pool should have Host, Device, + // Shared and SharedReadOnly UMF pools. + urPrint("urUSMDeviceAlloc: unexpected internal error\n"); + return UR_RESULT_ERROR_UNKNOWN; } + auto hPoolInternal = hPoolInternalOpt.value(); *RetMem = umfPoolAlignedMalloc(hPoolInternal, Size, Alignment); if (*RetMem == nullptr) { auto umfRet = umfPoolGetLastAllocationError(hPoolInternal); - return umf2urResult(umfRet); + return umf::umf2urResult(umfRet); } if (IndirectAccessTrackingEnabled) { @@ -506,37 +495,35 @@ UR_APIEXPORT ur_result_t UR_APICALL urUSMSharedAlloc( UR_CALL(urContextRetain(Context)); } - umf_memory_pool_handle_t hPoolInternal = nullptr; + std::optional hPoolInternalOpt = std::nullopt; + usm::pool_descriptor Desc = {nullptr, Context, Device, UR_USM_TYPE_SHARED, + DeviceReadOnly}; if (!UseUSMAllocator || // L0 spec says that allocation fails if Alignment != 2^n, in order to // keep the same behavior for the allocator, just call L0 API directly and // return the error code. ((Alignment & (Alignment - 1)) != 0)) { - auto &Allocator = (DeviceReadOnly ? 
Context->SharedReadOnlyMemProxyPools - : Context->SharedMemProxyPools); - auto It = Allocator.find(Device->ZeDevice); - if (It == Allocator.end()) - return UR_RESULT_ERROR_INVALID_VALUE; - - hPoolInternal = It->second.get(); + hPoolInternalOpt = Context->ProxyPoolManager.getPool(Desc); } else if (Pool) { - hPoolInternal = (DeviceReadOnly) - ? Pool->SharedReadOnlyMemPools[Device].get() - : Pool->SharedMemPools[Device].get(); + // Getting user-created pool requires 'poolHandle' field. + Desc.poolHandle = Pool; + hPoolInternalOpt = Pool->PoolManager.getPool(Desc); } else { - auto &Allocator = (DeviceReadOnly ? Context->SharedReadOnlyMemPools - : Context->SharedMemPools); - auto It = Allocator.find(Device->ZeDevice); - if (It == Allocator.end()) - return UR_RESULT_ERROR_INVALID_VALUE; + hPoolInternalOpt = Context->PoolManager.getPool(Desc); + } - hPoolInternal = It->second.get(); + if (!hPoolInternalOpt.has_value()) { + // Internal error, every L0 context and usm pool should have Host, Device, + // Shared and SharedReadOnly UMF pools. 
+ urPrint("urUSMSharedAlloc: unexpected internal error\n"); + return UR_RESULT_ERROR_UNKNOWN; } + auto hPoolInternal = hPoolInternalOpt.value(); *RetMem = umfPoolAlignedMalloc(hPoolInternal, Size, Alignment); if (*RetMem == nullptr) { auto umfRet = umfPoolGetLastAllocationError(hPoolInternal); - return umf2urResult(umfRet); + return umf::umf2urResult(umfRet); } if (IndirectAccessTrackingEnabled) { @@ -628,28 +615,9 @@ UR_APIEXPORT ur_result_t UR_APICALL urUSMGetMemAllocInfo( std::shared_lock ContextLock(Context->Mutex); - auto SearchMatchingPool = - [](std::unordered_map - &PoolMap, - umf_memory_pool_handle_t UMFPool) { - for (auto &PoolPair : PoolMap) { - if (PoolPair.second.get() == UMFPool) { - return true; - } - } - return false; - }; - for (auto &Pool : Context->UsmPoolHandles) { - if (SearchMatchingPool(Pool->DeviceMemPools, UMFPool)) { - return ReturnValue(Pool); - } - if (SearchMatchingPool(Pool->SharedMemPools, UMFPool)) { - return ReturnValue(Pool); - } - if (Pool->HostMemPool.get() == UMFPool) { + if (Pool->PoolManager.hasPool(UMFPool)) return ReturnValue(Pool); - } } return UR_RESULT_ERROR_INVALID_VALUE; @@ -829,50 +797,53 @@ ur_usm_pool_handle_t_::ur_usm_pool_handle_t_(ur_context_handle_t Context, pNext = const_cast(BaseDesc->pNext); } - auto MemProvider = - umf::memoryProviderMakeUnique(Context, nullptr) - .second; - - HostMemPool = - umf::poolMakeUnique( - {std::move(MemProvider)}, - this->DisjointPoolConfigs.Configs[usm::DisjointPoolMemType::Host]) - .second; - - for (auto device : Context->Devices) { - MemProvider = - umf::memoryProviderMakeUnique(Context, device) - .second; - DeviceMemPools.emplace( - std::piecewise_construct, std::make_tuple(device), - std::make_tuple(umf::poolMakeUnique( - {std::move(MemProvider)}, - this->DisjointPoolConfigs - .Configs[usm::DisjointPoolMemType::Device]) - .second)); - - MemProvider = - umf::memoryProviderMakeUnique(Context, device) - .second; - SharedMemPools.emplace( - std::piecewise_construct, 
std::make_tuple(device), - std::make_tuple(umf::poolMakeUnique( - {std::move(MemProvider)}, - this->DisjointPoolConfigs - .Configs[usm::DisjointPoolMemType::Shared]) - .second)); - - MemProvider = umf::memoryProviderMakeUnique( - Context, device) - .second; - SharedReadOnlyMemPools.emplace( - std::piecewise_construct, std::make_tuple(device), - std::make_tuple( - umf::poolMakeUnique( - {std::move(MemProvider)}, - this->DisjointPoolConfigs - .Configs[usm::DisjointPoolMemType::SharedReadOnly]) - .second)); + ur_result_t Ret; + std::tie(Ret, PoolManager) = + usm::pool_manager::create(); + if (Ret) { + urPrint("urUSMPoolCreate: unexpected internal error\n"); + throw UsmAllocationException(Ret); + } + + std::vector Descs; + // Create pool descriptor for every device and subdevice. + std::tie(Ret, Descs) = usm::pool_descriptor::create( + reinterpret_cast(this), Context); + if (Ret) { + urPrint("urUSMPoolCreate: unexpected internal error\n"); + throw UsmAllocationException(Ret); + } + + auto descTypeToDisjointPoolType = + [](usm::pool_descriptor &Desc) -> usm::DisjointPoolMemType { + switch (Desc.type) { + case UR_USM_TYPE_HOST: + return usm::DisjointPoolMemType::Host; + case UR_USM_TYPE_DEVICE: + return usm::DisjointPoolMemType::Device; + case UR_USM_TYPE_SHARED: + return (Desc.deviceReadOnly) ? usm::DisjointPoolMemType::SharedReadOnly + : usm::DisjointPoolMemType::Shared; + default: + assert(0 && "Invalid pool descriptor type!"); + // Added to suppress 'not all control paths return a value' warning. + return usm::DisjointPoolMemType::All; + } + }; + + // Create USM pool for each pool descriptor and add it to pool manager. 
+ for (auto &Desc : Descs) { + umf::pool_unique_handle_t Pool = nullptr; + auto PoolType = descTypeToDisjointPoolType(Desc); + + std::tie(Ret, Pool) = createUMFPoolForDesc( + Desc, DisjointPoolConfigInstance.Configs[PoolType]); + if (Ret) { + urPrint("urUSMPoolCreate: unexpected internal error\n"); + throw UsmAllocationException(Ret); + } + + PoolManager.addPool(Desc, Pool); } } @@ -1018,7 +989,7 @@ ur_result_t USMFreeHelper(ur_context_handle_t Context, void *Ptr, auto umfRet = umfPoolFree(hPool, Ptr); if (IndirectAccessTrackingEnabled) UR_CALL(ContextReleaseHelper(Context)); - return umf2urResult(umfRet); + return umf::umf2urResult(umfRet); } UR_APIEXPORT ur_result_t UR_APICALL urUSMImportExp(ur_context_handle_t Context, diff --git a/source/adapters/level_zero/usm.hpp b/source/adapters/level_zero/usm.hpp index 958fca9354..78a335a04d 100644 --- a/source/adapters/level_zero/usm.hpp +++ b/source/adapters/level_zero/usm.hpp @@ -12,6 +12,7 @@ #include "common.hpp" #include +#include usm::DisjointPoolAllConfigs InitializeDisjointPoolConfig(); @@ -21,13 +22,7 @@ struct ur_usm_pool_handle_t_ : _ur_object { usm::DisjointPoolAllConfigs DisjointPoolConfigs = InitializeDisjointPoolConfig(); - std::unordered_map - DeviceMemPools; - std::unordered_map - SharedMemPools; - std::unordered_map - SharedReadOnlyMemPools; - umf::pool_unique_handle_t HostMemPool; + usm::pool_manager PoolManager; ur_context_handle_t Context{}; diff --git a/source/common/ur_pool_manager.hpp b/source/common/ur_pool_manager.hpp index 2215bd0575..4accd55631 100644 --- a/source/common/ur_pool_manager.hpp +++ b/source/common/ur_pool_manager.hpp @@ -249,13 +249,19 @@ template struct pool_manager { std::optional getPool(const D &desc) noexcept { auto it = descToPoolMap.find(desc); if (it == descToPoolMap.end()) { - logger::error("Pool descriptor doesn't match any existing pool: {}", - desc); + logger::error( + "Pool descriptor: {}, doesn't match any existing pool", desc); return std::nullopt; } return 
it->second.get(); } + + bool hasPool(umf_memory_pool_handle_t hPool) noexcept { + return std::any_of( + descToPoolMap.begin(), descToPoolMap.end(), + [&hPool](const auto &pair) { return hPool == pair.second.get(); }); + } }; } // namespace usm