diff --git a/zephyr/lib/alloc.c b/zephyr/lib/alloc.c
index 4c4baa325e6e..30e13faaf0d6 100644
--- a/zephyr/lib/alloc.c
+++ b/zephyr/lib/alloc.c
@@ -116,8 +116,8 @@ static inline uintptr_t get_l3_heap_start(void)
	 * - main_fw_load_offset
	 * - main fw size in manifest
	 */
-	return (uintptr_t)z_soc_uncached_ptr((__sparse_force void __sparse_cache *)
-					     ROUND_UP(IMR_L3_HEAP_BASE, L3_MEM_PAGE_SIZE));
+	return (uintptr_t)((void __sparse_cache *)
+			   ROUND_UP(IMR_L3_HEAP_BASE, L3_MEM_PAGE_SIZE));
 }
 
 /**
@@ -145,14 +145,71 @@ static bool is_l3_heap_pointer(void *ptr)
 	uintptr_t l3_heap_start = get_l3_heap_start();
 	uintptr_t l3_heap_end = l3_heap_start + get_l3_heap_size();
 
-	if (is_cached(ptr))
-		ptr = z_soc_uncached_ptr((__sparse_force void __sparse_cache *)ptr);
-
 	if ((POINTER_TO_UINT(ptr) >= l3_heap_start) && (POINTER_TO_UINT(ptr) < l3_heap_end))
 		return true;
 
 	return false;
 }
+
+static void *l3_heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes)
+{
+	k_spinlock_key_t key;
+	void *ret;
+#if CONFIG_SYS_HEAP_RUNTIME_STATS && CONFIG_IPC_MAJOR_4
+	struct sys_memory_stats stats;
+#endif
+	if (!cpu_is_primary(arch_proc_id())) {
+		tr_err(&zephyr_tr, "L3_HEAP available only for primary core!");
+		k_panic();
+	}
+
+	key = k_spin_lock(&h->lock);
+	ret = sys_heap_aligned_alloc(&h->heap, min_align, bytes);
+	k_spin_unlock(&h->lock, key);
+
+#if CONFIG_SYS_HEAP_RUNTIME_STATS && CONFIG_IPC_MAJOR_4
+	sys_heap_runtime_stats_get(&h->heap, &stats);
+	tr_info(&zephyr_tr, "heap allocated: %u free: %u max allocated: %u",
+		stats.allocated_bytes, stats.free_bytes, stats.max_allocated_bytes);
+#endif
+
+	return ret;
+}
+
+static void __sparse_cache *l3_heap_alloc_aligned_cached(struct k_heap *h,
+							 size_t min_align, size_t bytes)
+{
+	void __sparse_cache *ptr;
+
+	/*
+	 * Zephyr sys_heap stores metadata at start of each
+	 * heap allocation. To ensure no allocated cached buffer
+	 * overlaps the same cacheline with the metadata chunk,
+	 * align both allocation start and size of allocation
+	 * to cacheline. Only cached allocations are supported in
+	 * l3_heap.
+	 */
+	min_align = MAX(PLATFORM_DCACHE_ALIGN, min_align);
+	bytes = ALIGN_UP(bytes, min_align);
+
+	ptr = (void __sparse_cache *)l3_heap_alloc_aligned(h, min_align, bytes);
+
+	return ptr;
+}
+
+static void l3_heap_free(struct k_heap *h, void *mem)
+{
+	if (!cpu_is_primary(arch_proc_id())) {
+		tr_err(&zephyr_tr, "L3_HEAP available only for primary core!");
+		k_panic();
+	}
+
+	k_spinlock_key_t key = k_spin_lock(&h->lock);
+
+	sys_heap_free(&h->heap, mem);
+	k_spin_unlock(&h->lock, key);
+}
+
 #endif
 
 static void *heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes)
@@ -251,6 +308,15 @@ void *rmalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes)
 	if (caps & SOF_MEM_CAPS_L3) {
 #if CONFIG_L3_HEAP
 		heap = &l3_heap;
+		/* Uncached L3_HEAP should not be used */
+		if (!zone_is_cached(zone))
+			k_panic();
+		ptr = (__sparse_force void *)l3_heap_alloc_aligned_cached(heap, 0, bytes);
+
+		if (!ptr && zone == SOF_MEM_ZONE_SYS)
+			k_panic();
+
+		return ptr;
 #else
 		k_panic();
 #endif
@@ -352,7 +418,7 @@ void rfree(void *ptr)
 
 #if CONFIG_L3_HEAP
 	if (is_l3_heap_pointer(ptr)) {
-		heap_free(&l3_heap, ptr);
+		l3_heap_free(&l3_heap, ptr);
 		return;
 	}
 #endif
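
For reference, a minimal caller-side sketch of the path this patch creates, not part of the patch itself. It assumes CONFIG_L3_HEAP=y, a primary-core caller, and that SOF_MEM_ZONE_RUNTIME is treated as a cached zone by zone_is_cached(); the header path, buffer size, and error handling are illustrative rather than taken from the patch:

/*
 * Sketch: allocating a cached buffer from the IMR-backed L3 heap via
 * rmalloc(). With this patch, rmalloc() routes SOF_MEM_CAPS_L3 requests
 * to l3_heap_alloc_aligned_cached(); an uncached zone or a non-primary
 * caller triggers k_panic().
 */
#include <errno.h>
#include <rtos/alloc.h>	/* header path may differ across SOF branches */

static int l3_buffer_example(size_t bytes)
{
	/* Returns a cached, cache-line aligned pointer, or NULL. */
	void *buf = rmalloc(SOF_MEM_ZONE_RUNTIME, 0, SOF_MEM_CAPS_L3, bytes);

	if (!buf)
		return -ENOMEM;

	/* ... use buf; it is cached, so writeback before sharing ... */

	/* rfree() recognizes L3 pointers via is_l3_heap_pointer()
	 * and releases them with the new l3_heap_free().
	 */
	rfree(buf);
	return 0;
}

Because l3_heap_alloc_aligned_cached() rounds both the alignment and the size up to PLATFORM_DCACHE_ALIGN, the returned cached buffer never shares a cacheline with the sys_heap metadata chunk that precedes it, which is what lets the L3 heap drop the cached/uncached pointer conversions removed above.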