zephyr: lib: alloc: Use cached memory for L3 Heap
This is the recommended HW flow for Intel ADSP code. The L3 heap
should be accessed through cached pointers, including its
management data.
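
In practice this means heap metadata updates must follow the usual
write-back cache discipline: invalidate before reading shared data,
flush after modifying it. A minimal sketch of that pattern using
Zephyr's data cache API (the wrapper function below is illustrative
only, not part of this commit):

    #include <zephyr/cache.h>

    /* Illustrative helper: bracket an update of cached shared data,
     * dropping stale lines before the read and writing dirty lines
     * back after the modification.
     */
    static void update_shared_data(void *data, size_t size)
    {
            sys_cache_data_invd_range(data, size);
            /* ... read and modify the data here ... */
            sys_cache_data_flush_range(data, size);
    }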

Signed-off-by: Jaroslaw Stelter <Jaroslaw.Stelter@intel.com>
jxstelter committed Dec 15, 2023
1 parent 267d069 commit 00c5b88
Showing 1 changed file with 43 additions and 6 deletions.
zephyr/lib/alloc.c (43 additions, 6 deletions)
@@ -115,8 +115,8 @@ static inline uintptr_t get_l3_heap_start(void)
 	 * - main_fw_load_offset
 	 * - main fw size in manifest
 	 */
-	return (uintptr_t)z_soc_uncached_ptr((__sparse_force void __sparse_cache *)
-			ROUND_UP(IMR_L3_HEAP_BASE, L3_MEM_PAGE_SIZE));
+	return (uintptr_t)((__sparse_force void __sparse_cache *)
+			ROUND_UP(IMR_L3_HEAP_BASE, L3_MEM_PAGE_SIZE));
 }
 
 /**
@@ -144,14 +144,31 @@ static bool is_l3_heap_pointer(void *ptr)
 	uintptr_t l3_heap_start = get_l3_heap_start();
 	uintptr_t l3_heap_end = l3_heap_start + get_l3_heap_size();
 
-	if (is_cached(ptr))
-		ptr = z_soc_uncached_ptr((__sparse_force void __sparse_cache *)ptr);
-
 	if ((POINTER_TO_UINT(ptr) >= l3_heap_start) && (POINTER_TO_UINT(ptr) < l3_heap_end))
 		return true;
 
 	return false;
 }
 
+/**
+ * Flush cached L3 heap management data.
+ */
+static inline void l3_heap_flush(struct k_heap *h)
+{
+	if (h == &l3_heap)
+		sys_cache_data_flush_and_invd_range((void *)get_l3_heap_start(),
+						    L3_MEM_PAGE_SIZE);
+}
+
+/**
+ * Invalidate cached L3 heap management data.
+ */
+static inline void l3_heap_invalidate(struct k_heap *h)
+{
+	if (h == &l3_heap)
+		sys_cache_data_invd_range((void *)get_l3_heap_start(),
+					  L3_MEM_PAGE_SIZE);
+}
+
 #endif

@@ -163,7 +180,13 @@ static void *heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes)
 #endif
 
 	key = k_spin_lock(&h->lock);
+#if CONFIG_L3_HEAP
+	l3_heap_invalidate(h);
+#endif
 	ret = sys_heap_aligned_alloc(&h->heap, min_align, bytes);
+#if CONFIG_L3_HEAP
+	l3_heap_flush(h);
+#endif
 	k_spin_unlock(&h->lock, key);
 
 #if CONFIG_SYS_HEAP_RUNTIME_STATS && CONFIG_IPC_MAJOR_4
@@ -212,15 +235,24 @@ static void heap_free(struct k_heap *h, void *mem)
 
 	if (is_cached(mem)) {
 		mem_uncached = z_soc_uncached_ptr((__sparse_force void __sparse_cache *)mem);
+#if CONFIG_L3_HEAP
+		if (h == &l3_heap)
+			mem_uncached = (__sparse_force void __sparse_cache *)mem;
+#endif
 		sys_cache_data_flush_and_invd_range(mem,
						    sys_heap_usable_size(&h->heap, mem_uncached));
 
 		mem = mem_uncached;
 	}
 #endif
 
+#if CONFIG_L3_HEAP
+	l3_heap_invalidate(h);
+#endif
 	sys_heap_free(&h->heap, mem);
 
+#if CONFIG_L3_HEAP
+	l3_heap_flush(h);
+#endif
 	k_spin_unlock(&h->lock, key);
 }

@@ -250,6 +282,9 @@ void *rmalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes)
 	if (caps & SOF_MEM_CAPS_L3) {
 #if CONFIG_L3_HEAP
 		heap = &l3_heap;
+		/* Uncached L3_HEAP should not be used */
+		if (!zone_is_cached(zone))
+			k_panic();
 #else
 		k_panic();
 #endif
@@ -341,6 +376,7 @@ void *rballoc_align(uint32_t flags, uint32_t caps, size_t bytes,
 /*
  * Frees memory allocated by the above alloc calls.
  */
+__attribute__((optimize("-O0")))
 void rfree(void *ptr)
 {
 	if (!ptr)
@@ -362,6 +398,7 @@ static int heap_init(void)
 
 #if CONFIG_L3_HEAP
 	sys_heap_init(&l3_heap.heap, UINT_TO_POINTER(get_l3_heap_start()), get_l3_heap_size());
+	l3_heap_flush(&l3_heap);
 #endif
 
 	return 0;
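With this change, callers that want L3 (IMR) memory must request it
from a cached zone or rmalloc() panics. A hypothetical usage sketch
(SOF_MEM_CAPS_L3 and the rmalloc()/rfree() signatures are taken from
the diff above; the zone constant and the size are illustrative):

    /* Allocate 4 KiB from the L3 heap; the zone must be a cached
     * one, otherwise the new zone_is_cached() check ends in k_panic().
     */
    void *buf = rmalloc(SOF_MEM_ZONE_RUNTIME, 0, SOF_MEM_CAPS_L3, 4096);
    if (buf)
            rfree(buf); /* freed through the cached pointer path */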
