From 08b9151b4f0e7cd2342c45191af5999423ad3241 Mon Sep 17 00:00:00 2001
From: Rajiv Mohan
Date: Mon, 15 Jan 2024 10:00:17 -0800
Subject: [PATCH] lib: xlnx: Avoid mapping in MPU for region mapped by bsp

Avoid adding an entry to the MPU config table for regions that are
already mapped by the BSP with no change in the attribute property.

Signed-off-by: Rajiv Mohan
Signed-off-by: Tanmay Shah
---
 lib/system/generic/xlnx/sys.c | 55 ++++++++++++++++++++++++++++++++++-
 1 file changed, 54 insertions(+), 1 deletion(-)

diff --git a/lib/system/generic/xlnx/sys.c b/lib/system/generic/xlnx/sys.c
index e4669897..1b6271f3 100644
--- a/lib/system/generic/xlnx/sys.c
+++ b/lib/system/generic/xlnx/sys.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2022, Xilinx Inc. and Contributors. All rights reserved.
- * Copyright (c) 2022-2023 Advanced Micro Devices, Inc. All Rights Reserved.
+ * Copyright (c) 2022-2024 Advanced Micro Devices, Inc. All Rights Reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -72,12 +72,65 @@ void metal_weak metal_generic_default_poll(void)
 	metal_asm volatile("wfi");
 }
 
+/*
+ * VERSAL_NET is used since XMpu_Config structure is
+ * different for r52(versal net) and r5(zynqmp) to avoid build failure
+ */
+#ifdef VERSAL_NET
+void *metal_machine_io_mem_map_versal_net(void *va, metal_phys_addr_t pa,
+					  size_t size, unsigned int flags)
+{
+	void *__attribute__((unused)) physaddr;
+	u32 req_end_addr = pa + size;
+	XMpu_Config mpu_config;
+	u32 req_addr = pa;
+	u32 mmap_req = 1;
+	u32 base_end_addr;
+	u32 cnt;
+
+	/* Get the MPU Config entries */
+	Xil_GetMPUConfig(mpu_config);
+
+	for (cnt = 0; cnt < MAX_POSSIBLE_MPU_REGS; cnt++) {
+
+		/*
+		 * Skip MPU table entries that are not marked valid.
+		 * Note: the bit test must be parenthesized; "!" binds
+		 * tighter than "&".
+		 */
+		if (!(mpu_config[cnt].flags & XMPU_VALID_REGION))
+			continue;
+
+		base_end_addr = mpu_config[cnt].Size + mpu_config[cnt].BaseAddress;
+
+		if (mpu_config[cnt].BaseAddress <= req_addr && base_end_addr >= req_end_addr) {
+			/*
+			 * Mapping available for requested region in MPU table
+			 * If no change in Attribute for region then additional
+			 * mapping in MPU table is not required
+			 */
+			if (mpu_config[cnt].Attribute == flags) {
+				mmap_req = 0;
+				break;
+			}
+		}
+	}
+
+	/* if mapping is required we call Xil_MemMap to get the mapping done */
+	if (mmap_req == 1) {
+		physaddr = Xil_MemMap(pa, size, flags);
+		metal_assert(physaddr == (void *)pa);
+	}
+
+	return va;
+}
+#endif
+
 void *metal_machine_io_mem_map(void *va, metal_phys_addr_t pa,
 			       size_t size, unsigned int flags)
 {
 	void *__attribute__((unused)) physaddr;
+#ifdef VERSAL_NET
+	va = metal_machine_io_mem_map_versal_net(va, pa, size, flags);
+#else
 	physaddr = Xil_MemMap(pa, size, flags);
 	metal_assert(physaddr == (void *)pa);
+#endif
 
 	return va;
 }