18c260e80SJens Wiklander /* SPDX-License-Identifier: BSD-2-Clause */
28c260e80SJens Wiklander /*
38c260e80SJens Wiklander * Copyright (c) 2016, Linaro Limited
48c260e80SJens Wiklander * Copyright (c) 2014, STMicroelectronics International N.V.
58c260e80SJens Wiklander */
6fbe66cf8SEtienne Carriere #ifndef __MM_CORE_MMU_H
7fbe66cf8SEtienne Carriere #define __MM_CORE_MMU_H
88c260e80SJens Wiklander
98c260e80SJens Wiklander #ifndef __ASSEMBLER__
108c260e80SJens Wiklander #include <assert.h>
118c260e80SJens Wiklander #include <compiler.h>
128c260e80SJens Wiklander #include <kernel/user_ta.h>
1399e1ad26SJens Wiklander #include <mm/tee_mm.h>
148c260e80SJens Wiklander #include <mm/tee_mmu_types.h>
158c260e80SJens Wiklander #include <types_ext.h>
168c260e80SJens Wiklander #include <util.h>
178c260e80SJens Wiklander #endif
188c260e80SJens Wiklander
198c260e80SJens Wiklander #include <mm/core_mmu_arch.h>
208c260e80SJens Wiklander #include <platform_config.h>
218c260e80SJens Wiklander
228c260e80SJens Wiklander /* A small page is the smallest unit of memory that can be mapped */
238c260e80SJens Wiklander #define SMALL_PAGE_SIZE BIT(SMALL_PAGE_SHIFT)
248c260e80SJens Wiklander #define SMALL_PAGE_MASK ((paddr_t)SMALL_PAGE_SIZE - 1)
258c260e80SJens Wiklander
268c260e80SJens Wiklander /*
278c260e80SJens Wiklander * PGDIR is the translation table above the translation table that holds
288c260e80SJens Wiklander * the pages.
298c260e80SJens Wiklander */
308c260e80SJens Wiklander #define CORE_MMU_PGDIR_SIZE BIT(CORE_MMU_PGDIR_SHIFT)
318c260e80SJens Wiklander #define CORE_MMU_PGDIR_MASK ((paddr_t)CORE_MMU_PGDIR_SIZE - 1)
328c260e80SJens Wiklander
338c260e80SJens Wiklander /* TA user space code, data, stack and heap are mapped using this granularity */
348c260e80SJens Wiklander #define CORE_MMU_USER_CODE_SIZE BIT(CORE_MMU_USER_CODE_SHIFT)
358c260e80SJens Wiklander #define CORE_MMU_USER_CODE_MASK ((paddr_t)CORE_MMU_USER_CODE_SIZE - 1)
368c260e80SJens Wiklander
378c260e80SJens Wiklander /* TA user space parameters are mapped using this granularity */
388c260e80SJens Wiklander #define CORE_MMU_USER_PARAM_SIZE BIT(CORE_MMU_USER_PARAM_SHIFT)
398c260e80SJens Wiklander #define CORE_MMU_USER_PARAM_MASK ((paddr_t)CORE_MMU_USER_PARAM_SIZE - 1)
408c260e80SJens Wiklander
418c260e80SJens Wiklander /*
 * Identity mapping constraint: the virtual base address is the physical
 * start address.
438c260e80SJens Wiklander * If platform did not set some macros, some get default value.
448c260e80SJens Wiklander */
458c260e80SJens Wiklander #ifndef TEE_RAM_VA_SIZE
468c260e80SJens Wiklander #define TEE_RAM_VA_SIZE CORE_MMU_PGDIR_SIZE
478c260e80SJens Wiklander #endif
488c260e80SJens Wiklander
498c260e80SJens Wiklander #ifndef TEE_LOAD_ADDR
508c260e80SJens Wiklander #define TEE_LOAD_ADDR TEE_RAM_START
518c260e80SJens Wiklander #endif
528c260e80SJens Wiklander
538c260e80SJens Wiklander #ifndef STACK_ALIGNMENT
548c260e80SJens Wiklander #define STACK_ALIGNMENT (sizeof(long) * U(2))
558c260e80SJens Wiklander #endif
568c260e80SJens Wiklander
578c260e80SJens Wiklander #ifndef __ASSEMBLER__
588c260e80SJens Wiklander /*
598c260e80SJens Wiklander * Memory area type:
608c260e80SJens Wiklander * MEM_AREA_TEE_RAM: core RAM (read/write/executable, secure, reserved to TEE)
618c260e80SJens Wiklander * MEM_AREA_TEE_RAM_RX: core private read-only/executable memory (secure)
628c260e80SJens Wiklander * MEM_AREA_TEE_RAM_RO: core private read-only/non-executable memory (secure)
638c260e80SJens Wiklander * MEM_AREA_TEE_RAM_RW: core private read/write/non-executable memory (secure)
648c260e80SJens Wiklander * MEM_AREA_INIT_RAM_RO: init private read-only/non-executable memory (secure)
658c260e80SJens Wiklander * MEM_AREA_INIT_RAM_RX: init private read-only/executable memory (secure)
668c260e80SJens Wiklander * MEM_AREA_NEX_RAM_RO: nexus private read-only/non-executable memory (secure)
678c260e80SJens Wiklander * MEM_AREA_NEX_RAM_RW: nexus private r/w/non-executable memory (secure)
6896f43358SJens Wiklander * MEM_AREA_NEX_DYN_VASPACE: nexus private dynamic memory map (secure)
6996f43358SJens Wiklander * MEM_AREA_TEE_DYN_VASPACE: core private dynamic memory (secure)
708c260e80SJens Wiklander * MEM_AREA_TEE_COHERENT: teecore coherent RAM (secure, reserved to TEE)
718c260e80SJens Wiklander * MEM_AREA_TEE_ASAN: core address sanitizer RAM (secure, reserved to TEE)
728c260e80SJens Wiklander * MEM_AREA_IDENTITY_MAP_RX: core identity mapped r/o executable memory (secure)
738c260e80SJens Wiklander * MEM_AREA_NSEC_SHM: NonSecure shared RAM between NSec and TEE.
74ab1ba412SJens Wiklander * MEM_AREA_NEX_NSEC_SHM: nexus non-secure shared RAM between NSec and TEE.
758c260e80SJens Wiklander * MEM_AREA_RAM_NSEC: NonSecure RAM storing data
768c260e80SJens Wiklander * MEM_AREA_RAM_SEC: Secure RAM storing some secrets
77fc7e0cc3SEtienne Carriere * MEM_AREA_ROM_SEC: Secure read only memory storing some secrets
788c260e80SJens Wiklander * MEM_AREA_IO_NSEC: NonSecure HW mapped registers
798c260e80SJens Wiklander * MEM_AREA_IO_SEC: Secure HW mapped registers
808c260e80SJens Wiklander * MEM_AREA_EXT_DT: Memory loads external device tree
81d4bd157eSJens Wiklander * MEM_AREA_MANIFEST_DT: Memory loads manifest device tree
82486e6cfbSRaymond Mao * MEM_AREA_TRANSFER_LIST: Memory area mapped for Transfer List
838c260e80SJens Wiklander * MEM_AREA_RES_VASPACE: Reserved virtual memory space
848c260e80SJens Wiklander * MEM_AREA_SHM_VASPACE: Virtual memory space for dynamic shared memory buffers
858c260e80SJens Wiklander * MEM_AREA_TS_VASPACE: TS va space, only used with phys_to_virt()
868c260e80SJens Wiklander * MEM_AREA_DDR_OVERALL: Overall DDR address range, candidate to dynamic shm.
878c260e80SJens Wiklander * MEM_AREA_SEC_RAM_OVERALL: Whole secure RAM
888c260e80SJens Wiklander * MEM_AREA_MAXTYPE: lower invalid 'type' value
898c260e80SJens Wiklander */
enum teecore_memtypes {
	/* Types start at 1 so that a zero value never names a valid area */
	MEM_AREA_TEE_RAM = 1,
	MEM_AREA_TEE_RAM_RX,
	MEM_AREA_TEE_RAM_RO,
	MEM_AREA_TEE_RAM_RW,
	MEM_AREA_INIT_RAM_RO,
	MEM_AREA_INIT_RAM_RX,
	MEM_AREA_NEX_RAM_RO,
	MEM_AREA_NEX_RAM_RW,
	MEM_AREA_NEX_DYN_VASPACE,
	MEM_AREA_TEE_DYN_VASPACE,
	MEM_AREA_TEE_COHERENT,
	MEM_AREA_TEE_ASAN,
	MEM_AREA_IDENTITY_MAP_RX,
	MEM_AREA_NSEC_SHM,
	MEM_AREA_NEX_NSEC_SHM,
	MEM_AREA_RAM_NSEC,
	MEM_AREA_RAM_SEC,
	MEM_AREA_ROM_SEC,
	MEM_AREA_IO_NSEC,
	MEM_AREA_IO_SEC,
	MEM_AREA_EXT_DT,
	MEM_AREA_MANIFEST_DT,
	MEM_AREA_TRANSFER_LIST,
	MEM_AREA_RES_VASPACE,
	MEM_AREA_SHM_VASPACE,
	MEM_AREA_TS_VASPACE,
	MEM_AREA_PAGER_VASPACE,
	MEM_AREA_SDP_MEM,
	MEM_AREA_DDR_OVERALL,
	MEM_AREA_SEC_RAM_OVERALL,
	/* Keep last: first invalid 'type' value, also sizes the name table */
	MEM_AREA_MAXTYPE
};
1238c260e80SJens Wiklander
teecore_memtype_name(enum teecore_memtypes type)1248c260e80SJens Wiklander static inline const char *teecore_memtype_name(enum teecore_memtypes type)
1258c260e80SJens Wiklander {
1268c260e80SJens Wiklander static const char * const names[] = {
1278c260e80SJens Wiklander [MEM_AREA_TEE_RAM] = "TEE_RAM_RWX",
1288c260e80SJens Wiklander [MEM_AREA_TEE_RAM_RX] = "TEE_RAM_RX",
1298c260e80SJens Wiklander [MEM_AREA_TEE_RAM_RO] = "TEE_RAM_RO",
1308c260e80SJens Wiklander [MEM_AREA_TEE_RAM_RW] = "TEE_RAM_RW",
1318c260e80SJens Wiklander [MEM_AREA_INIT_RAM_RO] = "INIT_RAM_RO",
1328c260e80SJens Wiklander [MEM_AREA_INIT_RAM_RX] = "INIT_RAM_RX",
1338c260e80SJens Wiklander [MEM_AREA_NEX_RAM_RO] = "NEX_RAM_RO",
1348c260e80SJens Wiklander [MEM_AREA_NEX_RAM_RW] = "NEX_RAM_RW",
13596f43358SJens Wiklander [MEM_AREA_NEX_DYN_VASPACE] = "NEX_DYN_VASPACE",
13696f43358SJens Wiklander [MEM_AREA_TEE_DYN_VASPACE] = "TEE_DYN_VASPACE",
1378c260e80SJens Wiklander [MEM_AREA_TEE_ASAN] = "TEE_ASAN",
1388c260e80SJens Wiklander [MEM_AREA_IDENTITY_MAP_RX] = "IDENTITY_MAP_RX",
1398c260e80SJens Wiklander [MEM_AREA_TEE_COHERENT] = "TEE_COHERENT",
1408c260e80SJens Wiklander [MEM_AREA_NSEC_SHM] = "NSEC_SHM",
141ab1ba412SJens Wiklander [MEM_AREA_NEX_NSEC_SHM] = "NEX_NSEC_SHM",
1428c260e80SJens Wiklander [MEM_AREA_RAM_NSEC] = "RAM_NSEC",
1438c260e80SJens Wiklander [MEM_AREA_RAM_SEC] = "RAM_SEC",
144fc7e0cc3SEtienne Carriere [MEM_AREA_ROM_SEC] = "ROM_SEC",
1458c260e80SJens Wiklander [MEM_AREA_IO_NSEC] = "IO_NSEC",
1468c260e80SJens Wiklander [MEM_AREA_IO_SEC] = "IO_SEC",
1478c260e80SJens Wiklander [MEM_AREA_EXT_DT] = "EXT_DT",
148d4bd157eSJens Wiklander [MEM_AREA_MANIFEST_DT] = "MANIFEST_DT",
149486e6cfbSRaymond Mao [MEM_AREA_TRANSFER_LIST] = "TRANSFER_LIST",
1508c260e80SJens Wiklander [MEM_AREA_RES_VASPACE] = "RES_VASPACE",
1518c260e80SJens Wiklander [MEM_AREA_SHM_VASPACE] = "SHM_VASPACE",
1528c260e80SJens Wiklander [MEM_AREA_TS_VASPACE] = "TS_VASPACE",
1538c260e80SJens Wiklander [MEM_AREA_PAGER_VASPACE] = "PAGER_VASPACE",
1548c260e80SJens Wiklander [MEM_AREA_SDP_MEM] = "SDP_MEM",
1558c260e80SJens Wiklander [MEM_AREA_DDR_OVERALL] = "DDR_OVERALL",
1568c260e80SJens Wiklander [MEM_AREA_SEC_RAM_OVERALL] = "SEC_RAM_OVERALL",
1578c260e80SJens Wiklander };
1588c260e80SJens Wiklander
1598c260e80SJens Wiklander COMPILE_TIME_ASSERT(ARRAY_SIZE(names) == MEM_AREA_MAXTYPE);
1608c260e80SJens Wiklander return names[type];
1618c260e80SJens Wiklander }
1628c260e80SJens Wiklander
#ifdef CFG_CORE_RWDATA_NOEXEC
/* Core rw-data goes in the non-executable TEE_RAM_RW area */
#define MEM_AREA_TEE_RAM_RW_DATA	MEM_AREA_TEE_RAM_RW
#else
/* Without rw-data/noexec split, rw-data lives in the rwx TEE_RAM area */
#define MEM_AREA_TEE_RAM_RW_DATA	MEM_AREA_TEE_RAM
#endif
1688c260e80SJens Wiklander
/*
 * struct core_mmu_phys_mem - physical memory range to be mapped
 * @name:	Name of the range, the register_*() macros pass the
 *		stringified address expression
 * @type:	Memory area type, enum teecore_memtypes
 * @addr:	Physical base address
 * @size:	Size of the range in bytes
 *
 * When sizeof(long) differs from sizeof(paddr_t) the address and size can
 * alternatively be assigned through the 32-bit @lo_addr/@hi_addr and
 * @lo_size/@hi_size halves, see __register_memory_ul() which only sets
 * the low halves.
 */
struct core_mmu_phys_mem {
	const char *name;
	enum teecore_memtypes type;
	__extension__ union {
#if __SIZEOF_LONG__ != __SIZEOF_PADDR__
		struct {
			uint32_t lo_addr;
			uint32_t hi_addr;
		};
#endif
		paddr_t addr;
	};
	__extension__ union {
#if __SIZEOF_LONG__ != __SIZEOF_PADDR__
		struct {
			uint32_t lo_size;
			uint32_t hi_size;
		};
#endif
		paddr_size_t size;
	};
};
1918c260e80SJens Wiklander
/*
 * Add a struct core_mmu_phys_mem item to the scattered array @_section,
 * collected at link time and iterated with the *_begin/*_end markers below.
 */
#define __register_memory(_name, _type, _addr, _size, _section) \
	SCATTERED_ARRAY_DEFINE_ITEM(_section, struct core_mmu_phys_mem) = \
		{ .name = (_name), .type = (_type), .addr = (_addr), \
		  .size = (_size) }

#if __SIZEOF_LONG__ != __SIZEOF_PADDR__
/*
 * Variant taking unsigned long @_addr/@_size: only the low 32-bit halves
 * of the address and size fields are assigned.
 */
#define __register_memory_ul(_name, _type, _addr, _size, _section) \
	SCATTERED_ARRAY_DEFINE_ITEM(_section, struct core_mmu_phys_mem) = \
		{ .name = (_name), .type = (_type), .lo_addr = (_addr), \
		  .lo_size = (_size) }
#else
#define __register_memory_ul(_name, _type, _addr, _size, _section) \
		__register_memory(_name, _type, _addr, _size, _section)
#endif

/* Register a physical memory range to be mapped with the given type */
#define register_phys_mem(type, addr, size) \
		__register_memory(#addr, (type), (addr), (size), \
				  phys_mem_map)

/* As register_phys_mem() but for unsigned long address and size */
#define register_phys_mem_ul(type, addr, size) \
		__register_memory_ul(#addr, (type), (addr), (size), \
				     phys_mem_map)

/* Same as register_phys_mem() but with PGDIR_SIZE granularity */
#define register_phys_mem_pgdir(type, addr, size) \
	__register_memory(#addr, type, ROUNDDOWN(addr, CORE_MMU_PGDIR_SIZE), \
			  ROUNDUP(size + addr - \
					ROUNDDOWN(addr, CORE_MMU_PGDIR_SIZE), \
				  CORE_MMU_PGDIR_SIZE), phys_mem_map)

#ifdef CFG_SECURE_DATA_PATH
/* Register a Secure Data Path memory range */
#define register_sdp_mem(addr, size) \
		__register_memory(#addr, MEM_AREA_SDP_MEM, (addr), (size), \
				  phys_sdp_mem)
#else
/* SDP disabled: expand to a harmless unused declaration */
#define register_sdp_mem(addr, size) \
		static int CONCAT(__register_sdp_mem_unused, __COUNTER__) \
			__unused
#endif

/* register_dynamic_shm() is deprecated, please use register_ddr() instead */
#define register_dynamic_shm(addr, size) \
		__register_memory(#addr, MEM_AREA_DDR_OVERALL, (addr), (size), \
				  phys_ddr_overall_compat)
2368c260e80SJens Wiklander
2378c260e80SJens Wiklander /*
2388c260e80SJens Wiklander * register_ddr() - Define a memory range
2398c260e80SJens Wiklander * @addr: Base address
2408c260e80SJens Wiklander * @size: Length
2418c260e80SJens Wiklander *
2428c260e80SJens Wiklander * This macro can be used multiple times to define disjoint ranges. While
2438c260e80SJens Wiklander * initializing holes are carved out of these ranges where it overlaps with
2448c260e80SJens Wiklander * special memory, for instance memory registered with register_sdp_mem().
2458c260e80SJens Wiklander *
2468c260e80SJens Wiklander * The memory that remains is accepted as non-secure shared memory when
2478c260e80SJens Wiklander * communicating with normal world.
2488c260e80SJens Wiklander *
2498c260e80SJens Wiklander * This macro is an alternative to supply the memory description with a
2508c260e80SJens Wiklander * devicetree blob.
2518c260e80SJens Wiklander */
2528c260e80SJens Wiklander #define register_ddr(addr, size) \
2538c260e80SJens Wiklander __register_memory(#addr, MEM_AREA_DDR_OVERALL, (addr), \
2548c260e80SJens Wiklander (size), phys_ddr_overall)
2558c260e80SJens Wiklander
2568c260e80SJens Wiklander #define phys_ddr_overall_begin \
2578c260e80SJens Wiklander SCATTERED_ARRAY_BEGIN(phys_ddr_overall, struct core_mmu_phys_mem)
2588c260e80SJens Wiklander
2598c260e80SJens Wiklander #define phys_ddr_overall_end \
2608c260e80SJens Wiklander SCATTERED_ARRAY_END(phys_ddr_overall, struct core_mmu_phys_mem)
2618c260e80SJens Wiklander
2628c260e80SJens Wiklander #define phys_ddr_overall_compat_begin \
2638c260e80SJens Wiklander SCATTERED_ARRAY_BEGIN(phys_ddr_overall_compat, struct core_mmu_phys_mem)
2648c260e80SJens Wiklander
2658c260e80SJens Wiklander #define phys_ddr_overall_compat_end \
2668c260e80SJens Wiklander SCATTERED_ARRAY_END(phys_ddr_overall_compat, struct core_mmu_phys_mem)
2678c260e80SJens Wiklander
2688c260e80SJens Wiklander #define phys_sdp_mem_begin \
2698c260e80SJens Wiklander SCATTERED_ARRAY_BEGIN(phys_sdp_mem, struct core_mmu_phys_mem)
2708c260e80SJens Wiklander
2718c260e80SJens Wiklander #define phys_sdp_mem_end \
2728c260e80SJens Wiklander SCATTERED_ARRAY_END(phys_sdp_mem, struct core_mmu_phys_mem)
2738c260e80SJens Wiklander
2748c260e80SJens Wiklander #define phys_mem_map_begin \
2758c260e80SJens Wiklander SCATTERED_ARRAY_BEGIN(phys_mem_map, struct core_mmu_phys_mem)
2768c260e80SJens Wiklander
2778c260e80SJens Wiklander #define phys_mem_map_end \
2788c260e80SJens Wiklander SCATTERED_ARRAY_END(phys_mem_map, struct core_mmu_phys_mem)
2798c260e80SJens Wiklander
2809b0ee59dSJens Wiklander /* Virtual memory pool for core mappings */
2819b0ee59dSJens Wiklander extern tee_mm_pool_t core_virt_mem_pool;
28299e1ad26SJens Wiklander
283fdf696b7SJens Wiklander /* Virtual memory pool for shared memory mappings */
284fdf696b7SJens Wiklander extern tee_mm_pool_t core_virt_shm_pool;
28599e1ad26SJens Wiklander
2868c260e80SJens Wiklander #ifdef CFG_CORE_RESERVED_SHM
2878c260e80SJens Wiklander /* Default NSec shared memory allocated from NSec world */
2888c260e80SJens Wiklander extern unsigned long default_nsec_shm_paddr;
2898c260e80SJens Wiklander extern unsigned long default_nsec_shm_size;
2908c260e80SJens Wiklander #endif
2918c260e80SJens Wiklander
2920b751ce4SJens Wiklander /*
2930b751ce4SJens Wiklander * Physical load address of OP-TEE updated during boot if needed to reflect
2940b751ce4SJens Wiklander * the value used.
2950b751ce4SJens Wiklander */
2960b751ce4SJens Wiklander #ifdef CFG_CORE_PHYS_RELOCATABLE
2970b751ce4SJens Wiklander extern unsigned long core_mmu_tee_load_pa;
2980b751ce4SJens Wiklander #else
2990b751ce4SJens Wiklander extern const unsigned long core_mmu_tee_load_pa;
3000b751ce4SJens Wiklander #endif
3010b751ce4SJens Wiklander
3028c260e80SJens Wiklander void core_init_mmu_map(unsigned long seed, struct core_mmu_config *cfg);
3038c260e80SJens Wiklander void core_init_mmu_regs(struct core_mmu_config *cfg);
304bea90f04SAlvin Chang /* Copy static memory map from temporary boot_mem to heap */
305f1284346SJens Wiklander void core_mmu_save_mem_map(void);
3068c260e80SJens Wiklander
3073aaf25d2SEtienne Carriere /* Arch specific function to help optimizing 1 MMU xlat table */
3083aaf25d2SEtienne Carriere bool core_mmu_prefer_tee_ram_at_top(paddr_t paddr);
3098c260e80SJens Wiklander
3108c260e80SJens Wiklander /*
3118c260e80SJens Wiklander * struct mmu_partition - stores MMU partition.
3128c260e80SJens Wiklander *
 * Basically it represents the whole MMU mapping. It is possible
 * to create multiple partitions and change them at runtime,
 * effectively changing how OP-TEE sees memory.
 * This is an opaque struct which is defined differently for
 * v7 and LPAE MMUs.
 *
 * This structure is used mostly when virtualization is enabled.
320b76b2296SJerome Forissier * When CFG_NS_VIRTUALIZATION==n only default partition exists.
3218c260e80SJens Wiklander */
3228c260e80SJens Wiklander struct mmu_partition;
3238c260e80SJens Wiklander
3248c260e80SJens Wiklander /*
3258c260e80SJens Wiklander * core_mmu_get_user_va_range() - Return range of user va space
3268c260e80SJens Wiklander * @base: Lowest user virtual address
3278c260e80SJens Wiklander * @size: Size in bytes of user address space
3288c260e80SJens Wiklander */
3298c260e80SJens Wiklander void core_mmu_get_user_va_range(vaddr_t *base, size_t *size);
3308c260e80SJens Wiklander
3318c260e80SJens Wiklander /*
3328c260e80SJens Wiklander * enum core_mmu_fault - different kinds of faults
3338c260e80SJens Wiklander * @CORE_MMU_FAULT_ALIGNMENT: alignment fault
3348c260e80SJens Wiklander * @CORE_MMU_FAULT_DEBUG_EVENT: debug event
3358c260e80SJens Wiklander * @CORE_MMU_FAULT_TRANSLATION: translation fault
3368c260e80SJens Wiklander * @CORE_MMU_FAULT_WRITE_PERMISSION: Permission fault during write
3378c260e80SJens Wiklander * @CORE_MMU_FAULT_READ_PERMISSION: Permission fault during read
3388c260e80SJens Wiklander * @CORE_MMU_FAULT_ASYNC_EXTERNAL: asynchronous external abort
3398c260e80SJens Wiklander * @CORE_MMU_FAULT_ACCESS_BIT: access bit fault
340fb873b88SJens Wiklander * @CORE_MMU_FAULT_TAG_CHECK: tag check fault
341325d4963SGatien Chevallier * @CORE_MMU_FAULT_SYNC_EXTERNAL: synchronous external abort
3428c260e80SJens Wiklander * @CORE_MMU_FAULT_OTHER: Other/unknown fault
3438c260e80SJens Wiklander */
/* Fault classification as reported by core_mmu_get_fault_type() */
enum core_mmu_fault {
	CORE_MMU_FAULT_ALIGNMENT,
	CORE_MMU_FAULT_DEBUG_EVENT,
	CORE_MMU_FAULT_TRANSLATION,
	CORE_MMU_FAULT_WRITE_PERMISSION,
	CORE_MMU_FAULT_READ_PERMISSION,
	CORE_MMU_FAULT_ASYNC_EXTERNAL,
	CORE_MMU_FAULT_ACCESS_BIT,
	CORE_MMU_FAULT_TAG_CHECK,
	CORE_MMU_FAULT_SYNC_EXTERNAL,
	CORE_MMU_FAULT_OTHER,
};
3568c260e80SJens Wiklander
3578c260e80SJens Wiklander /*
3588c260e80SJens Wiklander * core_mmu_get_fault_type() - get fault type
3598c260e80SJens Wiklander * @fault_descr: Content of fault status or exception syndrome register
3608c260e80SJens Wiklander * @returns an enum describing the content of fault status register.
3618c260e80SJens Wiklander */
3628c260e80SJens Wiklander enum core_mmu_fault core_mmu_get_fault_type(uint32_t fault_descr);
3638c260e80SJens Wiklander
3648c260e80SJens Wiklander /*
3658c260e80SJens Wiklander * core_mm_type_to_attr() - convert memory type to attribute
3668c260e80SJens Wiklander * @t: memory type
3678c260e80SJens Wiklander * @returns an attribute that can be passed to core_mm_set_entry() and friends
3688c260e80SJens Wiklander */
3698c260e80SJens Wiklander uint32_t core_mmu_type_to_attr(enum teecore_memtypes t);
3708c260e80SJens Wiklander
core_mmu_type_is_nex_shared(enum teecore_memtypes t)3716a2e17e9SJens Wiklander static inline bool core_mmu_type_is_nex_shared(enum teecore_memtypes t)
3726a2e17e9SJens Wiklander {
3736a2e17e9SJens Wiklander return IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
3746a2e17e9SJens Wiklander (t == MEM_AREA_NEX_DYN_VASPACE || t == MEM_AREA_NEX_NSEC_SHM);
3756a2e17e9SJens Wiklander }
3766a2e17e9SJens Wiklander
3778c260e80SJens Wiklander /*
3788c260e80SJens Wiklander * core_mmu_create_user_map() - Create user mode mapping
3798c260e80SJens Wiklander * @uctx: Pointer to user mode context
3808c260e80SJens Wiklander * @map: MMU configuration to use when activating this VA space
3818c260e80SJens Wiklander */
3828c260e80SJens Wiklander void core_mmu_create_user_map(struct user_mode_ctx *uctx,
3838c260e80SJens Wiklander struct core_mmu_user_map *map);
3848c260e80SJens Wiklander /*
3858c260e80SJens Wiklander * core_mmu_get_user_map() - Reads current MMU configuration for user VA space
3868c260e80SJens Wiklander * @map: MMU configuration for current user VA space.
3878c260e80SJens Wiklander */
3888c260e80SJens Wiklander void core_mmu_get_user_map(struct core_mmu_user_map *map);
3898c260e80SJens Wiklander
3908c260e80SJens Wiklander /*
3918c260e80SJens Wiklander * core_mmu_set_user_map() - Set new MMU configuration for user VA space
3928c260e80SJens Wiklander * @map: User context MMU configuration or NULL to set core VA space
3938c260e80SJens Wiklander *
3948c260e80SJens Wiklander * Activate user VA space mapping and set its ASID if @map is not NULL,
3958c260e80SJens Wiklander * otherwise activate core mapping and set ASID to 0.
3968c260e80SJens Wiklander */
3978c260e80SJens Wiklander void core_mmu_set_user_map(struct core_mmu_user_map *map);
3988c260e80SJens Wiklander
3998c260e80SJens Wiklander /*
4008c260e80SJens Wiklander * struct core_mmu_table_info - Properties for a translation table
4018c260e80SJens Wiklander * @table: Pointer to translation table
 * @va_base: VA base address of the translation table
4038c260e80SJens Wiklander * @level: Translation table level
404a21afdffSAlvin Chang * @next_level: Finer grained translation table level according to @level.
4058c260e80SJens Wiklander * @shift: The shift of each entry in the table
4068c260e80SJens Wiklander * @num_entries: Number of entries in this table.
4078c260e80SJens Wiklander */
struct core_mmu_table_info {
	void *table;		/* Pointer to the translation table */
	vaddr_t va_base;	/* VA covered by the first entry of @table */
	unsigned num_entries;	/* Number of entries in @table */
#ifdef CFG_NS_VIRTUALIZATION
	/* Partition the table belongs to, see core_mmu_find_table() */
	struct mmu_partition *prtn;
#endif
	uint8_t level;		/* Translation table level */
	uint8_t shift;		/* Address shift covered by one entry */
	uint8_t next_level;	/* Finer grained table level below @level */
};
4198c260e80SJens Wiklander
4208c260e80SJens Wiklander /*
4218c260e80SJens Wiklander * core_mmu_find_table() - Locates a translation table
4228c260e80SJens Wiklander * @prtn: MMU partition where search should be performed
4238c260e80SJens Wiklander * @va: Virtual address for the table to cover
4248c260e80SJens Wiklander * @max_level: Don't traverse beyond this level
4258c260e80SJens Wiklander * @tbl_info: Pointer to where to store properties.
4268c260e80SJens Wiklander * @return true if a translation table was found, false on error
4278c260e80SJens Wiklander */
4288c260e80SJens Wiklander bool core_mmu_find_table(struct mmu_partition *prtn, vaddr_t va,
4298c260e80SJens Wiklander unsigned max_level,
4308c260e80SJens Wiklander struct core_mmu_table_info *tbl_info);
4318c260e80SJens Wiklander
4328c260e80SJens Wiklander /*
4338c260e80SJens Wiklander * core_mmu_entry_to_finer_grained() - divide mapping at current level into
4348c260e80SJens Wiklander * smaller ones so memory can be mapped with finer granularity
4358c260e80SJens Wiklander * @tbl_info: table where target record located
4368c260e80SJens Wiklander * @idx: index of record for which a pdgir must be setup.
4378c260e80SJens Wiklander * @secure: true/false if pgdir maps secure/non-secure memory (32bit mmu)
4388c260e80SJens Wiklander * @return true on successful, false on error
4398c260e80SJens Wiklander */
4408c260e80SJens Wiklander bool core_mmu_entry_to_finer_grained(struct core_mmu_table_info *tbl_info,
4418c260e80SJens Wiklander unsigned int idx, bool secure);
4428c260e80SJens Wiklander
4438c260e80SJens Wiklander void core_mmu_set_entry_primitive(void *table, size_t level, size_t idx,
4448c260e80SJens Wiklander paddr_t pa, uint32_t attr);
4458c260e80SJens Wiklander
4468c260e80SJens Wiklander void core_mmu_get_user_pgdir(struct core_mmu_table_info *pgd_info);
4478c260e80SJens Wiklander
4488c260e80SJens Wiklander /*
4498c260e80SJens Wiklander * core_mmu_set_entry() - Set entry in translation table
4508c260e80SJens Wiklander * @tbl_info: Translation table properties
4518c260e80SJens Wiklander * @idx: Index of entry to update
4528c260e80SJens Wiklander * @pa: Physical address to assign entry
4538c260e80SJens Wiklander * @attr: Attributes to assign entry
4548c260e80SJens Wiklander */
4558c260e80SJens Wiklander void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
4568c260e80SJens Wiklander paddr_t pa, uint32_t attr);
4578c260e80SJens Wiklander
4588c260e80SJens Wiklander void core_mmu_get_entry_primitive(const void *table, size_t level, size_t idx,
4598c260e80SJens Wiklander paddr_t *pa, uint32_t *attr);
4608c260e80SJens Wiklander
4618c260e80SJens Wiklander /*
4628c260e80SJens Wiklander * core_mmu_get_entry() - Get entry from translation table
4638c260e80SJens Wiklander * @tbl_info: Translation table properties
4648c260e80SJens Wiklander * @idx: Index of entry to read
4658c260e80SJens Wiklander * @pa: Physical address is returned here if pa is not NULL
 * @attr: Attributes are returned here if attr is not NULL
4678c260e80SJens Wiklander */
4688c260e80SJens Wiklander void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
4698c260e80SJens Wiklander paddr_t *pa, uint32_t *attr);
4708c260e80SJens Wiklander
4718c260e80SJens Wiklander /*
4728c260e80SJens Wiklander * core_mmu_va2idx() - Translate from virtual address to table index
4738c260e80SJens Wiklander * @tbl_info: Translation table properties
4748c260e80SJens Wiklander * @va: Virtual address to translate
 * @returns index in translation table
4768c260e80SJens Wiklander */
static inline unsigned core_mmu_va2idx(struct core_mmu_table_info *tbl_info,
				       vaddr_t va)
{
#ifdef RV64
	/*
	 * At the base table level, clear the bits above the translated VA
	 * width before computing the index. NOTE(review): presumably these
	 * are the sign-extension bits of a canonical RISC-V address —
	 * confirm against the RV64 MMU code.
	 */
	if (tbl_info->level == CORE_MMU_BASE_TABLE_LEVEL)
		va &= ~GENMASK_64(63, RISCV_MMU_VA_WIDTH);
#endif
	return (va - tbl_info->va_base) >> tbl_info->shift;
}
4868c260e80SJens Wiklander
4878c260e80SJens Wiklander /*
4888c260e80SJens Wiklander * core_mmu_idx2va() - Translate from table index to virtual address
4898c260e80SJens Wiklander * @tbl_info: Translation table properties
4908c260e80SJens Wiklander * @idx: Index to translate
4918c260e80SJens Wiklander * @returns Virtual address
4928c260e80SJens Wiklander */
core_mmu_idx2va(struct core_mmu_table_info * tbl_info,unsigned idx)4938c260e80SJens Wiklander static inline vaddr_t core_mmu_idx2va(struct core_mmu_table_info *tbl_info,
4948c260e80SJens Wiklander unsigned idx)
4958c260e80SJens Wiklander {
4968c260e80SJens Wiklander return (idx << tbl_info->shift) + tbl_info->va_base;
4978c260e80SJens Wiklander }
4988c260e80SJens Wiklander
4998c260e80SJens Wiklander /*
5008c260e80SJens Wiklander * core_mmu_get_block_offset() - Get offset inside a block/page
5018c260e80SJens Wiklander * @tbl_info: Translation table properties
5028c260e80SJens Wiklander * @pa: Physical address
5038c260e80SJens Wiklander * @returns offset within one block of the translation table
5048c260e80SJens Wiklander */
/*
 * core_mmu_get_block_offset() - Get offset inside a block/page
 * @tbl_info:	Translation table properties
 * @pa:		Physical address
 * @returns offset within one block of the translation table
 */
static inline size_t core_mmu_get_block_offset(
		struct core_mmu_table_info *tbl_info, paddr_t pa)
{
	/*
	 * Shift a paddr_t rather than the int constant 1: shifting an int
	 * by 31 or more bits is undefined behavior, and upper table levels
	 * can use such shifts on 64-bit configurations.
	 */
	return pa & (((paddr_t)1 << tbl_info->shift) - 1);
}
5108c260e80SJens Wiklander
5118c260e80SJens Wiklander /*
5128c260e80SJens Wiklander * core_mmu_is_dynamic_vaspace() - Check if memory region belongs to
 * empty virtual address space that is used for dynamic mappings
5148c260e80SJens Wiklander * @mm: memory region to be checked
5158c260e80SJens Wiklander * @returns result of the check
5168c260e80SJens Wiklander */
core_mmu_is_dynamic_vaspace(struct tee_mmap_region * mm)5178c260e80SJens Wiklander static inline bool core_mmu_is_dynamic_vaspace(struct tee_mmap_region *mm)
5188c260e80SJens Wiklander {
51996f43358SJens Wiklander switch (mm->type) {
52096f43358SJens Wiklander case MEM_AREA_RES_VASPACE:
52196f43358SJens Wiklander case MEM_AREA_SHM_VASPACE:
52296f43358SJens Wiklander case MEM_AREA_NEX_DYN_VASPACE:
52396f43358SJens Wiklander case MEM_AREA_TEE_DYN_VASPACE:
52496f43358SJens Wiklander return true;
52596f43358SJens Wiklander default:
52696f43358SJens Wiklander return false;
52796f43358SJens Wiklander }
5288c260e80SJens Wiklander }
5298c260e80SJens Wiklander
5308c260e80SJens Wiklander /*
5318c260e80SJens Wiklander * core_mmu_map_pages() - map list of pages at given virtual address
5328c260e80SJens Wiklander * @vstart: Virtual address where mapping begins
5338c260e80SJens Wiklander * @pages: Array of page addresses
5348c260e80SJens Wiklander * @num_pages: Number of pages
 * @memtype:	Type of memory to be mapped
5368c260e80SJens Wiklander *
 * Note: This function asserts that pages are not mapped executable for
5388c260e80SJens Wiklander * kernel (privileged) mode.
5398c260e80SJens Wiklander *
5408c260e80SJens Wiklander * @returns: TEE_SUCCESS on success, TEE_ERROR_XXX on error
5418c260e80SJens Wiklander */
5428c260e80SJens Wiklander TEE_Result core_mmu_map_pages(vaddr_t vstart, paddr_t *pages, size_t num_pages,
5438c260e80SJens Wiklander enum teecore_memtypes memtype);
5448c260e80SJens Wiklander
5458c260e80SJens Wiklander /*
5468c260e80SJens Wiklander * core_mmu_map_contiguous_pages() - map range of pages at given virtual address
5478c260e80SJens Wiklander * @vstart: Virtual address where mapping begins
5488c260e80SJens Wiklander * @pstart: Physical address of the first page
5498c260e80SJens Wiklander * @num_pages: Number of pages
 * @memtype: Type of memory to be mapped
 *
 * Note: This function asserts that pages are not mapped executable for
 * kernel (privileged) mode.
5548c260e80SJens Wiklander *
5558c260e80SJens Wiklander * @returns: TEE_SUCCESS on success, TEE_ERROR_XXX on error
5568c260e80SJens Wiklander */
5578c260e80SJens Wiklander TEE_Result core_mmu_map_contiguous_pages(vaddr_t vstart, paddr_t pstart,
5588c260e80SJens Wiklander size_t num_pages,
5598c260e80SJens Wiklander enum teecore_memtypes memtype);
5608c260e80SJens Wiklander
5618c260e80SJens Wiklander /*
5628c260e80SJens Wiklander * core_mmu_unmap_pages() - remove mapping at given virtual address
5638c260e80SJens Wiklander * @vstart: Virtual address where mapping begins
5648c260e80SJens Wiklander * @num_pages: Number of pages to unmap
5658c260e80SJens Wiklander */
5668c260e80SJens Wiklander void core_mmu_unmap_pages(vaddr_t vstart, size_t num_pages);
5678c260e80SJens Wiklander
5688c260e80SJens Wiklander /*
5698c260e80SJens Wiklander * core_mmu_user_mapping_is_active() - Report if user mapping is active
5708c260e80SJens Wiklander * @returns true if a user VA space is active, false if user VA space is
5718c260e80SJens Wiklander * inactive.
5728c260e80SJens Wiklander */
5738c260e80SJens Wiklander bool core_mmu_user_mapping_is_active(void);
5748c260e80SJens Wiklander
5758c260e80SJens Wiklander /*
576b1e25277SYu Chien Peter Lin * core_mmu_user_va_range_is_defined() - check if user va range is defined
577b1e25277SYu Chien Peter Lin * @returns true if a user VA space is defined, false if not.
578b1e25277SYu Chien Peter Lin */
579b1e25277SYu Chien Peter Lin bool core_mmu_user_va_range_is_defined(void);
580b1e25277SYu Chien Peter Lin
581b1e25277SYu Chien Peter Lin /*
5828c260e80SJens Wiklander * core_mmu_mattr_is_ok() - Check that supplied mem attributes can be used
5838c260e80SJens Wiklander * @returns true if the attributes can be used, false if not.
5848c260e80SJens Wiklander */
5858c260e80SJens Wiklander bool core_mmu_mattr_is_ok(uint32_t mattr);
5868c260e80SJens Wiklander
58710b19e73SJens Wiklander TEE_Result core_mmu_for_each_map(void *ptr,
58810b19e73SJens Wiklander TEE_Result (*fn)(struct tee_mmap_region *map,
58910b19e73SJens Wiklander void *ptr));
59010b19e73SJens Wiklander
5918c260e80SJens Wiklander void core_mmu_get_mem_by_type(enum teecore_memtypes type, vaddr_t *s,
5928c260e80SJens Wiklander vaddr_t *e);
5938c260e80SJens Wiklander
5948c260e80SJens Wiklander enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa);
5958c260e80SJens Wiklander
/* routines to retrieve shared mem configuration */
core_mmu_is_shm_cached(void)5978c260e80SJens Wiklander static inline bool core_mmu_is_shm_cached(void)
5988c260e80SJens Wiklander {
599f950bedcSJelle Sels return mattr_is_cached(core_mmu_type_to_attr(MEM_AREA_NSEC_SHM));
6008c260e80SJens Wiklander }
6018c260e80SJens Wiklander
6028c260e80SJens Wiklander TEE_Result core_mmu_remove_mapping(enum teecore_memtypes type, void *addr,
6038c260e80SJens Wiklander size_t len);
6048c260e80SJens Wiklander void *core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr,
6058c260e80SJens Wiklander size_t len);
6068c260e80SJens Wiklander
6078c260e80SJens Wiklander /*
6088c260e80SJens Wiklander * core_mmu_find_mapping_exclusive() - Find mapping of specified type and
6098c260e80SJens Wiklander * length. If more than one mapping of
6108c260e80SJens Wiklander * specified type is present, NULL will be
6118c260e80SJens Wiklander * returned.
6128c260e80SJens Wiklander * @type: memory type
6138c260e80SJens Wiklander * @len: length in bytes
6148c260e80SJens Wiklander */
6158c260e80SJens Wiklander struct tee_mmap_region *
6168c260e80SJens Wiklander core_mmu_find_mapping_exclusive(enum teecore_memtypes type, size_t len);
6178c260e80SJens Wiklander
6188c260e80SJens Wiklander /*
619fe16b87bSAlvin Chang * tlbi_va_range() - Invalidate TLB for virtual address range
6208c260e80SJens Wiklander * @va: start virtual address, must be a multiple of @granule
6218c260e80SJens Wiklander * @len: length in bytes of range, must be a multiple of @granule
6228c260e80SJens Wiklander * @granule: granularity of mapping, supported values are
6238c260e80SJens Wiklander * CORE_MMU_PGDIR_SIZE or SMALL_PAGE_SIZE. This value must
6248c260e80SJens Wiklander * match the actual mappings.
6258c260e80SJens Wiklander */
626fe16b87bSAlvin Chang void tlbi_va_range(vaddr_t va, size_t len, size_t granule);
6278c260e80SJens Wiklander
6288c260e80SJens Wiklander /*
629fe16b87bSAlvin Chang * tlbi_va_range_asid() - Invalidate TLB for virtual address range for
6308c260e80SJens Wiklander * a specific ASID
6318c260e80SJens Wiklander * @va: start virtual address, must be a multiple of @granule
6328c260e80SJens Wiklander * @len: length in bytes of range, must be a multiple of @granule
6338c260e80SJens Wiklander * @granule: granularity of mapping, supported values are
6348c260e80SJens Wiklander * CORE_MMU_PGDIR_SIZE or SMALL_PAGE_SIZE. This value must
6358c260e80SJens Wiklander * match the actual mappings.
6368c260e80SJens Wiklander * @asid: Address space identifier
6378c260e80SJens Wiklander */
638fe16b87bSAlvin Chang void tlbi_va_range_asid(vaddr_t va, size_t len, size_t granule, uint32_t asid);
6398c260e80SJens Wiklander
6408c260e80SJens Wiklander /* Check cpu mmu enabled or not */
6418c260e80SJens Wiklander bool cpu_mmu_enabled(void);
6428c260e80SJens Wiklander
6438c260e80SJens Wiklander #ifdef CFG_CORE_DYN_SHM
6448c260e80SJens Wiklander /*
6458c260e80SJens Wiklander * Check if platform defines nsec DDR range(s).
6468c260e80SJens Wiklander * Static SHM (MEM_AREA_NSEC_SHM) is not covered by this API as it is
6478c260e80SJens Wiklander * always present.
6488c260e80SJens Wiklander */
6498c260e80SJens Wiklander bool core_mmu_nsec_ddr_is_defined(void);
6508c260e80SJens Wiklander
6518c260e80SJens Wiklander void core_mmu_set_discovered_nsec_ddr(struct core_mmu_phys_mem *start,
6528c260e80SJens Wiklander size_t nelems);
6538c260e80SJens Wiklander #endif
6548c260e80SJens Wiklander
6558c260e80SJens Wiklander /* Initialize MMU partition */
656b8ef8d0bSJens Wiklander void core_init_mmu_prtn(struct mmu_partition *prtn, struct memory_map *mem_map);
6578c260e80SJens Wiklander
6588c260e80SJens Wiklander unsigned int asid_alloc(void);
6598c260e80SJens Wiklander void asid_free(unsigned int asid);
6608c260e80SJens Wiklander
6618c260e80SJens Wiklander #ifdef CFG_SECURE_DATA_PATH
6628c260e80SJens Wiklander /* Alloc and fill SDP memory objects table - table is NULL terminated */
6638c260e80SJens Wiklander struct mobj **core_sdp_mem_create_mobjs(void);
6648c260e80SJens Wiklander #endif
6658c260e80SJens Wiklander
666b76b2296SJerome Forissier #ifdef CFG_NS_VIRTUALIZATION
6678c260e80SJens Wiklander size_t core_mmu_get_total_pages_size(void);
6688c260e80SJens Wiklander struct mmu_partition *core_alloc_mmu_prtn(void *tables);
6698c260e80SJens Wiklander void core_free_mmu_prtn(struct mmu_partition *prtn);
6708c260e80SJens Wiklander void core_mmu_set_prtn(struct mmu_partition *prtn);
6718c260e80SJens Wiklander void core_mmu_set_default_prtn(void);
6728c260e80SJens Wiklander void core_mmu_set_default_prtn_tbl(void);
6738c260e80SJens Wiklander #endif
6748c260e80SJens Wiklander
67590c16066SJens Wiklander /* Initialize physical memory pool */
67690c16066SJens Wiklander void core_mmu_init_phys_mem(void);
6778c260e80SJens Wiklander
678b8ef8d0bSJens Wiklander void core_init_mmu(struct memory_map *mem_map);
679c02edd30SJens Wiklander
680c02edd30SJens Wiklander void core_mmu_set_info_table(struct core_mmu_table_info *tbl_info,
681c02edd30SJens Wiklander unsigned int level, vaddr_t va_base, void *table);
682c02edd30SJens Wiklander void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
683c02edd30SJens Wiklander struct user_mode_ctx *uctx);
684c02edd30SJens Wiklander void core_mmu_map_region(struct mmu_partition *prtn,
685c02edd30SJens Wiklander struct tee_mmap_region *mm);
686c02edd30SJens Wiklander
687d8ba4baeSJens Wiklander bool arch_va2pa_helper(void *va, paddr_t *pa);
688d8ba4baeSJens Wiklander
689232f1cdeSYu-Chien Peter Lin vaddr_t arch_aslr_base_addr(vaddr_t start_addr, uint64_t seed,
690232f1cdeSYu-Chien Peter Lin unsigned int iteration_count);
691232f1cdeSYu-Chien Peter Lin
core_mmu_check_end_pa(paddr_t pa,size_t len)692c02edd30SJens Wiklander static inline bool core_mmu_check_end_pa(paddr_t pa, size_t len)
693c02edd30SJens Wiklander {
694c02edd30SJens Wiklander paddr_t end_pa = 0;
695c02edd30SJens Wiklander
696c02edd30SJens Wiklander if (ADD_OVERFLOW(pa, len - 1, &end_pa))
697c02edd30SJens Wiklander return false;
698c02edd30SJens Wiklander return core_mmu_check_max_pa(end_pa);
699c02edd30SJens Wiklander }
70086ce921fSJens Wiklander
70175d90854SJens Wiklander /*
70275d90854SJens Wiklander * core_mmu_set_secure_memory() - set physical secure memory range
70375d90854SJens Wiklander * @base: base address of secure memory
70475d90854SJens Wiklander * @size: size of secure memory
70575d90854SJens Wiklander *
70675d90854SJens Wiklander * The physical secure memory range is not known in advance when OP-TEE is
70775d90854SJens Wiklander * relocatable, this information must be supplied once during boot before
70875d90854SJens Wiklander * the translation tables can be initialized and the MMU enabled.
70975d90854SJens Wiklander */
71075d90854SJens Wiklander void core_mmu_set_secure_memory(paddr_t base, size_t size);
71175d90854SJens Wiklander
71286ce921fSJens Wiklander /*
71386ce921fSJens Wiklander * core_mmu_get_secure_memory() - get physical secure memory range
71486ce921fSJens Wiklander * @base: base address of secure memory
71586ce921fSJens Wiklander * @size: size of secure memory
71686ce921fSJens Wiklander *
71786ce921fSJens Wiklander * The physical secure memory range returned covers at least the memory
71886ce921fSJens Wiklander * range used by OP-TEE Core, but may cover more memory depending on the
71986ce921fSJens Wiklander * configuration.
72086ce921fSJens Wiklander */
72186ce921fSJens Wiklander void core_mmu_get_secure_memory(paddr_t *base, paddr_size_t *size);
72286ce921fSJens Wiklander
7238c260e80SJens Wiklander #endif /*__ASSEMBLER__*/
7248c260e80SJens Wiklander
725fbe66cf8SEtienne Carriere #endif /* __MM_CORE_MMU_H */
726