xref: /optee_os/core/include/mm/core_mmu.h (revision 949b0c0c6256c79b714d188839b67a85ec5a0b3b)
1 /* SPDX-License-Identifier: BSD-2-Clause */
2 /*
3  * Copyright (c) 2016, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 #ifndef __MM_CORE_MMU_H
7 #define __MM_CORE_MMU_H
8 
9 #ifndef __ASSEMBLER__
10 #include <assert.h>
11 #include <compiler.h>
12 #include <kernel/user_ta.h>
13 #include <mm/tee_mm.h>
14 #include <mm/tee_mmu_types.h>
15 #include <types_ext.h>
16 #include <util.h>
17 #endif
18 
19 #include <mm/core_mmu_arch.h>
20 #include <platform_config.h>
21 
22 /* A small page is the smallest unit of memory that can be mapped */
23 #define SMALL_PAGE_SIZE			BIT(SMALL_PAGE_SHIFT)
24 #define SMALL_PAGE_MASK			((paddr_t)SMALL_PAGE_SIZE - 1)
25 
26 /*
27  * PGDIR is the translation table above the translation table that holds
28  * the pages.
29  */
30 #define CORE_MMU_PGDIR_SIZE		BIT(CORE_MMU_PGDIR_SHIFT)
31 #define CORE_MMU_PGDIR_MASK		((paddr_t)CORE_MMU_PGDIR_SIZE - 1)
32 
33 /* TA user space code, data, stack and heap are mapped using this granularity */
34 #define CORE_MMU_USER_CODE_SIZE		BIT(CORE_MMU_USER_CODE_SHIFT)
35 #define CORE_MMU_USER_CODE_MASK		((paddr_t)CORE_MMU_USER_CODE_SIZE - 1)
36 
37 /* TA user space parameters are mapped using this granularity */
38 #define CORE_MMU_USER_PARAM_SIZE	BIT(CORE_MMU_USER_PARAM_SHIFT)
39 #define CORE_MMU_USER_PARAM_MASK	((paddr_t)CORE_MMU_USER_PARAM_SIZE - 1)
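
/*
 * Illustrative sketch (not part of the API): the SIZE/MASK pairs above are
 * typically used for alignment arithmetic at the corresponding granularity,
 * for example:
 *
 *   paddr_t page_start = pa & ~SMALL_PAGE_MASK;
 *   bool page_aligned = !(pa & SMALL_PAGE_MASK);
 *   size_t rounded_len = ROUNDUP(len, SMALL_PAGE_SIZE);
 */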
40 
41 /*
42  * Identity mapping constraint: the virtual base address is the physical start address.
43  * Macros not defined by the platform get the default values below.
44  */
45 #ifndef TEE_RAM_VA_SIZE
46 #define TEE_RAM_VA_SIZE			CORE_MMU_PGDIR_SIZE
47 #endif
48 
49 #ifndef TEE_LOAD_ADDR
50 #define TEE_LOAD_ADDR			TEE_RAM_START
51 #endif
52 
53 #ifndef STACK_ALIGNMENT
54 #define STACK_ALIGNMENT			(sizeof(long) * U(2))
55 #endif
56 
57 #ifndef __ASSEMBLER__
58 /*
59  * Memory area type:
60  * MEM_AREA_TEE_RAM:  core RAM (read/write/executable, secure, reserved to TEE)
61  * MEM_AREA_TEE_RAM_RX:  core private read-only/executable memory (secure)
62  * MEM_AREA_TEE_RAM_RO:  core private read-only/non-executable memory (secure)
63  * MEM_AREA_TEE_RAM_RW:  core private read/write/non-executable memory (secure)
64  * MEM_AREA_INIT_RAM_RO: init private read-only/non-executable memory (secure)
65  * MEM_AREA_INIT_RAM_RX: init private read-only/executable memory (secure)
66  * MEM_AREA_NEX_RAM_RO: nexus private read-only/non-executable memory (secure)
67  * MEM_AREA_NEX_RAM_RW: nexus private r/w/non-executable memory (secure)
68  * MEM_AREA_NEX_DYN_VASPACE: nexus private dynamic memory map (secure)
69  * MEM_AREA_TEE_DYN_VASPACE: core private dynamic memory (secure)
70  * MEM_AREA_TEE_COHERENT: teecore coherent RAM (secure, reserved to TEE)
71  * MEM_AREA_TEE_ASAN: core address sanitizer RAM (secure, reserved to TEE)
72  * MEM_AREA_IDENTITY_MAP_RX: core identity mapped r/o executable memory (secure)
73  * MEM_AREA_NSEC_SHM: NonSecure shared RAM between NSec and TEE.
74  * MEM_AREA_NEX_NSEC_SHM: nexus non-secure shared RAM between NSec and TEE.
75  * MEM_AREA_RAM_NSEC: NonSecure RAM storing data
76  * MEM_AREA_RAM_SEC:  Secure RAM storing some secrets
77  * MEM_AREA_ROM_SEC:  Secure read only memory storing some secrets
78  * MEM_AREA_IO_NSEC:  NonSecure HW mapped registers
79  * MEM_AREA_IO_SEC:   Secure HW mapped registers
80  * MEM_AREA_EXT_DT:   Memory where the external device tree is loaded
81  * MEM_AREA_MANIFEST_DT: Memory where the manifest device tree is loaded
82  * MEM_AREA_TRANSFER_LIST: Memory area mapped for Transfer List
83  * MEM_AREA_RES_VASPACE: Reserved virtual memory space
84  * MEM_AREA_SHM_VASPACE: Virtual memory space for dynamic shared memory buffers
85  * MEM_AREA_TS_VASPACE: TS va space, only used with phys_to_virt()
86  * MEM_AREA_DDR_OVERALL: Overall DDR address range, candidate for dynamic shm.
87  * MEM_AREA_SEC_RAM_OVERALL: Whole secure RAM
88  * MEM_AREA_MAXTYPE:  lowest invalid 'type' value (one past the last valid type)
89  */
90 enum teecore_memtypes {
91 	MEM_AREA_TEE_RAM = 1,
92 	MEM_AREA_TEE_RAM_RX,
93 	MEM_AREA_TEE_RAM_RO,
94 	MEM_AREA_TEE_RAM_RW,
95 	MEM_AREA_INIT_RAM_RO,
96 	MEM_AREA_INIT_RAM_RX,
97 	MEM_AREA_NEX_RAM_RO,
98 	MEM_AREA_NEX_RAM_RW,
99 	MEM_AREA_NEX_DYN_VASPACE,
100 	MEM_AREA_TEE_DYN_VASPACE,
101 	MEM_AREA_TEE_COHERENT,
102 	MEM_AREA_TEE_ASAN,
103 	MEM_AREA_IDENTITY_MAP_RX,
104 	MEM_AREA_NSEC_SHM,
105 	MEM_AREA_NEX_NSEC_SHM,
106 	MEM_AREA_RAM_NSEC,
107 	MEM_AREA_RAM_SEC,
108 	MEM_AREA_ROM_SEC,
109 	MEM_AREA_IO_NSEC,
110 	MEM_AREA_IO_SEC,
111 	MEM_AREA_EXT_DT,
112 	MEM_AREA_MANIFEST_DT,
113 	MEM_AREA_TRANSFER_LIST,
114 	MEM_AREA_RES_VASPACE,
115 	MEM_AREA_SHM_VASPACE,
116 	MEM_AREA_TS_VASPACE,
117 	MEM_AREA_PAGER_VASPACE,
118 	MEM_AREA_SDP_MEM,
119 	MEM_AREA_DDR_OVERALL,
120 	MEM_AREA_SEC_RAM_OVERALL,
121 	MEM_AREA_MAXTYPE
122 };
123 
124 static inline const char *teecore_memtype_name(enum teecore_memtypes type)
125 {
126 	static const char * const names[] = {
127 		[MEM_AREA_TEE_RAM] = "TEE_RAM_RWX",
128 		[MEM_AREA_TEE_RAM_RX] = "TEE_RAM_RX",
129 		[MEM_AREA_TEE_RAM_RO] = "TEE_RAM_RO",
130 		[MEM_AREA_TEE_RAM_RW] = "TEE_RAM_RW",
131 		[MEM_AREA_INIT_RAM_RO] = "INIT_RAM_RO",
132 		[MEM_AREA_INIT_RAM_RX] = "INIT_RAM_RX",
133 		[MEM_AREA_NEX_RAM_RO] = "NEX_RAM_RO",
134 		[MEM_AREA_NEX_RAM_RW] = "NEX_RAM_RW",
135 		[MEM_AREA_NEX_DYN_VASPACE] = "NEX_DYN_VASPACE",
136 		[MEM_AREA_TEE_DYN_VASPACE] = "TEE_DYN_VASPACE",
137 		[MEM_AREA_TEE_ASAN] = "TEE_ASAN",
138 		[MEM_AREA_IDENTITY_MAP_RX] = "IDENTITY_MAP_RX",
139 		[MEM_AREA_TEE_COHERENT] = "TEE_COHERENT",
140 		[MEM_AREA_NSEC_SHM] = "NSEC_SHM",
141 		[MEM_AREA_NEX_NSEC_SHM] = "NEX_NSEC_SHM",
142 		[MEM_AREA_RAM_NSEC] = "RAM_NSEC",
143 		[MEM_AREA_RAM_SEC] = "RAM_SEC",
144 		[MEM_AREA_ROM_SEC] = "ROM_SEC",
145 		[MEM_AREA_IO_NSEC] = "IO_NSEC",
146 		[MEM_AREA_IO_SEC] = "IO_SEC",
147 		[MEM_AREA_EXT_DT] = "EXT_DT",
148 		[MEM_AREA_MANIFEST_DT] = "MANIFEST_DT",
149 		[MEM_AREA_TRANSFER_LIST] = "TRANSFER_LIST",
150 		[MEM_AREA_RES_VASPACE] = "RES_VASPACE",
151 		[MEM_AREA_SHM_VASPACE] = "SHM_VASPACE",
152 		[MEM_AREA_TS_VASPACE] = "TS_VASPACE",
153 		[MEM_AREA_PAGER_VASPACE] = "PAGER_VASPACE",
154 		[MEM_AREA_SDP_MEM] = "SDP_MEM",
155 		[MEM_AREA_DDR_OVERALL] = "DDR_OVERALL",
156 		[MEM_AREA_SEC_RAM_OVERALL] = "SEC_RAM_OVERALL",
157 	};
158 
159 	COMPILE_TIME_ASSERT(ARRAY_SIZE(names) == MEM_AREA_MAXTYPE);
160 	return names[type];
161 }
162 
163 #ifdef CFG_CORE_RWDATA_NOEXEC
164 #define MEM_AREA_TEE_RAM_RW_DATA	MEM_AREA_TEE_RAM_RW
165 #else
166 #define MEM_AREA_TEE_RAM_RW_DATA	MEM_AREA_TEE_RAM
167 #endif
168 
169 struct core_mmu_phys_mem {
170 	const char *name;
171 	enum teecore_memtypes type;
172 	__extension__ union {
173 #if __SIZEOF_LONG__ != __SIZEOF_PADDR__
174 		struct {
175 			uint32_t lo_addr;
176 			uint32_t hi_addr;
177 		};
178 #endif
179 		paddr_t addr;
180 	};
181 	__extension__ union {
182 #if __SIZEOF_LONG__ != __SIZEOF_PADDR__
183 		struct {
184 			uint32_t lo_size;
185 			uint32_t hi_size;
186 		};
187 #endif
188 		paddr_size_t size;
189 	};
190 };
191 
192 #define __register_memory(_name, _type, _addr, _size, _section) \
193 	SCATTERED_ARRAY_DEFINE_ITEM(_section, struct core_mmu_phys_mem) = \
194 		{ .name = (_name), .type = (_type), .addr = (_addr), \
195 		  .size = (_size) }
196 
197 #if __SIZEOF_LONG__ != __SIZEOF_PADDR__
198 #define __register_memory_ul(_name, _type, _addr, _size, _section) \
199 	SCATTERED_ARRAY_DEFINE_ITEM(_section, struct core_mmu_phys_mem) = \
200 		{ .name = (_name), .type = (_type), .lo_addr = (_addr), \
201 		  .lo_size = (_size) }
202 #else
203 #define __register_memory_ul(_name, _type, _addr, _size, _section) \
204 		__register_memory(_name, _type, _addr, _size, _section)
205 #endif
206 
207 #define register_phys_mem(type, addr, size) \
208 		__register_memory(#addr, (type), (addr), (size), \
209 				  phys_mem_map)
210 
211 #define register_phys_mem_ul(type, addr, size) \
212 		__register_memory_ul(#addr, (type), (addr), (size), \
213 				     phys_mem_map)
214 
215 /* Same as register_phys_mem() but with PGDIR_SIZE granularity */
216 #define register_phys_mem_pgdir(type, addr, size) \
217 	__register_memory(#addr, type, ROUNDDOWN(addr, CORE_MMU_PGDIR_SIZE), \
218 			  ROUNDUP(size + addr - \
219 					ROUNDDOWN(addr, CORE_MMU_PGDIR_SIZE), \
220 				  CORE_MMU_PGDIR_SIZE), phys_mem_map)
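
/*
 * Usage sketch: a platform typically registers its secure device memory from
 * one of its .c files. CONSOLE_UART_BASE, GIC_BASE and GIC_SIZE below are
 * hypothetical platform_config.h values used only for illustration:
 *
 *   register_phys_mem_pgdir(MEM_AREA_IO_SEC, CONSOLE_UART_BASE,
 *                           CORE_MMU_PGDIR_SIZE);
 *   register_phys_mem(MEM_AREA_IO_SEC, GIC_BASE, GIC_SIZE);
 */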
221 
222 #ifdef CFG_SECURE_DATA_PATH
223 #define register_sdp_mem(addr, size) \
224 		__register_memory(#addr, MEM_AREA_SDP_MEM, (addr), (size), \
225 				  phys_sdp_mem)
226 #else
227 #define register_sdp_mem(addr, size) \
228 		static int CONCAT(__register_sdp_mem_unused, __COUNTER__) \
229 			__unused
230 #endif
231 
232 /* register_dynamic_shm() is deprecated, please use register_ddr() instead */
233 #define register_dynamic_shm(addr, size) \
234 		__register_memory(#addr, MEM_AREA_DDR_OVERALL, (addr), (size), \
235 				  phys_ddr_overall_compat)
236 
237 /*
238  * register_ddr() - Define a memory range
239  * @addr: Base address
240  * @size: Length
241  *
242  * This macro can be used multiple times to define disjoint ranges. During
243  * initialization, holes are carved out of these ranges where they overlap
244  * with special memory, for instance memory registered with register_sdp_mem().
245  *
246  * The memory that remains is accepted as non-secure shared memory when
247  * communicating with normal world.
248  *
249  * This macro is an alternative to supplying the memory description with a
250  * devicetree blob.
251  */
252 #define register_ddr(addr, size) \
253 		__register_memory(#addr, MEM_AREA_DDR_OVERALL, (addr), \
254 				  (size), phys_ddr_overall)
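
/*
 * Usage sketch (DRAM0_BASE/DRAM0_SIZE and DRAM1_BASE/DRAM1_SIZE are
 * hypothetical platform values): each invocation contributes one range as
 * described above, for example:
 *
 *   register_ddr(DRAM0_BASE, DRAM0_SIZE);
 *   register_ddr(DRAM1_BASE, DRAM1_SIZE);
 */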
255 
256 #define phys_ddr_overall_begin \
257 	SCATTERED_ARRAY_BEGIN(phys_ddr_overall, struct core_mmu_phys_mem)
258 
259 #define phys_ddr_overall_end \
260 	SCATTERED_ARRAY_END(phys_ddr_overall, struct core_mmu_phys_mem)
261 
262 #define phys_ddr_overall_compat_begin \
263 	SCATTERED_ARRAY_BEGIN(phys_ddr_overall_compat, struct core_mmu_phys_mem)
264 
265 #define phys_ddr_overall_compat_end \
266 	SCATTERED_ARRAY_END(phys_ddr_overall_compat, struct core_mmu_phys_mem)
267 
268 #define phys_sdp_mem_begin \
269 	SCATTERED_ARRAY_BEGIN(phys_sdp_mem, struct core_mmu_phys_mem)
270 
271 #define phys_sdp_mem_end \
272 	SCATTERED_ARRAY_END(phys_sdp_mem, struct core_mmu_phys_mem)
273 
274 #define phys_mem_map_begin \
275 	SCATTERED_ARRAY_BEGIN(phys_mem_map, struct core_mmu_phys_mem)
276 
277 #define phys_mem_map_end \
278 	SCATTERED_ARRAY_END(phys_mem_map, struct core_mmu_phys_mem)
279 
280 /* Virtual memory pool for core mappings */
281 extern tee_mm_pool_t core_virt_mem_pool;
282 
283 /* Virtual memory pool for shared memory mappings */
284 extern tee_mm_pool_t core_virt_shm_pool;
285 
286 #ifdef CFG_CORE_RESERVED_SHM
287 /* Default NSec shared memory allocated from NSec world */
288 extern unsigned long default_nsec_shm_paddr;
289 extern unsigned long default_nsec_shm_size;
290 #endif
291 
292 /*
293  * Physical load address of OP-TEE updated during boot if needed to reflect
294  * the value used.
295  */
296 #ifdef CFG_CORE_PHYS_RELOCATABLE
297 extern unsigned long core_mmu_tee_load_pa;
298 #else
299 extern const unsigned long core_mmu_tee_load_pa;
300 #endif
301 
302 void core_init_mmu_map(unsigned long seed, struct core_mmu_config *cfg);
303 void core_init_mmu_regs(struct core_mmu_config *cfg);
304 /* Copy static memory map from temporary boot_mem to heap */
305 void core_mmu_save_mem_map(void);
306 
307 /* Arch-specific helper to optimize the use of a single MMU translation table */
308 bool core_mmu_prefer_tee_ram_at_top(paddr_t paddr);
309 
310 /*
311  * struct mmu_partition - stores MMU partition.
312  *
313  * Basically it represents the whole MMU mapping. It is possible
314  * to create multiple partitions and switch between them at runtime,
315  * effectively changing how OP-TEE sees memory.
316  * This is an opaque struct which is defined differently for
317  * the v7 and LPAE MMUs.
318  *
319  * This structure is mostly used when virtualization is enabled.
320  * When CFG_NS_VIRTUALIZATION==n only the default partition exists.
321  */
322 struct mmu_partition;
323 
324 /*
325  * core_mmu_get_user_va_range() - Return range of user va space
326  * @base:	Lowest user virtual address
327  * @size:	Size in bytes of user address space
328  */
329 void core_mmu_get_user_va_range(vaddr_t *base, size_t *size);
330 
331 /*
332  * enum core_mmu_fault - different kinds of faults
333  * @CORE_MMU_FAULT_ALIGNMENT:		alignment fault
334  * @CORE_MMU_FAULT_DEBUG_EVENT:		debug event
335  * @CORE_MMU_FAULT_TRANSLATION:		translation fault
336  * @CORE_MMU_FAULT_WRITE_PERMISSION:	Permission fault during write
337  * @CORE_MMU_FAULT_READ_PERMISSION:	Permission fault during read
338  * @CORE_MMU_FAULT_ASYNC_EXTERNAL:	asynchronous external abort
339  * @CORE_MMU_FAULT_ACCESS_BIT:		access bit fault
340  * @CORE_MMU_FAULT_TAG_CHECK:		tag check fault
341  * @CORE_MMU_FAULT_SYNC_EXTERNAL:	synchronous external abort
342  * @CORE_MMU_FAULT_OTHER:		Other/unknown fault
343  */
344 enum core_mmu_fault {
345 	CORE_MMU_FAULT_ALIGNMENT,
346 	CORE_MMU_FAULT_DEBUG_EVENT,
347 	CORE_MMU_FAULT_TRANSLATION,
348 	CORE_MMU_FAULT_WRITE_PERMISSION,
349 	CORE_MMU_FAULT_READ_PERMISSION,
350 	CORE_MMU_FAULT_ASYNC_EXTERNAL,
351 	CORE_MMU_FAULT_ACCESS_BIT,
352 	CORE_MMU_FAULT_TAG_CHECK,
353 	CORE_MMU_FAULT_SYNC_EXTERNAL,
354 	CORE_MMU_FAULT_OTHER,
355 };
356 
357 /*
358  * core_mmu_get_fault_type() - get fault type
359  * @fault_descr:	Content of fault status or exception syndrome register
360  * @returns an enum describing the content of fault status register.
361  */
362 enum core_mmu_fault core_mmu_get_fault_type(uint32_t fault_descr);
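
/*
 * Illustrative sketch (not a quote of the actual abort handler): a caller
 * can dispatch on the returned value, where handle_page_fault() is a
 * placeholder for whatever recovery the caller implements:
 *
 *   switch (core_mmu_get_fault_type(fault_descr)) {
 *   case CORE_MMU_FAULT_TRANSLATION:
 *   case CORE_MMU_FAULT_READ_PERMISSION:
 *   case CORE_MMU_FAULT_WRITE_PERMISSION:
 *           handle_page_fault();
 *           break;
 *   default:
 *           panic();
 *   }
 */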
363 
364 /*
365  * core_mmu_type_to_attr() - convert memory type to attribute
366  * @t: memory type
367  * @returns an attribute that can be passed to core_mmu_set_entry() and friends
368  */
369 uint32_t core_mmu_type_to_attr(enum teecore_memtypes t);
370 
371 /*
372  * core_mmu_create_user_map() - Create user mode mapping
373  * @uctx:	Pointer to user mode context
374  * @map:	MMU configuration to use when activating this VA space
375  */
376 void core_mmu_create_user_map(struct user_mode_ctx *uctx,
377 			      struct core_mmu_user_map *map);
378 /*
379  * core_mmu_get_user_map() - Reads current MMU configuration for user VA space
380  * @map:	MMU configuration for current user VA space.
381  */
382 void core_mmu_get_user_map(struct core_mmu_user_map *map);
383 
384 /*
385  * core_mmu_set_user_map() - Set new MMU configuration for user VA space
386  * @map:	User context MMU configuration or NULL to set core VA space
387  *
388  * Activate user VA space mapping and set its ASID if @map is not NULL,
389  * otherwise activate core mapping and set ASID to 0.
390  */
391 void core_mmu_set_user_map(struct core_mmu_user_map *map);
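
/*
 * Illustrative sketch (one possible pattern, not a quote of core code):
 * code that must temporarily run without the user VA space can save and
 * restore the configuration:
 *
 *   struct core_mmu_user_map map = { };
 *
 *   core_mmu_get_user_map(&map);
 *   core_mmu_set_user_map(NULL);    (activate the core-only mapping)
 *   ...do work without the user VA space mapped...
 *   core_mmu_set_user_map(&map);    (restore the saved user VA space)
 */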
392 
393 /*
394  * struct core_mmu_table_info - Properties for a translation table
395  * @table:	Pointer to translation table
396  * @va_base:	VA base address of the translation table
397  * @level:	Translation table level
398  * @next_level:	Finer grained translation table level according to @level.
399  * @shift:	The shift of each entry in the table
400  * @num_entries: Number of entries in this table.
401  */
402 struct core_mmu_table_info {
403 	void *table;
404 	vaddr_t va_base;
405 	unsigned num_entries;
406 #ifdef CFG_NS_VIRTUALIZATION
407 	struct mmu_partition *prtn;
408 #endif
409 	uint8_t level;
410 	uint8_t shift;
411 	uint8_t next_level;
412 };
413 
414 /*
415  * core_mmu_find_table() - Locates a translation table
416  * @prtn:	MMU partition where search should be performed
417  * @va:		Virtual address for the table to cover
418  * @max_level:	Don't traverse beyond this level
419  * @tbl_info:	Pointer to where to store properties.
420  * @return true if a translation table was found, false on error
421  */
422 bool core_mmu_find_table(struct mmu_partition *prtn, vaddr_t va,
423 			 unsigned max_level,
424 			 struct core_mmu_table_info *tbl_info);
425 
426 /*
427  * core_mmu_entry_to_finer_grained() - divide mapping at current level into
428  *     smaller ones so memory can be mapped with finer granularity
429  * @tbl_info:	table where target record located
430  * @idx:	index of record for which a pdgir must be setup.
431  * @secure:	true/false if pgdir maps secure/non-secure memory (32bit mmu)
432  * @return true on successful, false on error
433  */
434 bool core_mmu_entry_to_finer_grained(struct core_mmu_table_info *tbl_info,
435 				     unsigned int idx, bool secure);
436 
437 void core_mmu_set_entry_primitive(void *table, size_t level, size_t idx,
438 				  paddr_t pa, uint32_t attr);
439 
440 void core_mmu_get_user_pgdir(struct core_mmu_table_info *pgd_info);
441 
442 /*
443  * core_mmu_set_entry() - Set entry in translation table
444  * @tbl_info:	Translation table properties
445  * @idx:	Index of entry to update
446  * @pa:		Physical address to assign entry
447  * @attr:	Attributes to assign entry
448  */
449 void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
450 			paddr_t pa, uint32_t attr);
451 
452 void core_mmu_get_entry_primitive(const void *table, size_t level, size_t idx,
453 				  paddr_t *pa, uint32_t *attr);
454 
455 /*
456  * core_mmu_get_entry() - Get entry from translation table
457  * @tbl_info:	Translation table properties
458  * @idx:	Index of entry to read
459  * @pa:		Physical address is returned here if pa is not NULL
460  * @attr:	Attributes are returned here if attr is not NULL
461  */
462 void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
463 			paddr_t *pa, uint32_t *attr);
464 
465 /*
466  * core_mmu_va2idx() - Translate from virtual address to table index
467  * @tbl_info:	Translation table properties
468  * @va:		Virtual address to translate
469  * @returns index in translation table
470  */
471 static inline unsigned core_mmu_va2idx(struct core_mmu_table_info *tbl_info,
472 			vaddr_t va)
473 {
474 	return (va - tbl_info->va_base) >> tbl_info->shift;
475 }
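
/*
 * Illustrative sketch (assumptions: passing NULL selects the currently
 * active partition and UINT_MAX places no limit on the traversed level):
 * looking up the entry that maps a given VA could be done as:
 *
 *   struct core_mmu_table_info tbl_info = { };
 *   paddr_t pa = 0;
 *   uint32_t attr = 0;
 *
 *   if (core_mmu_find_table(NULL, va, UINT_MAX, &tbl_info)) {
 *           unsigned int idx = core_mmu_va2idx(&tbl_info, va);
 *
 *           core_mmu_get_entry(&tbl_info, idx, &pa, &attr);
 *   }
 */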
476 
477 /*
478  * core_mmu_idx2va() - Translate from table index to virtual address
479  * @tbl_info:	Translation table properties
480  * @idx:	Index to translate
481  * @returns Virtual address
482  */
483 static inline vaddr_t core_mmu_idx2va(struct core_mmu_table_info *tbl_info,
484 			unsigned idx)
485 {
486 	return (idx << tbl_info->shift) + tbl_info->va_base;
487 }
488 
489 /*
490  * core_mmu_get_block_offset() - Get offset inside a block/page
491  * @tbl_info:	Translation table properties
492  * @pa:		Physical address
493  * @returns offset within one block of the translation table
494  */
495 static inline size_t core_mmu_get_block_offset(
496 			struct core_mmu_table_info *tbl_info, paddr_t pa)
497 {
498 	return pa & ((1 << tbl_info->shift) - 1);
499 }
500 
501 /*
502  * core_mmu_is_dynamic_vaspace() - Check if a memory region belongs to
503  *  an empty virtual address space that is used for dynamic mappings
504  * @mm:		memory region to be checked
505  * @returns result of the check
506  */
507 static inline bool core_mmu_is_dynamic_vaspace(struct tee_mmap_region *mm)
508 {
509 	switch (mm->type) {
510 	case MEM_AREA_RES_VASPACE:
511 	case MEM_AREA_SHM_VASPACE:
512 	case MEM_AREA_NEX_DYN_VASPACE:
513 	case MEM_AREA_TEE_DYN_VASPACE:
514 		return true;
515 	default:
516 		return false;
517 	}
518 }
519 
520 /*
521  * core_mmu_map_pages() - map list of pages at given virtual address
522  * @vstart:	Virtual address where mapping begins
523  * @pages:	Array of page addresses
524  * @num_pages:	Number of pages
525  * @memtype:	Type of memory to be mapped
526  *
527  * Note: This function asserts that pages are not mapped executable for
528  * kernel (privileged) mode.
529  *
530  * @returns:	TEE_SUCCESS on success, TEE_ERROR_XXX on error
531  */
532 TEE_Result core_mmu_map_pages(vaddr_t vstart, paddr_t *pages, size_t num_pages,
533 			      enum teecore_memtypes memtype);
534 
535 /*
536  * core_mmu_map_contiguous_pages() - map range of pages at given virtual address
537  * @vstart:	Virtual address where mapping begins
538  * @pstart:	Physical address of the first page
539  * @num_pages:	Number of pages
540  * @memtype:	Type of memory to be mapped
541  *
542  * Note: This function asserts that pages are not mapped executable for
543  * kernel (privileged) mode.
544  *
545  * @returns:	TEE_SUCCESS on success, TEE_ERROR_XXX on error
546  */
547 TEE_Result core_mmu_map_contiguous_pages(vaddr_t vstart, paddr_t pstart,
548 					 size_t num_pages,
549 					 enum teecore_memtypes memtype);
550 
551 /*
552  * core_mmu_unmap_pages() - remove mapping at given virtual address
553  * @vstart:	Virtual address where mapping begins
554  * @num_pages:	Number of pages to unmap
555  */
556 void core_mmu_unmap_pages(vaddr_t vstart, size_t num_pages);
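
/*
 * Illustrative sketch (the memory type is chosen only for illustration):
 * mapping a physically contiguous buffer and unmapping it again might look
 * like:
 *
 *   if (core_mmu_map_contiguous_pages(va, pa, num_pages,
 *                                     MEM_AREA_TEE_RAM_RW) == TEE_SUCCESS) {
 *           ...use the memory through va...
 *           core_mmu_unmap_pages(va, num_pages);
 *   }
 */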
557 
558 /*
559  * core_mmu_user_mapping_is_active() - Report if user mapping is active
560  * @returns true if a user VA space is active, false if user VA space is
561  *          inactive.
562  */
563 bool core_mmu_user_mapping_is_active(void);
564 
565 /*
566  * core_mmu_user_va_range_is_defined() - check if user va range is defined
567  * @returns true if a user VA space is defined, false if not.
568  */
569 bool core_mmu_user_va_range_is_defined(void);
570 
571 /*
572  * core_mmu_mattr_is_ok() - Check that supplied mem attributes can be used
573  * @returns true if the attributes can be used, false if not.
574  */
575 bool core_mmu_mattr_is_ok(uint32_t mattr);
576 
577 TEE_Result core_mmu_for_each_map(void *ptr,
578 				 TEE_Result (*fn)(struct tee_mmap_region *map,
579 						  void *ptr));
580 
581 void core_mmu_get_mem_by_type(enum teecore_memtypes type, vaddr_t *s,
582 			      vaddr_t *e);
583 
584 enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa);
585 
586 /* routines to retrieve shared mem configuration */
587 static inline bool core_mmu_is_shm_cached(void)
588 {
589 	return mattr_is_cached(core_mmu_type_to_attr(MEM_AREA_NSEC_SHM));
590 }
591 
592 TEE_Result core_mmu_remove_mapping(enum teecore_memtypes type, void *addr,
593 				   size_t len);
594 void *core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr,
595 			   size_t len);
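
/*
 * Illustrative sketch (DEV_BASE/DEV_SIZE are placeholders): dynamically
 * mapping a secure device and removing the mapping again:
 *
 *   void *va = core_mmu_add_mapping(MEM_AREA_IO_SEC, DEV_BASE, DEV_SIZE);
 *
 *   if (va) {
 *           ...access the device registers through va...
 *           core_mmu_remove_mapping(MEM_AREA_IO_SEC, va, DEV_SIZE);
 *   }
 */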
596 
597 /*
598  * core_mmu_find_mapping_exclusive() - Find mapping of specified type and
599  *				       length. If more than one mapping of
600  *				       specified type is present, NULL will be
601  *				       returned.
602  * @type:	memory type
603  * @len:	length in bytes
604  */
605 struct tee_mmap_region *
606 core_mmu_find_mapping_exclusive(enum teecore_memtypes type, size_t len);
607 
608 /*
609  * tlbi_va_range() - Invalidate TLB for virtual address range
610  * @va:		start virtual address, must be a multiple of @granule
611  * @len:	length in bytes of range, must be a multiple of @granule
612  * @granule:	granularity of mapping, supported values are
613  *		CORE_MMU_PGDIR_SIZE or SMALL_PAGE_SIZE. This value must
614  *		match the actual mappings.
615  */
616 void tlbi_va_range(vaddr_t va, size_t len, size_t granule);
617 
618 /*
619  * tlbi_va_range_asid() - Invalidate TLB for virtual address range for
620  *			  a specific ASID
621  * @va:		start virtual address, must be a multiple of @granule
622  * @len:	length in bytes of range, must be a multiple of @granule
623  * @granule:	granularity of mapping, supported values are
624  *		CORE_MMU_PGDIR_SIZE or SMALL_PAGE_SIZE. This value must
625  *		match the actual mappings.
626  * @asid:	Address space identifier
627  */
628 void tlbi_va_range_asid(vaddr_t va, size_t len, size_t granule, uint32_t asid);
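
/*
 * Illustrative sketch: after changing a range of small-page mappings the
 * corresponding TLB entries must be invalidated, for example:
 *
 *   tlbi_va_range(va, num_pages * SMALL_PAGE_SIZE, SMALL_PAGE_SIZE);
 */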
629 
630 /* Check whether the CPU MMU is enabled */
631 bool cpu_mmu_enabled(void);
632 
633 #ifdef CFG_CORE_DYN_SHM
634 /*
635  * Check if platform defines nsec DDR range(s).
636  * Static SHM (MEM_AREA_NSEC_SHM) is not covered by this API as it is
637  * always present.
638  */
639 bool core_mmu_nsec_ddr_is_defined(void);
640 
641 void core_mmu_set_discovered_nsec_ddr(struct core_mmu_phys_mem *start,
642 				      size_t nelems);
643 #endif
644 
645 /* Initialize MMU partition */
646 void core_init_mmu_prtn(struct mmu_partition *prtn, struct memory_map *mem_map);
647 
648 unsigned int asid_alloc(void);
649 void asid_free(unsigned int asid);
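
/*
 * Illustrative sketch (assumption: asid_alloc() returns 0 when no ASID is
 * available): an ASID typically follows the lifetime of a user mode context:
 *
 *   unsigned int asid = asid_alloc();
 *
 *   if (!asid)
 *           return TEE_ERROR_OUT_OF_MEMORY;
 *   ...
 *   asid_free(asid);
 */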
650 
651 #ifdef CFG_SECURE_DATA_PATH
652 /* Alloc and fill SDP memory objects table - table is NULL terminated */
653 struct mobj **core_sdp_mem_create_mobjs(void);
654 #endif
655 
656 #ifdef CFG_NS_VIRTUALIZATION
657 size_t core_mmu_get_total_pages_size(void);
658 struct mmu_partition *core_alloc_mmu_prtn(void *tables);
659 void core_free_mmu_prtn(struct mmu_partition *prtn);
660 void core_mmu_set_prtn(struct mmu_partition *prtn);
661 void core_mmu_set_default_prtn(void);
662 void core_mmu_set_default_prtn_tbl(void);
663 #endif
664 
665 /* Initialize physical memory pool */
666 void core_mmu_init_phys_mem(void);
667 
668 void core_init_mmu(struct memory_map *mem_map);
669 
670 void core_mmu_set_info_table(struct core_mmu_table_info *tbl_info,
671 			     unsigned int level, vaddr_t va_base, void *table);
672 void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
673 				struct user_mode_ctx *uctx);
674 void core_mmu_map_region(struct mmu_partition *prtn,
675 			 struct tee_mmap_region *mm);
676 
677 bool arch_va2pa_helper(void *va, paddr_t *pa);
678 
679 static inline bool core_mmu_check_end_pa(paddr_t pa, size_t len)
680 {
681 	paddr_t end_pa = 0;
682 
683 	if (ADD_OVERFLOW(pa, len - 1, &end_pa))
684 		return false;
685 	return core_mmu_check_max_pa(end_pa);
686 }
687 
688 /*
689  * core_mmu_set_secure_memory() - set physical secure memory range
690  * @base: base address of secure memory
691  * @size: size of secure memory
692  *
693  * The physical secure memory range is not known in advance when OP-TEE is
694  * relocatable; this information must be supplied once during boot before
695  * the translation tables can be initialized and the MMU enabled.
696  */
697 void core_mmu_set_secure_memory(paddr_t base, size_t size);
698 
699 /*
700  * core_mmu_get_secure_memory() - get physical secure memory range
701  * @base: base address of secure memory
702  * @size: size of secure memory
703  *
704  * The physical secure memory range returned covers at least the memory
705  * range used by OP-TEE Core, but may cover more memory depending on the
706  * configuration.
707  */
708 void core_mmu_get_secure_memory(paddr_t *base, paddr_size_t *size);
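
/*
 * Illustrative sketch of a caller:
 *
 *   paddr_t sec_base = 0;
 *   paddr_size_t sec_size = 0;
 *
 *   core_mmu_get_secure_memory(&sec_base, &sec_size);
 */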
709 
710 #endif /*__ASSEMBLER__*/
711 
712 #endif /* __MM_CORE_MMU_H */
713