xref: /optee_os/core/include/mm/core_mmu.h (revision 92d75aefed568a557dbd9152d749a4bc320fa9f2)
1 /* SPDX-License-Identifier: BSD-2-Clause */
2 /*
3  * Copyright (c) 2016, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 #ifndef CORE_MMU_H
7 #define CORE_MMU_H
8 
9 #ifndef __ASSEMBLER__
10 #include <assert.h>
11 #include <compiler.h>
12 #include <kernel/user_ta.h>
13 #include <mm/tee_mmu_types.h>
14 #include <types_ext.h>
15 #include <util.h>
16 #endif
17 
18 #include <mm/core_mmu_arch.h>
19 #include <platform_config.h>
20 
21 /* A small page is the smallest unit of memory that can be mapped */
22 #define SMALL_PAGE_SIZE			BIT(SMALL_PAGE_SHIFT)
23 #define SMALL_PAGE_MASK			((paddr_t)SMALL_PAGE_SIZE - 1)
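
/*
 * Example (illustrative sketch): splitting a physical address into its
 * small-page base and page offset, assuming a local variable pa of type
 * paddr_t.
 *
 *   paddr_t page_base = pa & ~SMALL_PAGE_MASK;
 *   size_t page_offset = pa & SMALL_PAGE_MASK;
 */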
24 
25 /*
26  * A PGDIR is a translation table one level above the table that holds the
27  * small pages, i.e. each of its entries maps CORE_MMU_PGDIR_SIZE of memory.
28  */
29 #define CORE_MMU_PGDIR_SIZE		BIT(CORE_MMU_PGDIR_SHIFT)
30 #define CORE_MMU_PGDIR_MASK		((paddr_t)CORE_MMU_PGDIR_SIZE - 1)
31 
32 /* TA user space code, data, stack and heap are mapped using this granularity */
33 #define CORE_MMU_USER_CODE_SIZE		BIT(CORE_MMU_USER_CODE_SHIFT)
34 #define CORE_MMU_USER_CODE_MASK		((paddr_t)CORE_MMU_USER_CODE_SIZE - 1)
35 
36 /* TA user space parameters are mapped using this granularity */
37 #define CORE_MMU_USER_PARAM_SIZE	BIT(CORE_MMU_USER_PARAM_SHIFT)
38 #define CORE_MMU_USER_PARAM_MASK	((paddr_t)CORE_MMU_USER_PARAM_SIZE - 1)
39 
40 /*
41  * Identity mapping constraint: the virtual base address is the physical start
42  * address. Macros not defined by the platform get default values below.
43  */
44 #ifndef TEE_RAM_VA_SIZE
45 #define TEE_RAM_VA_SIZE			CORE_MMU_PGDIR_SIZE
46 #endif
47 
48 #ifndef TEE_LOAD_ADDR
49 #define TEE_LOAD_ADDR			TEE_RAM_START
50 #endif
51 
52 #ifndef STACK_ALIGNMENT
53 #define STACK_ALIGNMENT			(sizeof(long) * U(2))
54 #endif
55 
56 #ifndef __ASSEMBLER__
57 /*
58  * Memory area type:
59  * MEM_AREA_END:      Reserved, marks the end of a table of mapping areas.
60  * MEM_AREA_TEE_RAM:  core RAM (read/write/executable, secure, reserved to TEE)
61  * MEM_AREA_TEE_RAM_RX:  core private read-only/executable memory (secure)
62  * MEM_AREA_TEE_RAM_RO:  core private read-only/non-executable memory (secure)
63  * MEM_AREA_TEE_RAM_RW:  core private read/write/non-executable memory (secure)
64  * MEM_AREA_INIT_RAM_RO: init private read-only/non-executable memory (secure)
65  * MEM_AREA_INIT_RAM_RX: init private read-only/executable memory (secure)
66  * MEM_AREA_NEX_RAM_RO: nexus private read-only/non-executable memory (secure)
67  * MEM_AREA_NEX_RAM_RW: nexus private r/w/non-executable memory (secure)
68  * MEM_AREA_TEE_COHERENT: teecore coherent RAM (secure, reserved to TEE)
69  * MEM_AREA_TEE_ASAN: core address sanitizer RAM (secure, reserved to TEE)
70  * MEM_AREA_IDENTITY_MAP_RX: core identity mapped r/o executable memory (secure)
71  * MEM_AREA_TA_RAM:   Secure RAM where teecore loads and executes TA instances.
72  * MEM_AREA_NSEC_SHM: NonSecure shared RAM between NSec and TEE.
73  * MEM_AREA_RAM_NSEC: NonSecure RAM storing data
74  * MEM_AREA_RAM_SEC:  Secure RAM storing some secrets
75  * MEM_AREA_IO_NSEC:  NonSecure HW mapped registers
76  * MEM_AREA_IO_SEC:   Secure HW mapped registers
77  * MEM_AREA_EXT_DT:   Memory where the external device tree is mapped
78  * MEM_AREA_RES_VASPACE: Reserved virtual memory space
79  * MEM_AREA_SHM_VASPACE: Virtual memory space for dynamic shared memory buffers
80  * MEM_AREA_TS_VASPACE: TS virtual address space, only used with phys_to_virt()
 * MEM_AREA_PAGER_VASPACE: Virtual address space reserved for the pager
 * MEM_AREA_SDP_MEM: Secure Data Path memory registered with register_sdp_mem()
81  * MEM_AREA_DDR_OVERALL: Overall DDR address range, candidate for dynamic shm.
82  * MEM_AREA_SEC_RAM_OVERALL: Whole secure RAM
83  * MEM_AREA_MAXTYPE:  first invalid 'type' value
84  */
85 enum teecore_memtypes {
86 	MEM_AREA_END = 0,
87 	MEM_AREA_TEE_RAM,
88 	MEM_AREA_TEE_RAM_RX,
89 	MEM_AREA_TEE_RAM_RO,
90 	MEM_AREA_TEE_RAM_RW,
91 	MEM_AREA_INIT_RAM_RO,
92 	MEM_AREA_INIT_RAM_RX,
93 	MEM_AREA_NEX_RAM_RO,
94 	MEM_AREA_NEX_RAM_RW,
95 	MEM_AREA_TEE_COHERENT,
96 	MEM_AREA_TEE_ASAN,
97 	MEM_AREA_IDENTITY_MAP_RX,
98 	MEM_AREA_TA_RAM,
99 	MEM_AREA_NSEC_SHM,
100 	MEM_AREA_RAM_NSEC,
101 	MEM_AREA_RAM_SEC,
102 	MEM_AREA_IO_NSEC,
103 	MEM_AREA_IO_SEC,
104 	MEM_AREA_EXT_DT,
105 	MEM_AREA_RES_VASPACE,
106 	MEM_AREA_SHM_VASPACE,
107 	MEM_AREA_TS_VASPACE,
108 	MEM_AREA_PAGER_VASPACE,
109 	MEM_AREA_SDP_MEM,
110 	MEM_AREA_DDR_OVERALL,
111 	MEM_AREA_SEC_RAM_OVERALL,
112 	MEM_AREA_MAXTYPE
113 };
114 
115 static inline const char *teecore_memtype_name(enum teecore_memtypes type)
116 {
117 	static const char * const names[] = {
118 		[MEM_AREA_END] = "END",
119 		[MEM_AREA_TEE_RAM] = "TEE_RAM_RWX",
120 		[MEM_AREA_TEE_RAM_RX] = "TEE_RAM_RX",
121 		[MEM_AREA_TEE_RAM_RO] = "TEE_RAM_RO",
122 		[MEM_AREA_TEE_RAM_RW] = "TEE_RAM_RW",
123 		[MEM_AREA_INIT_RAM_RO] = "INIT_RAM_RO",
124 		[MEM_AREA_INIT_RAM_RX] = "INIT_RAM_RX",
125 		[MEM_AREA_NEX_RAM_RO] = "NEX_RAM_RO",
126 		[MEM_AREA_NEX_RAM_RW] = "NEX_RAM_RW",
127 		[MEM_AREA_TEE_ASAN] = "TEE_ASAN",
128 		[MEM_AREA_IDENTITY_MAP_RX] = "IDENTITY_MAP_RX",
129 		[MEM_AREA_TEE_COHERENT] = "TEE_COHERENT",
130 		[MEM_AREA_TA_RAM] = "TA_RAM",
131 		[MEM_AREA_NSEC_SHM] = "NSEC_SHM",
132 		[MEM_AREA_RAM_NSEC] = "RAM_NSEC",
133 		[MEM_AREA_RAM_SEC] = "RAM_SEC",
134 		[MEM_AREA_IO_NSEC] = "IO_NSEC",
135 		[MEM_AREA_IO_SEC] = "IO_SEC",
136 		[MEM_AREA_EXT_DT] = "EXT_DT",
137 		[MEM_AREA_RES_VASPACE] = "RES_VASPACE",
138 		[MEM_AREA_SHM_VASPACE] = "SHM_VASPACE",
139 		[MEM_AREA_TS_VASPACE] = "TS_VASPACE",
140 		[MEM_AREA_PAGER_VASPACE] = "PAGER_VASPACE",
141 		[MEM_AREA_SDP_MEM] = "SDP_MEM",
142 		[MEM_AREA_DDR_OVERALL] = "DDR_OVERALL",
143 		[MEM_AREA_SEC_RAM_OVERALL] = "SEC_RAM_OVERALL",
144 	};
145 
146 	COMPILE_TIME_ASSERT(ARRAY_SIZE(names) == MEM_AREA_MAXTYPE);
147 	return names[type];
148 }
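
/*
 * Example (illustrative sketch): using teecore_memtype_name() in a debug
 * trace, assuming a local variable t of type enum teecore_memtypes and the
 * usual DMSG() trace macro from <trace.h>.
 *
 *   DMSG("memory area type %d is %s", (int)t, teecore_memtype_name(t));
 */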
149 
150 #ifdef CFG_CORE_RWDATA_NOEXEC
151 #define MEM_AREA_TEE_RAM_RW_DATA	MEM_AREA_TEE_RAM_RW
152 #else
153 #define MEM_AREA_TEE_RAM_RW_DATA	MEM_AREA_TEE_RAM
154 #endif
155 
156 struct core_mmu_phys_mem {
157 	const char *name;
158 	enum teecore_memtypes type;
159 	__extension__ union {
160 #if __SIZEOF_LONG__ != __SIZEOF_PADDR__
161 		struct {
162 			uint32_t lo_addr;
163 			uint32_t hi_addr;
164 		};
165 #endif
166 		paddr_t addr;
167 	};
168 	__extension__ union {
169 #if __SIZEOF_LONG__ != __SIZEOF_PADDR__
170 		struct {
171 			uint32_t lo_size;
172 			uint32_t hi_size;
173 		};
174 #endif
175 		paddr_size_t size;
176 	};
177 };
178 
179 #define __register_memory(_name, _type, _addr, _size, _section) \
180 	SCATTERED_ARRAY_DEFINE_ITEM(_section, struct core_mmu_phys_mem) = \
181 		{ .name = (_name), .type = (_type), .addr = (_addr), \
182 		  .size = (_size) }
183 
184 #if __SIZEOF_LONG__ != __SIZEOF_PADDR__
185 #define __register_memory_ul(_name, _type, _addr, _size, _section) \
186 	SCATTERED_ARRAY_DEFINE_ITEM(_section, struct core_mmu_phys_mem) = \
187 		{ .name = (_name), .type = (_type), .lo_addr = (_addr), \
188 		  .lo_size = (_size) }
189 #else
190 #define __register_memory_ul(_name, _type, _addr, _size, _section) \
191 		__register_memory(_name, _type, _addr, _size, _section)
192 #endif
193 
194 #define register_phys_mem(type, addr, size) \
195 		__register_memory(#addr, (type), (addr), (size), \
196 				  phys_mem_map)
197 
198 #define register_phys_mem_ul(type, addr, size) \
199 		__register_memory_ul(#addr, (type), (addr), (size), \
200 				     phys_mem_map)
201 
202 /* Same as register_phys_mem() but with PGDIR_SIZE granularity */
203 #define register_phys_mem_pgdir(type, addr, size) \
204 	__register_memory(#addr, type, ROUNDDOWN(addr, CORE_MMU_PGDIR_SIZE), \
205 			  ROUNDUP(size + addr - \
206 					ROUNDDOWN(addr, CORE_MMU_PGDIR_SIZE), \
207 				  CORE_MMU_PGDIR_SIZE), phys_mem_map)
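
/*
 * Example (illustrative sketch): a platform source file would typically
 * register its secure console MMIO with PGDIR granularity. CONSOLE_UART_BASE
 * is assumed to be a platform-provided physical base address.
 *
 *   register_phys_mem_pgdir(MEM_AREA_IO_SEC, CONSOLE_UART_BASE,
 *                           CORE_MMU_PGDIR_SIZE);
 */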
208 
209 #ifdef CFG_SECURE_DATA_PATH
210 #define register_sdp_mem(addr, size) \
211 		__register_memory(#addr, MEM_AREA_SDP_MEM, (addr), (size), \
212 				  phys_sdp_mem)
213 #else
214 #define register_sdp_mem(addr, size) \
215 		static int CONCAT(__register_sdp_mem_unused, __COUNTER__) \
216 			__unused
217 #endif
218 
219 /* register_dynamic_shm() is deprecated, please use register_ddr() instead */
220 #define register_dynamic_shm(addr, size) \
221 		__register_memory(#addr, MEM_AREA_DDR_OVERALL, (addr), (size), \
222 				  phys_ddr_overall_compat)
223 
224 /*
225  * register_ddr() - Define a memory range
226  * @addr: Base address
227  * @size: Length
228  *
229  * This macro can be used multiple times to define disjoint ranges. During
230  * initialization, holes are carved out of these ranges where they overlap
231  * with special memory, for instance memory registered with register_sdp_mem().
232  *
233  * The memory that remains is accepted as non-secure shared memory when
234  * communicating with normal world.
235  *
236  * This macro is an alternative to supplying the memory description with a
237  * device tree blob.
238  */
239 #define register_ddr(addr, size) \
240 		__register_memory(#addr, MEM_AREA_DDR_OVERALL, (addr), \
241 				  (size), phys_ddr_overall)
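
/*
 * Example (illustrative sketch): describing two disjoint DRAM banks as
 * candidates for dynamic shared memory. DRAM0_BASE/DRAM0_SIZE and
 * DRAM1_BASE/DRAM1_SIZE are hypothetical platform-provided macros.
 *
 *   register_ddr(DRAM0_BASE, DRAM0_SIZE);
 *   register_ddr(DRAM1_BASE, DRAM1_SIZE);
 */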
242 
243 #define phys_ddr_overall_begin \
244 	SCATTERED_ARRAY_BEGIN(phys_ddr_overall, struct core_mmu_phys_mem)
245 
246 #define phys_ddr_overall_end \
247 	SCATTERED_ARRAY_END(phys_ddr_overall, struct core_mmu_phys_mem)
248 
249 #define phys_ddr_overall_compat_begin \
250 	SCATTERED_ARRAY_BEGIN(phys_ddr_overall_compat, struct core_mmu_phys_mem)
251 
252 #define phys_ddr_overall_compat_end \
253 	SCATTERED_ARRAY_END(phys_ddr_overall_compat, struct core_mmu_phys_mem)
254 
255 #define phys_sdp_mem_begin \
256 	SCATTERED_ARRAY_BEGIN(phys_sdp_mem, struct core_mmu_phys_mem)
257 
258 #define phys_sdp_mem_end \
259 	SCATTERED_ARRAY_END(phys_sdp_mem, struct core_mmu_phys_mem)
260 
261 #define phys_mem_map_begin \
262 	SCATTERED_ARRAY_BEGIN(phys_mem_map, struct core_mmu_phys_mem)
263 
264 #define phys_mem_map_end \
265 	SCATTERED_ARRAY_END(phys_mem_map, struct core_mmu_phys_mem)
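
/*
 * Example (illustrative sketch): iterating over the statically registered
 * memory areas using the scattered array markers above.
 *
 *   const struct core_mmu_phys_mem *mem = NULL;
 *
 *   for (mem = phys_mem_map_begin; mem < phys_mem_map_end; mem++)
 *       DMSG("%s: type %s", mem->name, teecore_memtype_name(mem->type));
 */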
266 
267 #ifdef CFG_CORE_RESERVED_SHM
268 /* Default NSec shared memory allocated from NSec world */
269 extern unsigned long default_nsec_shm_paddr;
270 extern unsigned long default_nsec_shm_size;
271 #endif
272 
273 void core_init_mmu_map(unsigned long seed, struct core_mmu_config *cfg);
274 void core_init_mmu_regs(struct core_mmu_config *cfg);
275 
276 /* Arch-specific function to help optimize the use of a single MMU xlat table */
277 bool core_mmu_prefer_tee_ram_at_top(paddr_t paddr);
278 
279 /*
280  * struct mmu_partition - stores MMU partition.
281  *
282  * A partition represents a whole MMU mapping. It is possible to create
283  * multiple partitions and to switch between them at runtime,
284  * effectively changing how OP-TEE sees memory.
285  * This is an opaque struct which is defined differently for the
286  * v7 and LPAE MMUs.
287  *
288  * This structure is mostly used when virtualization is enabled.
289  * When CFG_NS_VIRTUALIZATION==n only the default partition exists.
290  */
291 struct mmu_partition;
292 
293 /*
294  * core_mmu_get_user_va_range() - Return range of user va space
295  * @base:	Lowest user virtual address
296  * @size:	Size in bytes of user address space
297  */
298 void core_mmu_get_user_va_range(vaddr_t *base, size_t *size);
299 
300 /*
301  * enum core_mmu_fault - different kinds of faults
302  * @CORE_MMU_FAULT_ALIGNMENT:		alignment fault
303  * @CORE_MMU_FAULT_DEBUG_EVENT:		debug event
304  * @CORE_MMU_FAULT_TRANSLATION:		translation fault
305  * @CORE_MMU_FAULT_WRITE_PERMISSION:	Permission fault during write
306  * @CORE_MMU_FAULT_READ_PERMISSION:	Permission fault during read
307  * @CORE_MMU_FAULT_ASYNC_EXTERNAL:	asynchronous external abort
308  * @CORE_MMU_FAULT_ACCESS_BIT:		access bit fault
309  * @CORE_MMU_FAULT_TAG_CHECK:		tag check fault
310  * @CORE_MMU_FAULT_OTHER:		Other/unknown fault
311  */
312 enum core_mmu_fault {
313 	CORE_MMU_FAULT_ALIGNMENT,
314 	CORE_MMU_FAULT_DEBUG_EVENT,
315 	CORE_MMU_FAULT_TRANSLATION,
316 	CORE_MMU_FAULT_WRITE_PERMISSION,
317 	CORE_MMU_FAULT_READ_PERMISSION,
318 	CORE_MMU_FAULT_ASYNC_EXTERNAL,
319 	CORE_MMU_FAULT_ACCESS_BIT,
320 	CORE_MMU_FAULT_TAG_CHECK,
321 	CORE_MMU_FAULT_OTHER,
322 };
323 
324 /*
325  * core_mmu_get_fault_type() - get fault type
326  * @fault_descr:	Content of fault status or exception syndrome register
327  * @returns an enum describing the content of the fault status register.
328  */
329 enum core_mmu_fault core_mmu_get_fault_type(uint32_t fault_descr);
330 
331 /*
332  * core_mmu_type_to_attr() - convert memory type to attribute
333  * @t: memory type
334  * @returns an attribute that can be passed to core_mmu_set_entry() and friends
335  */
336 uint32_t core_mmu_type_to_attr(enum teecore_memtypes t);
337 
338 /*
339  * core_mmu_create_user_map() - Create user mode mapping
340  * @uctx:	Pointer to user mode context
341  * @map:	MMU configuration to use when activating this VA space
342  */
343 void core_mmu_create_user_map(struct user_mode_ctx *uctx,
344 			      struct core_mmu_user_map *map);
345 /*
346  * core_mmu_get_user_map() - Reads current MMU configuration for user VA space
347  * @map:	MMU configuration for current user VA space.
348  */
349 void core_mmu_get_user_map(struct core_mmu_user_map *map);
350 
351 /*
352  * core_mmu_set_user_map() - Set new MMU configuration for user VA space
353  * @map:	User context MMU configuration or NULL to set core VA space
354  *
355  * Activate user VA space mapping and set its ASID if @map is not NULL,
356  * otherwise activate core mapping and set ASID to 0.
357  */
358 void core_mmu_set_user_map(struct core_mmu_user_map *map);
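
/*
 * Example (illustrative sketch): temporarily deactivating the user VA space
 * and restoring it afterwards.
 *
 *   struct core_mmu_user_map map = { };
 *
 *   core_mmu_get_user_map(&map);
 *   core_mmu_set_user_map(NULL);    (core mapping only, ASID 0)
 *   ...do work without user VA space...
 *   core_mmu_set_user_map(&map);    (restore user VA space and ASID)
 */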
359 
360 /*
361  * struct core_mmu_table_info - Properties for a translation table
362  * @table:	Pointer to translation table
363  * @va_base:	VA base address of the translation table
364  * @level:	Translation table level
365  * @shift:	The shift of each entry in the table
366  * @num_entries: Number of entries in this table.
367  */
368 struct core_mmu_table_info {
369 	void *table;
370 	vaddr_t va_base;
371 	unsigned level;
372 	unsigned shift;
373 	unsigned num_entries;
374 #ifdef CFG_NS_VIRTUALIZATION
375 	struct mmu_partition *prtn;
376 #endif
377 };
378 
379 /*
380  * core_mmu_find_table() - Locates a translation table
381  * @prtn:	MMU partition where search should be performed
382  * @va:		Virtual address for the table to cover
383  * @max_level:	Don't traverse beyond this level
384  * @tbl_info:	Pointer to where to store properties.
385  * @return true if a translation table was found, false on error
386  */
387 bool core_mmu_find_table(struct mmu_partition *prtn, vaddr_t va,
388 			 unsigned max_level,
389 			 struct core_mmu_table_info *tbl_info);
390 
391 /*
392  * core_mmu_entry_to_finer_grained() - divide mapping at current level into
393  *     smaller ones so memory can be mapped with finer granularity
394  * @tbl_info:	table where the target entry is located
395  * @idx:	index of the entry for which a pgdir must be set up
396  * @secure:	true/false if the pgdir maps secure/non-secure memory (32-bit MMU)
397  * @return true on success, false on error
398  */
399 bool core_mmu_entry_to_finer_grained(struct core_mmu_table_info *tbl_info,
400 				     unsigned int idx, bool secure);
401 
402 void core_mmu_set_entry_primitive(void *table, size_t level, size_t idx,
403 				  paddr_t pa, uint32_t attr);
404 
405 void core_mmu_get_user_pgdir(struct core_mmu_table_info *pgd_info);
406 
407 /*
408  * core_mmu_set_entry() - Set entry in translation table
409  * @tbl_info:	Translation table properties
410  * @idx:	Index of entry to update
411  * @pa:		Physical address to assign entry
412  * @attr:	Attributes to assign entry
413  */
414 void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
415 			paddr_t pa, uint32_t attr);
416 
417 void core_mmu_get_entry_primitive(const void *table, size_t level, size_t idx,
418 				  paddr_t *pa, uint32_t *attr);
419 
420 /*
421  * core_mmu_get_entry() - Get entry from translation table
422  * @tbl_info:	Translation table properties
423  * @idx:	Index of entry to read
424  * @pa:		Physical address is returned here if pa is not NULL
425  * @attr:	Attributes are returned here if attr is not NULL
426  */
427 void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
428 			paddr_t *pa, uint32_t *attr);
429 
430 /*
431  * core_mmu_va2idx() - Translate from virtual address to table index
432  * @tbl_info:	Translation table properties
433  * @va:		Virtual address to translate
434  * @returns index in the translation table
435  */
436 static inline unsigned core_mmu_va2idx(struct core_mmu_table_info *tbl_info,
437 			vaddr_t va)
438 {
439 	return (va - tbl_info->va_base) >> tbl_info->shift;
440 }
441 
442 /*
443  * core_mmu_idx2va() - Translate from table index to virtual address
444  * @tbl_info:	Translation table properties
445  * @idx:	Index to translate
446  * @returns Virtual address
447  */
448 static inline vaddr_t core_mmu_idx2va(struct core_mmu_table_info *tbl_info,
449 			unsigned idx)
450 {
451 	return (idx << tbl_info->shift) + tbl_info->va_base;
452 }
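
/*
 * Example (illustrative sketch): resolving the entry that covers a core
 * virtual address, assuming a local vaddr_t va, by combining
 * core_mmu_find_table(), core_mmu_va2idx() and core_mmu_get_entry().
 * Passing NULL is assumed to select the currently active MMU partition and
 * UINT_MAX walks as deep as possible; @pa then holds the base of the
 * covering block or page.
 *
 *   struct core_mmu_table_info tbl_info = { };
 *   paddr_t pa = 0;
 *   uint32_t attr = 0;
 *
 *   if (core_mmu_find_table(NULL, va, UINT_MAX, &tbl_info)) {
 *       unsigned int idx = core_mmu_va2idx(&tbl_info, va);
 *
 *       core_mmu_get_entry(&tbl_info, idx, &pa, &attr);
 *   }
 */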
453 
454 /*
455  * core_mmu_get_block_offset() - Get offset inside a block/page
456  * @tbl_info:	Translation table properties
457  * @pa:		Physical address
458  * @returns offset within one block of the translation table
459  */
460 static inline size_t core_mmu_get_block_offset(
461 			struct core_mmu_table_info *tbl_info, paddr_t pa)
462 {
463 	return pa & ((1 << tbl_info->shift) - 1);
464 }
465 
466 /*
467  * core_mmu_is_dynamic_vaspace() - Check if memory region belongs to
468  *  an empty virtual address space that is used for dynamic mappings
469  * @mm:		memory region to be checked
470  * @returns result of the check
471  */
472 static inline bool core_mmu_is_dynamic_vaspace(struct tee_mmap_region *mm)
473 {
474 	return mm->type == MEM_AREA_RES_VASPACE ||
475 		mm->type == MEM_AREA_SHM_VASPACE;
476 }
477 
478 /*
479  * core_mmu_map_pages() - map list of pages at given virtual address
480  * @vstart:	Virtual address where mapping begins
481  * @pages:	Array of page addresses
482  * @num_pages:	Number of pages
483  * @memtype:	Type of memory to be mapped
484  *
485  * Note: This function asserts that pages are not mapped executable for
486  * kernel (privileged) mode.
487  *
488  * @returns:	TEE_SUCCESS on success, TEE_ERROR_XXX on error
489  */
490 TEE_Result core_mmu_map_pages(vaddr_t vstart, paddr_t *pages, size_t num_pages,
491 			      enum teecore_memtypes memtype);
492 
493 /*
494  * core_mmu_map_contiguous_pages() - map range of pages at given virtual address
495  * @vstart:	Virtual address where mapping begins
496  * @pstart:	Physical address of the first page
497  * @num_pages:	Number of pages
498  * @memtype:	Type of memory to be mapped
499  *
500  * Note: This function asserts that pages are not mapped executable for
501  * kernel (privileged) mode.
502  *
503  * @returns:	TEE_SUCCESS on success, TEE_ERROR_XXX on error
504  */
505 TEE_Result core_mmu_map_contiguous_pages(vaddr_t vstart, paddr_t pstart,
506 					 size_t num_pages,
507 					 enum teecore_memtypes memtype);
508 
509 /*
510  * core_mmu_unmap_pages() - remove mapping at given virtual address
511  * @vstart:	Virtual address where mapping begins
512  * @num_pages:	Number of pages to unmap
513  */
514 void core_mmu_unmap_pages(vaddr_t vstart, size_t num_pages);
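
/*
 * Example (illustrative sketch): mapping num_pages physically contiguous
 * small pages of secure RAM at va and removing the mapping again. va, pa and
 * num_pages are hypothetical local variables.
 *
 *   if (core_mmu_map_contiguous_pages(va, pa, num_pages, MEM_AREA_RAM_SEC))
 *       return TEE_ERROR_GENERIC;
 *   ...use the mapping...
 *   core_mmu_unmap_pages(va, num_pages);
 */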
515 
516 /*
517  * core_mmu_user_mapping_is_active() - Report if user mapping is active
518  * @returns true if a user VA space is active, false if user VA space is
519  *          inactive.
520  */
521 bool core_mmu_user_mapping_is_active(void);
522 
523 /*
524  * core_mmu_mattr_is_ok() - Check that supplied mem attributes can be used
525  * @returns true if the attributes can be used, false if not.
526  */
527 bool core_mmu_mattr_is_ok(uint32_t mattr);
528 
529 void core_mmu_get_mem_by_type(enum teecore_memtypes type, vaddr_t *s,
530 			      vaddr_t *e);
531 
532 enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa);
533 
534 /* routines to retrieve the shared memory configuration */
535 static inline bool core_mmu_is_shm_cached(void)
536 {
537 	return mattr_is_cached(core_mmu_type_to_attr(MEM_AREA_NSEC_SHM));
538 }
539 
540 TEE_Result core_mmu_remove_mapping(enum teecore_memtypes type, void *addr,
541 				   size_t len);
542 void *core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr,
543 			   size_t len);
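
/*
 * Example (illustrative sketch): mapping a secure device region at runtime
 * and removing the mapping when done. dev_pa and dev_len are hypothetical.
 *
 *   void *va = core_mmu_add_mapping(MEM_AREA_IO_SEC, dev_pa, dev_len);
 *
 *   if (!va)
 *       return TEE_ERROR_GENERIC;
 *   ...access the device...
 *   core_mmu_remove_mapping(MEM_AREA_IO_SEC, va, dev_len);
 */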
544 
545 /*
546  * core_mmu_find_mapping_exclusive() - Find mapping of specified type and
547  *				       length. If more than one mapping of
548  *				       specified type is present, NULL will be
549  *				       returned.
550  * @type:	memory type
551  * @len:	length in bytes
552  */
553 struct tee_mmap_region *
554 core_mmu_find_mapping_exclusive(enum teecore_memtypes type, size_t len);
555 
556 /*
557  * tlbi_mva_range() - Invalidate TLB for virtual address range
558  * @va:		start virtual address, must be a multiple of @granule
559  * @len:	length in bytes of range, must be a multiple of @granule
560  * @granule:	granularity of mapping, supported values are
561  *		CORE_MMU_PGDIR_SIZE or SMALL_PAGE_SIZE. This value must
562  *		match the actual mappings.
563  */
564 void tlbi_mva_range(vaddr_t va, size_t len, size_t granule);
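
/*
 * Example (illustrative sketch): after small-page mappings covering
 * [va, va + len) have been modified, the matching TLB entries are
 * invalidated with:
 *
 *   tlbi_mva_range(va, len, SMALL_PAGE_SIZE);
 */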
565 
566 /*
567  * tlbi_mva_range_asid() - Invalidate TLB for virtual address range for
568  *			   a specific ASID
569  * @va:		start virtual address, must be a multiple of @granule
570  * @len:	length in bytes of range, must be a multiple of @granule
571  * @granule:	granularity of mapping, supported values are
572  *		CORE_MMU_PGDIR_SIZE or SMALL_PAGE_SIZE. This value must
573  *		match the actual mappings.
574  * @asid:	Address space identifier
575  */
576 void tlbi_mva_range_asid(vaddr_t va, size_t len, size_t granule, uint32_t asid);
577 
578 /* Check whether the CPU MMU is enabled or not */
579 bool cpu_mmu_enabled(void);
580 
581 #ifdef CFG_CORE_DYN_SHM
582 /*
583  * Check if platform defines nsec DDR range(s).
584  * Static SHM (MEM_AREA_NSEC_SHM) is not covered by this API as it is
585  * always present.
586  */
587 bool core_mmu_nsec_ddr_is_defined(void);
588 
589 void core_mmu_set_discovered_nsec_ddr(struct core_mmu_phys_mem *start,
590 				      size_t nelems);
591 #endif
592 
593 /* Initialize MMU partition */
594 void core_init_mmu_prtn(struct mmu_partition *prtn, struct tee_mmap_region *mm);
595 
596 unsigned int asid_alloc(void);
597 void asid_free(unsigned int asid);
598 
599 #ifdef CFG_SECURE_DATA_PATH
600 /* Alloc and fill SDP memory objects table - table is NULL terminated */
601 struct mobj **core_sdp_mem_create_mobjs(void);
602 #endif
603 
604 #ifdef CFG_NS_VIRTUALIZATION
605 size_t core_mmu_get_total_pages_size(void);
606 struct mmu_partition *core_alloc_mmu_prtn(void *tables);
607 void core_free_mmu_prtn(struct mmu_partition *prtn);
608 void core_mmu_set_prtn(struct mmu_partition *prtn);
609 void core_mmu_set_default_prtn(void);
610 void core_mmu_set_default_prtn_tbl(void);
611 #endif
612 
613 void core_mmu_init_virtualization(void);
614 
615 /* init some allocation pools */
616 void core_mmu_init_ta_ram(void);
617 
618 void core_init_mmu(struct tee_mmap_region *mm);
619 
620 void core_mmu_set_info_table(struct core_mmu_table_info *tbl_info,
621 			     unsigned int level, vaddr_t va_base, void *table);
622 void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
623 				struct user_mode_ctx *uctx);
624 void core_mmu_map_region(struct mmu_partition *prtn,
625 			 struct tee_mmap_region *mm);
626 
627 bool arch_va2pa_helper(void *va, paddr_t *pa);
628 
629 static inline bool core_mmap_is_end_of_table(const struct tee_mmap_region *mm)
630 {
631 	return mm->type == MEM_AREA_END;
632 }
633 
634 static inline bool core_mmu_check_end_pa(paddr_t pa, size_t len)
635 {
636 	paddr_t end_pa = 0;
637 
638 	if (ADD_OVERFLOW(pa, len - 1, &end_pa))
639 		return false;
640 	return core_mmu_check_max_pa(end_pa);
641 }
642 
643 /*
644  * core_mmu_get_secure_memory() - get physical secure memory range
645  * @base: base address of secure memory
646  * @size: size of secure memory
647  *
648  * The physical secure memory range returned covers at least the memory
649  * range used by OP-TEE Core, but may cover more memory depending on the
650  * configuration.
651  */
652 void core_mmu_get_secure_memory(paddr_t *base, paddr_size_t *size);
653 
654 #endif /*__ASSEMBLER__*/
655 
656 #endif /* CORE_MMU_H */
657