/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */
#ifndef __MM_CORE_MMU_H
#define __MM_CORE_MMU_H

#ifndef __ASSEMBLER__
#include <assert.h>
#include <compiler.h>
#include <kernel/user_ta.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu_types.h>
#include <types_ext.h>
#include <util.h>
#endif

#include <mm/core_mmu_arch.h>
#include <platform_config.h>

/* A small page is the smallest unit of memory that can be mapped */
#define SMALL_PAGE_SIZE		BIT(SMALL_PAGE_SHIFT)
#define SMALL_PAGE_MASK		((paddr_t)SMALL_PAGE_SIZE - 1)

/*
 * A PGDIR is the translation table one level above the table that holds
 * the small pages, so CORE_MMU_PGDIR_SIZE is the address range covered by
 * one of its entries.
 */
#define CORE_MMU_PGDIR_SIZE	BIT(CORE_MMU_PGDIR_SHIFT)
#define CORE_MMU_PGDIR_MASK	((paddr_t)CORE_MMU_PGDIR_SIZE - 1)

/* TA user space code, data, stack and heap are mapped using this granularity */
#define CORE_MMU_USER_CODE_SIZE	BIT(CORE_MMU_USER_CODE_SHIFT)
#define CORE_MMU_USER_CODE_MASK	((paddr_t)CORE_MMU_USER_CODE_SIZE - 1)

/* TA user space parameters are mapped using this granularity */
#define CORE_MMU_USER_PARAM_SIZE	BIT(CORE_MMU_USER_PARAM_SHIFT)
#define CORE_MMU_USER_PARAM_MASK	((paddr_t)CORE_MMU_USER_PARAM_SIZE - 1)

/*
 * Identity mapping constraint: the virtual base address equals the physical
 * start address. If the platform did not set some of these macros, they get
 * default values here.
 */
#ifndef TEE_RAM_VA_SIZE
#define TEE_RAM_VA_SIZE		CORE_MMU_PGDIR_SIZE
#endif

#ifndef TEE_LOAD_ADDR
#define TEE_LOAD_ADDR		TEE_RAM_START
#endif

#ifndef STACK_ALIGNMENT
#define STACK_ALIGNMENT		(sizeof(long) * U(2))
#endif

#ifndef __ASSEMBLER__
/*
 * Memory area type:
 * MEM_AREA_TEE_RAM: core RAM (read/write/executable, secure, reserved to TEE)
 * MEM_AREA_TEE_RAM_RX: core private read-only/executable memory (secure)
 * MEM_AREA_TEE_RAM_RO: core private read-only/non-executable memory (secure)
 * MEM_AREA_TEE_RAM_RW: core private read/write/non-executable memory (secure)
 * MEM_AREA_INIT_RAM_RO: init private read-only/non-executable memory (secure)
 * MEM_AREA_INIT_RAM_RX: init private read-only/executable memory (secure)
 * MEM_AREA_NEX_RAM_RO: nexus private read-only/non-executable memory (secure)
 * MEM_AREA_NEX_RAM_RW: nexus private r/w/non-executable memory (secure)
 * MEM_AREA_NEX_DYN_VASPACE: nexus private dynamic memory map (secure)
 * MEM_AREA_TEE_DYN_VASPACE: core private dynamic memory (secure)
 * MEM_AREA_TEE_COHERENT: teecore coherent RAM (secure, reserved to TEE)
 * MEM_AREA_TEE_ASAN: core address sanitizer RAM (secure, reserved to TEE)
 * MEM_AREA_IDENTITY_MAP_RX: core identity mapped r/o executable memory (secure)
 * MEM_AREA_NSEC_SHM: NonSecure shared RAM between NSec and TEE.
 * MEM_AREA_NEX_NSEC_SHM: nexus non-secure shared RAM between NSec and TEE.
 * MEM_AREA_RAM_NSEC: NonSecure RAM storing data
 * MEM_AREA_RAM_SEC: Secure RAM storing some secrets
 * MEM_AREA_ROM_SEC: Secure read-only memory storing some secrets
 * MEM_AREA_IO_NSEC: NonSecure HW mapped registers
 * MEM_AREA_IO_SEC: Secure HW mapped registers
 * MEM_AREA_EXT_DT: Memory where an external device tree is loaded
 * MEM_AREA_MANIFEST_DT: Memory where the manifest device tree is loaded
 * MEM_AREA_TRANSFER_LIST: Memory area mapped for a Transfer List
 * MEM_AREA_RES_VASPACE: Reserved virtual memory space
 * MEM_AREA_SHM_VASPACE: Virtual memory space for dynamic shared memory buffers
 * MEM_AREA_TS_VASPACE: TS va space, only used with phys_to_virt()
 * MEM_AREA_PAGER_VASPACE: Virtual memory space reserved for the pager
 * MEM_AREA_SDP_MEM: Secure Data Path memory
 * MEM_AREA_DDR_OVERALL: Overall DDR address range, candidate for dynamic shm
 * MEM_AREA_SEC_RAM_OVERALL: Whole secure RAM
 * MEM_AREA_MAXTYPE: lowest invalid 'type' value
 */
enum teecore_memtypes {
	MEM_AREA_TEE_RAM = 1,
	MEM_AREA_TEE_RAM_RX,
	MEM_AREA_TEE_RAM_RO,
	MEM_AREA_TEE_RAM_RW,
	MEM_AREA_INIT_RAM_RO,
	MEM_AREA_INIT_RAM_RX,
	MEM_AREA_NEX_RAM_RO,
	MEM_AREA_NEX_RAM_RW,
	MEM_AREA_NEX_DYN_VASPACE,
	MEM_AREA_TEE_DYN_VASPACE,
	MEM_AREA_TEE_COHERENT,
	MEM_AREA_TEE_ASAN,
	MEM_AREA_IDENTITY_MAP_RX,
	MEM_AREA_NSEC_SHM,
	MEM_AREA_NEX_NSEC_SHM,
	MEM_AREA_RAM_NSEC,
	MEM_AREA_RAM_SEC,
	MEM_AREA_ROM_SEC,
	MEM_AREA_IO_NSEC,
	MEM_AREA_IO_SEC,
	MEM_AREA_EXT_DT,
	MEM_AREA_MANIFEST_DT,
	MEM_AREA_TRANSFER_LIST,
	MEM_AREA_RES_VASPACE,
	MEM_AREA_SHM_VASPACE,
	MEM_AREA_TS_VASPACE,
	MEM_AREA_PAGER_VASPACE,
	MEM_AREA_SDP_MEM,
	MEM_AREA_DDR_OVERALL,
	MEM_AREA_SEC_RAM_OVERALL,
	MEM_AREA_MAXTYPE
};

static inline const char *teecore_memtype_name(enum teecore_memtypes type)
{
	static const char * const names[] = {
		[MEM_AREA_TEE_RAM] = "TEE_RAM_RWX",
		[MEM_AREA_TEE_RAM_RX] = "TEE_RAM_RX",
		[MEM_AREA_TEE_RAM_RO] = "TEE_RAM_RO",
		[MEM_AREA_TEE_RAM_RW] = "TEE_RAM_RW",
		[MEM_AREA_INIT_RAM_RO] = "INIT_RAM_RO",
		[MEM_AREA_INIT_RAM_RX] = "INIT_RAM_RX",
		[MEM_AREA_NEX_RAM_RO] = "NEX_RAM_RO",
		[MEM_AREA_NEX_RAM_RW] = "NEX_RAM_RW",
		[MEM_AREA_NEX_DYN_VASPACE] = "NEX_DYN_VASPACE",
		[MEM_AREA_TEE_DYN_VASPACE] = "TEE_DYN_VASPACE",
		[MEM_AREA_TEE_ASAN] = "TEE_ASAN",
		[MEM_AREA_IDENTITY_MAP_RX] = "IDENTITY_MAP_RX",
		[MEM_AREA_TEE_COHERENT] = "TEE_COHERENT",
		[MEM_AREA_NSEC_SHM] = "NSEC_SHM",
		[MEM_AREA_NEX_NSEC_SHM] = "NEX_NSEC_SHM",
		[MEM_AREA_RAM_NSEC] = "RAM_NSEC",
		[MEM_AREA_RAM_SEC] = "RAM_SEC",
		[MEM_AREA_ROM_SEC] = "ROM_SEC",
		[MEM_AREA_IO_NSEC] = "IO_NSEC",
		[MEM_AREA_IO_SEC] = "IO_SEC",
		[MEM_AREA_EXT_DT] = "EXT_DT",
		[MEM_AREA_MANIFEST_DT] = "MANIFEST_DT",
		[MEM_AREA_TRANSFER_LIST] = "TRANSFER_LIST",
		[MEM_AREA_RES_VASPACE] = "RES_VASPACE",
		[MEM_AREA_SHM_VASPACE] = "SHM_VASPACE",
		[MEM_AREA_TS_VASPACE] = "TS_VASPACE",
		[MEM_AREA_PAGER_VASPACE] = "PAGER_VASPACE",
		[MEM_AREA_SDP_MEM] = "SDP_MEM",
		[MEM_AREA_DDR_OVERALL] = "DDR_OVERALL",
		[MEM_AREA_SEC_RAM_OVERALL] = "SEC_RAM_OVERALL",
	};

	COMPILE_TIME_ASSERT(ARRAY_SIZE(names) == MEM_AREA_MAXTYPE);
	return names[type];
}

#ifdef CFG_CORE_RWDATA_NOEXEC
#define MEM_AREA_TEE_RAM_RW_DATA	MEM_AREA_TEE_RAM_RW
#else
#define MEM_AREA_TEE_RAM_RW_DATA	MEM_AREA_TEE_RAM
#endif

struct core_mmu_phys_mem {
	const char *name;
	enum teecore_memtypes type;
	__extension__ union {
#if __SIZEOF_LONG__ != __SIZEOF_PADDR__
		struct {
			uint32_t lo_addr;
			uint32_t hi_addr;
		};
#endif
		paddr_t addr;
	};
	__extension__ union {
#if __SIZEOF_LONG__ != __SIZEOF_PADDR__
		struct {
			uint32_t lo_size;
			uint32_t hi_size;
		};
#endif
		paddr_size_t size;
	};
};

#define __register_memory(_name, _type, _addr, _size, _section) \
	SCATTERED_ARRAY_DEFINE_ITEM(_section, struct core_mmu_phys_mem) = \
		{ .name = (_name), .type = (_type), .addr = (_addr), \
		  .size = (_size) }

#if __SIZEOF_LONG__ != __SIZEOF_PADDR__
#define __register_memory_ul(_name, _type, _addr, _size, _section) \
	SCATTERED_ARRAY_DEFINE_ITEM(_section, struct core_mmu_phys_mem) = \
		{ .name = (_name), .type = (_type), .lo_addr = (_addr), \
		  .lo_size = (_size) }
#else
#define __register_memory_ul(_name, _type, _addr, _size, _section) \
	__register_memory(_name, _type, _addr, _size, _section)
#endif

#define register_phys_mem(type, addr, size) \
	__register_memory(#addr, (type), (addr), (size), \
			  phys_mem_map)

#define register_phys_mem_ul(type, addr, size) \
	__register_memory_ul(#addr, (type), (addr), (size), \
			     phys_mem_map)

/* Same as register_phys_mem() but with PGDIR_SIZE granularity */
#define register_phys_mem_pgdir(type, addr, size) \
	__register_memory(#addr, type, ROUNDDOWN(addr, CORE_MMU_PGDIR_SIZE), \
			  ROUNDUP(size + addr - \
					ROUNDDOWN(addr, CORE_MMU_PGDIR_SIZE), \
				  CORE_MMU_PGDIR_SIZE), phys_mem_map)
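
/*
 * Illustrative usage only (not part of the original header): a platform
 * typically registers its device and secure memory from a platform source
 * file. The macro and memory type names below are real, but
 * CONSOLE_UART_BASE, SECRAM_BASE and SECRAM_SIZE are hypothetical
 * platform-provided defines, e.g.:
 *
 *	register_phys_mem_pgdir(MEM_AREA_IO_SEC, CONSOLE_UART_BASE,
 *				CORE_MMU_PGDIR_SIZE);
 *	register_phys_mem(MEM_AREA_RAM_SEC, SECRAM_BASE, SECRAM_SIZE);
 */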

#ifdef CFG_SECURE_DATA_PATH
#define register_sdp_mem(addr, size) \
	__register_memory(#addr, MEM_AREA_SDP_MEM, (addr), (size), \
			  phys_sdp_mem)
#else
#define register_sdp_mem(addr, size) \
	static int CONCAT(__register_sdp_mem_unused, __COUNTER__) \
		__unused
#endif

/* register_dynamic_shm() is deprecated, please use register_ddr() instead */
#define register_dynamic_shm(addr, size) \
	__register_memory(#addr, MEM_AREA_DDR_OVERALL, (addr), (size), \
			  phys_ddr_overall_compat)

/*
 * register_ddr() - Define a memory range
 * @addr: Base address
 * @size: Length
 *
 * This macro can be used multiple times to define disjoint ranges. During
 * initialization, holes are carved out of these ranges where they overlap
 * with special memory, for instance memory registered with
 * register_sdp_mem().
 *
 * The memory that remains is accepted as non-secure shared memory when
 * communicating with the normal world.
 *
 * This macro is an alternative to supplying the memory description with a
 * device tree blob.
 */
#define register_ddr(addr, size) \
	__register_memory(#addr, MEM_AREA_DDR_OVERALL, (addr), \
			  (size), phys_ddr_overall)
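
/*
 * Illustrative usage only (not part of the original header), assuming a
 * platform with a single hypothetical DRAM bank described by DRAM0_BASE and
 * DRAM0_SIZE in platform_config.h:
 *
 *	register_ddr(DRAM0_BASE, DRAM0_SIZE);
 *
 * With CFG_SECURE_DATA_PATH=y, an SDP pool registered with
 * register_sdp_mem() inside that range is carved out of it during
 * initialization.
 */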

#define phys_ddr_overall_begin \
	SCATTERED_ARRAY_BEGIN(phys_ddr_overall, struct core_mmu_phys_mem)

#define phys_ddr_overall_end \
	SCATTERED_ARRAY_END(phys_ddr_overall, struct core_mmu_phys_mem)

#define phys_ddr_overall_compat_begin \
	SCATTERED_ARRAY_BEGIN(phys_ddr_overall_compat, struct core_mmu_phys_mem)

#define phys_ddr_overall_compat_end \
	SCATTERED_ARRAY_END(phys_ddr_overall_compat, struct core_mmu_phys_mem)

#define phys_sdp_mem_begin \
	SCATTERED_ARRAY_BEGIN(phys_sdp_mem, struct core_mmu_phys_mem)

#define phys_sdp_mem_end \
	SCATTERED_ARRAY_END(phys_sdp_mem, struct core_mmu_phys_mem)

#define phys_mem_map_begin \
	SCATTERED_ARRAY_BEGIN(phys_mem_map, struct core_mmu_phys_mem)

#define phys_mem_map_end \
	SCATTERED_ARRAY_END(phys_mem_map, struct core_mmu_phys_mem)

/* Virtual memory pool for core mappings */
extern tee_mm_pool_t core_virt_mem_pool;

/* Virtual memory pool for shared memory mappings */
extern tee_mm_pool_t core_virt_shm_pool;

#ifdef CFG_CORE_RESERVED_SHM
/* Default NSec shared memory allocated from NSec world */
extern unsigned long default_nsec_shm_paddr;
extern unsigned long default_nsec_shm_size;
#endif

/*
 * Physical load address of OP-TEE, updated during boot if needed to
 * reflect the value actually used.
 */
#ifdef CFG_CORE_PHYS_RELOCATABLE
extern unsigned long core_mmu_tee_load_pa;
#else
extern const unsigned long core_mmu_tee_load_pa;
#endif

void core_init_mmu_map(unsigned long seed, struct core_mmu_config *cfg);
void core_init_mmu_regs(struct core_mmu_config *cfg);
/* Copy the static memory map from temporary boot_mem to the heap */
void core_mmu_save_mem_map(void);

/* Arch-specific helper to optimize the use of a single MMU xlat table */
bool core_mmu_prefer_tee_ram_at_top(paddr_t paddr);

/*
 * struct mmu_partition - stores an MMU partition.
 *
 * Basically it represents the whole MMU mapping. It is possible to create
 * multiple partitions and change them at runtime, effectively changing how
 * OP-TEE sees memory. This is an opaque struct which is defined differently
 * for the v7 and LPAE MMUs.
 *
 * This structure is mostly used when virtualization is enabled.
 * When CFG_NS_VIRTUALIZATION==n only the default partition exists.
 */
struct mmu_partition;

/*
 * core_mmu_get_user_va_range() - Return range of user va space
 * @base: Lowest user virtual address
 * @size: Size in bytes of user address space
 */
void core_mmu_get_user_va_range(vaddr_t *base, size_t *size);

/*
 * enum core_mmu_fault - different kinds of faults
 * @CORE_MMU_FAULT_ALIGNMENT: alignment fault
 * @CORE_MMU_FAULT_DEBUG_EVENT: debug event
 * @CORE_MMU_FAULT_TRANSLATION: translation fault
 * @CORE_MMU_FAULT_WRITE_PERMISSION: Permission fault during write
 * @CORE_MMU_FAULT_READ_PERMISSION: Permission fault during read
 * @CORE_MMU_FAULT_ASYNC_EXTERNAL: asynchronous external abort
 * @CORE_MMU_FAULT_ACCESS_BIT: access bit fault
 * @CORE_MMU_FAULT_TAG_CHECK: tag check fault
 * @CORE_MMU_FAULT_SYNC_EXTERNAL: synchronous external abort
 * @CORE_MMU_FAULT_OTHER: Other/unknown fault
 */
enum core_mmu_fault {
	CORE_MMU_FAULT_ALIGNMENT,
	CORE_MMU_FAULT_DEBUG_EVENT,
	CORE_MMU_FAULT_TRANSLATION,
	CORE_MMU_FAULT_WRITE_PERMISSION,
	CORE_MMU_FAULT_READ_PERMISSION,
	CORE_MMU_FAULT_ASYNC_EXTERNAL,
	CORE_MMU_FAULT_ACCESS_BIT,
	CORE_MMU_FAULT_TAG_CHECK,
	CORE_MMU_FAULT_SYNC_EXTERNAL,
	CORE_MMU_FAULT_OTHER,
};

/*
 * core_mmu_get_fault_type() - get fault type
 * @fault_descr: Content of fault status or exception syndrome register
 * @returns an enum describing the content of fault status register.
 */
enum core_mmu_fault core_mmu_get_fault_type(uint32_t fault_descr);

/*
 * core_mmu_type_to_attr() - convert memory type to attribute
 * @t: memory type
 * @returns an attribute that can be passed to core_mmu_set_entry() and friends
 */
uint32_t core_mmu_type_to_attr(enum teecore_memtypes t);

static inline bool core_mmu_type_is_nex_shared(enum teecore_memtypes t)
{
	return IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
	       (t == MEM_AREA_NEX_DYN_VASPACE || t == MEM_AREA_NEX_NSEC_SHM);
}

/*
 * core_mmu_create_user_map() - Create user mode mapping
 * @uctx: Pointer to user mode context
 * @map: MMU configuration to use when activating this VA space
 */
void core_mmu_create_user_map(struct user_mode_ctx *uctx,
			      struct core_mmu_user_map *map);
/*
 * core_mmu_get_user_map() - Reads current MMU configuration for user VA space
 * @map: MMU configuration for current user VA space.
 */
void core_mmu_get_user_map(struct core_mmu_user_map *map);

/*
 * core_mmu_set_user_map() - Set new MMU configuration for user VA space
 * @map: User context MMU configuration or NULL to set core VA space
 *
 * Activate user VA space mapping and set its ASID if @map is not NULL,
 * otherwise activate core mapping and set ASID to 0.
 */
void core_mmu_set_user_map(struct core_mmu_user_map *map);

/*
 * struct core_mmu_table_info - Properties for a translation table
 * @table: Pointer to translation table
 * @va_base: VA base address of the translation table
 * @num_entries: Number of entries in this table
 * @prtn: MMU partition the table belongs to (CFG_NS_VIRTUALIZATION=y only)
 * @level: Translation table level
 * @shift: The shift of each entry in the table
 * @next_level: Finer grained translation table level according to @level
 */
struct core_mmu_table_info {
	void *table;
	vaddr_t va_base;
	unsigned num_entries;
#ifdef CFG_NS_VIRTUALIZATION
	struct mmu_partition *prtn;
#endif
	uint8_t level;
	uint8_t shift;
	uint8_t next_level;
};

/*
 * core_mmu_find_table() - Locates a translation table
 * @prtn: MMU partition where search should be performed
 * @va: Virtual address for the table to cover
 * @max_level: Don't traverse beyond this level
 * @tbl_info: Pointer to where to store properties.
 * @return true if a translation table was found, false on error
 */
bool core_mmu_find_table(struct mmu_partition *prtn, vaddr_t va,
			 unsigned max_level,
			 struct core_mmu_table_info *tbl_info);

/*
 * core_mmu_entry_to_finer_grained() - divide a mapping at the current level
 * into smaller ones so memory can be mapped with finer granularity
 * @tbl_info: table where the target entry is located
 * @idx: index of the entry for which a pgdir must be set up
 * @secure: true/false if the pgdir maps secure/non-secure memory (32-bit MMU)
 * @return true on success, false on error
 */
bool core_mmu_entry_to_finer_grained(struct core_mmu_table_info *tbl_info,
				     unsigned int idx, bool secure);

void core_mmu_set_entry_primitive(void *table, size_t level, size_t idx,
				  paddr_t pa, uint32_t attr);

void core_mmu_get_user_pgdir(struct core_mmu_table_info *pgd_info);

/*
 * core_mmu_set_entry() - Set entry in translation table
 * @tbl_info: Translation table properties
 * @idx: Index of entry to update
 * @pa: Physical address to assign entry
 * @attr: Attributes to assign entry
 */
void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
			paddr_t pa, uint32_t attr);

void core_mmu_get_entry_primitive(const void *table, size_t level, size_t idx,
				  paddr_t *pa, uint32_t *attr);

/*
 * core_mmu_get_entry() - Get entry from translation table
 * @tbl_info: Translation table properties
 * @idx: Index of entry to read
 * @pa: Physical address is returned here if pa is not NULL
 * @attr: Attributes are returned here if attr is not NULL
 */
void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
			paddr_t *pa, uint32_t *attr);
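
/*
 * Illustrative sketch only (not part of the original header): looking up the
 * physical address and attributes backing a core virtual address by combining
 * core_mmu_find_table(), core_mmu_va2idx() and core_mmu_get_entry(). The
 * variable names are made up; passing UINT_MAX as @max_level asks for the
 * deepest table available:
 *
 *	struct core_mmu_table_info ti = { };
 *	paddr_t pa = 0;
 *	uint32_t attr = 0;
 *
 *	if (core_mmu_find_table(NULL, va, UINT_MAX, &ti))
 *		core_mmu_get_entry(&ti, core_mmu_va2idx(&ti, va), &pa, &attr);
 */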

/*
 * core_mmu_va2idx() - Translate from virtual address to table index
 * @tbl_info: Translation table properties
 * @va: Virtual address to translate
 * @returns index in translation table
 */
static inline unsigned core_mmu_va2idx(struct core_mmu_table_info *tbl_info,
				       vaddr_t va)
{
#ifdef RV64
	if (tbl_info->level == CORE_MMU_BASE_TABLE_LEVEL)
		va &= ~GENMASK_64(63, RISCV_MMU_VA_WIDTH);
#endif
	return (va - tbl_info->va_base) >> tbl_info->shift;
}

/*
 * core_mmu_idx2va() - Translate from table index to virtual address
 * @tbl_info: Translation table properties
 * @idx: Index to translate
 * @returns Virtual address
 */
static inline vaddr_t core_mmu_idx2va(struct core_mmu_table_info *tbl_info,
				      unsigned idx)
{
	return (idx << tbl_info->shift) + tbl_info->va_base;
}

/*
 * core_mmu_get_block_offset() - Get offset inside a block/page
 * @tbl_info: Translation table properties
 * @pa: Physical address
 * @returns offset within one block of the translation table
 */
static inline size_t core_mmu_get_block_offset(
			struct core_mmu_table_info *tbl_info, paddr_t pa)
{
	return pa & ((1 << tbl_info->shift) - 1);
}
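
/*
 * Illustrative sketch only (not part of the original header), using made-up
 * variable names: for a last level table (@shift == SMALL_PAGE_SHIFT)
 * described by @ti, core_mmu_va2idx() and core_mmu_idx2va() round a virtual
 * address down to its page boundary:
 *
 *	vaddr_t page_va = core_mmu_idx2va(&ti, core_mmu_va2idx(&ti, va));
 *
 * and core_mmu_get_block_offset(&ti, pa) equals pa & SMALL_PAGE_MASK, the
 * offset of @pa within its page.
 */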

/*
 * core_mmu_is_dynamic_vaspace() - Check if a memory region belongs to an
 * empty virtual address space that is used for dynamic mappings
 * @mm: memory region to be checked
 * @returns result of the check
 */
static inline bool core_mmu_is_dynamic_vaspace(struct tee_mmap_region *mm)
{
	switch (mm->type) {
	case MEM_AREA_RES_VASPACE:
	case MEM_AREA_SHM_VASPACE:
	case MEM_AREA_NEX_DYN_VASPACE:
	case MEM_AREA_TEE_DYN_VASPACE:
		return true;
	default:
		return false;
	}
}

/*
 * core_mmu_map_pages() - map a list of pages at a given virtual address
 * @vstart: Virtual address where the mapping begins
 * @pages: Array of page addresses
 * @num_pages: Number of pages
 * @memtype: Type of memory to be mapped
 *
 * Note: This function asserts that the pages are not mapped executable for
 * kernel (privileged) mode.
 *
 * @returns: TEE_SUCCESS on success, TEE_ERROR_XXX on error
 */
TEE_Result core_mmu_map_pages(vaddr_t vstart, paddr_t *pages, size_t num_pages,
			      enum teecore_memtypes memtype);

/*
 * core_mmu_map_contiguous_pages() - map a range of pages at a given virtual
 * address
 * @vstart: Virtual address where the mapping begins
 * @pstart: Physical address of the first page
 * @num_pages: Number of pages
 * @memtype: Type of memory to be mapped
 *
 * Note: This function asserts that the pages are not mapped executable for
 * kernel (privileged) mode.
 *
 * @returns: TEE_SUCCESS on success, TEE_ERROR_XXX on error
 */
TEE_Result core_mmu_map_contiguous_pages(vaddr_t vstart, paddr_t pstart,
					 size_t num_pages,
					 enum teecore_memtypes memtype);

/*
 * core_mmu_unmap_pages() - remove the mapping at a given virtual address
 * @vstart: Virtual address where the mapping begins
 * @num_pages: Number of pages to unmap
 */
void core_mmu_unmap_pages(vaddr_t vstart, size_t num_pages);
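
/*
 * Illustrative sketch only (not part of the original header), with made-up
 * values (va, pa0, pa1) and an assumed non-executable memory type: mapping
 * two physically scattered secure pages at a core virtual address inside a
 * dynamic VA space region (see core_mmu_is_dynamic_vaspace()) and removing
 * the mapping again:
 *
 *	paddr_t pages[2] = { pa0, pa1 };
 *
 *	if (core_mmu_map_pages(va, pages, ARRAY_SIZE(pages),
 *			       MEM_AREA_TEE_RAM_RW) == TEE_SUCCESS)
 *		core_mmu_unmap_pages(va, ARRAY_SIZE(pages));
 */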

/*
 * core_mmu_user_mapping_is_active() - Report if user mapping is active
 * @returns true if a user VA space is active, false if user VA space is
 * inactive.
 */
bool core_mmu_user_mapping_is_active(void);

/*
 * core_mmu_user_va_range_is_defined() - check if user va range is defined
 * @returns true if a user VA space is defined, false if not.
 */
bool core_mmu_user_va_range_is_defined(void);

/*
 * core_mmu_mattr_is_ok() - Check that supplied mem attributes can be used
 * @returns true if the attributes can be used, false if not.
 */
bool core_mmu_mattr_is_ok(uint32_t mattr);

TEE_Result core_mmu_for_each_map(void *ptr,
				 TEE_Result (*fn)(struct tee_mmap_region *map,
						  void *ptr));

void core_mmu_get_mem_by_type(enum teecore_memtypes type, vaddr_t *s,
			      vaddr_t *e);

enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa);

/* Routines to retrieve the shared memory configuration */
static inline bool core_mmu_is_shm_cached(void)
{
	return mattr_is_cached(core_mmu_type_to_attr(MEM_AREA_NSEC_SHM));
}

TEE_Result core_mmu_remove_mapping(enum teecore_memtypes type, void *addr,
				   size_t len);
void *core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr,
			   size_t len);
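
/*
 * Illustrative sketch only (not part of the original header), with a
 * hypothetical device base address MY_DEV_BASE: mapping a secure device
 * at runtime and tearing the mapping down again when done:
 *
 *	void *va = core_mmu_add_mapping(MEM_AREA_IO_SEC, MY_DEV_BASE,
 *					SMALL_PAGE_SIZE);
 *
 *	if (va)
 *		(void)core_mmu_remove_mapping(MEM_AREA_IO_SEC, va,
 *					      SMALL_PAGE_SIZE);
 */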

/*
 * core_mmu_find_mapping_exclusive() - Find mapping of specified type and
 *				       length. If more than one mapping of
 *				       specified type is present, NULL will be
 *				       returned.
 * @type: memory type
 * @len: length in bytes
 */
struct tee_mmap_region *
core_mmu_find_mapping_exclusive(enum teecore_memtypes type, size_t len);

/*
 * tlbi_va_range() - Invalidate TLB for virtual address range
 * @va: start virtual address, must be a multiple of @granule
 * @len: length in bytes of range, must be a multiple of @granule
 * @granule: granularity of mapping, supported values are
 *	     CORE_MMU_PGDIR_SIZE or SMALL_PAGE_SIZE. This value must
 *	     match the actual mappings.
 */
void tlbi_va_range(vaddr_t va, size_t len, size_t granule);

/*
 * tlbi_va_range_asid() - Invalidate TLB for virtual address range for
 *			  a specific ASID
 * @va: start virtual address, must be a multiple of @granule
 * @len: length in bytes of range, must be a multiple of @granule
 * @granule: granularity of mapping, supported values are
 *	     CORE_MMU_PGDIR_SIZE or SMALL_PAGE_SIZE. This value must
 *	     match the actual mappings.
 * @asid: Address space identifier
 */
void tlbi_va_range_asid(vaddr_t va, size_t len, size_t granule, uint32_t asid);
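
/*
 * Illustrative sketch only (not part of the original header): after changing
 * or removing a range of small-page mappings, the stale TLB entries for that
 * range would typically be invalidated with (num_pages is made up):
 *
 *	tlbi_va_range(va, num_pages * SMALL_PAGE_SIZE, SMALL_PAGE_SIZE);
 */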

/* Check whether the CPU MMU is enabled */
bool cpu_mmu_enabled(void);

#ifdef CFG_CORE_DYN_SHM
/*
 * Check if the platform defines non-secure DDR range(s).
 * Static SHM (MEM_AREA_NSEC_SHM) is not covered by this API as it is
 * always present.
 */
bool core_mmu_nsec_ddr_is_defined(void);

void core_mmu_set_discovered_nsec_ddr(struct core_mmu_phys_mem *start,
				      size_t nelems);
#endif

/* Initialize MMU partition */
void core_init_mmu_prtn(struct mmu_partition *prtn, struct memory_map *mem_map);

unsigned int asid_alloc(void);
void asid_free(unsigned int asid);

#ifdef CFG_SECURE_DATA_PATH
/* Alloc and fill SDP memory objects table - table is NULL terminated */
struct mobj **core_sdp_mem_create_mobjs(void);
#endif

#ifdef CFG_NS_VIRTUALIZATION
size_t core_mmu_get_total_pages_size(void);
struct mmu_partition *core_alloc_mmu_prtn(void *tables);
void core_free_mmu_prtn(struct mmu_partition *prtn);
void core_mmu_set_prtn(struct mmu_partition *prtn);
void core_mmu_set_default_prtn(void);
void core_mmu_set_default_prtn_tbl(void);
#endif

/* Initialize the physical memory pool */
void core_mmu_init_phys_mem(void);

void core_init_mmu(struct memory_map *mem_map);

void core_mmu_set_info_table(struct core_mmu_table_info *tbl_info,
			     unsigned int level, vaddr_t va_base, void *table);
void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
				struct user_mode_ctx *uctx);
void core_mmu_map_region(struct mmu_partition *prtn,
			 struct tee_mmap_region *mm);

bool arch_va2pa_helper(void *va, paddr_t *pa);

vaddr_t arch_aslr_base_addr(vaddr_t start_addr, uint64_t seed,
			    unsigned int iteration_count);

static inline bool core_mmu_check_end_pa(paddr_t pa, size_t len)
{
	paddr_t end_pa = 0;

	if (ADD_OVERFLOW(pa, len - 1, &end_pa))
		return false;
	return core_mmu_check_max_pa(end_pa);
}

/*
 * core_mmu_set_secure_memory() - set the physical secure memory range
 * @base: base address of secure memory
 * @size: size of secure memory
 *
 * The physical secure memory range is not known in advance when OP-TEE is
 * relocatable, so this information must be supplied once during boot,
 * before the translation tables can be initialized and the MMU enabled.
 */
void core_mmu_set_secure_memory(paddr_t base, size_t size);

/*
 * core_mmu_get_secure_memory() - get the physical secure memory range
 * @base: base address of secure memory
 * @size: size of secure memory
 *
 * The physical secure memory range returned covers at least the memory
 * range used by OP-TEE Core, but may cover more memory depending on the
 * configuration.
 */
void core_mmu_get_secure_memory(paddr_t *base, paddr_size_t *size);

#endif /*__ASSEMBLER__*/

#endif /* __MM_CORE_MMU_H */