/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */
#ifndef CORE_MMU_H
#define CORE_MMU_H

#ifndef __ASSEMBLER__
#include <assert.h>
#include <compiler.h>
#include <kernel/user_ta.h>
#include <mm/tee_mmu_types.h>
#include <types_ext.h>
#include <util.h>
#endif

#include <mm/core_mmu_arch.h>
#include <platform_config.h>

/* A small page is the smallest unit of memory that can be mapped */
#define SMALL_PAGE_SIZE		BIT(SMALL_PAGE_SHIFT)
#define SMALL_PAGE_MASK		((paddr_t)SMALL_PAGE_SIZE - 1)

/*
 * PGDIR is the translation table above the translation table that holds
 * the pages.
 */
#define CORE_MMU_PGDIR_SIZE	BIT(CORE_MMU_PGDIR_SHIFT)
#define CORE_MMU_PGDIR_MASK	((paddr_t)CORE_MMU_PGDIR_SIZE - 1)

/* TA user space code, data, stack and heap are mapped using this granularity */
#define CORE_MMU_USER_CODE_SIZE	BIT(CORE_MMU_USER_CODE_SHIFT)
#define CORE_MMU_USER_CODE_MASK	((paddr_t)CORE_MMU_USER_CODE_SIZE - 1)

/* TA user space parameters are mapped using this granularity */
#define CORE_MMU_USER_PARAM_SIZE	BIT(CORE_MMU_USER_PARAM_SHIFT)
#define CORE_MMU_USER_PARAM_MASK	((paddr_t)CORE_MMU_USER_PARAM_SIZE - 1)

/*
 * Identity mapping constraint: the virtual base address is the physical
 * start address. Macros not set by the platform get default values.
 */
#ifndef TEE_RAM_VA_SIZE
#define TEE_RAM_VA_SIZE		CORE_MMU_PGDIR_SIZE
#endif

#ifndef TEE_LOAD_ADDR
#define TEE_LOAD_ADDR		TEE_RAM_START
#endif

#define TEE_RAM_VA_START	TEE_RAM_START
#define TEE_TEXT_VA_START	(TEE_RAM_VA_START + \
				 (TEE_LOAD_ADDR - TEE_RAM_START))

#ifndef STACK_ALIGNMENT
#define STACK_ALIGNMENT		(sizeof(long) * U(2))
#endif
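/*
 * Illustrative example (a sketch, not part of the API): the SIZE/MASK
 * macro pairs above follow the usual power-of-two convention, so an
 * address can be split into its page base and offset with:
 *
 *   paddr_t base = pa & ~SMALL_PAGE_MASK;
 *   paddr_t offs = pa & SMALL_PAGE_MASK;
 *
 * The same pattern applies to CORE_MMU_PGDIR_MASK and the user
 * code/param masks.
 */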

#ifndef __ASSEMBLER__
/*
 * Memory area type:
 * MEM_AREA_END: Reserved, marks the end of a table of mapping areas.
 * MEM_AREA_TEE_RAM: core RAM (read/write/executable, secure, reserved to TEE)
 * MEM_AREA_TEE_RAM_RX: core private read-only/executable memory (secure)
 * MEM_AREA_TEE_RAM_RO: core private read-only/non-executable memory (secure)
 * MEM_AREA_TEE_RAM_RW: core private read/write/non-executable memory (secure)
 * MEM_AREA_INIT_RAM_RO: init private read-only/non-executable memory (secure)
 * MEM_AREA_INIT_RAM_RX: init private read-only/executable memory (secure)
 * MEM_AREA_NEX_RAM_RO: nexus private read-only/non-executable memory (secure)
 * MEM_AREA_NEX_RAM_RW: nexus private r/w/non-executable memory (secure)
 * MEM_AREA_TEE_COHERENT: teecore coherent RAM (secure, reserved to TEE)
 * MEM_AREA_TEE_ASAN: core address sanitizer RAM (secure, reserved to TEE)
 * MEM_AREA_IDENTITY_MAP_RX: core identity mapped r/o executable memory (secure)
 * MEM_AREA_TA_RAM: Secure RAM where teecore loads/executes TA instances.
 * MEM_AREA_NSEC_SHM: NonSecure shared RAM between NSec and TEE.
 * MEM_AREA_RAM_NSEC: NonSecure RAM storing data
 * MEM_AREA_RAM_SEC: Secure RAM storing some secrets
 * MEM_AREA_IO_NSEC: NonSecure HW mapped registers
 * MEM_AREA_IO_SEC: Secure HW mapped registers
 * MEM_AREA_EXT_DT: Memory where the external device tree is loaded
 * MEM_AREA_RES_VASPACE: Reserved virtual memory space
 * MEM_AREA_SHM_VASPACE: Virtual memory space for dynamic shared memory buffers
 * MEM_AREA_TS_VASPACE: TS va space, only used with phys_to_virt()
 * MEM_AREA_PAGER_VASPACE: Virtual memory space used by the pager
 * MEM_AREA_SDP_MEM: Secure Data Path memory
 * MEM_AREA_DDR_OVERALL: Overall DDR address range, candidate to dynamic shm.
 * MEM_AREA_SEC_RAM_OVERALL: Whole secure RAM
 * MEM_AREA_MAXTYPE: lowest invalid 'type' value
 */
enum teecore_memtypes {
	MEM_AREA_END = 0,
	MEM_AREA_TEE_RAM,
	MEM_AREA_TEE_RAM_RX,
	MEM_AREA_TEE_RAM_RO,
	MEM_AREA_TEE_RAM_RW,
	MEM_AREA_INIT_RAM_RO,
	MEM_AREA_INIT_RAM_RX,
	MEM_AREA_NEX_RAM_RO,
	MEM_AREA_NEX_RAM_RW,
	MEM_AREA_TEE_COHERENT,
	MEM_AREA_TEE_ASAN,
	MEM_AREA_IDENTITY_MAP_RX,
	MEM_AREA_TA_RAM,
	MEM_AREA_NSEC_SHM,
	MEM_AREA_RAM_NSEC,
	MEM_AREA_RAM_SEC,
	MEM_AREA_IO_NSEC,
	MEM_AREA_IO_SEC,
	MEM_AREA_EXT_DT,
	MEM_AREA_RES_VASPACE,
	MEM_AREA_SHM_VASPACE,
	MEM_AREA_TS_VASPACE,
	MEM_AREA_PAGER_VASPACE,
	MEM_AREA_SDP_MEM,
	MEM_AREA_DDR_OVERALL,
	MEM_AREA_SEC_RAM_OVERALL,
	MEM_AREA_MAXTYPE
};

static inline const char *teecore_memtype_name(enum teecore_memtypes type)
{
	static const char * const names[] = {
		[MEM_AREA_END] = "END",
		[MEM_AREA_TEE_RAM] = "TEE_RAM_RWX",
		[MEM_AREA_TEE_RAM_RX] = "TEE_RAM_RX",
		[MEM_AREA_TEE_RAM_RO] = "TEE_RAM_RO",
		[MEM_AREA_TEE_RAM_RW] = "TEE_RAM_RW",
		[MEM_AREA_INIT_RAM_RO] = "INIT_RAM_RO",
		[MEM_AREA_INIT_RAM_RX] = "INIT_RAM_RX",
		[MEM_AREA_NEX_RAM_RO] = "NEX_RAM_RO",
		[MEM_AREA_NEX_RAM_RW] = "NEX_RAM_RW",
		[MEM_AREA_TEE_ASAN] = "TEE_ASAN",
		[MEM_AREA_IDENTITY_MAP_RX] = "IDENTITY_MAP_RX",
		[MEM_AREA_TEE_COHERENT] = "TEE_COHERENT",
		[MEM_AREA_TA_RAM] = "TA_RAM",
		[MEM_AREA_NSEC_SHM] = "NSEC_SHM",
		[MEM_AREA_RAM_NSEC] = "RAM_NSEC",
		[MEM_AREA_RAM_SEC] = "RAM_SEC",
		[MEM_AREA_IO_NSEC] = "IO_NSEC",
		[MEM_AREA_IO_SEC] = "IO_SEC",
		[MEM_AREA_EXT_DT] = "EXT_DT",
		[MEM_AREA_RES_VASPACE] = "RES_VASPACE",
		[MEM_AREA_SHM_VASPACE] = "SHM_VASPACE",
		[MEM_AREA_TS_VASPACE] = "TS_VASPACE",
		[MEM_AREA_PAGER_VASPACE] = "PAGER_VASPACE",
		[MEM_AREA_SDP_MEM] = "SDP_MEM",
		[MEM_AREA_DDR_OVERALL] = "DDR_OVERALL",
		[MEM_AREA_SEC_RAM_OVERALL] = "SEC_RAM_OVERALL",
	};

	COMPILE_TIME_ASSERT(ARRAY_SIZE(names) == MEM_AREA_MAXTYPE);
	return names[type];
}

#ifdef CFG_CORE_RWDATA_NOEXEC
#define MEM_AREA_TEE_RAM_RW_DATA	MEM_AREA_TEE_RAM_RW
#else
#define MEM_AREA_TEE_RAM_RW_DATA	MEM_AREA_TEE_RAM
#endif

struct core_mmu_phys_mem {
	const char *name;
	enum teecore_memtypes type;
	__extension__ union {
#if __SIZEOF_LONG__ != __SIZEOF_PADDR__
		struct {
			uint32_t lo_addr;
			uint32_t hi_addr;
		};
#endif
		paddr_t addr;
	};
	__extension__ union {
#if __SIZEOF_LONG__ != __SIZEOF_PADDR__
		struct {
			uint32_t lo_size;
			uint32_t hi_size;
		};
#endif
		paddr_size_t size;
	};
};

#define __register_memory(_name, _type, _addr, _size, _section) \
	SCATTERED_ARRAY_DEFINE_ITEM(_section, struct core_mmu_phys_mem) = \
		{ .name = (_name), .type = (_type), .addr = (_addr), \
		  .size = (_size) }

#if __SIZEOF_LONG__ != __SIZEOF_PADDR__
#define __register_memory_ul(_name, _type, _addr, _size, _section) \
	SCATTERED_ARRAY_DEFINE_ITEM(_section, struct core_mmu_phys_mem) = \
		{ .name = (_name), .type = (_type), .lo_addr = (_addr), \
		  .lo_size = (_size) }
#else
#define __register_memory_ul(_name, _type, _addr, _size, _section) \
	__register_memory(_name, _type, _addr, _size, _section)
#endif

#define register_phys_mem(type, addr, size) \
	__register_memory(#addr, (type), (addr), (size), phys_mem_map)

#define register_phys_mem_ul(type, addr, size) \
	__register_memory_ul(#addr, (type), (addr), (size), phys_mem_map)

/* Same as register_phys_mem() but with PGDIR_SIZE granularity */
#define register_phys_mem_pgdir(type, addr, size) \
	register_phys_mem(type, ROUNDDOWN(addr, CORE_MMU_PGDIR_SIZE), \
			  ROUNDUP(size + addr - \
				  ROUNDDOWN(addr, CORE_MMU_PGDIR_SIZE), \
				  CORE_MMU_PGDIR_SIZE))
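/*
 * Worked example (illustrative values only): with a 2 MiB
 * CORE_MMU_PGDIR_SIZE, register_phys_mem_pgdir(MEM_AREA_IO_SEC,
 * 0x1c090000, 0x1000) registers base 0x1c000000 and size 0x200000,
 * i.e. the whole 2 MiB translation-table entry covering the range.
 */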

#ifdef CFG_SECURE_DATA_PATH
#define register_sdp_mem(addr, size) \
	__register_memory(#addr, MEM_AREA_SDP_MEM, (addr), (size), \
			  phys_sdp_mem)
#else
#define register_sdp_mem(addr, size) \
	static int CONCAT(__register_sdp_mem_unused, __COUNTER__) __unused
#endif

/* register_dynamic_shm() is deprecated, please use register_ddr() instead */
#define register_dynamic_shm(addr, size) \
	__register_memory(#addr, MEM_AREA_DDR_OVERALL, (addr), (size), \
			  phys_ddr_overall_compat)

/*
 * register_ddr() - Define a memory range
 * @addr: Base address
 * @size: Length
 *
 * This macro can be used multiple times to define disjoint ranges. During
 * initialization, holes are carved out of these ranges where they overlap
 * with special memory, for instance memory registered with
 * register_sdp_mem().
 *
 * The memory that remains is accepted as non-secure shared memory when
 * communicating with normal world.
 *
 * This macro is an alternative to supplying the memory description with a
 * device tree blob.
 */
#define register_ddr(addr, size) \
	__register_memory(#addr, MEM_AREA_DDR_OVERALL, (addr), (size), \
			  phys_ddr_overall)

#define phys_ddr_overall_begin \
	SCATTERED_ARRAY_BEGIN(phys_ddr_overall, struct core_mmu_phys_mem)

#define phys_ddr_overall_end \
	SCATTERED_ARRAY_END(phys_ddr_overall, struct core_mmu_phys_mem)

#define phys_ddr_overall_compat_begin \
	SCATTERED_ARRAY_BEGIN(phys_ddr_overall_compat, struct core_mmu_phys_mem)

#define phys_ddr_overall_compat_end \
	SCATTERED_ARRAY_END(phys_ddr_overall_compat, struct core_mmu_phys_mem)

#define phys_sdp_mem_begin \
	SCATTERED_ARRAY_BEGIN(phys_sdp_mem, struct core_mmu_phys_mem)

#define phys_sdp_mem_end \
	SCATTERED_ARRAY_END(phys_sdp_mem, struct core_mmu_phys_mem)

#define phys_mem_map_begin \
	SCATTERED_ARRAY_BEGIN(phys_mem_map, struct core_mmu_phys_mem)

#define phys_mem_map_end \
	SCATTERED_ARRAY_END(phys_mem_map, struct core_mmu_phys_mem)

#ifdef CFG_CORE_RESERVED_SHM
/* Default NSec shared memory allocated from NSec world */
extern unsigned long default_nsec_shm_paddr;
extern unsigned long default_nsec_shm_size;
#endif

void core_init_mmu_map(unsigned long seed, struct core_mmu_config *cfg);
void core_init_mmu_regs(struct core_mmu_config *cfg);

/* Arch specific function to help optimizing 1 MMU xlat table */
bool core_mmu_prefer_tee_ram_at_top(paddr_t paddr);
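/*
 * Illustrative example (hypothetical platform constants): a platform
 * typically registers its memories with the macros above from one of
 * its source files, e.g.:
 *
 *   register_phys_mem_pgdir(MEM_AREA_IO_SEC, CONSOLE_UART_BASE,
 *                           CORE_MMU_PGDIR_SIZE);
 *   register_ddr(DRAM0_BASE, DRAM0_SIZE);
 *
 * CONSOLE_UART_BASE, DRAM0_BASE and DRAM0_SIZE stand in for values a
 * platform would define in its platform_config.h.
 */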

/*
 * struct mmu_partition - stores an MMU partition
 *
 * It represents the whole MMU mapping. It is possible to create multiple
 * partitions and switch between them at runtime, effectively changing how
 * OP-TEE sees memory. This is an opaque struct which is defined
 * differently for v7 and LPAE MMUs.
 *
 * This structure is mostly used when virtualization is enabled. With
 * CFG_VIRTUALIZATION=n only the default partition exists.
 */
struct mmu_partition;

/*
 * core_mmu_get_user_va_range() - Return range of user va space
 * @base: Lowest user virtual address
 * @size: Size in bytes of user address space
 */
void core_mmu_get_user_va_range(vaddr_t *base, size_t *size);

/*
 * enum core_mmu_fault - different kinds of faults
 * @CORE_MMU_FAULT_ALIGNMENT: alignment fault
 * @CORE_MMU_FAULT_DEBUG_EVENT: debug event
 * @CORE_MMU_FAULT_TRANSLATION: translation fault
 * @CORE_MMU_FAULT_WRITE_PERMISSION: permission fault during write
 * @CORE_MMU_FAULT_READ_PERMISSION: permission fault during read
 * @CORE_MMU_FAULT_ASYNC_EXTERNAL: asynchronous external abort
 * @CORE_MMU_FAULT_ACCESS_BIT: access bit fault
 * @CORE_MMU_FAULT_OTHER: other/unknown fault
 */
enum core_mmu_fault {
	CORE_MMU_FAULT_ALIGNMENT,
	CORE_MMU_FAULT_DEBUG_EVENT,
	CORE_MMU_FAULT_TRANSLATION,
	CORE_MMU_FAULT_WRITE_PERMISSION,
	CORE_MMU_FAULT_READ_PERMISSION,
	CORE_MMU_FAULT_ASYNC_EXTERNAL,
	CORE_MMU_FAULT_ACCESS_BIT,
	CORE_MMU_FAULT_OTHER,
};

/*
 * core_mmu_get_fault_type() - get fault type
 * @fault_descr: Content of fault status or exception syndrome register
 * @returns an enum describing the content of the fault status register.
 */
enum core_mmu_fault core_mmu_get_fault_type(uint32_t fault_descr);

/*
 * core_mmu_type_to_attr() - convert memory type to attribute
 * @t: memory type
 * @returns an attribute that can be passed to core_mmu_set_entry() and
 * friends
 */
uint32_t core_mmu_type_to_attr(enum teecore_memtypes t);

/*
 * core_mmu_create_user_map() - Create user mode mapping
 * @uctx: Pointer to user mode context
 * @map: MMU configuration to use when activating this VA space
 */
void core_mmu_create_user_map(struct user_mode_ctx *uctx,
			      struct core_mmu_user_map *map);

/*
 * core_mmu_get_user_map() - Reads current MMU configuration for user VA space
 * @map: MMU configuration for current user VA space.
 */
void core_mmu_get_user_map(struct core_mmu_user_map *map);

/*
 * core_mmu_set_user_map() - Set new MMU configuration for user VA space
 * @map: User context MMU configuration or NULL to set core VA space
 *
 * Activate user VA space mapping and set its ASID if @map is not NULL,
 * otherwise activate core mapping and set ASID to 0.
 */
void core_mmu_set_user_map(struct core_mmu_user_map *map);
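/*
 * Illustrative pattern (a sketch of how the three functions above
 * combine, not a mandated sequence): saving, switching away from, and
 * restoring the active user VA space:
 *
 *   struct core_mmu_user_map map = { };
 *
 *   core_mmu_get_user_map(&map);
 *   core_mmu_set_user_map(NULL);     (activate core-only mapping)
 *   ...
 *   core_mmu_set_user_map(&map);     (restore saved user VA space)
 */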

/*
 * struct core_mmu_table_info - Properties for a translation table
 * @table: Pointer to translation table
 * @va_base: VA base address of the translation table
 * @level: Translation table level
 * @shift: The shift of each entry in the table
 * @num_entries: Number of entries in this table.
 */
struct core_mmu_table_info {
	void *table;
	vaddr_t va_base;
	unsigned level;
	unsigned shift;
	unsigned num_entries;
#ifdef CFG_VIRTUALIZATION
	struct mmu_partition *prtn;
#endif
};

/*
 * core_mmu_find_table() - Locates a translation table
 * @prtn: MMU partition where search should be performed
 * @va: Virtual address for the table to cover
 * @max_level: Don't traverse beyond this level
 * @tbl_info: Pointer to where to store properties.
 * @return true if a translation table was found, false on error
 */
bool core_mmu_find_table(struct mmu_partition *prtn, vaddr_t va,
			 unsigned max_level,
			 struct core_mmu_table_info *tbl_info);

/*
 * core_mmu_entry_to_finer_grained() - divide the mapping at the current
 * level into smaller ones so memory can be mapped with finer granularity
 * @tbl_info: table where the target record is located
 * @idx: index of the record for which a pgdir must be set up
 * @secure: true/false if pgdir maps secure/non-secure memory (32bit mmu)
 * @return true on success, false on error
 */
bool core_mmu_entry_to_finer_grained(struct core_mmu_table_info *tbl_info,
				     unsigned int idx, bool secure);

void core_mmu_set_entry_primitive(void *table, size_t level, size_t idx,
				  paddr_t pa, uint32_t attr);

void core_mmu_get_user_pgdir(struct core_mmu_table_info *pgd_info);

/*
 * core_mmu_set_entry() - Set entry in translation table
 * @tbl_info: Translation table properties
 * @idx: Index of entry to update
 * @pa: Physical address to assign entry
 * @attr: Attributes to assign entry
 */
void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
			paddr_t pa, uint32_t attr);

void core_mmu_get_entry_primitive(const void *table, size_t level, size_t idx,
				  paddr_t *pa, uint32_t *attr);

/*
 * core_mmu_get_entry() - Get entry from translation table
 * @tbl_info: Translation table properties
 * @idx: Index of entry to read
 * @pa: Physical address is returned here if pa is not NULL
 * @attr: Attributes are returned here if attr is not NULL
 */
void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
			paddr_t *pa, uint32_t *attr);

/*
 * core_mmu_va2idx() - Translate from virtual address to table index
 * @tbl_info: Translation table properties
 * @va: Virtual address to translate
 * @returns index in translation table
 */
static inline unsigned core_mmu_va2idx(struct core_mmu_table_info *tbl_info,
				       vaddr_t va)
{
	return (va - tbl_info->va_base) >> tbl_info->shift;
}

/*
 * core_mmu_idx2va() - Translate from table index to virtual address
 * @tbl_info: Translation table properties
 * @idx: Index to translate
 * @returns Virtual address
 */
static inline vaddr_t core_mmu_idx2va(struct core_mmu_table_info *tbl_info,
				      unsigned idx)
{
	return (idx << tbl_info->shift) + tbl_info->va_base;
}

/*
 * core_mmu_get_block_offset() - Get offset inside a block/page
 * @tbl_info: Translation table properties
 * @pa: Physical address
 * @returns offset within one block of the translation table
 */
static inline size_t core_mmu_get_block_offset(
			struct core_mmu_table_info *tbl_info, paddr_t pa)
{
	return pa & ((1 << tbl_info->shift) - 1);
}

/*
 * core_mmu_is_dynamic_vaspace() - Check if a memory region belongs to
 * the empty virtual address space that is used for dynamic mappings
 * @mm: memory region to be checked
 * @returns result of the check
 */
static inline bool core_mmu_is_dynamic_vaspace(struct tee_mmap_region *mm)
{
	return mm->type == MEM_AREA_RES_VASPACE ||
	       mm->type == MEM_AREA_SHM_VASPACE;
}
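/*
 * Illustrative example (a sketch; assumes @va is mapped and that a NULL
 * partition selects the default one): reading back the physical address
 * and attributes of the entry covering a virtual address:
 *
 *   struct core_mmu_table_info tbl_info = { };
 *   paddr_t pa = 0;
 *   uint32_t attr = 0;
 *
 *   if (core_mmu_find_table(NULL, va, UINT_MAX, &tbl_info))
 *           core_mmu_get_entry(&tbl_info, core_mmu_va2idx(&tbl_info, va),
 *                              &pa, &attr);
 *
 * With shift 12 (4 KiB granularity) and va_base 0x40000000,
 * core_mmu_va2idx() maps va 0x40003000 to index 3.
 */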

/*
 * core_mmu_map_pages() - map list of pages at given virtual address
 * @vstart: Virtual address where mapping begins
 * @pages: Array of page addresses
 * @num_pages: Number of pages
 * @memtype: Type of memory to be mapped
 *
 * Note: This function asserts that pages are not mapped executable for
 * kernel (privileged) mode.
 *
 * @returns: TEE_SUCCESS on success, TEE_ERROR_XXX on error
 */
TEE_Result core_mmu_map_pages(vaddr_t vstart, paddr_t *pages, size_t num_pages,
			      enum teecore_memtypes memtype);

/*
 * core_mmu_map_contiguous_pages() - map range of pages at given virtual
 * address
 * @vstart: Virtual address where mapping begins
 * @pstart: Physical address of the first page
 * @num_pages: Number of pages
 * @memtype: Type of memory to be mapped
 *
 * Note: This function asserts that pages are not mapped executable for
 * kernel (privileged) mode.
 *
 * @returns: TEE_SUCCESS on success, TEE_ERROR_XXX on error
 */
TEE_Result core_mmu_map_contiguous_pages(vaddr_t vstart, paddr_t pstart,
					 size_t num_pages,
					 enum teecore_memtypes memtype);

/*
 * core_mmu_unmap_pages() - remove mapping at given virtual address
 * @vstart: Virtual address where mapping begins
 * @num_pages: Number of pages to unmap
 */
void core_mmu_unmap_pages(vaddr_t vstart, size_t num_pages);

/*
 * core_mmu_user_mapping_is_active() - Report if user mapping is active
 * @returns true if a user VA space is active, false if user VA space is
 * inactive.
 */
bool core_mmu_user_mapping_is_active(void);

/*
 * core_mmu_mattr_is_ok() - Check that supplied mem attributes can be used
 * @returns true if the attributes can be used, false if not.
 */
bool core_mmu_mattr_is_ok(uint32_t mattr);

void core_mmu_get_mem_by_type(enum teecore_memtypes type, vaddr_t *s,
			      vaddr_t *e);

enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa);

/* Routines to retrieve shared memory configuration */
static inline bool core_mmu_is_shm_cached(void)
{
	return mattr_is_cached(core_mmu_type_to_attr(MEM_AREA_NSEC_SHM));
}

TEE_Result core_mmu_remove_mapping(enum teecore_memtypes type, void *addr,
				   size_t len);
void *core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr,
			   size_t len);

/*
 * core_mmu_find_mapping_exclusive() - Find mapping of specified type and
 * length. If more than one mapping of the specified type is present, NULL
 * is returned.
 * @type: memory type
 * @len: length in bytes
 */
struct tee_mmap_region *
core_mmu_find_mapping_exclusive(enum teecore_memtypes type, size_t len);

/*
 * tlbi_mva_range() - Invalidate TLB for virtual address range
 * @va: start virtual address, must be a multiple of @granule
 * @len: length in bytes of range, must be a multiple of @granule
 * @granule: granularity of mapping, supported values are
 * CORE_MMU_PGDIR_SIZE or SMALL_PAGE_SIZE. This value must match the
 * actual mappings.
 */
void tlbi_mva_range(vaddr_t va, size_t len, size_t granule);
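/*
 * Illustrative use (hypothetical values): after changing four
 * small-page mappings starting at @va, the stale TLB entries could be
 * flushed with:
 *
 *   tlbi_mva_range(va, 4 * SMALL_PAGE_SIZE, SMALL_PAGE_SIZE);
 */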

/*
 * tlbi_mva_range_asid() - Invalidate TLB for virtual address range for
 * a specific ASID
 * @va: start virtual address, must be a multiple of @granule
 * @len: length in bytes of range, must be a multiple of @granule
 * @granule: granularity of mapping, supported values are
 * CORE_MMU_PGDIR_SIZE or SMALL_PAGE_SIZE. This value must match the
 * actual mappings.
 * @asid: Address space identifier
 */
void tlbi_mva_range_asid(vaddr_t va, size_t len, size_t granule, uint32_t asid);

/* Check whether the CPU MMU is enabled */
bool cpu_mmu_enabled(void);

#ifdef CFG_CORE_DYN_SHM
/*
 * Check if the platform defines nsec DDR range(s).
 * Static SHM (MEM_AREA_NSEC_SHM) is not covered by this API as it is
 * always present.
 */
bool core_mmu_nsec_ddr_is_defined(void);

void core_mmu_set_discovered_nsec_ddr(struct core_mmu_phys_mem *start,
				      size_t nelems);
#endif

/* Initialize MMU partition */
void core_init_mmu_prtn(struct mmu_partition *prtn, struct tee_mmap_region *mm);

unsigned int asid_alloc(void);
void asid_free(unsigned int asid);

#ifdef CFG_SECURE_DATA_PATH
/* Allocate and fill the SDP memory objects table - table is NULL terminated */
struct mobj **core_sdp_mem_create_mobjs(void);
#endif

#ifdef CFG_VIRTUALIZATION
size_t core_mmu_get_total_pages_size(void);
struct mmu_partition *core_alloc_mmu_prtn(void *tables);
void core_free_mmu_prtn(struct mmu_partition *prtn);
void core_mmu_set_prtn(struct mmu_partition *prtn);
void core_mmu_set_default_prtn(void);
void core_mmu_set_default_prtn_tbl(void);
#endif

void core_mmu_init_virtualization(void);

/* Initialize some allocation pools */
void core_mmu_init_ta_ram(void);

void core_init_mmu(struct tee_mmap_region *mm);

void core_mmu_set_info_table(struct core_mmu_table_info *tbl_info,
			     unsigned int level, vaddr_t va_base, void *table);
void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
				struct user_mode_ctx *uctx);
void core_mmu_map_region(struct mmu_partition *prtn,
			 struct tee_mmap_region *mm);

bool arch_va2pa_helper(void *va, paddr_t *pa);

static inline bool core_mmap_is_end_of_table(const struct tee_mmap_region *mm)
{
	return mm->type == MEM_AREA_END;
}

static inline bool core_mmu_check_end_pa(paddr_t pa, size_t len)
{
	paddr_t end_pa = 0;

	if (ADD_OVERFLOW(pa, len - 1, &end_pa))
		return false;

	return core_mmu_check_max_pa(end_pa);
}
#endif /*__ASSEMBLER__*/

#endif /* CORE_MMU_H */