/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */
#ifndef __MM_CORE_MMU_H
#define __MM_CORE_MMU_H

#ifndef __ASSEMBLER__
#include <assert.h>
#include <compiler.h>
#include <kernel/user_ta.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu_types.h>
#include <types_ext.h>
#include <util.h>
#endif

#include <mm/core_mmu_arch.h>
#include <platform_config.h>

/* A small page is the smallest unit of memory that can be mapped */
#define SMALL_PAGE_SIZE		BIT(SMALL_PAGE_SHIFT)
#define SMALL_PAGE_MASK		((paddr_t)SMALL_PAGE_SIZE - 1)

/*
 * PGDIR is the translation table above the translation table that holds
 * the pages.
 */
#define CORE_MMU_PGDIR_SIZE	BIT(CORE_MMU_PGDIR_SHIFT)
#define CORE_MMU_PGDIR_MASK	((paddr_t)CORE_MMU_PGDIR_SIZE - 1)

/* TA user space code, data, stack and heap are mapped using this granularity */
#define CORE_MMU_USER_CODE_SIZE	BIT(CORE_MMU_USER_CODE_SHIFT)
#define CORE_MMU_USER_CODE_MASK	((paddr_t)CORE_MMU_USER_CODE_SIZE - 1)

/* TA user space parameters are mapped using this granularity */
#define CORE_MMU_USER_PARAM_SIZE	BIT(CORE_MMU_USER_PARAM_SHIFT)
#define CORE_MMU_USER_PARAM_MASK	((paddr_t)CORE_MMU_USER_PARAM_SIZE - 1)

/*
 * Identity mapping constraint: the virtual base address is the physical
 * start address. Macros not defined by the platform get default values
 * here.
 */
#ifndef TEE_RAM_VA_SIZE
#define TEE_RAM_VA_SIZE		CORE_MMU_PGDIR_SIZE
#endif

#ifndef TEE_LOAD_ADDR
#define TEE_LOAD_ADDR		TEE_RAM_START
#endif

#ifndef STACK_ALIGNMENT
#define STACK_ALIGNMENT		(sizeof(long) * U(2))
#endif

#ifndef __ASSEMBLER__
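
/*
 * Example (illustrative, not part of the API): the SIZE/MASK pairs above
 * follow the usual power-of-two convention, so alignment checks and
 * rounding reduce to plain bit operations:
 *
 *	bool aligned = !(pa & SMALL_PAGE_MASK);
 *	paddr_t page_start = pa & ~SMALL_PAGE_MASK;
 *	paddr_t pgdir_start = pa & ~CORE_MMU_PGDIR_MASK;
 */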

/*
 * Memory area type:
 * MEM_AREA_TEE_RAM: core RAM (read/write/executable, secure, reserved to TEE)
 * MEM_AREA_TEE_RAM_RX: core private read-only/executable memory (secure)
 * MEM_AREA_TEE_RAM_RO: core private read-only/non-executable memory (secure)
 * MEM_AREA_TEE_RAM_RW: core private read/write/non-executable memory (secure)
 * MEM_AREA_INIT_RAM_RO: init private read-only/non-executable memory (secure)
 * MEM_AREA_INIT_RAM_RX: init private read-only/executable memory (secure)
 * MEM_AREA_NEX_RAM_RO: nexus private read-only/non-executable memory (secure)
 * MEM_AREA_NEX_RAM_RW: nexus private r/w/non-executable memory (secure)
 * MEM_AREA_TEE_COHERENT: teecore coherent RAM (secure, reserved to TEE)
 * MEM_AREA_TEE_ASAN: core address sanitizer RAM (secure, reserved to TEE)
 * MEM_AREA_IDENTITY_MAP_RX: core identity mapped r/o executable memory (secure)
 * MEM_AREA_NSEC_SHM: NonSecure shared RAM between NSec and TEE
 * MEM_AREA_NEX_NSEC_SHM: nexus non-secure shared RAM between NSec and TEE
 * MEM_AREA_RAM_NSEC: NonSecure RAM storing data
 * MEM_AREA_RAM_SEC: Secure RAM storing some secrets
 * MEM_AREA_ROM_SEC: Secure read-only memory storing some secrets
 * MEM_AREA_IO_NSEC: NonSecure HW mapped registers
 * MEM_AREA_IO_SEC: Secure HW mapped registers
 * MEM_AREA_EXT_DT: Memory where the external device tree is loaded
 * MEM_AREA_MANIFEST_DT: Memory where the manifest device tree is loaded
 * MEM_AREA_TRANSFER_LIST: Memory area mapped for a Transfer List
 * MEM_AREA_RES_VASPACE: Reserved virtual memory space
 * MEM_AREA_SHM_VASPACE: Virtual memory space for dynamic shared memory buffers
 * MEM_AREA_TS_VASPACE: TS va space, only used with phys_to_virt()
 * MEM_AREA_PAGER_VASPACE: Pager virtual memory space
 * MEM_AREA_SDP_MEM: Secure Data Path memory
 * MEM_AREA_DDR_OVERALL: Overall DDR address range, candidate to dynamic shm
 * MEM_AREA_SEC_RAM_OVERALL: Whole secure RAM
 * MEM_AREA_MAXTYPE: lowest invalid 'type' value
 */
enum teecore_memtypes {
	MEM_AREA_TEE_RAM = 1,
	MEM_AREA_TEE_RAM_RX,
	MEM_AREA_TEE_RAM_RO,
	MEM_AREA_TEE_RAM_RW,
	MEM_AREA_INIT_RAM_RO,
	MEM_AREA_INIT_RAM_RX,
	MEM_AREA_NEX_RAM_RO,
	MEM_AREA_NEX_RAM_RW,
	MEM_AREA_TEE_COHERENT,
	MEM_AREA_TEE_ASAN,
	MEM_AREA_IDENTITY_MAP_RX,
	MEM_AREA_NSEC_SHM,
	MEM_AREA_NEX_NSEC_SHM,
	MEM_AREA_RAM_NSEC,
	MEM_AREA_RAM_SEC,
	MEM_AREA_ROM_SEC,
	MEM_AREA_IO_NSEC,
	MEM_AREA_IO_SEC,
	MEM_AREA_EXT_DT,
	MEM_AREA_MANIFEST_DT,
	MEM_AREA_TRANSFER_LIST,
	MEM_AREA_RES_VASPACE,
	MEM_AREA_SHM_VASPACE,
	MEM_AREA_TS_VASPACE,
	MEM_AREA_PAGER_VASPACE,
	MEM_AREA_SDP_MEM,
	MEM_AREA_DDR_OVERALL,
	MEM_AREA_SEC_RAM_OVERALL,
	MEM_AREA_MAXTYPE
};

static inline const char *teecore_memtype_name(enum teecore_memtypes type)
{
	static const char * const names[] = {
		[MEM_AREA_TEE_RAM] = "TEE_RAM_RWX",
		[MEM_AREA_TEE_RAM_RX] = "TEE_RAM_RX",
		[MEM_AREA_TEE_RAM_RO] = "TEE_RAM_RO",
		[MEM_AREA_TEE_RAM_RW] = "TEE_RAM_RW",
		[MEM_AREA_INIT_RAM_RO] = "INIT_RAM_RO",
		[MEM_AREA_INIT_RAM_RX] = "INIT_RAM_RX",
		[MEM_AREA_NEX_RAM_RO] = "NEX_RAM_RO",
		[MEM_AREA_NEX_RAM_RW] = "NEX_RAM_RW",
		[MEM_AREA_TEE_ASAN] = "TEE_ASAN",
		[MEM_AREA_IDENTITY_MAP_RX] = "IDENTITY_MAP_RX",
		[MEM_AREA_TEE_COHERENT] = "TEE_COHERENT",
		[MEM_AREA_NSEC_SHM] = "NSEC_SHM",
		[MEM_AREA_NEX_NSEC_SHM] = "NEX_NSEC_SHM",
		[MEM_AREA_RAM_NSEC] = "RAM_NSEC",
		[MEM_AREA_RAM_SEC] = "RAM_SEC",
		[MEM_AREA_ROM_SEC] = "ROM_SEC",
		[MEM_AREA_IO_NSEC] = "IO_NSEC",
		[MEM_AREA_IO_SEC] = "IO_SEC",
		[MEM_AREA_EXT_DT] = "EXT_DT",
		[MEM_AREA_MANIFEST_DT] = "MANIFEST_DT",
		[MEM_AREA_TRANSFER_LIST] = "TRANSFER_LIST",
		[MEM_AREA_RES_VASPACE] = "RES_VASPACE",
		[MEM_AREA_SHM_VASPACE] = "SHM_VASPACE",
		[MEM_AREA_TS_VASPACE] = "TS_VASPACE",
		[MEM_AREA_PAGER_VASPACE] = "PAGER_VASPACE",
		[MEM_AREA_SDP_MEM] = "SDP_MEM",
		[MEM_AREA_DDR_OVERALL] = "DDR_OVERALL",
		[MEM_AREA_SEC_RAM_OVERALL] = "SEC_RAM_OVERALL",
	};

	COMPILE_TIME_ASSERT(ARRAY_SIZE(names) == MEM_AREA_MAXTYPE);
	return names[type];
}

#ifdef CFG_CORE_RWDATA_NOEXEC
#define MEM_AREA_TEE_RAM_RW_DATA	MEM_AREA_TEE_RAM_RW
#else
#define MEM_AREA_TEE_RAM_RW_DATA	MEM_AREA_TEE_RAM
#endif
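
/*
 * Example (illustrative): using the name table above for debug traces.
 * DMSG() is assumed to be available from <trace.h> in the calling code:
 *
 *	static void print_memtype(enum teecore_memtypes t)
 *	{
 *		assert(t > 0 && t < MEM_AREA_MAXTYPE);
 *		DMSG("memory area type %d (%s)", t, teecore_memtype_name(t));
 *	}
 */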

struct core_mmu_phys_mem {
	const char *name;
	enum teecore_memtypes type;
	__extension__ union {
#if __SIZEOF_LONG__ != __SIZEOF_PADDR__
		struct {
			uint32_t lo_addr;
			uint32_t hi_addr;
		};
#endif
		paddr_t addr;
	};
	__extension__ union {
#if __SIZEOF_LONG__ != __SIZEOF_PADDR__
		struct {
			uint32_t lo_size;
			uint32_t hi_size;
		};
#endif
		paddr_size_t size;
	};
};

#define __register_memory(_name, _type, _addr, _size, _section) \
	SCATTERED_ARRAY_DEFINE_ITEM(_section, struct core_mmu_phys_mem) = \
		{ .name = (_name), .type = (_type), .addr = (_addr), \
		  .size = (_size) }

#if __SIZEOF_LONG__ != __SIZEOF_PADDR__
#define __register_memory_ul(_name, _type, _addr, _size, _section) \
	SCATTERED_ARRAY_DEFINE_ITEM(_section, struct core_mmu_phys_mem) = \
		{ .name = (_name), .type = (_type), .lo_addr = (_addr), \
		  .lo_size = (_size) }
#else
#define __register_memory_ul(_name, _type, _addr, _size, _section) \
	__register_memory(_name, _type, _addr, _size, _section)
#endif

#define register_phys_mem(type, addr, size) \
	__register_memory(#addr, (type), (addr), (size), phys_mem_map)

#define register_phys_mem_ul(type, addr, size) \
	__register_memory_ul(#addr, (type), (addr), (size), phys_mem_map)

/* Same as register_phys_mem() but with PGDIR_SIZE granularity */
#define register_phys_mem_pgdir(type, addr, size) \
	__register_memory(#addr, type, ROUNDDOWN(addr, CORE_MMU_PGDIR_SIZE), \
			  ROUNDUP(size + addr - \
				  ROUNDDOWN(addr, CORE_MMU_PGDIR_SIZE), \
				  CORE_MMU_PGDIR_SIZE), phys_mem_map)

#ifdef CFG_SECURE_DATA_PATH
#define register_sdp_mem(addr, size) \
	__register_memory(#addr, MEM_AREA_SDP_MEM, (addr), (size), \
			  phys_sdp_mem)
#else
#define register_sdp_mem(addr, size) \
	static int CONCAT(__register_sdp_mem_unused, __COUNTER__) __unused
#endif

/* register_dynamic_shm() is deprecated, please use register_ddr() instead */
#define register_dynamic_shm(addr, size) \
	__register_memory(#addr, MEM_AREA_DDR_OVERALL, (addr), (size), \
			  phys_ddr_overall_compat)
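
/*
 * Example (illustrative): a platform source file typically registers its
 * device and SDP memory with the macros above. CONSOLE_UART_BASE,
 * SDP_MEM_BASE and SDP_MEM_SIZE are hypothetical platform constants:
 *
 *	register_phys_mem_pgdir(MEM_AREA_IO_SEC, CONSOLE_UART_BASE,
 *				CORE_MMU_PGDIR_SIZE);
 *	register_sdp_mem(SDP_MEM_BASE, SDP_MEM_SIZE);
 */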

/*
 * register_ddr() - Define a memory range
 * @addr: Base address
 * @size: Length
 *
 * This macro can be used multiple times to define disjoint ranges. During
 * initialization, holes are carved out of these ranges where they overlap
 * with special memory, for instance memory registered with
 * register_sdp_mem().
 *
 * The memory that remains is accepted as non-secure shared memory when
 * communicating with normal world.
 *
 * This macro is an alternative to supplying the memory description with a
 * devicetree blob.
 */
#define register_ddr(addr, size) \
	__register_memory(#addr, MEM_AREA_DDR_OVERALL, (addr), (size), \
			  phys_ddr_overall)

#define phys_ddr_overall_begin \
	SCATTERED_ARRAY_BEGIN(phys_ddr_overall, struct core_mmu_phys_mem)

#define phys_ddr_overall_end \
	SCATTERED_ARRAY_END(phys_ddr_overall, struct core_mmu_phys_mem)

#define phys_ddr_overall_compat_begin \
	SCATTERED_ARRAY_BEGIN(phys_ddr_overall_compat, struct core_mmu_phys_mem)

#define phys_ddr_overall_compat_end \
	SCATTERED_ARRAY_END(phys_ddr_overall_compat, struct core_mmu_phys_mem)

#define phys_sdp_mem_begin \
	SCATTERED_ARRAY_BEGIN(phys_sdp_mem, struct core_mmu_phys_mem)

#define phys_sdp_mem_end \
	SCATTERED_ARRAY_END(phys_sdp_mem, struct core_mmu_phys_mem)

#define phys_mem_map_begin \
	SCATTERED_ARRAY_BEGIN(phys_mem_map, struct core_mmu_phys_mem)

#define phys_mem_map_end \
	SCATTERED_ARRAY_END(phys_mem_map, struct core_mmu_phys_mem)

/* Virtual memory pool for core mappings */
extern tee_mm_pool_t core_virt_mem_pool;

/* Virtual memory pool for shared memory mappings */
extern tee_mm_pool_t core_virt_shm_pool;

#ifdef CFG_CORE_RESERVED_SHM
/* Default NSec shared memory allocated from NSec world */
extern unsigned long default_nsec_shm_paddr;
extern unsigned long default_nsec_shm_size;
#endif

/*
 * Physical load address of OP-TEE, updated during boot if needed to
 * reflect the value actually used.
 */
#ifdef CFG_CORE_PHYS_RELOCATABLE
extern unsigned long core_mmu_tee_load_pa;
#else
extern const unsigned long core_mmu_tee_load_pa;
#endif

void core_init_mmu_map(unsigned long seed, struct core_mmu_config *cfg);
void core_init_mmu_regs(struct core_mmu_config *cfg);

/*
 * Copy the static memory map from the temporary boot_mem to the heap when
 * CFG_BOOT_MEM is enabled.
 */
void core_mmu_save_mem_map(void);

/* Arch-specific function to help optimize the use of a single MMU xlat table */
bool core_mmu_prefer_tee_ram_at_top(paddr_t paddr);
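
/*
 * Example (illustrative): walking the DDR ranges registered with
 * register_ddr() using the scattered array iterators above. PRIxPA is
 * assumed to come from <types_ext.h>:
 *
 *	const struct core_mmu_phys_mem *mem = NULL;
 *
 *	for (mem = phys_ddr_overall_begin; mem < phys_ddr_overall_end; mem++)
 *		DMSG("%s %#" PRIxPA "..%#" PRIxPA, mem->name, mem->addr,
 *		     mem->addr + mem->size - 1);
 */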

/*
 * struct mmu_partition - stores an MMU partition
 *
 * It represents a whole MMU mapping. It is possible to create multiple
 * partitions and switch between them at runtime, effectively changing how
 * OP-TEE sees memory. This is an opaque struct which is defined
 * differently for v7 and LPAE MMUs.
 *
 * This structure is used mostly when virtualization is enabled. With
 * CFG_NS_VIRTUALIZATION=n only the default partition exists.
 */
struct mmu_partition;

/*
 * core_mmu_get_user_va_range() - Return range of user va space
 * @base: Lowest user virtual address
 * @size: Size in bytes of user address space
 */
void core_mmu_get_user_va_range(vaddr_t *base, size_t *size);

/*
 * enum core_mmu_fault - different kinds of faults
 * @CORE_MMU_FAULT_ALIGNMENT: alignment fault
 * @CORE_MMU_FAULT_DEBUG_EVENT: debug event
 * @CORE_MMU_FAULT_TRANSLATION: translation fault
 * @CORE_MMU_FAULT_WRITE_PERMISSION: permission fault during write
 * @CORE_MMU_FAULT_READ_PERMISSION: permission fault during read
 * @CORE_MMU_FAULT_ASYNC_EXTERNAL: asynchronous external abort
 * @CORE_MMU_FAULT_ACCESS_BIT: access bit fault
 * @CORE_MMU_FAULT_TAG_CHECK: tag check fault
 * @CORE_MMU_FAULT_OTHER: other/unknown fault
 */
enum core_mmu_fault {
	CORE_MMU_FAULT_ALIGNMENT,
	CORE_MMU_FAULT_DEBUG_EVENT,
	CORE_MMU_FAULT_TRANSLATION,
	CORE_MMU_FAULT_WRITE_PERMISSION,
	CORE_MMU_FAULT_READ_PERMISSION,
	CORE_MMU_FAULT_ASYNC_EXTERNAL,
	CORE_MMU_FAULT_ACCESS_BIT,
	CORE_MMU_FAULT_TAG_CHECK,
	CORE_MMU_FAULT_OTHER,
};

/*
 * core_mmu_get_fault_type() - get fault type
 * @fault_descr: Content of fault status or exception syndrome register
 * @returns an enum describing the content of the fault status register
 */
enum core_mmu_fault core_mmu_get_fault_type(uint32_t fault_descr);

/*
 * core_mmu_type_to_attr() - convert memory type to attribute
 * @t: memory type
 * @returns an attribute that can be passed to core_mmu_set_entry() and
 * friends
 */
uint32_t core_mmu_type_to_attr(enum teecore_memtypes t);

/*
 * core_mmu_create_user_map() - Create user mode mapping
 * @uctx: Pointer to user mode context
 * @map: MMU configuration to use when activating this VA space
 */
void core_mmu_create_user_map(struct user_mode_ctx *uctx,
			      struct core_mmu_user_map *map);

/*
 * core_mmu_get_user_map() - Read the current MMU configuration for user
 * VA space
 * @map: MMU configuration for current user VA space
 */
void core_mmu_get_user_map(struct core_mmu_user_map *map);

/*
 * core_mmu_set_user_map() - Set new MMU configuration for user VA space
 * @map: User context MMU configuration or NULL to set core VA space
 *
 * Activate user VA space mapping and set its ASID if @map is not NULL,
 * otherwise activate core mapping and set ASID to 0.
 */
void core_mmu_set_user_map(struct core_mmu_user_map *map);
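
/*
 * Example (illustrative): the get/set pair above can be used to save the
 * current user mapping, temporarily switch to the core-only VA space
 * (NULL selects core mapping with ASID 0) and restore it afterwards:
 *
 *	struct core_mmu_user_map map = { };
 *
 *	core_mmu_get_user_map(&map);
 *	core_mmu_set_user_map(NULL);
 *	// ... operate without user mappings ...
 *	core_mmu_set_user_map(&map);
 */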

/*
 * struct core_mmu_table_info - Properties for a translation table
 * @table: Pointer to translation table
 * @va_base: VA base address of the translation table
 * @level: Translation table level
 * @next_level: The next, finer grained, translation table level relative
 * to @level
 * @shift: The shift of each entry in the table
 * @num_entries: Number of entries in this table
 */
struct core_mmu_table_info {
	void *table;
	vaddr_t va_base;
	unsigned num_entries;
#ifdef CFG_NS_VIRTUALIZATION
	struct mmu_partition *prtn;
#endif
	uint8_t level;
	uint8_t shift;
	uint8_t next_level;
};

/*
 * core_mmu_find_table() - Locates a translation table
 * @prtn: MMU partition where the search should be performed
 * @va: Virtual address for the table to cover
 * @max_level: Don't traverse beyond this level
 * @tbl_info: Pointer to where to store properties
 * @return true if a translation table was found, false on error
 */
bool core_mmu_find_table(struct mmu_partition *prtn, vaddr_t va,
			 unsigned max_level,
			 struct core_mmu_table_info *tbl_info);

/*
 * core_mmu_entry_to_finer_grained() - divide the mapping at the current
 * level into smaller ones so memory can be mapped with finer granularity
 * @tbl_info: Table where the target entry is located
 * @idx: Index of the entry for which a pgdir must be set up
 * @secure: true/false if the pgdir maps secure/non-secure memory (32-bit MMU)
 * @return true on success, false on error
 */
bool core_mmu_entry_to_finer_grained(struct core_mmu_table_info *tbl_info,
				     unsigned int idx, bool secure);

void core_mmu_set_entry_primitive(void *table, size_t level, size_t idx,
				  paddr_t pa, uint32_t attr);

void core_mmu_get_user_pgdir(struct core_mmu_table_info *pgd_info);

/*
 * core_mmu_set_entry() - Set entry in translation table
 * @tbl_info: Translation table properties
 * @idx: Index of entry to update
 * @pa: Physical address to assign entry
 * @attr: Attributes to assign entry
 */
void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
			paddr_t pa, uint32_t attr);

void core_mmu_get_entry_primitive(const void *table, size_t level, size_t idx,
				  paddr_t *pa, uint32_t *attr);

/*
 * core_mmu_get_entry() - Get entry from translation table
 * @tbl_info: Translation table properties
 * @idx: Index of entry to read
 * @pa: Physical address is returned here if pa is not NULL
 * @attr: Attributes are returned here if attr is not NULL
 */
void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
			paddr_t *pa, uint32_t *attr);

/*
 * core_mmu_va2idx() - Translate from virtual address to table index
 * @tbl_info: Translation table properties
 * @va: Virtual address to translate
 * @returns index in translation table
 */
static inline unsigned core_mmu_va2idx(struct core_mmu_table_info *tbl_info,
				       vaddr_t va)
{
	return (va - tbl_info->va_base) >> tbl_info->shift;
}

/*
 * core_mmu_idx2va() - Translate from table index to virtual address
 * @tbl_info: Translation table properties
 * @idx: Index to translate
 * @returns Virtual address
 */
static inline vaddr_t core_mmu_idx2va(struct core_mmu_table_info *tbl_info,
				      unsigned idx)
{
	return (idx << tbl_info->shift) + tbl_info->va_base;
}

/*
 * core_mmu_get_block_offset() - Get offset inside a block/page
 * @tbl_info: Translation table properties
 * @pa: Physical address
 * @returns offset within one block of the translation table
 */
static inline size_t core_mmu_get_block_offset(
			struct core_mmu_table_info *tbl_info, paddr_t pa)
{
	return pa & ((1 << tbl_info->shift) - 1);
}

/*
 * core_mmu_is_dynamic_vaspace() - Check if a memory region belongs to
 * the empty virtual address space that is used for dynamic mappings
 * @mm: Memory region to be checked
 * @returns result of the check
 */
static inline bool core_mmu_is_dynamic_vaspace(struct tee_mmap_region *mm)
{
	return mm->type == MEM_AREA_RES_VASPACE ||
	       mm->type == MEM_AREA_SHM_VASPACE;
}
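
/*
 * Example (illustrative): a minimal software table walk combining
 * core_mmu_find_table() (declared above) with the index helpers. It is
 * assumed that passing prtn == NULL selects the default partition and
 * that UINT_MAX allows traversal down to the last table level:
 *
 *	struct core_mmu_table_info ti = { };
 *	paddr_t pa = 0;
 *	uint32_t attr = 0;
 *
 *	if (core_mmu_find_table(NULL, va, UINT_MAX, &ti))
 *		core_mmu_get_entry(&ti, core_mmu_va2idx(&ti, va), &pa, &attr);
 */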

/*
 * core_mmu_map_pages() - map list of pages at given virtual address
 * @vstart: Virtual address where mapping begins
 * @pages: Array of page addresses
 * @num_pages: Number of pages
 * @memtype: Type of memory to be mapped
 *
 * Note: This function asserts that pages are not mapped executable for
 * kernel (privileged) mode.
 *
 * @returns: TEE_SUCCESS on success, TEE_ERROR_XXX on error
 */
TEE_Result core_mmu_map_pages(vaddr_t vstart, paddr_t *pages, size_t num_pages,
			      enum teecore_memtypes memtype);

/*
 * core_mmu_map_contiguous_pages() - map range of pages at given virtual
 * address
 * @vstart: Virtual address where mapping begins
 * @pstart: Physical address of the first page
 * @num_pages: Number of pages
 * @memtype: Type of memory to be mapped
 *
 * Note: This function asserts that pages are not mapped executable for
 * kernel (privileged) mode.
 *
 * @returns: TEE_SUCCESS on success, TEE_ERROR_XXX on error
 */
TEE_Result core_mmu_map_contiguous_pages(vaddr_t vstart, paddr_t pstart,
					 size_t num_pages,
					 enum teecore_memtypes memtype);

/*
 * core_mmu_unmap_pages() - remove mapping at given virtual address
 * @vstart: Virtual address where mapping begins
 * @num_pages: Number of pages to unmap
 */
void core_mmu_unmap_pages(vaddr_t vstart, size_t num_pages);

/*
 * core_mmu_user_mapping_is_active() - Report if user mapping is active
 * @returns true if a user VA space is active, false if user VA space is
 * inactive
 */
bool core_mmu_user_mapping_is_active(void);

/*
 * core_mmu_user_va_range_is_defined() - check if user va range is defined
 * @returns true if a user VA space is defined, false if not
 */
bool core_mmu_user_va_range_is_defined(void);

/*
 * core_mmu_mattr_is_ok() - Check that supplied mem attributes can be used
 * @returns true if the attributes can be used, false if not
 */
bool core_mmu_mattr_is_ok(uint32_t mattr);

TEE_Result core_mmu_for_each_map(void *ptr,
				 TEE_Result (*fn)(struct tee_mmap_region *map,
						  void *ptr));

void core_mmu_get_mem_by_type(enum teecore_memtypes type, vaddr_t *s,
			      vaddr_t *e);

enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa);

/* Routines to retrieve shared memory configuration */
static inline bool core_mmu_is_shm_cached(void)
{
	return mattr_is_cached(core_mmu_type_to_attr(MEM_AREA_NSEC_SHM));
}

TEE_Result core_mmu_remove_mapping(enum teecore_memtypes type, void *addr,
				   size_t len);
void *core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr,
			   size_t len);

/*
 * core_mmu_find_mapping_exclusive() - Find mapping of specified type and
 * length. If more than one mapping of the specified type is present, NULL
 * is returned.
 * @type: memory type
 * @len: length in bytes
 */
struct tee_mmap_region *
core_mmu_find_mapping_exclusive(enum teecore_memtypes type, size_t len);
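
/*
 * Example (illustrative): mapping a secure device region at runtime with
 * core_mmu_add_mapping() and tearing it down again. DEV_BASE and DEV_SIZE
 * are hypothetical platform constants:
 *
 *	void *va = core_mmu_add_mapping(MEM_AREA_IO_SEC, DEV_BASE, DEV_SIZE);
 *
 *	if (!va)
 *		return TEE_ERROR_GENERIC;
 *	// ... access the device registers through va ...
 *	if (core_mmu_remove_mapping(MEM_AREA_IO_SEC, va, DEV_SIZE))
 *		panic();
 */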

/*
 * tlbi_va_range() - Invalidate TLB for virtual address range
 * @va: start virtual address, must be a multiple of @granule
 * @len: length in bytes of range, must be a multiple of @granule
 * @granule: granularity of mapping, supported values are
 * CORE_MMU_PGDIR_SIZE or SMALL_PAGE_SIZE. This value must
 * match the actual mappings.
 */
void tlbi_va_range(vaddr_t va, size_t len, size_t granule);

/*
 * tlbi_va_range_asid() - Invalidate TLB for virtual address range for
 * a specific ASID
 * @va: start virtual address, must be a multiple of @granule
 * @len: length in bytes of range, must be a multiple of @granule
 * @granule: granularity of mapping, supported values are
 * CORE_MMU_PGDIR_SIZE or SMALL_PAGE_SIZE. This value must
 * match the actual mappings.
 * @asid: Address space identifier
 */
void tlbi_va_range_asid(vaddr_t va, size_t len, size_t granule, uint32_t asid);

/* Check whether the CPU MMU is enabled */
bool cpu_mmu_enabled(void);

#ifdef CFG_CORE_DYN_SHM
/*
 * Check if the platform defines non-secure DDR range(s).
 * Static SHM (MEM_AREA_NSEC_SHM) is not covered by this API as it is
 * always present.
 */
bool core_mmu_nsec_ddr_is_defined(void);

void core_mmu_set_discovered_nsec_ddr(struct core_mmu_phys_mem *start,
				      size_t nelems);
#endif

/* Initialize MMU partition */
void core_init_mmu_prtn(struct mmu_partition *prtn, struct memory_map *mem_map);

unsigned int asid_alloc(void);
void asid_free(unsigned int asid);

#ifdef CFG_SECURE_DATA_PATH
/* Allocate and fill the SDP memory objects table - the table is NULL
 * terminated */
struct mobj **core_sdp_mem_create_mobjs(void);
#endif

#ifdef CFG_NS_VIRTUALIZATION
size_t core_mmu_get_total_pages_size(void);
struct mmu_partition *core_alloc_mmu_prtn(void *tables);
void core_free_mmu_prtn(struct mmu_partition *prtn);
void core_mmu_set_prtn(struct mmu_partition *prtn);
void core_mmu_set_default_prtn(void);
void core_mmu_set_default_prtn_tbl(void);
#endif

/* Initialize physical memory pool */
void core_mmu_init_phys_mem(void);

void core_init_mmu(struct memory_map *mem_map);

void core_mmu_set_info_table(struct core_mmu_table_info *tbl_info,
			     unsigned int level, vaddr_t va_base, void *table);
void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
				struct user_mode_ctx *uctx);
void core_mmu_map_region(struct mmu_partition *prtn,
			 struct tee_mmap_region *mm);

bool arch_va2pa_helper(void *va, paddr_t *pa);

static inline bool core_mmu_check_end_pa(paddr_t pa, size_t len)
{
	paddr_t end_pa = 0;

	if (ADD_OVERFLOW(pa, len - 1, &end_pa))
		return false;
	return core_mmu_check_max_pa(end_pa);
}

/*
 * core_mmu_set_secure_memory() - set physical secure memory range
 * @base: base address of secure memory
 * @size: size of secure memory
 *
 * The physical secure memory range is not known in advance when OP-TEE is
 * relocatable, so this information must be supplied once during boot,
 * before the translation tables can be initialized and the MMU enabled.
 */
void core_mmu_set_secure_memory(paddr_t base, size_t size);

/*
 * core_mmu_get_secure_memory() - get physical secure memory range
 * @base: base address of secure memory
 * @size: size of secure memory
 *
 * The physical secure memory range returned covers at least the memory
 * range used by OP-TEE Core, but may cover more memory depending on the
 * configuration.
 */
void core_mmu_get_secure_memory(paddr_t *base, paddr_size_t *size);

#endif /*__ASSEMBLER__*/

#endif /* __MM_CORE_MMU_H */