/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */
#ifndef __MM_CORE_MMU_H
#define __MM_CORE_MMU_H

#ifndef __ASSEMBLER__
#include <assert.h>
#include <compiler.h>
#include <kernel/user_ta.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu_types.h>
#include <types_ext.h>
#include <util.h>
#endif

#include <mm/core_mmu_arch.h>
#include <platform_config.h>

/* A small page is the smallest unit of memory that can be mapped */
#define SMALL_PAGE_SIZE		BIT(SMALL_PAGE_SHIFT)
#define SMALL_PAGE_MASK		((paddr_t)SMALL_PAGE_SIZE - 1)

/*
 * A PGDIR is a translation table one level above the translation table
 * that holds the small pages.
 */
#define CORE_MMU_PGDIR_SIZE	BIT(CORE_MMU_PGDIR_SHIFT)
#define CORE_MMU_PGDIR_MASK	((paddr_t)CORE_MMU_PGDIR_SIZE - 1)

/* TA user space code, data, stack and heap are mapped using this granularity */
#define CORE_MMU_USER_CODE_SIZE	BIT(CORE_MMU_USER_CODE_SHIFT)
#define CORE_MMU_USER_CODE_MASK	((paddr_t)CORE_MMU_USER_CODE_SIZE - 1)

/* TA user space parameters are mapped using this granularity */
#define CORE_MMU_USER_PARAM_SIZE	BIT(CORE_MMU_USER_PARAM_SHIFT)
#define CORE_MMU_USER_PARAM_MASK	((paddr_t)CORE_MMU_USER_PARAM_SIZE - 1)

/*
 * Identity mapping constraint: the virtual base address is the physical
 * start address. Platforms that do not define the macros below get these
 * default values.
 */
#ifndef TEE_RAM_VA_SIZE
#define TEE_RAM_VA_SIZE		CORE_MMU_PGDIR_SIZE
#endif

#ifndef TEE_LOAD_ADDR
#define TEE_LOAD_ADDR		TEE_RAM_START
#endif

#ifndef STACK_ALIGNMENT
#define STACK_ALIGNMENT		(sizeof(long) * U(2))
#endif
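/*
 * Example (illustrative only, assuming 4 KiB small pages, i.e.
 * SMALL_PAGE_SHIFT == 12): splitting an arbitrary physical address into
 * its small-page base and in-page offset with the masks above. The
 * address value is made up.
 *
 *	paddr_t pa = 0x80001234;
 *	paddr_t page_base = pa & ~SMALL_PAGE_MASK;	yields 0x80001000
 *	size_t page_offs = pa & SMALL_PAGE_MASK;	yields 0x234
 */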
#ifndef __ASSEMBLER__
/*
 * Memory area type:
 * MEM_AREA_END: Reserved, marks the end of a table of mapping areas.
 * MEM_AREA_TEE_RAM: core RAM (read/write/executable, secure, reserved to TEE)
 * MEM_AREA_TEE_RAM_RX: core private read-only/executable memory (secure)
 * MEM_AREA_TEE_RAM_RO: core private read-only/non-executable memory (secure)
 * MEM_AREA_TEE_RAM_RW: core private read/write/non-executable memory (secure)
 * MEM_AREA_INIT_RAM_RO: init private read-only/non-executable memory (secure)
 * MEM_AREA_INIT_RAM_RX: init private read-only/executable memory (secure)
 * MEM_AREA_NEX_RAM_RO: nexus private read-only/non-executable memory (secure)
 * MEM_AREA_NEX_RAM_RW: nexus private r/w/non-executable memory (secure)
 * MEM_AREA_TEE_COHERENT: teecore coherent RAM (secure, reserved to TEE)
 * MEM_AREA_TEE_ASAN: core address sanitizer RAM (secure, reserved to TEE)
 * MEM_AREA_IDENTITY_MAP_RX: core identity mapped r/o executable memory (secure)
 * MEM_AREA_TA_RAM: Secure RAM where teecore loads/exec TA instances.
 * MEM_AREA_NSEC_SHM: NonSecure shared RAM between NSec and TEE.
 * MEM_AREA_NEX_NSEC_SHM: nexus non-secure shared RAM between NSec and TEE.
 * MEM_AREA_RAM_NSEC: NonSecure RAM storing data
 * MEM_AREA_RAM_SEC: Secure RAM storing some secrets
 * MEM_AREA_ROM_SEC: Secure read-only memory storing some secrets
 * MEM_AREA_IO_NSEC: NonSecure HW mapped registers
 * MEM_AREA_IO_SEC: Secure HW mapped registers
 * MEM_AREA_EXT_DT: Memory where the external device tree is loaded
 * MEM_AREA_MANIFEST_DT: Memory where the manifest device tree is loaded
 * MEM_AREA_TRANSFER_LIST: Memory area mapped for Transfer List
 * MEM_AREA_RES_VASPACE: Reserved virtual memory space
 * MEM_AREA_SHM_VASPACE: Virtual memory space for dynamic shared memory buffers
 * MEM_AREA_TS_VASPACE: TS va space, only used with phys_to_virt()
 * MEM_AREA_PAGER_VASPACE: Virtual memory space reserved for the pager
 * MEM_AREA_SDP_MEM: Secure Data Path memory
 * MEM_AREA_DDR_OVERALL: Overall DDR address range, candidate to dynamic shm.
 * MEM_AREA_SEC_RAM_OVERALL: Whole secure RAM
 * MEM_AREA_MAXTYPE: lowest invalid 'type' value
 */
enum teecore_memtypes {
	MEM_AREA_END = 0,
	MEM_AREA_TEE_RAM,
	MEM_AREA_TEE_RAM_RX,
	MEM_AREA_TEE_RAM_RO,
	MEM_AREA_TEE_RAM_RW,
	MEM_AREA_INIT_RAM_RO,
	MEM_AREA_INIT_RAM_RX,
	MEM_AREA_NEX_RAM_RO,
	MEM_AREA_NEX_RAM_RW,
	MEM_AREA_TEE_COHERENT,
	MEM_AREA_TEE_ASAN,
	MEM_AREA_IDENTITY_MAP_RX,
	MEM_AREA_TA_RAM,
	MEM_AREA_NSEC_SHM,
	MEM_AREA_NEX_NSEC_SHM,
	MEM_AREA_RAM_NSEC,
	MEM_AREA_RAM_SEC,
	MEM_AREA_ROM_SEC,
	MEM_AREA_IO_NSEC,
	MEM_AREA_IO_SEC,
	MEM_AREA_EXT_DT,
	MEM_AREA_MANIFEST_DT,
	MEM_AREA_TRANSFER_LIST,
	MEM_AREA_RES_VASPACE,
	MEM_AREA_SHM_VASPACE,
	MEM_AREA_TS_VASPACE,
	MEM_AREA_PAGER_VASPACE,
	MEM_AREA_SDP_MEM,
	MEM_AREA_DDR_OVERALL,
	MEM_AREA_SEC_RAM_OVERALL,
	MEM_AREA_MAXTYPE
};

static inline const char *teecore_memtype_name(enum teecore_memtypes type)
{
	static const char * const names[] = {
		[MEM_AREA_END] = "END",
		[MEM_AREA_TEE_RAM] = "TEE_RAM_RWX",
		[MEM_AREA_TEE_RAM_RX] = "TEE_RAM_RX",
		[MEM_AREA_TEE_RAM_RO] = "TEE_RAM_RO",
		[MEM_AREA_TEE_RAM_RW] = "TEE_RAM_RW",
		[MEM_AREA_INIT_RAM_RO] = "INIT_RAM_RO",
		[MEM_AREA_INIT_RAM_RX] = "INIT_RAM_RX",
		[MEM_AREA_NEX_RAM_RO] = "NEX_RAM_RO",
		[MEM_AREA_NEX_RAM_RW] = "NEX_RAM_RW",
		[MEM_AREA_TEE_ASAN] = "TEE_ASAN",
		[MEM_AREA_IDENTITY_MAP_RX] = "IDENTITY_MAP_RX",
		[MEM_AREA_TEE_COHERENT] = "TEE_COHERENT",
		[MEM_AREA_TA_RAM] = "TA_RAM",
		[MEM_AREA_NSEC_SHM] = "NSEC_SHM",
		[MEM_AREA_NEX_NSEC_SHM] = "NEX_NSEC_SHM",
		[MEM_AREA_RAM_NSEC] = "RAM_NSEC",
		[MEM_AREA_RAM_SEC] = "RAM_SEC",
		[MEM_AREA_ROM_SEC] = "ROM_SEC",
		[MEM_AREA_IO_NSEC] = "IO_NSEC",
		[MEM_AREA_IO_SEC] = "IO_SEC",
		[MEM_AREA_EXT_DT] = "EXT_DT",
		[MEM_AREA_MANIFEST_DT] = "MANIFEST_DT",
		[MEM_AREA_TRANSFER_LIST] = "TRANSFER_LIST",
		[MEM_AREA_RES_VASPACE] = "RES_VASPACE",
		[MEM_AREA_SHM_VASPACE] = "SHM_VASPACE",
		[MEM_AREA_TS_VASPACE] = "TS_VASPACE",
		[MEM_AREA_PAGER_VASPACE] = "PAGER_VASPACE",
		[MEM_AREA_SDP_MEM] = "SDP_MEM",
		[MEM_AREA_DDR_OVERALL] = "DDR_OVERALL",
		[MEM_AREA_SEC_RAM_OVERALL] = "SEC_RAM_OVERALL",
	};

	COMPILE_TIME_ASSERT(ARRAY_SIZE(names) == MEM_AREA_MAXTYPE);
	return names[type];
}

#ifdef CFG_CORE_RWDATA_NOEXEC
#define MEM_AREA_TEE_RAM_RW_DATA	MEM_AREA_TEE_RAM_RW
#else
#define MEM_AREA_TEE_RAM_RW_DATA	MEM_AREA_TEE_RAM
#endif
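/*
 * Example (illustrative only): logging the symbolic name of a memory area
 * type, here with the standard OP-TEE DMSG() trace macro.
 *
 *	DMSG("mapping area type %s", teecore_memtype_name(MEM_AREA_IO_SEC));
 */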
struct core_mmu_phys_mem {
	const char *name;
	enum teecore_memtypes type;
	__extension__ union {
#if __SIZEOF_LONG__ != __SIZEOF_PADDR__
		struct {
			uint32_t lo_addr;
			uint32_t hi_addr;
		};
#endif
		paddr_t addr;
	};
	__extension__ union {
#if __SIZEOF_LONG__ != __SIZEOF_PADDR__
		struct {
			uint32_t lo_size;
			uint32_t hi_size;
		};
#endif
		paddr_size_t size;
	};
};

#define __register_memory(_name, _type, _addr, _size, _section) \
	SCATTERED_ARRAY_DEFINE_ITEM(_section, struct core_mmu_phys_mem) = \
		{ .name = (_name), .type = (_type), .addr = (_addr), \
		  .size = (_size) }

#if __SIZEOF_LONG__ != __SIZEOF_PADDR__
#define __register_memory_ul(_name, _type, _addr, _size, _section) \
	SCATTERED_ARRAY_DEFINE_ITEM(_section, struct core_mmu_phys_mem) = \
		{ .name = (_name), .type = (_type), .lo_addr = (_addr), \
		  .lo_size = (_size) }
#else
#define __register_memory_ul(_name, _type, _addr, _size, _section) \
	__register_memory(_name, _type, _addr, _size, _section)
#endif

#define register_phys_mem(type, addr, size) \
	__register_memory(#addr, (type), (addr), (size), phys_mem_map)

#define register_phys_mem_ul(type, addr, size) \
	__register_memory_ul(#addr, (type), (addr), (size), phys_mem_map)

/* Same as register_phys_mem() but with CORE_MMU_PGDIR_SIZE granularity */
#define register_phys_mem_pgdir(type, addr, size) \
	__register_memory(#addr, type, ROUNDDOWN(addr, CORE_MMU_PGDIR_SIZE), \
			  ROUNDUP(size + addr - \
					  ROUNDDOWN(addr, CORE_MMU_PGDIR_SIZE), \
				  CORE_MMU_PGDIR_SIZE), phys_mem_map)

#ifdef CFG_SECURE_DATA_PATH
#define register_sdp_mem(addr, size) \
	__register_memory(#addr, MEM_AREA_SDP_MEM, (addr), (size), \
			  phys_sdp_mem)
#else
#define register_sdp_mem(addr, size) \
	static int CONCAT(__register_sdp_mem_unused, __COUNTER__) __unused
#endif

/* register_dynamic_shm() is deprecated, please use register_ddr() instead */
#define register_dynamic_shm(addr, size) \
	__register_memory(#addr, MEM_AREA_DDR_OVERALL, (addr), (size), \
			  phys_ddr_overall_compat)

/*
 * register_ddr() - Define a memory range
 * @addr: Base address
 * @size: Length
 *
 * This macro can be used multiple times to define disjoint ranges. During
 * initialization, holes are carved out of these ranges where they overlap
 * with special memory, for instance memory registered with
 * register_sdp_mem().
 *
 * The memory that remains is accepted as non-secure shared memory when
 * communicating with normal world.
 *
 * This macro is an alternative to supplying the memory description in a
 * devicetree blob.
 */
#define register_ddr(addr, size) \
	__register_memory(#addr, MEM_AREA_DDR_OVERALL, (addr), (size), \
			  phys_ddr_overall)
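/*
 * Example (illustrative only): a platform source file might describe its
 * memory with these macros. GIC_BASE, GIC_SIZE, DRAM0_BASE and DRAM0_SIZE
 * are hypothetical platform constants, not defined by this header.
 *
 *	register_phys_mem_pgdir(MEM_AREA_IO_SEC, GIC_BASE, GIC_SIZE);
 *	register_ddr(DRAM0_BASE, DRAM0_SIZE);
 */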
#define phys_ddr_overall_begin \
	SCATTERED_ARRAY_BEGIN(phys_ddr_overall, struct core_mmu_phys_mem)

#define phys_ddr_overall_end \
	SCATTERED_ARRAY_END(phys_ddr_overall, struct core_mmu_phys_mem)

#define phys_ddr_overall_compat_begin \
	SCATTERED_ARRAY_BEGIN(phys_ddr_overall_compat, struct core_mmu_phys_mem)

#define phys_ddr_overall_compat_end \
	SCATTERED_ARRAY_END(phys_ddr_overall_compat, struct core_mmu_phys_mem)

#define phys_sdp_mem_begin \
	SCATTERED_ARRAY_BEGIN(phys_sdp_mem, struct core_mmu_phys_mem)

#define phys_sdp_mem_end \
	SCATTERED_ARRAY_END(phys_sdp_mem, struct core_mmu_phys_mem)

#define phys_mem_map_begin \
	SCATTERED_ARRAY_BEGIN(phys_mem_map, struct core_mmu_phys_mem)

#define phys_mem_map_end \
	SCATTERED_ARRAY_END(phys_mem_map, struct core_mmu_phys_mem)

/* Physical Secure DDR pool */
extern tee_mm_pool_t tee_mm_sec_ddr;

/* Virtual memory pool for core mappings */
extern tee_mm_pool_t core_virt_mem_pool;

/* Virtual memory pool for shared memory mappings */
extern tee_mm_pool_t core_virt_shm_pool;

#ifdef CFG_CORE_RESERVED_SHM
/* Default NSec shared memory allocated from NSec world */
extern unsigned long default_nsec_shm_paddr;
extern unsigned long default_nsec_shm_size;
#endif

/*
 * Physical load address of OP-TEE, updated during boot if needed to
 * reflect the address actually used.
 */
#ifdef CFG_CORE_PHYS_RELOCATABLE
extern unsigned long core_mmu_tee_load_pa;
#else
extern const unsigned long core_mmu_tee_load_pa;
#endif

void core_init_mmu_map(unsigned long seed, struct core_mmu_config *cfg);
void core_init_mmu_regs(struct core_mmu_config *cfg);

/* Arch-specific helper to optimize the layout of a single MMU xlat table */
bool core_mmu_prefer_tee_ram_at_top(paddr_t paddr);

/*
 * struct mmu_partition - stores an MMU partition.
 *
 * It represents a whole MMU mapping. It is possible to create multiple
 * partitions and switch between them at runtime, effectively changing how
 * OP-TEE sees memory. This is an opaque struct that is defined differently
 * for the v7 and LPAE MMUs.
 *
 * This structure is mostly used when virtualization is enabled.
 * When CFG_NS_VIRTUALIZATION==n only the default partition exists.
 */
struct mmu_partition;

/*
 * core_mmu_get_user_va_range() - Return range of user VA space
 * @base: Lowest user virtual address
 * @size: Size in bytes of user address space
 */
void core_mmu_get_user_va_range(vaddr_t *base, size_t *size);

/*
 * enum core_mmu_fault - different kinds of faults
 * @CORE_MMU_FAULT_ALIGNMENT: alignment fault
 * @CORE_MMU_FAULT_DEBUG_EVENT: debug event
 * @CORE_MMU_FAULT_TRANSLATION: translation fault
 * @CORE_MMU_FAULT_WRITE_PERMISSION: permission fault during write
 * @CORE_MMU_FAULT_READ_PERMISSION: permission fault during read
 * @CORE_MMU_FAULT_ASYNC_EXTERNAL: asynchronous external abort
 * @CORE_MMU_FAULT_ACCESS_BIT: access bit fault
 * @CORE_MMU_FAULT_TAG_CHECK: tag check fault
 * @CORE_MMU_FAULT_OTHER: other/unknown fault
 */
enum core_mmu_fault {
	CORE_MMU_FAULT_ALIGNMENT,
	CORE_MMU_FAULT_DEBUG_EVENT,
	CORE_MMU_FAULT_TRANSLATION,
	CORE_MMU_FAULT_WRITE_PERMISSION,
	CORE_MMU_FAULT_READ_PERMISSION,
	CORE_MMU_FAULT_ASYNC_EXTERNAL,
	CORE_MMU_FAULT_ACCESS_BIT,
	CORE_MMU_FAULT_TAG_CHECK,
	CORE_MMU_FAULT_OTHER,
};

/*
 * core_mmu_get_fault_type() - get fault type
 * @fault_descr: Content of fault status or exception syndrome register
 * @returns an enum describing the content of the fault status register.
 */
enum core_mmu_fault core_mmu_get_fault_type(uint32_t fault_descr);
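/*
 * Example (illustrative only): an abort handler could classify a fault
 * before deciding how to react; the recovery policy shown is hypothetical.
 *
 *	switch (core_mmu_get_fault_type(fault_descr)) {
 *	case CORE_MMU_FAULT_TRANSLATION:
 *	case CORE_MMU_FAULT_READ_PERMISSION:
 *	case CORE_MMU_FAULT_WRITE_PERMISSION:
 *		recoverable = true;	e.g. pager or user TA abort path
 *		break;
 *	default:
 *		recoverable = false;
 *	}
 */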
/*
 * core_mmu_type_to_attr() - convert memory type to attribute
 * @t: memory type
 * @returns an attribute that can be passed to core_mmu_set_entry() and
 * friends
 */
uint32_t core_mmu_type_to_attr(enum teecore_memtypes t);

/*
 * core_mmu_create_user_map() - Create user mode mapping
 * @uctx: Pointer to user mode context
 * @map: MMU configuration to use when activating this VA space
 */
void core_mmu_create_user_map(struct user_mode_ctx *uctx,
			      struct core_mmu_user_map *map);

/*
 * core_mmu_get_user_map() - Reads current MMU configuration for user VA space
 * @map: MMU configuration for current user VA space.
 */
void core_mmu_get_user_map(struct core_mmu_user_map *map);

/*
 * core_mmu_set_user_map() - Set new MMU configuration for user VA space
 * @map: User context MMU configuration or NULL to set core VA space
 *
 * Activate user VA space mapping and set its ASID if @map is not NULL,
 * otherwise activate core mapping and set ASID to 0.
 */
void core_mmu_set_user_map(struct core_mmu_user_map *map);

/*
 * struct core_mmu_table_info - Properties for a translation table
 * @table: Pointer to translation table
 * @va_base: VA base address of the translation table
 * @level: Translation table level
 * @next_level: Finer grained translation table level according to @level.
 * @shift: The shift of each entry in the table
 * @num_entries: Number of entries in this table.
 */
struct core_mmu_table_info {
	void *table;
	vaddr_t va_base;
	unsigned num_entries;
#ifdef CFG_NS_VIRTUALIZATION
	struct mmu_partition *prtn;
#endif
	uint8_t level;
	uint8_t shift;
	uint8_t next_level;
};

/*
 * core_mmu_find_table() - Locates a translation table
 * @prtn: MMU partition where search should be performed
 * @va: Virtual address for the table to cover
 * @max_level: Don't traverse beyond this level
 * @tbl_info: Pointer to where to store properties.
 * @return true if a translation table was found, false on error
 */
bool core_mmu_find_table(struct mmu_partition *prtn, vaddr_t va,
			 unsigned max_level,
			 struct core_mmu_table_info *tbl_info);
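/*
 * Example (illustrative only): inspecting the entry that maps @va. Passing
 * prtn == NULL is assumed here to select the currently active/default
 * partition; core_mmu_va2idx() and core_mmu_get_entry() are declared
 * further down in this header.
 *
 *	struct core_mmu_table_info tbl_info = { };
 *	paddr_t pa = 0;
 *	uint32_t attr = 0;
 *
 *	if (core_mmu_find_table(NULL, va, UINT_MAX, &tbl_info))
 *		core_mmu_get_entry(&tbl_info, core_mmu_va2idx(&tbl_info, va),
 *				   &pa, &attr);
 */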
/*
 * core_mmu_entry_to_finer_grained() - divide the mapping at the current
 * level into smaller ones so memory can be mapped with finer granularity
 * @tbl_info: table where the target record is located
 * @idx: index of the record for which a pgdir must be set up
 * @secure: true/false if the pgdir maps secure/non-secure memory (32-bit MMU)
 * @return true on success, false on error
 */
bool core_mmu_entry_to_finer_grained(struct core_mmu_table_info *tbl_info,
				     unsigned int idx, bool secure);

void core_mmu_set_entry_primitive(void *table, size_t level, size_t idx,
				  paddr_t pa, uint32_t attr);

void core_mmu_get_user_pgdir(struct core_mmu_table_info *pgd_info);

/*
 * core_mmu_set_entry() - Set entry in translation table
 * @tbl_info: Translation table properties
 * @idx: Index of entry to update
 * @pa: Physical address to assign entry
 * @attr: Attributes to assign entry
 */
void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
			paddr_t pa, uint32_t attr);

void core_mmu_get_entry_primitive(const void *table, size_t level, size_t idx,
				  paddr_t *pa, uint32_t *attr);

/*
 * core_mmu_get_entry() - Get entry from translation table
 * @tbl_info: Translation table properties
 * @idx: Index of entry to read
 * @pa: Physical address is returned here if pa is not NULL
 * @attr: Attributes are returned here if attr is not NULL
 */
void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
			paddr_t *pa, uint32_t *attr);

/*
 * core_mmu_va2idx() - Translate from virtual address to table index
 * @tbl_info: Translation table properties
 * @va: Virtual address to translate
 * @returns index in translation table
 */
static inline unsigned core_mmu_va2idx(struct core_mmu_table_info *tbl_info,
				       vaddr_t va)
{
	return (va - tbl_info->va_base) >> tbl_info->shift;
}

/*
 * core_mmu_idx2va() - Translate from table index to virtual address
 * @tbl_info: Translation table properties
 * @idx: Index to translate
 * @returns Virtual address
 */
static inline vaddr_t core_mmu_idx2va(struct core_mmu_table_info *tbl_info,
				      unsigned idx)
{
	/* Widen before shifting to avoid overflow for shifts >= 32 */
	return ((vaddr_t)idx << tbl_info->shift) + tbl_info->va_base;
}

/*
 * core_mmu_get_block_offset() - Get offset inside a block/page
 * @tbl_info: Translation table properties
 * @pa: Physical address
 * @returns offset within one block of the translation table
 */
static inline size_t core_mmu_get_block_offset(
			struct core_mmu_table_info *tbl_info, paddr_t pa)
{
	/* Widen before shifting to avoid undefined behavior for large shifts */
	return pa & (((paddr_t)1 << tbl_info->shift) - 1);
}
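/*
 * Worked example (illustrative only): for a table with va_base ==
 * 0x40000000 and shift == 12 (4 KiB granule), core_mmu_va2idx() maps
 * va 0x40003c00 to (0x40003c00 - 0x40000000) >> 12 == 3, and
 * core_mmu_idx2va() maps index 3 back to 0x40003000. The in-page offset
 * (0xc00) is not represented in the index; core_mmu_get_block_offset()
 * extracts the equivalent offset from a physical address.
 */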
/*
 * core_mmu_is_dynamic_vaspace() - Check if a memory region belongs to
 * an empty virtual address space that is used for dynamic mappings
 * @mm: memory region to be checked
 * @returns result of the check
 */
static inline bool core_mmu_is_dynamic_vaspace(struct tee_mmap_region *mm)
{
	return mm->type == MEM_AREA_RES_VASPACE ||
	       mm->type == MEM_AREA_SHM_VASPACE;
}

/*
 * core_mmu_map_pages() - map list of pages at given virtual address
 * @vstart: Virtual address where mapping begins
 * @pages: Array of page addresses
 * @num_pages: Number of pages
 * @memtype: Type of memory to be mapped
 *
 * Note: This function asserts that pages are not mapped executable for
 * kernel (privileged) mode.
 *
 * @returns: TEE_SUCCESS on success, TEE_ERROR_XXX on error
 */
TEE_Result core_mmu_map_pages(vaddr_t vstart, paddr_t *pages, size_t num_pages,
			      enum teecore_memtypes memtype);

/*
 * core_mmu_map_contiguous_pages() - map range of pages at given virtual
 * address
 * @vstart: Virtual address where mapping begins
 * @pstart: Physical address of the first page
 * @num_pages: Number of pages
 * @memtype: Type of memory to be mapped
 *
 * Note: This function asserts that pages are not mapped executable for
 * kernel (privileged) mode.
 *
 * @returns: TEE_SUCCESS on success, TEE_ERROR_XXX on error
 */
TEE_Result core_mmu_map_contiguous_pages(vaddr_t vstart, paddr_t pstart,
					 size_t num_pages,
					 enum teecore_memtypes memtype);

/*
 * core_mmu_unmap_pages() - remove mapping at given virtual address
 * @vstart: Virtual address where mapping begins
 * @num_pages: Number of pages to unmap
 */
void core_mmu_unmap_pages(vaddr_t vstart, size_t num_pages);

/*
 * core_mmu_user_mapping_is_active() - Report if user mapping is active
 * @returns true if a user VA space is active, false if user VA space is
 * inactive.
 */
bool core_mmu_user_mapping_is_active(void);

/*
 * core_mmu_mattr_is_ok() - Check that supplied mem attributes can be used
 * @returns true if the attributes can be used, false if not.
 */
bool core_mmu_mattr_is_ok(uint32_t mattr);

void core_mmu_get_mem_by_type(enum teecore_memtypes type, vaddr_t *s,
			      vaddr_t *e);

enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa);

/* Routines to retrieve shared memory configuration */
static inline bool core_mmu_is_shm_cached(void)
{
	return mattr_is_cached(core_mmu_type_to_attr(MEM_AREA_NSEC_SHM));
}

TEE_Result core_mmu_remove_mapping(enum teecore_memtypes type, void *addr,
				   size_t len);
void *core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr,
			   size_t len);

/*
 * core_mmu_find_mapping_exclusive() - Find mapping of specified type and
 * length. If more than one mapping of the specified type is present, NULL
 * is returned.
 * @type: memory type
 * @len: length in bytes
 */
struct tee_mmap_region *
core_mmu_find_mapping_exclusive(enum teecore_memtypes type, size_t len);

/*
 * tlbi_va_range() - Invalidate TLB for virtual address range
 * @va: start virtual address, must be a multiple of @granule
 * @len: length in bytes of range, must be a multiple of @granule
 * @granule: granularity of mapping, supported values are
 * CORE_MMU_PGDIR_SIZE or SMALL_PAGE_SIZE. This value must
 * match the actual mappings.
 */
void tlbi_va_range(vaddr_t va, size_t len, size_t granule);
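/*
 * Example (illustrative only): mapping a physically contiguous secure
 * buffer and tearing the mapping down again. How @vstart is obtained is an
 * assumption; it would typically come from a virtual address space
 * allocator such as core_virt_mem_pool.
 *
 *	if (core_mmu_map_contiguous_pages(vstart, pstart, num_pages,
 *					  MEM_AREA_RAM_SEC) != TEE_SUCCESS)
 *		return TEE_ERROR_GENERIC;
 *	... use the mapping ...
 *	core_mmu_unmap_pages(vstart, num_pages);
 */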
/*
 * tlbi_va_range_asid() - Invalidate TLB for virtual address range for
 * a specific ASID
 * @va: start virtual address, must be a multiple of @granule
 * @len: length in bytes of range, must be a multiple of @granule
 * @granule: granularity of mapping, supported values are
 * CORE_MMU_PGDIR_SIZE or SMALL_PAGE_SIZE. This value must
 * match the actual mappings.
 * @asid: Address space identifier
 */
void tlbi_va_range_asid(vaddr_t va, size_t len, size_t granule, uint32_t asid);

/* Check whether the CPU MMU is enabled */
bool cpu_mmu_enabled(void);

#ifdef CFG_CORE_DYN_SHM
/*
 * Check if the platform defines non-secure DDR range(s).
 * Static SHM (MEM_AREA_NSEC_SHM) is not covered by this API as it is
 * always present.
 */
bool core_mmu_nsec_ddr_is_defined(void);

void core_mmu_set_discovered_nsec_ddr(struct core_mmu_phys_mem *start,
				      size_t nelems);
#endif

/* Initialize MMU partition */
void core_init_mmu_prtn(struct mmu_partition *prtn, struct tee_mmap_region *mm);

unsigned int asid_alloc(void);
void asid_free(unsigned int asid);

#ifdef CFG_SECURE_DATA_PATH
/* Allocate and fill SDP memory objects table - table is NULL terminated */
struct mobj **core_sdp_mem_create_mobjs(void);
#endif

#ifdef CFG_NS_VIRTUALIZATION
size_t core_mmu_get_total_pages_size(void);
struct mmu_partition *core_alloc_mmu_prtn(void *tables);
void core_free_mmu_prtn(struct mmu_partition *prtn);
void core_mmu_set_prtn(struct mmu_partition *prtn);
void core_mmu_set_default_prtn(void);
void core_mmu_set_default_prtn_tbl(void);
#endif

void core_mmu_init_virtualization(void);

/* Initialize some allocation pools */
void core_mmu_init_ta_ram(void);

void core_init_mmu(struct tee_mmap_region *mm);

void core_mmu_set_info_table(struct core_mmu_table_info *tbl_info,
			     unsigned int level, vaddr_t va_base, void *table);
void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
				struct user_mode_ctx *uctx);
void core_mmu_map_region(struct mmu_partition *prtn,
			 struct tee_mmap_region *mm);

bool arch_va2pa_helper(void *va, paddr_t *pa);

static inline bool core_mmap_is_end_of_table(const struct tee_mmap_region *mm)
{
	return mm->type == MEM_AREA_END;
}

static inline bool core_mmu_check_end_pa(paddr_t pa, size_t len)
{
	paddr_t end_pa = 0;

	if (ADD_OVERFLOW(pa, len - 1, &end_pa))
		return false;
	return core_mmu_check_max_pa(end_pa);
}

/*
 * core_mmu_set_secure_memory() - set physical secure memory range
 * @base: base address of secure memory
 * @size: size of secure memory
 *
 * The physical secure memory range is not known in advance when OP-TEE is
 * relocatable, so this information must be supplied once during boot before
 * the translation tables can be initialized and the MMU enabled.
 */
void core_mmu_set_secure_memory(paddr_t base, size_t size);

/*
 * core_mmu_get_secure_memory() - get physical secure memory range
 * @base: base address of secure memory
 * @size: size of secure memory
 *
 * The physical secure memory range returned covers at least the memory
 * range used by OP-TEE Core, but may cover more memory depending on the
 * configuration.
 */
void core_mmu_get_secure_memory(paddr_t *base, paddr_size_t *size);

/*
 * core_mmu_get_ta_range() - get physical memory range reserved for TAs
 * @base: [out] range base address ref or NULL
 * @size: [out] range size ref or NULL
 */
void core_mmu_get_ta_range(paddr_t *base, size_t *size);

#endif /*__ASSEMBLER__*/

#endif /* __MM_CORE_MMU_H */