/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */
#ifndef CORE_MMU_H
#define CORE_MMU_H

#ifndef __ASSEMBLER__
#include <assert.h>
#include <compiler.h>
#include <kernel/user_ta.h>
#include <mm/tee_mmu_types.h>
#include <types_ext.h>
#include <util.h>
#endif

#include <mm/core_mmu_arch.h>
#include <platform_config.h>

/* A small page is the smallest unit of memory that can be mapped */
#define SMALL_PAGE_SIZE BIT(SMALL_PAGE_SHIFT)
#define SMALL_PAGE_MASK ((paddr_t)SMALL_PAGE_SIZE - 1)

/*
 * PGDIR is the translation table above the translation table that holds
 * the pages.
 */
#define CORE_MMU_PGDIR_SIZE BIT(CORE_MMU_PGDIR_SHIFT)
#define CORE_MMU_PGDIR_MASK ((paddr_t)CORE_MMU_PGDIR_SIZE - 1)

/* TA user space code, data, stack and heap are mapped using this granularity */
#define CORE_MMU_USER_CODE_SIZE BIT(CORE_MMU_USER_CODE_SHIFT)
#define CORE_MMU_USER_CODE_MASK ((paddr_t)CORE_MMU_USER_CODE_SIZE - 1)

/* TA user space parameters are mapped using this granularity */
#define CORE_MMU_USER_PARAM_SIZE BIT(CORE_MMU_USER_PARAM_SHIFT)
#define CORE_MMU_USER_PARAM_MASK ((paddr_t)CORE_MMU_USER_PARAM_SIZE - 1)
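
/*
 * Illustrative use of the granule macros above (example only, not part of
 * this header): rounding an address down to its containing small page and
 * extracting the offset within that page, where pa is some paddr_t value:
 *
 *        paddr_t page_base = pa & ~SMALL_PAGE_MASK;
 *        size_t page_offs = pa & SMALL_PAGE_MASK;
 */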

/*
 * Identity mapping constraint: the virtual base address is the physical
 * start address. If the platform did not set some of the macros below,
 * they get a default value.
 */
#ifndef TEE_RAM_VA_SIZE
#define TEE_RAM_VA_SIZE CORE_MMU_PGDIR_SIZE
#endif

#ifndef TEE_LOAD_ADDR
#define TEE_LOAD_ADDR TEE_RAM_START
#endif

#ifndef STACK_ALIGNMENT
#define STACK_ALIGNMENT (sizeof(long) * U(2))
#endif

#ifndef __ASSEMBLER__
/*
 * Memory area type:
 * MEM_AREA_END: Reserved, marks the end of a table of mapping areas.
 * MEM_AREA_TEE_RAM: core RAM (read/write/executable, secure, reserved to TEE)
 * MEM_AREA_TEE_RAM_RX: core private read-only/executable memory (secure)
 * MEM_AREA_TEE_RAM_RO: core private read-only/non-executable memory (secure)
 * MEM_AREA_TEE_RAM_RW: core private read/write/non-executable memory (secure)
 * MEM_AREA_INIT_RAM_RO: init private read-only/non-executable memory (secure)
 * MEM_AREA_INIT_RAM_RX: init private read-only/executable memory (secure)
 * MEM_AREA_NEX_RAM_RO: nexus private read-only/non-executable memory (secure)
 * MEM_AREA_NEX_RAM_RW: nexus private r/w/non-executable memory (secure)
 * MEM_AREA_TEE_COHERENT: teecore coherent RAM (secure, reserved to TEE)
 * MEM_AREA_TEE_ASAN: core address sanitizer RAM (secure, reserved to TEE)
 * MEM_AREA_IDENTITY_MAP_RX: core identity mapped r/o executable memory (secure)
 * MEM_AREA_TA_RAM: Secure RAM where teecore loads/exec TA instances.
 * MEM_AREA_NSEC_SHM: NonSecure shared RAM between NSec and TEE.
 * MEM_AREA_RAM_NSEC: NonSecure RAM storing data
 * MEM_AREA_RAM_SEC: Secure RAM storing some secrets
 * MEM_AREA_IO_NSEC: NonSecure HW mapped registers
 * MEM_AREA_IO_SEC: Secure HW mapped registers
 * MEM_AREA_EXT_DT: Memory area where the external device tree is loaded
 * MEM_AREA_RES_VASPACE: Reserved virtual memory space
 * MEM_AREA_SHM_VASPACE: Virtual memory space for dynamic shared memory buffers
 * MEM_AREA_TS_VASPACE: TS va space, only used with phys_to_virt()
 * MEM_AREA_PAGER_VASPACE: Virtual memory space reserved for the pager
 * MEM_AREA_SDP_MEM: Secure Data Path memory
 * MEM_AREA_DDR_OVERALL: Overall DDR address range, candidate to dynamic shm.
 * MEM_AREA_SEC_RAM_OVERALL: Whole secure RAM
 * MEM_AREA_MAXTYPE: lowest invalid 'type' value
 */
enum teecore_memtypes {
        MEM_AREA_END = 0,
        MEM_AREA_TEE_RAM,
        MEM_AREA_TEE_RAM_RX,
        MEM_AREA_TEE_RAM_RO,
        MEM_AREA_TEE_RAM_RW,
        MEM_AREA_INIT_RAM_RO,
        MEM_AREA_INIT_RAM_RX,
        MEM_AREA_NEX_RAM_RO,
        MEM_AREA_NEX_RAM_RW,
        MEM_AREA_TEE_COHERENT,
        MEM_AREA_TEE_ASAN,
        MEM_AREA_IDENTITY_MAP_RX,
        MEM_AREA_TA_RAM,
        MEM_AREA_NSEC_SHM,
        MEM_AREA_RAM_NSEC,
        MEM_AREA_RAM_SEC,
        MEM_AREA_IO_NSEC,
        MEM_AREA_IO_SEC,
        MEM_AREA_EXT_DT,
        MEM_AREA_RES_VASPACE,
        MEM_AREA_SHM_VASPACE,
        MEM_AREA_TS_VASPACE,
        MEM_AREA_PAGER_VASPACE,
        MEM_AREA_SDP_MEM,
        MEM_AREA_DDR_OVERALL,
        MEM_AREA_SEC_RAM_OVERALL,
        MEM_AREA_MAXTYPE
};

static inline const char *teecore_memtype_name(enum teecore_memtypes type)
{
        static const char * const names[] = {
                [MEM_AREA_END] = "END",
                [MEM_AREA_TEE_RAM] = "TEE_RAM_RWX",
                [MEM_AREA_TEE_RAM_RX] = "TEE_RAM_RX",
                [MEM_AREA_TEE_RAM_RO] = "TEE_RAM_RO",
                [MEM_AREA_TEE_RAM_RW] = "TEE_RAM_RW",
                [MEM_AREA_INIT_RAM_RO] = "INIT_RAM_RO",
                [MEM_AREA_INIT_RAM_RX] = "INIT_RAM_RX",
                [MEM_AREA_NEX_RAM_RO] = "NEX_RAM_RO",
                [MEM_AREA_NEX_RAM_RW] = "NEX_RAM_RW",
                [MEM_AREA_TEE_ASAN] = "TEE_ASAN",
                [MEM_AREA_IDENTITY_MAP_RX] = "IDENTITY_MAP_RX",
                [MEM_AREA_TEE_COHERENT] = "TEE_COHERENT",
                [MEM_AREA_TA_RAM] = "TA_RAM",
                [MEM_AREA_NSEC_SHM] = "NSEC_SHM",
                [MEM_AREA_RAM_NSEC] = "RAM_NSEC",
                [MEM_AREA_RAM_SEC] = "RAM_SEC",
                [MEM_AREA_IO_NSEC] = "IO_NSEC",
                [MEM_AREA_IO_SEC] = "IO_SEC",
                [MEM_AREA_EXT_DT] = "EXT_DT",
                [MEM_AREA_RES_VASPACE] = "RES_VASPACE",
                [MEM_AREA_SHM_VASPACE] = "SHM_VASPACE",
                [MEM_AREA_TS_VASPACE] = "TS_VASPACE",
                [MEM_AREA_PAGER_VASPACE] = "PAGER_VASPACE",
                [MEM_AREA_SDP_MEM] = "SDP_MEM",
                [MEM_AREA_DDR_OVERALL] = "DDR_OVERALL",
                [MEM_AREA_SEC_RAM_OVERALL] = "SEC_RAM_OVERALL",
        };

        COMPILE_TIME_ASSERT(ARRAY_SIZE(names) == MEM_AREA_MAXTYPE);
        return names[type];
}

#ifdef CFG_CORE_RWDATA_NOEXEC
#define MEM_AREA_TEE_RAM_RW_DATA MEM_AREA_TEE_RAM_RW
#else
#define MEM_AREA_TEE_RAM_RW_DATA MEM_AREA_TEE_RAM
#endif

struct core_mmu_phys_mem {
        const char *name;
        enum teecore_memtypes type;
        __extension__ union {
#if __SIZEOF_LONG__ != __SIZEOF_PADDR__
                struct {
                        uint32_t lo_addr;
                        uint32_t hi_addr;
                };
#endif
                paddr_t addr;
        };
        __extension__ union {
#if __SIZEOF_LONG__ != __SIZEOF_PADDR__
                struct {
                        uint32_t lo_size;
                        uint32_t hi_size;
                };
#endif
                paddr_size_t size;
        };
};

#define __register_memory(_name, _type, _addr, _size, _section) \
        SCATTERED_ARRAY_DEFINE_ITEM(_section, struct core_mmu_phys_mem) = \
                { .name = (_name), .type = (_type), .addr = (_addr), \
                  .size = (_size) }

#if __SIZEOF_LONG__ != __SIZEOF_PADDR__
#define __register_memory_ul(_name, _type, _addr, _size, _section) \
        SCATTERED_ARRAY_DEFINE_ITEM(_section, struct core_mmu_phys_mem) = \
                { .name = (_name), .type = (_type), .lo_addr = (_addr), \
                  .lo_size = (_size) }
#else
#define __register_memory_ul(_name, _type, _addr, _size, _section) \
        __register_memory(_name, _type, _addr, _size, _section)
#endif

#define register_phys_mem(type, addr, size) \
        __register_memory(#addr, (type), (addr), (size), \
                          phys_mem_map)

#define register_phys_mem_ul(type, addr, size) \
        __register_memory_ul(#addr, (type), (addr), (size), \
                             phys_mem_map)

/* Same as register_phys_mem() but with PGDIR_SIZE granularity */
#define register_phys_mem_pgdir(type, addr, size) \
        __register_memory(#addr, type, ROUNDDOWN(addr, CORE_MMU_PGDIR_SIZE), \
                          ROUNDUP(size + addr - \
                                  ROUNDDOWN(addr, CORE_MMU_PGDIR_SIZE), \
                                  CORE_MMU_PGDIR_SIZE), phys_mem_map)
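
/*
 * Illustrative example (not part of this header): a platform would
 * typically register its secure device registers from a source file with,
 * for instance,
 *
 *        register_phys_mem_pgdir(MEM_AREA_IO_SEC, CONSOLE_UART_BASE,
 *                                CORE_MMU_PGDIR_SIZE);
 *
 * where CONSOLE_UART_BASE stands for a platform-provided base address
 * macro; the name is only used here as an example.
 */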

#ifdef CFG_SECURE_DATA_PATH
#define register_sdp_mem(addr, size) \
        __register_memory(#addr, MEM_AREA_SDP_MEM, (addr), (size), \
                          phys_sdp_mem)
#else
#define register_sdp_mem(addr, size) \
        static int CONCAT(__register_sdp_mem_unused, __COUNTER__) \
                __unused
#endif

/* register_dynamic_shm() is deprecated, please use register_ddr() instead */
#define register_dynamic_shm(addr, size) \
        __register_memory(#addr, MEM_AREA_DDR_OVERALL, (addr), (size), \
                          phys_ddr_overall_compat)

/*
 * register_ddr() - Define a memory range
 * @addr: Base address
 * @size: Length
 *
 * This macro can be used multiple times to define disjoint ranges. While
 * initializing, holes are carved out of these ranges where they overlap
 * with special memory, for instance memory registered with
 * register_sdp_mem().
 *
 * The memory that remains is accepted as non-secure shared memory when
 * communicating with normal world.
 *
 * This macro is an alternative to supplying the memory description with a
 * device tree blob.
 */
#define register_ddr(addr, size) \
        __register_memory(#addr, MEM_AREA_DDR_OVERALL, (addr), \
                          (size), phys_ddr_overall)
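
/*
 * Illustrative example (not part of this header): a platform with a single
 * DRAM bank could describe it with, for instance,
 *
 *        register_ddr(DRAM0_BASE, DRAM0_SIZE);
 *
 * DRAM0_BASE and DRAM0_SIZE are hypothetical platform macros used here
 * only for the example.
 */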

#define phys_ddr_overall_begin \
        SCATTERED_ARRAY_BEGIN(phys_ddr_overall, struct core_mmu_phys_mem)

#define phys_ddr_overall_end \
        SCATTERED_ARRAY_END(phys_ddr_overall, struct core_mmu_phys_mem)

#define phys_ddr_overall_compat_begin \
        SCATTERED_ARRAY_BEGIN(phys_ddr_overall_compat, struct core_mmu_phys_mem)

#define phys_ddr_overall_compat_end \
        SCATTERED_ARRAY_END(phys_ddr_overall_compat, struct core_mmu_phys_mem)

#define phys_sdp_mem_begin \
        SCATTERED_ARRAY_BEGIN(phys_sdp_mem, struct core_mmu_phys_mem)

#define phys_sdp_mem_end \
        SCATTERED_ARRAY_END(phys_sdp_mem, struct core_mmu_phys_mem)

#define phys_mem_map_begin \
        SCATTERED_ARRAY_BEGIN(phys_mem_map, struct core_mmu_phys_mem)

#define phys_mem_map_end \
        SCATTERED_ARRAY_END(phys_mem_map, struct core_mmu_phys_mem)
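
/*
 * Illustrative sketch (not part of this header), assuming the usual
 * scattered-array semantics where the begin/end macros above evaluate to
 * the first element and one-past-the-last element: walking every range
 * registered with register_phys_mem() and friends could look like
 *
 *        const struct core_mmu_phys_mem *mem = NULL;
 *
 *        for (mem = phys_mem_map_begin; mem < phys_mem_map_end; mem++)
 *                handle_range(mem->type, mem->addr, mem->size);
 *
 * where handle_range() is a hypothetical helper named only for the
 * example.
 */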

#ifdef CFG_CORE_RESERVED_SHM
/* Default NSec shared memory allocated from NSec world */
extern unsigned long default_nsec_shm_paddr;
extern unsigned long default_nsec_shm_size;
#endif

/*
 * Physical load address of OP-TEE, updated during boot if needed to
 * reflect the value actually used.
 */
#ifdef CFG_CORE_PHYS_RELOCATABLE
extern unsigned long core_mmu_tee_load_pa;
#else
extern const unsigned long core_mmu_tee_load_pa;
#endif

void core_init_mmu_map(unsigned long seed, struct core_mmu_config *cfg);
void core_init_mmu_regs(struct core_mmu_config *cfg);

/* Arch-specific function to help optimizing 1 MMU xlat table */
bool core_mmu_prefer_tee_ram_at_top(paddr_t paddr);

/*
 * struct mmu_partition - stores MMU partition.
 *
 * Basically it represents the whole MMU mapping. It is possible to create
 * multiple partitions and change them at runtime, effectively changing how
 * OP-TEE sees memory.
 * This is an opaque struct which is defined differently for v7 and LPAE
 * MMUs.
 *
 * This structure is used mostly when virtualization is enabled.
 * When CFG_NS_VIRTUALIZATION==n only the default partition exists.
 */
struct mmu_partition;

/*
 * core_mmu_get_user_va_range() - Return range of user va space
 * @base: Lowest user virtual address
 * @size: Size in bytes of user address space
 */
void core_mmu_get_user_va_range(vaddr_t *base, size_t *size);

/*
 * enum core_mmu_fault - different kinds of faults
 * @CORE_MMU_FAULT_ALIGNMENT: alignment fault
 * @CORE_MMU_FAULT_DEBUG_EVENT: debug event
 * @CORE_MMU_FAULT_TRANSLATION: translation fault
 * @CORE_MMU_FAULT_WRITE_PERMISSION: Permission fault during write
 * @CORE_MMU_FAULT_READ_PERMISSION: Permission fault during read
 * @CORE_MMU_FAULT_ASYNC_EXTERNAL: asynchronous external abort
 * @CORE_MMU_FAULT_ACCESS_BIT: access bit fault
 * @CORE_MMU_FAULT_TAG_CHECK: tag check fault
 * @CORE_MMU_FAULT_OTHER: Other/unknown fault
 */
enum core_mmu_fault {
        CORE_MMU_FAULT_ALIGNMENT,
        CORE_MMU_FAULT_DEBUG_EVENT,
        CORE_MMU_FAULT_TRANSLATION,
        CORE_MMU_FAULT_WRITE_PERMISSION,
        CORE_MMU_FAULT_READ_PERMISSION,
        CORE_MMU_FAULT_ASYNC_EXTERNAL,
        CORE_MMU_FAULT_ACCESS_BIT,
        CORE_MMU_FAULT_TAG_CHECK,
        CORE_MMU_FAULT_OTHER,
};

/*
 * core_mmu_get_fault_type() - get fault type
 * @fault_descr: Content of fault status or exception syndrome register
 * @returns an enum describing the content of fault status register.
 */
enum core_mmu_fault core_mmu_get_fault_type(uint32_t fault_descr);

/*
 * core_mmu_type_to_attr() - convert memory type to attribute
 * @t: memory type
 * @returns an attribute that can be passed to core_mmu_set_entry() and
 * friends
 */
uint32_t core_mmu_type_to_attr(enum teecore_memtypes t);

/*
 * core_mmu_create_user_map() - Create user mode mapping
 * @uctx: Pointer to user mode context
 * @map: MMU configuration to use when activating this VA space
 */
void core_mmu_create_user_map(struct user_mode_ctx *uctx,
                              struct core_mmu_user_map *map);
/*
 * core_mmu_get_user_map() - Reads current MMU configuration for user VA space
 * @map: MMU configuration for current user VA space.
 */
void core_mmu_get_user_map(struct core_mmu_user_map *map);

/*
 * core_mmu_set_user_map() - Set new MMU configuration for user VA space
 * @map: User context MMU configuration or NULL to set core VA space
 *
 * Activate user VA space mapping and set its ASID if @map is not NULL,
 * otherwise activate core mapping and set ASID to 0.
 */
void core_mmu_set_user_map(struct core_mmu_user_map *map);

/*
 * struct core_mmu_table_info - Properties for a translation table
 * @table: Pointer to translation table
 * @va_base: VA base address of the translation table
 * @level: Translation table level
 * @shift: The shift of each entry in the table
 * @num_entries: Number of entries in this table.
 */
struct core_mmu_table_info {
        void *table;
        vaddr_t va_base;
        unsigned level;
        unsigned shift;
        unsigned num_entries;
#ifdef CFG_NS_VIRTUALIZATION
        struct mmu_partition *prtn;
#endif
};

/*
 * core_mmu_find_table() - Locates a translation table
 * @prtn: MMU partition where search should be performed
 * @va: Virtual address for the table to cover
 * @max_level: Don't traverse beyond this level
 * @tbl_info: Pointer to where to store properties.
 * @return true if a translation table was found, false on error
 */
bool core_mmu_find_table(struct mmu_partition *prtn, vaddr_t va,
                         unsigned max_level,
                         struct core_mmu_table_info *tbl_info);

/*
 * core_mmu_entry_to_finer_grained() - divide mapping at current level into
 * smaller ones so memory can be mapped with finer granularity
 * @tbl_info: table where the target entry is located
 * @idx: index of the entry for which a pgdir must be set up.
 * @secure: true/false if pgdir maps secure/non-secure memory (32bit mmu)
 * @return true on success, false on error
 */
bool core_mmu_entry_to_finer_grained(struct core_mmu_table_info *tbl_info,
                                     unsigned int idx, bool secure);

void core_mmu_set_entry_primitive(void *table, size_t level, size_t idx,
                                  paddr_t pa, uint32_t attr);

void core_mmu_get_user_pgdir(struct core_mmu_table_info *pgd_info);

/*
 * core_mmu_set_entry() - Set entry in translation table
 * @tbl_info: Translation table properties
 * @idx: Index of entry to update
 * @pa: Physical address to assign entry
 * @attr: Attributes to assign entry
 */
void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
                        paddr_t pa, uint32_t attr);

void core_mmu_get_entry_primitive(const void *table, size_t level, size_t idx,
                                  paddr_t *pa, uint32_t *attr);

/*
 * core_mmu_get_entry() - Get entry from translation table
 * @tbl_info: Translation table properties
 * @idx: Index of entry to read
 * @pa: Physical address is returned here if pa is not NULL
 * @attr: Attributes are returned here if attr is not NULL
 */
void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
                        paddr_t *pa, uint32_t *attr);

/*
 * core_mmu_va2idx() - Translate from virtual address to table index
 * @tbl_info: Translation table properties
 * @va: Virtual address to translate
 * @returns index in translation table
 */
static inline unsigned core_mmu_va2idx(struct core_mmu_table_info *tbl_info,
                                       vaddr_t va)
{
        return (va - tbl_info->va_base) >> tbl_info->shift;
}

/*
 * core_mmu_idx2va() - Translate from table index to virtual address
 * @tbl_info: Translation table properties
 * @idx: Index to translate
 * @returns Virtual address
 */
static inline vaddr_t core_mmu_idx2va(struct core_mmu_table_info *tbl_info,
                                      unsigned idx)
{
        return (idx << tbl_info->shift) + tbl_info->va_base;
}

/*
 * core_mmu_get_block_offset() - Get offset inside a block/page
 * @tbl_info: Translation table properties
 * @pa: Physical address
 * @returns offset within one block of the translation table
 */
static inline size_t core_mmu_get_block_offset(
                        struct core_mmu_table_info *tbl_info, paddr_t pa)
{
        return pa & ((1 << tbl_info->shift) - 1);
}
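
/*
 * Illustrative sketch (not part of this header): resolving the physical
 * address backing a virtual address va with the helpers above, without
 * the error handling a real caller would need:
 *
 *        struct core_mmu_table_info tbl_info = { };
 *        unsigned int idx = 0;
 *        paddr_t pa = 0;
 *        uint32_t attr = 0;
 *
 *        if (core_mmu_find_table(NULL, va, UINT_MAX, &tbl_info)) {
 *                idx = core_mmu_va2idx(&tbl_info, va);
 *                core_mmu_get_entry(&tbl_info, idx, &pa, &attr);
 *        }
 *
 * Passing NULL as @prtn to mean the currently active/default partition is
 * an assumption here; UINT_MAX is used as @max_level to walk as deep as
 * possible.
 */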

/*
 * core_mmu_is_dynamic_vaspace() - Check if memory region belongs to
 *                                 empty virtual address space that is
 *                                 used for dynamic mappings
 * @mm: memory region to be checked
 * @returns result of the check
 */
static inline bool core_mmu_is_dynamic_vaspace(struct tee_mmap_region *mm)
{
        return mm->type == MEM_AREA_RES_VASPACE ||
               mm->type == MEM_AREA_SHM_VASPACE;
}

/*
 * core_mmu_map_pages() - map list of pages at given virtual address
 * @vstart: Virtual address where mapping begins
 * @pages: Array of page addresses
 * @num_pages: Number of pages
 * @memtype: Type of memory to be mapped
 *
 * Note: This function asserts that pages are not mapped executable for
 * kernel (privileged) mode.
 *
 * @returns: TEE_SUCCESS on success, TEE_ERROR_XXX on error
 */
TEE_Result core_mmu_map_pages(vaddr_t vstart, paddr_t *pages, size_t num_pages,
                              enum teecore_memtypes memtype);

/*
 * core_mmu_map_contiguous_pages() - map range of pages at given virtual
 * address
 * @vstart: Virtual address where mapping begins
 * @pstart: Physical address of the first page
 * @num_pages: Number of pages
 * @memtype: Type of memory to be mapped
 *
 * Note: This function asserts that pages are not mapped executable for
 * kernel (privileged) mode.
 *
 * @returns: TEE_SUCCESS on success, TEE_ERROR_XXX on error
 */
TEE_Result core_mmu_map_contiguous_pages(vaddr_t vstart, paddr_t pstart,
                                         size_t num_pages,
                                         enum teecore_memtypes memtype);

/*
 * core_mmu_unmap_pages() - remove mapping at given virtual address
 * @vstart: Virtual address where mapping begins
 * @num_pages: Number of pages to unmap
 */
void core_mmu_unmap_pages(vaddr_t vstart, size_t num_pages);

/*
 * core_mmu_user_mapping_is_active() - Report if user mapping is active
 * @returns true if a user VA space is active, false if user VA space is
 * inactive.
 */
bool core_mmu_user_mapping_is_active(void);

/*
 * core_mmu_mattr_is_ok() - Check that supplied mem attributes can be used
 * @returns true if the attributes can be used, false if not.
 */
bool core_mmu_mattr_is_ok(uint32_t mattr);

void core_mmu_get_mem_by_type(enum teecore_memtypes type, vaddr_t *s,
                              vaddr_t *e);

enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa);

/* Routines to retrieve shared memory configuration */
static inline bool core_mmu_is_shm_cached(void)
{
        return mattr_is_cached(core_mmu_type_to_attr(MEM_AREA_NSEC_SHM));
}

TEE_Result core_mmu_remove_mapping(enum teecore_memtypes type, void *addr,
                                   size_t len);
void *core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr,
                           size_t len);
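
/*
 * Illustrative sketch (not part of this header): mapping a physical range
 * at runtime and tearing it down again. That core_mmu_add_mapping()
 * returns NULL on failure is an assumption based on its void * return
 * type, not stated above. dev_pa and dev_len are placeholders for a
 * device's physical base address and size.
 *
 *        void *va = core_mmu_add_mapping(MEM_AREA_IO_SEC, dev_pa, dev_len);
 *
 *        if (va) {
 *                ... access the device registers through va ...
 *                core_mmu_remove_mapping(MEM_AREA_IO_SEC, va, dev_len);
 *        }
 */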

/*
 * core_mmu_find_mapping_exclusive() - Find mapping of specified type and
 *                                     length. If more than one mapping of
 *                                     specified type is present, NULL will
 *                                     be returned.
 * @type: memory type
 * @len: length in bytes
 */
struct tee_mmap_region *
core_mmu_find_mapping_exclusive(enum teecore_memtypes type, size_t len);

/*
 * tlbi_mva_range() - Invalidate TLB for virtual address range
 * @va: start virtual address, must be a multiple of @granule
 * @len: length in bytes of range, must be a multiple of @granule
 * @granule: granularity of mapping, supported values are
 *           CORE_MMU_PGDIR_SIZE or SMALL_PAGE_SIZE. This value must
 *           match the actual mappings.
 */
void tlbi_mva_range(vaddr_t va, size_t len, size_t granule);

/*
 * tlbi_mva_range_asid() - Invalidate TLB for virtual address range for
 *                         a specific ASID
 * @va: start virtual address, must be a multiple of @granule
 * @len: length in bytes of range, must be a multiple of @granule
 * @granule: granularity of mapping, supported values are
 *           CORE_MMU_PGDIR_SIZE or SMALL_PAGE_SIZE. This value must
 *           match the actual mappings.
 * @asid: Address space identifier
 */
void tlbi_mva_range_asid(vaddr_t va, size_t len, size_t granule, uint32_t asid);
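
/*
 * Illustrative example (not part of this header): invalidating the
 * translations of a range of num_pages small pages starting at va, after
 * its entries have been changed:
 *
 *        tlbi_mva_range(va, num_pages * SMALL_PAGE_SIZE, SMALL_PAGE_SIZE);
 */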

/* Check whether the CPU MMU is enabled */
bool cpu_mmu_enabled(void);

#ifdef CFG_CORE_DYN_SHM
/*
 * Check if platform defines nsec DDR range(s).
 * Static SHM (MEM_AREA_NSEC_SHM) is not covered by this API as it is
 * always present.
 */
bool core_mmu_nsec_ddr_is_defined(void);

void core_mmu_set_discovered_nsec_ddr(struct core_mmu_phys_mem *start,
                                      size_t nelems);
#endif

/* Initialize MMU partition */
void core_init_mmu_prtn(struct mmu_partition *prtn, struct tee_mmap_region *mm);

unsigned int asid_alloc(void);
void asid_free(unsigned int asid);

#ifdef CFG_SECURE_DATA_PATH
/* Alloc and fill SDP memory objects table - table is NULL terminated */
struct mobj **core_sdp_mem_create_mobjs(void);
#endif

#ifdef CFG_NS_VIRTUALIZATION
size_t core_mmu_get_total_pages_size(void);
struct mmu_partition *core_alloc_mmu_prtn(void *tables);
void core_free_mmu_prtn(struct mmu_partition *prtn);
void core_mmu_set_prtn(struct mmu_partition *prtn);
void core_mmu_set_default_prtn(void);
void core_mmu_set_default_prtn_tbl(void);
#endif

void core_mmu_init_virtualization(void);

/* Init some allocation pools */
void core_mmu_init_ta_ram(void);

void core_init_mmu(struct tee_mmap_region *mm);

void core_mmu_set_info_table(struct core_mmu_table_info *tbl_info,
                             unsigned int level, vaddr_t va_base, void *table);
void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
                                struct user_mode_ctx *uctx);
void core_mmu_map_region(struct mmu_partition *prtn,
                         struct tee_mmap_region *mm);

bool arch_va2pa_helper(void *va, paddr_t *pa);

static inline bool core_mmap_is_end_of_table(const struct tee_mmap_region *mm)
{
        return mm->type == MEM_AREA_END;
}

static inline bool core_mmu_check_end_pa(paddr_t pa, size_t len)
{
        paddr_t end_pa = 0;

        if (ADD_OVERFLOW(pa, len - 1, &end_pa))
                return false;
        return core_mmu_check_max_pa(end_pa);
}

#ifdef CFG_CORE_PHYS_RELOCATABLE
/*
 * core_mmu_set_secure_memory() - set physical secure memory range
 * @base: base address of secure memory
 * @size: size of secure memory
 *
 * The physical secure memory range is not known in advance when OP-TEE is
 * relocatable. This information must be supplied once during boot before
 * the translation tables can be initialized and the MMU enabled.
 */
void core_mmu_set_secure_memory(paddr_t base, size_t size);
#endif

/*
 * core_mmu_get_secure_memory() - get physical secure memory range
 * @base: base address of secure memory
 * @size: size of secure memory
 *
 * The physical secure memory range returned covers at least the memory
 * range used by OP-TEE Core, but may cover more memory depending on the
 * configuration.
 */
void core_mmu_get_secure_memory(paddr_t *base, paddr_size_t *size);

/*
 * core_mmu_get_ta_range() - get physical memory range reserved for TAs
 * @base: [out] range base address ref or NULL
 * @size: [out] range size ref or NULL
 */
void core_mmu_get_ta_range(paddr_t *base, size_t *size);

#endif /*__ASSEMBLER__*/

#endif /* CORE_MMU_H */