/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <inttypes.h>
#include <kernel/tee_common_unpg.h>
#include <kernel/tee_common.h>
#include <kernel/thread_defs.h>
#include <kernel/panic.h>
#include <mm/tee_mmu_defs.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_kta_trace.h>
#include <kernel/misc.h>
#include <kernel/tee_misc.h>
#include <mm/tee_pager.h>
#include <mm/tee_mm.h>
#include <mm/core_mmu.h>
#include <tee/arch_svc.h>
#include <arm.h>
#include <tee/tee_cryp_provider.h>
#include <tee_api_defines.h>
#include <utee_defines.h>
#include <trace.h>

struct tee_pager_abort_info {
        uint32_t abort_type;
        uint32_t fault_descr;
        vaddr_t va;
        uint32_t pc;
        struct thread_abort_regs *regs;
};

enum tee_pager_fault_type {
        TEE_PAGER_FAULT_TYPE_USER_TA_PANIC,
        TEE_PAGER_FAULT_TYPE_PAGEABLE,
        TEE_PAGER_FAULT_TYPE_IGNORE,
};

#ifdef CFG_WITH_PAGER
struct tee_pager_area {
        const uint8_t *hashes;
        const uint8_t *store;
        uint32_t flags;
        tee_mm_entry_t *mm;
        TAILQ_ENTRY(tee_pager_area) link;
};

static TAILQ_HEAD(tee_pager_area_head, tee_pager_area) tee_pager_area_head =
        TAILQ_HEAD_INITIALIZER(tee_pager_area_head);

/*
 * struct tee_pager_pmem - Represents a physical page used for paging.
 *
 * @pgidx       index of the entry in tbl_info. The actual physical
 *              address is kept in that MMU entry, so even if the page
 *              isn't mapped there's always an MMU entry holding the
 *              physical address.
 *
 * @area        a pointer to the pager area
 */
struct tee_pager_pmem {
        unsigned pgidx;
        struct tee_pager_area *area;
        TAILQ_ENTRY(tee_pager_pmem) link;
};
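/*
 * Overview of the bookkeeping below: tee_pager_pmem_head holds the
 * physical pages that may be recycled, in rough least-recently-faulted
 * order (oldest first), while tee_pager_rw_pmem_head holds read/write
 * pages that have been handed out permanently. A fraction of the
 * pageable pages is periodically "hidden" (made inaccessible) so that a
 * later access faults and moves the page to the back of the list, which
 * approximates usage tracking.
 *
 * For illustration only (the real call sites live in the core init
 * code, not in this file), the physical pages are registered first and
 * the pageable areas afterwards, roughly:
 *
 *      tee_pager_add_pages(pageable_va, num_pages, true);
 *      tee_pager_add_area(mm, TEE_PAGER_AREA_RO, store, hashes);
 */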
/* The list of physical pages. The first page in the list is the oldest. */
TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);

static struct tee_pager_pmem_head tee_pager_pmem_head =
        TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);

static struct tee_pager_pmem_head tee_pager_rw_pmem_head =
        TAILQ_HEAD_INITIALIZER(tee_pager_rw_pmem_head);

/* Number of pages to hide at a time */
#define TEE_PAGER_NHIDE (tee_pager_npages / 3)

/* Number of registered physical pages, used when hiding pages. */
static size_t tee_pager_npages;

/*
 * Reference to translation table used to map the virtual memory range
 * covered by the pager.
 */
static struct core_mmu_table_info tbl_info;

bool tee_pager_add_area(tee_mm_entry_t *mm, uint32_t flags, const void *store,
                const void *hashes)
{
        struct tee_pager_area *area;
        size_t tbl_va_size;

        DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
                tee_mm_get_smem(mm),
                tee_mm_get_smem(mm) + (mm->size << mm->pool->shift),
                flags, store, hashes);

        if (flags & TEE_PAGER_AREA_RO)
                TEE_ASSERT(store && hashes);
        else if (flags & TEE_PAGER_AREA_RW)
                TEE_ASSERT(!store && !hashes);
        else
                panic();

        if (!tbl_info.num_entries) {
                if (!core_mmu_find_table(tee_mm_get_smem(mm), UINT_MAX,
                                &tbl_info))
                        return false;
                if ((1 << tbl_info.shift) != SMALL_PAGE_SIZE) {
                        DMSG("Unsupported page size in translation table %u",
                                1 << tbl_info.shift);
                        return false;
                }
        }

        tbl_va_size = (1 << tbl_info.shift) * tbl_info.num_entries;
        if (!core_is_buffer_inside(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
                        tbl_info.va_base, tbl_va_size)) {
                DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
                        tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
                        tbl_info.va_base, tbl_va_size);
                return false;
        }

        area = malloc(sizeof(struct tee_pager_area));
        if (!area)
                return false;

        area->mm = mm;
        area->flags = flags;
        area->store = store;
        area->hashes = hashes;
        TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
        return true;
}

static struct tee_pager_area *tee_pager_find_area(vaddr_t va)
{
        struct tee_pager_area *area;

        TAILQ_FOREACH(area, &tee_pager_area_head, link) {
                tee_mm_entry_t *mm = area->mm;
                size_t offset = (va - mm->pool->lo) >> mm->pool->shift;

                if (offset >= mm->offset && offset < (mm->offset + mm->size))
                        return area;
        }
        return NULL;
}

static uint32_t get_area_mattr(struct tee_pager_area *area __unused)
{
        uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
                        TEE_MATTR_CACHE_DEFAULT | TEE_MATTR_SECURE;

        attr |= TEE_MATTR_PRWX;

        return attr;
}

static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va)
{
        size_t pg_idx = (page_va - area->mm->pool->lo) >> SMALL_PAGE_SHIFT;

        if (area->store) {
                size_t rel_pg_idx = pg_idx - area->mm->offset;
                const void *stored_page = area->store +
                        rel_pg_idx * SMALL_PAGE_SIZE;

                memcpy((void *)page_va, stored_page, SMALL_PAGE_SIZE);
        } else {
                memset((void *)page_va, 0, SMALL_PAGE_SIZE);
        }
}
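/*
 * Pages that come from a backing store (read-only areas) carry a
 * per-page SHA-256 hash in area->hashes. After a page has been copied
 * in by tee_pager_load_page() it is checked against that hash; a
 * mismatch is treated as tampering and the core panics.
 */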
static void tee_pager_verify_page(struct tee_pager_area *area, vaddr_t page_va)
{
        size_t pg_idx = (page_va - area->mm->pool->lo) >> SMALL_PAGE_SHIFT;

        if (area->store) {
                size_t rel_pg_idx = pg_idx - area->mm->offset;
                const void *hash = area->hashes +
                        rel_pg_idx * TEE_SHA256_HASH_SIZE;

                if (hash_sha256_check(hash, (void *)page_va, SMALL_PAGE_SIZE) !=
                                TEE_SUCCESS) {
                        EMSG("PH 0x%" PRIxVA " failed", page_va);
                        panic();
                }
        }
}

static bool tee_pager_unhide_page(vaddr_t page_va)
{
        struct tee_pager_pmem *pmem;

        TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
                paddr_t pa;
                uint32_t attr;

                core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);

                if (!(attr & TEE_MATTR_HIDDEN_BLOCK))
                        continue;

                if (core_mmu_va2idx(&tbl_info, page_va) == pmem->pgidx) {
                        /* page is hidden, show and move to back */
                        core_mmu_set_entry(&tbl_info, pmem->pgidx, pa,
                                get_area_mattr(pmem->area));

                        TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
                        TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);

                        /* TODO only invalidate entry touched above */
                        core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
                        return true;
                }
        }

        return false;
}

static void tee_pager_hide_pages(void)
{
        struct tee_pager_pmem *pmem;
        size_t n = 0;

        TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
                paddr_t pa;
                uint32_t attr;

                if (n >= TEE_PAGER_NHIDE)
                        break;
                n++;
                core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);
                if (!(attr & TEE_MATTR_VALID_BLOCK))
                        continue;

                core_mmu_set_entry(&tbl_info, pmem->pgidx, pa,
                        TEE_MATTR_HIDDEN_BLOCK);
        }

        /* TODO only invalidate entries touched above */
        core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}
#endif /*CFG_WITH_PAGER*/

#ifdef ARM32
/* Returns true if the exception originated from user mode */
static bool tee_pager_is_user_exception(struct tee_pager_abort_info *ai)
{
        return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif /*ARM32*/

#ifdef ARM64
/* Returns true if the exception originated from user mode */
static bool tee_pager_is_user_exception(struct tee_pager_abort_info *ai)
{
        uint32_t spsr = ai->regs->spsr;

        if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
                return true;
        if (((spsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
                        SPSR_64_MODE_EL0)
                return true;
        return false;
}
#endif /*ARM64*/

#ifdef ARM32
/* Returns true if the exception originated from abort mode */
static bool tee_pager_is_abort_in_abort_handler(struct tee_pager_abort_info *ai)
{
        return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_ABT;
}
#endif /*ARM32*/

#ifdef ARM64
/* Returns true if the exception originated from abort mode */
static bool tee_pager_is_abort_in_abort_handler(
                struct tee_pager_abort_info *ai __unused)
{
        return false;
}
#endif /*ARM64*/

static __unused const char *abort_type_to_str(uint32_t abort_type)
{
        if (abort_type == THREAD_ABORT_DATA)
                return "data";
        if (abort_type == THREAD_ABORT_PREFETCH)
                return "prefetch";
        return "undef";
}
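/*
 * Abort reporting helpers. tee_pager_print_detailed_abort() dumps the
 * fault status, translation table base registers and the full register
 * file of the aborting context; the register dump is necessarily
 * architecture specific, hence the separate ARM32/ARM64 sections below.
 */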
static __unused void tee_pager_print_detailed_abort(
                struct tee_pager_abort_info *ai __unused,
                const char *ctx __unused)
{
        EMSG_RAW("\n%s %s-abort at address 0x%" PRIxVA "\n",
                ctx, abort_type_to_str(ai->abort_type), ai->va);
#ifdef ARM32
        EMSG_RAW(" fsr 0x%08x ttbr0 0x%08x ttbr1 0x%08x cidr 0x%X\n",
                ai->fault_descr, read_ttbr0(), read_ttbr1(),
                read_contextidr());
        EMSG_RAW(" cpu #%zu cpsr 0x%08x\n",
                get_core_pos(), ai->regs->spsr);
        EMSG_RAW(" r0 0x%08x r4 0x%08x r8 0x%08x r12 0x%08x\n",
                ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
        EMSG_RAW(" r1 0x%08x r5 0x%08x r9 0x%08x sp 0x%08x\n",
                ai->regs->r1, ai->regs->r5, ai->regs->r9,
                read_mode_sp(ai->regs->spsr & CPSR_MODE_MASK));
        EMSG_RAW(" r2 0x%08x r6 0x%08x r10 0x%08x lr 0x%08x\n",
                ai->regs->r2, ai->regs->r6, ai->regs->r10,
                read_mode_lr(ai->regs->spsr & CPSR_MODE_MASK));
        EMSG_RAW(" r3 0x%08x r7 0x%08x r11 0x%08x pc 0x%08x\n",
                ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
#endif /*ARM32*/
#ifdef ARM64
        EMSG_RAW(" esr 0x%08x ttbr0 0x%08" PRIx64 " ttbr1 0x%08" PRIx64 " cidr 0x%X\n",
                ai->fault_descr, read_ttbr0_el1(), read_ttbr1_el1(),
                read_contextidr_el1());
        EMSG_RAW(" cpu #%zu cpsr 0x%08x\n",
                get_core_pos(), (uint32_t)ai->regs->spsr);
        EMSG_RAW("x0 %016" PRIx64 " x1 %016" PRIx64,
                ai->regs->x0, ai->regs->x1);
        EMSG_RAW("x2 %016" PRIx64 " x3 %016" PRIx64,
                ai->regs->x2, ai->regs->x3);
        EMSG_RAW("x4 %016" PRIx64 " x5 %016" PRIx64,
                ai->regs->x4, ai->regs->x5);
        EMSG_RAW("x6 %016" PRIx64 " x7 %016" PRIx64,
                ai->regs->x6, ai->regs->x7);
        EMSG_RAW("x8 %016" PRIx64 " x9 %016" PRIx64,
                ai->regs->x8, ai->regs->x9);
        EMSG_RAW("x10 %016" PRIx64 " x11 %016" PRIx64,
                ai->regs->x10, ai->regs->x11);
        EMSG_RAW("x12 %016" PRIx64 " x13 %016" PRIx64,
                ai->regs->x12, ai->regs->x13);
        EMSG_RAW("x14 %016" PRIx64 " x15 %016" PRIx64,
                ai->regs->x14, ai->regs->x15);
        EMSG_RAW("x16 %016" PRIx64 " x17 %016" PRIx64,
                ai->regs->x16, ai->regs->x17);
        EMSG_RAW("x18 %016" PRIx64 " x19 %016" PRIx64,
                ai->regs->x18, ai->regs->x19);
        EMSG_RAW("x20 %016" PRIx64 " x21 %016" PRIx64,
                ai->regs->x20, ai->regs->x21);
        EMSG_RAW("x22 %016" PRIx64 " x23 %016" PRIx64,
                ai->regs->x22, ai->regs->x23);
        EMSG_RAW("x24 %016" PRIx64 " x25 %016" PRIx64,
                ai->regs->x24, ai->regs->x25);
        EMSG_RAW("x26 %016" PRIx64 " x27 %016" PRIx64,
                ai->regs->x26, ai->regs->x27);
        EMSG_RAW("x28 %016" PRIx64 " x29 %016" PRIx64,
                ai->regs->x28, ai->regs->x29);
        EMSG_RAW("x30 %016" PRIx64 " elr %016" PRIx64,
                ai->regs->x30, ai->regs->elr);
        EMSG_RAW("sp_el0 %016" PRIx64, ai->regs->sp_el0);
#endif /*ARM64*/
}

static void tee_pager_print_user_abort(struct tee_pager_abort_info *ai __unused)
{
#ifdef CFG_TEE_CORE_TA_TRACE
        tee_pager_print_detailed_abort(ai, "user TA");
        tee_ta_dump_current();
#endif
}

static void tee_pager_print_abort(struct tee_pager_abort_info *ai __unused)
{
#if (TRACE_LEVEL >= TRACE_DEBUG)
        tee_pager_print_detailed_abort(ai, "core");
#endif /*TRACE_LEVEL >= TRACE_DEBUG*/
}

static void tee_pager_print_error_abort(
                struct tee_pager_abort_info *ai __unused)
{
#if (TRACE_LEVEL >= TRACE_DEBUG)
        /* full verbose log at DEBUG level */
        tee_pager_print_detailed_abort(ai, "core");
#else
#ifdef ARM32
        EMSG("%s-abort at 0x%" PRIxVA "\n"
             "FSR 0x%x PC 0x%x TTBR0 0x%X CONTEXTIDR 0x%X\n"
             "CPUID 0x%x CPSR 0x%x (read from SPSR)",
             abort_type_to_str(ai->abort_type),
             ai->va, ai->fault_descr, ai->pc, read_ttbr0(), read_contextidr(),
             read_mpidr(), read_spsr());
#endif /*ARM32*/
#ifdef ARM64
        EMSG("%s-abort at 0x%" PRIxVA "\n"
             "ESR 0x%x PC 0x%x TTBR0 0x%" PRIx64 " CONTEXTIDR 0x%X\n"
             "CPUID 0x%" PRIx64 " CPSR 0x%x (read from SPSR)",
             abort_type_to_str(ai->abort_type),
             ai->va, ai->fault_descr, ai->pc, read_ttbr0_el1(),
             read_contextidr_el1(),
             read_mpidr_el1(), (uint32_t)ai->regs->spsr);
#endif /*ARM64*/
#endif /*TRACE_LEVEL >= TRACE_DEBUG*/
}
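/*
 * Classify an abort into one of three outcomes: a fault taken in user
 * mode makes the current TA panic, a translation or permission fault is
 * handed to the pager, and debug events and async external aborts are
 * ignored. Anything else (alignment faults, faults taken while already
 * in the abort handler, undefined aborts) traps the CPU.
 */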
static enum tee_pager_fault_type tee_pager_get_fault_type(
                struct tee_pager_abort_info *ai)
{
        /* In case of multithreaded version, this section must be protected */
        if (tee_pager_is_user_exception(ai)) {
                tee_pager_print_user_abort(ai);
                DMSG("[TEE_PAGER] abort in User mode (TA will panic)");
                return TEE_PAGER_FAULT_TYPE_USER_TA_PANIC;
        }

        if (tee_pager_is_abort_in_abort_handler(ai)) {
                tee_pager_print_error_abort(ai);
                EMSG("[PAGER] abort in abort handler (trap CPU)");
                panic();
        }

        if (ai->abort_type == THREAD_ABORT_UNDEF) {
                tee_pager_print_error_abort(ai);
                EMSG("[TEE_PAGER] undefined abort (trap CPU)");
                panic();
        }

        switch (core_mmu_get_fault_type(ai->fault_descr)) {
        case CORE_MMU_FAULT_ALIGNMENT:
                tee_pager_print_error_abort(ai);
                EMSG("[TEE_PAGER] alignment fault! (trap CPU)");
                panic();
                break;

        case CORE_MMU_FAULT_DEBUG_EVENT:
                tee_pager_print_abort(ai);
                DMSG("[TEE_PAGER] Ignoring debug event!");
                return TEE_PAGER_FAULT_TYPE_IGNORE;

        case CORE_MMU_FAULT_TRANSLATION:
        case CORE_MMU_FAULT_PERMISSION:
                return TEE_PAGER_FAULT_TYPE_PAGEABLE;

        case CORE_MMU_FAULT_ASYNC_EXTERNAL:
                tee_pager_print_abort(ai);
                DMSG("[TEE_PAGER] Ignoring async external abort!");
                return TEE_PAGER_FAULT_TYPE_IGNORE;

        case CORE_MMU_FAULT_OTHER:
        default:
                tee_pager_print_abort(ai);
                DMSG("[TEE_PAGER] Unhandled fault!");
                return TEE_PAGER_FAULT_TYPE_IGNORE;
        }
}

#ifdef CFG_WITH_PAGER
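/*
 * Page replacement: the MMU entry for the faulting address is reused if
 * it already holds a physical address (TEE_MATTR_PHYS_BLOCK); otherwise
 * the oldest entry in tee_pager_pmem_head is evicted and remapped.
 * Pages backed by a store are reinserted at the tail of the list, while
 * store-less (RW) pages are moved to tee_pager_rw_pmem_head and are
 * effectively pinned from then on.
 */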
/* Finds the oldest page and remaps it for the new virtual address */
static struct tee_pager_pmem *tee_pager_get_page(
                struct tee_pager_abort_info *ai,
                struct tee_pager_area *area)
{
        unsigned pgidx = core_mmu_va2idx(&tbl_info, ai->va);
        struct tee_pager_pmem *pmem;
        paddr_t pa;
        uint32_t attr;

        core_mmu_get_entry(&tbl_info, pgidx, &pa, &attr);

        assert(!(attr & (TEE_MATTR_VALID_BLOCK | TEE_MATTR_HIDDEN_BLOCK)));

        if (attr & TEE_MATTR_PHYS_BLOCK) {
                /*
                 * There's a pmem entry using this mmu entry, let's use
                 * that entry in the new mapping.
                 */
                TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
                        if (pmem->pgidx == pgidx)
                                break;
                }
                if (!pmem) {
                        tee_pager_print_abort(ai);
                        DMSG("Couldn't find pmem for pgidx %u", pgidx);
                        panic();
                }
        } else {
                pmem = TAILQ_FIRST(&tee_pager_pmem_head);
                if (!pmem) {
                        tee_pager_print_abort(ai);
                        DMSG("No pmem entries");
                        panic();
                }
                core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);
                core_mmu_set_entry(&tbl_info, pmem->pgidx, 0, 0);
        }

        pmem->pgidx = pgidx;
        pmem->area = area;
        core_mmu_set_entry(&tbl_info, pgidx, pa, get_area_mattr(area));

        TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
        if (area->store) {
                /* move page to back */
                TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
        } else {
                /* Move page to rw list */
                TEE_ASSERT(tee_pager_npages > 0);
                tee_pager_npages--;
                TAILQ_INSERT_TAIL(&tee_pager_rw_pmem_head, pmem, link);
        }

        /* TODO only invalidate entries touched above */
        core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

#ifdef TEE_PAGER_DEBUG_PRINT
        DMSG("Mapped 0x%x -> 0x%x", core_mmu_idx2va(&tbl_info, pgidx), pa);
#endif

        return pmem;
}

static void tee_pager_handle_fault(struct tee_pager_abort_info *ai)
{
        struct tee_pager_area *area;
        vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;

#ifdef TEE_PAGER_DEBUG_PRINT
        tee_pager_print_abort(ai);
#endif

        /* check if the access is valid */
        area = tee_pager_find_area(ai->va);
        if (!area) {
                tee_pager_print_abort(ai);
                DMSG("Invalid addr 0x%" PRIxVA, ai->va);
                panic();
        }

        if (!tee_pager_unhide_page(page_va)) {
                /* the page wasn't hidden */
                tee_pager_get_page(ai, area);

                /* load page code & data */
                tee_pager_load_page(area, page_va);
                /* TODO remap readonly if TEE_PAGER_AREA_RO */
                tee_pager_verify_page(area, page_va);
                /* TODO remap executable if TEE_PAGER_AREA_X */

                if (area->flags & TEE_PAGER_AREA_X) {
                        cache_maintenance_l1(DCACHE_AREA_CLEAN,
                                (void *)page_va, SMALL_PAGE_SIZE);

                        cache_maintenance_l1(ICACHE_AREA_INVALIDATE,
                                (void *)page_va, SMALL_PAGE_SIZE);
                }
        }

        tee_pager_hide_pages();
        /* end protect (multithreaded version) */
}

#else /*CFG_WITH_PAGER*/
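/*
 * With CFG_WITH_PAGER disabled there is no backing store to page from,
 * so any pageable fault that reaches this point indicates a mapping bug
 * and the only sensible response is to trap the CPU.
 */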
static void tee_pager_handle_fault(struct tee_pager_abort_info *ai)
{
        /*
         * Until PAGER is supported, trap CPU here.
         */
        tee_pager_print_error_abort(ai);
        EMSG("Unexpected page fault! Trap CPU");
        panic();
}

#endif /*CFG_WITH_PAGER*/

#ifdef ARM32
static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs,
                struct tee_pager_abort_info *ai)
{
        switch (abort_type) {
        case THREAD_ABORT_DATA:
                ai->fault_descr = read_dfsr();
                ai->va = read_dfar();
                break;
        case THREAD_ABORT_PREFETCH:
                ai->fault_descr = read_ifsr();
                ai->va = read_ifar();
                break;
        default:
                ai->fault_descr = 0;
                ai->va = regs->elr;
                break;
        }
        ai->abort_type = abort_type;
        ai->pc = regs->elr;
        ai->regs = regs;
}
#endif /*ARM32*/

#ifdef ARM64
static void set_abort_info(uint32_t abort_type __unused,
                struct thread_abort_regs *regs, struct tee_pager_abort_info *ai)
{
        ai->fault_descr = read_esr_el1();
        switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
        case ESR_EC_IABT_EL0:
        case ESR_EC_IABT_EL1:
                ai->abort_type = THREAD_ABORT_PREFETCH;
                ai->va = read_far_el1();
                break;
        case ESR_EC_DABT_EL0:
        case ESR_EC_DABT_EL1:
        case ESR_EC_SP_ALIGN:
                ai->abort_type = THREAD_ABORT_DATA;
                ai->va = read_far_el1();
                break;
        default:
                ai->abort_type = THREAD_ABORT_UNDEF;
                ai->va = regs->elr;
        }
        ai->pc = regs->elr;
        ai->regs = regs;
}
#endif /*ARM64*/
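/*
 * A fault taken in user mode does not bring the core down. Instead,
 * handle_user_ta_panic() rewrites the saved exception frame so that the
 * exception return lands in thread_unwind_user_mode() in kernel mode
 * with TEE_ERROR_TARGET_DEAD in r0/x0, i.e. user execution is stopped
 * and control goes back to TEE Core with an error.
 */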
#ifdef ARM32
static void handle_user_ta_panic(struct tee_pager_abort_info *ai)
{
        /*
         * It was a user exception, stop user execution and return
         * to TEE Core.
         */
        ai->regs->r0 = TEE_ERROR_TARGET_DEAD;
        ai->regs->r1 = true;
        ai->regs->r2 = 0xdeadbeef;
        ai->regs->elr = (uint32_t)thread_unwind_user_mode;
        ai->regs->spsr = read_cpsr();
        ai->regs->spsr &= ~CPSR_MODE_MASK;
        ai->regs->spsr |= CPSR_MODE_SVC;
        ai->regs->spsr &= ~CPSR_FIA;
        ai->regs->spsr |= read_spsr() & CPSR_FIA;
        /* Select Thumb or ARM mode */
        if (ai->regs->elr & 1)
                ai->regs->spsr |= CPSR_T;
        else
                ai->regs->spsr &= ~CPSR_T;
}
#endif /*ARM32*/

#ifdef ARM64
static void handle_user_ta_panic(struct tee_pager_abort_info *ai)
{
        uint32_t daif;

        /*
         * It was a user exception, stop user execution and return
         * to TEE Core.
         */
        ai->regs->x0 = TEE_ERROR_TARGET_DEAD;
        ai->regs->x1 = true;
        ai->regs->x2 = 0xdeadbeef;
        ai->regs->elr = (vaddr_t)thread_unwind_user_mode;
        ai->regs->sp_el0 = thread_get_saved_thread_sp();

        daif = (ai->regs->spsr >> SPSR_32_AIF_SHIFT) & SPSR_32_AIF_MASK;
        /* XXX what about DAIF_D? */
        ai->regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, daif);
}
#endif /*ARM64*/

void tee_pager_abort_handler(uint32_t abort_type,
                struct thread_abort_regs *regs)
{
        struct tee_pager_abort_info ai;

        set_abort_info(abort_type, regs, &ai);

        switch (tee_pager_get_fault_type(&ai)) {
        case TEE_PAGER_FAULT_TYPE_IGNORE:
                break;
        case TEE_PAGER_FAULT_TYPE_USER_TA_PANIC:
                handle_user_ta_panic(&ai);
                break;
        case TEE_PAGER_FAULT_TYPE_PAGEABLE:
        default:
                tee_pager_handle_fault(&ai);
                break;
        }
}

#ifdef CFG_WITH_PAGER
void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
{
        size_t n;

        DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
             vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);

        /* setup memory */
        for (n = 0; n < npages; n++) {
                struct tee_pager_pmem *pmem;
                tee_vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
                unsigned pgidx = core_mmu_va2idx(&tbl_info, va);
                paddr_t pa;
                uint32_t attr;

                core_mmu_get_entry(&tbl_info, pgidx, &pa, &attr);

                /* Ignore unmapped pages/blocks */
                if (!(attr & TEE_MATTR_VALID_BLOCK))
                        continue;

                pmem = malloc(sizeof(struct tee_pager_pmem));
                if (pmem == NULL) {
                        DMSG("Can't allocate memory");
                        panic();
                }

                pmem->pgidx = pgidx;
                pmem->area = NULL;

                if (unmap) {
                        /*
                         * Note that we're making the page inaccessible
                         * with the TEE_MATTR_PHYS_BLOCK attribute to
                         * indicate that the descriptor still holds a valid
                         * physical address of a page.
                         */
                        core_mmu_set_entry(&tbl_info, pgidx, pa,
                                TEE_MATTR_PHYS_BLOCK);
                }
                tee_pager_npages++;
                TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
        }

        if (unmap) {
                /* Invalidate secure TLB */
                core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
        }
}
#endif /*CFG_WITH_PAGER*/