1 // SPDX-License-Identifier: BSD-2-Clause 2 /* 3 * Copyright (c) 2016, Linaro Limited 4 * Copyright (c) 2014, STMicroelectronics International N.V. 5 */ 6 7 #include <arm.h> 8 #include <assert.h> 9 #include <io.h> 10 #include <keep.h> 11 #include <kernel/abort.h> 12 #include <kernel/asan.h> 13 #include <kernel/cache_helpers.h> 14 #include <kernel/panic.h> 15 #include <kernel/spinlock.h> 16 #include <kernel/tee_misc.h> 17 #include <kernel/tee_ta_manager.h> 18 #include <kernel/thread.h> 19 #include <kernel/tlb_helpers.h> 20 #include <kernel/user_mode_ctx.h> 21 #include <mm/core_memprot.h> 22 #include <mm/fobj.h> 23 #include <mm/tee_mm.h> 24 #include <mm/tee_pager.h> 25 #include <stdlib.h> 26 #include <sys/queue.h> 27 #include <tee_api_defines.h> 28 #include <trace.h> 29 #include <types_ext.h> 30 #include <utee_defines.h> 31 #include <util.h> 32 33 34 static struct tee_pager_area_head tee_pager_area_head = 35 TAILQ_HEAD_INITIALIZER(tee_pager_area_head); 36 37 #define INVALID_PGIDX UINT_MAX 38 #define PMEM_FLAG_DIRTY BIT(0) 39 #define PMEM_FLAG_HIDDEN BIT(1) 40 41 /* 42 * struct tee_pager_pmem - Represents a physical page used for paging. 43 * 44 * @flags flags defined by PMEM_FLAG_* above 45 * @fobj_pgidx index of the page in the @fobj 46 * @fobj File object of which a page is made visible. 47 * @va_alias Virtual address where the physical page always is aliased. 48 * Used during remapping of the page when the content need to 49 * be updated before it's available at the new location. 50 */ 51 struct tee_pager_pmem { 52 unsigned int flags; 53 unsigned int fobj_pgidx; 54 struct fobj *fobj; 55 void *va_alias; 56 TAILQ_ENTRY(tee_pager_pmem) link; 57 }; 58 59 /* The list of physical pages. 
The first page in the list is the oldest */ 60 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem); 61 62 static struct tee_pager_pmem_head tee_pager_pmem_head = 63 TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head); 64 65 static struct tee_pager_pmem_head tee_pager_lock_pmem_head = 66 TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head); 67 68 /* number of pages hidden */ 69 #define TEE_PAGER_NHIDE (tee_pager_npages / 3) 70 71 /* Number of registered physical pages, used hiding pages. */ 72 static size_t tee_pager_npages; 73 74 #ifdef CFG_WITH_STATS 75 static struct tee_pager_stats pager_stats; 76 77 static inline void incr_ro_hits(void) 78 { 79 pager_stats.ro_hits++; 80 } 81 82 static inline void incr_rw_hits(void) 83 { 84 pager_stats.rw_hits++; 85 } 86 87 static inline void incr_hidden_hits(void) 88 { 89 pager_stats.hidden_hits++; 90 } 91 92 static inline void incr_zi_released(void) 93 { 94 pager_stats.zi_released++; 95 } 96 97 static inline void incr_npages_all(void) 98 { 99 pager_stats.npages_all++; 100 } 101 102 static inline void set_npages(void) 103 { 104 pager_stats.npages = tee_pager_npages; 105 } 106 107 void tee_pager_get_stats(struct tee_pager_stats *stats) 108 { 109 *stats = pager_stats; 110 111 pager_stats.hidden_hits = 0; 112 pager_stats.ro_hits = 0; 113 pager_stats.rw_hits = 0; 114 pager_stats.zi_released = 0; 115 } 116 117 #else /* CFG_WITH_STATS */ 118 static inline void incr_ro_hits(void) { } 119 static inline void incr_rw_hits(void) { } 120 static inline void incr_hidden_hits(void) { } 121 static inline void incr_zi_released(void) { } 122 static inline void incr_npages_all(void) { } 123 static inline void set_npages(void) { } 124 125 void tee_pager_get_stats(struct tee_pager_stats *stats) 126 { 127 memset(stats, 0, sizeof(struct tee_pager_stats)); 128 } 129 #endif /* CFG_WITH_STATS */ 130 131 #define TBL_NUM_ENTRIES (CORE_MMU_PGDIR_SIZE / SMALL_PAGE_SIZE) 132 #define TBL_LEVEL CORE_MMU_PGDIR_LEVEL 133 #define TBL_SHIFT SMALL_PAGE_SHIFT 134 135 #define 
EFFECTIVE_VA_SIZE \ 136 (ROUNDUP(VCORE_START_VA + TEE_RAM_VA_SIZE, CORE_MMU_PGDIR_SIZE) - \ 137 ROUNDDOWN(VCORE_START_VA, CORE_MMU_PGDIR_SIZE)) 138 139 static struct pager_table { 140 struct pgt pgt; 141 struct core_mmu_table_info tbl_info; 142 } *pager_tables; 143 static unsigned int num_pager_tables; 144 145 static unsigned pager_spinlock = SPINLOCK_UNLOCK; 146 147 /* Defines the range of the alias area */ 148 static tee_mm_entry_t *pager_alias_area; 149 /* 150 * Physical pages are added in a stack like fashion to the alias area, 151 * @pager_alias_next_free gives the address of next free entry if 152 * @pager_alias_next_free is != 0 153 */ 154 static uintptr_t pager_alias_next_free; 155 156 #ifdef CFG_TEE_CORE_DEBUG 157 #define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai) 158 159 static uint32_t pager_lock_dldetect(const char *func, const int line, 160 struct abort_info *ai) 161 { 162 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL); 163 unsigned int retries = 0; 164 unsigned int reminder = 0; 165 166 while (!cpu_spin_trylock(&pager_spinlock)) { 167 retries++; 168 if (!retries) { 169 /* wrapped, time to report */ 170 trace_printf(func, line, TRACE_ERROR, true, 171 "possible spinlock deadlock reminder %u", 172 reminder); 173 if (reminder < UINT_MAX) 174 reminder++; 175 if (ai) 176 abort_print(ai); 177 } 178 } 179 180 return exceptions; 181 } 182 #else 183 static uint32_t pager_lock(struct abort_info __unused *ai) 184 { 185 return cpu_spin_lock_xsave(&pager_spinlock); 186 } 187 #endif 188 189 static uint32_t pager_lock_check_stack(size_t stack_size) 190 { 191 if (stack_size) { 192 int8_t buf[stack_size]; 193 size_t n; 194 195 /* 196 * Make sure to touch all pages of the stack that we expect 197 * to use with this lock held. We need to take eventual 198 * page faults before the lock is taken or we'll deadlock 199 * the pager. 
The pages that are populated in this way will
		 * eventually be released at certain save transitions of
		 * the thread.
		 */
		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
			io_write8((vaddr_t)buf + n, 1);
		/* Also touch the last byte in case stack_size isn't page aligned */
		io_write8((vaddr_t)buf + stack_size - 1, 1);
	}

	return pager_lock(NULL);
}

/* Releases the pager spinlock and restores the saved exception mask. */
static void pager_unlock(uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
}

/*
 * Returns the virtual address at which physical address @pa is mapped in
 * the pager translation tables, or NULL if @pa isn't mapped there.
 */
void *tee_pager_phys_to_virt(paddr_t pa)
{
	struct core_mmu_table_info ti;
	unsigned idx;
	uint32_t a;
	paddr_t p;
	vaddr_t v;
	size_t n;

	/*
	 * Most addresses are mapped linearly, try that first if possible.
	 */
	if (!tee_pager_get_table_info(pa, &ti))
		return NULL; /* impossible pa */
	idx = core_mmu_va2idx(&ti, pa);
	core_mmu_get_entry(&ti, idx, &p, &a);
	if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
		return (void *)core_mmu_idx2va(&ti, idx);

	/* Fall back to scanning every valid entry in the pager tables. */
	n = 0;
	idx = core_mmu_va2idx(&pager_tables[n].tbl_info, TEE_RAM_VA_START);
	while (true) {
		while (idx < TBL_NUM_ENTRIES) {
			v = core_mmu_idx2va(&pager_tables[n].tbl_info, idx);
			if (v >= (TEE_RAM_VA_START + TEE_RAM_VA_SIZE))
				return NULL;

			core_mmu_get_entry(&pager_tables[n].tbl_info,
					   idx, &p, &a);
			if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
				return (void *)v;
			idx++;
		}

		n++;
		if (n >= num_pager_tables)
			return NULL;
		idx = 0;
	}

	return NULL;
}

/* True if the page is currently hidden (unmapped for access detection). */
static bool pmem_is_hidden(struct tee_pager_pmem *pmem)
{
	return pmem->flags & PMEM_FLAG_HIDDEN;
}

/* True if the page has been written to and must be saved before reuse. */
static bool pmem_is_dirty(struct tee_pager_pmem *pmem)
{
	return pmem->flags & PMEM_FLAG_DIRTY;
}

/*
 * True if @pmem holds a page backed by @area's fobj and that page index
 * lies within the range of fobj pages covered by @area.
 */
static bool pmem_is_covered_by_area(struct tee_pager_pmem *pmem,
				    struct tee_pager_area *area)
{
	if (pmem->fobj != area->fobj)
		return false;
	if (pmem->fobj_pgidx < area->fobj_pgoffs)
		return false;
	if ((pmem->fobj_pgidx - area->fobj_pgoffs) >=
(area->size >> SMALL_PAGE_SHIFT))
		return false;

	return true;
}

/*
 * Returns the index in @area's page table of the entry mapping the page
 * held by @pmem, derived from the page's position within the fobj.
 */
static size_t pmem_get_area_tblidx(struct tee_pager_pmem *pmem,
				   struct tee_pager_area *area)
{
	size_t tbloffs = (area->base & CORE_MMU_PGDIR_MASK) >> SMALL_PAGE_SHIFT;

	return pmem->fobj_pgidx - area->fobj_pgoffs + tbloffs;
}

/* Finds the pager table covering @va, or NULL if @va is out of range. */
static struct pager_table *find_pager_table_may_fail(vaddr_t va)
{
	size_t n;
	const vaddr_t mask = CORE_MMU_PGDIR_MASK;

	if (!pager_tables)
		return NULL;

	n = ((va & ~mask) - pager_tables[0].tbl_info.va_base) >>
	    CORE_MMU_PGDIR_SHIFT;
	if (n >= num_pager_tables)
		return NULL;

	assert(va >= pager_tables[n].tbl_info.va_base &&
	       va <= (pager_tables[n].tbl_info.va_base | mask));

	return pager_tables + n;
}

/* As find_pager_table_may_fail() but asserts that a table is found. */
static struct pager_table *find_pager_table(vaddr_t va)
{
	struct pager_table *pt = find_pager_table_may_fail(va);

	assert(pt);
	return pt;
}

/* Copies out table info covering @va, returns false if @va isn't paged. */
bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti)
{
	struct pager_table *pt = find_pager_table_may_fail(va);

	if (!pt)
		return false;

	*ti = pt->tbl_info;
	return true;
}

static struct core_mmu_table_info *find_table_info(vaddr_t va)
{
	return &find_pager_table(va)->tbl_info;
}

static struct pgt *find_core_pgt(vaddr_t va)
{
	return &find_pager_table(va)->pgt;
}

/*
 * Records the alias area used for aliasing pager physical pages and
 * clears any existing mappings inside it.
 */
void tee_pager_set_alias_area(tee_mm_entry_t *mm)
{
	struct pager_table *pt;
	unsigned idx;
	vaddr_t smem = tee_mm_get_smem(mm);
	size_t nbytes = tee_mm_get_bytes(mm);
	vaddr_t v;
	uint32_t a = 0;

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);

	/* Only one alias area may ever be registered */
	assert(!pager_alias_area);
	pager_alias_area = mm;
	pager_alias_next_free = smem;

	/* Clear all mapping in the alias area */
	pt = find_pager_table(smem);
	idx = core_mmu_va2idx(&pt->tbl_info, smem);
	while (pt <= (pager_tables +
num_pager_tables - 1)) { 358 while (idx < TBL_NUM_ENTRIES) { 359 v = core_mmu_idx2va(&pt->tbl_info, idx); 360 if (v >= (smem + nbytes)) 361 goto out; 362 363 core_mmu_get_entry(&pt->tbl_info, idx, NULL, &a); 364 core_mmu_set_entry(&pt->tbl_info, idx, 0, 0); 365 if (a & TEE_MATTR_VALID_BLOCK) 366 pgt_dec_used_entries(&pt->pgt); 367 idx++; 368 } 369 370 pt++; 371 idx = 0; 372 } 373 374 out: 375 tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE); 376 } 377 378 static size_t tbl_usage_count(struct core_mmu_table_info *ti) 379 { 380 size_t n; 381 uint32_t a = 0; 382 size_t usage = 0; 383 384 for (n = 0; n < ti->num_entries; n++) { 385 core_mmu_get_entry(ti, n, NULL, &a); 386 if (a & TEE_MATTR_VALID_BLOCK) 387 usage++; 388 } 389 return usage; 390 } 391 392 static void area_get_entry(struct tee_pager_area *area, size_t idx, 393 paddr_t *pa, uint32_t *attr) 394 { 395 assert(area->pgt); 396 assert(idx < TBL_NUM_ENTRIES); 397 core_mmu_get_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr); 398 } 399 400 static void area_set_entry(struct tee_pager_area *area, size_t idx, 401 paddr_t pa, uint32_t attr) 402 { 403 assert(area->pgt); 404 assert(idx < TBL_NUM_ENTRIES); 405 core_mmu_set_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr); 406 } 407 408 static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va) 409 { 410 return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT; 411 } 412 413 static vaddr_t area_idx2va(struct tee_pager_area *area, size_t idx) 414 { 415 return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK); 416 } 417 418 static void area_tlbi_entry(struct tee_pager_area *area, size_t idx) 419 { 420 vaddr_t va = area_idx2va(area, idx); 421 422 #if defined(CFG_PAGED_USER_TA) 423 assert(area->pgt); 424 if (area->pgt->ctx) { 425 uint32_t asid = to_user_mode_ctx(area->pgt->ctx)->vm_info.asid; 426 427 tlbi_mva_asid(va, asid); 428 return; 429 } 430 #endif 431 tlbi_mva_allasid(va); 432 } 433 434 static void pmem_unmap(struct 
tee_pager_pmem *pmem, struct pgt *only_this_pgt)
{
	struct tee_pager_area *area = NULL;
	size_t tblidx = 0;
	uint32_t a = 0;

	/* Unmap the page from every area sharing it via the fobj. */
	TAILQ_FOREACH(area, &pmem->fobj->areas, fobj_link) {
		/*
		 * If only_this_pgt points to a pgt then the pgt of this
		 * area has to match or we'll skip over it.
		 */
		if (only_this_pgt && area->pgt != only_this_pgt)
			continue;
		if (!area->pgt || !pmem_is_covered_by_area(pmem, area))
			continue;
		tblidx = pmem_get_area_tblidx(pmem, area);
		area_get_entry(area, tblidx, NULL, &a);
		if (a & TEE_MATTR_VALID_BLOCK) {
			/* Invalidate the entry, then its TLB entry */
			area_set_entry(area, tblidx, 0, 0);
			pgt_dec_used_entries(area->pgt);
			area_tlbi_entry(area, tblidx);
		}
	}
}

/*
 * Allocates and initializes the pager translation table bookkeeping
 * covering the TEE core virtual address space. Panics on failure since
 * the pager can't work without its tables.
 */
void tee_pager_early_init(void)
{
	size_t n = 0;

	num_pager_tables = EFFECTIVE_VA_SIZE / CORE_MMU_PGDIR_SIZE;
	pager_tables = calloc(num_pager_tables, sizeof(*pager_tables));
	if (!pager_tables)
		panic("Cannot allocate pager_tables");

	/*
	 * Note that this depends on add_pager_vaspace() adding vaspace
	 * after end of memory.
471 */ 472 for (n = 0; n < num_pager_tables; n++) { 473 if (!core_mmu_find_table(NULL, TEE_RAM_VA_START + 474 n * CORE_MMU_PGDIR_SIZE, UINT_MAX, 475 &pager_tables[n].tbl_info)) 476 panic("can't find mmu tables"); 477 478 if (pager_tables[n].tbl_info.shift != TBL_SHIFT) 479 panic("Unsupported page size in translation table"); 480 assert(pager_tables[n].tbl_info.num_entries == TBL_NUM_ENTRIES); 481 assert(pager_tables[n].tbl_info.level == TBL_LEVEL); 482 483 pager_tables[n].pgt.tbl = pager_tables[n].tbl_info.table; 484 pgt_set_used_entries(&pager_tables[n].pgt, 485 tbl_usage_count(&pager_tables[n].tbl_info)); 486 } 487 } 488 489 static void *pager_add_alias_page(paddr_t pa) 490 { 491 unsigned idx; 492 struct core_mmu_table_info *ti; 493 /* Alias pages mapped without write permission: runtime will care */ 494 uint32_t attr = TEE_MATTR_VALID_BLOCK | 495 (TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) | 496 TEE_MATTR_SECURE | TEE_MATTR_PR; 497 498 DMSG("0x%" PRIxPA, pa); 499 500 ti = find_table_info(pager_alias_next_free); 501 idx = core_mmu_va2idx(ti, pager_alias_next_free); 502 core_mmu_set_entry(ti, idx, pa, attr); 503 pgt_inc_used_entries(find_core_pgt(pager_alias_next_free)); 504 pager_alias_next_free += SMALL_PAGE_SIZE; 505 if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) + 506 tee_mm_get_bytes(pager_alias_area))) 507 pager_alias_next_free = 0; 508 return (void *)core_mmu_idx2va(ti, idx); 509 } 510 511 static void area_insert(struct tee_pager_area_head *head, 512 struct tee_pager_area *area, 513 struct tee_pager_area *a_prev) 514 { 515 uint32_t exceptions = pager_lock_check_stack(8); 516 517 if (a_prev) 518 TAILQ_INSERT_AFTER(head, a_prev, area, link); 519 else 520 TAILQ_INSERT_HEAD(head, area, link); 521 TAILQ_INSERT_TAIL(&area->fobj->areas, area, fobj_link); 522 523 pager_unlock(exceptions); 524 } 525 KEEP_PAGER(area_insert); 526 527 void tee_pager_add_core_area(vaddr_t base, enum tee_pager_area_type type, 528 struct fobj *fobj) 529 { 530 struct 
tee_pager_area *area = NULL; 531 uint32_t flags = 0; 532 size_t fobj_pgoffs = 0; 533 vaddr_t b = base; 534 size_t s = fobj->num_pages * SMALL_PAGE_SIZE; 535 size_t s2 = 0; 536 537 DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : type %d", base, base + s, type); 538 539 if (base & SMALL_PAGE_MASK || !s) { 540 EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, s); 541 panic(); 542 } 543 544 switch (type) { 545 case PAGER_AREA_TYPE_RO: 546 flags = TEE_MATTR_PRX; 547 break; 548 case PAGER_AREA_TYPE_RW: 549 case PAGER_AREA_TYPE_LOCK: 550 flags = TEE_MATTR_PRW; 551 break; 552 default: 553 panic(); 554 } 555 556 if (!fobj) 557 panic(); 558 559 while (s) { 560 s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s); 561 area = calloc(1, sizeof(*area)); 562 if (!area) 563 panic("alloc_area"); 564 565 area->fobj = fobj_get(fobj); 566 area->fobj_pgoffs = fobj_pgoffs; 567 area->type = type; 568 area->pgt = find_core_pgt(b); 569 area->base = b; 570 area->size = s2; 571 area->flags = flags; 572 area_insert(&tee_pager_area_head, area, NULL); 573 574 b += s2; 575 s -= s2; 576 fobj_pgoffs += s2 / SMALL_PAGE_SIZE; 577 } 578 } 579 580 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas, 581 vaddr_t va) 582 { 583 struct tee_pager_area *area; 584 585 if (!areas) 586 return NULL; 587 588 TAILQ_FOREACH(area, areas, link) { 589 if (core_is_buffer_inside(va, 1, area->base, area->size)) 590 return area; 591 } 592 return NULL; 593 } 594 595 #ifdef CFG_PAGED_USER_TA 596 static struct tee_pager_area *find_uta_area(vaddr_t va) 597 { 598 struct tee_ta_ctx *ctx = thread_get_tsd()->ctx; 599 600 if (!is_user_mode_ctx(ctx)) 601 return NULL; 602 return find_area(to_user_mode_ctx(ctx)->areas, va); 603 } 604 #else 605 static struct tee_pager_area *find_uta_area(vaddr_t va __unused) 606 { 607 return NULL; 608 } 609 #endif /*CFG_PAGED_USER_TA*/ 610 611 612 static uint32_t get_area_mattr(uint32_t area_flags) 613 { 614 uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE | 615 
TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));

	return attr;
}

/* Returns the physical address of the page aliased at @pmem->va_alias. */
static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
{
	struct core_mmu_table_info *ti;
	paddr_t pa;
	unsigned idx;

	ti = find_table_info((vaddr_t)pmem->va_alias);
	idx = core_mmu_va2idx(ti, (vaddr_t)pmem->va_alias);
	core_mmu_get_entry(ti, idx, &pa, NULL);
	return pa;
}

/*
 * Loads the content of the page at @page_va in @area from its backing
 * fobj, writing through the alias mapping @va_alias. Panics if the
 * backing store fails to produce the page.
 */
static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
				void *va_alias)
{
	size_t fobj_pgoffs = ((page_va - area->base) >> SMALL_PAGE_SHIFT) +
			     area->fobj_pgoffs;
	struct core_mmu_table_info *ti;
	uint32_t attr_alias;
	paddr_t pa_alias;
	unsigned int idx_alias;

	/* Ensure we are allowed to write to aliased virtual page */
	ti = find_table_info((vaddr_t)va_alias);
	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
	if (!(attr_alias & TEE_MATTR_PW)) {
		attr_alias |= TEE_MATTR_PW;
		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
		tlbi_mva_allasid((vaddr_t)va_alias);
	}

	asan_tag_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
	if (fobj_load_page(area->fobj, fobj_pgoffs, va_alias)) {
		EMSG("PH 0x%" PRIxVA " failed", page_va);
		panic();
	}
	switch (area->type) {
	case PAGER_AREA_TYPE_RO:
		incr_ro_hits();
		/* Forbid write to aliases for read-only (maybe exec) pages */
		attr_alias &= ~TEE_MATTR_PW;
		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
		tlbi_mva_allasid((vaddr_t)va_alias);
		break;
	case PAGER_AREA_TYPE_RW:
		incr_rw_hits();
		break;
	case PAGER_AREA_TYPE_LOCK:
		break;
	default:
		panic();
	}
	asan_tag_no_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
}

/* Writes a dirty page back to its backing fobj via the alias mapping. */
static void tee_pager_save_page(struct tee_pager_pmem *pmem)
{
	if (pmem_is_dirty(pmem)) {
asan_tag_access(pmem->va_alias, 681 (uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE); 682 if (fobj_save_page(pmem->fobj, pmem->fobj_pgidx, 683 pmem->va_alias)) 684 panic("fobj_save_page"); 685 asan_tag_no_access(pmem->va_alias, 686 (uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE); 687 } 688 } 689 690 #ifdef CFG_PAGED_USER_TA 691 static void unlink_area(struct tee_pager_area_head *area_head, 692 struct tee_pager_area *area) 693 { 694 uint32_t exceptions = pager_lock_check_stack(64); 695 696 TAILQ_REMOVE(area_head, area, link); 697 TAILQ_REMOVE(&area->fobj->areas, area, fobj_link); 698 699 pager_unlock(exceptions); 700 } 701 KEEP_PAGER(unlink_area); 702 703 static void free_area(struct tee_pager_area *area) 704 { 705 fobj_put(area->fobj); 706 free(area); 707 } 708 709 static TEE_Result pager_add_um_area(struct user_mode_ctx *uctx, vaddr_t base, 710 struct fobj *fobj, uint32_t prot) 711 { 712 struct tee_pager_area *a_prev = NULL; 713 struct tee_pager_area *area = NULL; 714 vaddr_t b = base; 715 size_t fobj_pgoffs = 0; 716 size_t s = fobj->num_pages * SMALL_PAGE_SIZE; 717 718 if (!uctx->areas) { 719 uctx->areas = malloc(sizeof(*uctx->areas)); 720 if (!uctx->areas) 721 return TEE_ERROR_OUT_OF_MEMORY; 722 TAILQ_INIT(uctx->areas); 723 } 724 725 area = TAILQ_FIRST(uctx->areas); 726 while (area) { 727 if (core_is_buffer_intersect(b, s, area->base, 728 area->size)) 729 return TEE_ERROR_BAD_PARAMETERS; 730 if (b < area->base) 731 break; 732 a_prev = area; 733 area = TAILQ_NEXT(area, link); 734 } 735 736 while (s) { 737 size_t s2; 738 739 s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s); 740 area = calloc(1, sizeof(*area)); 741 if (!area) 742 return TEE_ERROR_OUT_OF_MEMORY; 743 744 /* Table info will be set when the context is activated. 
*/ 745 area->fobj = fobj_get(fobj); 746 area->fobj_pgoffs = fobj_pgoffs; 747 area->type = PAGER_AREA_TYPE_RW; 748 area->base = b; 749 area->size = s2; 750 area->flags = prot; 751 752 area_insert(uctx->areas, area, a_prev); 753 754 a_prev = area; 755 b += s2; 756 s -= s2; 757 fobj_pgoffs += s2 / SMALL_PAGE_SIZE; 758 } 759 760 return TEE_SUCCESS; 761 } 762 763 TEE_Result tee_pager_add_um_area(struct user_mode_ctx *uctx, vaddr_t base, 764 struct fobj *fobj, uint32_t prot) 765 { 766 TEE_Result res = TEE_SUCCESS; 767 struct thread_specific_data *tsd = thread_get_tsd(); 768 struct tee_pager_area *area = NULL; 769 struct core_mmu_table_info dir_info = { NULL }; 770 771 if (&uctx->ctx != tsd->ctx) { 772 /* 773 * Changes are to an utc that isn't active. Just add the 774 * areas page tables will be dealt with later. 775 */ 776 return pager_add_um_area(uctx, base, fobj, prot); 777 } 778 779 /* 780 * Assign page tables before adding areas to be able to tell which 781 * are newly added and should be removed in case of failure. 782 */ 783 tee_pager_assign_um_tables(uctx); 784 res = pager_add_um_area(uctx, base, fobj, prot); 785 if (res) { 786 struct tee_pager_area *next_a; 787 788 /* Remove all added areas */ 789 TAILQ_FOREACH_SAFE(area, uctx->areas, link, next_a) { 790 if (!area->pgt) { 791 unlink_area(uctx->areas, area); 792 free_area(area); 793 } 794 } 795 return res; 796 } 797 798 /* 799 * Assign page tables to the new areas and make sure that the page 800 * tables are registered in the upper table. 801 */ 802 tee_pager_assign_um_tables(uctx); 803 core_mmu_get_user_pgdir(&dir_info); 804 TAILQ_FOREACH(area, uctx->areas, link) { 805 paddr_t pa; 806 size_t idx; 807 uint32_t attr; 808 809 idx = core_mmu_va2idx(&dir_info, area->pgt->vabase); 810 core_mmu_get_entry(&dir_info, idx, &pa, &attr); 811 812 /* 813 * Check if the page table already is used, if it is, it's 814 * already registered. 
815 */ 816 if (area->pgt->num_used_entries) { 817 assert(attr & TEE_MATTR_TABLE); 818 assert(pa == virt_to_phys(area->pgt->tbl)); 819 continue; 820 } 821 822 attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE; 823 pa = virt_to_phys(area->pgt->tbl); 824 assert(pa); 825 /* 826 * Note that the update of the table entry is guaranteed to 827 * be atomic. 828 */ 829 core_mmu_set_entry(&dir_info, idx, pa, attr); 830 } 831 832 return TEE_SUCCESS; 833 } 834 835 static void split_area(struct tee_pager_area_head *area_head, 836 struct tee_pager_area *area, struct tee_pager_area *a2, 837 vaddr_t va) 838 { 839 uint32_t exceptions = pager_lock_check_stack(64); 840 size_t diff = va - area->base; 841 842 a2->fobj = fobj_get(area->fobj); 843 a2->fobj_pgoffs = area->fobj_pgoffs + diff / SMALL_PAGE_SIZE; 844 a2->type = area->type; 845 a2->flags = area->flags; 846 a2->base = va; 847 a2->size = area->size - diff; 848 a2->pgt = area->pgt; 849 area->size = diff; 850 851 TAILQ_INSERT_AFTER(area_head, area, a2, link); 852 TAILQ_INSERT_AFTER(&area->fobj->areas, area, a2, fobj_link); 853 854 pager_unlock(exceptions); 855 } 856 KEEP_PAGER(split_area); 857 858 TEE_Result tee_pager_split_um_region(struct user_mode_ctx *uctx, vaddr_t va) 859 { 860 struct tee_pager_area *area = NULL; 861 struct tee_pager_area *a2 = NULL; 862 863 if (va & SMALL_PAGE_MASK) 864 return TEE_ERROR_BAD_PARAMETERS; 865 866 TAILQ_FOREACH(area, uctx->areas, link) { 867 if (va == area->base || va == area->base + area->size) 868 return TEE_SUCCESS; 869 if (va > area->base && va < area->base + area->size) { 870 a2 = calloc(1, sizeof(*a2)); 871 if (!a2) 872 return TEE_ERROR_OUT_OF_MEMORY; 873 split_area(uctx->areas, area, a2, va); 874 return TEE_SUCCESS; 875 } 876 } 877 878 return TEE_SUCCESS; 879 } 880 881 static void merge_area_with_next(struct tee_pager_area_head *area_head, 882 struct tee_pager_area *a, 883 struct tee_pager_area *a_next) 884 { 885 uint32_t exceptions = pager_lock_check_stack(64); 886 887 TAILQ_REMOVE(area_head, 
a_next, link);
	TAILQ_REMOVE(&a_next->fobj->areas, a_next, fobj_link);
	/* Grow @a to also cover the range @a_next used to cover */
	a->size += a_next->size;

	pager_unlock(exceptions);
}
KEEP_PAGER(merge_area_with_next);

/*
 * Merges adjacent areas intersecting [va, va + len) when they share the
 * same fobj with contiguous page offsets and have identical type, flags
 * and page table. Silently returns on unaligned input.
 */
void tee_pager_merge_um_region(struct user_mode_ctx *uctx, vaddr_t va,
			       size_t len)
{
	struct tee_pager_area *a_next = NULL;
	struct tee_pager_area *a = NULL;

	if ((va | len) & SMALL_PAGE_MASK)
		return;

	for (a = TAILQ_FIRST(uctx->areas);; a = a_next) {
		a_next = TAILQ_NEXT(a, link);
		if (!a_next)
			return;

		/* Try merging with the area just before va */
		if (a->base + a->size < va)
			continue;

		/*
		 * If a->base is well past our range we're done.
		 * Note that if it's just the page after our range we'll
		 * try to merge.
		 */
		if (a->base > va + len)
			return;

		/* Candidates must be adjacent and otherwise compatible */
		if (a->base + a->size != a_next->base)
			continue;
		if (a->fobj != a_next->fobj || a->type != a_next->type ||
		    a->flags != a_next->flags || a->pgt != a_next->pgt)
			continue;
		if (a->fobj_pgoffs + a->size / SMALL_PAGE_SIZE !=
		    a_next->fobj_pgoffs)
			continue;

		merge_area_with_next(uctx->areas, a, a_next);
		free_area(a_next);
		/* Revisit @a: it may now merge with its new neighbor too */
		a_next = a;
	}
}

/*
 * Unlinks @area from @area_head and its fobj, unmaps any of its pages
 * still resident and frees the area. Runs under the pager lock.
 */
static void rem_area(struct tee_pager_area_head *area_head,
		     struct tee_pager_area *area)
{
	struct tee_pager_pmem *pmem;
	size_t last_pgoffs = area->fobj_pgoffs +
			     (area->size >> SMALL_PAGE_SHIFT) - 1;
	uint32_t exceptions;
	size_t idx = 0;
	uint32_t a = 0;

	exceptions = pager_lock_check_stack(64);

	TAILQ_REMOVE(area_head, area, link);
	TAILQ_REMOVE(&area->fobj->areas, area, fobj_link);

	/* Unmap every resident page belonging to this area */
	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		if (pmem->fobj != area->fobj ||
		    pmem->fobj_pgidx < area->fobj_pgoffs ||
		    pmem->fobj_pgidx > last_pgoffs)
			continue;

		idx = pmem_get_area_tblidx(pmem, area);
		area_get_entry(area, idx, NULL, &a);
		if (!(a & TEE_MATTR_VALID_BLOCK))
			continue;

area_set_entry(area, idx, 0, 0); 963 area_tlbi_entry(area, idx); 964 pgt_dec_used_entries(area->pgt); 965 } 966 967 pager_unlock(exceptions); 968 969 free_area(area); 970 } 971 KEEP_PAGER(rem_area); 972 973 void tee_pager_rem_um_region(struct user_mode_ctx *uctx, vaddr_t base, 974 size_t size) 975 { 976 struct tee_pager_area *area; 977 struct tee_pager_area *next_a; 978 size_t s = ROUNDUP(size, SMALL_PAGE_SIZE); 979 980 TAILQ_FOREACH_SAFE(area, uctx->areas, link, next_a) { 981 if (core_is_buffer_inside(area->base, area->size, base, s)) 982 rem_area(uctx->areas, area); 983 } 984 tlbi_asid(uctx->vm_info.asid); 985 } 986 987 void tee_pager_rem_um_areas(struct user_mode_ctx *uctx) 988 { 989 struct tee_pager_area *area = NULL; 990 991 if (!uctx->areas) 992 return; 993 994 while (true) { 995 area = TAILQ_FIRST(uctx->areas); 996 if (!area) 997 break; 998 unlink_area(uctx->areas, area); 999 free_area(area); 1000 } 1001 1002 free(uctx->areas); 1003 } 1004 1005 static bool __maybe_unused same_context(struct tee_pager_pmem *pmem) 1006 { 1007 struct tee_pager_area *a = TAILQ_FIRST(&pmem->fobj->areas); 1008 void *ctx = a->pgt->ctx; 1009 1010 do { 1011 a = TAILQ_NEXT(a, fobj_link); 1012 if (!a) 1013 return true; 1014 } while (a->pgt->ctx == ctx); 1015 1016 return false; 1017 } 1018 1019 bool tee_pager_set_um_area_attr(struct user_mode_ctx *uctx, vaddr_t base, 1020 size_t size, uint32_t flags) 1021 { 1022 bool ret = false; 1023 vaddr_t b = base; 1024 size_t s = size; 1025 size_t s2 = 0; 1026 struct tee_pager_area *area = find_area(uctx->areas, b); 1027 uint32_t exceptions = 0; 1028 struct tee_pager_pmem *pmem = NULL; 1029 uint32_t a = 0; 1030 uint32_t f = 0; 1031 uint32_t mattr = 0; 1032 uint32_t f2 = 0; 1033 size_t tblidx = 0; 1034 1035 f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR; 1036 if (f & TEE_MATTR_UW) 1037 f |= TEE_MATTR_PW; 1038 mattr = get_area_mattr(f); 1039 1040 exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE); 1041 1042 while (s) { 1043 s2 = 
MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s); 1044 if (!area || area->base != b || area->size != s2) { 1045 ret = false; 1046 goto out; 1047 } 1048 b += s2; 1049 s -= s2; 1050 1051 if (area->flags == f) 1052 goto next_area; 1053 1054 TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) { 1055 if (!pmem_is_covered_by_area(pmem, area)) 1056 continue; 1057 1058 tblidx = pmem_get_area_tblidx(pmem, area); 1059 area_get_entry(area, tblidx, NULL, &a); 1060 if (a == f) 1061 continue; 1062 area_set_entry(area, tblidx, 0, 0); 1063 area_tlbi_entry(area, tblidx); 1064 1065 pmem->flags &= ~PMEM_FLAG_HIDDEN; 1066 if (pmem_is_dirty(pmem)) 1067 f2 = mattr; 1068 else 1069 f2 = mattr & ~(TEE_MATTR_UW | TEE_MATTR_PW); 1070 area_set_entry(area, tblidx, get_pmem_pa(pmem), f2); 1071 if (!(a & TEE_MATTR_VALID_BLOCK)) 1072 pgt_inc_used_entries(area->pgt); 1073 /* 1074 * Make sure the table update is visible before 1075 * continuing. 1076 */ 1077 dsb_ishst(); 1078 1079 /* 1080 * Here's a problem if this page already is shared. 1081 * We need do icache invalidate for each context 1082 * in which it is shared. In practice this will 1083 * never happen. 1084 */ 1085 if (flags & TEE_MATTR_UX) { 1086 void *va = (void *)area_idx2va(area, tblidx); 1087 1088 /* Assert that the pmem isn't shared. 
 */
				assert(same_context(pmem));

				dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
				icache_inv_user_range(va, SMALL_PAGE_SIZE);
			}
		}

		area->flags = f;
next_area:
		area = TAILQ_NEXT(area, link);
	}

	ret = true;
out:
	pager_unlock(exceptions);
	return ret;
}

KEEP_PAGER(tee_pager_set_um_area_attr);
#endif /*CFG_PAGED_USER_TA*/

/*
 * tee_pager_invalidate_fobj() - Disconnect all resident pages from @fobj.
 * @fobj:	file object whose pages are to be dropped
 *
 * Every pmem currently backed by @fobj is detached (fobj pointer cleared,
 * page index set to INVALID_PGIDX) so the physical page can be recycled
 * without being written back. Runs with the pager lock held and a stack
 * depth check of 64 bytes.
 */
void tee_pager_invalidate_fobj(struct fobj *fobj)
{
	struct tee_pager_pmem *pmem;
	uint32_t exceptions;

	exceptions = pager_lock_check_stack(64);

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		if (pmem->fobj == fobj) {
			pmem->fobj = NULL;
			pmem->fobj_pgidx = INVALID_PGIDX;
		}
	}

	pager_unlock(exceptions);
}
KEEP_PAGER(tee_pager_invalidate_fobj);

/*
 * pmem_find() - Find the resident physical page that maps at table index
 * @tblidx in @area.
 *
 * Returns the matching pmem, or NULL if that page isn't currently paged
 * in. Must be called with the pager lock held (it walks the shared pmem
 * list) — assumption based on the visible callers; confirm against the
 * rest of the file.
 */
static struct tee_pager_pmem *pmem_find(struct tee_pager_area *area,
					unsigned int tblidx)
{
	struct tee_pager_pmem *pmem = NULL;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link)
		if (pmem->fobj == area->fobj &&
		    pmem_get_area_tblidx(pmem, area) == tblidx)
			return pmem;

	return NULL;
}

/*
 * tee_pager_unhide_page() - Re-map a hidden (resident but unmapped) page.
 * @area:	area the page belongs to
 * @tblidx:	index of the page table entry within @area's table
 *
 * Returns false if the page isn't resident or the entry is already valid
 * (nothing to do), true if the page was made visible again. On success the
 * pmem is moved to the tail of the pmem list, i.e. it's treated as the
 * most recently used page.
 */
static bool tee_pager_unhide_page(struct tee_pager_area *area,
				  unsigned int tblidx)
{
	struct tee_pager_pmem *pmem = pmem_find(area, tblidx);
	uint32_t a = get_area_mattr(area->flags);
	uint32_t attr = 0;
	paddr_t pa = 0;

	if (!pmem)
		return false;

	area_get_entry(area, tblidx, NULL, &attr);
	if (attr & TEE_MATTR_VALID_BLOCK)
		return false;

	/*
	 * The page is hidden, or not mapped yet. Unhide the page and
	 * move it to the tail.
	 *
	 * Since the page isn't mapped there doesn't exist a valid TLB entry
	 * for this address, so no TLB invalidation is required after setting
	 * the new entry. A DSB is needed though, to make the write visible.
	 *
	 * For user executable pages it's more complicated. Those pages can
	 * be shared between multiple TA mappings and thus populated by
	 * another TA. The reference manual states that:
	 *
	 * "instruction cache maintenance is required only after writing
	 * new data to a physical address that holds an instruction."
	 *
	 * So for hidden pages we would not need to invalidate i-cache, but
	 * for newly populated pages we do. Since we don't know which we
	 * have to assume the worst and always invalidate the i-cache. We
	 * don't need to clean the d-cache though, since that has already
	 * been done earlier.
	 *
	 * Additional bookkeeping to tell if the i-cache invalidation is
	 * needed or not is left as a future optimization.
	 */

	/* If it's not a dirty block, then it should be read only. */
	if (!pmem_is_dirty(pmem))
		a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);

	pa = get_pmem_pa(pmem);
	pmem->flags &= ~PMEM_FLAG_HIDDEN;
	if (area->flags & TEE_MATTR_UX) {
		void *va = (void *)area_idx2va(area, tblidx);

		/*
		 * Set a temporary read-only mapping so the i-cache can be
		 * invalidated through a mapping that can't be executed from
		 * yet.
		 */
		assert(!(a & (TEE_MATTR_UW | TEE_MATTR_PW)));
		area_set_entry(area, tblidx, pa, a & ~TEE_MATTR_UX);
		dsb_ishst();

		icache_inv_user_range(va, SMALL_PAGE_SIZE);

		/* Set the final (executable) mapping */
		area_set_entry(area, tblidx, pa, a);
		area_tlbi_entry(area, tblidx);
	} else {
		area_set_entry(area, tblidx, pa, a);
		/* Entry was invalid: no TLBI needed, just make it visible */
		dsb_ishst();
	}
	pgt_inc_used_entries(area->pgt);

	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
	TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	incr_hidden_hits();
	return true;
}

/*
 * tee_pager_hide_pages() - Hide up to TEE_PAGER_NHIDE pages.
 *
 * Walks from the head (oldest end) of the pmem list, marking pages
 * PMEM_FLAG_HIDDEN and unmapping them. A later access to a hidden page
 * faults and is resolved cheaply by tee_pager_unhide_page(), which
 * re-queues the page at the tail — this is how the pager keeps the list
 * ordered by recency of use.
 */
static void tee_pager_hide_pages(void)
{
	struct tee_pager_pmem *pmem = NULL;
	size_t n = 0;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		if (n >= TEE_PAGER_NHIDE)
			break;
		n++;

		/* We cannot hide pages when pmem->fobj is not defined. */
		if (!pmem->fobj)
			continue;

		if (pmem_is_hidden(pmem))
			continue;

		pmem->flags |= PMEM_FLAG_HIDDEN;
		pmem_unmap(pmem, NULL);
	}
}

/*
 * Find a locked pmem mapping @page_va in @area, unmap it and move it back
 * to the head of the pageable pmem list.
 * Return false if page was not mapped, and true if page was mapped.
 */
static bool tee_pager_release_one_phys(struct tee_pager_area *area,
				       vaddr_t page_va)
{
	struct tee_pager_pmem *pmem;
	size_t tblidx = 0;
	/* Index of @page_va's page within @area's backing fobj */
	size_t pgidx = area_va2idx(area, page_va) + area->fobj_pgoffs -
		       ((area->base & CORE_MMU_PGDIR_MASK) >>
			SMALL_PAGE_SHIFT);

	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
		if (pmem->fobj != area->fobj || pmem->fobj_pgidx != pgidx)
			continue;

		/*
		 * Locked pages may not be shared, these two asserts check
		 * that there's only a single area recorded with this pmem.
		 */
		assert(TAILQ_FIRST(&pmem->fobj->areas) == area);
		assert(TAILQ_LAST(&pmem->fobj->areas,
				  tee_pager_area_head) == area);

		tblidx = pmem_get_area_tblidx(pmem, area);
		area_set_entry(area, tblidx, 0, 0);
		pgt_dec_used_entries(area->pgt);
		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
		pmem->fobj = NULL;
		pmem->fobj_pgidx = INVALID_PGIDX;
		tee_pager_npages++;
		set_npages();
		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
		incr_zi_released();
		return true;
	}

	return false;
}

/*
 * tee_pager_get_page() - Take a physical page for paging in new content.
 * @at:		type of the area the page will be mapped into
 *
 * Finds the oldest page (head of the pmem list) and unmaps it from all
 * tables, saving its content back to its fobj first if it's still in use.
 * For PAGER_AREA_TYPE_LOCK the page moves to the locked list and the
 * pageable page count is decremented (panics if that would hit zero);
 * otherwise the page is re-queued at the tail. Returns NULL only if the
 * pmem list is empty.
 */
static struct tee_pager_pmem *tee_pager_get_page(enum tee_pager_area_type at)
{
	struct tee_pager_pmem *pmem;

	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
	if (!pmem) {
		EMSG("No pmem entries");
		return NULL;
	}

	if (pmem->fobj) {
		/* Still owned by a fobj: evict and write back if dirty */
		pmem_unmap(pmem, NULL);
		tee_pager_save_page(pmem);
	}

	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
	pmem->fobj = NULL;
	pmem->fobj_pgidx = INVALID_PGIDX;
	pmem->flags = 0;
	if (at == PAGER_AREA_TYPE_LOCK) {
		/* Move page to lock list */
		if (tee_pager_npages <= 0)
			panic("running out of page");
		tee_pager_npages--;
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
	} else {
		/* move page to back */
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	}

	return pmem;
}

/*
 * pager_update_permissions() - Handle an abort on an already-mapped page.
 * @area:	area the faulting address belongs to
 * @ai:		abort information for the fault
 * @handled:	set to true when the fault was fully resolved here
 *
 * Returns true when no page-in is needed: either the fault was handled
 * (e.g. another core mapped the page first, or a write to a read-only
 * mapped RW page was resolved by marking the pmem dirty and upgrading the
 * mapping), or it's a genuine user-mode permission violation (*handled
 * stays false so the caller reports it). Kernel-mode permission
 * violations panic. Returns false when the entry is invalid and the
 * caller must page the content in.
 */
static bool pager_update_permissions(struct tee_pager_area *area,
				     struct abort_info *ai, bool *handled)
{
	unsigned int pgidx = area_va2idx(area, ai->va);
	struct tee_pager_pmem *pmem = NULL;
	uint32_t attr = 0;
	paddr_t pa = 0;

	*handled = false;

	area_get_entry(area, pgidx, &pa, &attr);

	/* Not mapped */
	if (!(attr & TEE_MATTR_VALID_BLOCK))
		return false;

	/* Not readable, should not happen */
	if (abort_is_user_exception(ai)) {
		if (!(attr & TEE_MATTR_UR))
			return true;
	} else {
		if (!(attr & TEE_MATTR_PR)) {
			abort_print_error(ai);
			panic();
		}
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
			/* Check attempting to execute from an NOX page */
			if (abort_is_user_exception(ai)) {
				if (!(attr & TEE_MATTR_UX))
					return true;
			} else {
				if (!(attr & TEE_MATTR_PX)) {
					abort_print_error(ai);
					panic();
				}
			}
		}
		/* Since the page is mapped now it's OK */
		break;
	case CORE_MMU_FAULT_WRITE_PERMISSION:
		/* Check attempting to write to an RO page */
		pmem = pmem_find(area, pgidx);
		if (!pmem)
			panic();
		if (abort_is_user_exception(ai)) {
			if (!(area->flags & TEE_MATTR_UW))
				return true;
			if (!(attr & TEE_MATTR_UW)) {
				/*
				 * Area is writable but the entry isn't yet:
				 * first write to an RW page — tag dirty and
				 * enable write in the entry.
				 */
				FMSG("Dirty %p",
				     (void *)(ai->va & ~SMALL_PAGE_MASK));
				pmem->flags |= PMEM_FLAG_DIRTY;
				area_set_entry(area, pgidx, pa,
					       get_area_mattr(area->flags));
				area_tlbi_entry(area, pgidx);
			}

		} else {
			if (!(area->flags & TEE_MATTR_PW)) {
				abort_print_error(ai);
				panic();
			}
			if (!(attr & TEE_MATTR_PW)) {
				FMSG("Dirty %p",
				     (void *)(ai->va & ~SMALL_PAGE_MASK));
				pmem->flags |= PMEM_FLAG_DIRTY;
				area_set_entry(area, pgidx, pa,
					       get_area_mattr(area->flags));
				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
			}
		}
		/* Since permissions have been updated now it's OK */
		break;
	default:
		/* Some fault we can't deal with */
		if (abort_is_user_exception(ai))
			return true;
		abort_print_error(ai);
		panic();
	}
	*handled = true;
	return true;
}

#ifdef CFG_TEE_CORE_DEBUG
/*
 * stat_handle_fault() - Debug-build pager pressure statistics.
 *
 * Logs the fault count and free-page watermarks every 1024 faults or
 * whenever a new all-time low of available pages is reached; min_npages
 * is a low-water mark that resets at each print, total_min_npages never
 * resets.
 */
static void stat_handle_fault(void)
{
	static size_t num_faults;
	static size_t min_npages = SIZE_MAX;
	static size_t total_min_npages = SIZE_MAX;

	num_faults++;
	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
		DMSG("nfaults %zu npages %zu (min %zu)",
		     num_faults, tee_pager_npages, min_npages);
		min_npages = tee_pager_npages; /* reset */
	}
	if (tee_pager_npages < min_npages)
		min_npages = tee_pager_npages;
	if (tee_pager_npages < total_min_npages)
		total_min_npages = tee_pager_npages;
}
#else
static void stat_handle_fault(void)
{
}
#endif

/*
 * tee_pager_handle_fault() - Main pager abort handler.
 * @ai:		abort information for the faulting access
 *
 * Returns true if the fault was resolved (page unhidden, permissions
 * updated, or page content loaded and mapped), false if the address
 * doesn't belong to any pager area (caller treats it as a real fault).
 */
bool tee_pager_handle_fault(struct abort_info *ai)
{
	struct tee_pager_area *area;
	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
	uint32_t exceptions;
	bool ret;
	bool clean_user_cache = false;

#ifdef TEE_PAGER_DEBUG_PRINT
	if (!abort_is_user_exception(ai))
		abort_print(ai);
#endif

	/*
	 * We're updating pages that can affect several active CPUs at a
	 * time below. We end up here because a thread tries to access some
	 * memory that isn't available. We have to be careful when making
	 * that memory available as other threads may succeed in accessing
	 * that address the moment after we've made it available.
	 *
	 * That means that we can't just map the memory and populate the
	 * page, instead we use the aliased mapping to populate the page
	 * and once everything is ready we map it.
	 */
	exceptions = pager_lock(ai);

	stat_handle_fault();

	/* check if the access is valid */
	if (abort_is_user_exception(ai)) {
		area = find_uta_area(ai->va);
		clean_user_cache = true;
	} else {
		area = find_area(&tee_pager_area_head, ai->va);
		if (!area) {
			/* Kernel-mode access to user memory (e.g. syscall) */
			area = find_uta_area(ai->va);
			clean_user_cache = true;
		}
	}
	if (!area || !area->pgt) {
		ret = false;
		goto out;
	}

	if (!tee_pager_unhide_page(area, area_va2idx(area, page_va))) {
		struct tee_pager_pmem *pmem = NULL;
		uint32_t attr = 0;
		paddr_t pa = 0;
		size_t tblidx = 0;

		/*
		 * The page wasn't hidden, but some other core may have
		 * updated the table entry before we got here or we need
		 * to make a read-only page read-write (dirty).
		 */
		if (pager_update_permissions(area, ai, &ret)) {
			/*
			 * Nothing more to do with the abort. The problem
			 * could already have been dealt with from another
			 * core or if ret is false the TA will be panicked.
			 */
			goto out;
		}

		pmem = tee_pager_get_page(area->type);
		if (!pmem) {
			abort_print(ai);
			panic();
		}

		/* Load page code & data through the aliased mapping */
		tee_pager_load_page(area, page_va, pmem->va_alias);


		pmem->fobj = area->fobj;
		pmem->fobj_pgidx = area_va2idx(area, page_va) +
				   area->fobj_pgoffs -
				   ((area->base & CORE_MMU_PGDIR_MASK) >>
				    SMALL_PAGE_SHIFT);
		tblidx = pmem_get_area_tblidx(pmem, area);
		attr = get_area_mattr(area->flags);
		/*
		 * Pages from PAGER_AREA_TYPE_RW starts read-only to be
		 * able to tell when they are updated and should be tagged
		 * as dirty.
		 */
		if (area->type == PAGER_AREA_TYPE_RW)
			attr &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
		pa = get_pmem_pa(pmem);

		/*
		 * We've updated the page using the aliased mapping and
		 * some cache maintenance is now needed if it's an
		 * executable page.
		 *
		 * Since the d-cache is a Physically-indexed,
		 * physically-tagged (PIPT) cache we can clean either the
		 * aliased address or the real virtual address. In this
		 * case we choose the real virtual address.
		 *
		 * The i-cache can also be PIPT, but may be something else
		 * too like VIPT. The current code requires the caches to
		 * implement the IVIPT extension, that is:
		 * "instruction cache maintenance is required only after
		 * writing new data to a physical address that holds an
		 * instruction."
		 *
		 * To portably invalidate the icache the page has to
		 * be mapped at the final virtual address but not
		 * executable.
		 */
		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
			uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
					TEE_MATTR_PW | TEE_MATTR_UW;
			void *va = (void *)page_va;

			/* Set a temporary read-only mapping */
			area_set_entry(area, tblidx, pa, attr & ~mask);
			area_tlbi_entry(area, tblidx);

			dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
			if (clean_user_cache)
				icache_inv_user_range(va, SMALL_PAGE_SIZE);
			else
				icache_inv_range(va, SMALL_PAGE_SIZE);

			/* Set the final mapping */
			area_set_entry(area, tblidx, pa, attr);
			area_tlbi_entry(area, tblidx);
		} else {
			area_set_entry(area, tblidx, pa, attr);
			/*
			 * No need to flush TLB for this entry, it was
			 * invalid. We should use a barrier though, to make
			 * sure that the change is visible.
			 */
			dsb_ishst();
		}
		pgt_inc_used_entries(area->pgt);

		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);

	}

	tee_pager_hide_pages();
	ret = true;
out:
	pager_unlock(exceptions);
	return ret;
}

/*
 * tee_pager_add_pages() - Register physical pages for use by the pager.
 * @vaddr:	virtual address of the first page
 * @npages:	number of pages, each SMALL_PAGE_SIZE
 * @unmap:	true to unmap the pages (pure pool pages), false to keep
 *		them mapped and attach them to their covering area
 *
 * Each currently-mapped page gets a pmem entry and an alias mapping.
 * Called during init; panics on allocation failure. Ends with a full TLB
 * invalidation rather than per-entry TLBIs.
 */
void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
{
	size_t n;

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);

	/* setup memory */
	for (n = 0; n < npages; n++) {
		struct core_mmu_table_info *ti;
		struct tee_pager_pmem *pmem;
		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
		unsigned int pgidx;
		paddr_t pa;
		uint32_t attr;

		ti = find_table_info(va);
		pgidx = core_mmu_va2idx(ti, va);
		/*
		 * Note that we can only support adding pages in the
		 * valid range of this table info, currently not a problem.
		 */
		core_mmu_get_entry(ti, pgidx, &pa, &attr);

		/* Ignore unmapped pages/blocks */
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		pmem = calloc(1, sizeof(struct tee_pager_pmem));
		if (!pmem)
			panic("out of mem");

		pmem->va_alias = pager_add_alias_page(pa);

		if (unmap) {
			pmem->fobj = NULL;
			pmem->fobj_pgidx = INVALID_PGIDX;
			core_mmu_set_entry(ti, pgidx, 0, 0);
			pgt_dec_used_entries(find_core_pgt(va));
		} else {
			struct tee_pager_area *area = NULL;

			/*
			 * The page is still mapped, let's assign the area
			 * and update the protection bits accordingly.
			 */
			area = find_area(&tee_pager_area_head, va);
			assert(area && area->pgt == find_core_pgt(va));
			pmem->fobj = area->fobj;
			pmem->fobj_pgidx = pgidx + area->fobj_pgoffs -
					   ((area->base &
					     CORE_MMU_PGDIR_MASK) >>
					    SMALL_PAGE_SHIFT);
			assert(pgidx == pmem_get_area_tblidx(pmem, area));
			assert(pa == get_pmem_pa(pmem));
			area_set_entry(area, pgidx, pa,
				       get_area_mattr(area->flags));
		}

		tee_pager_npages++;
		incr_npages_all();
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	}

	/*
	 * As this is done at inits, invalidate all TLBs once instead of
	 * targeting only the modified entries.
	 */
	tlbi_all();
}

#ifdef CFG_PAGED_USER_TA
/*
 * find_pgt() - Find the page table in list @pgt covering address @va,
 * i.e. the one whose vabase matches @va's page directory base. Returns
 * NULL if no table in the list covers @va.
 */
static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
{
	struct pgt *p = pgt;

	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
		p = SLIST_NEXT(p, link);
	return p;
}

/*
 * tee_pager_assign_um_tables() - Assign page tables to each area of a
 * user mode context from the current thread's pgt cache. Panics if an
 * area has no covering table; asserts consistency if one is already
 * assigned.
 */
void tee_pager_assign_um_tables(struct user_mode_ctx *uctx)
{
	struct tee_pager_area *area = NULL;
	struct pgt *pgt = NULL;

	if (!uctx->areas)
		return;

	pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
	TAILQ_FOREACH(area, uctx->areas, link) {
		if (!area->pgt)
			area->pgt = find_pgt(pgt, area->base);
		else
			assert(area->pgt == find_pgt(pgt, area->base));
		if (!area->pgt)
			panic();
	}
}

/*
 * tee_pager_pgt_save_and_release_entries() - Detach a page table from the
 * pager before it's released.
 * @pgt:	page table being released
 *
 * Unmaps every resident page that uses @pgt (pmem_unmap() with a specific
 * pgt — saving of dirty content is presumably handled there; confirm
 * against pmem_unmap's definition), then clears the pgt pointer from all
 * areas of the owning user TA context. Runs under the pager lock.
 */
void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
{
	struct tee_pager_pmem *pmem = NULL;
	struct tee_pager_area *area = NULL;
	struct tee_pager_area_head *areas = NULL;
	uint32_t exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);

	if (!pgt->num_used_entries)
		goto out;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		if (pmem->fobj)
			pmem_unmap(pmem, pgt);
	}
	assert(!pgt->num_used_entries);

out:
	areas = to_user_ta_ctx(pgt->ctx)->uctx.areas;
	if (areas) {
		TAILQ_FOREACH(area, areas, link) {
			if (area->pgt == pgt)
				area->pgt = NULL;
		}
	}

	pager_unlock(exceptions);
}
KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
#endif /*CFG_PAGED_USER_TA*/

/*
 * tee_pager_release_phys() - Release the physical pages backing a locked
 * range.
 * @addr:	start of the range
 * @size:	size of the range in bytes
 *
 * Only whole pages fully inside [addr, addr + size) are released (start
 * rounded up, end rounded down). Released pages return to the pageable
 * pool; the TLB range is invalidated once if anything was unmapped.
 */
void tee_pager_release_phys(void *addr, size_t size)
{
	bool unmaped = false;
	vaddr_t va = (vaddr_t)addr;
	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
	struct tee_pager_area *area;
	uint32_t exceptions;

	/* No whole page inside the range: nothing to release */
	if (end <= begin)
		return;

	exceptions = pager_lock_check_stack(128);

	for (va = begin; va < end; va += SMALL_PAGE_SIZE) {
		area = find_area(&tee_pager_area_head, va);
		if (!area)
			panic();
		unmaped |= tee_pager_release_one_phys(area, va);
	}

	if (unmaped)
		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);

	pager_unlock(exceptions);
}
KEEP_PAGER(tee_pager_release_phys);

/*
 * tee_pager_alloc() - Allocate locked-in pager-backed memory.
 * @size:	requested size in bytes, rounded up to whole pages
 *
 * Reserves virtual space from tee_mm_vcore, backs it with a locked paged
 * fobj and registers it as a PAGER_AREA_TYPE_LOCK area. Returns the start
 * of the allocation or NULL if @size is 0 or on allocation failure. Note:
 * the tee_mm entry is freed if the fobj allocation fails, but is kept for
 * the lifetime of a successful allocation.
 */
void *tee_pager_alloc(size_t size)
{
	tee_mm_entry_t *mm = NULL;
	uint8_t *smem = NULL;
	size_t num_pages = 0;
	struct fobj *fobj = NULL;

	if (!size)
		return NULL;

	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
	if (!mm)
		return NULL;

	smem = (uint8_t *)tee_mm_get_smem(mm);
	num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
	fobj = fobj_locked_paged_alloc(num_pages);
	if (!fobj) {
		tee_mm_free(mm);
		return NULL;
	}

	tee_pager_add_core_area((vaddr_t)smem, PAGER_AREA_TYPE_LOCK, fobj);
	fobj_put(fobj);

	/* Make the fresh range accessible to ASan-instrumented code */
	asan_tag_access(smem, smem + num_pages * SMALL_PAGE_SIZE);

	return smem;
}