// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm.h>
#include <assert.h>
#include <io.h>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/asan.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/tlb_helpers.h>
#include <mm/core_memprot.h>
#include <mm/fobj.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <stdlib.h>
#include <sys/queue.h>
#include <tee_api_defines.h>
#include <trace.h>
#include <types_ext.h>
#include <utee_defines.h>
#include <util.h>

struct tee_pager_area {
	struct fobj *fobj;
	size_t fobj_pgidx;
	enum tee_pager_area_type type;
	uint32_t flags;
	vaddr_t base;
	size_t size;
	struct pgt *pgt;
	TAILQ_ENTRY(tee_pager_area) link;
};

TAILQ_HEAD(tee_pager_area_head, tee_pager_area);

static struct tee_pager_area_head tee_pager_area_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);

#define INVALID_PGIDX	UINT_MAX

/*
 * struct tee_pager_pmem - Represents a physical page used for paging.
 *
 * @pgidx	an index of the entry in the area's translation table.
 * @va_alias	Virtual address where the physical page is always aliased.
 *		Used during remapping of the page when the content needs to
 *		be updated before it's available at the new location.
 * @area	a pointer to the pager area
 */
struct tee_pager_pmem {
	unsigned pgidx;
	void *va_alias;
	struct tee_pager_area *area;
	TAILQ_ENTRY(tee_pager_pmem) link;
};

/* The list of physical pages. The first page in the list is the oldest */
TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);

static struct tee_pager_pmem_head tee_pager_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);

static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);

/* number of pages hidden */
#define TEE_PAGER_NHIDE (tee_pager_npages / 3)
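
/*
 * Summary of the main bookkeeping structures:
 *
 * - struct tee_pager_area describes a pageable virtual range backed by a
 *   struct fobj, together with its access flags and the page table (pgt)
 *   holding its entries.
 * - struct tee_pager_pmem describes one physical page frame owned by the
 *   pager. Resident frames sit on tee_pager_pmem_head in FIFO order, so
 *   the frame at the head is the first eviction candidate. Frames backing
 *   PAGER_AREA_TYPE_LOCK areas are parked on tee_pager_lock_pmem_head and
 *   are not evicted until explicitly released.
 */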
/* Number of registered physical pages, used when hiding pages. */
static size_t tee_pager_npages;

#ifdef CFG_WITH_STATS
static struct tee_pager_stats pager_stats;

static inline void incr_ro_hits(void)
{
	pager_stats.ro_hits++;
}

static inline void incr_rw_hits(void)
{
	pager_stats.rw_hits++;
}

static inline void incr_hidden_hits(void)
{
	pager_stats.hidden_hits++;
}

static inline void incr_zi_released(void)
{
	pager_stats.zi_released++;
}

static inline void incr_npages_all(void)
{
	pager_stats.npages_all++;
}

static inline void set_npages(void)
{
	pager_stats.npages = tee_pager_npages;
}

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	*stats = pager_stats;

	pager_stats.hidden_hits = 0;
	pager_stats.ro_hits = 0;
	pager_stats.rw_hits = 0;
	pager_stats.zi_released = 0;
}

#else /* CFG_WITH_STATS */
static inline void incr_ro_hits(void) { }
static inline void incr_rw_hits(void) { }
static inline void incr_hidden_hits(void) { }
static inline void incr_zi_released(void) { }
static inline void incr_npages_all(void) { }
static inline void set_npages(void) { }

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	memset(stats, 0, sizeof(struct tee_pager_stats));
}
#endif /* CFG_WITH_STATS */
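
/*
 * Each pager_table covers one CORE_MMU_PGDIR_SIZE chunk of the
 * pager-controlled virtual address space and pairs the pgt used for
 * bookkeeping with the core_mmu_table_info needed to manipulate the
 * last-level translation table directly. EFFECTIVE_VA_SIZE is the
 * TEE_RAM_VA range rounded out to whole pgdir-sized chunks, giving one
 * array entry per chunk; see tee_pager_early_init() for how the entries
 * are populated.
 */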
#define TBL_NUM_ENTRIES	(CORE_MMU_PGDIR_SIZE / SMALL_PAGE_SIZE)
#define TBL_LEVEL	CORE_MMU_PGDIR_LEVEL
#define TBL_SHIFT	SMALL_PAGE_SHIFT

#define EFFECTIVE_VA_SIZE \
	(ROUNDUP(TEE_RAM_VA_START + TEE_RAM_VA_SIZE, \
		 CORE_MMU_PGDIR_SIZE) - \
	 ROUNDDOWN(TEE_RAM_VA_START, CORE_MMU_PGDIR_SIZE))

static struct pager_table {
	struct pgt pgt;
	struct core_mmu_table_info tbl_info;
} pager_tables[EFFECTIVE_VA_SIZE / CORE_MMU_PGDIR_SIZE];

static unsigned pager_spinlock = SPINLOCK_UNLOCK;

/* Defines the range of the alias area */
static tee_mm_entry_t *pager_alias_area;
/*
 * Physical pages are added in a stack-like fashion to the alias area,
 * @pager_alias_next_free gives the address of next free entry if
 * @pager_alias_next_free is != 0
 */
static uintptr_t pager_alias_next_free;

#ifdef CFG_TEE_CORE_DEBUG
#define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)

static uint32_t pager_lock_dldetect(const char *func, const int line,
				    struct abort_info *ai)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	unsigned int retries = 0;
	unsigned int reminder = 0;

	while (!cpu_spin_trylock(&pager_spinlock)) {
		retries++;
		if (!retries) {
			/* wrapped, time to report */
			trace_printf(func, line, TRACE_ERROR, true,
				     "possible spinlock deadlock reminder %u",
				     reminder);
			if (reminder < UINT_MAX)
				reminder++;
			if (ai)
				abort_print(ai);
		}
	}

	return exceptions;
}
#else
static uint32_t pager_lock(struct abort_info __unused *ai)
{
	return cpu_spin_lock_xsave(&pager_spinlock);
}
#endif

static uint32_t pager_lock_check_stack(size_t stack_size)
{
	if (stack_size) {
		int8_t buf[stack_size];
		size_t n;

		/*
		 * Make sure to touch all pages of the stack that we expect
		 * to use with this lock held. We need to take any page
		 * faults before the lock is taken or we'll deadlock the
		 * pager. The pages that are populated in this way will
		 * eventually be released at certain save transitions of
		 * the thread.
		 */
		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
			io_write8((vaddr_t)buf + n, 1);
		io_write8((vaddr_t)buf + stack_size - 1, 1);
	}

	return pager_lock(NULL);
}

static void pager_unlock(uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
}
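
/*
 * Translate a physical address owned by the pager back to a virtual
 * address: first check whether @pa interpreted as a virtual address maps
 * to itself (the common linearly mapped case), otherwise scan all
 * pager-controlled translation tables for an entry with a matching
 * physical address.
 */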
void *tee_pager_phys_to_virt(paddr_t pa)
{
	struct core_mmu_table_info ti;
	unsigned idx;
	uint32_t a;
	paddr_t p;
	vaddr_t v;
	size_t n;

	/*
	 * Most addresses are mapped linearly, try that first if possible.
	 */
	if (!tee_pager_get_table_info(pa, &ti))
		return NULL; /* impossible pa */
	idx = core_mmu_va2idx(&ti, pa);
	core_mmu_get_entry(&ti, idx, &p, &a);
	if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
		return (void *)core_mmu_idx2va(&ti, idx);

	n = 0;
	idx = core_mmu_va2idx(&pager_tables[n].tbl_info, TEE_RAM_VA_START);
	while (true) {
		while (idx < TBL_NUM_ENTRIES) {
			v = core_mmu_idx2va(&pager_tables[n].tbl_info, idx);
			if (v >= (TEE_RAM_VA_START + TEE_RAM_VA_SIZE))
				return NULL;

			core_mmu_get_entry(&pager_tables[n].tbl_info,
					   idx, &p, &a);
			if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
				return (void *)v;
			idx++;
		}

		n++;
		if (n >= ARRAY_SIZE(pager_tables))
			return NULL;
		idx = 0;
	}

	return NULL;
}

static struct pager_table *find_pager_table_may_fail(vaddr_t va)
{
	size_t n;
	const vaddr_t mask = CORE_MMU_PGDIR_MASK;

	n = ((va & ~mask) - pager_tables[0].tbl_info.va_base) >>
	    CORE_MMU_PGDIR_SHIFT;
	if (n >= ARRAY_SIZE(pager_tables))
		return NULL;

	assert(va >= pager_tables[n].tbl_info.va_base &&
	       va <= (pager_tables[n].tbl_info.va_base | mask));

	return pager_tables + n;
}

static struct pager_table *find_pager_table(vaddr_t va)
{
	struct pager_table *pt = find_pager_table_may_fail(va);

	assert(pt);
	return pt;
}

bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti)
{
	struct pager_table *pt = find_pager_table_may_fail(va);

	if (!pt)
		return false;

	*ti = pt->tbl_info;
	return true;
}

static struct core_mmu_table_info *find_table_info(vaddr_t va)
{
	return &find_pager_table(va)->tbl_info;
}

static struct pgt *find_core_pgt(vaddr_t va)
{
	return &find_pager_table(va)->pgt;
}

void tee_pager_set_alias_area(tee_mm_entry_t *mm)
{
	struct pager_table *pt;
	unsigned idx;
	vaddr_t smem = tee_mm_get_smem(mm);
	size_t nbytes = tee_mm_get_bytes(mm);
	vaddr_t v;

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);

	assert(!pager_alias_area);
	pager_alias_area = mm;
	pager_alias_next_free = smem;

	/* Clear all mappings in the alias area */
	pt = find_pager_table(smem);
	idx = core_mmu_va2idx(&pt->tbl_info, smem);
	while (pt <= (pager_tables + ARRAY_SIZE(pager_tables) - 1)) {
		while (idx < TBL_NUM_ENTRIES) {
			v = core_mmu_idx2va(&pt->tbl_info, idx);
			if (v >= (smem + nbytes))
				goto out;

			core_mmu_set_entry(&pt->tbl_info, idx, 0, 0);
			idx++;
		}

		pt++;
		idx = 0;
	}

out:
	tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);
}

static size_t tbl_usage_count(struct core_mmu_table_info *ti)
{
	size_t n;
	paddr_t pa;
	size_t usage = 0;

	for (n = 0; n < ti->num_entries; n++) {
		core_mmu_get_entry(ti, n, &pa, NULL);
		if (pa)
			usage++;
	}

	return usage;
}
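
/*
 * Helpers for accessing an area's translation table entries. Note that
 * the page index used here is relative to the pgdir-aligned base of the
 * table the area lives in, not to area->base, which is why
 * area_va2idx()/area_idx2va() mask with CORE_MMU_PGDIR_MASK.
 */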
static void area_get_entry(struct tee_pager_area *area, size_t idx,
			   paddr_t *pa, uint32_t *attr)
{
	assert(area->pgt);
	assert(idx < TBL_NUM_ENTRIES);
	core_mmu_get_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
}

static void area_set_entry(struct tee_pager_area *area, size_t idx,
			   paddr_t pa, uint32_t attr)
{
	assert(area->pgt);
	assert(idx < TBL_NUM_ENTRIES);
	core_mmu_set_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
}

static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
{
	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
}

static vaddr_t area_idx2va(struct tee_pager_area *area, size_t idx)
{
	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
}

void tee_pager_early_init(void)
{
	size_t n;

	/*
	 * Note that this depends on add_pager_vaspace() adding vaspace
	 * after end of memory.
	 */
	for (n = 0; n < ARRAY_SIZE(pager_tables); n++) {
		if (!core_mmu_find_table(NULL, TEE_RAM_VA_START +
					 n * CORE_MMU_PGDIR_SIZE, UINT_MAX,
					 &pager_tables[n].tbl_info))
			panic("can't find mmu tables");

		if (pager_tables[n].tbl_info.shift != TBL_SHIFT)
			panic("Unsupported page size in translation table");
		assert(pager_tables[n].tbl_info.num_entries == TBL_NUM_ENTRIES);
		assert(pager_tables[n].tbl_info.level == TBL_LEVEL);

		pager_tables[n].pgt.tbl = pager_tables[n].tbl_info.table;
		pgt_set_used_entries(&pager_tables[n].pgt,
				     tbl_usage_count(&pager_tables[n].tbl_info));
	}
}

static void *pager_add_alias_page(paddr_t pa)
{
	unsigned idx;
	struct core_mmu_table_info *ti;
	/* Alias pages mapped without write permission: runtime will care */
	uint32_t attr = TEE_MATTR_VALID_BLOCK |
			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
			TEE_MATTR_SECURE | TEE_MATTR_PR;

	DMSG("0x%" PRIxPA, pa);

	ti = find_table_info(pager_alias_next_free);
	idx = core_mmu_va2idx(ti, pager_alias_next_free);
	core_mmu_set_entry(ti, idx, pa, attr);
	pgt_inc_used_entries(find_core_pgt(pager_alias_next_free));
	pager_alias_next_free += SMALL_PAGE_SIZE;
	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
				      tee_mm_get_bytes(pager_alias_area)))
		pager_alias_next_free = 0;
	return (void *)core_mmu_idx2va(ti, idx);
}

static void area_insert_tail(struct tee_pager_area *area)
{
	uint32_t exceptions = pager_lock_check_stack(8);

	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);

	pager_unlock(exceptions);
}
KEEP_PAGER(area_insert_tail);

void tee_pager_add_core_area(vaddr_t base, enum tee_pager_area_type type,
			     struct fobj *fobj)
{
	struct tee_pager_area *area = NULL;
	uint32_t flags = 0;
	size_t fobj_pgidx = 0;
	vaddr_t b = base;
	size_t s = fobj->num_pages * SMALL_PAGE_SIZE;
	size_t s2 = 0;

	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : type %d", base, base + s, type);

	if (base & SMALL_PAGE_MASK || !s) {
		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, s);
		panic();
	}

	switch (type) {
	case PAGER_AREA_TYPE_RO:
		flags = TEE_MATTR_PRX;
		break;
	case PAGER_AREA_TYPE_RW:
		flags = TEE_MATTR_PRW;
		break;
	case PAGER_AREA_TYPE_LOCK:
		flags = TEE_MATTR_PRW | TEE_MATTR_LOCKED;
		break;
	default:
		panic();
	}

	if (!fobj)
		panic();

	while (s) {
		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
		area = calloc(1, sizeof(*area));
		if (!area)
			panic("alloc_area");

		area->fobj = fobj_get(fobj);
		area->fobj_pgidx = fobj_pgidx;
		area->type = type;
		area->pgt = find_core_pgt(b);
		area->base = b;
		area->size = s2;
		area->flags = flags;
		area_insert_tail(area);

		b += s2;
		s -= s2;
		fobj_pgidx += s2 / SMALL_PAGE_SIZE;
	}
}

static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
					vaddr_t va)
{
	struct tee_pager_area *area;

	if (!areas)
		return NULL;

	TAILQ_FOREACH(area, areas, link) {
		if (core_is_buffer_inside(va, 1, area->base, area->size))
			return area;
	}
	return NULL;
}

#ifdef CFG_PAGED_USER_TA
static struct tee_pager_area *find_uta_area(vaddr_t va)
{
	struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;

	if (!is_user_ta_ctx(ctx))
		return NULL;
	return find_area(to_user_ta_ctx(ctx)->areas, va);
}
#else
static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
{
	return NULL;
}
#endif /*CFG_PAGED_USER_TA*/

static uint32_t get_area_mattr(uint32_t area_flags)
{
	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));

	return attr;
}

static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
{
	struct core_mmu_table_info *ti;
	paddr_t pa;
	unsigned idx;

	ti = find_table_info((vaddr_t)pmem->va_alias);
	idx = core_mmu_va2idx(ti, (vaddr_t)pmem->va_alias);
	core_mmu_get_entry(ti, idx, &pa, NULL);
	return pa;
}
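
/*
 * Load the backing content for the page at @page_va into the physical
 * page reached through @va_alias. The fobj page index is derived from the
 * offset of @page_va within the area plus the area's starting index into
 * the fobj. The alias mapping is temporarily made writable while
 * fobj_load_page() fills the page, and write access is dropped again for
 * read-only areas so the alias can't be used to modify paged-in code.
 */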
static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
				void *va_alias)
{
	size_t fobj_pgidx = ((page_va - area->base) >> SMALL_PAGE_SHIFT) +
			    area->fobj_pgidx;
	struct core_mmu_table_info *ti;
	uint32_t attr_alias;
	paddr_t pa_alias;
	unsigned int idx_alias;

	/* Ensure we are allowed to write to the aliased virtual page */
	ti = find_table_info((vaddr_t)va_alias);
	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
	if (!(attr_alias & TEE_MATTR_PW)) {
		attr_alias |= TEE_MATTR_PW;
		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
		tlbi_mva_allasid((vaddr_t)va_alias);
	}

	asan_tag_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
	if (fobj_load_page(area->fobj, fobj_pgidx, va_alias)) {
		EMSG("PH 0x%" PRIxVA " failed", page_va);
		panic();
	}
	switch (area->type) {
	case PAGER_AREA_TYPE_RO:
		incr_ro_hits();
		/* Forbid write to aliases for read-only (maybe exec) pages */
		attr_alias &= ~TEE_MATTR_PW;
		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
		tlbi_mva_allasid((vaddr_t)va_alias);
		break;
	case PAGER_AREA_TYPE_RW:
		incr_rw_hits();
		break;
	case PAGER_AREA_TYPE_LOCK:
		break;
	default:
		panic();
	}
	asan_tag_no_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
}

static void tee_pager_save_page(struct tee_pager_pmem *pmem, uint32_t attr)
{
	const uint32_t dirty_bits = TEE_MATTR_PW | TEE_MATTR_UW |
				    TEE_MATTR_HIDDEN_DIRTY_BLOCK;

	if (pmem->area->type == PAGER_AREA_TYPE_RW && (attr & dirty_bits)) {
		size_t offs = pmem->area->base & CORE_MMU_PGDIR_MASK;
		size_t fobj_pgidx = (pmem->pgidx -
				     (offs >> SMALL_PAGE_SHIFT)) +
				    pmem->area->fobj_pgidx;

		assert(pmem->area->flags & (TEE_MATTR_PW | TEE_MATTR_UW));
		asan_tag_access(pmem->va_alias,
				(uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
		if (fobj_save_page(pmem->area->fobj, fobj_pgidx,
				   pmem->va_alias))
			panic("fobj_save_page");
		asan_tag_no_access(pmem->va_alias,
				   (uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
	}
}

#ifdef CFG_PAGED_USER_TA
static void free_area(struct tee_pager_area *area)
{
	fobj_put(area->fobj);
	free(area);
}

static TEE_Result pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
				     struct fobj *fobj)
{
	struct tee_pager_area *area;
	vaddr_t b = base;
	size_t fobj_pgidx = 0;
	size_t s = fobj->num_pages * SMALL_PAGE_SIZE;

	if (!utc->areas) {
		utc->areas = malloc(sizeof(*utc->areas));
		if (!utc->areas)
			return TEE_ERROR_OUT_OF_MEMORY;
		TAILQ_INIT(utc->areas);
	}

	while (s) {
		size_t s2;

		if (find_area(utc->areas, b))
			return TEE_ERROR_BAD_PARAMETERS;

		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
		area = calloc(1, sizeof(*area));
		if (!area)
			return TEE_ERROR_OUT_OF_MEMORY;

		/* Table info will be set when the context is activated. */
		area->fobj = fobj_get(fobj);
		area->fobj_pgidx = fobj_pgidx;
		area->type = PAGER_AREA_TYPE_RW;
		area->base = b;
		area->size = s2;
		area->flags = TEE_MATTR_PRW | TEE_MATTR_URWX;

		TAILQ_INSERT_TAIL(utc->areas, area, link);
		b += s2;
		s -= s2;
		fobj_pgidx += s2 / SMALL_PAGE_SIZE;
	}

	return TEE_SUCCESS;
}

TEE_Result tee_pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
				  struct fobj *fobj)
{
	TEE_Result res = TEE_SUCCESS;
	struct thread_specific_data *tsd = thread_get_tsd();
	struct tee_pager_area *area = NULL;
	struct core_mmu_table_info dir_info = { NULL };

	if (&utc->ctx != tsd->ctx) {
		/*
		 * Changes are to a utc that isn't active. Just add the
		 * areas; page tables will be dealt with later.
		 */
		return pager_add_uta_area(utc, base, fobj);
	}

	/*
	 * Assign page tables before adding areas to be able to tell which
	 * are newly added and should be removed in case of failure.
	 */
	tee_pager_assign_uta_tables(utc);
	res = pager_add_uta_area(utc, base, fobj);
	if (res) {
		struct tee_pager_area *next_a;

		/* Remove all added areas */
		TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
			if (!area->pgt) {
				TAILQ_REMOVE(utc->areas, area, link);
				free_area(area);
			}
		}
		return res;
	}

	/*
	 * Assign page tables to the new areas and make sure that the page
	 * tables are registered in the upper table.
	 */
	tee_pager_assign_uta_tables(utc);
	core_mmu_get_user_pgdir(&dir_info);
	TAILQ_FOREACH(area, utc->areas, link) {
		paddr_t pa;
		size_t idx;
		uint32_t attr;

		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
		core_mmu_get_entry(&dir_info, idx, &pa, &attr);

		/*
		 * Check if the page table already is used, if it is, it's
		 * already registered.
		 */
		if (area->pgt->num_used_entries) {
			assert(attr & TEE_MATTR_TABLE);
			assert(pa == virt_to_phys(area->pgt->tbl));
			continue;
		}

		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
		pa = virt_to_phys(area->pgt->tbl);
		assert(pa);
		/*
		 * Note that the update of the table entry is guaranteed to
		 * be atomic.
		 */
		core_mmu_set_entry(&dir_info, idx, pa, attr);
	}

	return TEE_SUCCESS;
}

static void init_tbl_info_from_pgt(struct core_mmu_table_info *ti,
				   struct pgt *pgt)
{
	assert(pgt);
	ti->table = pgt->tbl;
	ti->va_base = pgt->vabase;
	ti->level = TBL_LEVEL;
	ti->shift = TBL_SHIFT;
	ti->num_entries = TBL_NUM_ENTRIES;
}
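
/*
 * transpose_area() moves an area, together with any physical pages
 * currently mapped in it, from its old page table and virtual base to a
 * new pgt and base. It's used when a paged region is transferred from one
 * user TA context to another. The pager lock is held while the entries
 * are moved so no other core can fault the pages in half-way through.
 */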
static void transpose_area(struct tee_pager_area *area, struct pgt *new_pgt,
			   vaddr_t new_base)
{
	uint32_t exceptions = pager_lock_check_stack(64);

	/*
	 * If there's no pgt assigned to the old area there are no pages to
	 * deal with either, just update with a new pgt and base.
	 */
	if (area->pgt) {
		struct core_mmu_table_info old_ti;
		struct core_mmu_table_info new_ti;
		struct tee_pager_pmem *pmem;

		init_tbl_info_from_pgt(&old_ti, area->pgt);
		init_tbl_info_from_pgt(&new_ti, new_pgt);

		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
			vaddr_t va;
			paddr_t pa;
			uint32_t attr;

			if (pmem->area != area)
				continue;
			core_mmu_get_entry(&old_ti, pmem->pgidx, &pa, &attr);
			core_mmu_set_entry(&old_ti, pmem->pgidx, 0, 0);

			assert(pa == get_pmem_pa(pmem));
			assert(attr);
			assert(area->pgt->num_used_entries);
			area->pgt->num_used_entries--;

			va = core_mmu_idx2va(&old_ti, pmem->pgidx);
			va = va - area->base + new_base;
			pmem->pgidx = core_mmu_va2idx(&new_ti, va);
			core_mmu_set_entry(&new_ti, pmem->pgidx, pa, attr);
			new_pgt->num_used_entries++;
		}
	}

	area->pgt = new_pgt;
	area->base = new_base;
	pager_unlock(exceptions);
}
KEEP_PAGER(transpose_area);

void tee_pager_transfer_uta_region(struct user_ta_ctx *src_utc,
				   vaddr_t src_base,
				   struct user_ta_ctx *dst_utc,
				   vaddr_t dst_base, struct pgt **dst_pgt,
				   size_t size)
{
	struct tee_pager_area *area;
	struct tee_pager_area *next_a;

	TAILQ_FOREACH_SAFE(area, src_utc->areas, link, next_a) {
		vaddr_t new_area_base;
		size_t new_idx;

		if (!core_is_buffer_inside(area->base, area->size,
					   src_base, size))
			continue;

		TAILQ_REMOVE(src_utc->areas, area, link);

		new_area_base = dst_base + (src_base - area->base);
		new_idx = (new_area_base - dst_pgt[0]->vabase) /
			  CORE_MMU_PGDIR_SIZE;
		assert((new_area_base & ~CORE_MMU_PGDIR_MASK) ==
		       dst_pgt[new_idx]->vabase);
		transpose_area(area, dst_pgt[new_idx], new_area_base);

		/*
		 * Assert that this will not cause any conflicts in the new
		 * utc. This should already be guaranteed, but a bug here
		 * could be tricky to find.
		 */
		assert(!find_area(dst_utc->areas, area->base));
		TAILQ_INSERT_TAIL(dst_utc->areas, area, link);
	}
}
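
/*
 * Removing an area must also release the physical pages that currently
 * back it: rem_area() unmaps such pages, detaches them from the area and
 * leaves them in the pageable list for reuse.
 */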
static void rem_area(struct tee_pager_area_head *area_head,
		     struct tee_pager_area *area)
{
	struct tee_pager_pmem *pmem;
	uint32_t exceptions;

	exceptions = pager_lock_check_stack(64);

	TAILQ_REMOVE(area_head, area, link);

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		if (pmem->area == area) {
			area_set_entry(area, pmem->pgidx, 0, 0);
			tlbi_mva_allasid(area_idx2va(area, pmem->pgidx));
			pgt_dec_used_entries(area->pgt);
			pmem->area = NULL;
			pmem->pgidx = INVALID_PGIDX;
		}
	}

	pager_unlock(exceptions);
	free_area(area);
}
KEEP_PAGER(rem_area);

void tee_pager_rem_uta_region(struct user_ta_ctx *utc, vaddr_t base,
			      size_t size)
{
	struct tee_pager_area *area;
	struct tee_pager_area *next_a;
	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);

	TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
		if (core_is_buffer_inside(area->base, area->size, base, s))
			rem_area(utc->areas, area);
	}
}

void tee_pager_rem_uta_areas(struct user_ta_ctx *utc)
{
	struct tee_pager_area *area;

	if (!utc->areas)
		return;

	while (true) {
		area = TAILQ_FIRST(utc->areas);
		if (!area)
			break;
		TAILQ_REMOVE(utc->areas, area, link);
		free_area(area);
	}

	free(utc->areas);
}
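
/*
 * tee_pager_set_uta_area_attr() changes the access rights of a user TA
 * region. Future mappings pick the flags up from the area, but pages that
 * are already resident are remapped in place; dirty pages are saved back
 * to their fobj when write permission is dropped, and the caches are
 * maintained when a page becomes executable.
 */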
bool tee_pager_set_uta_area_attr(struct user_ta_ctx *utc, vaddr_t base,
				 size_t size, uint32_t flags)
{
	bool ret;
	vaddr_t b = base;
	size_t s = size;
	size_t s2;
	struct tee_pager_area *area = find_area(utc->areas, b);
	uint32_t exceptions;
	struct tee_pager_pmem *pmem;
	paddr_t pa;
	uint32_t a;
	uint32_t f;

	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
	if (f & TEE_MATTR_UW)
		f |= TEE_MATTR_PW;
	f = get_area_mattr(f);

	exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);

	while (s) {
		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
		if (!area || area->base != b || area->size != s2) {
			ret = false;
			goto out;
		}
		b += s2;
		s -= s2;

		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
			if (pmem->area != area)
				continue;
			area_get_entry(pmem->area, pmem->pgidx, &pa, &a);
			if (a & TEE_MATTR_VALID_BLOCK)
				assert(pa == get_pmem_pa(pmem));
			else
				pa = get_pmem_pa(pmem);
			if (a == f)
				continue;
			area_set_entry(pmem->area, pmem->pgidx, 0, 0);
			tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
			if (!(flags & TEE_MATTR_UW))
				tee_pager_save_page(pmem, a);

			area_set_entry(pmem->area, pmem->pgidx, pa, f);
			/*
			 * Make sure the table update is visible before
			 * continuing.
			 */
			dsb_ishst();

			if (flags & TEE_MATTR_UX) {
				void *va = (void *)area_idx2va(pmem->area,
							       pmem->pgidx);

				cache_op_inner(DCACHE_AREA_CLEAN, va,
					       SMALL_PAGE_SIZE);
				cache_op_inner(ICACHE_AREA_INVALIDATE, va,
					       SMALL_PAGE_SIZE);
			}
		}

		area->flags = f;
		area = TAILQ_NEXT(area, link);
	}

	ret = true;
out:
	pager_unlock(exceptions);
	return ret;
}
KEEP_PAGER(tee_pager_set_uta_area_attr);
#endif /*CFG_PAGED_USER_TA*/

static bool tee_pager_unhide_page(vaddr_t page_va)
{
	struct tee_pager_pmem *pmem;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;

		if (pmem->pgidx == INVALID_PGIDX)
			continue;

		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);

		if (!(attr &
		      (TEE_MATTR_HIDDEN_BLOCK | TEE_MATTR_HIDDEN_DIRTY_BLOCK)))
			continue;

		if (area_va2idx(pmem->area, page_va) == pmem->pgidx) {
			uint32_t a = get_area_mattr(pmem->area->flags);

			/* page is hidden, show and move to back */
			if (pa != get_pmem_pa(pmem))
				panic("unexpected pa");

			/*
			 * If it's not a dirty block, then it should be
			 * read only.
			 */
			if (!(attr & TEE_MATTR_HIDDEN_DIRTY_BLOCK))
				a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
			else
				FMSG("Unhide %#" PRIxVA, page_va);

			area_set_entry(pmem->area, pmem->pgidx, pa, a);
			/*
			 * Note that TLB invalidation isn't needed since
			 * there wasn't a valid mapping before. We should
			 * use a barrier though, to make sure that the
			 * change is visible.
			 */
			dsb_ishst();

			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
			incr_hidden_hits();
			return true;
		}
	}

	return false;
}
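
/*
 * Page "hiding" is, in effect, how the pager approximates LRU on top of
 * the FIFO pmem list: up to TEE_PAGER_NHIDE of the oldest resident pages
 * are made inaccessible while keeping their content. A later access to a
 * hidden page takes a cheap fault that tee_pager_unhide_page() resolves
 * by restoring the mapping and moving the page to the back of the list,
 * so pages that are still in use drift away from the eviction end.
 */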
static void tee_pager_hide_pages(void)
{
	struct tee_pager_pmem *pmem;
	size_t n = 0;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;
		uint32_t a;

		if (n >= TEE_PAGER_NHIDE)
			break;
		n++;

		/* we cannot hide pages when pmem->area is not defined. */
		if (!pmem->area)
			continue;

		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		assert(pa == get_pmem_pa(pmem));
		if (attr & (TEE_MATTR_PW | TEE_MATTR_UW)) {
			a = TEE_MATTR_HIDDEN_DIRTY_BLOCK;
			FMSG("Hide %#" PRIxVA,
			     area_idx2va(pmem->area, pmem->pgidx));
		} else
			a = TEE_MATTR_HIDDEN_BLOCK;

		area_set_entry(pmem->area, pmem->pgidx, pa, a);
		tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
	}
}

/*
 * Find mapped pmem, hide and move to pageable pmem.
 * Return false if page was not mapped, and true if page was mapped.
 */
static bool tee_pager_release_one_phys(struct tee_pager_area *area,
				       vaddr_t page_va)
{
	struct tee_pager_pmem *pmem;
	unsigned pgidx;
	paddr_t pa;
	uint32_t attr;

	pgidx = area_va2idx(area, page_va);
	area_get_entry(area, pgidx, &pa, &attr);

	FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);

	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
		if (pmem->area != area || pmem->pgidx != pgidx)
			continue;

		assert(pa == get_pmem_pa(pmem));
		area_set_entry(area, pgidx, 0, 0);
		pgt_dec_used_entries(area->pgt);
		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
		pmem->area = NULL;
		pmem->pgidx = INVALID_PGIDX;
		tee_pager_npages++;
		set_npages();
		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
		incr_zi_released();
		return true;
	}

	return false;
}

/* Finds the oldest page and unmaps it from its old virtual address */
static struct tee_pager_pmem *tee_pager_get_page(struct tee_pager_area *area)
{
	struct tee_pager_pmem *pmem;

	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
	if (!pmem) {
		EMSG("No pmem entries");
		return NULL;
	}
	if (pmem->pgidx != INVALID_PGIDX) {
		uint32_t a;

		assert(pmem->area && pmem->area->pgt);
		area_get_entry(pmem->area, pmem->pgidx, NULL, &a);
		area_set_entry(pmem->area, pmem->pgidx, 0, 0);
		pgt_dec_used_entries(pmem->area->pgt);
		tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
		tee_pager_save_page(pmem, a);
	}

	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
	pmem->pgidx = INVALID_PGIDX;
	pmem->area = NULL;
	if (area->type == PAGER_AREA_TYPE_LOCK) {
		/* Move page to lock list */
		if (tee_pager_npages <= 0)
			panic("running out of page");
		tee_pager_npages--;
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
	} else {
		/* move page to back */
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	}

	return pmem;
}
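
/*
 * pager_update_permissions() handles aborts on pages that are already
 * mapped: either another core mapped the page after this abort was taken,
 * or a write hit a clean page that must be remapped writable (and thereby
 * marked dirty). It returns true when no further paging work is needed;
 * *handled then tells whether the access is expected to succeed or the
 * abort should be treated as a real access violation for the user TA.
 */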
static bool pager_update_permissions(struct tee_pager_area *area,
				     struct abort_info *ai, bool *handled)
{
	unsigned int pgidx = area_va2idx(area, ai->va);
	uint32_t attr;
	paddr_t pa;

	*handled = false;

	area_get_entry(area, pgidx, &pa, &attr);

	/* Not mapped */
	if (!(attr & TEE_MATTR_VALID_BLOCK))
		return false;

	/* Not readable, should not happen */
	if (abort_is_user_exception(ai)) {
		if (!(attr & TEE_MATTR_UR))
			return true;
	} else {
		if (!(attr & TEE_MATTR_PR)) {
			abort_print_error(ai);
			panic();
		}
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
			/* Check attempting to execute from an NOX page */
			if (abort_is_user_exception(ai)) {
				if (!(attr & TEE_MATTR_UX))
					return true;
			} else {
				if (!(attr & TEE_MATTR_PX)) {
					abort_print_error(ai);
					panic();
				}
			}
		}
		/* Since the page is mapped now it's OK */
		break;
	case CORE_MMU_FAULT_WRITE_PERMISSION:
		/* Check attempting to write to an RO page */
		if (abort_is_user_exception(ai)) {
			if (!(area->flags & TEE_MATTR_UW))
				return true;
			if (!(attr & TEE_MATTR_UW)) {
				FMSG("Dirty %p",
				     (void *)(ai->va & ~SMALL_PAGE_MASK));
				area_set_entry(area, pgidx, pa,
					       get_area_mattr(area->flags));
				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
			}

		} else {
			if (!(area->flags & TEE_MATTR_PW)) {
				abort_print_error(ai);
				panic();
			}
			if (!(attr & TEE_MATTR_PW)) {
				FMSG("Dirty %p",
				     (void *)(ai->va & ~SMALL_PAGE_MASK));
				area_set_entry(area, pgidx, pa,
					       get_area_mattr(area->flags));
				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
			}
		}
		/* Since permissions have been updated now it's OK */
		break;
	default:
		/* Some fault we can't deal with */
		if (abort_is_user_exception(ai))
			return true;
		abort_print_error(ai);
		panic();
	}
	*handled = true;
	return true;
}

#ifdef CFG_TEE_CORE_DEBUG
static void stat_handle_fault(void)
{
	static size_t num_faults;
	static size_t min_npages = SIZE_MAX;
	static size_t total_min_npages = SIZE_MAX;

	num_faults++;
	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
		DMSG("nfaults %zu npages %zu (min %zu)",
		     num_faults, tee_pager_npages, min_npages);
		min_npages = tee_pager_npages; /* reset */
	}
	if (tee_pager_npages < min_npages)
		min_npages = tee_pager_npages;
	if (tee_pager_npages < total_min_npages)
		total_min_npages = tee_pager_npages;
}
#else
static void stat_handle_fault(void)
{
}
#endif
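
/*
 * Fault handling, in rough order:
 *
 * 1. Take the pager lock and locate the area covering the faulting VA.
 * 2. If the page was merely hidden, tee_pager_unhide_page() restores it.
 * 3. Otherwise pager_update_permissions() checks whether another core
 *    already mapped it or whether a clean page just needs write access.
 * 4. Failing that, tee_pager_get_page() evicts the oldest resident page,
 *    tee_pager_load_page() fills it through the alias mapping, and the
 *    page is mapped at the faulting address, with cache maintenance if
 *    it's executable.
 */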
bool tee_pager_handle_fault(struct abort_info *ai)
{
	struct tee_pager_area *area;
	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
	uint32_t exceptions;
	bool ret;

#ifdef TEE_PAGER_DEBUG_PRINT
	abort_print(ai);
#endif

	/*
	 * We're updating pages that can affect several active CPUs at a
	 * time below. We end up here because a thread tries to access some
	 * memory that isn't available. We have to be careful when making
	 * that memory available as other threads may succeed in accessing
	 * that address the moment after we've made it available.
	 *
	 * That means that we can't just map the memory and populate the
	 * page, instead we use the aliased mapping to populate the page
	 * and once everything is ready we map it.
	 */
	exceptions = pager_lock(ai);

	stat_handle_fault();

	/* check if the access is valid */
	if (abort_is_user_exception(ai)) {
		area = find_uta_area(ai->va);

	} else {
		area = find_area(&tee_pager_area_head, ai->va);
		if (!area)
			area = find_uta_area(ai->va);
	}
	if (!area || !area->pgt) {
		ret = false;
		goto out;
	}

	if (!tee_pager_unhide_page(page_va)) {
		struct tee_pager_pmem *pmem = NULL;
		uint32_t attr;
		paddr_t pa;

		/*
		 * The page wasn't hidden, but some other core may have
		 * updated the table entry before we got here or we need
		 * to make a read-only page read-write (dirty).
		 */
		if (pager_update_permissions(area, ai, &ret)) {
			/*
			 * Nothing more to do with the abort. The problem
			 * could already have been dealt with from another
			 * core or if ret is false the TA will be panicked.
			 */
			goto out;
		}

		pmem = tee_pager_get_page(area);
		if (!pmem) {
			abort_print(ai);
			panic();
		}

		/* load page code & data */
		tee_pager_load_page(area, page_va, pmem->va_alias);

		pmem->area = area;
		pmem->pgidx = area_va2idx(area, ai->va);
		attr = get_area_mattr(area->flags) &
		       ~(TEE_MATTR_PW | TEE_MATTR_UW);
		pa = get_pmem_pa(pmem);

		/*
		 * We've updated the page using the aliased mapping and
		 * some cache maintenance is now needed if it's an
		 * executable page.
		 *
		 * Since the d-cache is a Physically-indexed,
		 * physically-tagged (PIPT) cache we can clean either the
		 * aliased address or the real virtual address. In this
		 * case we choose the real virtual address.
		 *
		 * The i-cache can also be PIPT, but may be something else
		 * too like VIPT. The current code requires the caches to
		 * implement the IVIPT extension, that is:
		 * "instruction cache maintenance is required only after
		 * writing new data to a physical address that holds an
		 * instruction."
		 *
		 * To portably invalidate the icache the page has to
		 * be mapped at the final virtual address but not
		 * executable.
		 */
		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
			uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
					TEE_MATTR_PW | TEE_MATTR_UW;

			/* Set a temporary read-only mapping */
			area_set_entry(pmem->area, pmem->pgidx, pa,
				       attr & ~mask);
			tlbi_mva_allasid(page_va);

			/*
			 * Doing these operations to LoUIS (Level of
			 * unification, Inner Shareable) would be enough
			 */
			cache_op_inner(DCACHE_AREA_CLEAN, (void *)page_va,
				       SMALL_PAGE_SIZE);
			cache_op_inner(ICACHE_AREA_INVALIDATE, (void *)page_va,
				       SMALL_PAGE_SIZE);

			/* Set the final mapping */
			area_set_entry(area, pmem->pgidx, pa, attr);
			tlbi_mva_allasid(page_va);
		} else {
			area_set_entry(area, pmem->pgidx, pa, attr);
			/*
			 * No need to flush TLB for this entry, it was
			 * invalid. We should use a barrier though, to make
			 * sure that the change is visible.
			 */
			dsb_ishst();
		}
		pgt_inc_used_entries(area->pgt);

		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);

	}

	tee_pager_hide_pages();
	ret = true;
out:
	pager_unlock(exceptions);
	return ret;
}
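
/*
 * tee_pager_add_pages() donates the physical pages currently mapped at
 * [vaddr, vaddr + npages * SMALL_PAGE_SIZE) to the pager's pool of
 * pageable frames. With @unmap set the mapping is removed right away and
 * the current contents are dropped; otherwise the pages stay mapped and
 * are simply taken over by the pager area covering them.
 */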
void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
{
	size_t n;

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);

	/* setup memory */
	for (n = 0; n < npages; n++) {
		struct core_mmu_table_info *ti;
		struct tee_pager_pmem *pmem;
		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
		unsigned int pgidx;
		paddr_t pa;
		uint32_t attr;

		ti = find_table_info(va);
		pgidx = core_mmu_va2idx(ti, va);
		/*
		 * Note that we can only support adding pages in the
		 * valid range of this table info, currently not a problem.
		 */
		core_mmu_get_entry(ti, pgidx, &pa, &attr);

		/* Ignore unmapped pages/blocks */
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		pmem = malloc(sizeof(struct tee_pager_pmem));
		if (!pmem)
			panic("out of mem");

		pmem->va_alias = pager_add_alias_page(pa);

		if (unmap) {
			pmem->area = NULL;
			pmem->pgidx = INVALID_PGIDX;
			core_mmu_set_entry(ti, pgidx, 0, 0);
			pgt_dec_used_entries(find_core_pgt(va));
		} else {
			/*
			 * The page is still mapped, let's assign the area
			 * and update the protection bits accordingly.
			 */
			pmem->area = find_area(&tee_pager_area_head, va);
			assert(pmem->area->pgt == find_core_pgt(va));
			pmem->pgidx = pgidx;
			assert(pa == get_pmem_pa(pmem));
			area_set_entry(pmem->area, pgidx, pa,
				       get_area_mattr(pmem->area->flags));
		}

		tee_pager_npages++;
		incr_npages_all();
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	}

	/*
	 * As this is done at init, invalidate all TLBs once instead of
	 * targeting only the modified entries.
	 */
	tlbi_all();
}

#ifdef CFG_PAGED_USER_TA
static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
{
	struct pgt *p = pgt;

	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
		p = SLIST_NEXT(p, link);
	return p;
}

void tee_pager_assign_uta_tables(struct user_ta_ctx *utc)
{
	struct tee_pager_area *area;
	struct pgt *pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);

	TAILQ_FOREACH(area, utc->areas, link) {
		if (!area->pgt)
			area->pgt = find_pgt(pgt, area->base);
		else
			assert(area->pgt == find_pgt(pgt, area->base));
		if (!area->pgt)
			panic();
	}
}

static void pager_save_and_release_entry(struct tee_pager_pmem *pmem)
{
	uint32_t attr;

	assert(pmem->area && pmem->area->pgt);

	area_get_entry(pmem->area, pmem->pgidx, NULL, &attr);
	area_set_entry(pmem->area, pmem->pgidx, 0, 0);
	tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
	tee_pager_save_page(pmem, attr);
	assert(pmem->area->pgt->num_used_entries);
	pmem->area->pgt->num_used_entries--;
	pmem->pgidx = INVALID_PGIDX;
	pmem->area = NULL;
}

void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
{
	struct tee_pager_pmem *pmem;
	struct tee_pager_area *area;
	uint32_t exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);

	if (!pgt->num_used_entries)
		goto out;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		if (!pmem->area || pmem->pgidx == INVALID_PGIDX)
			continue;
		if (pmem->area->pgt == pgt)
			pager_save_and_release_entry(pmem);
	}
	assert(!pgt->num_used_entries);

out:
	if (is_user_ta_ctx(pgt->ctx)) {
		TAILQ_FOREACH(area, to_user_ta_ctx(pgt->ctx)->areas, link) {
			if (area->pgt == pgt)
				area->pgt = NULL;
		}
	}

	pager_unlock(exceptions);
}
KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
#endif /*CFG_PAGED_USER_TA*/

void tee_pager_release_phys(void *addr, size_t size)
{
	bool unmaped = false;
	vaddr_t va = (vaddr_t)addr;
	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
	struct tee_pager_area *area;
	uint32_t exceptions;

	if (end <= begin)
		return;

	exceptions = pager_lock_check_stack(128);

	for (va = begin; va < end; va += SMALL_PAGE_SIZE) {
		area = find_area(&tee_pager_area_head, va);
		if (!area)
			panic();
		unmaped |= tee_pager_release_one_phys(area, va);
	}

	if (unmaped)
		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);

	pager_unlock(exceptions);
}
KEEP_PAGER(tee_pager_release_phys);
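
/*
 * tee_pager_alloc() hands out pager-backed "locked paged" memory: the
 * virtual range is reserved from tee_mm_vcore, backed by a locked fobj
 * and registered as a PAGER_AREA_TYPE_LOCK area, so pages are faulted in
 * on first access and stay resident until tee_pager_release_phys() hands
 * them back to the pageable pool (which discards their contents). A
 * minimal usage sketch, illustrative only:
 *
 *	uint8_t *buf = tee_pager_alloc(4 * SMALL_PAGE_SIZE);
 *
 *	if (!buf)
 *		panic();
 *	// ... use buf; pages are populated on demand ...
 *	tee_pager_release_phys(buf, 4 * SMALL_PAGE_SIZE);
 */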
void *tee_pager_alloc(size_t size)
{
	tee_mm_entry_t *mm = NULL;
	uint8_t *smem = NULL;
	size_t num_pages = 0;
	struct fobj *fobj = NULL;

	if (!size)
		return NULL;

	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
	if (!mm)
		return NULL;

	smem = (uint8_t *)tee_mm_get_smem(mm);
	num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
	fobj = fobj_locked_paged_alloc(num_pages);
	if (!fobj) {
		tee_mm_free(mm);
		return NULL;
	}

	tee_pager_add_core_area((vaddr_t)smem, PAGER_AREA_TYPE_LOCK, fobj);
	fobj_put(fobj);

	asan_tag_access(smem, smem + num_pages * SMALL_PAGE_SIZE);

	return smem;
}