// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2021, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm.h>
#include <assert.h>
#include <io.h>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/asan.h>
#include <kernel/cache_helpers.h>
#include <kernel/linker.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/tlb_helpers.h>
#include <kernel/user_mode_ctx.h>
#include <mm/core_memprot.h>
#include <mm/fobj.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <stdlib.h>
#include <sys/queue.h>
#include <tee_api_defines.h>
#include <trace.h>
#include <types_ext.h>
#include <utee_defines.h>
#include <util.h>

static struct vm_paged_region_head core_vm_regions =
	TAILQ_HEAD_INITIALIZER(core_vm_regions);

#define INVALID_PGIDX		UINT_MAX
#define PMEM_FLAG_DIRTY		BIT(0)
#define PMEM_FLAG_HIDDEN	BIT(1)

/*
 * struct tee_pager_pmem - Represents a physical page used for paging.
 *
 * @flags	flags defined by PMEM_FLAG_* above
 * @fobj_pgidx	index of the page in the @fobj
 * @fobj	File object of which a page is made visible.
 * @va_alias	Virtual address where the physical page is always aliased.
 *		Used during remapping of the page when the content needs to
 *		be updated before it's available at the new location.
 */
struct tee_pager_pmem {
	unsigned int flags;
	unsigned int fobj_pgidx;
	struct fobj *fobj;
	void *va_alias;
	TAILQ_ENTRY(tee_pager_pmem) link;
};

struct tblidx {
	struct pgt *pgt;
	unsigned int idx;
};

/* The list of physical pages. The first page in the list is the oldest */
TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);

static struct tee_pager_pmem_head tee_pager_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);

static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);

/* Number of pages hidden */
#define TEE_PAGER_NHIDE (tee_pager_npages / 3)

/* Number of registered physical pages, used for hiding pages. */
static size_t tee_pager_npages;

/* This area covers the IVs for all fobjs with paged IVs */
static struct vm_paged_region *pager_iv_region;
/* Used by make_iv_available(), see make_iv_available() for details. */
static struct tee_pager_pmem *pager_spare_pmem;

#ifdef CFG_WITH_STATS
static struct tee_pager_stats pager_stats;

static inline void incr_ro_hits(void)
{
	pager_stats.ro_hits++;
}

static inline void incr_rw_hits(void)
{
	pager_stats.rw_hits++;
}

static inline void incr_hidden_hits(void)
{
	pager_stats.hidden_hits++;
}

static inline void incr_zi_released(void)
{
	pager_stats.zi_released++;
}

static inline void incr_npages_all(void)
{
	pager_stats.npages_all++;
}

static inline void set_npages(void)
{
	pager_stats.npages = tee_pager_npages;
}

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	*stats = pager_stats;

	pager_stats.hidden_hits = 0;
	pager_stats.ro_hits = 0;
	pager_stats.rw_hits = 0;
	pager_stats.zi_released = 0;
}

#else /* CFG_WITH_STATS */
static inline void incr_ro_hits(void) { }
static inline void incr_rw_hits(void) { }
static inline void incr_hidden_hits(void) { }
static inline void incr_zi_released(void) { }
static inline void incr_npages_all(void) { }
static inline void set_npages(void) { }

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	memset(stats, 0, sizeof(struct tee_pager_stats));
}
#endif /* CFG_WITH_STATS */

#define TBL_NUM_ENTRIES	(CORE_MMU_PGDIR_SIZE / SMALL_PAGE_SIZE)
#define TBL_LEVEL	CORE_MMU_PGDIR_LEVEL
#define TBL_SHIFT	SMALL_PAGE_SHIFT

#define EFFECTIVE_VA_SIZE \
	(ROUNDUP(VCORE_START_VA + TEE_RAM_VA_SIZE, CORE_MMU_PGDIR_SIZE) - \
	 ROUNDDOWN(VCORE_START_VA, CORE_MMU_PGDIR_SIZE))

static struct pager_table {
	struct pgt pgt;
	struct core_mmu_table_info tbl_info;
} *pager_tables;
static unsigned int num_pager_tables;

static unsigned pager_spinlock = SPINLOCK_UNLOCK;

/* Defines the range of the alias area */
static tee_mm_entry_t *pager_alias_area;
/*
 * Physical pages are added in a stack-like fashion to the alias area.
 * @pager_alias_next_free gives the address of the next free entry if
 * @pager_alias_next_free is != 0.
 */
static uintptr_t pager_alias_next_free;

#ifdef CFG_TEE_CORE_DEBUG
#define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)

static uint32_t pager_lock_dldetect(const char *func, const int line,
				    struct abort_info *ai)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	unsigned int retries = 0;
	unsigned int reminder = 0;

	while (!cpu_spin_trylock(&pager_spinlock)) {
		retries++;
		if (!retries) {
			/* wrapped, time to report */
			trace_printf(func, line, TRACE_ERROR, true,
				     "possible spinlock deadlock reminder %u",
				     reminder);
			if (reminder < UINT_MAX)
				reminder++;
			if (ai)
				abort_print(ai);
		}
	}

	return exceptions;
}
#else
static uint32_t pager_lock(struct abort_info __unused *ai)
{
	return cpu_spin_lock_xsave(&pager_spinlock);
}
#endif

static uint32_t pager_lock_check_stack(size_t stack_size)
{
	if (stack_size) {
		int8_t buf[stack_size];
		size_t n;

		/*
		 * Make sure to touch all pages of the stack that we expect
		 * to use with this lock held. We need to take any page
		 * faults before the lock is taken or we'll deadlock the
		 * pager. The pages that are populated in this way will
		 * eventually be released at certain save transitions of
		 * the thread.
		 */
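		/*
		 * io_write8() is used for these dummy stores so that the
		 * compiler can't optimize the accesses away.
		 */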
		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
			io_write8((vaddr_t)buf + n, 1);
		io_write8((vaddr_t)buf + stack_size - 1, 1);
	}

	return pager_lock(NULL);
}

static void pager_unlock(uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
}

void *tee_pager_phys_to_virt(paddr_t pa)
{
	struct core_mmu_table_info ti;
	unsigned idx;
	uint32_t a;
	paddr_t p;
	vaddr_t v;
	size_t n;

	/*
	 * Most addresses are mapped linearly, try that first if possible.
	 */
	if (!tee_pager_get_table_info(pa, &ti))
		return NULL; /* impossible pa */
	idx = core_mmu_va2idx(&ti, pa);
	core_mmu_get_entry(&ti, idx, &p, &a);
	if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
		return (void *)core_mmu_idx2va(&ti, idx);

	n = 0;
	idx = core_mmu_va2idx(&pager_tables[n].tbl_info, TEE_RAM_VA_START);
	while (true) {
		while (idx < TBL_NUM_ENTRIES) {
			v = core_mmu_idx2va(&pager_tables[n].tbl_info, idx);
			if (v >= (TEE_RAM_VA_START + TEE_RAM_VA_SIZE))
				return NULL;

			core_mmu_get_entry(&pager_tables[n].tbl_info,
					   idx, &p, &a);
			if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
				return (void *)v;
			idx++;
		}

		n++;
		if (n >= num_pager_tables)
			return NULL;
		idx = 0;
	}

	return NULL;
}

static bool pmem_is_hidden(struct tee_pager_pmem *pmem)
{
	return pmem->flags & PMEM_FLAG_HIDDEN;
}

static bool pmem_is_dirty(struct tee_pager_pmem *pmem)
{
	return pmem->flags & PMEM_FLAG_DIRTY;
}

static bool pmem_is_covered_by_region(struct tee_pager_pmem *pmem,
				      struct vm_paged_region *reg)
{
	if (pmem->fobj != reg->fobj)
		return false;
	if (pmem->fobj_pgidx < reg->fobj_pgoffs)
		return false;
	if ((pmem->fobj_pgidx - reg->fobj_pgoffs) >=
	    (reg->size >> SMALL_PAGE_SHIFT))
		return false;

	return true;
}

static size_t get_pgt_count(vaddr_t base, size_t size)
{
	assert(size);

	return (base + size - 1) / CORE_MMU_PGDIR_SIZE + 1 -
	       base / CORE_MMU_PGDIR_SIZE;
}

static bool region_have_pgt(struct vm_paged_region *reg, struct pgt *pgt)
{
	size_t n = 0;

	for (n = 0; n < get_pgt_count(reg->base, reg->size); n++)
		if (reg->pgt_array[n] == pgt)
			return true;

	return false;
}

static struct tblidx pmem_get_region_tblidx(struct tee_pager_pmem *pmem,
					    struct vm_paged_region *reg)
{
	size_t tbloffs = (reg->base & CORE_MMU_PGDIR_MASK) >> SMALL_PAGE_SHIFT;
	size_t idx = pmem->fobj_pgidx - reg->fobj_pgoffs + tbloffs;

	assert(pmem->fobj && pmem->fobj_pgidx != INVALID_PGIDX);
	assert(idx / TBL_NUM_ENTRIES < get_pgt_count(reg->base, reg->size));

	return (struct tblidx){
		.idx = idx % TBL_NUM_ENTRIES,
		.pgt = reg->pgt_array[idx / TBL_NUM_ENTRIES],
	};
}

static struct pager_table *find_pager_table_may_fail(vaddr_t va)
{
	size_t n;
	const vaddr_t mask = CORE_MMU_PGDIR_MASK;

	if (!pager_tables)
		return NULL;

	n = ((va & ~mask) - pager_tables[0].tbl_info.va_base) >>
	    CORE_MMU_PGDIR_SHIFT;
	if (n >= num_pager_tables)
		return NULL;

	assert(va >= pager_tables[n].tbl_info.va_base &&
	       va <= (pager_tables[n].tbl_info.va_base | mask));

	return pager_tables + n;
}

static struct pager_table *find_pager_table(vaddr_t va)
{
	struct pager_table *pt = find_pager_table_may_fail(va);

	assert(pt);
	return pt;
}
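
/*
 * Looks up the translation table covering @va and copies its info into
 * @ti. Returns false if @va isn't covered by any of the pager tables.
 */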
bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti)
{
	struct pager_table *pt = find_pager_table_may_fail(va);

	if (!pt)
		return false;

	*ti = pt->tbl_info;
	return true;
}

static struct core_mmu_table_info *find_table_info(vaddr_t va)
{
	return &find_pager_table(va)->tbl_info;
}

static struct pgt *find_core_pgt(vaddr_t va)
{
	return &find_pager_table(va)->pgt;
}

void tee_pager_set_alias_area(tee_mm_entry_t *mm)
{
	struct pager_table *pt;
	unsigned idx;
	vaddr_t smem = tee_mm_get_smem(mm);
	size_t nbytes = tee_mm_get_bytes(mm);
	vaddr_t v;
	uint32_t a = 0;

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);

	assert(!pager_alias_area);
	pager_alias_area = mm;
	pager_alias_next_free = smem;

	/* Clear all mappings in the alias area */
	pt = find_pager_table(smem);
	idx = core_mmu_va2idx(&pt->tbl_info, smem);
	while (pt <= (pager_tables + num_pager_tables - 1)) {
		while (idx < TBL_NUM_ENTRIES) {
			v = core_mmu_idx2va(&pt->tbl_info, idx);
			if (v >= (smem + nbytes))
				goto out;

			core_mmu_get_entry(&pt->tbl_info, idx, NULL, &a);
			core_mmu_set_entry(&pt->tbl_info, idx, 0, 0);
			if (a & TEE_MATTR_VALID_BLOCK)
				pgt_dec_used_entries(&pt->pgt);
			idx++;
		}

		pt++;
		idx = 0;
	}

out:
	tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);
}

static size_t tbl_usage_count(struct core_mmu_table_info *ti)
{
	size_t n;
	uint32_t a = 0;
	size_t usage = 0;

	for (n = 0; n < ti->num_entries; n++) {
		core_mmu_get_entry(ti, n, NULL, &a);
		if (a & TEE_MATTR_VALID_BLOCK)
			usage++;
	}
	return usage;
}

static void tblidx_get_entry(struct tblidx tblidx, paddr_t *pa, uint32_t *attr)
{
	assert(tblidx.pgt && tblidx.idx < TBL_NUM_ENTRIES);
	core_mmu_get_entry_primitive(tblidx.pgt->tbl, TBL_LEVEL, tblidx.idx,
				     pa, attr);
}

static void tblidx_set_entry(struct tblidx tblidx, paddr_t pa, uint32_t attr)
{
	assert(tblidx.pgt && tblidx.idx < TBL_NUM_ENTRIES);
	core_mmu_set_entry_primitive(tblidx.pgt->tbl, TBL_LEVEL, tblidx.idx,
				     pa, attr);
}

static struct tblidx region_va2tblidx(struct vm_paged_region *reg, vaddr_t va)
{
	paddr_t mask = CORE_MMU_PGDIR_MASK;
	size_t n = 0;

	assert(va >= reg->base && va < (reg->base + reg->size));
	n = (va - (reg->base & ~mask)) / CORE_MMU_PGDIR_SIZE;

	return (struct tblidx){
		.idx = (va & mask) / SMALL_PAGE_SIZE,
		.pgt = reg->pgt_array[n],
	};
}

static vaddr_t tblidx2va(struct tblidx tblidx)
{
	return tblidx.pgt->vabase + (tblidx.idx << SMALL_PAGE_SHIFT);
}

static void tblidx_tlbi_entry(struct tblidx tblidx)
{
	vaddr_t va = tblidx2va(tblidx);

#if defined(CFG_PAGED_USER_TA)
	if (tblidx.pgt->ctx) {
		uint32_t asid = to_user_mode_ctx(tblidx.pgt->ctx)->vm_info.asid;

		tlbi_mva_asid(va, asid);
		return;
	}
#endif
	tlbi_mva_allasid(va);
}
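
/*
 * Binds @pmem to the fobj page that backs @va within @reg. Asserts that
 * no other pmem in the list already holds that fobj page.
 */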
static void pmem_assign_fobj_page(struct tee_pager_pmem *pmem,
				  struct vm_paged_region *reg, vaddr_t va)
{
	struct tee_pager_pmem *p = NULL;
	unsigned int fobj_pgidx = 0;

	assert(!pmem->fobj && pmem->fobj_pgidx == INVALID_PGIDX);

	assert(va >= reg->base && va < (reg->base + reg->size));
	fobj_pgidx = (va - reg->base) / SMALL_PAGE_SIZE + reg->fobj_pgoffs;

	TAILQ_FOREACH(p, &tee_pager_pmem_head, link)
		assert(p->fobj != reg->fobj || p->fobj_pgidx != fobj_pgidx);

	pmem->fobj = reg->fobj;
	pmem->fobj_pgidx = fobj_pgidx;
}

static void pmem_clear(struct tee_pager_pmem *pmem)
{
	pmem->fobj = NULL;
	pmem->fobj_pgidx = INVALID_PGIDX;
	pmem->flags = 0;
}

static void pmem_unmap(struct tee_pager_pmem *pmem, struct pgt *only_this_pgt)
{
	struct vm_paged_region *reg = NULL;
	struct tblidx tblidx = { };
	uint32_t a = 0;

	TAILQ_FOREACH(reg, &pmem->fobj->regions, fobj_link) {
		/*
		 * If only_this_pgt points to a pgt then the pgt of this
		 * region has to match or we'll skip over it.
		 */
		if (only_this_pgt && !region_have_pgt(reg, only_this_pgt))
			continue;
		if (!pmem_is_covered_by_region(pmem, reg))
			continue;
		tblidx = pmem_get_region_tblidx(pmem, reg);
		if (!tblidx.pgt)
			continue;
		tblidx_get_entry(tblidx, NULL, &a);
		if (a & TEE_MATTR_VALID_BLOCK) {
			tblidx_set_entry(tblidx, 0, 0);
			pgt_dec_used_entries(tblidx.pgt);
			tblidx_tlbi_entry(tblidx);
		}
	}
}

void tee_pager_early_init(void)
{
	size_t n = 0;

	num_pager_tables = EFFECTIVE_VA_SIZE / CORE_MMU_PGDIR_SIZE;
	pager_tables = calloc(num_pager_tables, sizeof(*pager_tables));
	if (!pager_tables)
		panic("Cannot allocate pager_tables");

	/*
	 * Note that this depends on add_pager_vaspace() adding vaspace
	 * after end of memory.
	 */
	for (n = 0; n < num_pager_tables; n++) {
		if (!core_mmu_find_table(NULL, VCORE_START_VA +
					 n * CORE_MMU_PGDIR_SIZE, UINT_MAX,
					 &pager_tables[n].tbl_info))
			panic("can't find mmu tables");

		if (pager_tables[n].tbl_info.shift != TBL_SHIFT)
			panic("Unsupported page size in translation table");
		assert(pager_tables[n].tbl_info.num_entries == TBL_NUM_ENTRIES);
		assert(pager_tables[n].tbl_info.level == TBL_LEVEL);

		pager_tables[n].pgt.tbl = pager_tables[n].tbl_info.table;
		pager_tables[n].pgt.vabase = pager_tables[n].tbl_info.va_base;
		pgt_set_used_entries(&pager_tables[n].pgt,
				     tbl_usage_count(&pager_tables[n].tbl_info));
	}
}

static void *pager_add_alias_page(paddr_t pa)
{
	unsigned idx;
	struct core_mmu_table_info *ti;
	/* Alias pages mapped without write permission: runtime will care */
	uint32_t attr = TEE_MATTR_VALID_BLOCK |
			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
			TEE_MATTR_SECURE | TEE_MATTR_PR;

	DMSG("0x%" PRIxPA, pa);

	ti = find_table_info(pager_alias_next_free);
	idx = core_mmu_va2idx(ti, pager_alias_next_free);
	core_mmu_set_entry(ti, idx, pa, attr);
	pgt_inc_used_entries(find_core_pgt(pager_alias_next_free));
	pager_alias_next_free += SMALL_PAGE_SIZE;
	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
				      tee_mm_get_bytes(pager_alias_area)))
		pager_alias_next_free = 0;
	return (void *)core_mmu_idx2va(ti, idx);
}

static void region_insert(struct vm_paged_region_head *regions,
			  struct vm_paged_region *reg,
			  struct vm_paged_region *r_prev)
{
	uint32_t exceptions = pager_lock_check_stack(8);

	if (r_prev)
		TAILQ_INSERT_AFTER(regions, r_prev, reg, link);
	else
		TAILQ_INSERT_HEAD(regions, reg, link);
	TAILQ_INSERT_TAIL(&reg->fobj->regions, reg, fobj_link);

	pager_unlock(exceptions);
}
DECLARE_KEEP_PAGER(region_insert);
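
/*
 * Allocates a zero-initialized region covering [base, base + size) and its
 * pgt_array. Returns NULL if out of memory and panics if @base isn't page
 * aligned or @size is zero.
 */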
static struct vm_paged_region *alloc_region(vaddr_t base, size_t size)
{
	struct vm_paged_region *reg = NULL;

	if ((base & SMALL_PAGE_MASK) || !size) {
		EMSG("invalid pager region [%" PRIxVA " +0x%zx]", base, size);
		panic();
	}

	reg = calloc(1, sizeof(*reg));
	if (!reg)
		return NULL;
	reg->pgt_array = calloc(get_pgt_count(base, size),
				sizeof(struct pgt *));
	if (!reg->pgt_array) {
		free(reg);
		return NULL;
	}

	reg->base = base;
	reg->size = size;
	return reg;
}

void tee_pager_add_core_region(vaddr_t base, enum vm_paged_region_type type,
			       struct fobj *fobj)
{
	struct vm_paged_region *reg = NULL;
	size_t n = 0;

	assert(fobj);

	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : type %d",
	     base, base + fobj->num_pages * SMALL_PAGE_SIZE, type);

	reg = alloc_region(base, fobj->num_pages * SMALL_PAGE_SIZE);
	if (!reg)
		panic("alloc_region");

	reg->fobj = fobj_get(fobj);
	reg->fobj_pgoffs = 0;
	reg->type = type;

	switch (type) {
	case PAGED_REGION_TYPE_RO:
		reg->flags = TEE_MATTR_PRX;
		break;
	case PAGED_REGION_TYPE_RW:
	case PAGED_REGION_TYPE_LOCK:
		reg->flags = TEE_MATTR_PRW;
		break;
	default:
		panic();
	}

	for (n = 0; n < get_pgt_count(reg->base, reg->size); n++)
		reg->pgt_array[n] = find_core_pgt(base +
						  n * CORE_MMU_PGDIR_SIZE);
	region_insert(&core_vm_regions, reg, NULL);
}

static struct vm_paged_region *find_region(struct vm_paged_region_head *regions,
					   vaddr_t va)
{
	struct vm_paged_region *reg;

	if (!regions)
		return NULL;

	TAILQ_FOREACH(reg, regions, link) {
		if (core_is_buffer_inside(va, 1, reg->base, reg->size))
			return reg;
	}
	return NULL;
}

#ifdef CFG_PAGED_USER_TA
static struct vm_paged_region *find_uta_region(vaddr_t va)
{
	struct ts_ctx *ctx = thread_get_tsd()->ctx;

	if (!is_user_mode_ctx(ctx))
		return NULL;
	return find_region(to_user_mode_ctx(ctx)->regions, va);
}
#else
static struct vm_paged_region *find_uta_region(vaddr_t va __unused)
{
	return NULL;
}
#endif /*CFG_PAGED_USER_TA*/

static uint32_t get_region_mattr(uint32_t reg_flags)
{
	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
			(reg_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));

	return attr;
}

static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
{
	struct core_mmu_table_info *ti;
	paddr_t pa;
	unsigned idx;

	ti = find_table_info((vaddr_t)pmem->va_alias);
	idx = core_mmu_va2idx(ti, (vaddr_t)pmem->va_alias);
	core_mmu_get_entry(ti, idx, &pa, NULL);
	return pa;
}

#ifdef CFG_PAGED_USER_TA
static void unlink_region(struct vm_paged_region_head *regions,
			  struct vm_paged_region *reg)
{
	uint32_t exceptions = pager_lock_check_stack(64);

	TAILQ_REMOVE(regions, reg, link);
	TAILQ_REMOVE(&reg->fobj->regions, reg, fobj_link);

	pager_unlock(exceptions);
}
DECLARE_KEEP_PAGER(unlink_region);

static void free_region(struct vm_paged_region *reg)
{
	fobj_put(reg->fobj);
	free(reg->pgt_array);
	free(reg);
}
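
/*
 * Allocates a new region for @fobj at @base with protection @prot and
 * inserts it, sorted by address, into @uctx's region list (allocating the
 * list head on first use). Returns TEE_ERROR_BAD_PARAMETERS if the range
 * overlaps an existing region.
 */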
static TEE_Result pager_add_um_region(struct user_mode_ctx *uctx, vaddr_t base,
				      struct fobj *fobj, uint32_t prot)
{
	struct vm_paged_region *r_prev = NULL;
	struct vm_paged_region *reg = NULL;
	vaddr_t b = base;
	size_t fobj_pgoffs = 0;
	size_t s = fobj->num_pages * SMALL_PAGE_SIZE;

	if (!uctx->regions) {
		uctx->regions = malloc(sizeof(*uctx->regions));
		if (!uctx->regions)
			return TEE_ERROR_OUT_OF_MEMORY;
		TAILQ_INIT(uctx->regions);
	}

	reg = TAILQ_FIRST(uctx->regions);
	while (reg) {
		if (core_is_buffer_intersect(b, s, reg->base, reg->size))
			return TEE_ERROR_BAD_PARAMETERS;
		if (b < reg->base)
			break;
		r_prev = reg;
		reg = TAILQ_NEXT(reg, link);
	}

	reg = alloc_region(b, s);
	if (!reg)
		return TEE_ERROR_OUT_OF_MEMORY;

	/* Table info will be set when the context is activated. */
	reg->fobj = fobj_get(fobj);
	reg->fobj_pgoffs = fobj_pgoffs;
	reg->type = PAGED_REGION_TYPE_RW;
	reg->flags = prot;

	region_insert(uctx->regions, reg, r_prev);

	return TEE_SUCCESS;
}

static void map_pgts(struct vm_paged_region *reg)
{
	struct core_mmu_table_info dir_info = { NULL };
	size_t n = 0;

	core_mmu_get_user_pgdir(&dir_info);

	for (n = 0; n < get_pgt_count(reg->base, reg->size); n++) {
		struct pgt *pgt = reg->pgt_array[n];
		uint32_t attr = 0;
		paddr_t pa = 0;
		size_t idx = 0;

		idx = core_mmu_va2idx(&dir_info, pgt->vabase);
		core_mmu_get_entry(&dir_info, idx, &pa, &attr);

		/*
		 * Check if the page table is already used; if it is, it's
		 * already registered.
		 */
		if (pgt->num_used_entries) {
			assert(attr & TEE_MATTR_TABLE);
			assert(pa == virt_to_phys(pgt->tbl));
			continue;
		}

		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
		pa = virt_to_phys(pgt->tbl);
		assert(pa);
		/*
		 * Note that the update of the table entry is guaranteed to
		 * be atomic.
		 */
		core_mmu_set_entry(&dir_info, idx, pa, attr);
	}
}
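
/*
 * Adds a paged region for @fobj at @base to @uctx. If @uctx is the
 * currently active context the region is also assigned page tables and
 * those tables are mapped in the user page directory.
 */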
TEE_Result tee_pager_add_um_region(struct user_mode_ctx *uctx, vaddr_t base,
				   struct fobj *fobj, uint32_t prot)
{
	TEE_Result res = TEE_SUCCESS;
	struct thread_specific_data *tsd = thread_get_tsd();
	struct vm_paged_region *reg = NULL;

	res = pager_add_um_region(uctx, base, fobj, prot);
	if (res)
		return res;

	if (uctx->ts_ctx == tsd->ctx) {
		/*
		 * We're changing the currently active uctx. Assign page
		 * tables to the new regions and make sure that the page
		 * tables are registered in the upper table.
		 */
		tee_pager_assign_um_tables(uctx);
		TAILQ_FOREACH(reg, uctx->regions, link)
			map_pgts(reg);
	}

	return TEE_SUCCESS;
}

static void split_region(struct vm_paged_region_head *regions,
			 struct vm_paged_region *reg,
			 struct vm_paged_region *r2, vaddr_t va)
{
	uint32_t exceptions = pager_lock_check_stack(64);
	size_t diff = va - reg->base;
	size_t r2_pgt_count = 0;
	size_t reg_pgt_count = 0;
	size_t n0 = 0;
	size_t n = 0;

	assert(r2->base == va);
	assert(r2->size == reg->size - diff);

	r2->fobj = fobj_get(reg->fobj);
	r2->fobj_pgoffs = reg->fobj_pgoffs + diff / SMALL_PAGE_SIZE;
	r2->type = reg->type;
	r2->flags = reg->flags;

	r2_pgt_count = get_pgt_count(r2->base, r2->size);
	reg_pgt_count = get_pgt_count(reg->base, reg->size);
	n0 = reg_pgt_count - r2_pgt_count;
	for (n = n0; n < reg_pgt_count; n++)
		r2->pgt_array[n - n0] = reg->pgt_array[n];
	reg->size = diff;

	TAILQ_INSERT_AFTER(regions, reg, r2, link);
	TAILQ_INSERT_AFTER(&reg->fobj->regions, reg, r2, fobj_link);

	pager_unlock(exceptions);
}
DECLARE_KEEP_PAGER(split_region);

TEE_Result tee_pager_split_um_region(struct user_mode_ctx *uctx, vaddr_t va)
{
	struct vm_paged_region *reg = NULL;
	struct vm_paged_region *r2 = NULL;

	if (va & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	TAILQ_FOREACH(reg, uctx->regions, link) {
		if (va == reg->base || va == reg->base + reg->size)
			return TEE_SUCCESS;
		if (va > reg->base && va < reg->base + reg->size) {
			size_t diff = va - reg->base;

			r2 = alloc_region(va, reg->size - diff);
			if (!r2)
				return TEE_ERROR_OUT_OF_MEMORY;
			split_region(uctx->regions, reg, r2, va);
			return TEE_SUCCESS;
		}
	}

	return TEE_SUCCESS;
}

static struct pgt **
merge_region_with_next(struct vm_paged_region_head *regions,
		       struct vm_paged_region *reg,
		       struct vm_paged_region *r_next, struct pgt **pgt_array)
{
	uint32_t exceptions = pager_lock_check_stack(64);
	struct pgt **old_pgt_array = reg->pgt_array;

	reg->pgt_array = pgt_array;
	TAILQ_REMOVE(regions, r_next, link);
	TAILQ_REMOVE(&r_next->fobj->regions, r_next, fobj_link);

	pager_unlock(exceptions);
	return old_pgt_array;
}
DECLARE_KEEP_PAGER(merge_region_with_next);
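
/*
 * Allocates the pgt_array for the region that results from merging @a and
 * @a_next, handling the case where the two regions share the translation
 * table at the boundary. Returns NULL on allocation failure or if a shared
 * pgt doesn't match.
 */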
static struct pgt **alloc_merged_pgt_array(struct vm_paged_region *a,
					   struct vm_paged_region *a_next)
{
	size_t a_next_pgt_count = get_pgt_count(a_next->base, a_next->size);
	size_t a_pgt_count = get_pgt_count(a->base, a->size);
	size_t pgt_count = get_pgt_count(a->base, a->size + a_next->size);
	struct pgt **pgt_array = NULL;
	bool have_shared_pgt = false;

	have_shared_pgt = ((a->base + a->size) & ~CORE_MMU_PGDIR_MASK) ==
			  (a_next->base & ~CORE_MMU_PGDIR_MASK);

	if (have_shared_pgt)
		assert(pgt_count == a_pgt_count + a_next_pgt_count - 1);
	else
		assert(pgt_count == a_pgt_count + a_next_pgt_count);

	/* In case there's a shared pgt they must match */
	if (have_shared_pgt &&
	    a->pgt_array[a_pgt_count - 1] != a_next->pgt_array[0])
		return NULL;

	pgt_array = calloc(sizeof(struct pgt *), pgt_count);
	if (!pgt_array)
		return NULL;

	/*
	 * Copy and merge the two pgt_arrays, note the special case
	 * where a pgt is shared.
	 */
	memcpy(pgt_array, a->pgt_array, a_pgt_count * sizeof(struct pgt *));
	if (have_shared_pgt)
		memcpy(pgt_array + a_pgt_count, a_next->pgt_array + 1,
		       (a_next_pgt_count - 1) * sizeof(struct pgt *));
	else
		memcpy(pgt_array + a_pgt_count, a_next->pgt_array,
		       a_next_pgt_count * sizeof(struct pgt *));

	return pgt_array;
}

void tee_pager_merge_um_region(struct user_mode_ctx *uctx, vaddr_t va,
			       size_t len)
{
	struct vm_paged_region *r_next = NULL;
	struct vm_paged_region *reg = NULL;
	struct pgt **pgt_array = NULL;
	vaddr_t end_va = 0;

	if ((va | len) & SMALL_PAGE_MASK)
		return;
	if (ADD_OVERFLOW(va, len, &end_va))
		return;

	for (reg = TAILQ_FIRST(uctx->regions);; reg = r_next) {
		r_next = TAILQ_NEXT(reg, link);
		if (!r_next)
			return;

		/* Try merging with the area just before va */
		if (reg->base + reg->size < va)
			continue;

		/*
		 * If reg->base is well past our range we're done.
		 * Note that if it's just the page after our range we'll
		 * try to merge.
		 */
		if (reg->base > end_va)
			return;

		if (reg->base + reg->size != r_next->base)
			continue;
		if (reg->fobj != r_next->fobj || reg->type != r_next->type ||
		    reg->flags != r_next->flags)
			continue;
		if (reg->fobj_pgoffs + reg->size / SMALL_PAGE_SIZE !=
		    r_next->fobj_pgoffs)
			continue;

		pgt_array = alloc_merged_pgt_array(reg, r_next);
		if (!pgt_array)
			continue;

		/*
		 * merge_region_with_next() returns the old pgt array which
		 * was replaced in reg. We don't want to call free()
		 * directly from merge_region_with_next() since that would
		 * pull free() and its dependencies into the unpaged area.
		 */
		free(merge_region_with_next(uctx->regions, reg, r_next,
					    pgt_array));
		free_region(r_next);
		r_next = reg;
	}
}

static void rem_region(struct vm_paged_region_head *regions,
		       struct vm_paged_region *reg)
{
	struct tee_pager_pmem *pmem;
	size_t last_pgoffs = reg->fobj_pgoffs +
			     (reg->size >> SMALL_PAGE_SHIFT) - 1;
	uint32_t exceptions;
	struct tblidx tblidx = { };
	uint32_t a = 0;

	exceptions = pager_lock_check_stack(64);

	TAILQ_REMOVE(regions, reg, link);
	TAILQ_REMOVE(&reg->fobj->regions, reg, fobj_link);

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		if (pmem->fobj != reg->fobj ||
		    pmem->fobj_pgidx < reg->fobj_pgoffs ||
		    pmem->fobj_pgidx > last_pgoffs)
			continue;

		tblidx = pmem_get_region_tblidx(pmem, reg);
		tblidx_get_entry(tblidx, NULL, &a);
		if (!(a & TEE_MATTR_VALID_BLOCK))
			continue;

		tblidx_set_entry(tblidx, 0, 0);
		tblidx_tlbi_entry(tblidx);
		pgt_dec_used_entries(tblidx.pgt);
	}

	pager_unlock(exceptions);

	free_region(reg);
}
DECLARE_KEEP_PAGER(rem_region);
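
/*
 * Removes all of @uctx's paged regions that lie entirely inside
 * [base, base + size) and invalidates the TLB for the context's ASID.
 */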
void tee_pager_rem_um_region(struct user_mode_ctx *uctx, vaddr_t base,
			     size_t size)
{
	struct vm_paged_region *reg;
	struct vm_paged_region *r_next;
	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);

	TAILQ_FOREACH_SAFE(reg, uctx->regions, link, r_next) {
		if (core_is_buffer_inside(reg->base, reg->size, base, s))
			rem_region(uctx->regions, reg);
	}
	tlbi_asid(uctx->vm_info.asid);
}

void tee_pager_rem_um_regions(struct user_mode_ctx *uctx)
{
	struct vm_paged_region *reg = NULL;

	if (!uctx->regions)
		return;

	while (true) {
		reg = TAILQ_FIRST(uctx->regions);
		if (!reg)
			break;
		unlink_region(uctx->regions, reg);
		free_region(reg);
	}

	free(uctx->regions);
}

static bool __maybe_unused same_context(struct tee_pager_pmem *pmem)
{
	struct vm_paged_region *reg = TAILQ_FIRST(&pmem->fobj->regions);
	void *ctx = reg->pgt_array[0]->ctx;

	do {
		reg = TAILQ_NEXT(reg, fobj_link);
		if (!reg)
			return true;
	} while (reg->pgt_array[0]->ctx == ctx);

	return false;
}

bool tee_pager_set_um_region_attr(struct user_mode_ctx *uctx, vaddr_t base,
				  size_t size, uint32_t flags)
{
	bool ret = false;
	vaddr_t b = base;
	size_t s = size;
	size_t s2 = 0;
	struct vm_paged_region *reg = find_region(uctx->regions, b);
	uint32_t exceptions = 0;
	struct tee_pager_pmem *pmem = NULL;
	uint32_t a = 0;
	uint32_t f = 0;
	uint32_t mattr = 0;
	uint32_t f2 = 0;
	struct tblidx tblidx = { };

	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
	if (f & TEE_MATTR_UW)
		f |= TEE_MATTR_PW;
	mattr = get_region_mattr(f);

	exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);

	while (s) {
		if (!reg) {
			ret = false;
			goto out;
		}
		s2 = MIN(reg->size, s);
		b += s2;
		s -= s2;

		if (reg->flags == f)
			goto next_region;

		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
			if (!pmem_is_covered_by_region(pmem, reg))
				continue;

			tblidx = pmem_get_region_tblidx(pmem, reg);
			tblidx_get_entry(tblidx, NULL, &a);
			if (a == f)
				continue;
			tblidx_set_entry(tblidx, 0, 0);
			tblidx_tlbi_entry(tblidx);

			pmem->flags &= ~PMEM_FLAG_HIDDEN;
			if (pmem_is_dirty(pmem))
				f2 = mattr;
			else
				f2 = mattr & ~(TEE_MATTR_UW | TEE_MATTR_PW);
			tblidx_set_entry(tblidx, get_pmem_pa(pmem), f2);
			if (!(a & TEE_MATTR_VALID_BLOCK))
				pgt_inc_used_entries(tblidx.pgt);
			/*
			 * Make sure the table update is visible before
			 * continuing.
			 */
			dsb_ishst();

			/*
			 * Here's a problem if this page already is shared.
			 * We would need to do an icache invalidate for each
			 * context in which it is shared. In practice this
			 * will never happen.
			 */
			if (flags & TEE_MATTR_UX) {
				void *va = (void *)tblidx2va(tblidx);

				/* Assert that the pmem isn't shared. */
				assert(same_context(pmem));

				dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
				icache_inv_user_range(va, SMALL_PAGE_SIZE);
			}
		}

		reg->flags = f;
next_region:
		reg = TAILQ_NEXT(reg, link);
	}

	ret = true;
out:
	pager_unlock(exceptions);
	return ret;
}

DECLARE_KEEP_PAGER(tee_pager_set_um_region_attr);
#endif /*CFG_PAGED_USER_TA*/

void tee_pager_invalidate_fobj(struct fobj *fobj)
{
	struct tee_pager_pmem *pmem;
	uint32_t exceptions;

	exceptions = pager_lock_check_stack(64);

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link)
		if (pmem->fobj == fobj)
			pmem_clear(pmem);

	pager_unlock(exceptions);
}
DECLARE_KEEP_PAGER(tee_pager_invalidate_fobj);

static struct tee_pager_pmem *pmem_find(struct vm_paged_region *reg, vaddr_t va)
{
	struct tee_pager_pmem *pmem = NULL;
	size_t fobj_pgidx = 0;

	assert(va >= reg->base && va < (reg->base + reg->size));
	fobj_pgidx = (va - reg->base) / SMALL_PAGE_SIZE + reg->fobj_pgoffs;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link)
		if (pmem->fobj == reg->fobj && pmem->fobj_pgidx == fobj_pgidx)
			return pmem;

	return NULL;
}

static bool tee_pager_unhide_page(struct vm_paged_region *reg, vaddr_t page_va)
{
	struct tblidx tblidx = region_va2tblidx(reg, page_va);
	struct tee_pager_pmem *pmem = pmem_find(reg, page_va);
	uint32_t a = get_region_mattr(reg->flags);
	uint32_t attr = 0;
	paddr_t pa = 0;

	if (!pmem)
		return false;

	tblidx_get_entry(tblidx, NULL, &attr);
	if (attr & TEE_MATTR_VALID_BLOCK)
		return false;

	/*
	 * The page is hidden, or not mapped yet. Unhide the page and
	 * move it to the tail.
	 *
	 * Since the page isn't mapped there doesn't exist a valid TLB entry
	 * for this address, so no TLB invalidation is required after setting
	 * the new entry. A DSB is needed though, to make the write visible.
	 *
	 * For user executable pages it's more complicated. Those pages can
	 * be shared between multiple TA mappings and thus populated by
	 * another TA. The reference manual states that:
	 *
	 * "instruction cache maintenance is required only after writing
	 * new data to a physical address that holds an instruction."
	 *
	 * So for hidden pages we would not need to invalidate the i-cache,
	 * but for newly populated pages we do. Since we don't know which
	 * case we have, we have to assume the worst and always invalidate
	 * the i-cache. We don't need to clean the d-cache though, since
	 * that has already been done earlier.
	 *
	 * Additional bookkeeping to tell if the i-cache invalidation is
	 * needed or not is left as a future optimization.
	 */

	/* If it's not a dirty block, then it should be read only. */
	if (!pmem_is_dirty(pmem))
		a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);

	pa = get_pmem_pa(pmem);
	pmem->flags &= ~PMEM_FLAG_HIDDEN;
	if (reg->flags & TEE_MATTR_UX) {
		void *va = (void *)tblidx2va(tblidx);

		/* Set a temporary read-only mapping */
		assert(!(a & (TEE_MATTR_UW | TEE_MATTR_PW)));
		tblidx_set_entry(tblidx, pa, a & ~TEE_MATTR_UX);
		dsb_ishst();

		icache_inv_user_range(va, SMALL_PAGE_SIZE);

		/* Set the final mapping */
		tblidx_set_entry(tblidx, pa, a);
		tblidx_tlbi_entry(tblidx);
	} else {
		tblidx_set_entry(tblidx, pa, a);
		dsb_ishst();
	}
	pgt_inc_used_entries(tblidx.pgt);

	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
	TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	incr_hidden_hits();
	return true;
}

static void tee_pager_hide_pages(void)
{
	struct tee_pager_pmem *pmem = NULL;
	size_t n = 0;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		if (n >= TEE_PAGER_NHIDE)
			break;
		n++;

		/* We cannot hide pages when pmem->fobj is not defined. */
		if (!pmem->fobj)
			continue;

		if (pmem_is_hidden(pmem))
			continue;

		pmem->flags |= PMEM_FLAG_HIDDEN;
		pmem_unmap(pmem, NULL);
	}
}

static unsigned int __maybe_unused
num_regions_with_pmem(struct tee_pager_pmem *pmem)
{
	struct vm_paged_region *reg = NULL;
	unsigned int num_matches = 0;

	TAILQ_FOREACH(reg, &pmem->fobj->regions, fobj_link)
		if (pmem_is_covered_by_region(pmem, reg))
			num_matches++;

	return num_matches;
}

/*
 * Find a mapped pmem, hide it and move it to the pageable pmem list.
 * Return false if the page was not mapped, and true if it was.
 */
static bool tee_pager_release_one_phys(struct vm_paged_region *reg,
				       vaddr_t page_va)
{
	struct tee_pager_pmem *pmem = NULL;
	struct tblidx tblidx = { };
	size_t fobj_pgidx = 0;

	assert(page_va >= reg->base && page_va < (reg->base + reg->size));
	fobj_pgidx = (page_va - reg->base) / SMALL_PAGE_SIZE +
		     reg->fobj_pgoffs;

	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
		if (pmem->fobj != reg->fobj || pmem->fobj_pgidx != fobj_pgidx)
			continue;

		/*
		 * Locked pages may not be shared. We're asserting that the
		 * number of regions using this pmem is one and only one as
		 * we're about to unmap it.
		 */
		assert(num_regions_with_pmem(pmem) == 1);

		tblidx = pmem_get_region_tblidx(pmem, reg);
		tblidx_set_entry(tblidx, 0, 0);
		pgt_dec_used_entries(tblidx.pgt);
		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
		pmem_clear(pmem);
		tee_pager_npages++;
		set_npages();
		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
		incr_zi_released();
		return true;
	}

	return false;
}

static void pager_deploy_page(struct tee_pager_pmem *pmem,
			      struct vm_paged_region *reg, vaddr_t page_va,
			      bool clean_user_cache, bool writable)
{
	struct tblidx tblidx = region_va2tblidx(reg, page_va);
	uint32_t attr = get_region_mattr(reg->flags);
	struct core_mmu_table_info *ti = NULL;
	uint8_t *va_alias = pmem->va_alias;
	paddr_t pa = get_pmem_pa(pmem);
	unsigned int idx_alias = 0;
	uint32_t attr_alias = 0;
	paddr_t pa_alias = 0;

	/* Ensure we are allowed to write to aliased virtual page */
	ti = find_table_info((vaddr_t)va_alias);
	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
	if (!(attr_alias & TEE_MATTR_PW)) {
		attr_alias |= TEE_MATTR_PW;
		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
		tlbi_mva_allasid((vaddr_t)va_alias);
	}

	asan_tag_access(va_alias, va_alias + SMALL_PAGE_SIZE);
	if (fobj_load_page(pmem->fobj, pmem->fobj_pgidx, va_alias)) {
		EMSG("PH 0x%" PRIxVA " failed", page_va);
		panic();
	}
	switch (reg->type) {
	case PAGED_REGION_TYPE_RO:
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
		incr_ro_hits();
		/* Forbid write to aliases for read-only (maybe exec) pages */
		attr_alias &= ~TEE_MATTR_PW;
		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
		tlbi_mva_allasid((vaddr_t)va_alias);
		break;
	case PAGED_REGION_TYPE_RW:
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
		if (writable && (attr & (TEE_MATTR_PW | TEE_MATTR_UW)))
			pmem->flags |= PMEM_FLAG_DIRTY;
		incr_rw_hits();
		break;
	case PAGED_REGION_TYPE_LOCK:
		/* Move page to lock list */
		if (tee_pager_npages <= 0)
			panic("Running out of pages");
		tee_pager_npages--;
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
		break;
	default:
		panic();
	}
	asan_tag_no_access(va_alias, va_alias + SMALL_PAGE_SIZE);

	if (!writable)
		attr &= ~(TEE_MATTR_PW | TEE_MATTR_UW);

	/*
	 * We've updated the page using the aliased mapping and
	 * some cache maintenance is now needed if it's an
	 * executable page.
	 *
	 * Since the d-cache is a Physically-indexed,
	 * physically-tagged (PIPT) cache we can clean either the
	 * aliased address or the real virtual address. In this
	 * case we choose the real virtual address.
	 *
	 * The i-cache can also be PIPT, but may be something else
	 * too like VIPT. The current code requires the caches to
	 * implement the IVIPT extension, that is:
	 * "instruction cache maintenance is required only after
	 * writing new data to a physical address that holds an
	 * instruction."
	 *
	 * To portably invalidate the icache the page has to
	 * be mapped at the final virtual address but not
	 * executable.
	 */
	if (reg->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
		uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
				TEE_MATTR_PW | TEE_MATTR_UW;
		void *va = (void *)page_va;

		/* Set a temporary read-only mapping */
		tblidx_set_entry(tblidx, pa, attr & ~mask);
		tblidx_tlbi_entry(tblidx);

		dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
		if (clean_user_cache)
			icache_inv_user_range(va, SMALL_PAGE_SIZE);
		else
			icache_inv_range(va, SMALL_PAGE_SIZE);

		/* Set the final mapping */
		tblidx_set_entry(tblidx, pa, attr);
		tblidx_tlbi_entry(tblidx);
	} else {
		tblidx_set_entry(tblidx, pa, attr);
		/*
		 * No need to flush TLB for this entry, it was
		 * invalid. We should use a barrier though, to make
		 * sure that the change is visible.
		 */
		dsb_ishst();
	}
	pgt_inc_used_entries(tblidx.pgt);

	FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);
}

static void make_dirty_page(struct tee_pager_pmem *pmem,
			    struct vm_paged_region *reg, struct tblidx tblidx,
			    paddr_t pa)
{
	assert(reg->flags & (TEE_MATTR_UW | TEE_MATTR_PW));
	assert(!(pmem->flags & PMEM_FLAG_DIRTY));

	FMSG("Dirty %#"PRIxVA, tblidx2va(tblidx));
	pmem->flags |= PMEM_FLAG_DIRTY;
	tblidx_set_entry(tblidx, pa, get_region_mattr(reg->flags));
	tblidx_tlbi_entry(tblidx);
}

/*
 * This function takes a reference to a page (@fobj + fobj_pgidx) and makes
 * the corresponding IV available.
 *
 * In case the page needs to be saved the IV must be writable, and
 * consequently the page holding the IV is made dirty. If the page instead
 * only is to be verified it's enough that the page holding the IV is
 * read-only and thus doesn't have to be made dirty too.
 *
 * This function depends on pager_spare_pmem pointing to a free pmem when
 * entered. In case the page holding the needed IV isn't mapped this spare
 * pmem is used to map the page. If this function has used pager_spare_pmem
 * and assigned it to NULL it must be reassigned with a new free pmem
 * before this function can be called again.
 */
static void make_iv_available(struct fobj *fobj, unsigned int fobj_pgidx,
			      bool writable)
{
	struct vm_paged_region *reg = pager_iv_region;
	struct tee_pager_pmem *pmem = NULL;
	struct tblidx tblidx = { };
	vaddr_t page_va = 0;
	uint32_t attr = 0;
	paddr_t pa = 0;

	page_va = fobj_get_iv_vaddr(fobj, fobj_pgidx) & ~SMALL_PAGE_MASK;
	if (!IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) || !page_va) {
		assert(!page_va);
		return;
	}

	assert(reg && reg->type == PAGED_REGION_TYPE_RW);
	assert(pager_spare_pmem);
	assert(core_is_buffer_inside(page_va, 1, reg->base, reg->size));

	tblidx = region_va2tblidx(reg, page_va);
	/*
	 * We don't care if tee_pager_unhide_page() succeeds or not, we're
	 * still checking the attributes afterwards.
	 */
	tee_pager_unhide_page(reg, page_va);
	tblidx_get_entry(tblidx, &pa, &attr);
	if (!(attr & TEE_MATTR_VALID_BLOCK)) {
		/*
		 * We're using the spare pmem to map the IV corresponding
		 * to another page.
		 */
		pmem = pager_spare_pmem;
		pager_spare_pmem = NULL;
		pmem_assign_fobj_page(pmem, reg, page_va);

		if (writable)
			pmem->flags |= PMEM_FLAG_DIRTY;

		pager_deploy_page(pmem, reg, page_va,
				  false /*!clean_user_cache*/, writable);
	} else if (writable && !(attr & TEE_MATTR_PW)) {
		pmem = pmem_find(reg, page_va);
		/* Note that pa is valid since TEE_MATTR_VALID_BLOCK is set */
		make_dirty_page(pmem, reg, tblidx, pa);
	}
}

static void pager_get_page(struct vm_paged_region *reg, struct abort_info *ai,
			   bool clean_user_cache)
{
	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
	struct tblidx tblidx = region_va2tblidx(reg, page_va);
	struct tee_pager_pmem *pmem = NULL;
	bool writable = false;
	uint32_t attr = 0;

	/*
	 * Get a pmem to load code and data into, also make sure
	 * the corresponding IV page is available.
	 */
	while (true) {
		pmem = TAILQ_FIRST(&tee_pager_pmem_head);
		if (!pmem) {
			EMSG("No pmem entries");
			abort_print(ai);
			panic();
		}

		if (pmem->fobj) {
			pmem_unmap(pmem, NULL);
			if (pmem_is_dirty(pmem)) {
				uint8_t *va = pmem->va_alias;

				make_iv_available(pmem->fobj, pmem->fobj_pgidx,
						  true /*writable*/);
				asan_tag_access(va, va + SMALL_PAGE_SIZE);
				if (fobj_save_page(pmem->fobj, pmem->fobj_pgidx,
						   pmem->va_alias))
					panic("fobj_save_page");
				asan_tag_no_access(va, va + SMALL_PAGE_SIZE);

				pmem_clear(pmem);

				/*
				 * If the spare pmem was used by
				 * make_iv_available() we need to replace
				 * it with the just freed pmem.
				 *
				 * See make_iv_available() for details.
				 */
				if (IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) &&
				    !pager_spare_pmem) {
					TAILQ_REMOVE(&tee_pager_pmem_head,
						     pmem, link);
					pager_spare_pmem = pmem;
					pmem = NULL;
				}

				/*
				 * Check if the needed virtual page was
				 * made available as a side effect of the
				 * call to make_iv_available() above. If so
				 * we're done.
				 */
				tblidx_get_entry(tblidx, NULL, &attr);
				if (attr & TEE_MATTR_VALID_BLOCK)
					return;

				/*
				 * The freed pmem was used to replace the
				 * consumed pager_spare_pmem above. Restart
				 * to find another pmem.
				 */
				if (!pmem)
					continue;
			}
		}

		TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
		pmem_clear(pmem);

		pmem_assign_fobj_page(pmem, reg, page_va);
		make_iv_available(pmem->fobj, pmem->fobj_pgidx,
				  false /*!writable*/);
		if (!IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) || pager_spare_pmem)
			break;

		/*
		 * The spare pmem was used by make_iv_available(). We need
		 * to replace it with the just freed pmem. And get another
		 * pmem.
		 *
		 * See make_iv_available() for details.
		 */
		pmem_clear(pmem);
		pager_spare_pmem = pmem;
	}

	/*
	 * PAGED_REGION_TYPE_LOCK pages are always writable while
	 * PAGED_REGION_TYPE_RO pages are never writable.
	 *
	 * Pages from PAGED_REGION_TYPE_RW start read-only to be
	 * able to tell when they are updated and should be tagged
	 * as dirty.
	 */
	if (reg->type == PAGED_REGION_TYPE_LOCK ||
	    (reg->type == PAGED_REGION_TYPE_RW && abort_is_write_fault(ai)))
		writable = true;
	else
		writable = false;

	pager_deploy_page(pmem, reg, page_va, clean_user_cache, writable);
}

static bool pager_update_permissions(struct vm_paged_region *reg,
				     struct abort_info *ai, bool *handled)
{
	struct tblidx tblidx = region_va2tblidx(reg, ai->va);
	struct tee_pager_pmem *pmem = NULL;
	uint32_t attr = 0;
	paddr_t pa = 0;

	*handled = false;

	tblidx_get_entry(tblidx, &pa, &attr);

	/* Not mapped */
	if (!(attr & TEE_MATTR_VALID_BLOCK))
		return false;

	/* Not readable, should not happen */
	if (abort_is_user_exception(ai)) {
		if (!(attr & TEE_MATTR_UR))
			return true;
	} else {
		if (!(attr & TEE_MATTR_PR)) {
			abort_print_error(ai);
			panic();
		}
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
			/* Check attempting to execute from an NOX page */
			if (abort_is_user_exception(ai)) {
				if (!(attr & TEE_MATTR_UX))
					return true;
			} else {
				if (!(attr & TEE_MATTR_PX)) {
					abort_print_error(ai);
					panic();
				}
			}
		}
		/* Since the page is mapped now it's OK */
		break;
	case CORE_MMU_FAULT_WRITE_PERMISSION:
		/* Check attempting to write to an RO page */
		pmem = pmem_find(reg, ai->va);
		if (!pmem)
			panic();
		if (abort_is_user_exception(ai)) {
			if (!(reg->flags & TEE_MATTR_UW))
				return true;
			if (!(attr & TEE_MATTR_UW))
				make_dirty_page(pmem, reg, tblidx, pa);
		} else {
			if (!(reg->flags & TEE_MATTR_PW)) {
				abort_print_error(ai);
				panic();
			}
			if (!(attr & TEE_MATTR_PW))
				make_dirty_page(pmem, reg, tblidx, pa);
		}
		/* Since the permissions have been updated now it's OK */
		break;
	default:
		/* Some fault we can't deal with */
		if (abort_is_user_exception(ai))
			return true;
		abort_print_error(ai);
		panic();
	}
	*handled = true;
	return true;
}

#ifdef CFG_TEE_CORE_DEBUG
static void stat_handle_fault(void)
{
	static size_t num_faults;
	static size_t min_npages = SIZE_MAX;
	static size_t total_min_npages = SIZE_MAX;

	num_faults++;
	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
		DMSG("nfaults %zu npages %zu (min %zu)",
		     num_faults, tee_pager_npages, min_npages);
		min_npages = tee_pager_npages; /* reset */
	}
	if (tee_pager_npages < min_npages)
		min_npages = tee_pager_npages;
	if (tee_pager_npages < total_min_npages)
		total_min_npages = tee_pager_npages;
}
#else
static void stat_handle_fault(void)
{
}
#endif
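
/*
 * Called from the abort handler when a fault hits a paged address.
 * Returns true if the fault was handled (the page was unhidden, had its
 * permissions updated or was paged in) and false if the faulting address
 * isn't covered by any paged region.
 */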
bool tee_pager_handle_fault(struct abort_info *ai)
{
	struct vm_paged_region *reg;
	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
	uint32_t exceptions;
	bool ret;
	bool clean_user_cache = false;

#ifdef TEE_PAGER_DEBUG_PRINT
	if (!abort_is_user_exception(ai))
		abort_print(ai);
#endif

	/*
	 * We're updating pages that can affect several active CPUs at a
	 * time below. We end up here because a thread tries to access some
	 * memory that isn't available. We have to be careful when making
	 * that memory available as other threads may succeed in accessing
	 * that address the moment after we've made it available.
	 *
	 * That means that we can't just map the memory and populate the
	 * page, instead we use the aliased mapping to populate the page
	 * and once everything is ready we map it.
	 */
	exceptions = pager_lock(ai);

	stat_handle_fault();

	/* check if the access is valid */
	if (abort_is_user_exception(ai)) {
		reg = find_uta_region(ai->va);
		clean_user_cache = true;
	} else {
		reg = find_region(&core_vm_regions, ai->va);
		if (!reg) {
			reg = find_uta_region(ai->va);
			clean_user_cache = true;
		}
	}
	if (!reg || !reg->pgt_array[0]) {
		ret = false;
		goto out;
	}

	if (tee_pager_unhide_page(reg, page_va))
		goto out_success;

	/*
	 * The page wasn't hidden, but some other core may have
	 * updated the table entry before we got here or we need
	 * to make a read-only page read-write (dirty).
	 */
	if (pager_update_permissions(reg, ai, &ret)) {
		/*
		 * Nothing more to do with the abort. The problem
		 * could already have been dealt with from another
		 * core or if ret is false the TA will be panicked.
		 */
		goto out;
	}

	pager_get_page(reg, ai, clean_user_cache);

out_success:
	tee_pager_hide_pages();
	ret = true;
out:
	pager_unlock(exceptions);
	return ret;
}

void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
{
	size_t n = 0;

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);

	/* setup memory */
	for (n = 0; n < npages; n++) {
		struct core_mmu_table_info *ti = NULL;
		struct tee_pager_pmem *pmem = NULL;
		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
		struct tblidx tblidx = { };
		unsigned int pgidx = 0;
		paddr_t pa = 0;
		uint32_t attr = 0;

		ti = find_table_info(va);
		pgidx = core_mmu_va2idx(ti, va);
		/*
		 * Note that we can only support adding pages in the
		 * valid range of this table info, currently not a problem.
		 */
		core_mmu_get_entry(ti, pgidx, &pa, &attr);

		/* Ignore unmapped pages/blocks */
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		pmem = calloc(1, sizeof(struct tee_pager_pmem));
		if (!pmem)
			panic("out of mem");
		pmem_clear(pmem);

		pmem->va_alias = pager_add_alias_page(pa);

		if (unmap) {
			core_mmu_set_entry(ti, pgidx, 0, 0);
			pgt_dec_used_entries(find_core_pgt(va));
		} else {
			struct vm_paged_region *reg = NULL;

			/*
			 * The page is still mapped, let's assign the region
			 * and update the protection bits accordingly.
			 */
			reg = find_region(&core_vm_regions, va);
			assert(reg);
			pmem_assign_fobj_page(pmem, reg, va);
			tblidx = pmem_get_region_tblidx(pmem, reg);
			assert(tblidx.pgt == find_core_pgt(va));
			assert(pa == get_pmem_pa(pmem));
			tblidx_set_entry(tblidx, pa,
					 get_region_mattr(reg->flags));
		}

		if (unmap && IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) &&
		    !pager_spare_pmem) {
			pager_spare_pmem = pmem;
		} else {
			tee_pager_npages++;
			incr_npages_all();
			set_npages();
			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
		}
	}

	/*
	 * As this is done at inits, invalidate all TLBs once instead of
	 * targeting only the modified entries.
	 */
	tlbi_all();
}

#ifdef CFG_PAGED_USER_TA
static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
{
	struct pgt *p = pgt;

	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
		p = SLIST_NEXT(p, link);
	return p;
}

void tee_pager_assign_um_tables(struct user_mode_ctx *uctx)
{
	struct vm_paged_region *reg = NULL;
	struct pgt *pgt = NULL;
	size_t n = 0;

	if (!uctx->regions)
		return;

	pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
	TAILQ_FOREACH(reg, uctx->regions, link) {
		for (n = 0; n < get_pgt_count(reg->base, reg->size); n++) {
			vaddr_t va = reg->base + CORE_MMU_PGDIR_SIZE * n;
			struct pgt *p __maybe_unused = find_pgt(pgt, va);

			if (!reg->pgt_array[n])
				reg->pgt_array[n] = p;
			else
				assert(reg->pgt_array[n] == p);
		}
	}
}

void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
{
	struct tee_pager_pmem *pmem = NULL;
	struct vm_paged_region *reg = NULL;
	struct vm_paged_region_head *regions = NULL;
	uint32_t exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
	size_t n = 0;

	if (!pgt->num_used_entries)
		goto out;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		if (pmem->fobj)
			pmem_unmap(pmem, pgt);
	}
	assert(!pgt->num_used_entries);

out:
	regions = to_user_mode_ctx(pgt->ctx)->regions;
	if (regions) {
		TAILQ_FOREACH(reg, regions, link) {
			for (n = 0; n < get_pgt_count(reg->base, reg->size);
			     n++) {
				if (reg->pgt_array[n] == pgt) {
					reg->pgt_array[n] = NULL;
					break;
				}
			}
		}
	}

	pager_unlock(exceptions);
}
DECLARE_KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
#endif /*CFG_PAGED_USER_TA*/

void tee_pager_release_phys(void *addr, size_t size)
{
	bool unmapped = false;
	vaddr_t va = (vaddr_t)addr;
	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
	struct vm_paged_region *reg;
	uint32_t exceptions;

	if (end <= begin)
		return;

	exceptions = pager_lock_check_stack(128);

	for (va = begin; va < end; va += SMALL_PAGE_SIZE) {
		reg = find_region(&core_vm_regions, va);
		if (!reg)
			panic();
		unmapped |= tee_pager_release_one_phys(reg, va);
	}

	if (unmapped)
		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);

	pager_unlock(exceptions);
}
DECLARE_KEEP_PAGER(tee_pager_release_phys);
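
/*
 * Allocates @size bytes (rounded up to whole pages) of locked, paged core
 * memory backed by a locked-paged fobj. Returns NULL on failure. The
 * physical pages backing the range are only claimed from the pager when
 * the memory is touched and can be handed back with
 * tee_pager_release_phys().
 */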
void *tee_pager_alloc(size_t size)
{
	tee_mm_entry_t *mm = NULL;
	uint8_t *smem = NULL;
	size_t num_pages = 0;
	struct fobj *fobj = NULL;

	if (!size)
		return NULL;

	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
	if (!mm)
		return NULL;

	smem = (uint8_t *)tee_mm_get_smem(mm);
	num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
	fobj = fobj_locked_paged_alloc(num_pages);
	if (!fobj) {
		tee_mm_free(mm);
		return NULL;
	}

	tee_pager_add_core_region((vaddr_t)smem, PAGED_REGION_TYPE_LOCK, fobj);
	fobj_put(fobj);

	asan_tag_access(smem, smem + num_pages * SMALL_PAGE_SIZE);

	return smem;
}

vaddr_t tee_pager_init_iv_region(struct fobj *fobj)
{
	tee_mm_entry_t *mm = NULL;
	uint8_t *smem = NULL;

	assert(!pager_iv_region);

	mm = tee_mm_alloc(&tee_mm_vcore, fobj->num_pages * SMALL_PAGE_SIZE);
	if (!mm)
		panic();

	smem = (uint8_t *)tee_mm_get_smem(mm);
	tee_pager_add_core_region((vaddr_t)smem, PAGED_REGION_TYPE_RW, fobj);
	fobj_put(fobj);

	asan_tag_access(smem, smem + fobj->num_pages * SMALL_PAGE_SIZE);

	pager_iv_region = find_region(&core_vm_regions, (vaddr_t)smem);
	assert(pager_iv_region && pager_iv_region->fobj == fobj);

	return (vaddr_t)smem;
}