// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2021, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm.h>
#include <assert.h>
#include <io.h>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/asan.h>
#include <kernel/cache_helpers.h>
#include <kernel/linker.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/tlb_helpers.h>
#include <kernel/user_mode_ctx.h>
#include <mm/core_memprot.h>
#include <mm/fobj.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <stdlib.h>
#include <sys/queue.h>
#include <tee_api_defines.h>
#include <trace.h>
#include <types_ext.h>
#include <utee_defines.h>
#include <util.h>

static struct vm_paged_region_head core_vm_regions =
	TAILQ_HEAD_INITIALIZER(core_vm_regions);

#define INVALID_PGIDX		UINT_MAX
#define PMEM_FLAG_DIRTY		BIT(0)
#define PMEM_FLAG_HIDDEN	BIT(1)

/*
 * struct tee_pager_pmem - Represents a physical page used for paging.
 *
 * @flags	flags defined by PMEM_FLAG_* above
 * @fobj_pgidx	index of the page in the @fobj
 * @fobj	File object of which a page is made visible.
 * @va_alias	Virtual address where the physical page always is aliased.
 *		Used during remapping of the page when the content needs to
 *		be updated before it's available at the new location.
 */
struct tee_pager_pmem {
	unsigned int flags;
	unsigned int fobj_pgidx;
	struct fobj *fobj;
	void *va_alias;
	TAILQ_ENTRY(tee_pager_pmem) link;
};

/* Identifies a single page table entry: translation table (@pgt) and index */
struct tblidx {
	struct pgt *pgt;
	unsigned int idx;
};

/* The list of physical pages. The first page in the list is the oldest */
TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);

static struct tee_pager_pmem_head tee_pager_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);

static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);

/* Number of pages to hide */
#define TEE_PAGER_NHIDE (tee_pager_npages / 3)

/* Number of registered physical pages, used when hiding pages. */
static size_t tee_pager_npages;
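
/*
 * Note on page replacement: tee_pager_pmem_head is kept in rough LRU
 * order. A page that is paged in or unhidden is (re)inserted at the
 * tail, and tee_pager_hide_pages() unmaps ("hides") up to
 * TEE_PAGER_NHIDE of the oldest pages on each handled fault. A hidden
 * page that faults again is simply remapped by tee_pager_unhide_page()
 * and counted as a hidden hit, which is what keeps frequently used
 * pages near the tail of the list.
 */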

/* This area covers the IVs for all fobjs with paged IVs */
static struct vm_paged_region *pager_iv_region;
/* Used by make_iv_available(), see make_iv_available() for details. */
static struct tee_pager_pmem *pager_spare_pmem;

#ifdef CFG_WITH_STATS
static struct tee_pager_stats pager_stats;

static inline void incr_ro_hits(void)
{
	pager_stats.ro_hits++;
}

static inline void incr_rw_hits(void)
{
	pager_stats.rw_hits++;
}

static inline void incr_hidden_hits(void)
{
	pager_stats.hidden_hits++;
}

static inline void incr_zi_released(void)
{
	pager_stats.zi_released++;
}

static inline void incr_npages_all(void)
{
	pager_stats.npages_all++;
}

static inline void set_npages(void)
{
	pager_stats.npages = tee_pager_npages;
}

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	*stats = pager_stats;

	pager_stats.hidden_hits = 0;
	pager_stats.ro_hits = 0;
	pager_stats.rw_hits = 0;
	pager_stats.zi_released = 0;
}

#else /* CFG_WITH_STATS */
static inline void incr_ro_hits(void) { }
static inline void incr_rw_hits(void) { }
static inline void incr_hidden_hits(void) { }
static inline void incr_zi_released(void) { }
static inline void incr_npages_all(void) { }
static inline void set_npages(void) { }

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	memset(stats, 0, sizeof(struct tee_pager_stats));
}
#endif /* CFG_WITH_STATS */

#define TBL_NUM_ENTRIES	(CORE_MMU_PGDIR_SIZE / SMALL_PAGE_SIZE)
#define TBL_LEVEL	CORE_MMU_PGDIR_LEVEL
#define TBL_SHIFT	SMALL_PAGE_SHIFT

#define EFFECTIVE_VA_SIZE \
	(ROUNDUP(VCORE_START_VA + TEE_RAM_VA_SIZE, CORE_MMU_PGDIR_SIZE) - \
	 ROUNDDOWN(VCORE_START_VA, CORE_MMU_PGDIR_SIZE))

static struct pager_table {
	struct pgt pgt;
	struct core_mmu_table_info tbl_info;
} *pager_tables;
static unsigned int num_pager_tables;

static unsigned pager_spinlock = SPINLOCK_UNLOCK;

/* Defines the range of the alias area */
static tee_mm_entry_t *pager_alias_area;
/*
 * Physical pages are added in a stack-like fashion to the alias area.
 * @pager_alias_next_free gives the address of the next free entry if
 * @pager_alias_next_free is != 0.
 */
static uintptr_t pager_alias_next_free;

#ifdef CFG_TEE_CORE_DEBUG
#define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)

static uint32_t pager_lock_dldetect(const char *func, const int line,
				    struct abort_info *ai)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	unsigned int retries = 0;
	unsigned int reminder = 0;

	while (!cpu_spin_trylock(&pager_spinlock)) {
		retries++;
		if (!retries) {
			/* wrapped, time to report */
			trace_printf(func, line, TRACE_ERROR, true,
				     "possible spinlock deadlock reminder %u",
				     reminder);
			if (reminder < UINT_MAX)
				reminder++;
			if (ai)
				abort_print(ai);
		}
	}

	return exceptions;
}
#else
static uint32_t pager_lock(struct abort_info __unused *ai)
{
	return cpu_spin_lock_xsave(&pager_spinlock);
}
#endif

static uint32_t pager_lock_check_stack(size_t stack_size)
{
	if (stack_size) {
		int8_t buf[stack_size];
		size_t n;

		/*
		 * Make sure to touch all pages of the stack that we expect
		 * to use with this lock held. We need to take eventual
		 * page faults before the lock is taken or we'll deadlock
		 * the pager. The pages that are populated in this way will
		 * eventually be released at certain save transitions of
		 * the thread.
		 */
		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
			io_write8((vaddr_t)buf + n, 1);
		io_write8((vaddr_t)buf + stack_size - 1, 1);
	}

	return pager_lock(NULL);
}

static void pager_unlock(uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
}

void *tee_pager_phys_to_virt(paddr_t pa)
{
	struct core_mmu_table_info ti;
	unsigned idx;
	uint32_t a;
	paddr_t p;
	vaddr_t v;
	size_t n;

	/*
	 * Most addresses are mapped linearly, try that first if possible.
	 */
	if (!tee_pager_get_table_info(pa, &ti))
		return NULL;	/* impossible pa */
	idx = core_mmu_va2idx(&ti, pa);
	core_mmu_get_entry(&ti, idx, &p, &a);
	if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
		return (void *)core_mmu_idx2va(&ti, idx);

	n = 0;
	idx = core_mmu_va2idx(&pager_tables[n].tbl_info, TEE_RAM_VA_START);
	while (true) {
		while (idx < TBL_NUM_ENTRIES) {
			v = core_mmu_idx2va(&pager_tables[n].tbl_info, idx);
			if (v >= (TEE_RAM_VA_START + TEE_RAM_VA_SIZE))
				return NULL;

			core_mmu_get_entry(&pager_tables[n].tbl_info,
					   idx, &p, &a);
			if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
				return (void *)v;
			idx++;
		}

		n++;
		if (n >= num_pager_tables)
			return NULL;
		idx = 0;
	}

	return NULL;
}

static bool pmem_is_hidden(struct tee_pager_pmem *pmem)
{
	return pmem->flags & PMEM_FLAG_HIDDEN;
}

static bool pmem_is_dirty(struct tee_pager_pmem *pmem)
{
	return pmem->flags & PMEM_FLAG_DIRTY;
}

static bool pmem_is_covered_by_region(struct tee_pager_pmem *pmem,
				      struct vm_paged_region *reg)
{
	if (pmem->fobj != reg->fobj)
		return false;
	if (pmem->fobj_pgidx < reg->fobj_pgoffs)
		return false;
	if ((pmem->fobj_pgidx - reg->fobj_pgoffs) >=
	    (reg->size >> SMALL_PAGE_SHIFT))
		return false;

	return true;
}

/*
 * Number of translation tables (each covering CORE_MMU_PGDIR_SIZE of VA)
 * needed to map [base, base + size).
 */
static size_t get_pgt_count(vaddr_t base, size_t size)
{
	assert(size);

	return (base + size - 1) / CORE_MMU_PGDIR_SIZE + 1 -
	       base / CORE_MMU_PGDIR_SIZE;
}

static bool region_have_pgt(struct vm_paged_region *reg, struct pgt *pgt)
{
	size_t n = 0;

	for (n = 0; n < get_pgt_count(reg->base, reg->size); n++)
		if (reg->pgt_array[n] == pgt)
			return true;

	return false;
}

static struct tblidx pmem_get_region_tblidx(struct tee_pager_pmem *pmem,
					    struct vm_paged_region *reg)
{
	size_t tbloffs = (reg->base & CORE_MMU_PGDIR_MASK) >> SMALL_PAGE_SHIFT;
	size_t idx = pmem->fobj_pgidx - reg->fobj_pgoffs + tbloffs;

	assert(pmem->fobj && pmem->fobj_pgidx != INVALID_PGIDX);
	assert(idx / TBL_NUM_ENTRIES < get_pgt_count(reg->base, reg->size));

	return (struct tblidx){
		.idx = idx % TBL_NUM_ENTRIES,
		.pgt = reg->pgt_array[idx / TBL_NUM_ENTRIES],
	};
}

static struct pager_table *find_pager_table_may_fail(vaddr_t va)
{
	size_t n;
	const vaddr_t mask = CORE_MMU_PGDIR_MASK;

	if (!pager_tables)
		return NULL;

	n = ((va & ~mask) - pager_tables[0].tbl_info.va_base) >>
	    CORE_MMU_PGDIR_SHIFT;
	if (n >= num_pager_tables)
		return NULL;

	assert(va >= pager_tables[n].tbl_info.va_base &&
	       va <= (pager_tables[n].tbl_info.va_base | mask));

	return pager_tables + n;
}

static struct pager_table *find_pager_table(vaddr_t va)
{
	struct pager_table *pt = find_pager_table_may_fail(va);

	assert(pt);
	return pt;
}

bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti)
{
	struct pager_table *pt = find_pager_table_may_fail(va);

	if (!pt)
		return false;

	*ti = pt->tbl_info;
	return true;
}

static struct core_mmu_table_info *find_table_info(vaddr_t va)
{
	return &find_pager_table(va)->tbl_info;
}

static struct pgt *find_core_pgt(vaddr_t va)
{
	return &find_pager_table(va)->pgt;
}

void tee_pager_set_alias_area(tee_mm_entry_t *mm)
{
	struct pager_table *pt;
	unsigned idx;
	vaddr_t smem = tee_mm_get_smem(mm);
	size_t nbytes = tee_mm_get_bytes(mm);
	vaddr_t v;
	uint32_t a = 0;

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);

	assert(!pager_alias_area);
	pager_alias_area = mm;
	pager_alias_next_free = smem;

	/* Clear all mappings in the alias area */
	pt = find_pager_table(smem);
	idx = core_mmu_va2idx(&pt->tbl_info, smem);
	while (pt <= (pager_tables + num_pager_tables - 1)) {
		while (idx < TBL_NUM_ENTRIES) {
			v = core_mmu_idx2va(&pt->tbl_info, idx);
			if (v >= (smem + nbytes))
				goto out;

			core_mmu_get_entry(&pt->tbl_info, idx, NULL, &a);
			core_mmu_set_entry(&pt->tbl_info, idx, 0, 0);
			if (a & TEE_MATTR_VALID_BLOCK)
				pgt_dec_used_entries(&pt->pgt);
			idx++;
		}

		pt++;
		idx = 0;
	}

out:
	tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);
}

static size_t tbl_usage_count(struct core_mmu_table_info *ti)
{
	size_t n;
	uint32_t a = 0;
	size_t usage = 0;

	for (n = 0; n < ti->num_entries; n++) {
		core_mmu_get_entry(ti, n, NULL, &a);
		if (a & TEE_MATTR_VALID_BLOCK)
			usage++;
	}
	return usage;
}

static void tblidx_get_entry(struct tblidx tblidx, paddr_t *pa, uint32_t *attr)
{
	assert(tblidx.pgt && tblidx.idx < TBL_NUM_ENTRIES);
	core_mmu_get_entry_primitive(tblidx.pgt->tbl, TBL_LEVEL, tblidx.idx,
				     pa, attr);
}

static void tblidx_set_entry(struct tblidx tblidx, paddr_t pa, uint32_t attr)
{
	assert(tblidx.pgt && tblidx.idx < TBL_NUM_ENTRIES);
	core_mmu_set_entry_primitive(tblidx.pgt->tbl, TBL_LEVEL, tblidx.idx,
				     pa, attr);
}

static struct tblidx region_va2tblidx(struct vm_paged_region *reg, vaddr_t va)
{
	paddr_t mask = CORE_MMU_PGDIR_MASK;
	size_t n = 0;

	assert(va >= reg->base && va < (reg->base + reg->size));
	n = (va - (reg->base & ~mask)) / CORE_MMU_PGDIR_SIZE;

	return (struct tblidx){
		.idx = (va & mask) / SMALL_PAGE_SIZE,
		.pgt = reg->pgt_array[n],
	};
}

static vaddr_t tblidx2va(struct tblidx tblidx)
{
	return tblidx.pgt->vabase + (tblidx.idx << SMALL_PAGE_SHIFT);
}

static void tblidx_tlbi_entry(struct tblidx tblidx)
{
	vaddr_t va = tblidx2va(tblidx);

#if defined(CFG_PAGED_USER_TA)
	if (tblidx.pgt->ctx) {
		uint32_t asid = to_user_mode_ctx(tblidx.pgt->ctx)->vm_info.asid;

		tlbi_mva_asid(va, asid);
		return;
	}
#endif
	tlbi_mva_allasid(va);
}

static void pmem_assign_fobj_page(struct tee_pager_pmem *pmem,
				  struct vm_paged_region *reg, vaddr_t va)
{
	struct tee_pager_pmem *p = NULL;
	unsigned int fobj_pgidx = 0;

	assert(!pmem->fobj && pmem->fobj_pgidx == INVALID_PGIDX);

	assert(va >= reg->base && va < (reg->base + reg->size));
	fobj_pgidx = (va - reg->base) / SMALL_PAGE_SIZE + reg->fobj_pgoffs;

	/* The page must not already be resident in another pmem */
	TAILQ_FOREACH(p, &tee_pager_pmem_head, link)
		assert(p->fobj != reg->fobj || p->fobj_pgidx != fobj_pgidx);

	pmem->fobj = reg->fobj;
	pmem->fobj_pgidx = fobj_pgidx;
}
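
/*
 * A pmem with fobj == NULL and fobj_pgidx == INVALID_PGIDX is free and
 * may be bound to a new fobj page with pmem_assign_fobj_page(). Note
 * that pmem_clear() below leaves va_alias untouched: the alias mapping
 * of the physical page is set up once in tee_pager_add_pages() and then
 * kept for the lifetime of the pmem.
 */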

static void pmem_clear(struct tee_pager_pmem *pmem)
{
	pmem->fobj = NULL;
	pmem->fobj_pgidx = INVALID_PGIDX;
	pmem->flags = 0;
}

static void pmem_unmap(struct tee_pager_pmem *pmem, struct pgt *only_this_pgt)
{
	struct vm_paged_region *reg = NULL;
	struct tblidx tblidx = { };
	uint32_t a = 0;

	TAILQ_FOREACH(reg, &pmem->fobj->regions, fobj_link) {
		/*
		 * If only_this_pgt points to a pgt then the pgt of this
		 * region has to match or we'll skip over it.
		 */
		if (only_this_pgt && !region_have_pgt(reg, only_this_pgt))
			continue;
		if (!pmem_is_covered_by_region(pmem, reg))
			continue;
		tblidx = pmem_get_region_tblidx(pmem, reg);
		if (!tblidx.pgt)
			continue;
		tblidx_get_entry(tblidx, NULL, &a);
		if (a & TEE_MATTR_VALID_BLOCK) {
			tblidx_set_entry(tblidx, 0, 0);
			pgt_dec_used_entries(tblidx.pgt);
			tblidx_tlbi_entry(tblidx);
		}
	}
}

void tee_pager_early_init(void)
{
	size_t n = 0;

	num_pager_tables = EFFECTIVE_VA_SIZE / CORE_MMU_PGDIR_SIZE;
	pager_tables = calloc(num_pager_tables, sizeof(*pager_tables));
	if (!pager_tables)
		panic("Cannot allocate pager_tables");

	/*
	 * Note that this depends on add_pager_vaspace() adding vaspace
	 * after end of memory.
	 */
	for (n = 0; n < num_pager_tables; n++) {
		if (!core_mmu_find_table(NULL, VCORE_START_VA +
					 n * CORE_MMU_PGDIR_SIZE, UINT_MAX,
					 &pager_tables[n].tbl_info))
			panic("can't find mmu tables");

		if (pager_tables[n].tbl_info.shift != TBL_SHIFT)
			panic("Unsupported page size in translation table");
		assert(pager_tables[n].tbl_info.num_entries == TBL_NUM_ENTRIES);
		assert(pager_tables[n].tbl_info.level == TBL_LEVEL);

		pager_tables[n].pgt.tbl = pager_tables[n].tbl_info.table;
		pager_tables[n].pgt.vabase = pager_tables[n].tbl_info.va_base;
		pgt_set_used_entries(&pager_tables[n].pgt,
				     tbl_usage_count(&pager_tables[n].tbl_info));
	}
}

static void *pager_add_alias_page(paddr_t pa)
{
	unsigned idx;
	struct core_mmu_table_info *ti;
	/* Alias pages mapped without write permission: runtime will care */
	uint32_t attr = TEE_MATTR_VALID_BLOCK |
			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
			TEE_MATTR_SECURE | TEE_MATTR_PR;

	DMSG("0x%" PRIxPA, pa);

	ti = find_table_info(pager_alias_next_free);
	idx = core_mmu_va2idx(ti, pager_alias_next_free);
	core_mmu_set_entry(ti, idx, pa, attr);
	pgt_inc_used_entries(find_core_pgt(pager_alias_next_free));
	pager_alias_next_free += SMALL_PAGE_SIZE;
	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
				      tee_mm_get_bytes(pager_alias_area)))
		pager_alias_next_free = 0;
	return (void *)core_mmu_idx2va(ti, idx);
}

static void region_insert(struct vm_paged_region_head *regions,
			  struct vm_paged_region *reg,
			  struct vm_paged_region *r_prev)
{
	uint32_t exceptions = pager_lock_check_stack(8);

	if (r_prev)
		TAILQ_INSERT_AFTER(regions, r_prev, reg, link);
	else
		TAILQ_INSERT_HEAD(regions, reg, link);
	TAILQ_INSERT_TAIL(&reg->fobj->regions, reg, fobj_link);

	pager_unlock(exceptions);
}
DECLARE_KEEP_PAGER(region_insert);
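
/*
 * Helpers like region_insert() above run with the pager lock held and
 * can be needed while a page fault is being served, so they are marked
 * with DECLARE_KEEP_PAGER() to keep them in the unpaged (always mapped)
 * part of the core. Compare the comment in tee_pager_merge_um_region()
 * about not pulling free() into the unpaged area.
 */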

static struct vm_paged_region *alloc_region(vaddr_t base, size_t size)
{
	struct vm_paged_region *reg = NULL;

	if ((base & SMALL_PAGE_MASK) || !size) {
		EMSG("invalid pager region [%" PRIxVA " +0x%zx]", base, size);
		panic();
	}

	reg = calloc(1, sizeof(*reg));
	if (!reg)
		return NULL;
	reg->pgt_array = calloc(get_pgt_count(base, size),
				sizeof(struct pgt *));
	if (!reg->pgt_array) {
		free(reg);
		return NULL;
	}

	reg->base = base;
	reg->size = size;
	return reg;
}

void tee_pager_add_core_region(vaddr_t base, enum vm_paged_region_type type,
			       struct fobj *fobj)
{
	struct vm_paged_region *reg = NULL;
	size_t n = 0;

	assert(fobj);

	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : type %d",
	     base, base + fobj->num_pages * SMALL_PAGE_SIZE, type);

	reg = alloc_region(base, fobj->num_pages * SMALL_PAGE_SIZE);
	if (!reg)
		panic("alloc_region");

	reg->fobj = fobj_get(fobj);
	reg->fobj_pgoffs = 0;
	reg->type = type;

	switch (type) {
	case PAGED_REGION_TYPE_RO:
		reg->flags = TEE_MATTR_PRX;
		break;
	case PAGED_REGION_TYPE_RW:
	case PAGED_REGION_TYPE_LOCK:
		reg->flags = TEE_MATTR_PRW;
		break;
	default:
		panic();
	}

	for (n = 0; n < get_pgt_count(reg->base, reg->size); n++)
		reg->pgt_array[n] = find_core_pgt(base +
						  n * CORE_MMU_PGDIR_SIZE);
	region_insert(&core_vm_regions, reg, NULL);
}

static struct vm_paged_region *find_region(struct vm_paged_region_head *regions,
					   vaddr_t va)
{
	struct vm_paged_region *reg;

	if (!regions)
		return NULL;

	TAILQ_FOREACH(reg, regions, link) {
		if (core_is_buffer_inside(va, 1, reg->base, reg->size))
			return reg;
	}
	return NULL;
}

#ifdef CFG_PAGED_USER_TA
static struct vm_paged_region *find_uta_region(vaddr_t va)
{
	struct ts_ctx *ctx = thread_get_tsd()->ctx;

	if (!is_user_mode_ctx(ctx))
		return NULL;
	return find_region(to_user_mode_ctx(ctx)->regions, va);
}
#else
static struct vm_paged_region *find_uta_region(vaddr_t va __unused)
{
	return NULL;
}
#endif /*CFG_PAGED_USER_TA*/

static uint32_t get_region_mattr(uint32_t reg_flags)
{
	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
			(reg_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));

	return attr;
}

static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
{
	struct core_mmu_table_info *ti;
	paddr_t pa;
	unsigned idx;

	ti = find_table_info((vaddr_t)pmem->va_alias);
	idx = core_mmu_va2idx(ti, (vaddr_t)pmem->va_alias);
	core_mmu_get_entry(ti, idx, &pa, NULL);
	return pa;
}

#ifdef CFG_PAGED_USER_TA
static void unlink_region(struct vm_paged_region_head *regions,
			  struct vm_paged_region *reg)
{
	uint32_t exceptions = pager_lock_check_stack(64);

	TAILQ_REMOVE(regions, reg, link);
	TAILQ_REMOVE(&reg->fobj->regions, reg, fobj_link);

	pager_unlock(exceptions);
}
DECLARE_KEEP_PAGER(unlink_region);

static void free_region(struct vm_paged_region *reg)
{
	fobj_put(reg->fobj);
	free(reg->pgt_array);
	free(reg);
}
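
/*
 * The per-context region list is kept sorted by ascending base address:
 * pager_add_um_region() below walks the list to find the insertion
 * point and rejects any overlap with an existing region, which is what
 * lets tee_pager_split_um_region() and tee_pager_merge_um_region()
 * reason about neighbouring regions.
 */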

static TEE_Result pager_add_um_region(struct user_mode_ctx *uctx, vaddr_t base,
				      struct fobj *fobj, uint32_t prot)
{
	struct vm_paged_region *r_prev = NULL;
	struct vm_paged_region *reg = NULL;
	vaddr_t b = base;
	size_t fobj_pgoffs = 0;
	size_t s = fobj->num_pages * SMALL_PAGE_SIZE;

	if (!uctx->regions) {
		uctx->regions = malloc(sizeof(*uctx->regions));
		if (!uctx->regions)
			return TEE_ERROR_OUT_OF_MEMORY;
		TAILQ_INIT(uctx->regions);
	}

	reg = TAILQ_FIRST(uctx->regions);
	while (reg) {
		if (core_is_buffer_intersect(b, s, reg->base, reg->size))
			return TEE_ERROR_BAD_PARAMETERS;
		if (b < reg->base)
			break;
		r_prev = reg;
		reg = TAILQ_NEXT(reg, link);
	}

	reg = alloc_region(b, s);
	if (!reg)
		return TEE_ERROR_OUT_OF_MEMORY;

	/* Table info will be set when the context is activated. */
	reg->fobj = fobj_get(fobj);
	reg->fobj_pgoffs = fobj_pgoffs;
	reg->type = PAGED_REGION_TYPE_RW;
	reg->flags = prot;

	region_insert(uctx->regions, reg, r_prev);

	return TEE_SUCCESS;
}

static void map_pgts(struct vm_paged_region *reg)
{
	struct core_mmu_table_info dir_info = { NULL };
	size_t n = 0;

	core_mmu_get_user_pgdir(&dir_info);

	for (n = 0; n < get_pgt_count(reg->base, reg->size); n++) {
		struct pgt *pgt = reg->pgt_array[n];
		uint32_t attr = 0;
		paddr_t pa = 0;
		size_t idx = 0;

		idx = core_mmu_va2idx(&dir_info, pgt->vabase);
		core_mmu_get_entry(&dir_info, idx, &pa, &attr);

		/*
		 * Check if the page table already is used, if it is, it's
		 * already registered.
		 */
		if (pgt->num_used_entries) {
			assert(attr & TEE_MATTR_TABLE);
			assert(pa == virt_to_phys(pgt->tbl));
			continue;
		}

		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
		pa = virt_to_phys(pgt->tbl);
		assert(pa);
		/*
		 * Note that the update of the table entry is guaranteed to
		 * be atomic.
		 */
		core_mmu_set_entry(&dir_info, idx, pa, attr);
	}
}

TEE_Result tee_pager_add_um_region(struct user_mode_ctx *uctx, vaddr_t base,
				   struct fobj *fobj, uint32_t prot)
{
	TEE_Result res = TEE_SUCCESS;
	struct thread_specific_data *tsd = thread_get_tsd();
	struct vm_paged_region *reg = NULL;

	res = pager_add_um_region(uctx, base, fobj, prot);
	if (res)
		return res;

	if (uctx->ts_ctx == tsd->ctx) {
		/*
		 * We're changing the currently active uctx. Assign page
		 * tables to the new regions and make sure that the page
		 * tables are registered in the upper table.
		 */
		tee_pager_assign_um_tables(uctx);
		TAILQ_FOREACH(reg, uctx->regions, link)
			map_pgts(reg);
	}

	return TEE_SUCCESS;
}
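
/*
 * split_region() and merge_region_with_next() below only touch pager
 * bookkeeping under the pager lock: fobj references, the pgt_array and
 * the two linked lists. No page table entries are modified, so no TLB
 * maintenance is needed at these points.
 */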

static void split_region(struct vm_paged_region *reg,
			 struct vm_paged_region *r2, vaddr_t va)
{
	uint32_t exceptions = pager_lock_check_stack(64);
	size_t diff = va - reg->base;
	size_t r2_pgt_count = 0;
	size_t reg_pgt_count = 0;
	size_t n0 = 0;
	size_t n = 0;

	assert(r2->base == va);
	assert(r2->size == reg->size - diff);

	r2->fobj = fobj_get(reg->fobj);
	r2->fobj_pgoffs = reg->fobj_pgoffs + diff / SMALL_PAGE_SIZE;
	r2->type = reg->type;
	r2->flags = reg->flags;

	r2_pgt_count = get_pgt_count(r2->base, r2->size);
	reg_pgt_count = get_pgt_count(reg->base, reg->size);
	n0 = reg_pgt_count - r2_pgt_count;
	for (n = n0; n < reg_pgt_count; n++)
		r2->pgt_array[n - n0] = reg->pgt_array[n];
	reg->size = diff;

	TAILQ_INSERT_BEFORE(reg, r2, link);
	TAILQ_INSERT_AFTER(&reg->fobj->regions, reg, r2, fobj_link);

	pager_unlock(exceptions);
}
DECLARE_KEEP_PAGER(split_region);

TEE_Result tee_pager_split_um_region(struct user_mode_ctx *uctx, vaddr_t va)
{
	struct vm_paged_region *reg = NULL;
	struct vm_paged_region *r2 = NULL;

	if (va & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	TAILQ_FOREACH(reg, uctx->regions, link) {
		if (va == reg->base || va == reg->base + reg->size)
			return TEE_SUCCESS;
		if (va > reg->base && va < reg->base + reg->size) {
			size_t diff = va - reg->base;

			r2 = alloc_region(va, reg->size - diff);
			if (!r2)
				return TEE_ERROR_OUT_OF_MEMORY;
			split_region(reg, r2, va);
			return TEE_SUCCESS;
		}
	}

	return TEE_SUCCESS;
}

static struct pgt **
merge_region_with_next(struct vm_paged_region_head *regions,
		       struct vm_paged_region *reg,
		       struct vm_paged_region *r_next, struct pgt **pgt_array)
{
	uint32_t exceptions = pager_lock_check_stack(64);
	struct pgt **old_pgt_array = reg->pgt_array;

	reg->pgt_array = pgt_array;
	TAILQ_REMOVE(regions, r_next, link);
	TAILQ_REMOVE(&r_next->fobj->regions, r_next, fobj_link);

	pager_unlock(exceptions);
	return old_pgt_array;
}
DECLARE_KEEP_PAGER(merge_region_with_next);

static struct pgt **alloc_merged_pgt_array(struct vm_paged_region *a,
					   struct vm_paged_region *a_next)
{
	size_t a_next_pgt_count = get_pgt_count(a_next->base, a_next->size);
	size_t a_pgt_count = get_pgt_count(a->base, a->size);
	size_t pgt_count = get_pgt_count(a->base, a->size + a_next->size);
	struct pgt **pgt_array = NULL;
	bool have_shared_pgt = false;

	have_shared_pgt = ((a->base + a->size) & ~CORE_MMU_PGDIR_MASK) ==
			  (a_next->base & ~CORE_MMU_PGDIR_MASK);

	if (have_shared_pgt)
		assert(pgt_count == a_pgt_count + a_next_pgt_count - 1);
	else
		assert(pgt_count == a_pgt_count + a_next_pgt_count);

	/* In case there's a shared pgt they must match */
	if (have_shared_pgt &&
	    a->pgt_array[a_pgt_count - 1] != a_next->pgt_array[0])
		return NULL;

	pgt_array = calloc(sizeof(struct pgt *), pgt_count);
	if (!pgt_array)
		return NULL;

	/*
	 * Copy and merge the two pgt_arrays, note the special case
	 * where a pgt is shared.
	 */
	memcpy(pgt_array, a->pgt_array, a_pgt_count * sizeof(struct pgt *));
	if (have_shared_pgt)
		memcpy(pgt_array + a_pgt_count, a_next->pgt_array + 1,
		       (a_next_pgt_count - 1) * sizeof(struct pgt *));
	else
		memcpy(pgt_array + a_pgt_count, a_next->pgt_array,
		       a_next_pgt_count * sizeof(struct pgt *));

	return pgt_array;
}
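
/*
 * Example of the shared pgt case above, assuming CORE_MMU_PGDIR_SIZE is
 * 2 MiB: a region ending at 0x20180000 and its neighbour starting at
 * 0x20180000 both use the translation table covering
 * 0x20000000..0x201fffff, so the last entry of the first pgt_array and
 * the first entry of the second refer to the same pgt and the merged
 * array is one element shorter than the two arrays combined.
 */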

void tee_pager_merge_um_region(struct user_mode_ctx *uctx, vaddr_t va,
			       size_t len)
{
	struct vm_paged_region *r_next = NULL;
	struct vm_paged_region *reg = NULL;
	struct pgt **pgt_array = NULL;
	vaddr_t end_va = 0;

	if ((va | len) & SMALL_PAGE_MASK)
		return;
	if (ADD_OVERFLOW(va, len, &end_va))
		return;

	for (reg = TAILQ_FIRST(uctx->regions);; reg = r_next) {
		r_next = TAILQ_NEXT(reg, link);
		if (!r_next)
			return;

		/* Try merging with the area just before va */
		if (reg->base + reg->size < va)
			continue;

		/*
		 * If reg->base is well past our range we're done.
		 * Note that if it's just the page after our range we'll
		 * try to merge.
		 */
		if (reg->base > end_va)
			return;

		if (reg->base + reg->size != r_next->base)
			continue;
		if (reg->fobj != r_next->fobj || reg->type != r_next->type ||
		    reg->flags != r_next->flags)
			continue;
		if (reg->fobj_pgoffs + reg->size / SMALL_PAGE_SIZE !=
		    r_next->fobj_pgoffs)
			continue;

		pgt_array = alloc_merged_pgt_array(reg, r_next);
		if (!pgt_array)
			continue;

		/*
		 * merge_region_with_next() returns the old pgt array which
		 * was replaced in reg. We don't want to call free()
		 * directly from merge_region_with_next() since that would
		 * pull free() and its dependencies into the unpaged area.
		 */
		free(merge_region_with_next(uctx->regions, reg, r_next,
					    pgt_array));
		free_region(r_next);
		r_next = reg;
	}
}

static void rem_region(struct vm_paged_region_head *regions,
		       struct vm_paged_region *reg)
{
	struct tee_pager_pmem *pmem;
	size_t last_pgoffs = reg->fobj_pgoffs +
			     (reg->size >> SMALL_PAGE_SHIFT) - 1;
	uint32_t exceptions;
	struct tblidx tblidx = { };
	uint32_t a = 0;

	exceptions = pager_lock_check_stack(64);

	TAILQ_REMOVE(regions, reg, link);
	TAILQ_REMOVE(&reg->fobj->regions, reg, fobj_link);

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		if (pmem->fobj != reg->fobj ||
		    pmem->fobj_pgidx < reg->fobj_pgoffs ||
		    pmem->fobj_pgidx > last_pgoffs)
			continue;

		tblidx = pmem_get_region_tblidx(pmem, reg);
		tblidx_get_entry(tblidx, NULL, &a);
		if (!(a & TEE_MATTR_VALID_BLOCK))
			continue;

		tblidx_set_entry(tblidx, 0, 0);
		tblidx_tlbi_entry(tblidx);
		pgt_dec_used_entries(tblidx.pgt);
	}

	pager_unlock(exceptions);
}
DECLARE_KEEP_PAGER(rem_region);

void tee_pager_rem_um_region(struct user_mode_ctx *uctx, vaddr_t base,
			     size_t size)
{
	struct vm_paged_region *reg;
	struct vm_paged_region *r_next;
	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);

	TAILQ_FOREACH_SAFE(reg, uctx->regions, link, r_next) {
		if (core_is_buffer_inside(reg->base, reg->size, base, s)) {
			rem_region(uctx->regions, reg);
			free_region(reg);
		}
	}
	tlbi_asid(uctx->vm_info.asid);
}
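
/*
 * tee_pager_rem_um_region() above removes the regions fully contained
 * in [base, base + size) and flushes the TLB for the context's ASID.
 * tee_pager_rem_um_regions() below is the teardown path: it frees every
 * region of the context together with the region list itself.
 */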

void tee_pager_rem_um_regions(struct user_mode_ctx *uctx)
{
	struct vm_paged_region *reg = NULL;

	if (!uctx->regions)
		return;

	while (true) {
		reg = TAILQ_FIRST(uctx->regions);
		if (!reg)
			break;
		unlink_region(uctx->regions, reg);
		free_region(reg);
	}

	free(uctx->regions);
}

static bool __maybe_unused same_context(struct tee_pager_pmem *pmem)
{
	struct vm_paged_region *reg = TAILQ_FIRST(&pmem->fobj->regions);
	void *ctx = reg->pgt_array[0]->ctx;

	do {
		reg = TAILQ_NEXT(reg, fobj_link);
		if (!reg)
			return true;
	} while (reg->pgt_array[0]->ctx == ctx);

	return false;
}

bool tee_pager_set_um_region_attr(struct user_mode_ctx *uctx, vaddr_t base,
				  size_t size, uint32_t flags)
{
	bool ret = false;
	vaddr_t b = base;
	size_t s = size;
	size_t s2 = 0;
	struct vm_paged_region *reg = find_region(uctx->regions, b);
	uint32_t exceptions = 0;
	struct tee_pager_pmem *pmem = NULL;
	uint32_t a = 0;
	uint32_t f = 0;
	uint32_t mattr = 0;
	uint32_t f2 = 0;
	struct tblidx tblidx = { };

	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
	if (f & TEE_MATTR_UW)
		f |= TEE_MATTR_PW;
	mattr = get_region_mattr(f);

	exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);

	while (s) {
		if (!reg) {
			ret = false;
			goto out;
		}
		s2 = MIN(reg->size, s);
		b += s2;
		s -= s2;

		if (reg->flags == f)
			goto next_region;

		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
			if (!pmem_is_covered_by_region(pmem, reg))
				continue;

			tblidx = pmem_get_region_tblidx(pmem, reg);
			tblidx_get_entry(tblidx, NULL, &a);
			if (a == f)
				continue;
			tblidx_set_entry(tblidx, 0, 0);
			tblidx_tlbi_entry(tblidx);

			pmem->flags &= ~PMEM_FLAG_HIDDEN;
			if (pmem_is_dirty(pmem))
				f2 = mattr;
			else
				f2 = mattr & ~(TEE_MATTR_UW | TEE_MATTR_PW);
			tblidx_set_entry(tblidx, get_pmem_pa(pmem), f2);
			if (!(a & TEE_MATTR_VALID_BLOCK))
				pgt_inc_used_entries(tblidx.pgt);
			/*
			 * Make sure the table update is visible before
			 * continuing.
			 */
			dsb_ishst();

			/*
			 * Here's a problem if this page already is shared.
			 * We would need to do icache invalidation for each
			 * context in which it is shared. In practice this
			 * will never happen.
			 */
			if (flags & TEE_MATTR_UX) {
				void *va = (void *)tblidx2va(tblidx);

				/* Assert that the pmem isn't shared. */
				assert(same_context(pmem));

				dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
				icache_inv_user_range(va, SMALL_PAGE_SIZE);
			}
		}

		reg->flags = f;
next_region:
		reg = TAILQ_NEXT(reg, link);
	}

	ret = true;
out:
	pager_unlock(exceptions);
	return ret;
}

DECLARE_KEEP_PAGER(tee_pager_set_um_region_attr);
#endif /*CFG_PAGED_USER_TA*/
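
/*
 * tee_pager_invalidate_fobj() clears every pmem currently holding a
 * page of @fobj so that its content is neither saved back nor reused;
 * it is expected to be called when the fobj itself is being released.
 */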

void tee_pager_invalidate_fobj(struct fobj *fobj)
{
	struct tee_pager_pmem *pmem;
	uint32_t exceptions;

	exceptions = pager_lock_check_stack(64);

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link)
		if (pmem->fobj == fobj)
			pmem_clear(pmem);

	pager_unlock(exceptions);
}
DECLARE_KEEP_PAGER(tee_pager_invalidate_fobj);

static struct tee_pager_pmem *pmem_find(struct vm_paged_region *reg, vaddr_t va)
{
	struct tee_pager_pmem *pmem = NULL;
	size_t fobj_pgidx = 0;

	assert(va >= reg->base && va < (reg->base + reg->size));
	fobj_pgidx = (va - reg->base) / SMALL_PAGE_SIZE + reg->fobj_pgoffs;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link)
		if (pmem->fobj == reg->fobj && pmem->fobj_pgidx == fobj_pgidx)
			return pmem;

	return NULL;
}

static bool tee_pager_unhide_page(struct vm_paged_region *reg, vaddr_t page_va)
{
	struct tblidx tblidx = region_va2tblidx(reg, page_va);
	struct tee_pager_pmem *pmem = pmem_find(reg, page_va);
	uint32_t a = get_region_mattr(reg->flags);
	uint32_t attr = 0;
	paddr_t pa = 0;

	if (!pmem)
		return false;

	tblidx_get_entry(tblidx, NULL, &attr);
	if (attr & TEE_MATTR_VALID_BLOCK)
		return false;

	/*
	 * The page is hidden, or not mapped yet. Unhide the page and
	 * move it to the tail.
	 *
	 * Since the page isn't mapped there doesn't exist a valid TLB entry
	 * for this address, so no TLB invalidation is required after setting
	 * the new entry. A DSB is needed though, to make the write visible.
	 *
	 * For user executable pages it's more complicated. Those pages can
	 * be shared between multiple TA mappings and thus populated by
	 * another TA. The reference manual states that:
	 *
	 * "instruction cache maintenance is required only after writing
	 * new data to a physical address that holds an instruction."
	 *
	 * So for hidden pages we would not need to invalidate i-cache, but
	 * for newly populated pages we do. Since we don't know which we
	 * have to assume the worst and always invalidate the i-cache. We
	 * don't need to clean the d-cache though, since that has already
	 * been done earlier.
	 *
	 * Additional bookkeeping to tell if the i-cache invalidation is
	 * needed or not is left as a future optimization.
	 */

	/* If it's not a dirty block, then it should be read only. */
	if (!pmem_is_dirty(pmem))
		a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);

	pa = get_pmem_pa(pmem);
	pmem->flags &= ~PMEM_FLAG_HIDDEN;
	if (reg->flags & TEE_MATTR_UX) {
		void *va = (void *)tblidx2va(tblidx);

		/* Set a temporary read-only mapping */
		assert(!(a & (TEE_MATTR_UW | TEE_MATTR_PW)));
		tblidx_set_entry(tblidx, pa, a & ~TEE_MATTR_UX);
		dsb_ishst();

		icache_inv_user_range(va, SMALL_PAGE_SIZE);

		/* Set the final mapping */
		tblidx_set_entry(tblidx, pa, a);
		tblidx_tlbi_entry(tblidx);
	} else {
		tblidx_set_entry(tblidx, pa, a);
		dsb_ishst();
	}
	pgt_inc_used_entries(tblidx.pgt);

	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
	TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	incr_hidden_hits();
	return true;
}

static void tee_pager_hide_pages(void)
{
	struct tee_pager_pmem *pmem = NULL;
	size_t n = 0;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		if (n >= TEE_PAGER_NHIDE)
			break;
		n++;

		/* We cannot hide pages when pmem->fobj is not defined. */
		if (!pmem->fobj)
			continue;

		if (pmem_is_hidden(pmem))
			continue;

		pmem->flags |= PMEM_FLAG_HIDDEN;
		pmem_unmap(pmem, NULL);
	}
}

static unsigned int __maybe_unused
num_regions_with_pmem(struct tee_pager_pmem *pmem)
{
	struct vm_paged_region *reg = NULL;
	unsigned int num_matches = 0;

	TAILQ_FOREACH(reg, &pmem->fobj->regions, fobj_link)
		if (pmem_is_covered_by_region(pmem, reg))
			num_matches++;

	return num_matches;
}

/*
 * Find the mapped pmem for the page, unmap it and move it back to the
 * pool of pageable pmems. Returns false if the page was not mapped and
 * true if it was.
 */
static bool tee_pager_release_one_phys(struct vm_paged_region *reg,
				       vaddr_t page_va)
{
	struct tee_pager_pmem *pmem = NULL;
	struct tblidx tblidx = { };
	size_t fobj_pgidx = 0;

	assert(page_va >= reg->base && page_va < (reg->base + reg->size));
	fobj_pgidx = (page_va - reg->base) / SMALL_PAGE_SIZE +
		     reg->fobj_pgoffs;

	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
		if (pmem->fobj != reg->fobj || pmem->fobj_pgidx != fobj_pgidx)
			continue;

		/*
		 * Locked pages may not be shared. We're asserting that the
		 * number of regions using this pmem is one and only one as
		 * we're about to unmap it.
		 */
		assert(num_regions_with_pmem(pmem) == 1);

		tblidx = pmem_get_region_tblidx(pmem, reg);
		tblidx_set_entry(tblidx, 0, 0);
		pgt_dec_used_entries(tblidx.pgt);
		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
		pmem_clear(pmem);
		tee_pager_npages++;
		set_npages();
		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
		incr_zi_released();
		return true;
	}

	return false;
}
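
/*
 * pager_deploy_page() below makes a physical page available at
 * @page_va: the alias mapping is made writable, the page content is
 * loaded through the alias with fobj_load_page(), the pmem is put on
 * the list matching the region type and, for executable regions, the
 * required d-cache clean and i-cache invalidation are done through a
 * temporary non-executable mapping before the final mapping is
 * installed.
 */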

static void pager_deploy_page(struct tee_pager_pmem *pmem,
			      struct vm_paged_region *reg, vaddr_t page_va,
			      bool clean_user_cache, bool writable)
{
	struct tblidx tblidx = region_va2tblidx(reg, page_va);
	uint32_t attr = get_region_mattr(reg->flags);
	struct core_mmu_table_info *ti = NULL;
	uint8_t *va_alias = pmem->va_alias;
	paddr_t pa = get_pmem_pa(pmem);
	unsigned int idx_alias = 0;
	uint32_t attr_alias = 0;
	paddr_t pa_alias = 0;

	/* Ensure we are allowed to write to aliased virtual page */
	ti = find_table_info((vaddr_t)va_alias);
	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
	if (!(attr_alias & TEE_MATTR_PW)) {
		attr_alias |= TEE_MATTR_PW;
		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
		tlbi_mva_allasid((vaddr_t)va_alias);
	}

	asan_tag_access(va_alias, va_alias + SMALL_PAGE_SIZE);
	if (fobj_load_page(pmem->fobj, pmem->fobj_pgidx, va_alias)) {
		EMSG("PH 0x%" PRIxVA " failed", page_va);
		panic();
	}
	switch (reg->type) {
	case PAGED_REGION_TYPE_RO:
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
		incr_ro_hits();
		/* Forbid write to aliases for read-only (maybe exec) pages */
		attr_alias &= ~TEE_MATTR_PW;
		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
		tlbi_mva_allasid((vaddr_t)va_alias);
		break;
	case PAGED_REGION_TYPE_RW:
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
		if (writable && (attr & (TEE_MATTR_PW | TEE_MATTR_UW)))
			pmem->flags |= PMEM_FLAG_DIRTY;
		incr_rw_hits();
		break;
	case PAGED_REGION_TYPE_LOCK:
		/* Move page to lock list */
		if (tee_pager_npages <= 0)
			panic("Running out of pages");
		tee_pager_npages--;
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
		break;
	default:
		panic();
	}
	asan_tag_no_access(va_alias, va_alias + SMALL_PAGE_SIZE);

	if (!writable)
		attr &= ~(TEE_MATTR_PW | TEE_MATTR_UW);

	/*
	 * We've updated the page using the aliased mapping and
	 * some cache maintenance is now needed if it's an
	 * executable page.
	 *
	 * Since the d-cache is a Physically-indexed,
	 * physically-tagged (PIPT) cache we can clean either the
	 * aliased address or the real virtual address. In this
	 * case we choose the real virtual address.
	 *
	 * The i-cache can also be PIPT, but may be something else
	 * too like VIPT. The current code requires the caches to
	 * implement the IVIPT extension, that is:
	 * "instruction cache maintenance is required only after
	 * writing new data to a physical address that holds an
	 * instruction."
	 *
	 * To portably invalidate the icache the page has to
	 * be mapped at the final virtual address but not
	 * executable.
	 */
	if (reg->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
		uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
				TEE_MATTR_PW | TEE_MATTR_UW;
		void *va = (void *)page_va;

		/* Set a temporary read-only mapping */
		tblidx_set_entry(tblidx, pa, attr & ~mask);
		tblidx_tlbi_entry(tblidx);

		dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
		if (clean_user_cache)
			icache_inv_user_range(va, SMALL_PAGE_SIZE);
		else
			icache_inv_range(va, SMALL_PAGE_SIZE);

		/* Set the final mapping */
		tblidx_set_entry(tblidx, pa, attr);
		tblidx_tlbi_entry(tblidx);
	} else {
		tblidx_set_entry(tblidx, pa, attr);
		/*
		 * No need to flush TLB for this entry, it was
		 * invalid. We should use a barrier though, to make
		 * sure that the change is visible.
		 */
		dsb_ishst();
	}
	pgt_inc_used_entries(tblidx.pgt);

	FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);
}
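
/*
 * make_dirty_page() upgrades a page that was mapped read-only (to catch
 * the first write) to a writable mapping and tags the pmem dirty so
 * that pager_get_page() saves it with fobj_save_page() before the
 * physical page is reused.
 */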

static void make_dirty_page(struct tee_pager_pmem *pmem,
			    struct vm_paged_region *reg, struct tblidx tblidx,
			    paddr_t pa)
{
	assert(reg->flags & (TEE_MATTR_UW | TEE_MATTR_PW));
	assert(!(pmem->flags & PMEM_FLAG_DIRTY));

	FMSG("Dirty %#"PRIxVA, tblidx2va(tblidx));
	pmem->flags |= PMEM_FLAG_DIRTY;
	tblidx_set_entry(tblidx, pa, get_region_mattr(reg->flags));
	tblidx_tlbi_entry(tblidx);
}

/*
 * This function takes a reference to a page (@fobj + fobj_pgidx) and makes
 * the corresponding IV available.
 *
 * In case the page needs to be saved the IV must be writable, so the page
 * holding the IV is made dirty. If the page instead only is to be verified
 * it's enough that the page holding the IV is read-only and it thus doesn't
 * have to be made dirty too.
 *
 * This function depends on pager_spare_pmem pointing to a free pmem when
 * entered. In case the page holding the needed IV isn't mapped this spare
 * pmem is used to map the page. If this function has used pager_spare_pmem
 * and assigned it to NULL it must be reassigned with a new free pmem
 * before this function can be called again.
 */
static void make_iv_available(struct fobj *fobj, unsigned int fobj_pgidx,
			      bool writable)
{
	struct vm_paged_region *reg = pager_iv_region;
	struct tee_pager_pmem *pmem = NULL;
	struct tblidx tblidx = { };
	vaddr_t page_va = 0;
	uint32_t attr = 0;
	paddr_t pa = 0;

	page_va = fobj_get_iv_vaddr(fobj, fobj_pgidx) & ~SMALL_PAGE_MASK;
	if (!IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) || !page_va) {
		assert(!page_va);
		return;
	}

	assert(reg && reg->type == PAGED_REGION_TYPE_RW);
	assert(pager_spare_pmem);
	assert(core_is_buffer_inside(page_va, 1, reg->base, reg->size));

	tblidx = region_va2tblidx(reg, page_va);
	/*
	 * We don't care if tee_pager_unhide_page() succeeds or not, we're
	 * still checking the attributes afterwards.
	 */
	tee_pager_unhide_page(reg, page_va);
	tblidx_get_entry(tblidx, &pa, &attr);
	if (!(attr & TEE_MATTR_VALID_BLOCK)) {
		/*
		 * We're using the spare pmem to map the IV corresponding
		 * to another page.
		 */
		pmem = pager_spare_pmem;
		pager_spare_pmem = NULL;
		pmem_assign_fobj_page(pmem, reg, page_va);

		if (writable)
			pmem->flags |= PMEM_FLAG_DIRTY;

		pager_deploy_page(pmem, reg, page_va,
				  false /*!clean_user_cache*/, writable);
	} else if (writable && !(attr & TEE_MATTR_PW)) {
		pmem = pmem_find(reg, page_va);
		/* Note that pa is valid since TEE_MATTR_VALID_BLOCK is set */
		make_dirty_page(pmem, reg, tblidx, pa);
	}
}
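
/*
 * pager_get_page() below picks the oldest pmem from
 * tee_pager_pmem_head, unmaps it everywhere and, if it is dirty, saves
 * it with fobj_save_page() (which may consume or refill
 * pager_spare_pmem, see make_iv_available()). The freed pmem is then
 * bound to the faulting page and handed to pager_deploy_page().
 */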

static void pager_get_page(struct vm_paged_region *reg, struct abort_info *ai,
			   bool clean_user_cache)
{
	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
	struct tblidx tblidx = region_va2tblidx(reg, page_va);
	struct tee_pager_pmem *pmem = NULL;
	bool writable = false;
	uint32_t attr = 0;

	/*
	 * Get a pmem to load code and data into, also make sure
	 * the corresponding IV page is available.
	 */
	while (true) {
		pmem = TAILQ_FIRST(&tee_pager_pmem_head);
		if (!pmem) {
			EMSG("No pmem entries");
			abort_print(ai);
			panic();
		}

		if (pmem->fobj) {
			pmem_unmap(pmem, NULL);
			if (pmem_is_dirty(pmem)) {
				uint8_t *va = pmem->va_alias;

				make_iv_available(pmem->fobj, pmem->fobj_pgidx,
						  true /*writable*/);
				asan_tag_access(va, va + SMALL_PAGE_SIZE);
				if (fobj_save_page(pmem->fobj, pmem->fobj_pgidx,
						   pmem->va_alias))
					panic("fobj_save_page");
				asan_tag_no_access(va, va + SMALL_PAGE_SIZE);

				pmem_clear(pmem);

				/*
				 * If the spare pmem was used by
				 * make_iv_available() we need to replace
				 * it with the just freed pmem.
				 *
				 * See make_iv_available() for details.
				 */
				if (IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) &&
				    !pager_spare_pmem) {
					TAILQ_REMOVE(&tee_pager_pmem_head,
						     pmem, link);
					pager_spare_pmem = pmem;
					pmem = NULL;
				}

				/*
				 * Check if the needed virtual page was
				 * made available as a side effect of the
				 * call to make_iv_available() above. If so
				 * we're done.
				 */
				tblidx_get_entry(tblidx, NULL, &attr);
				if (attr & TEE_MATTR_VALID_BLOCK)
					return;

				/*
				 * The freed pmem was used to replace the
				 * consumed pager_spare_pmem above. Restart
				 * to find another pmem.
				 */
				if (!pmem)
					continue;
			}
		}

		TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
		pmem_clear(pmem);

		pmem_assign_fobj_page(pmem, reg, page_va);
		make_iv_available(pmem->fobj, pmem->fobj_pgidx,
				  false /*!writable*/);
		if (!IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) || pager_spare_pmem)
			break;

		/*
		 * The spare pmem was used by make_iv_available(). We need
		 * to replace it with the just freed pmem. And get another
		 * pmem.
		 *
		 * See make_iv_available() for details.
		 */
		pmem_clear(pmem);
		pager_spare_pmem = pmem;
	}

	/*
	 * PAGED_REGION_TYPE_LOCK regions are always writable while
	 * PAGED_REGION_TYPE_RO regions never are.
	 *
	 * Pages from PAGED_REGION_TYPE_RW regions start read-only so that
	 * the first write can be detected and the page tagged as dirty.
	 */
	if (reg->type == PAGED_REGION_TYPE_LOCK ||
	    (reg->type == PAGED_REGION_TYPE_RW && abort_is_write_fault(ai)))
		writable = true;
	else
		writable = false;

	pager_deploy_page(pmem, reg, page_va, clean_user_cache, writable);
}

static bool pager_update_permissions(struct vm_paged_region *reg,
				     struct abort_info *ai, bool *handled)
{
	struct tblidx tblidx = region_va2tblidx(reg, ai->va);
	struct tee_pager_pmem *pmem = NULL;
	uint32_t attr = 0;
	paddr_t pa = 0;

	*handled = false;

	tblidx_get_entry(tblidx, &pa, &attr);

	/* Not mapped */
	if (!(attr & TEE_MATTR_VALID_BLOCK))
		return false;

	/* Not readable, should not happen */
	if (abort_is_user_exception(ai)) {
		if (!(attr & TEE_MATTR_UR))
			return true;
	} else {
		if (!(attr & TEE_MATTR_PR)) {
			abort_print_error(ai);
			panic();
		}
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
			/* Check attempting to execute from an NOX page */
			if (abort_is_user_exception(ai)) {
				if (!(attr & TEE_MATTR_UX))
					return true;
			} else {
				if (!(attr & TEE_MATTR_PX)) {
					abort_print_error(ai);
					panic();
				}
			}
		}
		/* Since the page is mapped now it's OK */
		break;
	case CORE_MMU_FAULT_WRITE_PERMISSION:
		/* Check attempting to write to an RO page */
		pmem = pmem_find(reg, ai->va);
		if (!pmem)
			panic();
		if (abort_is_user_exception(ai)) {
			if (!(reg->flags & TEE_MATTR_UW))
				return true;
			if (!(attr & TEE_MATTR_UW))
				make_dirty_page(pmem, reg, tblidx, pa);
		} else {
			if (!(reg->flags & TEE_MATTR_PW)) {
				abort_print_error(ai);
				panic();
			}
			if (!(attr & TEE_MATTR_PW))
				make_dirty_page(pmem, reg, tblidx, pa);
		}
		/* Since the permissions have been updated now it's OK */
		break;
	default:
		/* Some fault we can't deal with */
		if (abort_is_user_exception(ai))
			return true;
		abort_print_error(ai);
		panic();
	}
	*handled = true;
	return true;
}

#ifdef CFG_TEE_CORE_DEBUG
static void stat_handle_fault(void)
{
	static size_t num_faults;
	static size_t min_npages = SIZE_MAX;
	static size_t total_min_npages = SIZE_MAX;

	num_faults++;
	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
		DMSG("nfaults %zu npages %zu (min %zu)",
		     num_faults, tee_pager_npages, min_npages);
		min_npages = tee_pager_npages; /* reset */
	}
	if (tee_pager_npages < min_npages)
		min_npages = tee_pager_npages;
	if (tee_pager_npages < total_min_npages)
		total_min_npages = tee_pager_npages;
}
#else
static void stat_handle_fault(void)
{
}
#endif
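
/*
 * tee_pager_handle_fault() below resolves a pager fault in this order:
 * 1. If the page is resident but hidden (or not yet mapped for this
 *    region), tee_pager_unhide_page() simply remaps it.
 * 2. If the page is already mapped, pager_update_permissions() handles
 *    permission faults, for instance making a clean page dirty on the
 *    first write.
 * 3. Otherwise pager_get_page() evicts the oldest page and pages the
 *    missing one in.
 * Finally tee_pager_hide_pages() hides the oldest pages again so future
 * reuse can be detected.
 */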

bool tee_pager_handle_fault(struct abort_info *ai)
{
	struct vm_paged_region *reg;
	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
	uint32_t exceptions;
	bool ret;
	bool clean_user_cache = false;

#ifdef TEE_PAGER_DEBUG_PRINT
	if (!abort_is_user_exception(ai))
		abort_print(ai);
#endif

	/*
	 * We're updating pages that can affect several active CPUs at a
	 * time below. We end up here because a thread tries to access some
	 * memory that isn't available. We have to be careful when making
	 * that memory available as other threads may succeed in accessing
	 * that address the moment after we've made it available.
	 *
	 * That means that we can't just map the memory and populate the
	 * page, instead we use the aliased mapping to populate the page
	 * and once everything is ready we map it.
	 */
	exceptions = pager_lock(ai);

	stat_handle_fault();

	/* check if the access is valid */
	if (abort_is_user_exception(ai)) {
		reg = find_uta_region(ai->va);
		clean_user_cache = true;
	} else {
		reg = find_region(&core_vm_regions, ai->va);
		if (!reg) {
			reg = find_uta_region(ai->va);
			clean_user_cache = true;
		}
	}
	if (!reg || !reg->pgt_array[0]) {
		ret = false;
		goto out;
	}

	if (tee_pager_unhide_page(reg, page_va))
		goto out_success;

	/*
	 * The page wasn't hidden, but some other core may have
	 * updated the table entry before we got here or we need
	 * to make a read-only page read-write (dirty).
	 */
	if (pager_update_permissions(reg, ai, &ret)) {
		/*
		 * Nothing more to do with the abort. The problem
		 * could already have been dealt with from another
		 * core or if ret is false the TA will be panicked.
		 */
		goto out;
	}

	pager_get_page(reg, ai, clean_user_cache);

out_success:
	tee_pager_hide_pages();
	ret = true;
out:
	pager_unlock(exceptions);
	return ret;
}
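
/*
 * tee_pager_add_pages() donates physical pages backing the given
 * virtual range to the pager at initialization. With @unmap set the
 * pages are unmapped and become free pmems (their virtual range is then
 * paged on demand); with @unmap cleared the pages stay mapped
 * (typically because they are still in use) and are bound to the core
 * region covering them.
 */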

void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
{
	size_t n = 0;

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);

	/* setup memory */
	for (n = 0; n < npages; n++) {
		struct core_mmu_table_info *ti = NULL;
		struct tee_pager_pmem *pmem = NULL;
		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
		struct tblidx tblidx = { };
		unsigned int pgidx = 0;
		paddr_t pa = 0;
		uint32_t attr = 0;

		ti = find_table_info(va);
		pgidx = core_mmu_va2idx(ti, va);
		/*
		 * Note that we can only support adding pages in the
		 * valid range of this table info, currently not a problem.
		 */
		core_mmu_get_entry(ti, pgidx, &pa, &attr);

		/* Ignore unmapped pages/blocks */
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		pmem = calloc(1, sizeof(struct tee_pager_pmem));
		if (!pmem)
			panic("out of mem");
		pmem_clear(pmem);

		pmem->va_alias = pager_add_alias_page(pa);

		if (unmap) {
			core_mmu_set_entry(ti, pgidx, 0, 0);
			pgt_dec_used_entries(find_core_pgt(va));
		} else {
			struct vm_paged_region *reg = NULL;

			/*
			 * The page is still mapped, let's assign the region
			 * and update the protection bits accordingly.
			 */
			reg = find_region(&core_vm_regions, va);
			assert(reg);
			pmem_assign_fobj_page(pmem, reg, va);
			tblidx = pmem_get_region_tblidx(pmem, reg);
			assert(tblidx.pgt == find_core_pgt(va));
			assert(pa == get_pmem_pa(pmem));
			tblidx_set_entry(tblidx, pa,
					 get_region_mattr(reg->flags));
		}

		if (unmap && IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) &&
		    !pager_spare_pmem) {
			pager_spare_pmem = pmem;
		} else {
			tee_pager_npages++;
			incr_npages_all();
			set_npages();
			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
		}
	}

	/*
	 * As this is done at inits, invalidate all TLBs once instead of
	 * targeting only the modified entries.
	 */
	tlbi_all();
}

#ifdef CFG_PAGED_USER_TA
static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
{
	struct pgt *p = pgt;

	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
		p = SLIST_NEXT(p, link);
	return p;
}

void tee_pager_assign_um_tables(struct user_mode_ctx *uctx)
{
	struct vm_paged_region *reg = NULL;
	struct pgt *pgt = NULL;
	size_t n = 0;

	if (!uctx->regions)
		return;

	pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
	TAILQ_FOREACH(reg, uctx->regions, link) {
		for (n = 0; n < get_pgt_count(reg->base, reg->size); n++) {
			vaddr_t va = reg->base + CORE_MMU_PGDIR_SIZE * n;
			struct pgt *p __maybe_unused = find_pgt(pgt, va);

			if (!reg->pgt_array[n])
				reg->pgt_array[n] = p;
			else
				assert(reg->pgt_array[n] == p);
		}
	}
}

void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
{
	struct tee_pager_pmem *pmem = NULL;
	struct vm_paged_region *reg = NULL;
	struct vm_paged_region_head *regions = NULL;
	uint32_t exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
	size_t n = 0;

	if (!pgt->num_used_entries)
		goto out;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		if (pmem->fobj)
			pmem_unmap(pmem, pgt);
	}
	assert(!pgt->num_used_entries);

out:
	regions = to_user_mode_ctx(pgt->ctx)->regions;
	if (regions) {
		TAILQ_FOREACH(reg, regions, link) {
			for (n = 0; n < get_pgt_count(reg->base, reg->size);
			     n++) {
				if (reg->pgt_array[n] == pgt) {
					reg->pgt_array[n] = NULL;
					break;
				}
			}
		}
	}

	pager_unlock(exceptions);
}
DECLARE_KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
#endif /*CFG_PAGED_USER_TA*/

void tee_pager_release_phys(void *addr, size_t size)
{
	bool unmaped = false;
	vaddr_t va = (vaddr_t)addr;
	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
	struct vm_paged_region *reg;
	uint32_t exceptions;

	if (end <= begin)
		return;

	exceptions = pager_lock_check_stack(128);

	for (va = begin; va < end; va += SMALL_PAGE_SIZE) {
		reg = find_region(&core_vm_regions, va);
		if (!reg)
			panic();
		unmaped |= tee_pager_release_one_phys(reg, va);
	}

	if (unmaped)
		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);

	pager_unlock(exceptions);
}
DECLARE_KEEP_PAGER(tee_pager_release_phys);
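
/*
 * tee_pager_alloc() returns "locked" paged memory: the virtual range is
 * carved out of tee_mm_vcore and backed by a locked-paged fobj, so the
 * pages are faulted in on first access and then stay resident until
 * tee_pager_release_phys() hands them back to the pool of pageable
 * pages.
 */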

void *tee_pager_alloc(size_t size)
{
	tee_mm_entry_t *mm = NULL;
	uint8_t *smem = NULL;
	size_t num_pages = 0;
	struct fobj *fobj = NULL;

	if (!size)
		return NULL;

	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
	if (!mm)
		return NULL;

	smem = (uint8_t *)tee_mm_get_smem(mm);
	num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
	fobj = fobj_locked_paged_alloc(num_pages);
	if (!fobj) {
		tee_mm_free(mm);
		return NULL;
	}

	tee_pager_add_core_region((vaddr_t)smem, PAGED_REGION_TYPE_LOCK, fobj);
	fobj_put(fobj);

	asan_tag_access(smem, smem + num_pages * SMALL_PAGE_SIZE);

	return smem;
}

vaddr_t tee_pager_init_iv_region(struct fobj *fobj)
{
	tee_mm_entry_t *mm = NULL;
	uint8_t *smem = NULL;

	assert(!pager_iv_region);

	mm = tee_mm_alloc(&tee_mm_vcore, fobj->num_pages * SMALL_PAGE_SIZE);
	if (!mm)
		panic();

	smem = (uint8_t *)tee_mm_get_smem(mm);
	tee_pager_add_core_region((vaddr_t)smem, PAGED_REGION_TYPE_RW, fobj);
	fobj_put(fobj);

	asan_tag_access(smem, smem + fobj->num_pages * SMALL_PAGE_SIZE);

	pager_iv_region = find_region(&core_vm_regions, (vaddr_t)smem);
	assert(pager_iv_region && pager_iv_region->fobj == fobj);

	return (vaddr_t)smem;
}