/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm.h>
#include <assert.h>
#include <keep.h>
#include <sys/queue.h>
#include <kernel/abort.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <mm/core_memprot.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <types_ext.h>
#include <stdlib.h>
#include <string.h>	/* memcpy()/memset() used below */
#include <tee_api_defines.h>
#include <tee/tee_cryp_provider.h>
#include <trace.h>
#include <utee_defines.h>
#include <util.h>

#include "pager_private.h"

#define PAGER_AE_KEY_BITS 256

struct pager_rw_pstate {
	uint64_t iv;
	uint8_t tag[PAGER_AES_GCM_TAG_LEN];
};

enum area_type {
	AREA_TYPE_RO,
	AREA_TYPE_RW,
	AREA_TYPE_LOCK,
};

struct tee_pager_area {
	union {
		const uint8_t *hashes;
		struct pager_rw_pstate *rwp;
	} u;
	uint8_t *store;
	enum area_type type;
	uint32_t flags;
	vaddr_t base;
	size_t size;
	struct pgt *pgt;
	TAILQ_ENTRY(tee_pager_area) link;
};

TAILQ_HEAD(tee_pager_area_head, tee_pager_area);

static struct tee_pager_area_head tee_pager_area_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
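/*
 * Editor's note: a minimal sketch (not part of the pager) of how a page's
 * index within an area selects both its slot in the backing store and, for
 * read-only areas, its SHA-256 digest. It mirrors the arithmetic used by
 * tee_pager_load_page() further down; the helper names are hypothetical.
 */
#if 0
static const void *example_store_page(struct tee_pager_area *area, vaddr_t va)
{
	size_t idx = (va - area->base) >> SMALL_PAGE_SHIFT;

	/* The backing store keeps one SMALL_PAGE_SIZE slot per page */
	return area->store + idx * SMALL_PAGE_SIZE;
}

static const void *example_page_hash(struct tee_pager_area *area, vaddr_t va)
{
	size_t idx = (va - area->base) >> SMALL_PAGE_SHIFT;

	/* Read-only areas keep one SHA-256 digest per page */
	return area->u.hashes + idx * TEE_SHA256_HASH_SIZE;
}
#endif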
#define INVALID_PGIDX UINT_MAX

/*
 * struct tee_pager_pmem - Represents a physical page used for paging.
 *
 * @pgidx	index of the page table entry in the area's translation table
 * @va_alias	Virtual address where the physical page is always aliased.
 *		Used during remapping of the page when the content needs to
 *		be updated before it's available at the new location.
 * @area	a pointer to the pager area
 */
struct tee_pager_pmem {
	unsigned pgidx;
	void *va_alias;
	struct tee_pager_area *area;
	TAILQ_ENTRY(tee_pager_pmem) link;
};

/* The list of physical pages. The first page in the list is the oldest. */
TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);

static struct tee_pager_pmem_head tee_pager_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);

static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);

static uint8_t pager_ae_key[PAGER_AE_KEY_BITS / 8];

/* Maximum number of pages to hide at a time */
#define TEE_PAGER_NHIDE (tee_pager_npages / 3)

/* Number of registered physical pages, used for hiding pages. */
static size_t tee_pager_npages;

#ifdef CFG_WITH_STATS
static struct tee_pager_stats pager_stats;

static inline void incr_ro_hits(void)
{
	pager_stats.ro_hits++;
}

static inline void incr_rw_hits(void)
{
	pager_stats.rw_hits++;
}

static inline void incr_hidden_hits(void)
{
	pager_stats.hidden_hits++;
}

static inline void incr_zi_released(void)
{
	pager_stats.zi_released++;
}

static inline void incr_npages_all(void)
{
	pager_stats.npages_all++;
}

static inline void set_npages(void)
{
	pager_stats.npages = tee_pager_npages;
}

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	*stats = pager_stats;

	pager_stats.hidden_hits = 0;
	pager_stats.ro_hits = 0;
	pager_stats.rw_hits = 0;
	pager_stats.zi_released = 0;
}

#else /* CFG_WITH_STATS */
static inline void incr_ro_hits(void) { }
static inline void incr_rw_hits(void) { }
static inline void incr_hidden_hits(void) { }
static inline void incr_zi_released(void) { }
static inline void incr_npages_all(void) { }
static inline void set_npages(void) { }

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	memset(stats, 0, sizeof(struct tee_pager_stats));
}
#endif /* CFG_WITH_STATS */

static struct pgt pager_core_pgt;
struct core_mmu_table_info tee_pager_tbl_info;
static struct core_mmu_table_info pager_alias_tbl_info;

static unsigned pager_spinlock = SPINLOCK_UNLOCK;

/* Defines the range of the alias area */
static tee_mm_entry_t *pager_alias_area;
/*
 * Physical pages are added in a stack-like fashion to the alias area,
 * @pager_alias_next_free gives the address of the next free entry if
 * @pager_alias_next_free is != 0
 */
static uintptr_t pager_alias_next_free;

static uint32_t pager_lock(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	cpu_spin_lock(&pager_spinlock);
	return exceptions;
}

static void pager_unlock(uint32_t exceptions)
{
	cpu_spin_unlock(&pager_spinlock);
	thread_set_exceptions(exceptions);
}
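/*
 * Editor's note: a short usage sketch (assumption, not project code) of the
 * locking discipline above, as used throughout this file. Exceptions are
 * masked before taking the spinlock so an abort on the same core cannot
 * re-enter the pager and deadlock; the saved mask must be restored on the
 * way out.
 */
#if 0
static void example_locked_update(void)
{
	uint32_t exceptions = pager_lock();

	/* ... modify tee_pager_pmem_head or page table entries ... */

	pager_unlock(exceptions);
}
#endif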
EMSG("area 0x%" PRIxVA " len 0x%zx doesn't fit it translation table 0x%" PRIxVA " len 0x%zx", 229 smem, nbytes, ti->va_base, tbl_va_size); 230 panic(); 231 } 232 233 if (smem & SMALL_PAGE_MASK || nbytes & SMALL_PAGE_MASK) 234 panic("invalid area alignment"); 235 236 pager_alias_area = mm; 237 pager_alias_next_free = smem; 238 239 /* Clear all mapping in the alias area */ 240 idx = core_mmu_va2idx(ti, smem); 241 last_idx = core_mmu_va2idx(ti, smem + nbytes); 242 for (; idx < last_idx; idx++) 243 core_mmu_set_entry(ti, idx, 0, 0); 244 245 /* TODO only invalidate entries touched above */ 246 core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0); 247 } 248 249 static void generate_ae_key(void) 250 { 251 if (rng_generate(pager_ae_key, sizeof(pager_ae_key)) != TEE_SUCCESS) 252 panic("failed to generate random"); 253 } 254 255 void tee_pager_init(tee_mm_entry_t *mm_alias) 256 { 257 set_alias_area(mm_alias); 258 generate_ae_key(); 259 } 260 261 static void *pager_add_alias_page(paddr_t pa) 262 { 263 unsigned idx; 264 struct core_mmu_table_info *ti = &pager_alias_tbl_info; 265 uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL | 266 (TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) | 267 TEE_MATTR_SECURE | TEE_MATTR_PRW; 268 269 DMSG("0x%" PRIxPA, pa); 270 271 if (!pager_alias_next_free || !ti->num_entries) 272 panic("invalid alias entry"); 273 274 idx = core_mmu_va2idx(ti, pager_alias_next_free); 275 core_mmu_set_entry(ti, idx, pa, attr); 276 pgt_inc_used_entries(&pager_core_pgt); 277 pager_alias_next_free += SMALL_PAGE_SIZE; 278 if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) + 279 tee_mm_get_bytes(pager_alias_area))) 280 pager_alias_next_free = 0; 281 return (void *)core_mmu_idx2va(ti, idx); 282 } 283 284 static struct tee_pager_area *alloc_area(struct pgt *pgt, 285 vaddr_t base, size_t size, 286 uint32_t flags, const void *store, 287 const void *hashes) 288 { 289 struct tee_pager_area *area = calloc(1, sizeof(*area)); 290 enum area_type at; 291 tee_mm_entry_t *mm_store = NULL; 292 293 if (!area) 294 return NULL; 295 296 if (flags & (TEE_MATTR_PW | TEE_MATTR_UW)) { 297 if (flags & TEE_MATTR_LOCKED) { 298 at = AREA_TYPE_LOCK; 299 goto out; 300 } 301 mm_store = tee_mm_alloc(&tee_mm_sec_ddr, size); 302 if (!mm_store) 303 goto bad; 304 area->store = phys_to_virt(tee_mm_get_smem(mm_store), 305 MEM_AREA_TA_RAM); 306 if (!area->store) 307 goto bad; 308 area->u.rwp = calloc(size / SMALL_PAGE_SIZE, 309 sizeof(struct pager_rw_pstate)); 310 if (!area->u.rwp) 311 goto bad; 312 at = AREA_TYPE_RW; 313 } else { 314 area->store = (void *)store; 315 area->u.hashes = hashes; 316 at = AREA_TYPE_RO; 317 } 318 out: 319 area->pgt = pgt; 320 area->base = base; 321 area->size = size; 322 area->flags = flags; 323 area->type = at; 324 return area; 325 bad: 326 tee_mm_free(mm_store); 327 free(area->u.rwp); 328 free(area); 329 return NULL; 330 } 331 332 static void area_insert_tail(struct tee_pager_area *area) 333 { 334 uint32_t exceptions = pager_lock(); 335 336 TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link); 337 338 pager_unlock(exceptions); 339 } 340 KEEP_PAGER(area_insert_tail); 341 342 static size_t tbl_usage_count(struct pgt *pgt) 343 { 344 size_t n; 345 paddr_t pa; 346 size_t usage = 0; 347 348 for (n = 0; n < tee_pager_tbl_info.num_entries; n++) { 349 core_mmu_get_entry_primitive(pgt->tbl, tee_pager_tbl_info.level, 350 n, &pa, NULL); 351 if (pa) 352 usage++; 353 } 354 return usage; 355 } 356 357 bool tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags, 358 const void *store, const void 
bool tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
			     const void *store, const void *hashes)
{
	struct tee_pager_area *area;
	size_t tbl_va_size;
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;

	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
	     base, base + size, flags, store, hashes);

	if (base & SMALL_PAGE_MASK || size & SMALL_PAGE_MASK || !size) {
		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, size);
		panic();
	}

	if (!(flags & TEE_MATTR_PW) && (!store || !hashes))
		panic("non-writable pages must provide store and hashes");

	if ((flags & TEE_MATTR_PW) && (store || hashes))
		panic("writable pages cannot provide store or hashes");

	if (!pager_core_pgt.tbl) {
		pager_core_pgt.tbl = ti->table;
		pgt_set_used_entries(&pager_core_pgt,
				     tbl_usage_count(&pager_core_pgt));
	}

	tbl_va_size = (1 << ti->shift) * ti->num_entries;
	if (!core_is_buffer_inside(base, size, ti->va_base, tbl_va_size)) {
		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
		     base, size, ti->va_base, tbl_va_size);
		return false;
	}

	area = alloc_area(&pager_core_pgt, base, size, flags, store, hashes);
	if (!area)
		return false;

	area_insert_tail(area);
	return true;
}

static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
					vaddr_t va)
{
	struct tee_pager_area *area;

	if (!areas)
		return NULL;

	TAILQ_FOREACH(area, areas, link) {
		if (core_is_buffer_inside(va, 1, area->base, area->size))
			return area;
	}
	return NULL;
}

#ifdef CFG_PAGED_USER_TA
static struct tee_pager_area *find_uta_area(vaddr_t va)
{
	struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;

	if (!ctx || !is_user_ta_ctx(ctx))
		return NULL;
	return find_area(to_user_ta_ctx(ctx)->areas, va);
}
#else
static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
{
	return NULL;
}
#endif /*CFG_PAGED_USER_TA*/

static uint32_t get_area_mattr(uint32_t area_flags)
{
	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));

	if (!(area_flags & (TEE_MATTR_UR | TEE_MATTR_UX | TEE_MATTR_UW)))
		attr |= TEE_MATTR_GLOBAL;

	return attr;
}

static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
{
	paddr_t pa;
	unsigned idx;

	idx = core_mmu_va2idx(&pager_alias_tbl_info, (vaddr_t)pmem->va_alias);
	core_mmu_get_entry(&pager_alias_tbl_info, idx, &pa, NULL);
	return pa;
}

static bool decrypt_page(struct pager_rw_pstate *rwp, const void *src,
			 void *dst)
{
	struct pager_aes_gcm_iv iv = {
		{ (vaddr_t)rwp, rwp->iv >> 32, rwp->iv }
	};

	return pager_aes_gcm_decrypt(pager_ae_key, sizeof(pager_ae_key),
				     &iv, rwp->tag, src, dst, SMALL_PAGE_SIZE);
}
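/*
 * Editor's note: a sketch of the deterministic 96-bit IV layout shared by
 * decrypt_page() above and encrypt_page() below (an illustration assuming
 * struct pager_aes_gcm_iv holds three 32-bit words, as its use here
 * suggests). The fixed field is the address of the per-page state, unique
 * per page; the invocation field is that page's 64-bit encryption counter,
 * so no (key, IV) pair is ever reused.
 */
#if 0
static void example_build_iv(struct pager_aes_gcm_iv *iv,
			     const struct pager_rw_pstate *rwp)
{
	iv->iv[0] = (vaddr_t)rwp;	/* fixed field: per-page state address */
	iv->iv[1] = rwp->iv >> 32;	/* invocation field, high 32 bits */
	iv->iv[2] = rwp->iv;		/* invocation field, low 32 bits */
}
#endif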
static void encrypt_page(struct pager_rw_pstate *rwp, void *src, void *dst)
{
	struct pager_aes_gcm_iv iv;

	assert((rwp->iv + 1) > rwp->iv);
	rwp->iv++;
	/*
	 * IV is constructed as recommended in section "8.2.1 Deterministic
	 * Construction" of "Recommendation for Block Cipher Modes of
	 * Operation: Galois/Counter Mode (GCM) and GMAC",
	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
	 */
	iv.iv[0] = (vaddr_t)rwp;
	iv.iv[1] = rwp->iv >> 32;
	iv.iv[2] = rwp->iv;

	if (!pager_aes_gcm_encrypt(pager_ae_key, sizeof(pager_ae_key),
				   &iv, rwp->tag,
				   src, dst, SMALL_PAGE_SIZE))
		panic("gcm failed");
}

static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
				void *va_alias)
{
	size_t idx = (page_va - area->base) >> SMALL_PAGE_SHIFT;
	const void *stored_page = area->store + idx * SMALL_PAGE_SIZE;

	switch (area->type) {
	case AREA_TYPE_RO:
		{
		const void *hash = area->u.hashes +
				   idx * TEE_SHA256_HASH_SIZE;

		memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
		incr_ro_hits();

		if (hash_sha256_check(hash, va_alias,
				      SMALL_PAGE_SIZE) != TEE_SUCCESS) {
			EMSG("PH 0x%" PRIxVA " failed", page_va);
			panic();
		}
		}
		break;
	case AREA_TYPE_RW:
		FMSG("Restore %p %#" PRIxVA " iv %#" PRIx64,
		     va_alias, page_va, area->u.rwp[idx].iv);
		if (!area->u.rwp[idx].iv)
			memset(va_alias, 0, SMALL_PAGE_SIZE);
		else if (!decrypt_page(&area->u.rwp[idx], stored_page,
				       va_alias)) {
			EMSG("PH 0x%" PRIxVA " failed", page_va);
			panic();
		}
		incr_rw_hits();
		break;
	case AREA_TYPE_LOCK:
		FMSG("Zero init %p %#" PRIxVA, va_alias, page_va);
		memset(va_alias, 0, SMALL_PAGE_SIZE);
		break;
	default:
		panic();
	}
}

static void tee_pager_save_page(struct tee_pager_pmem *pmem, uint32_t attr)
{
	const uint32_t dirty_bits = TEE_MATTR_PW | TEE_MATTR_UW |
				    TEE_MATTR_HIDDEN_DIRTY_BLOCK;

	if (pmem->area->type == AREA_TYPE_RW && (attr & dirty_bits)) {
		size_t offs = pmem->area->base & CORE_MMU_PGDIR_MASK;
		size_t idx = pmem->pgidx - (offs >> SMALL_PAGE_SHIFT);
		void *stored_page = pmem->area->store + idx * SMALL_PAGE_SIZE;

		assert(pmem->area->flags & (TEE_MATTR_PW | TEE_MATTR_UW));
		encrypt_page(&pmem->area->u.rwp[idx], pmem->va_alias,
			     stored_page);
		FMSG("Saved %#" PRIxVA " iv %#" PRIx64,
		     pmem->area->base + idx * SMALL_PAGE_SIZE,
		     pmem->area->u.rwp[idx].iv);
	}
}

static void area_get_entry(struct tee_pager_area *area, size_t idx,
			   paddr_t *pa, uint32_t *attr)
{
	assert(area->pgt);
	assert(idx < tee_pager_tbl_info.num_entries);
	core_mmu_get_entry_primitive(area->pgt->tbl, tee_pager_tbl_info.level,
				     idx, pa, attr);
}

static void area_set_entry(struct tee_pager_area *area, size_t idx,
			   paddr_t pa, uint32_t attr)
{
	assert(area->pgt);
	assert(idx < tee_pager_tbl_info.num_entries);
	core_mmu_set_entry_primitive(area->pgt->tbl, tee_pager_tbl_info.level,
				     idx, pa, attr);
}

static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
{
	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
}

static vaddr_t area_idx2va(struct tee_pager_area *area, size_t idx)
{
	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
}
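/*
 * Editor's note: a worked example (assumed values) of the va <-> idx
 * mapping above. With 4 KiB pages and a 2 MiB translation-table span, an
 * area at base 0x40102000 lives in the table covering 0x40000000, so
 * va 0x40105000 maps to idx (0x40105000 - 0x40000000) >> 12 = 0x105, and
 * area_idx2va(area, 0x105) yields 0x40105000 again.
 */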
#ifdef CFG_PAGED_USER_TA
static void free_area(struct tee_pager_area *area)
{
	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr,
				virt_to_phys(area->store)));
	if (area->type == AREA_TYPE_RW)
		free(area->u.rwp);
	free(area);
}

static bool pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
			       size_t size)
{
	struct tee_pager_area *area;
	uint32_t flags;
	vaddr_t b = base;
	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);

	if (!utc->areas) {
		utc->areas = malloc(sizeof(*utc->areas));
		if (!utc->areas)
			return false;
		TAILQ_INIT(utc->areas);
	}

	flags = TEE_MATTR_PRW | TEE_MATTR_URWX;

	while (s) {
		size_t s2;

		if (find_area(utc->areas, b))
			return false;

		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);

		/* Table info will be set when the context is activated. */
		area = alloc_area(NULL, b, s2, flags, NULL, NULL);
		if (!area)
			return false;
		TAILQ_INSERT_TAIL(utc->areas, area, link);
		b += s2;
		s -= s2;
	}

	return true;
}

bool tee_pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base, size_t size)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct tee_pager_area *area;
	struct core_mmu_table_info dir_info = { NULL };

	if (&utc->ctx != tsd->ctx) {
		/*
		 * Changes are to a utc that isn't active. Just add the
		 * areas, page tables will be dealt with later.
		 */
		return pager_add_uta_area(utc, base, size);
	}

	/*
	 * Assign page tables before adding areas to be able to tell which
	 * are newly added and should be removed in case of failure.
	 */
	tee_pager_assign_uta_tables(utc);
	if (!pager_add_uta_area(utc, base, size)) {
		struct tee_pager_area *next_a;

		/* Remove all added areas */
		TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
			if (!area->pgt) {
				TAILQ_REMOVE(utc->areas, area, link);
				free_area(area);
			}
		}
		return false;
	}

	/*
	 * Assign page tables to the new areas and make sure that the page
	 * tables are registered in the upper table.
	 */
	tee_pager_assign_uta_tables(utc);
	core_mmu_get_user_pgdir(&dir_info);
	TAILQ_FOREACH(area, utc->areas, link) {
		paddr_t pa;
		size_t idx;
		uint32_t attr;

		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
		core_mmu_get_entry(&dir_info, idx, &pa, &attr);

		/*
		 * Check if the page table already is used, if it is, it's
		 * already registered.
		 */
		if (area->pgt->num_used_entries) {
			assert(attr & TEE_MATTR_TABLE);
			assert(pa == virt_to_phys(area->pgt->tbl));
			continue;
		}

		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
		pa = virt_to_phys(area->pgt->tbl);
		assert(pa);
		/*
		 * Note that the update of the table entry is guaranteed to
		 * be atomic.
		 */
		core_mmu_set_entry(&dir_info, idx, pa, attr);
	}

	return true;
}

static void init_tbl_info_from_pgt(struct core_mmu_table_info *ti,
				   struct pgt *pgt)
{
	assert(pgt);
	ti->table = pgt->tbl;
	ti->va_base = pgt->vabase;
	ti->level = tee_pager_tbl_info.level;
	ti->shift = tee_pager_tbl_info.shift;
	ti->num_entries = tee_pager_tbl_info.num_entries;
}
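/*
 * Editor's note: a worked example (assumed values) of the chunking loop in
 * pager_add_uta_area() above. Areas never straddle a translation-table
 * boundary: with a 2 MiB CORE_MMU_PGDIR_SIZE, a request at base 0x3ff000
 * of size 0x3000 is split into one 0x1000 area (up to the 0x400000
 * boundary) followed by one 0x2000 area, per
 * s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s).
 */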
static void transpose_area(struct tee_pager_area *area, struct pgt *new_pgt,
			   vaddr_t new_base)
{
	uint32_t exceptions = pager_lock();

	/*
	 * If there's no pgt assigned to the old area there are no pages to
	 * deal with either, just update with a new pgt and base.
	 */
	if (area->pgt) {
		struct core_mmu_table_info old_ti;
		struct core_mmu_table_info new_ti;
		struct tee_pager_pmem *pmem;

		init_tbl_info_from_pgt(&old_ti, area->pgt);
		init_tbl_info_from_pgt(&new_ti, new_pgt);

		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
			vaddr_t va;
			paddr_t pa;
			uint32_t attr;

			if (pmem->area != area)
				continue;
			core_mmu_get_entry(&old_ti, pmem->pgidx, &pa, &attr);
			core_mmu_set_entry(&old_ti, pmem->pgidx, 0, 0);

			assert(pa == get_pmem_pa(pmem));
			assert(attr);
			assert(area->pgt->num_used_entries);
			area->pgt->num_used_entries--;

			va = core_mmu_idx2va(&old_ti, pmem->pgidx);
			va = va - area->base + new_base;
			pmem->pgidx = core_mmu_va2idx(&new_ti, va);
			core_mmu_set_entry(&new_ti, pmem->pgidx, pa, attr);
			new_pgt->num_used_entries++;
		}
	}

	area->pgt = new_pgt;
	area->base = new_base;
	pager_unlock(exceptions);
}
KEEP_PAGER(transpose_area);
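/*
 * Editor's note: a worked example (assumed values) of the relocation above.
 * A resident page at old va 0x40005000 in an area with base 0x40000000
 * that is transposed to new_base 0x70000000 keeps its physical page; only
 * the entry moves: new va = 0x40005000 - 0x40000000 + 0x70000000 =
 * 0x70005000, and pgidx is recomputed against the new page table.
 */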
void tee_pager_transfer_uta_region(struct user_ta_ctx *src_utc,
				   vaddr_t src_base,
				   struct user_ta_ctx *dst_utc,
				   vaddr_t dst_base, struct pgt **dst_pgt,
				   size_t size)
{
	struct tee_pager_area *area;
	struct tee_pager_area *next_a;

	TAILQ_FOREACH_SAFE(area, src_utc->areas, link, next_a) {
		vaddr_t new_area_base;
		size_t new_idx;

		if (!core_is_buffer_inside(area->base, area->size,
					   src_base, size))
			continue;

		TAILQ_REMOVE(src_utc->areas, area, link);

		new_area_base = dst_base + (area->base - src_base);
		new_idx = (new_area_base - dst_pgt[0]->vabase) /
			  CORE_MMU_PGDIR_SIZE;
		assert((new_area_base & ~CORE_MMU_PGDIR_MASK) ==
		       dst_pgt[new_idx]->vabase);
		transpose_area(area, dst_pgt[new_idx], new_area_base);

		/*
		 * Assert that this will not cause any conflicts in the new
		 * utc. This should already be guaranteed, but a bug here
		 * could be tricky to find.
		 */
		assert(!find_area(dst_utc->areas, area->base));
		TAILQ_INSERT_TAIL(dst_utc->areas, area, link);
	}
}

void tee_pager_rem_uta_region(struct user_ta_ctx *utc, vaddr_t base,
			      size_t size)
{
	struct tee_pager_area *area;
	struct tee_pager_area *next_a;
	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);

	TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
		if (core_is_buffer_inside(area->base, area->size, base, s)) {
			TAILQ_REMOVE(utc->areas, area, link);
			free_area(area);
		}
	}
}

void tee_pager_rem_uta_areas(struct user_ta_ctx *utc)
{
	struct tee_pager_area *area;

	if (!utc->areas)
		return;

	while (true) {
		area = TAILQ_FIRST(utc->areas);
		if (!area)
			break;
		TAILQ_REMOVE(utc->areas, area, link);
		free_area(area);
	}

	free(utc->areas);
}

bool tee_pager_set_uta_area_attr(struct user_ta_ctx *utc, vaddr_t base,
				 size_t size, uint32_t flags)
{
	bool ret;
	vaddr_t b = base;
	size_t s = size;
	size_t s2;
	struct tee_pager_area *area = find_area(utc->areas, b);
	uint32_t exceptions;
	struct tee_pager_pmem *pmem;
	paddr_t pa;
	uint32_t a;
	uint32_t f;

	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
	if (f & TEE_MATTR_UW)
		f |= TEE_MATTR_PW;
	f = get_area_mattr(f);

	exceptions = pager_lock();

	while (s) {
		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
		if (!area || area->base != b || area->size != s2) {
			ret = false;
			goto out;
		}
		b += s2;
		s -= s2;

		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
			if (pmem->area != area)
				continue;
			area_get_entry(pmem->area, pmem->pgidx, &pa, &a);
			if (a & TEE_MATTR_VALID_BLOCK)
				assert(pa == get_pmem_pa(pmem));
			else
				pa = get_pmem_pa(pmem);
			if (a == f)
				continue;
			area_set_entry(pmem->area, pmem->pgidx, 0, 0);
			/* TODO only invalidate entries touched above */
			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
			if (!(flags & TEE_MATTR_UW))
				tee_pager_save_page(pmem, a);

			area_set_entry(pmem->area, pmem->pgidx, pa, f);

			if (flags & TEE_MATTR_UX) {
				void *va = (void *)area_idx2va(pmem->area,
							       pmem->pgidx);

				cache_maintenance_l1(DCACHE_AREA_CLEAN, va,
						     SMALL_PAGE_SIZE);
				cache_maintenance_l1(ICACHE_AREA_INVALIDATE, va,
						     SMALL_PAGE_SIZE);
			}
		}

		area->flags = f;
		area = TAILQ_NEXT(area, link);
	}

	ret = true;
out:
	pager_unlock(exceptions);
	return ret;
}
KEEP_PAGER(tee_pager_set_uta_area_attr);
#endif /*CFG_PAGED_USER_TA*/
static bool tee_pager_unhide_page(vaddr_t page_va)
{
	struct tee_pager_pmem *pmem;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;

		if (pmem->pgidx == INVALID_PGIDX)
			continue;

		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);

		if (!(attr &
		      (TEE_MATTR_HIDDEN_BLOCK | TEE_MATTR_HIDDEN_DIRTY_BLOCK)))
			continue;

		if (area_va2idx(pmem->area, page_va) == pmem->pgidx) {
			uint32_t a = get_area_mattr(pmem->area->flags);

			/* page is hidden, show and move to back */
			if (pa != get_pmem_pa(pmem))
				panic("unexpected pa");

			/*
			 * If it's not a dirty block, then it should be
			 * read only.
			 */
			if (!(attr & TEE_MATTR_HIDDEN_DIRTY_BLOCK))
				a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
			else
				FMSG("Unhide %#" PRIxVA, page_va);

			area_set_entry(pmem->area, pmem->pgidx, pa, a);

			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);

			/* TODO only invalidate entry touched above */
			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

			incr_hidden_hits();
			return true;
		}
	}

	return false;
}

static void tee_pager_hide_pages(void)
{
	struct tee_pager_pmem *pmem;
	size_t n = 0;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;
		uint32_t a;

		if (n >= TEE_PAGER_NHIDE)
			break;
		n++;

		/* we cannot hide pages when pmem->area is not defined. */
		if (!pmem->area)
			continue;

		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		assert(pa == get_pmem_pa(pmem));
		if (attr & (TEE_MATTR_PW | TEE_MATTR_UW)) {
			a = TEE_MATTR_HIDDEN_DIRTY_BLOCK;
			FMSG("Hide %#" PRIxVA,
			     area_idx2va(pmem->area, pmem->pgidx));
		} else {
			a = TEE_MATTR_HIDDEN_BLOCK;
		}
		area_set_entry(pmem->area, pmem->pgidx, pa, a);
	}

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

/*
 * Find mapped pmem, hide and move to pageable pmem.
 * Return false if page was not mapped, and true if page was mapped.
 */
static bool tee_pager_release_one_phys(struct tee_pager_area *area,
				       vaddr_t page_va)
{
	struct tee_pager_pmem *pmem;
	unsigned pgidx;
	paddr_t pa;
	uint32_t attr;

	pgidx = area_va2idx(area, page_va);
	area_get_entry(area, pgidx, &pa, &attr);

	FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);

	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
		if (pmem->area != area || pmem->pgidx != pgidx)
			continue;

		assert(pa == get_pmem_pa(pmem));
		area_set_entry(area, pgidx, 0, 0);
		pgt_dec_used_entries(area->pgt);
		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
		pmem->area = NULL;
		pmem->pgidx = INVALID_PGIDX;
		tee_pager_npages++;
		set_npages();
		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
		incr_zi_released();
		return true;
	}

	return false;
}
/* Finds the oldest page and unmaps it from its old virtual address */
static struct tee_pager_pmem *tee_pager_get_page(struct tee_pager_area *area)
{
	struct tee_pager_pmem *pmem;

	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
	if (!pmem) {
		EMSG("No pmem entries");
		return NULL;
	}
	if (pmem->pgidx != INVALID_PGIDX) {
		uint32_t a;

		assert(pmem->area && pmem->area->pgt);
		area_get_entry(pmem->area, pmem->pgidx, NULL, &a);
		area_set_entry(pmem->area, pmem->pgidx, 0, 0);
		pgt_dec_used_entries(pmem->area->pgt);
		/* TODO only invalidate entries touched above */
		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
		tee_pager_save_page(pmem, a);
	}

	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
	pmem->pgidx = INVALID_PGIDX;
	pmem->area = NULL;
	if (area->type == AREA_TYPE_LOCK) {
		/* Move page to lock list */
		if (tee_pager_npages <= 0)
			panic("running out of pages");
		tee_pager_npages--;
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
	} else {
		/* move page to back */
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	}

	return pmem;
}

static bool pager_update_permissions(struct tee_pager_area *area,
				     struct abort_info *ai, bool *handled)
{
	unsigned int pgidx = area_va2idx(area, ai->va);
	uint32_t attr;
	paddr_t pa;

	*handled = false;

	area_get_entry(area, pgidx, &pa, &attr);

	/* Not mapped */
	if (!(attr & TEE_MATTR_VALID_BLOCK))
		return false;

	/* Not readable, should not happen */
	if (abort_is_user_exception(ai)) {
		if (!(attr & TEE_MATTR_UR))
			return true;
	} else {
		if (!(attr & TEE_MATTR_PR)) {
			abort_print_error(ai);
			panic();
		}
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
			/* Check for an attempt to execute from a
			 * non-executable page */
			if (abort_is_user_exception(ai)) {
				if (!(attr & TEE_MATTR_UX))
					return true;
			} else {
				if (!(attr & TEE_MATTR_PX)) {
					abort_print_error(ai);
					panic();
				}
			}
		}
		/* Since the page is mapped now it's OK */
		break;
	case CORE_MMU_FAULT_WRITE_PERMISSION:
		/* Check for an attempt to write to a read-only page */
		if (abort_is_user_exception(ai)) {
			if (!(area->flags & TEE_MATTR_UW))
				return true;
			if (!(attr & TEE_MATTR_UW)) {
				FMSG("Dirty %p",
				     (void *)(ai->va & ~SMALL_PAGE_MASK));
				area_set_entry(area, pgidx, pa,
					       get_area_mattr(area->flags));
				/* TODO only invalidate entry above */
				core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
			}
		} else {
			if (!(area->flags & TEE_MATTR_PW)) {
				abort_print_error(ai);
				panic();
			}
			if (!(attr & TEE_MATTR_PW)) {
				FMSG("Dirty %p",
				     (void *)(ai->va & ~SMALL_PAGE_MASK));
				area_set_entry(area, pgidx, pa,
					       get_area_mattr(area->flags));
				/* TODO only invalidate entry above */
				core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
			}
		}
		/* Since permissions have been updated now it's OK */
		break;
	default:
		/* Some fault we can't deal with */
		if (abort_is_user_exception(ai))
			return true;
		abort_print_error(ai);
		panic();
	}
	*handled = true;
	return true;
}

#ifdef CFG_TEE_CORE_DEBUG
static void stat_handle_fault(void)
{
	static size_t num_faults;
	static size_t min_npages = SIZE_MAX;
	static size_t total_min_npages = SIZE_MAX;

	num_faults++;
	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
		DMSG("nfaults %zu npages %zu (min %zu)",
		     num_faults, tee_pager_npages, min_npages);
		min_npages = tee_pager_npages; /* reset */
	}
	if (tee_pager_npages < min_npages)
		min_npages = tee_pager_npages;
	if (tee_pager_npages < total_min_npages)
		total_min_npages = tee_pager_npages;
}
#else
static void stat_handle_fault(void)
{
}
#endif
bool tee_pager_handle_fault(struct abort_info *ai)
{
	struct tee_pager_area *area;
	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
	uint32_t exceptions;
	bool ret;

#ifdef TEE_PAGER_DEBUG_PRINT
	abort_print(ai);
#endif

	/*
	 * We're updating pages that can affect several active CPUs at a
	 * time below. We end up here because a thread tries to access some
	 * memory that isn't available. We have to be careful when making
	 * that memory available as other threads may succeed in accessing
	 * that address the moment after we've made it available.
	 *
	 * That means that we can't just map the memory and populate the
	 * page, instead we use the aliased mapping to populate the page
	 * and once everything is ready we map it.
	 */
	exceptions = pager_lock();

	stat_handle_fault();

	/* check if the access is valid */
	if (abort_is_user_exception(ai)) {
		area = find_uta_area(ai->va);
	} else {
		area = find_area(&tee_pager_area_head, ai->va);
		if (!area)
			area = find_uta_area(ai->va);
	}
	if (!area || !area->pgt) {
		ret = false;
		goto out;
	}

	if (!tee_pager_unhide_page(page_va)) {
		struct tee_pager_pmem *pmem = NULL;
		uint32_t attr;

		/*
		 * The page wasn't hidden, but some other core may have
		 * updated the table entry before we got here or we need
		 * to make a read-only page read-write (dirty).
		 */
		if (pager_update_permissions(area, ai, &ret)) {
			/*
			 * Nothing more to do with the abort. The problem
			 * could already have been dealt with from another
			 * core or, if ret is false, the TA will be
			 * panicked.
			 */
			goto out;
		}

		pmem = tee_pager_get_page(area);
		if (!pmem) {
			abort_print(ai);
			panic();
		}

		/* load page code & data */
		tee_pager_load_page(area, page_va, pmem->va_alias);

		/*
		 * We've updated the page using the aliased mapping and
		 * some cache maintenance is now needed if it's an
		 * executable page.
		 *
		 * Since the d-cache is a physically-indexed,
		 * physically-tagged (PIPT) cache we can clean the aliased
		 * address instead of the real virtual address.
		 *
		 * The i-cache can also be PIPT, but may be something else
		 * too; to keep it simple we invalidate the entire i-cache.
		 * As a future optimization we may invalidate only the
		 * aliased area if it's a PIPT cache, else the entire cache.
		 */
		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
			/*
			 * Doing these operations to LoUIS (Level of
			 * Unification, Inner Shareable) would be enough
			 */
			cache_maintenance_l1(DCACHE_AREA_CLEAN,
					     pmem->va_alias, SMALL_PAGE_SIZE);

			cache_maintenance_l1(ICACHE_INVALIDATE, NULL, 0);
		}

		pmem->area = area;
		pmem->pgidx = area_va2idx(area, ai->va);
		/*
		 * Map the page read-only for now; the first write will
		 * fault again and pager_update_permissions() will make it
		 * writable and thereby mark it dirty.
		 */
		attr = get_area_mattr(area->flags) &
		       ~(TEE_MATTR_PW | TEE_MATTR_UW);
		area_set_entry(area, pmem->pgidx, get_pmem_pa(pmem), attr);
		pgt_inc_used_entries(area->pgt);

		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA,
		     area_idx2va(area, pmem->pgidx), get_pmem_pa(pmem));
	}

	tee_pager_hide_pages();
	ret = true;
out:
	pager_unlock(exceptions);
	return ret;
}
void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	size_t n;

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);

	/* setup memory */
	for (n = 0; n < npages; n++) {
		struct tee_pager_pmem *pmem;
		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
		unsigned pgidx = core_mmu_va2idx(ti, va);
		paddr_t pa;
		uint32_t attr;

		/*
		 * Note that we can only support adding pages in the
		 * valid range of this table info, currently not a problem.
		 */
		core_mmu_get_entry(ti, pgidx, &pa, &attr);

		/* Ignore unmapped pages/blocks */
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		pmem = malloc(sizeof(struct tee_pager_pmem));
		if (!pmem)
			panic("out of mem");

		pmem->va_alias = pager_add_alias_page(pa);

		if (unmap) {
			pmem->area = NULL;
			pmem->pgidx = INVALID_PGIDX;
			core_mmu_set_entry(ti, pgidx, 0, 0);
			pgt_dec_used_entries(&pager_core_pgt);
		} else {
			/*
			 * The page is still mapped, let's assign the area
			 * and update the protection bits accordingly.
			 */
			pmem->area = find_area(&tee_pager_area_head, va);
			assert(pmem->area->pgt == &pager_core_pgt);
			pmem->pgidx = pgidx;
			assert(pa == get_pmem_pa(pmem));
			area_set_entry(pmem->area, pgidx, pa,
				       get_area_mattr(pmem->area->flags));
		}

		tee_pager_npages++;
		incr_npages_all();
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	}

	/* Invalidate secure TLB */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}
#ifdef CFG_PAGED_USER_TA
static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
{
	struct pgt *p = pgt;

	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
		p = SLIST_NEXT(p, link);
	return p;
}

void tee_pager_assign_uta_tables(struct user_ta_ctx *utc)
{
	struct tee_pager_area *area;
	struct pgt *pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);

	TAILQ_FOREACH(area, utc->areas, link) {
		if (!area->pgt)
			area->pgt = find_pgt(pgt, area->base);
		else
			assert(area->pgt == find_pgt(pgt, area->base));
		if (!area->pgt)
			panic();
	}
}

static void pager_save_and_release_entry(struct tee_pager_pmem *pmem)
{
	uint32_t attr;

	assert(pmem->area && pmem->area->pgt);

	area_get_entry(pmem->area, pmem->pgidx, NULL, &attr);
	area_set_entry(pmem->area, pmem->pgidx, 0, 0);
	tee_pager_save_page(pmem, attr);
	assert(pmem->area->pgt->num_used_entries);
	pmem->area->pgt->num_used_entries--;
	pmem->pgidx = INVALID_PGIDX;
	pmem->area = NULL;
}

void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
{
	struct tee_pager_pmem *pmem;
	struct tee_pager_area *area;
	uint32_t exceptions = pager_lock();

	if (!pgt->num_used_entries)
		goto out;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		if (!pmem->area || pmem->pgidx == INVALID_PGIDX)
			continue;
		if (pmem->area->pgt == pgt)
			pager_save_and_release_entry(pmem);
	}
	assert(!pgt->num_used_entries);

out:
	if (is_user_ta_ctx(pgt->ctx)) {
		TAILQ_FOREACH(area, to_user_ta_ctx(pgt->ctx)->areas, link) {
			if (area->pgt == pgt)
				area->pgt = NULL;
		}
	}

	pager_unlock(exceptions);
}
KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
#endif /*CFG_PAGED_USER_TA*/

void tee_pager_release_phys(void *addr, size_t size)
{
	bool unmapped = false;
	vaddr_t va = (vaddr_t)addr;
	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
	struct tee_pager_area *area;
	uint32_t exceptions;

	if (!size)
		return;

	area = find_area(&tee_pager_area_head, begin);
	if (!area ||
	    area != find_area(&tee_pager_area_head, end - SMALL_PAGE_SIZE))
		panic();

	exceptions = pager_lock();

	for (va = begin; va < end; va += SMALL_PAGE_SIZE)
		unmapped |= tee_pager_release_one_phys(area, va);

	/* Invalidate secure TLB */
	if (unmapped)
		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

	pager_unlock(exceptions);
}
KEEP_PAGER(tee_pager_release_phys);
void *tee_pager_alloc(size_t size, uint32_t flags)
{
	tee_mm_entry_t *mm;
	uint32_t f = TEE_MATTR_PW | TEE_MATTR_PR | (flags & TEE_MATTR_LOCKED);

	if (!size)
		return NULL;

	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
	if (!mm)
		return NULL;

	tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
				f, NULL, NULL);

	return (void *)tee_mm_get_smem(mm);
}
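/*
 * Editor's note: a usage sketch (assumption, not project code) of the
 * allocator above. TEE_MATTR_LOCKED requests an AREA_TYPE_LOCK area whose
 * pages are pinned once faulted in; tee_pager_release_phys() hands the
 * physical pages back to the pager when the contents are no longer needed.
 */
#if 0
static void example_use_pager_alloc(void)
{
	size_t sz = 8 * SMALL_PAGE_SIZE;
	uint8_t *buf = tee_pager_alloc(sz, TEE_MATTR_LOCKED);

	if (!buf)
		panic();

	memset(buf, 0, sz);	/* faults the pages in, zero-initialized */

	/* ... use buf ... */

	tee_pager_release_phys(buf, sz);	/* unpin the physical pages */
}
#endif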