/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm.h>
#include <assert.h>
#include <keep.h>
#include <sys/queue.h>
#include <kernel/abort.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <mm/core_memprot.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <types_ext.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_defines.h>
#include <tee/tee_cryp_provider.h>
#include <trace.h>
#include <utee_defines.h>
#include <util.h>

#include "pager_private.h"

#define PAGER_AE_KEY_BITS	256

struct pager_rw_pstate {
	uint64_t iv;
	uint8_t tag[PAGER_AES_GCM_TAG_LEN];
};

enum area_type {
	AREA_TYPE_RO,
	AREA_TYPE_RW,
	AREA_TYPE_LOCK,
};

struct tee_pager_area {
	union {
		const uint8_t *hashes;
		struct pager_rw_pstate *rwp;
	} u;
	uint8_t *store;
	enum area_type type;
	uint32_t flags;
	vaddr_t base;
	size_t size;
	struct pgt *pgt;
	TAILQ_ENTRY(tee_pager_area) link;
};

TAILQ_HEAD(tee_pager_area_head, tee_pager_area);

static struct tee_pager_area_head tee_pager_area_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);

#define INVALID_PGIDX	UINT_MAX

/*
 * struct tee_pager_pmem - Represents a physical page used for paging.
 *
 * @pgidx	an index of the entry in the translation table of the area
 * @va_alias	Virtual address where the physical page always is aliased.
 *		Used during remapping of the page when the content needs to
 *		be updated before it's available at the new location.
 * @area	a pointer to the pager area
 */
struct tee_pager_pmem {
	unsigned pgidx;
	void *va_alias;
	struct tee_pager_area *area;
	TAILQ_ENTRY(tee_pager_pmem) link;
};

/* The list of physical pages. The first page in the list is the oldest. */
TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);

static struct tee_pager_pmem_head tee_pager_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);

static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);

static uint8_t pager_ae_key[PAGER_AE_KEY_BITS / 8];

/* number of pages hidden */
#define TEE_PAGER_NHIDE (tee_pager_npages / 3)

/* Number of registered physical pages, used when hiding pages. */
static size_t tee_pager_npages;

#ifdef CFG_WITH_STATS
static struct tee_pager_stats pager_stats;

static inline void incr_ro_hits(void)
{
	pager_stats.ro_hits++;
}

static inline void incr_rw_hits(void)
{
	pager_stats.rw_hits++;
}

static inline void incr_hidden_hits(void)
{
	pager_stats.hidden_hits++;
}

static inline void incr_zi_released(void)
{
	pager_stats.zi_released++;
}

static inline void incr_npages_all(void)
{
	pager_stats.npages_all++;
}

static inline void set_npages(void)
{
	pager_stats.npages = tee_pager_npages;
}

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	*stats = pager_stats;

	pager_stats.hidden_hits = 0;
	pager_stats.ro_hits = 0;
	pager_stats.rw_hits = 0;
	pager_stats.zi_released = 0;
}

#else /* CFG_WITH_STATS */
static inline void incr_ro_hits(void) { }
static inline void incr_rw_hits(void) { }
static inline void incr_hidden_hits(void) { }
static inline void incr_zi_released(void) { }
static inline void incr_npages_all(void) { }
static inline void set_npages(void) { }

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	memset(stats, 0, sizeof(struct tee_pager_stats));
}
#endif /* CFG_WITH_STATS */

static struct pgt pager_core_pgt;
struct core_mmu_table_info tee_pager_tbl_info;
static struct core_mmu_table_info pager_alias_tbl_info;

static unsigned pager_spinlock = SPINLOCK_UNLOCK;

/* Defines the range of the alias area */
static tee_mm_entry_t *pager_alias_area;
/*
 * Physical pages are added in a stack-like fashion to the alias area.
 * @pager_alias_next_free gives the address of the next free entry if
 * @pager_alias_next_free is != 0
 */
static uintptr_t pager_alias_next_free;

static uint32_t pager_lock(void)
{
	return cpu_spin_lock_xsave(&pager_spinlock);
}

static void pager_unlock(uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
}

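/*
 * set_alias_area() - reserve the virtual range used to alias physical pages
 *
 * Every physical page handed to the pager is also mapped a second time
 * inside this area (see pager_add_alias_page()). The alias mapping is what
 * the pager writes through when it populates, verifies or encrypts a page,
 * so the page can be exposed read-only (or not at all) at its real virtual
 * address until its content is ready.
 */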
static void set_alias_area(tee_mm_entry_t *mm)
{
	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
	size_t tbl_va_size;
	unsigned idx;
	unsigned last_idx;
	vaddr_t smem = tee_mm_get_smem(mm);
	size_t nbytes = tee_mm_get_bytes(mm);

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);

	if (pager_alias_area)
		panic("pager_alias_area already set");

	if (!ti->num_entries && !core_mmu_find_table(smem, UINT_MAX, ti))
		panic("Can't find translation table");

	if ((1 << ti->shift) != SMALL_PAGE_SIZE)
		panic("Unsupported page size in translation table");

	tbl_va_size = (1 << ti->shift) * ti->num_entries;
	if (!core_is_buffer_inside(smem, nbytes,
				   ti->va_base, tbl_va_size)) {
		EMSG("area 0x%" PRIxVA " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
		     smem, nbytes, ti->va_base, tbl_va_size);
		panic();
	}

	if (smem & SMALL_PAGE_MASK || nbytes & SMALL_PAGE_MASK)
		panic("invalid area alignment");

	pager_alias_area = mm;
	pager_alias_next_free = smem;

	/* Clear all mappings in the alias area */
	idx = core_mmu_va2idx(ti, smem);
	last_idx = core_mmu_va2idx(ti, smem + nbytes);
	for (; idx < last_idx; idx++)
		core_mmu_set_entry(ti, idx, 0, 0);

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

static void generate_ae_key(void)
{
	if (rng_generate(pager_ae_key, sizeof(pager_ae_key)) != TEE_SUCCESS)
		panic("failed to generate random");
}

void tee_pager_init(tee_mm_entry_t *mm_alias)
{
	set_alias_area(mm_alias);
	generate_ae_key();
}

static void *pager_add_alias_page(paddr_t pa)
{
	unsigned idx;
	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
	/* Alias pages mapped without write permission: runtime will care */
	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
			TEE_MATTR_SECURE | TEE_MATTR_PR;

	DMSG("0x%" PRIxPA, pa);

	if (!pager_alias_next_free || !ti->num_entries)
		panic("invalid alias entry");

	idx = core_mmu_va2idx(ti, pager_alias_next_free);
	core_mmu_set_entry(ti, idx, pa, attr);
	pgt_inc_used_entries(&pager_core_pgt);
	pager_alias_next_free += SMALL_PAGE_SIZE;
	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
				      tee_mm_get_bytes(pager_alias_area)))
		pager_alias_next_free = 0;
	return (void *)core_mmu_idx2va(ti, idx);
}

static struct tee_pager_area *alloc_area(struct pgt *pgt,
					 vaddr_t base, size_t size,
					 uint32_t flags, const void *store,
					 const void *hashes)
{
	struct tee_pager_area *area = calloc(1, sizeof(*area));
	enum area_type at;
	tee_mm_entry_t *mm_store = NULL;

	if (!area)
		return NULL;

	if (flags & (TEE_MATTR_PW | TEE_MATTR_UW)) {
		if (flags & TEE_MATTR_LOCKED) {
			at = AREA_TYPE_LOCK;
			goto out;
		}
		mm_store = tee_mm_alloc(&tee_mm_sec_ddr, size);
		if (!mm_store)
			goto bad;
		area->store = phys_to_virt(tee_mm_get_smem(mm_store),
					   MEM_AREA_TA_RAM);
		if (!area->store)
			goto bad;
		area->u.rwp = calloc(size / SMALL_PAGE_SIZE,
				     sizeof(struct pager_rw_pstate));
		if (!area->u.rwp)
			goto bad;
		at = AREA_TYPE_RW;
	} else {
		area->store = (void *)store;
		area->u.hashes = hashes;
		at = AREA_TYPE_RO;
	}
out:
	area->pgt = pgt;
	area->base = base;
	area->size = size;
	area->flags = flags;
	area->type = at;
	return area;
bad:
	tee_mm_free(mm_store);
	free(area->u.rwp);
	free(area);
	return NULL;
}

static void area_insert_tail(struct tee_pager_area *area)
{
	uint32_t exceptions = pager_lock();

	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);

	pager_unlock(exceptions);
}
KEEP_PAGER(area_insert_tail);

static size_t tbl_usage_count(struct pgt *pgt)
{
	size_t n;
	paddr_t pa;
	size_t usage = 0;

	for (n = 0; n < tee_pager_tbl_info.num_entries; n++) {
		core_mmu_get_entry_primitive(pgt->tbl, tee_pager_tbl_info.level,
					     n, &pa, NULL);
		if (pa)
			usage++;
	}
	return usage;
}

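/*
 * tee_pager_add_core_area() - register a paged area of core (TEE) memory
 *
 * Read-only areas are backed directly by @store and verified against the
 * SHA-256 @hashes when paged in. Writable areas get a backing store
 * allocated from tee_mm_sec_ddr and are encrypted/authenticated with
 * AES-GCM when paged out, so no store or hashes may be supplied for them.
 *
 * Illustrative call for a read-only executable area (the names of the
 * backing store and hash arrays are hypothetical):
 *
 *	tee_pager_add_core_area(ro_base, ro_size, TEE_MATTR_PRX,
 *				paged_store, paged_hashes);
 */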
bool tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
			     const void *store, const void *hashes)
{
	struct tee_pager_area *area;
	size_t tbl_va_size;
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;

	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
	     base, base + size, flags, store, hashes);

	if (base & SMALL_PAGE_MASK || size & SMALL_PAGE_MASK || !size) {
		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, size);
		panic();
	}

	if (!(flags & TEE_MATTR_PW) && (!store || !hashes))
		panic("non-writable pages must provide store and hashes");

	if ((flags & TEE_MATTR_PW) && (store || hashes))
		panic("writable pages cannot provide store or hashes");

	if (!pager_core_pgt.tbl) {
		pager_core_pgt.tbl = ti->table;
		pgt_set_used_entries(&pager_core_pgt,
				     tbl_usage_count(&pager_core_pgt));
	}

	tbl_va_size = (1 << ti->shift) * ti->num_entries;
	if (!core_is_buffer_inside(base, size, ti->va_base, tbl_va_size)) {
		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
		     base, size, ti->va_base, tbl_va_size);
		return false;
	}

	area = alloc_area(&pager_core_pgt, base, size, flags, store, hashes);
	if (!area)
		return false;

	area_insert_tail(area);
	return true;
}

static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
					vaddr_t va)
{
	struct tee_pager_area *area;

	if (!areas)
		return NULL;

	TAILQ_FOREACH(area, areas, link) {
		if (core_is_buffer_inside(va, 1, area->base, area->size))
			return area;
	}
	return NULL;
}

#ifdef CFG_PAGED_USER_TA
static struct tee_pager_area *find_uta_area(vaddr_t va)
{
	struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;

	if (!ctx || !is_user_ta_ctx(ctx))
		return NULL;
	return find_area(to_user_ta_ctx(ctx)->areas, va);
}
#else
static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
{
	return NULL;
}
#endif /*CFG_PAGED_USER_TA*/


static uint32_t get_area_mattr(uint32_t area_flags)
{
	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));

	if (!(area_flags & (TEE_MATTR_UR | TEE_MATTR_UX | TEE_MATTR_UW)))
		attr |= TEE_MATTR_GLOBAL;

	return attr;
}

static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
{
	paddr_t pa;
	unsigned idx;

	idx = core_mmu_va2idx(&pager_alias_tbl_info, (vaddr_t)pmem->va_alias);
	core_mmu_get_entry(&pager_alias_tbl_info, idx, &pa, NULL);
	return pa;
}

static bool decrypt_page(struct pager_rw_pstate *rwp, const void *src,
			 void *dst)
{
	struct pager_aes_gcm_iv iv = {
		{ (vaddr_t)rwp, rwp->iv >> 32, rwp->iv }
	};

	return pager_aes_gcm_decrypt(pager_ae_key, sizeof(pager_ae_key),
				     &iv, rwp->tag, src, dst, SMALL_PAGE_SIZE);
}

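/*
 * Each writable page has its own pager_rw_pstate: a monotonically
 * increasing IV counter and the AES-GCM authentication tag of the last
 * version that was paged out. The IV fed to GCM combines the address of
 * the per-page state structure with the 64-bit counter, so an IV is never
 * reused with the boot-time random key. That property is what keeps GCM
 * safe here: a repeated IV would both leak keystream and void the
 * authentication guarantee.
 */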
static void encrypt_page(struct pager_rw_pstate *rwp, void *src, void *dst)
{
	struct pager_aes_gcm_iv iv;

	assert((rwp->iv + 1) > rwp->iv);
	rwp->iv++;
	/*
	 * IV is constructed as recommended in section "8.2.1 Deterministic
	 * Construction" of "Recommendation for Block Cipher Modes of
	 * Operation: Galois/Counter Mode (GCM) and GMAC",
	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
	 */
	iv.iv[0] = (vaddr_t)rwp;
	iv.iv[1] = rwp->iv >> 32;
	iv.iv[2] = rwp->iv;

	if (!pager_aes_gcm_encrypt(pager_ae_key, sizeof(pager_ae_key),
				   &iv, rwp->tag,
				   src, dst, SMALL_PAGE_SIZE))
		panic("gcm failed");
}

static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
				void *va_alias)
{
	size_t idx = (page_va - area->base) >> SMALL_PAGE_SHIFT;
	const void *stored_page = area->store + idx * SMALL_PAGE_SIZE;
	struct core_mmu_table_info *ti;
	uint32_t attr_alias;
	paddr_t pa_alias;
	unsigned int idx_alias;

	/* Ensure we are allowed to write to the aliased virtual page */
	ti = &pager_alias_tbl_info;
	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
	if (!(attr_alias & TEE_MATTR_PW)) {
		attr_alias |= TEE_MATTR_PW;
		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
		/* TODO: flush TLB for target page only */
		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
	}

	switch (area->type) {
	case AREA_TYPE_RO:
		{
			const void *hash = area->u.hashes +
					   idx * TEE_SHA256_HASH_SIZE;

			memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
			incr_ro_hits();

			if (hash_sha256_check(hash, va_alias,
					      SMALL_PAGE_SIZE) != TEE_SUCCESS) {
				EMSG("PH 0x%" PRIxVA " failed", page_va);
				panic();
			}
		}
		/* Forbid write to aliases for read-only (maybe exec) pages */
		attr_alias &= ~TEE_MATTR_PW;
		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
		/* TODO: flush TLB for target page only */
		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
		break;
	case AREA_TYPE_RW:
		FMSG("Restore %p %#" PRIxVA " iv %#" PRIx64,
		     va_alias, page_va, area->u.rwp[idx].iv);
		if (!area->u.rwp[idx].iv)
			memset(va_alias, 0, SMALL_PAGE_SIZE);
		else if (!decrypt_page(&area->u.rwp[idx], stored_page,
				       va_alias)) {
			EMSG("PH 0x%" PRIxVA " failed", page_va);
			panic();
		}
		incr_rw_hits();
		break;
	case AREA_TYPE_LOCK:
		FMSG("Zero init %p %#" PRIxVA, va_alias, page_va);
		memset(va_alias, 0, SMALL_PAGE_SIZE);
		break;
	default:
		panic();
	}
}

static void tee_pager_save_page(struct tee_pager_pmem *pmem, uint32_t attr)
{
	const uint32_t dirty_bits = TEE_MATTR_PW | TEE_MATTR_UW |
				    TEE_MATTR_HIDDEN_DIRTY_BLOCK;

	if (pmem->area->type == AREA_TYPE_RW && (attr & dirty_bits)) {
		size_t offs = pmem->area->base & CORE_MMU_PGDIR_MASK;
		size_t idx = pmem->pgidx - (offs >> SMALL_PAGE_SHIFT);
		void *stored_page = pmem->area->store + idx * SMALL_PAGE_SIZE;

		assert(pmem->area->flags & (TEE_MATTR_PW | TEE_MATTR_UW));
		encrypt_page(&pmem->area->u.rwp[idx], pmem->va_alias,
			     stored_page);
		FMSG("Saved %#" PRIxVA " iv %#" PRIx64,
		     pmem->area->base + idx * SMALL_PAGE_SIZE,
		     pmem->area->u.rwp[idx].iv);
	}
}

static void area_get_entry(struct tee_pager_area *area, size_t idx,
			   paddr_t *pa, uint32_t *attr)
{
	assert(area->pgt);
	assert(idx < tee_pager_tbl_info.num_entries);
	core_mmu_get_entry_primitive(area->pgt->tbl, tee_pager_tbl_info.level,
				     idx, pa, attr);
}

static void area_set_entry(struct tee_pager_area *area, size_t idx,
			   paddr_t pa, uint32_t attr)
{
	assert(area->pgt);
	assert(idx < tee_pager_tbl_info.num_entries);
	core_mmu_set_entry_primitive(area->pgt->tbl, tee_pager_tbl_info.level,
				     idx, pa, attr);
}

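/*
 * Page indices used with area_get_entry()/area_set_entry() are relative to
 * the start of the translation table covering the area, not to area->base.
 * For example, with 4 KiB pages and a table covering a 1 MiB aligned region
 * (the exact sizes depend on the configuration):
 *
 *	area->base = 0x00102000, va = 0x00105000
 *	area_va2idx(area, va) = (0x00105000 - 0x00100000) >> 12 = 5
 */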
static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
{
	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
}

static vaddr_t __maybe_unused area_idx2va(struct tee_pager_area *area,
					  size_t idx)
{
	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
}

#ifdef CFG_PAGED_USER_TA
static void free_area(struct tee_pager_area *area)
{
	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr,
				virt_to_phys(area->store)));
	if (area->type == AREA_TYPE_RW)
		free(area->u.rwp);
	free(area);
}

static bool pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
			       size_t size)
{
	struct tee_pager_area *area;
	uint32_t flags;
	vaddr_t b = base;
	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);

	if (!utc->areas) {
		utc->areas = malloc(sizeof(*utc->areas));
		if (!utc->areas)
			return false;
		TAILQ_INIT(utc->areas);
	}

	flags = TEE_MATTR_PRW | TEE_MATTR_URWX;

	while (s) {
		size_t s2;

		if (find_area(utc->areas, b))
			return false;

		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);

		/* Table info will be set when the context is activated. */
		area = alloc_area(NULL, b, s2, flags, NULL, NULL);
		if (!area)
			return false;
		TAILQ_INSERT_TAIL(utc->areas, area, link);
		b += s2;
		s -= s2;
	}

	return true;
}

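/*
 * tee_pager_add_uta_area() - add a paged area to a user TA context
 *
 * If @utc isn't the currently active context only the bookkeeping is done
 * here; page tables are assigned when the context is activated. For the
 * active context the new areas also get page tables assigned and
 * registered in the user page directory, and partially added areas are
 * rolled back on failure.
 */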
bool tee_pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base, size_t size)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct tee_pager_area *area;
	struct core_mmu_table_info dir_info = { NULL };

	if (&utc->ctx != tsd->ctx) {
		/*
		 * Changes are to a utc that isn't active. Just add the
		 * areas; page tables will be dealt with later.
		 */
		return pager_add_uta_area(utc, base, size);
	}

	/*
	 * Assign page tables before adding areas to be able to tell which
	 * are newly added and should be removed in case of failure.
	 */
	tee_pager_assign_uta_tables(utc);
	if (!pager_add_uta_area(utc, base, size)) {
		struct tee_pager_area *next_a;

		/* Remove all added areas */
		TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
			if (!area->pgt) {
				TAILQ_REMOVE(utc->areas, area, link);
				free_area(area);
			}
		}
		return false;
	}

	/*
	 * Assign page tables to the new areas and make sure that the page
	 * tables are registered in the upper table.
	 */
	tee_pager_assign_uta_tables(utc);
	core_mmu_get_user_pgdir(&dir_info);
	TAILQ_FOREACH(area, utc->areas, link) {
		paddr_t pa;
		size_t idx;
		uint32_t attr;

		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
		core_mmu_get_entry(&dir_info, idx, &pa, &attr);

		/*
		 * Check if the page table already is used, if it is, it's
		 * already registered.
		 */
		if (area->pgt->num_used_entries) {
			assert(attr & TEE_MATTR_TABLE);
			assert(pa == virt_to_phys(area->pgt->tbl));
			continue;
		}

		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
		pa = virt_to_phys(area->pgt->tbl);
		assert(pa);
		/*
		 * Note that the update of the table entry is guaranteed to
		 * be atomic.
		 */
		core_mmu_set_entry(&dir_info, idx, pa, attr);
	}

	return true;
}

static void init_tbl_info_from_pgt(struct core_mmu_table_info *ti,
				   struct pgt *pgt)
{
	assert(pgt);
	ti->table = pgt->tbl;
	ti->va_base = pgt->vabase;
	ti->level = tee_pager_tbl_info.level;
	ti->shift = tee_pager_tbl_info.shift;
	ti->num_entries = tee_pager_tbl_info.num_entries;
}

static void transpose_area(struct tee_pager_area *area, struct pgt *new_pgt,
			   vaddr_t new_base)
{
	uint32_t exceptions = pager_lock();

	/*
	 * If there's no pgt assigned to the old area there are no pages to
	 * deal with either, just update with a new pgt and base.
	 */
	if (area->pgt) {
		struct core_mmu_table_info old_ti;
		struct core_mmu_table_info new_ti;
		struct tee_pager_pmem *pmem;

		init_tbl_info_from_pgt(&old_ti, area->pgt);
		init_tbl_info_from_pgt(&new_ti, new_pgt);

		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
			vaddr_t va;
			paddr_t pa;
			uint32_t attr;

			if (pmem->area != area)
				continue;
			core_mmu_get_entry(&old_ti, pmem->pgidx, &pa, &attr);
			core_mmu_set_entry(&old_ti, pmem->pgidx, 0, 0);

			assert(pa == get_pmem_pa(pmem));
			assert(attr);
			assert(area->pgt->num_used_entries);
			area->pgt->num_used_entries--;

			va = core_mmu_idx2va(&old_ti, pmem->pgidx);
			va = va - area->base + new_base;
			pmem->pgidx = core_mmu_va2idx(&new_ti, va);
			core_mmu_set_entry(&new_ti, pmem->pgidx, pa, attr);
			new_pgt->num_used_entries++;
		}
	}

	area->pgt = new_pgt;
	area->base = new_base;
	pager_unlock(exceptions);
}
KEEP_PAGER(transpose_area);

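/*
 * tee_pager_transfer_uta_region() - move paged areas between TA contexts
 *
 * Areas fully contained in [src_base, src_base + size) are detached from
 * @src_utc, rebased to the corresponding addresses in @dst_utc and hooked
 * up to the destination page tables in @dst_pgt. Pages currently paged in
 * follow their area to the new translation table via transpose_area().
 */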
void tee_pager_transfer_uta_region(struct user_ta_ctx *src_utc,
				   vaddr_t src_base,
				   struct user_ta_ctx *dst_utc,
				   vaddr_t dst_base, struct pgt **dst_pgt,
				   size_t size)
{
	struct tee_pager_area *area;
	struct tee_pager_area *next_a;

	TAILQ_FOREACH_SAFE(area, src_utc->areas, link, next_a) {
		vaddr_t new_area_base;
		size_t new_idx;

		if (!core_is_buffer_inside(area->base, area->size,
					   src_base, size))
			continue;

		TAILQ_REMOVE(src_utc->areas, area, link);

		new_area_base = dst_base + (area->base - src_base);
		new_idx = (new_area_base - dst_pgt[0]->vabase) /
			  CORE_MMU_PGDIR_SIZE;
		assert((new_area_base & ~CORE_MMU_PGDIR_MASK) ==
		       dst_pgt[new_idx]->vabase);
		transpose_area(area, dst_pgt[new_idx], new_area_base);

		/*
		 * Assert that this will not cause any conflicts in the new
		 * utc. This should already be guaranteed, but a bug here
		 * could be tricky to find.
		 */
		assert(!find_area(dst_utc->areas, area->base));
		TAILQ_INSERT_TAIL(dst_utc->areas, area, link);
	}
}

static void rem_area(struct tee_pager_area_head *area_head,
		     struct tee_pager_area *area)
{
	struct tee_pager_pmem *pmem;
	uint32_t exceptions;

	exceptions = pager_lock();

	TAILQ_REMOVE(area_head, area, link);

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		if (pmem->area == area) {
			area_set_entry(area, pmem->pgidx, 0, 0);
			pgt_dec_used_entries(area->pgt);
			pmem->area = NULL;
			pmem->pgidx = INVALID_PGIDX;
		}
	}

	pager_unlock(exceptions);
	free_area(area);
}
KEEP_PAGER(rem_area);

void tee_pager_rem_uta_region(struct user_ta_ctx *utc, vaddr_t base,
			      size_t size)
{
	struct tee_pager_area *area;
	struct tee_pager_area *next_a;
	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);

	TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
		if (core_is_buffer_inside(area->base, area->size, base, s))
			rem_area(utc->areas, area);
	}
}

void tee_pager_rem_uta_areas(struct user_ta_ctx *utc)
{
	struct tee_pager_area *area;

	if (!utc->areas)
		return;

	while (true) {
		area = TAILQ_FIRST(utc->areas);
		if (!area)
			break;
		TAILQ_REMOVE(utc->areas, area, link);
		free_area(area);
	}

	free(utc->areas);
}

bool tee_pager_set_uta_area_attr(struct user_ta_ctx *utc, vaddr_t base,
				 size_t size, uint32_t flags)
{
	bool ret;
	vaddr_t b = base;
	size_t s = size;
	size_t s2;
	struct tee_pager_area *area = find_area(utc->areas, b);
	uint32_t exceptions;
	struct tee_pager_pmem *pmem;
	paddr_t pa;
	uint32_t a;
	uint32_t f;

	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
	if (f & TEE_MATTR_UW)
		f |= TEE_MATTR_PW;
	f = get_area_mattr(f);

	exceptions = pager_lock();

	while (s) {
		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
		if (!area || area->base != b || area->size != s2) {
			ret = false;
			goto out;
		}
		b += s2;
		s -= s2;

		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
			if (pmem->area != area)
				continue;
			area_get_entry(pmem->area, pmem->pgidx, &pa, &a);
			if (a & TEE_MATTR_VALID_BLOCK)
				assert(pa == get_pmem_pa(pmem));
			else
				pa = get_pmem_pa(pmem);
			if (a == f)
				continue;
			area_set_entry(pmem->area, pmem->pgidx, 0, 0);
			/* TODO only invalidate entries touched above */
			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
			if (!(flags & TEE_MATTR_UW))
				tee_pager_save_page(pmem, a);

			area_set_entry(pmem->area, pmem->pgidx, pa, f);

			if (flags & TEE_MATTR_UX) {
				void *va = (void *)area_idx2va(pmem->area,
							       pmem->pgidx);

				cache_op_inner(DCACHE_AREA_CLEAN, va,
					       SMALL_PAGE_SIZE);
				cache_op_inner(ICACHE_AREA_INVALIDATE, va,
					       SMALL_PAGE_SIZE);
			}
		}

		area->flags = f;
		area = TAILQ_NEXT(area, link);
	}

	ret = true;
out:
	pager_unlock(exceptions);
	return ret;
}
KEEP_PAGER(tee_pager_set_uta_area_attr);
#endif /*CFG_PAGED_USER_TA*/

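/*
 * "Hiding" a page means temporarily clearing its valid bit while keeping
 * the physical page and its content in place. The next access to the page
 * faults, and tee_pager_unhide_page() simply makes the entry valid again
 * and moves the page to the back of the LRU list. This gives the pager a
 * cheap approximation of which pages are in active use without relying on
 * hardware access flags.
 */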
static bool tee_pager_unhide_page(vaddr_t page_va)
{
	struct tee_pager_pmem *pmem;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;

		if (pmem->pgidx == INVALID_PGIDX)
			continue;

		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);

		if (!(attr &
		      (TEE_MATTR_HIDDEN_BLOCK | TEE_MATTR_HIDDEN_DIRTY_BLOCK)))
			continue;

		if (area_va2idx(pmem->area, page_va) == pmem->pgidx) {
			uint32_t a = get_area_mattr(pmem->area->flags);

			/* page is hidden, show and move to back */
			if (pa != get_pmem_pa(pmem))
				panic("unexpected pa");

			/*
			 * If it's not a dirty block, then it should be
			 * read only.
			 */
			if (!(attr & TEE_MATTR_HIDDEN_DIRTY_BLOCK))
				a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
			else
				FMSG("Unhide %#" PRIxVA, page_va);

			if (page_va == 0x8000a000)
				FMSG("unhide %#" PRIxVA " a %#" PRIX32,
				     page_va, a);
			area_set_entry(pmem->area, pmem->pgidx, pa, a);

			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);

			/* TODO only invalidate entry touched above */
			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

			incr_hidden_hits();
			return true;
		}
	}

	return false;
}

static void tee_pager_hide_pages(void)
{
	struct tee_pager_pmem *pmem;
	size_t n = 0;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;
		uint32_t a;

		if (n >= TEE_PAGER_NHIDE)
			break;
		n++;

		/* we cannot hide pages when pmem->area is not defined. */
		if (!pmem->area)
			continue;

		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		assert(pa == get_pmem_pa(pmem));
		if (attr & (TEE_MATTR_PW | TEE_MATTR_UW)) {
			a = TEE_MATTR_HIDDEN_DIRTY_BLOCK;
			FMSG("Hide %#" PRIxVA,
			     area_idx2va(pmem->area, pmem->pgidx));
		} else
			a = TEE_MATTR_HIDDEN_BLOCK;
		area_set_entry(pmem->area, pmem->pgidx, pa, a);
	}

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

/*
 * Find the mapped pmem, unmap it and move it back to the pageable pmem list.
 * Return false if the page was not mapped, and true if it was.
 */
static bool tee_pager_release_one_phys(struct tee_pager_area *area,
				       vaddr_t page_va)
{
	struct tee_pager_pmem *pmem;
	unsigned pgidx;
	paddr_t pa;
	uint32_t attr;

	pgidx = area_va2idx(area, page_va);
	area_get_entry(area, pgidx, &pa, &attr);

	FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);

	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
		if (pmem->area != area || pmem->pgidx != pgidx)
			continue;

		assert(pa == get_pmem_pa(pmem));
		area_set_entry(area, pgidx, 0, 0);
		pgt_dec_used_entries(area->pgt);
		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
		pmem->area = NULL;
		pmem->pgidx = INVALID_PGIDX;
		tee_pager_npages++;
		set_npages();
		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
		incr_zi_released();
		return true;
	}

	return false;
}

/* Finds the oldest page and unmaps it from its old virtual address */
static struct tee_pager_pmem *tee_pager_get_page(struct tee_pager_area *area)
{
	struct tee_pager_pmem *pmem;

	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
	if (!pmem) {
		EMSG("No pmem entries");
		return NULL;
	}
	if (pmem->pgidx != INVALID_PGIDX) {
		uint32_t a;

		assert(pmem->area && pmem->area->pgt);
		area_get_entry(pmem->area, pmem->pgidx, NULL, &a);
		area_set_entry(pmem->area, pmem->pgidx, 0, 0);
		pgt_dec_used_entries(pmem->area->pgt);
		/* TODO only invalidate entries touched above */
		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
		tee_pager_save_page(pmem, a);
	}

	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
	pmem->pgidx = INVALID_PGIDX;
	pmem->area = NULL;
	if (area->type == AREA_TYPE_LOCK) {
		/* Move page to lock list */
		if (tee_pager_npages <= 0)
			panic("running out of pages");
		tee_pager_npages--;
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
	} else {
		/* move page to back */
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	}

	return pmem;
}

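/*
 * pager_update_permissions() - resolve aborts on pages that are mapped
 *
 * Returns false if the faulting entry isn't valid, i.e. the page must be
 * paged in by the caller. Returns true if the abort is explained by the
 * page permissions: either it has been fixed up here (for instance a
 * writable page marked dirty on its first write) and *handled is true, or
 * *handled is false and the abort should be delivered to the user TA.
 */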
static bool pager_update_permissions(struct tee_pager_area *area,
				     struct abort_info *ai, bool *handled)
{
	unsigned int pgidx = area_va2idx(area, ai->va);
	uint32_t attr;
	paddr_t pa;

	*handled = false;

	area_get_entry(area, pgidx, &pa, &attr);

	/* Not mapped */
	if (!(attr & TEE_MATTR_VALID_BLOCK))
		return false;

	/* Not readable, should not happen */
	if (abort_is_user_exception(ai)) {
		if (!(attr & TEE_MATTR_UR))
			return true;
	} else {
		if (!(attr & TEE_MATTR_PR)) {
			abort_print_error(ai);
			panic();
		}
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
			/* Check attempting to execute from a NOX page */
			if (abort_is_user_exception(ai)) {
				if (!(attr & TEE_MATTR_UX))
					return true;
			} else {
				if (!(attr & TEE_MATTR_PX)) {
					abort_print_error(ai);
					panic();
				}
			}
		}
		/* Since the page is mapped now it's OK */
		break;
	case CORE_MMU_FAULT_WRITE_PERMISSION:
		/* Check attempting to write to an RO page */
		if (abort_is_user_exception(ai)) {
			if (!(area->flags & TEE_MATTR_UW))
				return true;
			if (!(attr & TEE_MATTR_UW)) {
				FMSG("Dirty %p",
				     (void *)(ai->va & ~SMALL_PAGE_MASK));
				area_set_entry(area, pgidx, pa,
					       get_area_mattr(area->flags));
				/* TODO only invalidate entry above */
				core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
			}

		} else {
			if (!(area->flags & TEE_MATTR_PW)) {
				abort_print_error(ai);
				panic();
			}
			if (!(attr & TEE_MATTR_PW)) {
				FMSG("Dirty %p",
				     (void *)(ai->va & ~SMALL_PAGE_MASK));
				area_set_entry(area, pgidx, pa,
					       get_area_mattr(area->flags));
				/* TODO only invalidate entry above */
				core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
			}
		}
		/* Since the permissions have been updated now it's OK */
		break;
	default:
		/* Some fault we can't deal with */
		if (abort_is_user_exception(ai))
			return true;
		abort_print_error(ai);
		panic();
	}
	*handled = true;
	return true;
}

#ifdef CFG_TEE_CORE_DEBUG
static void stat_handle_fault(void)
{
	static size_t num_faults;
	static size_t min_npages = SIZE_MAX;
	static size_t total_min_npages = SIZE_MAX;

	num_faults++;
	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
		DMSG("nfaults %zu npages %zu (min %zu)",
		     num_faults, tee_pager_npages, min_npages);
		min_npages = tee_pager_npages; /* reset */
	}
	if (tee_pager_npages < min_npages)
		min_npages = tee_pager_npages;
	if (tee_pager_npages < total_min_npages)
		total_min_npages = tee_pager_npages;
}
#else
static void stat_handle_fault(void)
{
}
#endif

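/*
 * tee_pager_handle_fault() - called from the abort handler
 *
 * Rough sequence, all under the pager spinlock:
 *   1. Find the area covering the faulting address (core or user TA).
 *   2. If the page is merely hidden, make it valid again and return.
 *   3. Otherwise let pager_update_permissions() resolve pure permission
 *      faults (for instance the first write to a clean writable page).
 *   4. If the page really is absent, evict the oldest physical page,
 *      load and verify/decrypt the content through the alias mapping,
 *      then map it (without write permission) at the faulting address.
 */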
bool tee_pager_handle_fault(struct abort_info *ai)
{
	struct tee_pager_area *area;
	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
	uint32_t exceptions;
	bool ret;

#ifdef TEE_PAGER_DEBUG_PRINT
	abort_print(ai);
#endif

	/*
	 * We're updating pages that can affect several active CPUs at a
	 * time below. We end up here because a thread tries to access some
	 * memory that isn't available. We have to be careful when making
	 * that memory available as other threads may succeed in accessing
	 * that address the moment after we've made it available.
	 *
	 * That means that we can't just map the memory and populate the
	 * page, instead we use the aliased mapping to populate the page
	 * and once everything is ready we map it.
	 */
	exceptions = pager_lock();

	stat_handle_fault();

	/* check if the access is valid */
	if (abort_is_user_exception(ai)) {
		area = find_uta_area(ai->va);

	} else {
		area = find_area(&tee_pager_area_head, ai->va);
		if (!area)
			area = find_uta_area(ai->va);
	}
	if (!area || !area->pgt) {
		ret = false;
		goto out;
	}

	if (!tee_pager_unhide_page(page_va)) {
		struct tee_pager_pmem *pmem = NULL;
		uint32_t attr;

		/*
		 * The page wasn't hidden, but some other core may have
		 * updated the table entry before we got here or we need
		 * to make a read-only page read-write (dirty).
		 */
		if (pager_update_permissions(area, ai, &ret)) {
			/*
			 * Nothing more to do with the abort. The problem
			 * could already have been dealt with from another
			 * core or, if ret is false, the TA will be panicked.
			 */
			goto out;
		}

		pmem = tee_pager_get_page(area);
		if (!pmem) {
			abort_print(ai);
			panic();
		}

		/* load page code & data */
		tee_pager_load_page(area, page_va, pmem->va_alias);

		/*
		 * We've updated the page using the aliased mapping and
		 * some cache maintenance is now needed if it's an
		 * executable page.
		 *
		 * Since the d-cache is a physically-indexed,
		 * physically-tagged (PIPT) cache we can clean the aliased
		 * address instead of the real virtual address.
		 *
		 * The i-cache can also be PIPT, but may be something else
		 * too; to keep it simple we invalidate the entire i-cache.
		 * As a future optimization we may invalidate only the
		 * aliased area if it's a PIPT cache, else the entire cache.
		 */
		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
			/*
			 * Doing these operations to LoUIS (Level of
			 * Unification, Inner Shareable) would be enough
			 */
			cache_op_inner(DCACHE_AREA_CLEAN, pmem->va_alias,
				       SMALL_PAGE_SIZE);
			cache_op_inner(ICACHE_INVALIDATE, NULL, 0);
		}

		pmem->area = area;
		pmem->pgidx = area_va2idx(area, ai->va);
		attr = get_area_mattr(area->flags) &
		       ~(TEE_MATTR_PW | TEE_MATTR_UW);
		area_set_entry(area, pmem->pgidx, get_pmem_pa(pmem), attr);
		pgt_inc_used_entries(area->pgt);

		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA,
		     area_idx2va(area, pmem->pgidx), get_pmem_pa(pmem));

	}

	tee_pager_hide_pages();
	ret = true;
out:
	pager_unlock(exceptions);
	return ret;
}

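/*
 * tee_pager_add_pages() - hand physical pages over to the pager
 *
 * Every mapped page in the range gets an alias mapping and is appended to
 * the pageable list. With @unmap true the pages are unmapped from their
 * current virtual address and become free pager pages; with @unmap false
 * they stay mapped and are simply tracked by the pager from now on.
 */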
void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	size_t n;

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);

	/* setup memory */
	for (n = 0; n < npages; n++) {
		struct tee_pager_pmem *pmem;
		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
		unsigned pgidx = core_mmu_va2idx(ti, va);
		paddr_t pa;
		uint32_t attr;

		/*
		 * Note that we can only support adding pages in the
		 * valid range of this table info, currently not a problem.
		 */
		core_mmu_get_entry(ti, pgidx, &pa, &attr);

		/* Ignore unmapped pages/blocks */
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		pmem = malloc(sizeof(struct tee_pager_pmem));
		if (!pmem)
			panic("out of mem");

		pmem->va_alias = pager_add_alias_page(pa);

		if (unmap) {
			pmem->area = NULL;
			pmem->pgidx = INVALID_PGIDX;
			core_mmu_set_entry(ti, pgidx, 0, 0);
			pgt_dec_used_entries(&pager_core_pgt);
		} else {
			/*
			 * The page is still mapped, let's assign the area
			 * and update the protection bits accordingly.
			 */
			pmem->area = find_area(&tee_pager_area_head, va);
			assert(pmem->area->pgt == &pager_core_pgt);
			pmem->pgidx = pgidx;
			assert(pa == get_pmem_pa(pmem));
			area_set_entry(pmem->area, pgidx, pa,
				       get_area_mattr(pmem->area->flags));
		}

		tee_pager_npages++;
		incr_npages_all();
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	}

	/* Invalidate secure TLB */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

#ifdef CFG_PAGED_USER_TA
static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
{
	struct pgt *p = pgt;

	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
		p = SLIST_NEXT(p, link);
	return p;
}

void tee_pager_assign_uta_tables(struct user_ta_ctx *utc)
{
	struct tee_pager_area *area;
	struct pgt *pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);

	TAILQ_FOREACH(area, utc->areas, link) {
		if (!area->pgt)
			area->pgt = find_pgt(pgt, area->base);
		else
			assert(area->pgt == find_pgt(pgt, area->base));
		if (!area->pgt)
			panic();
	}
}

static void pager_save_and_release_entry(struct tee_pager_pmem *pmem)
{
	uint32_t attr;

	assert(pmem->area && pmem->area->pgt);

	area_get_entry(pmem->area, pmem->pgidx, NULL, &attr);
	area_set_entry(pmem->area, pmem->pgidx, 0, 0);
	tee_pager_save_page(pmem, attr);
	assert(pmem->area->pgt->num_used_entries);
	pmem->area->pgt->num_used_entries--;
	pmem->pgidx = INVALID_PGIDX;
	pmem->area = NULL;
}

void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
{
	struct tee_pager_pmem *pmem;
	struct tee_pager_area *area;
	uint32_t exceptions = pager_lock();

	if (!pgt->num_used_entries)
		goto out;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		if (!pmem->area || pmem->pgidx == INVALID_PGIDX)
			continue;
		if (pmem->area->pgt == pgt)
			pager_save_and_release_entry(pmem);
	}
	assert(!pgt->num_used_entries);

out:
	if (is_user_ta_ctx(pgt->ctx)) {
		TAILQ_FOREACH(area, to_user_ta_ctx(pgt->ctx)->areas, link) {
			if (area->pgt == pgt)
				area->pgt = NULL;
		}
	}

	pager_unlock(exceptions);
}
KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
#endif /*CFG_PAGED_USER_TA*/

void tee_pager_release_phys(void *addr, size_t size)
{
	bool unmapped = false;
	vaddr_t va = (vaddr_t)addr;
	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
	struct tee_pager_area *area;
	uint32_t exceptions;

	if (!size)
		return;

	area = find_area(&tee_pager_area_head, begin);
	if (!area ||
	    area != find_area(&tee_pager_area_head, end - SMALL_PAGE_SIZE))
		panic();

	exceptions = pager_lock();

	for (va = begin; va < end; va += SMALL_PAGE_SIZE)
		unmapped |= tee_pager_release_one_phys(area, va);

	/* Invalidate secure TLB */
	if (unmapped)
		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

	pager_unlock(exceptions);
}
KEEP_PAGER(tee_pager_release_phys);

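/*
 * tee_pager_alloc() - allocate zero-initialized pageable core memory
 *
 * The returned memory is backed by the pager: it is read/write for the
 * core and gets encrypted and paged out under memory pressure, unless
 * TEE_MATTR_LOCKED is passed, in which case the pages are pinned once
 * touched and only released again with tee_pager_release_phys().
 *
 * A minimal usage sketch:
 *
 *	void *buf = tee_pager_alloc(8 * SMALL_PAGE_SIZE, 0);
 *	if (!buf)
 *		panic();
 */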
void *tee_pager_alloc(size_t size, uint32_t flags)
{
	tee_mm_entry_t *mm;
	uint32_t f = TEE_MATTR_PW | TEE_MATTR_PR | (flags & TEE_MATTR_LOCKED);

	if (!size)
		return NULL;

	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
	if (!mm)
		return NULL;

	tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
				f, NULL, NULL);

	return (void *)tee_mm_get_smem(mm);
}