1 /* 2 * Copyright (c) 2016, Linaro Limited 3 * Copyright (c) 2014, STMicroelectronics International N.V. 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright notice, 10 * this list of conditions and the following disclaimer. 11 * 12 * 2. Redistributions in binary form must reproduce the above copyright notice, 13 * this list of conditions and the following disclaimer in the documentation 14 * and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 26 * POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 #include <arm.h> 30 #include <assert.h> 31 #include <io.h> 32 #include <keep.h> 33 #include <kernel/abort.h> 34 #include <kernel/panic.h> 35 #include <kernel/spinlock.h> 36 #include <kernel/tee_misc.h> 37 #include <kernel/tee_ta_manager.h> 38 #include <kernel/thread.h> 39 #include <kernel/tlb_helpers.h> 40 #include <mm/core_memprot.h> 41 #include <mm/tee_mm.h> 42 #include <mm/tee_pager.h> 43 #include <stdlib.h> 44 #include <sys/queue.h> 45 #include <tee_api_defines.h> 46 #include <tee/tee_cryp_provider.h> 47 #include <trace.h> 48 #include <types_ext.h> 49 #include <utee_defines.h> 50 #include <util.h> 51 52 #include "pager_private.h" 53 54 #define PAGER_AE_KEY_BITS 256 55 56 struct pager_rw_pstate { 57 uint64_t iv; 58 uint8_t tag[PAGER_AES_GCM_TAG_LEN]; 59 }; 60 61 enum area_type { 62 AREA_TYPE_RO, 63 AREA_TYPE_RW, 64 AREA_TYPE_LOCK, 65 }; 66 67 struct tee_pager_area { 68 union { 69 const uint8_t *hashes; 70 struct pager_rw_pstate *rwp; 71 } u; 72 uint8_t *store; 73 enum area_type type; 74 uint32_t flags; 75 vaddr_t base; 76 size_t size; 77 struct pgt *pgt; 78 TAILQ_ENTRY(tee_pager_area) link; 79 }; 80 81 TAILQ_HEAD(tee_pager_area_head, tee_pager_area); 82 83 static struct tee_pager_area_head tee_pager_area_head = 84 TAILQ_HEAD_INITIALIZER(tee_pager_area_head); 85 86 #define INVALID_PGIDX UINT_MAX 87 88 /* 89 * struct tee_pager_pmem - Represents a physical page used for paging. 90 * 91 * @pgidx an index of the entry in area->ti. 92 * @va_alias Virtual address where the physical page always is aliased. 93 * Used during remapping of the page when the content need to 94 * be updated before it's available at the new location. 95 * @area a pointer to the pager area 96 */ 97 struct tee_pager_pmem { 98 unsigned pgidx; 99 void *va_alias; 100 struct tee_pager_area *area; 101 TAILQ_ENTRY(tee_pager_pmem) link; 102 }; 103 104 /* The list of physical pages. 
The first page in the list is the oldest */ 105 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem); 106 107 static struct tee_pager_pmem_head tee_pager_pmem_head = 108 TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head); 109 110 static struct tee_pager_pmem_head tee_pager_lock_pmem_head = 111 TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head); 112 113 static uint8_t pager_ae_key[PAGER_AE_KEY_BITS / 8]; 114 115 /* number of pages hidden */ 116 #define TEE_PAGER_NHIDE (tee_pager_npages / 3) 117 118 /* Number of registered physical pages, used hiding pages. */ 119 static size_t tee_pager_npages; 120 121 #ifdef CFG_WITH_STATS 122 static struct tee_pager_stats pager_stats; 123 124 static inline void incr_ro_hits(void) 125 { 126 pager_stats.ro_hits++; 127 } 128 129 static inline void incr_rw_hits(void) 130 { 131 pager_stats.rw_hits++; 132 } 133 134 static inline void incr_hidden_hits(void) 135 { 136 pager_stats.hidden_hits++; 137 } 138 139 static inline void incr_zi_released(void) 140 { 141 pager_stats.zi_released++; 142 } 143 144 static inline void incr_npages_all(void) 145 { 146 pager_stats.npages_all++; 147 } 148 149 static inline void set_npages(void) 150 { 151 pager_stats.npages = tee_pager_npages; 152 } 153 154 void tee_pager_get_stats(struct tee_pager_stats *stats) 155 { 156 *stats = pager_stats; 157 158 pager_stats.hidden_hits = 0; 159 pager_stats.ro_hits = 0; 160 pager_stats.rw_hits = 0; 161 pager_stats.zi_released = 0; 162 } 163 164 #else /* CFG_WITH_STATS */ 165 static inline void incr_ro_hits(void) { } 166 static inline void incr_rw_hits(void) { } 167 static inline void incr_hidden_hits(void) { } 168 static inline void incr_zi_released(void) { } 169 static inline void incr_npages_all(void) { } 170 static inline void set_npages(void) { } 171 172 void tee_pager_get_stats(struct tee_pager_stats *stats) 173 { 174 memset(stats, 0, sizeof(struct tee_pager_stats)); 175 } 176 #endif /* CFG_WITH_STATS */ 177 178 static struct pgt pager_core_pgt; 179 struct core_mmu_table_info tee_pager_tbl_info; 180 static struct core_mmu_table_info pager_alias_tbl_info; 181 182 static unsigned pager_spinlock = SPINLOCK_UNLOCK; 183 184 /* Defines the range of the alias area */ 185 static tee_mm_entry_t *pager_alias_area; 186 /* 187 * Physical pages are added in a stack like fashion to the alias area, 188 * @pager_alias_next_free gives the address of next free entry if 189 * @pager_alias_next_free is != 0 190 */ 191 static uintptr_t pager_alias_next_free; 192 193 #ifdef CFG_TEE_CORE_DEBUG 194 #define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai) 195 196 static uint32_t pager_lock_dldetect(const char *func, const int line, 197 struct abort_info *ai) 198 { 199 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL); 200 unsigned int retries = 0; 201 unsigned int reminder = 0; 202 203 while (!cpu_spin_trylock(&pager_spinlock)) { 204 retries++; 205 if (!retries) { 206 /* wrapped, time to report */ 207 trace_printf(func, line, TRACE_ERROR, true, 208 "possible spinlock deadlock reminder %u", 209 reminder); 210 if (reminder < UINT_MAX) 211 reminder++; 212 if (ai) 213 abort_print(ai); 214 } 215 } 216 217 return exceptions; 218 } 219 #else 220 static uint32_t pager_lock(struct abort_info __unused *ai) 221 { 222 return cpu_spin_lock_xsave(&pager_spinlock); 223 } 224 #endif 225 226 static uint32_t pager_lock_check_stack(size_t stack_size) 227 { 228 if (stack_size) { 229 int8_t buf[stack_size]; 230 size_t n; 231 232 /* 233 * Make sure to touch all pages of the stack that we expect 234 * to use with this lock held. 
We need to take any 235 * page faults before the lock is taken or we'll deadlock 236 * the pager. The pages that are populated in this way will 237 * eventually be released at certain save transitions of 238 * the thread. 239 */ 240 for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE) 241 write8(1, (vaddr_t)buf + n); 242 write8(1, (vaddr_t)buf + stack_size - 1); 243 } 244 245 return pager_lock(NULL); 246 } 247 248 static void pager_unlock(uint32_t exceptions) 249 { 250 cpu_spin_unlock_xrestore(&pager_spinlock, exceptions); 251 } 252 253 void *tee_pager_phys_to_virt(paddr_t pa) 254 { 255 struct core_mmu_table_info *ti = &tee_pager_tbl_info; 256 unsigned idx; 257 unsigned end_idx; 258 uint32_t a; 259 paddr_t p; 260 261 end_idx = core_mmu_va2idx(ti, CFG_TEE_RAM_START + 262 CFG_TEE_RAM_VA_SIZE); 263 /* Most addresses are mapped linearly, try that first if possible. */ 264 idx = core_mmu_va2idx(ti, pa); 265 if (idx >= core_mmu_va2idx(ti, CFG_TEE_RAM_START) && 266 idx < end_idx) { 267 core_mmu_get_entry(ti, idx, &p, &a); 268 if ((a & TEE_MATTR_VALID_BLOCK) && p == pa) 269 return (void *)core_mmu_idx2va(ti, idx); 270 } 271 272 for (idx = core_mmu_va2idx(ti, CFG_TEE_RAM_START); 273 idx < end_idx; idx++) { 274 core_mmu_get_entry(ti, idx, &p, &a); 275 if ((a & TEE_MATTR_VALID_BLOCK) && p == pa) 276 return (void *)core_mmu_idx2va(ti, idx); 277 } 278 279 return NULL; 280 } 281 282 bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti) 283 { 284 if (va >= (CFG_TEE_LOAD_ADDR & ~CORE_MMU_PGDIR_MASK) && 285 va <= (CFG_TEE_LOAD_ADDR | CORE_MMU_PGDIR_MASK)) { 286 *ti = tee_pager_tbl_info; 287 return true; 288 } 289 290 return false; 291 } 292 293 static void set_alias_area(tee_mm_entry_t *mm) 294 { 295 struct core_mmu_table_info *ti = &pager_alias_tbl_info; 296 size_t tbl_va_size; 297 unsigned idx; 298 unsigned last_idx; 299 vaddr_t smem = tee_mm_get_smem(mm); 300 size_t nbytes = tee_mm_get_bytes(mm); 301 302 DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes); 303 304 if (pager_alias_area) 305 panic("pager_alias_area already set"); 306 307 if (!ti->num_entries && !core_mmu_find_table(smem, UINT_MAX, ti)) 308 panic("Can't find translation table"); 309 310 if ((1 << ti->shift) != SMALL_PAGE_SIZE) 311 panic("Unsupported page size in translation table"); 312 313 tbl_va_size = (1 << ti->shift) * ti->num_entries; 314 if (!core_is_buffer_inside(smem, nbytes, 315 ti->va_base, tbl_va_size)) { 316 EMSG("area 0x%" PRIxVA " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx", 317 smem, nbytes, ti->va_base, tbl_va_size); 318 panic(); 319 } 320 321 if (smem & SMALL_PAGE_MASK || nbytes & SMALL_PAGE_MASK) 322 panic("invalid area alignment"); 323 324 pager_alias_area = mm; 325 pager_alias_next_free = smem; 326 327 /* Clear all mappings in the alias area */ 328 idx = core_mmu_va2idx(ti, smem); 329 last_idx = core_mmu_va2idx(ti, smem + nbytes); 330 for (; idx < last_idx; idx++) 331 core_mmu_set_entry(ti, idx, 0, 0); 332 333 tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE); 334 } 335 336 static void generate_ae_key(void) 337 { 338 if (rng_generate(pager_ae_key, sizeof(pager_ae_key)) != TEE_SUCCESS) 339 panic("failed to generate random"); 340 } 341 342 void tee_pager_early_init(void) 343 { 344 if (!core_mmu_find_table(CFG_TEE_RAM_START, UINT_MAX, 345 &tee_pager_tbl_info)) 346 panic("can't find mmu tables"); 347 348 if (tee_pager_tbl_info.shift != SMALL_PAGE_SHIFT) 349 panic("Unsupported page size in translation table"); 350 } 351 352 void tee_pager_init(tee_mm_entry_t *mm_alias) 353 {
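	/*
	 * Claim the alias area used when populating and saving pages,
	 * then generate the key used to encrypt and authenticate
	 * paged-out RW pages.
	 */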
354 set_alias_area(mm_alias); 355 generate_ae_key(); 356 } 357 358 static void *pager_add_alias_page(paddr_t pa) 359 { 360 unsigned idx; 361 struct core_mmu_table_info *ti = &pager_alias_tbl_info; 362 /* Alias pages mapped without write permission: runtime will care */ 363 uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL | 364 (TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) | 365 TEE_MATTR_SECURE | TEE_MATTR_PR; 366 367 DMSG("0x%" PRIxPA, pa); 368 369 if (!pager_alias_next_free || !ti->num_entries) 370 panic("invalid alias entry"); 371 372 idx = core_mmu_va2idx(ti, pager_alias_next_free); 373 core_mmu_set_entry(ti, idx, pa, attr); 374 pgt_inc_used_entries(&pager_core_pgt); 375 pager_alias_next_free += SMALL_PAGE_SIZE; 376 if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) + 377 tee_mm_get_bytes(pager_alias_area))) 378 pager_alias_next_free = 0; 379 return (void *)core_mmu_idx2va(ti, idx); 380 } 381 382 static struct tee_pager_area *alloc_area(struct pgt *pgt, 383 vaddr_t base, size_t size, 384 uint32_t flags, const void *store, 385 const void *hashes) 386 { 387 struct tee_pager_area *area = calloc(1, sizeof(*area)); 388 enum area_type at; 389 tee_mm_entry_t *mm_store = NULL; 390 391 if (!area) 392 return NULL; 393 394 if (flags & (TEE_MATTR_PW | TEE_MATTR_UW)) { 395 if (flags & TEE_MATTR_LOCKED) { 396 at = AREA_TYPE_LOCK; 397 goto out; 398 } 399 mm_store = tee_mm_alloc(&tee_mm_sec_ddr, size); 400 if (!mm_store) 401 goto bad; 402 area->store = phys_to_virt(tee_mm_get_smem(mm_store), 403 MEM_AREA_TA_RAM); 404 if (!area->store) 405 goto bad; 406 area->u.rwp = calloc(size / SMALL_PAGE_SIZE, 407 sizeof(struct pager_rw_pstate)); 408 if (!area->u.rwp) 409 goto bad; 410 at = AREA_TYPE_RW; 411 } else { 412 area->store = (void *)store; 413 area->u.hashes = hashes; 414 at = AREA_TYPE_RO; 415 } 416 out: 417 area->pgt = pgt; 418 area->base = base; 419 area->size = size; 420 area->flags = flags; 421 area->type = at; 422 return area; 423 bad: 424 tee_mm_free(mm_store); 425 free(area->u.rwp); 426 free(area); 427 return NULL; 428 } 429 430 static void area_insert_tail(struct tee_pager_area *area) 431 { 432 uint32_t exceptions = pager_lock_check_stack(8); 433 434 TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link); 435 436 pager_unlock(exceptions); 437 } 438 KEEP_PAGER(area_insert_tail); 439 440 static size_t tbl_usage_count(struct pgt *pgt) 441 { 442 size_t n; 443 paddr_t pa; 444 size_t usage = 0; 445 446 for (n = 0; n < tee_pager_tbl_info.num_entries; n++) { 447 core_mmu_get_entry_primitive(pgt->tbl, tee_pager_tbl_info.level, 448 n, &pa, NULL); 449 if (pa) 450 usage++; 451 } 452 return usage; 453 } 454 455 void tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags, 456 const void *store, const void *hashes) 457 { 458 struct tee_pager_area *area; 459 size_t tbl_va_size; 460 struct core_mmu_table_info *ti = &tee_pager_tbl_info; 461 462 DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p", 463 base, base + size, flags, store, hashes); 464 465 if (base & SMALL_PAGE_MASK || size & SMALL_PAGE_MASK || !size) { 466 EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, size); 467 panic(); 468 } 469 470 if (!(flags & TEE_MATTR_PW) && (!store || !hashes)) 471 panic("non-write pages must provide store and hashes"); 472 473 if ((flags & TEE_MATTR_PW) && (store || hashes)) 474 panic("write pages cannot provide store or hashes"); 475 476 if (!pager_core_pgt.tbl) { 477 pager_core_pgt.tbl = ti->table; 478 pgt_set_used_entries(&pager_core_pgt, 479 tbl_usage_count(&pager_core_pgt)); 480 } 481 482 tbl_va_size = (1 << ti->shift) * ti->num_entries; 483 if (!core_is_buffer_inside(base, size, ti->va_base, tbl_va_size)) { 484 DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx", 485 base, size, ti->va_base, tbl_va_size); 486 panic(); 487 } 488 489 area = alloc_area(&pager_core_pgt, base, size, flags, store, hashes); 490 if (!area) 491 panic(); 492 493 area_insert_tail(area); 494 } 495 496 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas, 497 vaddr_t va) 498 { 499 struct tee_pager_area *area; 500 501 if (!areas) 502 return NULL; 503 504 TAILQ_FOREACH(area, areas, link) { 505 if (core_is_buffer_inside(va, 1, area->base, area->size)) 506 return area; 507 } 508 return NULL; 509 } 510 511 #ifdef CFG_PAGED_USER_TA 512 static struct tee_pager_area *find_uta_area(vaddr_t va) 513 { 514 struct tee_ta_ctx *ctx = thread_get_tsd()->ctx; 515 516 if (!ctx || !is_user_ta_ctx(ctx)) 517 return NULL; 518 return find_area(to_user_ta_ctx(ctx)->areas, va); 519 } 520 #else 521 static struct tee_pager_area *find_uta_area(vaddr_t va __unused) 522 { 523 return NULL; 524 } 525 #endif /*CFG_PAGED_USER_TA*/ 526 527 528 static uint32_t get_area_mattr(uint32_t area_flags) 529 { 530 uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE | 531 TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT | 532 (area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX)); 533 534 if (!(area_flags & (TEE_MATTR_UR | TEE_MATTR_UX | TEE_MATTR_UW))) 535 attr |= TEE_MATTR_GLOBAL; 536 537 return attr; 538 } 539 540 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem) 541 { 542 paddr_t pa; 543 unsigned idx; 544 545 idx = core_mmu_va2idx(&pager_alias_tbl_info, (vaddr_t)pmem->va_alias); 546 core_mmu_get_entry(&pager_alias_tbl_info, idx, &pa, NULL); 547 return pa; 548 } 549 550 static bool decrypt_page(struct pager_rw_pstate *rwp, const void *src, 551 void *dst) 552 { 553 struct pager_aes_gcm_iv iv = { 554 { (vaddr_t)rwp, rwp->iv >> 32, rwp->iv } 555 }; 556 557 return pager_aes_gcm_decrypt(pager_ae_key, sizeof(pager_ae_key), 558 &iv, rwp->tag, src, dst, SMALL_PAGE_SIZE); 559 } 560 561 static void encrypt_page(struct pager_rw_pstate *rwp, void *src, void *dst) 562 { 563 struct pager_aes_gcm_iv iv; 564 565 assert((rwp->iv + 1) > rwp->iv); 566 rwp->iv++; 567 /* 568 * IV is constructed as recommended in section "8.2.1 Deterministic 569 * Construction" of "Recommendation for Block Cipher Modes of 570 * Operation: Galois/Counter Mode (GCM) and GMAC", 571 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf 572 */ 573 iv.iv[0] = (vaddr_t)rwp; 574 iv.iv[1] = rwp->iv >> 32; 575 iv.iv[2] = rwp->iv; 576 577 if (!pager_aes_gcm_encrypt(pager_ae_key, sizeof(pager_ae_key), 578 &iv, rwp->tag, 579 src, dst, SMALL_PAGE_SIZE)) 580 panic("gcm failed"); 581 } 582 583 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va, 584 void *va_alias) 585 { 586 size_t idx = (page_va - area->base) >> SMALL_PAGE_SHIFT; 587 const void *stored_page = area->store + idx * SMALL_PAGE_SIZE; 588 struct core_mmu_table_info *ti; 589 uint32_t attr_alias; 590 paddr_t pa_alias; 591 unsigned int idx_alias; 592 593 /* Ensure we are allowed to write to the aliased virtual page */ 594 ti = &pager_alias_tbl_info; 595 idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias); 596 core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias); 597 if (!(attr_alias & TEE_MATTR_PW)) { 598 attr_alias |= TEE_MATTR_PW; 599 core_mmu_set_entry(ti, idx_alias, pa_alias,
attr_alias); 600 tlbi_mva_allasid((vaddr_t)va_alias); 601 } 602 603 switch (area->type) { 604 case AREA_TYPE_RO: 605 { 606 const void *hash = area->u.hashes + 607 idx * TEE_SHA256_HASH_SIZE; 608 609 memcpy(va_alias, stored_page, SMALL_PAGE_SIZE); 610 incr_ro_hits(); 611 612 if (hash_sha256_check(hash, va_alias, 613 SMALL_PAGE_SIZE) != TEE_SUCCESS) { 614 EMSG("PH 0x%" PRIxVA " failed", page_va); 615 panic(); 616 } 617 } 618 /* Forbid write to aliases for read-only (maybe exec) pages */ 619 attr_alias &= ~TEE_MATTR_PW; 620 core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias); 621 tlbi_mva_allasid((vaddr_t)va_alias); 622 break; 623 case AREA_TYPE_RW: 624 FMSG("Restore %p %#" PRIxVA " iv %#" PRIx64, 625 va_alias, page_va, area->u.rwp[idx].iv); 626 if (!area->u.rwp[idx].iv) 627 memset(va_alias, 0, SMALL_PAGE_SIZE); 628 else if (!decrypt_page(&area->u.rwp[idx], stored_page, 629 va_alias)) { 630 EMSG("PH 0x%" PRIxVA " failed", page_va); 631 panic(); 632 } 633 incr_rw_hits(); 634 break; 635 case AREA_TYPE_LOCK: 636 FMSG("Zero init %p %#" PRIxVA, va_alias, page_va); 637 memset(va_alias, 0, SMALL_PAGE_SIZE); 638 break; 639 default: 640 panic(); 641 } 642 } 643 644 static void tee_pager_save_page(struct tee_pager_pmem *pmem, uint32_t attr) 645 { 646 const uint32_t dirty_bits = TEE_MATTR_PW | TEE_MATTR_UW | 647 TEE_MATTR_HIDDEN_DIRTY_BLOCK; 648 649 if (pmem->area->type == AREA_TYPE_RW && (attr & dirty_bits)) { 650 size_t offs = pmem->area->base & CORE_MMU_PGDIR_MASK; 651 size_t idx = pmem->pgidx - (offs >> SMALL_PAGE_SHIFT); 652 void *stored_page = pmem->area->store + idx * SMALL_PAGE_SIZE; 653 654 assert(pmem->area->flags & (TEE_MATTR_PW | TEE_MATTR_UW)); 655 encrypt_page(&pmem->area->u.rwp[idx], pmem->va_alias, 656 stored_page); 657 FMSG("Saved %#" PRIxVA " iv %#" PRIx64, 658 pmem->area->base + idx * SMALL_PAGE_SIZE, 659 pmem->area->u.rwp[idx].iv); 660 } 661 } 662 663 static void area_get_entry(struct tee_pager_area *area, size_t idx, 664 paddr_t *pa, uint32_t *attr) 665 { 666 assert(area->pgt); 667 assert(idx < tee_pager_tbl_info.num_entries); 668 core_mmu_get_entry_primitive(area->pgt->tbl, tee_pager_tbl_info.level, 669 idx, pa, attr); 670 } 671 672 static void area_set_entry(struct tee_pager_area *area, size_t idx, 673 paddr_t pa, uint32_t attr) 674 { 675 assert(area->pgt); 676 assert(idx < tee_pager_tbl_info.num_entries); 677 core_mmu_set_entry_primitive(area->pgt->tbl, tee_pager_tbl_info.level, 678 idx, pa, attr); 679 } 680 681 static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va) 682 { 683 return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT; 684 } 685 686 static vaddr_t __maybe_unused area_idx2va(struct tee_pager_area *area, 687 size_t idx) 688 { 689 return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK); 690 } 691 692 #ifdef CFG_PAGED_USER_TA 693 static void free_area(struct tee_pager_area *area) 694 { 695 tee_mm_free(tee_mm_find(&tee_mm_sec_ddr, 696 virt_to_phys(area->store))); 697 if (area->type == AREA_TYPE_RW) 698 free(area->u.rwp); 699 free(area); 700 } 701 702 static bool pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base, 703 size_t size) 704 { 705 struct tee_pager_area *area; 706 uint32_t flags; 707 vaddr_t b = base; 708 size_t s = ROUNDUP(size, SMALL_PAGE_SIZE); 709 710 if (!utc->areas) { 711 utc->areas = malloc(sizeof(*utc->areas)); 712 if (!utc->areas) 713 return false; 714 TAILQ_INIT(utc->areas); 715 } 716 717 flags = TEE_MATTR_PRW | TEE_MATTR_URWX; 718 719 while (s) { 720 size_t s2; 721 722 if (find_area(utc->areas, b)) 723 
return false; 724 725 s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s); 726 727 /* Table info will be set when the context is activated. */ 728 area = alloc_area(NULL, b, s2, flags, NULL, NULL); 729 if (!area) 730 return false; 731 TAILQ_INSERT_TAIL(utc->areas, area, link); 732 b += s2; 733 s -= s2; 734 } 735 736 return true; 737 } 738 739 bool tee_pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base, size_t size) 740 { 741 struct thread_specific_data *tsd = thread_get_tsd(); 742 struct tee_pager_area *area; 743 struct core_mmu_table_info dir_info = { NULL }; 744 745 if (&utc->ctx != tsd->ctx) { 746 /* 747 * Changes are to an utc that isn't active. Just add the 748 * areas page tables will be dealt with later. 749 */ 750 return pager_add_uta_area(utc, base, size); 751 } 752 753 /* 754 * Assign page tables before adding areas to be able to tell which 755 * are newly added and should be removed in case of failure. 756 */ 757 tee_pager_assign_uta_tables(utc); 758 if (!pager_add_uta_area(utc, base, size)) { 759 struct tee_pager_area *next_a; 760 761 /* Remove all added areas */ 762 TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) { 763 if (!area->pgt) { 764 TAILQ_REMOVE(utc->areas, area, link); 765 free_area(area); 766 } 767 } 768 return false; 769 } 770 771 /* 772 * Assign page tables to the new areas and make sure that the page 773 * tables are registered in the upper table. 774 */ 775 tee_pager_assign_uta_tables(utc); 776 core_mmu_get_user_pgdir(&dir_info); 777 TAILQ_FOREACH(area, utc->areas, link) { 778 paddr_t pa; 779 size_t idx; 780 uint32_t attr; 781 782 idx = core_mmu_va2idx(&dir_info, area->pgt->vabase); 783 core_mmu_get_entry(&dir_info, idx, &pa, &attr); 784 785 /* 786 * Check if the page table already is used, if it is, it's 787 * already registered. 788 */ 789 if (area->pgt->num_used_entries) { 790 assert(attr & TEE_MATTR_TABLE); 791 assert(pa == virt_to_phys(area->pgt->tbl)); 792 continue; 793 } 794 795 attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE; 796 pa = virt_to_phys(area->pgt->tbl); 797 assert(pa); 798 /* 799 * Note that the update of the table entry is guaranteed to 800 * be atomic. 801 */ 802 core_mmu_set_entry(&dir_info, idx, pa, attr); 803 } 804 805 return true; 806 } 807 808 static void init_tbl_info_from_pgt(struct core_mmu_table_info *ti, 809 struct pgt *pgt) 810 { 811 assert(pgt); 812 ti->table = pgt->tbl; 813 ti->va_base = pgt->vabase; 814 ti->level = tee_pager_tbl_info.level; 815 ti->shift = tee_pager_tbl_info.shift; 816 ti->num_entries = tee_pager_tbl_info.num_entries; 817 } 818 819 static void transpose_area(struct tee_pager_area *area, struct pgt *new_pgt, 820 vaddr_t new_base) 821 { 822 uint32_t exceptions = pager_lock_check_stack(64); 823 824 /* 825 * If there's no pgt assigned to the old area there's no pages to 826 * deal with either, just update with a new pgt and base. 
827 */ 828 if (area->pgt) { 829 struct core_mmu_table_info old_ti; 830 struct core_mmu_table_info new_ti; 831 struct tee_pager_pmem *pmem; 832 833 init_tbl_info_from_pgt(&old_ti, area->pgt); 834 init_tbl_info_from_pgt(&new_ti, new_pgt); 835 836 837 TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) { 838 vaddr_t va; 839 paddr_t pa; 840 uint32_t attr; 841 842 if (pmem->area != area) 843 continue; 844 core_mmu_get_entry(&old_ti, pmem->pgidx, &pa, &attr); 845 core_mmu_set_entry(&old_ti, pmem->pgidx, 0, 0); 846 847 assert(pa == get_pmem_pa(pmem)); 848 assert(attr); 849 assert(area->pgt->num_used_entries); 850 area->pgt->num_used_entries--; 851 852 va = core_mmu_idx2va(&old_ti, pmem->pgidx); 853 va = va - area->base + new_base; 854 pmem->pgidx = core_mmu_va2idx(&new_ti, va); 855 core_mmu_set_entry(&new_ti, pmem->pgidx, pa, attr); 856 new_pgt->num_used_entries++; 857 } 858 } 859 860 area->pgt = new_pgt; 861 area->base = new_base; 862 pager_unlock(exceptions); 863 } 864 KEEP_PAGER(transpose_area); 865 866 void tee_pager_transfer_uta_region(struct user_ta_ctx *src_utc, 867 vaddr_t src_base, 868 struct user_ta_ctx *dst_utc, 869 vaddr_t dst_base, struct pgt **dst_pgt, 870 size_t size) 871 { 872 struct tee_pager_area *area; 873 struct tee_pager_area *next_a; 874 875 TAILQ_FOREACH_SAFE(area, src_utc->areas, link, next_a) { 876 vaddr_t new_area_base; 877 size_t new_idx; 878 879 if (!core_is_buffer_inside(area->base, area->size, 880 src_base, size)) 881 continue; 882 883 TAILQ_REMOVE(src_utc->areas, area, link); 884 885 new_area_base = dst_base + (area->base - src_base); 886 new_idx = (new_area_base - dst_pgt[0]->vabase) / 887 CORE_MMU_PGDIR_SIZE; 888 assert((new_area_base & ~CORE_MMU_PGDIR_MASK) == 889 dst_pgt[new_idx]->vabase); 890 transpose_area(area, dst_pgt[new_idx], new_area_base); 891 892 /* 893 * Assert that this will not cause any conflicts in the new 894 * utc. This should already be guaranteed, but a bug here 895 * could be tricky to find.
896 */ 897 assert(!find_area(dst_utc->areas, area->base)); 898 TAILQ_INSERT_TAIL(dst_utc->areas, area, link); 899 } 900 } 901 902 static void rem_area(struct tee_pager_area_head *area_head, 903 struct tee_pager_area *area) 904 { 905 struct tee_pager_pmem *pmem; 906 uint32_t exceptions; 907 908 exceptions = pager_lock_check_stack(64); 909 910 TAILQ_REMOVE(area_head, area, link); 911 912 TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) { 913 if (pmem->area == area) { 914 area_set_entry(area, pmem->pgidx, 0, 0); 915 tlbi_mva_allasid(area_idx2va(area, pmem->pgidx)); 916 pgt_dec_used_entries(area->pgt); 917 pmem->area = NULL; 918 pmem->pgidx = INVALID_PGIDX; 919 } 920 } 921 922 pager_unlock(exceptions); 923 free_area(area); 924 } 925 KEEP_PAGER(rem_area); 926 927 void tee_pager_rem_uta_region(struct user_ta_ctx *utc, vaddr_t base, 928 size_t size) 929 { 930 struct tee_pager_area *area; 931 struct tee_pager_area *next_a; 932 size_t s = ROUNDUP(size, SMALL_PAGE_SIZE); 933 934 TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) { 935 if (core_is_buffer_inside(area->base, area->size, base, s)) 936 rem_area(utc->areas, area); 937 } 938 } 939 940 void tee_pager_rem_uta_areas(struct user_ta_ctx *utc) 941 { 942 struct tee_pager_area *area; 943 944 if (!utc->areas) 945 return; 946 947 while (true) { 948 area = TAILQ_FIRST(utc->areas); 949 if (!area) 950 break; 951 TAILQ_REMOVE(utc->areas, area, link); 952 free_area(area); 953 } 954 955 free(utc->areas); 956 } 957 958 bool tee_pager_set_uta_area_attr(struct user_ta_ctx *utc, vaddr_t base, 959 size_t size, uint32_t flags) 960 { 961 bool ret; 962 vaddr_t b = base; 963 size_t s = size; 964 size_t s2; 965 struct tee_pager_area *area = find_area(utc->areas, b); 966 uint32_t exceptions; 967 struct tee_pager_pmem *pmem; 968 paddr_t pa; 969 uint32_t a; 970 uint32_t f; 971 972 f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR; 973 if (f & TEE_MATTR_UW) 974 f |= TEE_MATTR_PW; 975 f = get_area_mattr(f); 976 977 exceptions = pager_lock_check_stack(64); 978 979 while (s) { 980 s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s); 981 if (!area || area->base != b || area->size != s2) { 982 ret = false; 983 goto out; 984 } 985 b += s2; 986 s -= s2; 987 988 TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) { 989 if (pmem->area != area) 990 continue; 991 area_get_entry(pmem->area, pmem->pgidx, &pa, &a); 992 if (a & TEE_MATTR_VALID_BLOCK) 993 assert(pa == get_pmem_pa(pmem)); 994 else 995 pa = get_pmem_pa(pmem); 996 if (a == f) 997 continue; 998 area_set_entry(pmem->area, pmem->pgidx, 0, 0); 999 tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx)); 1000 if (!(flags & TEE_MATTR_UW)) 1001 tee_pager_save_page(pmem, a); 1002 1003 area_set_entry(pmem->area, pmem->pgidx, pa, f); 1004 /* 1005 * Make sure the table update is visible before 1006 * continuing. 
1007 */ 1008 dsb_ishst(); 1009 1010 if (flags & TEE_MATTR_UX) { 1011 void *va = (void *)area_idx2va(pmem->area, 1012 pmem->pgidx); 1013 1014 cache_op_inner(DCACHE_AREA_CLEAN, va, 1015 SMALL_PAGE_SIZE); 1016 cache_op_inner(ICACHE_AREA_INVALIDATE, va, 1017 SMALL_PAGE_SIZE); 1018 } 1019 } 1020 1021 area->flags = f; 1022 area = TAILQ_NEXT(area, link); 1023 } 1024 1025 ret = true; 1026 out: 1027 pager_unlock(exceptions); 1028 return ret; 1029 } 1030 KEEP_PAGER(tee_pager_set_uta_area_attr); 1031 #endif /*CFG_PAGED_USER_TA*/ 1032 1033 static bool tee_pager_unhide_page(vaddr_t page_va) 1034 { 1035 struct tee_pager_pmem *pmem; 1036 1037 TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) { 1038 paddr_t pa; 1039 uint32_t attr; 1040 1041 if (pmem->pgidx == INVALID_PGIDX) 1042 continue; 1043 1044 area_get_entry(pmem->area, pmem->pgidx, &pa, &attr); 1045 1046 if (!(attr & 1047 (TEE_MATTR_HIDDEN_BLOCK | TEE_MATTR_HIDDEN_DIRTY_BLOCK))) 1048 continue; 1049 1050 if (area_va2idx(pmem->area, page_va) == pmem->pgidx) { 1051 uint32_t a = get_area_mattr(pmem->area->flags); 1052 1053 /* page is hidden, show and move to back */ 1054 if (pa != get_pmem_pa(pmem)) 1055 panic("unexpected pa"); 1056 1057 /* 1058 * If it's not a dirty block, then it should be 1059 * read only. 1060 */ 1061 if (!(attr & TEE_MATTR_HIDDEN_DIRTY_BLOCK)) 1062 a &= ~(TEE_MATTR_PW | TEE_MATTR_UW); 1063 else 1064 FMSG("Unhide %#" PRIxVA, page_va); 1065 1066 if (page_va == 0x8000a000) 1067 FMSG("unhide %#" PRIxVA " a %#" PRIX32, 1068 page_va, a); 1069 area_set_entry(pmem->area, pmem->pgidx, pa, a); 1070 /* 1071 * Note that TLB invalidation isn't needed since 1072 * there wasn't a valid mapping before. We should 1073 * use a barrier though, to make sure that the 1074 * change is visible. 1075 */ 1076 dsb_ishst(); 1077 1078 TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link); 1079 TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link); 1080 incr_hidden_hits(); 1081 return true; 1082 } 1083 } 1084 1085 return false; 1086 } 1087 1088 static void tee_pager_hide_pages(void) 1089 { 1090 struct tee_pager_pmem *pmem; 1091 size_t n = 0; 1092 1093 TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) { 1094 paddr_t pa; 1095 uint32_t attr; 1096 uint32_t a; 1097 1098 if (n >= TEE_PAGER_NHIDE) 1099 break; 1100 n++; 1101 1102 /* we cannot hide pages when pmem->area is not defined. */ 1103 if (!pmem->area) 1104 continue; 1105 1106 area_get_entry(pmem->area, pmem->pgidx, &pa, &attr); 1107 if (!(attr & TEE_MATTR_VALID_BLOCK)) 1108 continue; 1109 1110 assert(pa == get_pmem_pa(pmem)); 1111 if (attr & (TEE_MATTR_PW | TEE_MATTR_UW)){ 1112 a = TEE_MATTR_HIDDEN_DIRTY_BLOCK; 1113 FMSG("Hide %#" PRIxVA, 1114 area_idx2va(pmem->area, pmem->pgidx)); 1115 } else 1116 a = TEE_MATTR_HIDDEN_BLOCK; 1117 1118 area_set_entry(pmem->area, pmem->pgidx, pa, a); 1119 tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx)); 1120 } 1121 } 1122 1123 /* 1124 * Find mapped pmem, hide and move to pageble pmem. 1125 * Return false if page was not mapped, and true if page was mapped. 
1126 */ 1127 static bool tee_pager_release_one_phys(struct tee_pager_area *area, 1128 vaddr_t page_va) 1129 { 1130 struct tee_pager_pmem *pmem; 1131 unsigned pgidx; 1132 paddr_t pa; 1133 uint32_t attr; 1134 1135 pgidx = area_va2idx(area, page_va); 1136 area_get_entry(area, pgidx, &pa, &attr); 1137 1138 FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr); 1139 1140 TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) { 1141 if (pmem->area != area || pmem->pgidx != pgidx) 1142 continue; 1143 1144 assert(pa == get_pmem_pa(pmem)); 1145 area_set_entry(area, pgidx, 0, 0); 1146 pgt_dec_used_entries(area->pgt); 1147 TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link); 1148 pmem->area = NULL; 1149 pmem->pgidx = INVALID_PGIDX; 1150 tee_pager_npages++; 1151 set_npages(); 1152 TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link); 1153 incr_zi_released(); 1154 return true; 1155 } 1156 1157 return false; 1158 } 1159 1160 /* Finds the oldest page and unmats it from its old virtual address */ 1161 static struct tee_pager_pmem *tee_pager_get_page(struct tee_pager_area *area) 1162 { 1163 struct tee_pager_pmem *pmem; 1164 1165 pmem = TAILQ_FIRST(&tee_pager_pmem_head); 1166 if (!pmem) { 1167 EMSG("No pmem entries"); 1168 return NULL; 1169 } 1170 if (pmem->pgidx != INVALID_PGIDX) { 1171 uint32_t a; 1172 1173 assert(pmem->area && pmem->area->pgt); 1174 area_get_entry(pmem->area, pmem->pgidx, NULL, &a); 1175 area_set_entry(pmem->area, pmem->pgidx, 0, 0); 1176 pgt_dec_used_entries(pmem->area->pgt); 1177 tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx)); 1178 tee_pager_save_page(pmem, a); 1179 } 1180 1181 TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link); 1182 pmem->pgidx = INVALID_PGIDX; 1183 pmem->area = NULL; 1184 if (area->type == AREA_TYPE_LOCK) { 1185 /* Move page to lock list */ 1186 if (tee_pager_npages <= 0) 1187 panic("running out of page"); 1188 tee_pager_npages--; 1189 set_npages(); 1190 TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link); 1191 } else { 1192 /* move page to back */ 1193 TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link); 1194 } 1195 1196 return pmem; 1197 } 1198 1199 static bool pager_update_permissions(struct tee_pager_area *area, 1200 struct abort_info *ai, bool *handled) 1201 { 1202 unsigned int pgidx = area_va2idx(area, ai->va); 1203 uint32_t attr; 1204 paddr_t pa; 1205 1206 *handled = false; 1207 1208 area_get_entry(area, pgidx, &pa, &attr); 1209 1210 /* Not mapped */ 1211 if (!(attr & TEE_MATTR_VALID_BLOCK)) 1212 return false; 1213 1214 /* Not readable, should not happen */ 1215 if (abort_is_user_exception(ai)) { 1216 if (!(attr & TEE_MATTR_UR)) 1217 return true; 1218 } else { 1219 if (!(attr & TEE_MATTR_PR)) { 1220 abort_print_error(ai); 1221 panic(); 1222 } 1223 } 1224 1225 switch (core_mmu_get_fault_type(ai->fault_descr)) { 1226 case CORE_MMU_FAULT_TRANSLATION: 1227 case CORE_MMU_FAULT_READ_PERMISSION: 1228 if (ai->abort_type == ABORT_TYPE_PREFETCH) { 1229 /* Check attempting to execute from an NOX page */ 1230 if (abort_is_user_exception(ai)) { 1231 if (!(attr & TEE_MATTR_UX)) 1232 return true; 1233 } else { 1234 if (!(attr & TEE_MATTR_PX)) { 1235 abort_print_error(ai); 1236 panic(); 1237 } 1238 } 1239 } 1240 /* Since the page is mapped now it's OK */ 1241 break; 1242 case CORE_MMU_FAULT_WRITE_PERMISSION: 1243 /* Check attempting to write to an RO page */ 1244 if (abort_is_user_exception(ai)) { 1245 if (!(area->flags & TEE_MATTR_UW)) 1246 return true; 1247 if (!(attr & TEE_MATTR_UW)) { 1248 FMSG("Dirty %p", 1249 (void *)(ai->va & ~SMALL_PAGE_MASK)); 1250 
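				/*
				 * Grant write access by restoring the full
				 * area attributes; the page is now dirty and
				 * will be encrypted and saved before it is
				 * released.
				 */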
area_set_entry(area, pgidx, pa, 1251 get_area_mattr(area->flags)); 1252 tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK); 1253 } 1254 1255 } else { 1256 if (!(area->flags & TEE_MATTR_PW)) { 1257 abort_print_error(ai); 1258 panic(); 1259 } 1260 if (!(attr & TEE_MATTR_PW)) { 1261 FMSG("Dirty %p", 1262 (void *)(ai->va & ~SMALL_PAGE_MASK)); 1263 area_set_entry(area, pgidx, pa, 1264 get_area_mattr(area->flags)); 1265 tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK); 1266 } 1267 } 1268 /* Since permissions has been updated now it's OK */ 1269 break; 1270 default: 1271 /* Some fault we can't deal with */ 1272 if (abort_is_user_exception(ai)) 1273 return true; 1274 abort_print_error(ai); 1275 panic(); 1276 } 1277 *handled = true; 1278 return true; 1279 } 1280 1281 #ifdef CFG_TEE_CORE_DEBUG 1282 static void stat_handle_fault(void) 1283 { 1284 static size_t num_faults; 1285 static size_t min_npages = SIZE_MAX; 1286 static size_t total_min_npages = SIZE_MAX; 1287 1288 num_faults++; 1289 if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) { 1290 DMSG("nfaults %zu npages %zu (min %zu)", 1291 num_faults, tee_pager_npages, min_npages); 1292 min_npages = tee_pager_npages; /* reset */ 1293 } 1294 if (tee_pager_npages < min_npages) 1295 min_npages = tee_pager_npages; 1296 if (tee_pager_npages < total_min_npages) 1297 total_min_npages = tee_pager_npages; 1298 } 1299 #else 1300 static void stat_handle_fault(void) 1301 { 1302 } 1303 #endif 1304 1305 bool tee_pager_handle_fault(struct abort_info *ai) 1306 { 1307 struct tee_pager_area *area; 1308 vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK; 1309 uint32_t exceptions; 1310 bool ret; 1311 1312 #ifdef TEE_PAGER_DEBUG_PRINT 1313 abort_print(ai); 1314 #endif 1315 1316 /* 1317 * We're updating pages that can affect several active CPUs at a 1318 * time below. We end up here because a thread tries to access some 1319 * memory that isn't available. We have to be careful when making 1320 * that memory available as other threads may succeed in accessing 1321 * that address the moment after we've made it available. 1322 * 1323 * That means that we can't just map the memory and populate the 1324 * page, instead we use the aliased mapping to populate the page 1325 * and once everything is ready we map it. 1326 */ 1327 exceptions = pager_lock(ai); 1328 1329 stat_handle_fault(); 1330 1331 /* check if the access is valid */ 1332 if (abort_is_user_exception(ai)) { 1333 area = find_uta_area(ai->va); 1334 1335 } else { 1336 area = find_area(&tee_pager_area_head, ai->va); 1337 if (!area) 1338 area = find_uta_area(ai->va); 1339 } 1340 if (!area || !area->pgt) { 1341 ret = false; 1342 goto out; 1343 } 1344 1345 if (!tee_pager_unhide_page(page_va)) { 1346 struct tee_pager_pmem *pmem = NULL; 1347 uint32_t attr; 1348 paddr_t pa; 1349 1350 /* 1351 * The page wasn't hidden, but some other core may have 1352 * updated the table entry before we got here or we need 1353 * to make a read-only page read-write (dirty). 1354 */ 1355 if (pager_update_permissions(area, ai, &ret)) { 1356 /* 1357 * Nothing more to do with the abort. The problem 1358 * could already have been dealt with from another 1359 * core or if ret is false the TA will be paniced. 
1360 */ 1361 goto out; 1362 } 1363 1364 pmem = tee_pager_get_page(area); 1365 if (!pmem) { 1366 abort_print(ai); 1367 panic(); 1368 } 1369 1370 /* load page code & data */ 1371 tee_pager_load_page(area, page_va, pmem->va_alias); 1372 1373 1374 pmem->area = area; 1375 pmem->pgidx = area_va2idx(area, ai->va); 1376 attr = get_area_mattr(area->flags) & 1377 ~(TEE_MATTR_PW | TEE_MATTR_UW); 1378 pa = get_pmem_pa(pmem); 1379 1380 /* 1381 * We've updated the page using the aliased mapping and 1382 * some cache maintenence is now needed if it's an 1383 * executable page. 1384 * 1385 * Since the d-cache is a Physically-indexed, 1386 * physically-tagged (PIPT) cache we can clean either the 1387 * aliased address or the real virtual address. In this 1388 * case we choose the real virtual address. 1389 * 1390 * The i-cache can also be PIPT, but may be something else 1391 * too like VIPT. The current code requires the caches to 1392 * implement the IVIPT extension, that is: 1393 * "instruction cache maintenance is required only after 1394 * writing new data to a physical address that holds an 1395 * instruction." 1396 * 1397 * To portably invalidate the icache the page has to 1398 * be mapped at the final virtual address but not 1399 * executable. 1400 */ 1401 if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) { 1402 uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX | 1403 TEE_MATTR_PW | TEE_MATTR_UW; 1404 1405 /* Set a temporary read-only mapping */ 1406 area_set_entry(pmem->area, pmem->pgidx, pa, 1407 attr & ~mask); 1408 tlbi_mva_allasid(page_va); 1409 1410 /* 1411 * Doing these operations to LoUIS (Level of 1412 * unification, Inner Shareable) would be enough 1413 */ 1414 cache_op_inner(DCACHE_AREA_CLEAN, (void *)page_va, 1415 SMALL_PAGE_SIZE); 1416 cache_op_inner(ICACHE_AREA_INVALIDATE, (void *)page_va, 1417 SMALL_PAGE_SIZE); 1418 1419 /* Set the final mapping */ 1420 area_set_entry(area, pmem->pgidx, pa, attr); 1421 tlbi_mva_allasid(page_va); 1422 } else { 1423 area_set_entry(area, pmem->pgidx, pa, attr); 1424 /* 1425 * No need to flush TLB for this entry, it was 1426 * invalid. We should use a barrier though, to make 1427 * sure that the change is visible. 1428 */ 1429 dsb_ishst(); 1430 } 1431 pgt_inc_used_entries(area->pgt); 1432 1433 FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa); 1434 1435 } 1436 1437 tee_pager_hide_pages(); 1438 ret = true; 1439 out: 1440 pager_unlock(exceptions); 1441 return ret; 1442 } 1443 1444 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap) 1445 { 1446 struct core_mmu_table_info *ti = &tee_pager_tbl_info; 1447 size_t n; 1448 1449 DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d", 1450 vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap); 1451 1452 /* setup memory */ 1453 for (n = 0; n < npages; n++) { 1454 struct tee_pager_pmem *pmem; 1455 vaddr_t va = vaddr + n * SMALL_PAGE_SIZE; 1456 unsigned pgidx = core_mmu_va2idx(ti, va); 1457 paddr_t pa; 1458 uint32_t attr; 1459 1460 /* 1461 * Note that we can only support adding pages in the 1462 * valid range of this table info, currently not a problem. 
1463 */ 1464 core_mmu_get_entry(ti, pgidx, &pa, &attr); 1465 1466 /* Ignore unmapped pages/blocks */ 1467 if (!(attr & TEE_MATTR_VALID_BLOCK)) 1468 continue; 1469 1470 pmem = malloc(sizeof(struct tee_pager_pmem)); 1471 if (!pmem) 1472 panic("out of mem"); 1473 1474 pmem->va_alias = pager_add_alias_page(pa); 1475 1476 if (unmap) { 1477 pmem->area = NULL; 1478 pmem->pgidx = INVALID_PGIDX; 1479 core_mmu_set_entry(ti, pgidx, 0, 0); 1480 pgt_dec_used_entries(&pager_core_pgt); 1481 } else { 1482 /* 1483 * The page is still mapped, let's assign the area 1484 * and update the protection bits accordingly. 1485 */ 1486 pmem->area = find_area(&tee_pager_area_head, va); 1487 assert(pmem->area->pgt == &pager_core_pgt); 1488 pmem->pgidx = pgidx; 1489 assert(pa == get_pmem_pa(pmem)); 1490 area_set_entry(pmem->area, pgidx, pa, 1491 get_area_mattr(pmem->area->flags)); 1492 } 1493 1494 tee_pager_npages++; 1495 incr_npages_all(); 1496 set_npages(); 1497 TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link); 1498 } 1499 1500 /* 1501 * As this is done at inits, invalidate all TLBs once instead of 1502 * targeting only the modified entries. 1503 */ 1504 tlbi_all(); 1505 } 1506 1507 #ifdef CFG_PAGED_USER_TA 1508 static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va) 1509 { 1510 struct pgt *p = pgt; 1511 1512 while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase) 1513 p = SLIST_NEXT(p, link); 1514 return p; 1515 } 1516 1517 void tee_pager_assign_uta_tables(struct user_ta_ctx *utc) 1518 { 1519 struct tee_pager_area *area; 1520 struct pgt *pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache); 1521 1522 TAILQ_FOREACH(area, utc->areas, link) { 1523 if (!area->pgt) 1524 area->pgt = find_pgt(pgt, area->base); 1525 else 1526 assert(area->pgt == find_pgt(pgt, area->base)); 1527 if (!area->pgt) 1528 panic(); 1529 } 1530 } 1531 1532 static void pager_save_and_release_entry(struct tee_pager_pmem *pmem) 1533 { 1534 uint32_t attr; 1535 1536 assert(pmem->area && pmem->area->pgt); 1537 1538 area_get_entry(pmem->area, pmem->pgidx, NULL, &attr); 1539 area_set_entry(pmem->area, pmem->pgidx, 0, 0); 1540 tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx)); 1541 tee_pager_save_page(pmem, attr); 1542 assert(pmem->area->pgt->num_used_entries); 1543 pmem->area->pgt->num_used_entries--; 1544 pmem->pgidx = INVALID_PGIDX; 1545 pmem->area = NULL; 1546 } 1547 1548 void tee_pager_pgt_save_and_release_entries(struct pgt *pgt) 1549 { 1550 struct tee_pager_pmem *pmem; 1551 struct tee_pager_area *area; 1552 uint32_t exceptions = pager_lock_check_stack(2048); 1553 1554 if (!pgt->num_used_entries) 1555 goto out; 1556 1557 TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) { 1558 if (!pmem->area || pmem->pgidx == INVALID_PGIDX) 1559 continue; 1560 if (pmem->area->pgt == pgt) 1561 pager_save_and_release_entry(pmem); 1562 } 1563 assert(!pgt->num_used_entries); 1564 1565 out: 1566 if (is_user_ta_ctx(pgt->ctx)) { 1567 TAILQ_FOREACH(area, to_user_ta_ctx(pgt->ctx)->areas, link) { 1568 if (area->pgt == pgt) 1569 area->pgt = NULL; 1570 } 1571 } 1572 1573 pager_unlock(exceptions); 1574 } 1575 KEEP_PAGER(tee_pager_pgt_save_and_release_entries); 1576 #endif /*CFG_PAGED_USER_TA*/ 1577 1578 void tee_pager_release_phys(void *addr, size_t size) 1579 { 1580 bool unmaped = false; 1581 vaddr_t va = (vaddr_t)addr; 1582 vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE); 1583 vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE); 1584 struct tee_pager_area *area; 1585 uint32_t exceptions; 1586 1587 if (end <= begin) 1588 return; 1589 1590 area = find_area(&tee_pager_area_head, 
begin); 1591 if (!area || 1592 area != find_area(&tee_pager_area_head, end - SMALL_PAGE_SIZE)) 1593 panic(); 1594 1595 exceptions = pager_lock_check_stack(128); 1596 1597 for (va = begin; va < end; va += SMALL_PAGE_SIZE) 1598 unmaped |= tee_pager_release_one_phys(area, va); 1599 1600 if (unmaped) 1601 tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE); 1602 1603 pager_unlock(exceptions); 1604 } 1605 KEEP_PAGER(tee_pager_release_phys); 1606 1607 void *tee_pager_alloc(size_t size, uint32_t flags) 1608 { 1609 tee_mm_entry_t *mm; 1610 uint32_t f = TEE_MATTR_PW | TEE_MATTR_PR | (flags & TEE_MATTR_LOCKED); 1611 1612 if (!size) 1613 return NULL; 1614 1615 mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE)); 1616 if (!mm) 1617 return NULL; 1618 1619 tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm), 1620 f, NULL, NULL); 1621 1622 return (void *)tee_mm_get_smem(mm); 1623 } 1624
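/*
 * Illustrative sketch (not part of the pager itself): how core code might
 * use the allocation API above to get locked, pager-backed memory and later
 * release the physical pages again. The buffer and size names below are
 * hypothetical.
 *
 *	size_t sz = 2 * SMALL_PAGE_SIZE;
 *	uint8_t *buf = tee_pager_alloc(sz, TEE_MATTR_LOCKED);
 *
 *	if (!buf)
 *		panic();
 *	memset(buf, 0, sz);		// pages fault in zero-initialized and stay mapped
 *	tee_pager_release_phys(buf, sz);	// return the physical pages to the pool
 */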