/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <keep.h>
#include <sys/queue.h>
#include <kernel/abort.h>
#include <kernel/panic.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/tz_proc.h>
#include <mm/core_memprot.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu_defs.h>
#include <mm/tee_pager.h>
#include <types_ext.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_defines.h>
#include <tee/tee_cryp_provider.h>
#include <trace.h>
#include <utee_defines.h>
#include <util.h>

#include "pager_private.h"

#define PAGER_AE_KEY_BITS	256

struct pager_rw_pstate {
	uint64_t iv;
	uint8_t tag[PAGER_AES_GCM_TAG_LEN];
};

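/*
 * struct tee_pager_area - Represents a paged memory region.
 *
 * @u.hashes	SHA-256 hashes of the stored pages, used for read-only areas
 * @u.rwp	per-page IV and authentication tag, used for read/write areas
 * @store	backing store holding the encrypted/hashed page content
 * @flags	TEE_MATTR_* flags describing the mapping of the area
 * @base	virtual start address of the area
 * @size	size of the area in bytes
 */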
struct tee_pager_area {
	union {
		const uint8_t *hashes;
		struct pager_rw_pstate *rwp;
	} u;
	uint8_t *store;
	uint32_t flags;
	vaddr_t base;
	size_t size;
	TAILQ_ENTRY(tee_pager_area) link;
};

static TAILQ_HEAD(tee_pager_area_head, tee_pager_area) tee_pager_area_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);

#define INVALID_PGIDX	UINT_MAX

/*
 * struct tee_pager_pmem - Represents a physical page used for paging.
 *
 * @pgidx	an index of the entry in tee_pager_tbl_info.
 * @va_alias	Virtual address where the physical page always is aliased.
 *		Used during remapping of the page when the content needs to
 *		be updated before it's available at the new location.
 * @area	a pointer to the pager area
 */
struct tee_pager_pmem {
	unsigned pgidx;
	void *va_alias;
	struct tee_pager_area *area;
	TAILQ_ENTRY(tee_pager_pmem) link;
};

/* The list of physical pages. The first page in the list is the oldest */
TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);

static struct tee_pager_pmem_head tee_pager_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);

static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);

static uint8_t pager_ae_key[PAGER_AE_KEY_BITS / 8];

/* number of pages hidden */
#define TEE_PAGER_NHIDE (tee_pager_npages / 3)

/* Number of registered physical pages, used when hiding pages. */
static size_t tee_pager_npages;

#ifdef CFG_WITH_STATS
static struct tee_pager_stats pager_stats;

static inline void incr_ro_hits(void)
{
	pager_stats.ro_hits++;
}

static inline void incr_rw_hits(void)
{
	pager_stats.rw_hits++;
}

static inline void incr_hidden_hits(void)
{
	pager_stats.hidden_hits++;
}

static inline void incr_zi_released(void)
{
	pager_stats.zi_released++;
}

static inline void incr_npages_all(void)
{
	pager_stats.npages_all++;
}

static inline void set_npages(void)
{
	pager_stats.npages = tee_pager_npages;
}

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	*stats = pager_stats;

	pager_stats.hidden_hits = 0;
	pager_stats.ro_hits = 0;
	pager_stats.rw_hits = 0;
	pager_stats.zi_released = 0;
}

#else /* CFG_WITH_STATS */
static inline void incr_ro_hits(void) { }
static inline void incr_rw_hits(void) { }
static inline void incr_hidden_hits(void) { }
static inline void incr_zi_released(void) { }
static inline void incr_npages_all(void) { }
static inline void set_npages(void) { }

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	memset(stats, 0, sizeof(struct tee_pager_stats));
}
#endif /* CFG_WITH_STATS */

struct core_mmu_table_info tee_pager_tbl_info;
static struct core_mmu_table_info pager_alias_tbl_info;

static unsigned pager_lock = SPINLOCK_UNLOCK;

/* Defines the range of the alias area */
static tee_mm_entry_t *pager_alias_area;
/*
 * Physical pages are added in a stack-like fashion to the alias area,
 * @pager_alias_next_free gives the address of the next free entry if
 * @pager_alias_next_free is != 0
 */
static uintptr_t pager_alias_next_free;

static void set_alias_area(tee_mm_entry_t *mm)
{
	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
	size_t tbl_va_size;
	unsigned idx;
	unsigned last_idx;
	vaddr_t smem = tee_mm_get_smem(mm);
	size_t nbytes = tee_mm_get_bytes(mm);

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);

	if (pager_alias_area)
		panic();
	if (!ti->num_entries && !core_mmu_find_table(smem, UINT_MAX, ti))
		panic();

	if ((1 << ti->shift) != SMALL_PAGE_SIZE)
		panic();

	tbl_va_size = (1 << ti->shift) * ti->num_entries;
	if (!core_is_buffer_inside(smem, nbytes,
				   ti->va_base, tbl_va_size)) {
		DMSG("area 0x%" PRIxVA " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
		     smem, nbytes, ti->va_base, tbl_va_size);
		panic();
	}

	if (smem & SMALL_PAGE_MASK || nbytes & SMALL_PAGE_MASK)
		panic();

	pager_alias_area = mm;
	pager_alias_next_free = smem;

	/* Clear all mappings in the alias area */
	idx = core_mmu_va2idx(ti, smem);
	last_idx = core_mmu_va2idx(ti, smem + nbytes);
	for (; idx < last_idx; idx++)
		core_mmu_set_entry(ti, idx, 0, 0);

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

static void generate_ae_key(void)
{
	if (rng_generate(pager_ae_key, sizeof(pager_ae_key)) != TEE_SUCCESS)
		panic();
}

void tee_pager_init(tee_mm_entry_t *mm_alias)
{
	set_alias_area(mm_alias);
	generate_ae_key();
}

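/*
 * pager_add_alias_page() - Map a physical page in the alias area.
 *
 * Maps @pa read/write at the next free entry in the alias area and
 * returns the virtual address of the alias mapping.
 */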
static void *pager_add_alias_page(paddr_t pa)
{
	unsigned idx;
	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
			TEE_MATTR_SECURE | TEE_MATTR_PRW;

	DMSG("0x%" PRIxPA, pa);

	if (!pager_alias_next_free || !ti->num_entries)
		panic();
	idx = core_mmu_va2idx(ti, pager_alias_next_free);
	core_mmu_set_entry(ti, idx, pa, attr);
	pager_alias_next_free += SMALL_PAGE_SIZE;
	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
				      tee_mm_get_bytes(pager_alias_area)))
		pager_alias_next_free = 0;
	return (void *)core_mmu_idx2va(ti, idx);
}

static struct tee_pager_area *alloc_area(vaddr_t base, size_t size,
			uint32_t flags, const void *store, const void *hashes)
{
	struct tee_pager_area *area = calloc(1, sizeof(*area));
	tee_mm_entry_t *mm_store = NULL;

	if (!area)
		return NULL;

	if (flags & TEE_MATTR_PW) {
		if (flags & TEE_MATTR_LOCKED)
			goto out;
		mm_store = tee_mm_alloc(&tee_mm_sec_ddr, size);
		if (!mm_store)
			goto bad;
		area->store = phys_to_virt(tee_mm_get_smem(mm_store),
					   MEM_AREA_TA_RAM);
		if (!area->store)
			goto bad;
		area->u.rwp = calloc(size / SMALL_PAGE_SIZE,
				     sizeof(struct pager_rw_pstate));
		if (!area->u.rwp)
			goto bad;
	} else {
		area->store = (void *)store;
		area->u.hashes = hashes;
	}
out:
	area->base = base;
	area->size = size;
	area->flags = flags;
	return area;
bad:
	tee_mm_free(mm_store);
	free(area->u.rwp);
	free(area);
	return NULL;
}

static void area_insert_tail(struct tee_pager_area *area)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	cpu_spin_lock(&pager_lock);

	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);

	cpu_spin_unlock(&pager_lock);
	thread_set_exceptions(exceptions);
}
KEEP_PAGER(area_insert_tail);

bool tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
			const void *store, const void *hashes)
{
	struct tee_pager_area *area;
	size_t tbl_va_size;
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;

	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
	     base, base + size, flags, store, hashes);

	if (base & SMALL_PAGE_MASK || size & SMALL_PAGE_MASK || !size) {
		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, size);
		panic();
	}

	if (!(flags & TEE_MATTR_PW) && (!store || !hashes))
		panic();

	if ((flags & TEE_MATTR_PW) && (store || hashes))
		panic();

	tbl_va_size = (1 << ti->shift) * ti->num_entries;
	if (!core_is_buffer_inside(base, size, ti->va_base, tbl_va_size)) {
		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
		     base, size, ti->va_base, tbl_va_size);
		return false;
	}

	area = alloc_area(base, size, flags, store, hashes);
	if (!area)
		return false;

	area_insert_tail(area);
	return true;
}

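/* Returns the registered pager area covering @va, or NULL if none matches */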
static struct tee_pager_area *tee_pager_find_area(vaddr_t va)
{
	struct tee_pager_area *area;

	TAILQ_FOREACH(area, &tee_pager_area_head, link) {
		if (core_is_buffer_inside(va, 1, area->base, area->size))
			return area;
	}
	return NULL;
}

static uint32_t get_area_mattr(struct tee_pager_area *area)
{
	return TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
	       TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
	       TEE_MATTR_SECURE | TEE_MATTR_PR |
	       (area->flags & TEE_MATTR_PRWX);
}

static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
{
	paddr_t pa;
	unsigned idx;

	idx = core_mmu_va2idx(&pager_alias_tbl_info, (vaddr_t)pmem->va_alias);
	core_mmu_get_entry(&pager_alias_tbl_info, idx, &pa, NULL);
	return pa;
}

static bool decrypt_page(struct pager_rw_pstate *rwp, const void *src,
			 void *dst)
{
	struct pager_aes_gcm_iv iv = {
		{ (vaddr_t)rwp, rwp->iv >> 32, rwp->iv }
	};

	return pager_aes_gcm_decrypt(pager_ae_key, sizeof(pager_ae_key),
				     &iv, rwp->tag, src, dst, SMALL_PAGE_SIZE);
}

static void encrypt_page(struct pager_rw_pstate *rwp, void *src, void *dst)
{
	struct pager_aes_gcm_iv iv;

	assert((rwp->iv + 1) > rwp->iv);
	rwp->iv++;
	/*
	 * IV is constructed as recommended in section "8.2.1 Deterministic
	 * Construction" of "Recommendation for Block Cipher Modes of
	 * Operation: Galois/Counter Mode (GCM) and GMAC",
	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
	 */
	iv.iv[0] = (vaddr_t)rwp;
	iv.iv[1] = rwp->iv >> 32;
	iv.iv[2] = rwp->iv;

	if (!pager_aes_gcm_encrypt(pager_ae_key, sizeof(pager_ae_key),
				   &iv, rwp->tag,
				   src, dst, SMALL_PAGE_SIZE))
		panic();
}

static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
				void *va_alias)
{
	size_t idx = (page_va - area->base) >> SMALL_PAGE_SHIFT;
	const void *stored_page = area->store + idx * SMALL_PAGE_SIZE;

	if (!(area->flags & TEE_MATTR_PW)) {
		const void *hash = area->u.hashes + idx * TEE_SHA256_HASH_SIZE;

		memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
		incr_ro_hits();

		if (hash_sha256_check(hash, va_alias, SMALL_PAGE_SIZE) !=
				TEE_SUCCESS) {
			EMSG("PH 0x%" PRIxVA " failed", page_va);
			panic();
		}
	} else if (area->flags & TEE_MATTR_LOCKED) {
		FMSG("Zero init %p %#" PRIxVA, va_alias, page_va);
		memset(va_alias, 0, SMALL_PAGE_SIZE);
	} else {
		FMSG("Restore %p %#" PRIxVA " iv %#" PRIx64,
		     va_alias, page_va, area->u.rwp[idx].iv);
		if (!area->u.rwp[idx].iv)
			memset(va_alias, 0, SMALL_PAGE_SIZE);
		else if (!decrypt_page(&area->u.rwp[idx], stored_page,
				       va_alias)) {
			EMSG("PH 0x%" PRIxVA " failed", page_va);
			panic();
		}
		incr_rw_hits();
	}
}

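/*
 * tee_pager_save_page() - Write a physical page back to its backing store.
 *
 * The page is only saved if @attr indicates that it may have been written
 * to (dirty); read-only pages always have an unmodified copy in the store.
 * Read/write pages are encrypted with a fresh IV before being stored.
 */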
static void tee_pager_save_page(struct tee_pager_pmem *pmem, uint32_t attr)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	const uint32_t dirty_bits = TEE_MATTR_PW | TEE_MATTR_UW |
				    TEE_MATTR_HIDDEN_DIRTY_BLOCK;

	assert(!(pmem->area->flags & TEE_MATTR_LOCKED));

	if (attr & dirty_bits) {
		size_t idx = pmem->pgidx - core_mmu_va2idx(ti,
							   pmem->area->base);
		void *stored_page = pmem->area->store + idx * SMALL_PAGE_SIZE;

		assert(pmem->area->flags & TEE_MATTR_PW);
		encrypt_page(&pmem->area->u.rwp[idx], pmem->va_alias,
			     stored_page);
		FMSG("Saved %#" PRIxVA " iv %#" PRIx64,
		     core_mmu_idx2va(ti, pmem->pgidx),
		     pmem->area->u.rwp[idx].iv);
	}
}

static bool tee_pager_unhide_page(vaddr_t page_va)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	struct tee_pager_pmem *pmem;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;

		if (pmem->pgidx == INVALID_PGIDX)
			continue;

		core_mmu_get_entry(ti, pmem->pgidx, &pa, &attr);

		if (!(attr &
		      (TEE_MATTR_HIDDEN_BLOCK | TEE_MATTR_HIDDEN_DIRTY_BLOCK)))
			continue;

		if (core_mmu_va2idx(ti, page_va) == pmem->pgidx) {
			uint32_t a = get_area_mattr(pmem->area);

			/* page is hidden, show and move to back */
			if (pa != get_pmem_pa(pmem))
				panic();
			/*
			 * If it's not a dirty block, then it should be
			 * read only.
			 */
			if (!(attr & TEE_MATTR_HIDDEN_DIRTY_BLOCK))
				a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
			else
				FMSG("Unhide %#" PRIxVA, page_va);
			core_mmu_set_entry(ti, pmem->pgidx, pa, a);

			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);

			/* TODO only invalidate entry touched above */
			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

			incr_hidden_hits();
			return true;
		}
	}

	return false;
}

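/*
 * tee_pager_hide_pages() - Temporarily unmap up to TEE_PAGER_NHIDE of the
 * oldest physical pages.
 *
 * Hidden pages keep their physical page but fault on the next access;
 * dirty pages get a separate hidden state so their content is saved
 * before the page is reused. A fault on a hidden page is resolved
 * cheaply by tee_pager_unhide_page(), which also moves the page to the
 * back of the list.
 */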
static void tee_pager_hide_pages(void)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	struct tee_pager_pmem *pmem;
	size_t n = 0;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;
		uint32_t a;

		if (n >= TEE_PAGER_NHIDE)
			break;
		n++;

		/*
		 * We can't hide pages when pmem->area is not defined, as
		 * unhiding requires pmem->area to be defined.
		 */
		if (!pmem->area)
			continue;

		core_mmu_get_entry(ti, pmem->pgidx, &pa, &attr);
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		assert(pa == get_pmem_pa(pmem));
		if (attr & (TEE_MATTR_PW | TEE_MATTR_UW)) {
			a = TEE_MATTR_HIDDEN_DIRTY_BLOCK;
			FMSG("Hide %#" PRIxVA,
			     ti->va_base + pmem->pgidx * SMALL_PAGE_SIZE);
		} else
			a = TEE_MATTR_HIDDEN_BLOCK;
		core_mmu_set_entry(ti, pmem->pgidx, pa, a);
	}

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

/*
 * Find mapped pmem, hide and move to pageable pmem.
 * Return false if page was not mapped, and true if page was mapped.
 */
static bool tee_pager_release_one_phys(vaddr_t page_va)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	struct tee_pager_pmem *pmem;
	unsigned pgidx;
	paddr_t pa;
	uint32_t attr;

	pgidx = core_mmu_va2idx(ti, page_va);
	core_mmu_get_entry(ti, pgidx, &pa, &attr);

	FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);

	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
		if (pmem->pgidx != pgidx)
			continue;

		assert(pa == get_pmem_pa(pmem));
		core_mmu_set_entry(ti, pgidx, 0, 0);
		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
		pmem->area = NULL;
		pmem->pgidx = INVALID_PGIDX;
		tee_pager_npages++;
		set_npages();
		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
		incr_zi_released();
		return true;
	}

	return false;
}

/* Finds the oldest page and unmaps it from its old virtual address */
static struct tee_pager_pmem *tee_pager_get_page(uint32_t next_area_flags)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	struct tee_pager_pmem *pmem;

	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
	if (!pmem) {
		EMSG("No pmem entries");
		return NULL;
	}
	if (pmem->pgidx != INVALID_PGIDX) {
		uint32_t a;

		core_mmu_get_entry(ti, pmem->pgidx, NULL, &a);
		core_mmu_set_entry(ti, pmem->pgidx, 0, 0);
		/* TODO only invalidate entries touched above */
		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
		tee_pager_save_page(pmem, a);
	}

	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
	pmem->pgidx = INVALID_PGIDX;
	pmem->area = NULL;
	if (next_area_flags & TEE_MATTR_LOCKED) {
		/* Move page to lock list */
		if (tee_pager_npages <= 0)
			panic();
		tee_pager_npages--;
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
	} else {
		/* move page to back */
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	}

	return pmem;
}

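/*
 * pager_update_permissions() - Try to resolve a fault on a mapped page.
 *
 * Returns false if the faulting page isn't mapped at all. Returns true if
 * the fault is already covered by the current mapping, for instance when
 * another core resolved it first or when a read-only mapping of a writable
 * area is upgraded to read/write (dirty). Panics on permission violations
 * that can't be fixed up.
 */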
static bool pager_update_permissions(struct tee_pager_area *area,
			struct abort_info *ai)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	unsigned pgidx = core_mmu_va2idx(ti, ai->va);
	uint32_t attr;
	paddr_t pa;

	core_mmu_get_entry(ti, pgidx, &pa, &attr);

	/* Not mapped */
	if (!(attr & TEE_MATTR_VALID_BLOCK))
		return false;

	/* Not readable, should not happen */
	if (!(attr & TEE_MATTR_PR)) {
		abort_print_error(ai);
		panic();
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		if (ai->abort_type == ABORT_TYPE_PREFETCH &&
		    !(attr & TEE_MATTR_PX)) {
			/* Attempting to execute from an NOX page */
			abort_print_error(ai);
			panic();
		}
		/* Since the page is mapped now it's OK */
		return true;
	case CORE_MMU_FAULT_WRITE_PERMISSION:
		if (!(area->flags & TEE_MATTR_PW)) {
			/* Attempting to write to an RO page */
			abort_print_error(ai);
			panic();
		}
		if (!(attr & TEE_MATTR_PW)) {
			FMSG("Dirty %p", (void *)(ai->va & ~SMALL_PAGE_MASK));
			core_mmu_set_entry(ti, pgidx, pa, attr | TEE_MATTR_PW);
			/* TODO only invalidate entry above */
			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
		}
		/* Since permissions have been updated now it's OK */
		return true;
	default:
		/* Some fault we can't deal with */
		abort_print_error(ai);
		panic();
	}
}

#ifdef CFG_TEE_CORE_DEBUG
static void stat_handle_fault(void)
{
	static size_t num_faults;
	static size_t min_npages = SIZE_MAX;
	static size_t total_min_npages = SIZE_MAX;

	num_faults++;
	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
		DMSG("nfaults %zu npages %zu (min %zu)",
		     num_faults, tee_pager_npages, min_npages);
		min_npages = tee_pager_npages; /* reset */
	}
	if (tee_pager_npages < min_npages)
		min_npages = tee_pager_npages;
	if (tee_pager_npages < total_min_npages)
		total_min_npages = tee_pager_npages;
}
#else
static void stat_handle_fault(void)
{
}
#endif

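/*
 * tee_pager_handle_fault() - Handle an abort on a paged address.
 *
 * Returns true if the faulting address belongs to a registered pager area
 * and the fault has been resolved (page unhidden, permissions updated or
 * the page paged in via the alias mapping), false if the address isn't
 * handled by the pager.
 */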
bool tee_pager_handle_fault(struct abort_info *ai)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	struct tee_pager_area *area;
	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
	uint32_t exceptions;
	bool ret;

#ifdef TEE_PAGER_DEBUG_PRINT
	abort_print(ai);
#endif

	/*
	 * We're updating pages that can affect several active CPUs at a
	 * time below. We end up here because a thread tries to access some
	 * memory that isn't available. We have to be careful when making
	 * that memory available as other threads may succeed in accessing
	 * that address the moment after we've made it available.
	 *
	 * That means that we can't just map the memory and populate the
	 * page, instead we use the aliased mapping to populate the page
	 * and once everything is ready we map it.
	 */
	exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	cpu_spin_lock(&pager_lock);

	stat_handle_fault();

	/* check if the access is valid */
	area = tee_pager_find_area(ai->va);
	if (!area) {
		EMSG("Invalid addr 0x%" PRIxVA, ai->va);
		ret = false;
		goto out;
	}

	if (!tee_pager_unhide_page(page_va)) {
		struct tee_pager_pmem *pmem = NULL;
		uint32_t attr;

		/*
		 * The page wasn't hidden, but some other core may have
		 * updated the table entry before we got here or we need
		 * to make a read-only page read-write (dirty).
		 */
		if (pager_update_permissions(area, ai)) {
			/*
			 * This kind of access is OK with the mapping, we're
			 * done here because the fault has already been
			 * dealt with by another core.
			 */
			ret = true;
			goto out;
		}

		pmem = tee_pager_get_page(area->flags);
		if (!pmem) {
			abort_print(ai);
			panic();
		}

		/* load page code & data */
		tee_pager_load_page(area, page_va, pmem->va_alias);

		/*
		 * We've updated the page using the aliased mapping and
		 * some cache maintenance is now needed if it's an
		 * executable page.
		 *
		 * Since the d-cache is a Physically-indexed,
		 * physically-tagged (PIPT) cache we can clean the aliased
		 * address instead of the real virtual address.
		 *
		 * The i-cache can also be PIPT, but may be something else
		 * too. To keep it simple we invalidate the entire i-cache.
		 * As a future optimization we may invalidate only the
		 * aliased area if it's a PIPT cache, else the entire cache.
		 */
		if (area->flags & TEE_MATTR_PX) {
			/*
			 * Doing these operations to LoUIS (Level of
			 * unification, Inner Shareable) would be enough
			 */
			cache_maintenance_l1(DCACHE_AREA_CLEAN,
					     pmem->va_alias, SMALL_PAGE_SIZE);

			cache_maintenance_l1(ICACHE_INVALIDATE, NULL, 0);
		}

		pmem->area = area;
		pmem->pgidx = core_mmu_va2idx(ti, ai->va);
		attr = get_area_mattr(area) & ~(TEE_MATTR_PW | TEE_MATTR_UW);
		core_mmu_set_entry(ti, pmem->pgidx, get_pmem_pa(pmem), attr);

		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA,
		     core_mmu_idx2va(ti, pmem->pgidx), get_pmem_pa(pmem));
	}

	tee_pager_hide_pages();
	ret = true;
out:
	cpu_spin_unlock(&pager_lock);
	thread_unmask_exceptions(exceptions);
	return ret;
}

void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	size_t n;

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);

	/* setup memory */
	for (n = 0; n < npages; n++) {
		struct tee_pager_pmem *pmem;
		tee_vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
		unsigned pgidx = core_mmu_va2idx(ti, va);
		paddr_t pa;
		uint32_t attr;

		core_mmu_get_entry(ti, pgidx, &pa, &attr);

		/* Ignore unmapped pages/blocks */
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		pmem = malloc(sizeof(struct tee_pager_pmem));
		if (!pmem)
			panic();

		pmem->va_alias = pager_add_alias_page(pa);

		if (unmap) {
			pmem->area = NULL;
			pmem->pgidx = INVALID_PGIDX;
			core_mmu_set_entry(ti, pgidx, 0, 0);
		} else {
			/*
			 * The page is still mapped, let's assign the area
			 * and update the protection bits accordingly.
			 */
			pmem->area = tee_pager_find_area(va);
			pmem->pgidx = pgidx;
			assert(pa == get_pmem_pa(pmem));
			core_mmu_set_entry(ti, pgidx, pa,
					   get_area_mattr(pmem->area));
		}

		tee_pager_npages++;
		incr_npages_all();
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	}

	/* Invalidate secure TLB */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

void tee_pager_release_phys(void *addr, size_t size)
{
	bool unmapped = false;
	vaddr_t va = (vaddr_t)addr;
	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	cpu_spin_lock(&pager_lock);

	for (va = begin; va < end; va += SMALL_PAGE_SIZE)
		unmapped |= tee_pager_release_one_phys(va);

	/* Invalidate secure TLB */
	if (unmapped)
		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

	cpu_spin_unlock(&pager_lock);
	thread_set_exceptions(exceptions);
}
KEEP_PAGER(tee_pager_release_phys);

void *tee_pager_alloc(size_t size, uint32_t flags)
{
	tee_mm_entry_t *mm;
	uint32_t f = TEE_MATTR_PRW | (flags & TEE_MATTR_LOCKED);

	if (!size)
		return NULL;

	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
	if (!mm)
		return NULL;

	tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
				f, NULL, NULL);

	return (void *)tee_mm_get_smem(mm);
}