/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <kernel/abort.h>
#include <kernel/panic.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/tz_proc.h>
#include <mm/core_memprot.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu_defs.h>
#include <mm/tee_pager.h>
#include <types_ext.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_defines.h>
#include <tee/tee_cryp_provider.h>
#include <trace.h>
#include <utee_defines.h>
#include <util.h>
#include <keep.h>
#include "pager_private.h"

#define PAGER_AE_KEY_BITS	256

struct pager_rw_pstate {
	uint64_t iv;
	uint8_t tag[PAGER_AES_GCM_TAG_LEN];
};

struct tee_pager_area {
	union {
		const uint8_t *hashes;
		struct pager_rw_pstate *rwp;
	} u;
	uint8_t *store;
	uint32_t flags;
	vaddr_t base;
	size_t size;
	TAILQ_ENTRY(tee_pager_area) link;
};

static TAILQ_HEAD(tee_pager_area_head, tee_pager_area) tee_pager_area_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);

#define INVALID_PGIDX	UINT_MAX

/*
 * struct tee_pager_pmem - Represents a physical page used for paging.
 *
 * @pgidx	an index of the entry in tee_pager_tbl_info.
 * @va_alias	Virtual address where the physical page always is aliased.
 *		Used during remapping of the page when the content needs to
 *		be updated before it's available at the new location.
 * @area	a pointer to the pager area
 */
struct tee_pager_pmem {
	unsigned pgidx;
	void *va_alias;
	struct tee_pager_area *area;
	TAILQ_ENTRY(tee_pager_pmem) link;
};

/* The list of physical pages. The first page in the list is the oldest. */
TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
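/*
 * Illustration only (toy types, not the pager's own): the list above is
 * used as a FIFO with second chances. Victims are taken from the head
 * (the oldest page) and pages are requeued at the tail when touched
 * again, mirroring what tee_pager_get_page() and tee_pager_unhide_page()
 * do below. Relies on <sys/queue.h>, already included above.
 */
#if 0
struct toy_page {
	int id;
	TAILQ_ENTRY(toy_page) link;
};
TAILQ_HEAD(toy_page_head, toy_page);

/* Take the oldest page and move it to the back of the queue */
static struct toy_page *toy_evict(struct toy_page_head *h)
{
	struct toy_page *p = TAILQ_FIRST(h);

	if (p) {
		TAILQ_REMOVE(h, p, link);
		TAILQ_INSERT_TAIL(h, p, link);
	}
	return p;
}
#endif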
static struct tee_pager_pmem_head tee_pager_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);

static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);

static uint8_t pager_ae_key[PAGER_AE_KEY_BITS / 8];

/* Number of pages to hide at a time (one third of the pageable pages) */
#define TEE_PAGER_NHIDE (tee_pager_npages / 3)

/* Number of registered physical pages, used when hiding pages. */
static size_t tee_pager_npages;

#ifdef CFG_WITH_STATS
static struct tee_pager_stats pager_stats;

static inline void incr_ro_hits(void)
{
	pager_stats.ro_hits++;
}

static inline void incr_rw_hits(void)
{
	pager_stats.rw_hits++;
}

static inline void incr_hidden_hits(void)
{
	pager_stats.hidden_hits++;
}

static inline void incr_zi_released(void)
{
	pager_stats.zi_released++;
}

static inline void incr_npages_all(void)
{
	pager_stats.npages_all++;
}

static inline void set_npages(void)
{
	pager_stats.npages = tee_pager_npages;
}

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	*stats = pager_stats;

	pager_stats.hidden_hits = 0;
	pager_stats.ro_hits = 0;
	pager_stats.rw_hits = 0;
	pager_stats.zi_released = 0;
}

#else /* CFG_WITH_STATS */
static inline void incr_ro_hits(void) { }
static inline void incr_rw_hits(void) { }
static inline void incr_hidden_hits(void) { }
static inline void incr_zi_released(void) { }
static inline void incr_npages_all(void) { }
static inline void set_npages(void) { }

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	memset(stats, 0, sizeof(struct tee_pager_stats));
}
#endif /* CFG_WITH_STATS */

struct core_mmu_table_info tee_pager_tbl_info;
static struct core_mmu_table_info pager_alias_tbl_info;

static unsigned pager_lock = SPINLOCK_UNLOCK;

/* Defines the range of the alias area */
static tee_mm_entry_t *pager_alias_area;
/*
 * Physical pages are added in a stack like fashion to the alias area,
 * @pager_alias_next_free gives the address of the next free entry if
 * @pager_alias_next_free is != 0
 */
static uintptr_t pager_alias_next_free;

static void set_alias_area(tee_mm_entry_t *mm)
{
	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
	size_t tbl_va_size;
	unsigned idx;
	unsigned last_idx;
	vaddr_t smem = tee_mm_get_smem(mm);
	size_t nbytes = tee_mm_get_bytes(mm);

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);

	TEE_ASSERT(!pager_alias_area);
	if (!ti->num_entries && !core_mmu_find_table(smem, UINT_MAX, ti)) {
		DMSG("Can't find translation table");
		panic();
	}
	if ((1 << ti->shift) != SMALL_PAGE_SIZE) {
		DMSG("Unsupported page size in translation table %u",
		     1 << ti->shift);
		panic();
	}

	tbl_va_size = (1 << ti->shift) * ti->num_entries;
	if (!core_is_buffer_inside(smem, nbytes, ti->va_base, tbl_va_size)) {
		DMSG("area 0x%" PRIxVA " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
		     smem, nbytes, ti->va_base, tbl_va_size);
		panic();
	}

	TEE_ASSERT(!(smem & SMALL_PAGE_MASK));
	TEE_ASSERT(!(nbytes & SMALL_PAGE_MASK));

	pager_alias_area = mm;
	pager_alias_next_free = smem;

	/* Clear all mappings in the alias area */
	idx = core_mmu_va2idx(ti, smem);
	last_idx = core_mmu_va2idx(ti, smem + nbytes);
	for (; idx < last_idx; idx++)
		core_mmu_set_entry(ti, idx, 0, 0);

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}
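/*
 * Sketch of the alias-slot discipline used by pager_add_alias_page()
 * below, with made-up constants: a bump allocator that never frees and
 * that uses the value 0 to mean "exhausted", which is why
 * pager_add_alias_page() asserts that pager_alias_next_free is non-zero.
 */
#if 0
#define TOY_AREA_BASE	0x40000000UL
#define TOY_AREA_SIZE	(8 * SMALL_PAGE_SIZE)

static uintptr_t toy_next_free = TOY_AREA_BASE;

static uintptr_t toy_take_slot(void)
{
	uintptr_t va = toy_next_free;

	assert(va);	/* 0 means all slots are taken */
	toy_next_free += SMALL_PAGE_SIZE;
	if (toy_next_free >= TOY_AREA_BASE + TOY_AREA_SIZE)
		toy_next_free = 0;
	return va;
}
#endif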
static void generate_ae_key(void)
{
	TEE_Result res;

	res = rng_generate(pager_ae_key, sizeof(pager_ae_key));
	TEE_ASSERT(res == TEE_SUCCESS);
}

void tee_pager_init(tee_mm_entry_t *mm_alias)
{
	set_alias_area(mm_alias);
	generate_ae_key();
}

static void *pager_add_alias_page(paddr_t pa)
{
	unsigned idx;
	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
			TEE_MATTR_SECURE | TEE_MATTR_PRW;

	DMSG("0x%" PRIxPA, pa);

	TEE_ASSERT(pager_alias_next_free && ti->num_entries);
	idx = core_mmu_va2idx(ti, pager_alias_next_free);
	core_mmu_set_entry(ti, idx, pa, attr);
	pager_alias_next_free += SMALL_PAGE_SIZE;
	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
				      tee_mm_get_bytes(pager_alias_area)))
		pager_alias_next_free = 0;
	return (void *)core_mmu_idx2va(ti, idx);
}

static struct tee_pager_area *alloc_area(vaddr_t base, size_t size,
			uint32_t flags, const void *store, const void *hashes)
{
	struct tee_pager_area *area = calloc(1, sizeof(*area));
	tee_mm_entry_t *mm_store = NULL;

	if (!area)
		return NULL;

	if (flags & TEE_MATTR_PW) {
		if (flags & TEE_MATTR_LOCKED)
			goto out;
		mm_store = tee_mm_alloc(&tee_mm_sec_ddr, size);
		if (!mm_store)
			goto bad;
		area->store = phys_to_virt(tee_mm_get_smem(mm_store),
					   MEM_AREA_TA_RAM);
		if (!area->store)
			goto bad;
		area->u.rwp = calloc(size / SMALL_PAGE_SIZE,
				     sizeof(struct pager_rw_pstate));
		if (!area->u.rwp)
			goto bad;
	} else {
		area->store = (void *)store;
		area->u.hashes = hashes;
	}
out:
	area->base = base;
	area->size = size;
	area->flags = flags;
	return area;
bad:
	tee_mm_free(mm_store);
	free(area->u.rwp);
	free(area);
	return NULL;
}

static void area_insert_tail(struct tee_pager_area *area)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	cpu_spin_lock(&pager_lock);

	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);

	cpu_spin_unlock(&pager_lock);
	thread_set_exceptions(exceptions);
}
KEEP_PAGER(area_insert_tail);

bool tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
			const void *store, const void *hashes)
{
	struct tee_pager_area *area;
	size_t tbl_va_size;
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;

	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
	     base, base + size, flags, store, hashes);

	TEE_ASSERT(!(base & SMALL_PAGE_MASK) &&
		   size && !(size & SMALL_PAGE_MASK));

	if (flags & TEE_MATTR_PW)
		TEE_ASSERT(!store && !hashes);
	else
		TEE_ASSERT(store && hashes);

	if (!ti->num_entries) {
		if (!core_mmu_find_table(base, UINT_MAX, ti))
			return false;
		if ((1 << ti->shift) != SMALL_PAGE_SIZE) {
			DMSG("Unsupported page size in translation table %u",
			     1 << ti->shift);
			return false;
		}
	}

	tbl_va_size = (1 << ti->shift) * ti->num_entries;
	if (!core_is_buffer_inside(base, size, ti->va_base, tbl_va_size)) {
		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
		     base, size, ti->va_base, tbl_va_size);
		return false;
	}

	area = alloc_area(base, size, flags, store, hashes);
	if (!area)
		return false;

	area_insert_tail(area);
	return true;
}
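/*
 * Hypothetical usage of tee_pager_add_core_area(), for reference only
 * (the addresses and symbols below are made up): a read-only area must
 * carry its backing store and one SHA-256 hash per page, while a
 * writable area must carry neither, since the pager allocates its own
 * store and per-page crypto state in alloc_area().
 */
#if 0
static void example_register_areas(void)
{
	extern const uint8_t ro_store[];	/* paged code/rodata image */
	extern const uint8_t ro_hashes[];	/* one hash per page */

	/* Read-only executable area, verified against the hashes */
	tee_pager_add_core_area(0x00100000, 16 * SMALL_PAGE_SIZE,
				TEE_MATTR_PR | TEE_MATTR_PX,
				ro_store, ro_hashes);

	/* Read-write area: zero-filled on first use, encrypted on evict */
	tee_pager_add_core_area(0x00200000, 4 * SMALL_PAGE_SIZE,
				TEE_MATTR_PRW, NULL, NULL);
}
#endif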
static struct tee_pager_area *tee_pager_find_area(vaddr_t va)
{
	struct tee_pager_area *area;

	TAILQ_FOREACH(area, &tee_pager_area_head, link) {
		if (core_is_buffer_inside(va, 1, area->base, area->size))
			return area;
	}
	return NULL;
}

static uint32_t get_area_mattr(struct tee_pager_area *area)
{
	return TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
	       TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
	       TEE_MATTR_SECURE | TEE_MATTR_PR |
	       (area->flags & TEE_MATTR_PRWX);
}

static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
{
	paddr_t pa;
	unsigned idx;

	idx = core_mmu_va2idx(&pager_alias_tbl_info, (vaddr_t)pmem->va_alias);
	core_mmu_get_entry(&pager_alias_tbl_info, idx, &pa, NULL);
	return pa;
}

static bool decrypt_page(struct pager_rw_pstate *rwp, const void *src,
			 void *dst)
{
	struct pager_aes_gcm_iv iv = {
		{ (vaddr_t)rwp, rwp->iv >> 32, rwp->iv }
	};

	return pager_aes_gcm_decrypt(pager_ae_key, sizeof(pager_ae_key),
				     &iv, rwp->tag, src, dst, SMALL_PAGE_SIZE);
}

static void encrypt_page(struct pager_rw_pstate *rwp, void *src, void *dst)
{
	struct pager_aes_gcm_iv iv;

	assert((rwp->iv + 1) > rwp->iv);
	rwp->iv++;
	/*
	 * The IV is constructed as recommended in section "8.2.1
	 * Deterministic Construction" of "Recommendation for Block Cipher
	 * Modes of Operation: Galois/Counter Mode (GCM) and GMAC",
	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
	 */
	iv.iv[0] = (vaddr_t)rwp;
	iv.iv[1] = rwp->iv >> 32;
	iv.iv[2] = rwp->iv;

	if (!pager_aes_gcm_encrypt(pager_ae_key, sizeof(pager_ae_key),
				   &iv, rwp->tag, src, dst, SMALL_PAGE_SIZE))
		panic();
}
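/*
 * Worked example of the IV construction above, with made-up values.
 * The 96-bit IV is a fixed field plus an invocation field per
 * SP 800-38D 8.2.1: the address of the per-page pstate is distinct for
 * every page for the lifetime of the key, and the 64-bit counter is
 * incremented before every encryption (the assert catches a wrap).
 *
 *	rwp at address 0x4e002140, rwp->iv == 0x0000000100000005:
 *		iv.iv[0] = 0x4e002140	(fixed field)
 *		iv.iv[1] = 0x00000001	(counter, high word)
 *		iv.iv[2] = 0x00000005	(counter, low word)
 *
 * Together with the key being regenerated on every boot (see
 * generate_ae_key()), this ensures no (key, IV) pair is ever reused.
 */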
static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
				void *va_alias)
{
	size_t idx = (page_va - area->base) >> SMALL_PAGE_SHIFT;
	const void *stored_page = area->store + idx * SMALL_PAGE_SIZE;

	if (!(area->flags & TEE_MATTR_PW)) {
		const void *hash = area->u.hashes + idx * TEE_SHA256_HASH_SIZE;

		memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
		incr_ro_hits();

		if (hash_sha256_check(hash, va_alias, SMALL_PAGE_SIZE) !=
				TEE_SUCCESS) {
			EMSG("PH 0x%" PRIxVA " failed", page_va);
			panic();
		}
	} else if (area->flags & TEE_MATTR_LOCKED) {
		FMSG("Zero init %p %#" PRIxVA, va_alias, page_va);
		memset(va_alias, 0, SMALL_PAGE_SIZE);
	} else {
		FMSG("Restore %p %#" PRIxVA " iv %#" PRIx64,
		     va_alias, page_va, area->u.rwp[idx].iv);
		if (!area->u.rwp[idx].iv)
			memset(va_alias, 0, SMALL_PAGE_SIZE);
		else if (!decrypt_page(&area->u.rwp[idx], stored_page,
				       va_alias)) {
			EMSG("PH 0x%" PRIxVA " failed", page_va);
			panic();
		}
		incr_rw_hits();
	}
}

static void tee_pager_save_page(struct tee_pager_pmem *pmem, uint32_t attr)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	const uint32_t dirty_bits = TEE_MATTR_PW | TEE_MATTR_UW |
				    TEE_MATTR_HIDDEN_DIRTY_BLOCK;

	assert(!(pmem->area->flags & TEE_MATTR_LOCKED));

	if (attr & dirty_bits) {
		size_t idx = pmem->pgidx - core_mmu_va2idx(ti,
							   pmem->area->base);
		void *stored_page = pmem->area->store + idx * SMALL_PAGE_SIZE;

		assert(pmem->area->flags & TEE_MATTR_PW);
		encrypt_page(&pmem->area->u.rwp[idx], pmem->va_alias,
			     stored_page);
		FMSG("Saved %#" PRIxVA " iv %#" PRIx64,
		     core_mmu_idx2va(ti, pmem->pgidx),
		     pmem->area->u.rwp[idx].iv);
	}
}

static bool tee_pager_unhide_page(vaddr_t page_va)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	struct tee_pager_pmem *pmem;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;

		if (pmem->pgidx == INVALID_PGIDX)
			continue;

		core_mmu_get_entry(ti, pmem->pgidx, &pa, &attr);

		if (!(attr &
		      (TEE_MATTR_HIDDEN_BLOCK | TEE_MATTR_HIDDEN_DIRTY_BLOCK)))
			continue;

		if (core_mmu_va2idx(ti, page_va) == pmem->pgidx) {
			uint32_t a = get_area_mattr(pmem->area);

			/* Page is hidden, show it and move it to the back */
			assert(pa == get_pmem_pa(pmem));
			/*
			 * If it's not a dirty block, then it should be
			 * read only.
			 */
			if (!(attr & TEE_MATTR_HIDDEN_DIRTY_BLOCK))
				a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
			else
				FMSG("Unhide %#" PRIxVA, page_va);
			core_mmu_set_entry(ti, pmem->pgidx, pa, a);

			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);

			/* TODO only invalidate entry touched above */
			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

			incr_hidden_hits();
			return true;
		}
	}

	return false;
}

static void tee_pager_hide_pages(void)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	struct tee_pager_pmem *pmem;
	size_t n = 0;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;
		uint32_t a;

		if (n >= TEE_PAGER_NHIDE)
			break;
		n++;

		/*
		 * We cannot hide pages when pmem->area is not defined, as
		 * unhide requires pmem->area to be defined.
		 */
		if (!pmem->area)
			continue;

		core_mmu_get_entry(ti, pmem->pgidx, &pa, &attr);
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		assert(pa == get_pmem_pa(pmem));
		if (attr & (TEE_MATTR_PW | TEE_MATTR_UW)) {
			a = TEE_MATTR_HIDDEN_DIRTY_BLOCK;
			FMSG("Hide %#" PRIxVA,
			     ti->va_base + pmem->pgidx * SMALL_PAGE_SIZE);
		} else {
			a = TEE_MATTR_HIDDEN_BLOCK;
		}
		core_mmu_set_entry(ti, pmem->pgidx, pa, a);
	}

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}
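/*
 * Together, tee_pager_hide_pages() and tee_pager_unhide_page() turn the
 * plain FIFO pmem list into an approximation of LRU: the oldest third
 * of the resident pages (TEE_PAGER_NHIDE) have their entries made
 * invalid to the MMU while a software "hidden" attribute remembers
 * them. Touching a hidden page takes a cheap soft fault that only
 * restores the entry and requeues the page at the tail, so pages that
 * are genuinely idle drift to the head where eviction happens. The
 * dirty flavor of the hidden attribute preserves the fact that the
 * page must be re-encrypted to its store if it is eventually evicted.
 */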
/*
 * Find mapped pmem, hide it, and move it to the pageable pmem list.
 * Returns false if the page was not mapped, true if it was.
 */
static bool tee_pager_release_one_phys(vaddr_t page_va)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	struct tee_pager_pmem *pmem;
	unsigned pgidx;
	paddr_t pa;
	uint32_t attr;

	pgidx = core_mmu_va2idx(ti, page_va);
	core_mmu_get_entry(ti, pgidx, &pa, &attr);

	FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);

	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
		if (pmem->pgidx != pgidx)
			continue;

		assert(pa == get_pmem_pa(pmem));
		core_mmu_set_entry(ti, pgidx, 0, 0);
		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
		pmem->area = NULL;
		pmem->pgidx = INVALID_PGIDX;
		tee_pager_npages++;
		set_npages();
		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
		incr_zi_released();
		return true;
	}

	return false;
}

/* Finds the oldest page and unmaps it from its old virtual address */
static struct tee_pager_pmem *tee_pager_get_page(uint32_t next_area_flags)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	struct tee_pager_pmem *pmem;

	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
	if (!pmem) {
		EMSG("No pmem entries");
		return NULL;
	}
	if (pmem->pgidx != INVALID_PGIDX) {
		uint32_t a;

		core_mmu_get_entry(ti, pmem->pgidx, NULL, &a);
		core_mmu_set_entry(ti, pmem->pgidx, 0, 0);
		/* TODO only invalidate entries touched above */
		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
		tee_pager_save_page(pmem, a);
	}

	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
	pmem->pgidx = INVALID_PGIDX;
	pmem->area = NULL;
	if (next_area_flags & TEE_MATTR_LOCKED) {
		/* Move page to lock list */
		TEE_ASSERT(tee_pager_npages > 0);
		tee_pager_npages--;
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
	} else {
		/* Move page to back */
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	}

	return pmem;
}

static bool pager_update_permissions(struct tee_pager_area *area,
				     struct abort_info *ai)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	unsigned pgidx = core_mmu_va2idx(ti, ai->va);
	uint32_t attr;
	paddr_t pa;

	core_mmu_get_entry(ti, pgidx, &pa, &attr);

	/* Not mapped */
	if (!(attr & TEE_MATTR_VALID_BLOCK))
		return false;

	/* Not readable, should not happen */
	if (!(attr & TEE_MATTR_PR)) {
		abort_print_error(ai);
		panic();
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		if (ai->abort_type == ABORT_TYPE_PREFETCH &&
		    !(attr & TEE_MATTR_PX)) {
			/* Attempting to execute from a non-executable page */
			abort_print_error(ai);
			panic();
		}
		/* Since the page is mapped now it's OK */
		return true;
	case CORE_MMU_FAULT_WRITE_PERMISSION:
		if (!(area->flags & TEE_MATTR_PW)) {
			/* Attempting to write to an RO page */
			abort_print_error(ai);
			panic();
		}
		if (!(attr & TEE_MATTR_PW)) {
			FMSG("Dirty %p", (void *)(ai->va & ~SMALL_PAGE_MASK));
			core_mmu_set_entry(ti, pgidx, pa, attr | TEE_MATTR_PW);
			/* TODO only invalidate entry above */
			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
		}
		/* Since the permissions have been updated now it's OK */
		return true;
	default:
		/* Some fault we can't deal with */
		abort_print_error(ai);
		panic();
	}
}
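/*
 * Summary of how pager_update_permissions() resolves a fault on an
 * already-mapped page:
 *
 *	fault type		page state	action
 *	----------		----------	------
 *	translation/read	readable	done (other core raced us)
 *	prefetch on !PX		any		panic (exec from no-exec)
 *	write, RO area		any		panic (write to read-only)
 *	write, RW area		clean		set TEE_MATTR_PW (now dirty)
 *	write, RW area		dirty		done (other core raced us)
 *
 * Mapping RW pages read-only until their first write is what lets
 * tee_pager_save_page() skip re-encrypting pages that were never
 * modified while resident.
 */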
#ifdef CFG_TEE_CORE_DEBUG
static void stat_handle_fault(void)
{
	static size_t num_faults;
	static size_t min_npages = SIZE_MAX;
	static size_t total_min_npages = SIZE_MAX;

	num_faults++;
	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
		DMSG("nfaults %zu npages %zu (min %zu)",
		     num_faults, tee_pager_npages, min_npages);
		min_npages = tee_pager_npages; /* reset */
	}
	if (tee_pager_npages < min_npages)
		min_npages = tee_pager_npages;
	if (tee_pager_npages < total_min_npages)
		total_min_npages = tee_pager_npages;
}
#else
static void stat_handle_fault(void)
{
}
#endif

bool tee_pager_handle_fault(struct abort_info *ai)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	struct tee_pager_area *area;
	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
	uint32_t exceptions;
	bool ret;

#ifdef TEE_PAGER_DEBUG_PRINT
	abort_print(ai);
#endif

	/*
	 * We're updating pages that can affect several active CPUs at a
	 * time below. We end up here because a thread tries to access some
	 * memory that isn't available. We have to be careful when making
	 * that memory available as other threads may succeed in accessing
	 * that address the moment after we've made it available.
	 *
	 * That means that we can't just map the memory and populate the
	 * page, instead we use the aliased mapping to populate the page
	 * and once everything is ready we map it.
	 */
	exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	cpu_spin_lock(&pager_lock);

	stat_handle_fault();

	/* Check if the access is valid */
	area = tee_pager_find_area(ai->va);
	if (!area) {
		EMSG("Invalid addr 0x%" PRIxVA, ai->va);
		ret = false;
		goto out;
	}

	if (!tee_pager_unhide_page(page_va)) {
		struct tee_pager_pmem *pmem = NULL;
		uint32_t attr;

		/*
		 * The page wasn't hidden, but some other core may have
		 * updated the table entry before we got here, or we need
		 * to make a read-only page read-write (dirty).
		 */
		if (pager_update_permissions(area, ai)) {
			/*
			 * This kind of access is OK with the mapping; we're
			 * done here because the fault has already been
			 * dealt with by another core.
			 */
			ret = true;
			goto out;
		}

		pmem = tee_pager_get_page(area->flags);
		if (!pmem) {
			abort_print(ai);
			panic();
		}

		/* Load page code & data */
		tee_pager_load_page(area, page_va, pmem->va_alias);

		/*
		 * We've updated the page using the aliased mapping and
		 * some cache maintenance is now needed if it's an
		 * executable page.
		 *
		 * Since the d-cache is a physically-indexed,
		 * physically-tagged (PIPT) cache we can clean the aliased
		 * address instead of the real virtual address.
		 *
		 * The i-cache can also be PIPT, but may be something else
		 * too; to keep it simple we invalidate the entire i-cache.
		 * As a future optimization we may invalidate only the
		 * aliased area if it is a PIPT cache, else the entire
		 * cache.
		 */
		if (area->flags & TEE_MATTR_PX) {
			/*
			 * Doing these operations to LoUIS (Level of
			 * Unification, Inner Shareable) would be enough
			 */
			cache_maintenance_l1(DCACHE_AREA_CLEAN,
					     pmem->va_alias, SMALL_PAGE_SIZE);

			cache_maintenance_l1(ICACHE_INVALIDATE, NULL, 0);
		}

		pmem->area = area;
		pmem->pgidx = core_mmu_va2idx(ti, ai->va);
		attr = get_area_mattr(area) & ~(TEE_MATTR_PW | TEE_MATTR_UW);
		core_mmu_set_entry(ti, pmem->pgidx, get_pmem_pa(pmem), attr);

		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA,
		     core_mmu_idx2va(ti, pmem->pgidx), get_pmem_pa(pmem));
	}

	tee_pager_hide_pages();
	ret = true;
out:
	cpu_spin_unlock(&pager_lock);
	thread_unmask_exceptions(exceptions);
	return ret;
}
void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	size_t n;

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);

	/* Set up memory */
	for (n = 0; n < npages; n++) {
		struct tee_pager_pmem *pmem;
		tee_vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
		unsigned pgidx = core_mmu_va2idx(ti, va);
		paddr_t pa;
		uint32_t attr;

		core_mmu_get_entry(ti, pgidx, &pa, &attr);

		/* Ignore unmapped pages/blocks */
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		pmem = malloc(sizeof(struct tee_pager_pmem));
		if (pmem == NULL) {
			EMSG("Can't allocate memory");
			panic();
		}

		pmem->va_alias = pager_add_alias_page(pa);

		if (unmap) {
			pmem->area = NULL;
			pmem->pgidx = INVALID_PGIDX;
			core_mmu_set_entry(ti, pgidx, 0, 0);
		} else {
			/*
			 * The page is still mapped, let's assign the area
			 * and update the protection bits accordingly.
			 */
			pmem->area = tee_pager_find_area(va);
			pmem->pgidx = pgidx;
			assert(pa == get_pmem_pa(pmem));
			core_mmu_set_entry(ti, pgidx, pa,
					   get_area_mattr(pmem->area));
		}

		tee_pager_npages++;
		incr_npages_all();
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	}

	/* Invalidate secure TLB */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

void tee_pager_release_phys(void *addr, size_t size)
{
	bool unmapped = false;
	vaddr_t va = (vaddr_t)addr;
	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	cpu_spin_lock(&pager_lock);

	for (va = begin; va < end; va += SMALL_PAGE_SIZE)
		unmapped |= tee_pager_release_one_phys(va);

	/* Invalidate secure TLB */
	if (unmapped)
		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

	cpu_spin_unlock(&pager_lock);
	thread_set_exceptions(exceptions);
}
KEEP_PAGER(tee_pager_release_phys);

void *tee_pager_alloc(size_t size, uint32_t flags)
{
	tee_mm_entry_t *mm;
	uint32_t f = TEE_MATTR_PRW | (flags & TEE_MATTR_LOCKED);

	if (!size)
		return NULL;

	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
	if (!mm)
		return NULL;

	tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
				f, NULL, NULL);

	return (void *)tee_mm_get_smem(mm);
}
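/*
 * Hypothetical caller of the two routines above, for reference only: a
 * TEE_MATTR_LOCKED allocation stays resident once faulted in, and
 * tee_pager_release_phys() hands the physical pages back to the pager
 * when the contents no longer matter.
 */
#if 0
static void example_locked_buffer(void)
{
	size_t sz = 2 * SMALL_PAGE_SIZE;
	uint8_t *buf = tee_pager_alloc(sz, TEE_MATTR_LOCKED);

	if (!buf)
		panic();

	buf[0] = 0xaa;	/* faults the first page in and locks it */

	/* Done with the contents: let the pager reclaim the frames */
	tee_pager_release_phys(buf, sz);
}
#endif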