// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2019-2021, Linaro Limited
 */

#include <config.h>
#include <crypto/crypto.h>
#include <crypto/internal_aes-gcm.h>
#include <initcall.h>
#include <kernel/boot.h>
#include <kernel/panic.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/fobj.h>
#include <mm/tee_mm.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

#ifdef CFG_WITH_PAGER

#define RWP_AE_KEY_BITS 256

struct rwp_aes_gcm_iv {
        uint32_t iv[3];
};

#define RWP_AES_GCM_TAG_LEN 16

struct rwp_state {
        uint64_t iv;
        uint8_t tag[RWP_AES_GCM_TAG_LEN];
};

/*
 * Note that this struct is padded to a size which is a power of 2, this
 * guarantees that this state will not span two pages. This avoids a corner
 * case in the pager when making the state available.
 */
struct rwp_state_padded {
        struct rwp_state state;
        uint64_t pad;
};
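/*
 * Concretely: sizeof(struct rwp_state) is 8 (iv) + 16 (tag) = 24 bytes,
 * and the pad brings sizeof(struct rwp_state_padded) up to 32, a power
 * of two. Assuming 4 KiB small pages, a page then holds exactly 128
 * entries and no entry can straddle a page boundary. The power-of-two
 * property is checked with a COMPILE_TIME_ASSERT() in
 * rwp_paged_iv_alloc() below.
 */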

struct fobj_rwp_unpaged_iv {
        uint8_t *store;
        struct rwp_state *state;
        struct fobj fobj;
};

struct fobj_rwp_paged_iv {
        size_t idx;
        struct fobj fobj;
};

const struct fobj_ops ops_rwp_paged_iv;
const struct fobj_ops ops_rwp_unpaged_iv;

static struct internal_aes_gcm_key rwp_ae_key;

static struct rwp_state_padded *rwp_state_base;
static uint8_t *rwp_store_base;

static void fobj_init(struct fobj *fobj, const struct fobj_ops *ops,
                      unsigned int num_pages)
{
        fobj->ops = ops;
        fobj->num_pages = num_pages;
        refcount_set(&fobj->refc, 1);
        TAILQ_INIT(&fobj->regions);
}

static void fobj_uninit(struct fobj *fobj)
{
        assert(!refcount_val(&fobj->refc));
        assert(TAILQ_EMPTY(&fobj->regions));
        tee_pager_invalidate_fobj(fobj);
}

static TEE_Result rwp_load_page(void *va, struct rwp_state *state,
                                const uint8_t *src)
{
        struct rwp_aes_gcm_iv iv = {
                .iv = { (vaddr_t)state, state->iv >> 32, state->iv }
        };

        if (!state->iv) {
                /*
                 * The IV is still zero, which means that this is a
                 * previously unused page.
                 */
                memset(va, 0, SMALL_PAGE_SIZE);
                return TEE_SUCCESS;
        }

        return internal_aes_gcm_dec(&rwp_ae_key, &iv, sizeof(iv),
                                    NULL, 0, src, SMALL_PAGE_SIZE, va,
                                    state->tag, sizeof(state->tag));
}

static TEE_Result rwp_save_page(const void *va, struct rwp_state *state,
                                uint8_t *dst)
{
        size_t tag_len = sizeof(state->tag);
        struct rwp_aes_gcm_iv iv = { };

        assert(state->iv + 1 > state->iv);

        state->iv++;

        /*
         * IV is constructed as recommended in section "8.2.1 Deterministic
         * Construction" of "Recommendation for Block Cipher Modes of
         * Operation: Galois/Counter Mode (GCM) and GMAC",
         * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
         */
        iv.iv[0] = (vaddr_t)state;
        iv.iv[1] = state->iv >> 32;
        iv.iv[2] = state->iv;

        return internal_aes_gcm_enc(&rwp_ae_key, &iv, sizeof(iv),
                                    NULL, 0, va, SMALL_PAGE_SIZE, dst,
                                    state->tag, &tag_len);
}
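/*
 * Illustration of the IV construction above: for a state entry at
 * virtual address 0x1e301020 (an arbitrary example) whose counter has
 * just been incremented to 5, the 96-bit IV becomes
 * { 0x1e301020, 0x00000000, 0x00000005 }. The (truncated) state address
 * acts as the fixed field and the 64-bit counter as the invocation
 * field, so every save of a given page uses a fresh IV. The assert()
 * above catches the practically unreachable counter wrap-around.
 */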

static struct rwp_state_padded *idx_to_state_padded(size_t idx)
{
        assert(rwp_state_base);
        return rwp_state_base + idx;
}

static uint8_t *idx_to_store(size_t idx)
{
        assert(rwp_store_base);
        return rwp_store_base + idx * SMALL_PAGE_SIZE;
}

static struct fobj *rwp_paged_iv_alloc(unsigned int num_pages)
{
        struct fobj_rwp_paged_iv *rwp = NULL;
        tee_mm_entry_t *mm = NULL;
        size_t size = 0;

        COMPILE_TIME_ASSERT(IS_POWER_OF_TWO(sizeof(struct rwp_state_padded)));

        rwp = calloc(1, sizeof(*rwp));
        if (!rwp)
                return NULL;

        if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
                goto err;
        mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
        if (!mm)
                goto err;
        rwp->idx = (tee_mm_get_smem(mm) - tee_mm_sec_ddr.lo) / SMALL_PAGE_SIZE;

        memset(idx_to_state_padded(rwp->idx), 0,
               num_pages * sizeof(struct rwp_state_padded));

        fobj_init(&rwp->fobj, &ops_rwp_paged_iv, num_pages);

        return &rwp->fobj;
err:
        tee_mm_free(mm);
        free(rwp);

        return NULL;
}

static struct fobj_rwp_paged_iv *to_rwp_paged_iv(struct fobj *fobj)
{
        assert(fobj->ops == &ops_rwp_paged_iv);

        return container_of(fobj, struct fobj_rwp_paged_iv, fobj);
}

static TEE_Result rwp_paged_iv_load_page(struct fobj *fobj,
                                         unsigned int page_idx, void *va)
{
        struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
        uint8_t *src = idx_to_store(rwp->idx) + page_idx * SMALL_PAGE_SIZE;
        struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx);

        assert(refcount_val(&fobj->refc));
        assert(page_idx < fobj->num_pages);

        return rwp_load_page(va, &st->state, src);
}
DECLARE_KEEP_PAGER(rwp_paged_iv_load_page);

static TEE_Result rwp_paged_iv_save_page(struct fobj *fobj,
                                         unsigned int page_idx, const void *va)
{
        struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
        uint8_t *dst = idx_to_store(rwp->idx) + page_idx * SMALL_PAGE_SIZE;
        struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx);

        assert(page_idx < fobj->num_pages);

        if (!refcount_val(&fobj->refc)) {
                /*
                 * This fobj is being torn down; it just hasn't had the
                 * time to call tee_pager_invalidate_fobj() yet.
                 */
                assert(TAILQ_EMPTY(&fobj->regions));
                return TEE_SUCCESS;
        }

        return rwp_save_page(va, &st->state, dst);
}
DECLARE_KEEP_PAGER(rwp_paged_iv_save_page);

static void rwp_paged_iv_free(struct fobj *fobj)
{
        struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
        paddr_t pa = rwp->idx * SMALL_PAGE_SIZE + tee_mm_sec_ddr.lo;
        tee_mm_entry_t *mm = tee_mm_find(&tee_mm_sec_ddr, pa);

        assert(mm);

        fobj_uninit(fobj);
        tee_mm_free(mm);
        free(rwp);
}

static vaddr_t rwp_paged_iv_get_iv_vaddr(struct fobj *fobj,
                                         unsigned int page_idx)
{
        struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
        struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx);

        assert(page_idx < fobj->num_pages);
        return (vaddr_t)&st->state & ~SMALL_PAGE_MASK;
}
DECLARE_KEEP_PAGER(rwp_paged_iv_get_iv_vaddr);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_rwp_paged_iv
        __weak __relrodata_unpaged("ops_rwp_paged_iv") = {
        .free = rwp_paged_iv_free,
        .load_page = rwp_paged_iv_load_page,
        .save_page = rwp_paged_iv_save_page,
        .get_iv_vaddr = rwp_paged_iv_get_iv_vaddr,
};

static struct fobj *rwp_unpaged_iv_alloc(unsigned int num_pages)
{
        struct fobj_rwp_unpaged_iv *rwp = NULL;
        tee_mm_entry_t *mm = NULL;
        size_t size = 0;

        rwp = calloc(1, sizeof(*rwp));
        if (!rwp)
                return NULL;

        rwp->state = calloc(num_pages, sizeof(*rwp->state));
        if (!rwp->state)
                goto err_free_rwp;

        if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
                goto err_free_state;
        mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
        if (!mm)
                goto err_free_state;
        rwp->store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM, size);
        assert(rwp->store);

        fobj_init(&rwp->fobj, &ops_rwp_unpaged_iv, num_pages);

        return &rwp->fobj;

err_free_state:
        free(rwp->state);
err_free_rwp:
        free(rwp);
        return NULL;
}

static struct fobj_rwp_unpaged_iv *to_rwp_unpaged_iv(struct fobj *fobj)
{
        assert(fobj->ops == &ops_rwp_unpaged_iv);

        return container_of(fobj, struct fobj_rwp_unpaged_iv, fobj);
}

static TEE_Result rwp_unpaged_iv_load_page(struct fobj *fobj,
                                           unsigned int page_idx, void *va)
{
        struct fobj_rwp_unpaged_iv *rwp = to_rwp_unpaged_iv(fobj);
        uint8_t *src = rwp->store + page_idx * SMALL_PAGE_SIZE;

        assert(refcount_val(&fobj->refc));
        assert(page_idx < fobj->num_pages);

        return rwp_load_page(va, rwp->state + page_idx, src);
}
DECLARE_KEEP_PAGER(rwp_unpaged_iv_load_page);

static TEE_Result rwp_unpaged_iv_save_page(struct fobj *fobj,
                                           unsigned int page_idx,
                                           const void *va)
{
        struct fobj_rwp_unpaged_iv *rwp = to_rwp_unpaged_iv(fobj);
        uint8_t *dst = rwp->store + page_idx * SMALL_PAGE_SIZE;

        assert(page_idx < fobj->num_pages);

        if (!refcount_val(&fobj->refc)) {
                /*
                 * This fobj is being torn down; it just hasn't had the
                 * time to call tee_pager_invalidate_fobj() yet.
                 */
                assert(TAILQ_EMPTY(&fobj->regions));
                return TEE_SUCCESS;
        }

        return rwp_save_page(va, rwp->state + page_idx, dst);
}
DECLARE_KEEP_PAGER(rwp_unpaged_iv_save_page);

static void rwp_unpaged_iv_free(struct fobj *fobj)
{
        struct fobj_rwp_unpaged_iv *rwp = NULL;
        tee_mm_entry_t *mm = NULL;

        if (IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV))
                panic();

        rwp = to_rwp_unpaged_iv(fobj);
        mm = tee_mm_find(&tee_mm_sec_ddr, virt_to_phys(rwp->store));

        assert(mm);

        fobj_uninit(fobj);
        tee_mm_free(mm);
        free(rwp->state);
        free(rwp);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_rwp_unpaged_iv
        __weak __relrodata_unpaged("ops_rwp_unpaged_iv") = {
        .free = rwp_unpaged_iv_free,
        .load_page = rwp_unpaged_iv_load_page,
        .save_page = rwp_unpaged_iv_save_page,
};
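/*
 * Summary of the two read/write backends: with CFG_CORE_PAGE_TAG_AND_IV
 * enabled the per-page rwp_state entries live in one global array,
 * indexed by the page's position in the tee_mm_sec_ddr pool, and that
 * array is itself demand-paged through the IV region set up in
 * rwp_init() below. With the option disabled each fobj instead keeps a
 * heap-allocated state array which is always mapped.
 */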

static TEE_Result rwp_init(void)
{
        uint8_t key[RWP_AE_KEY_BITS / 8] = { 0 };
        struct fobj *fobj = NULL;
        size_t num_pool_pages = 0;
        size_t num_fobj_pages = 0;

        if (crypto_rng_read(key, sizeof(key)) != TEE_SUCCESS)
                panic("failed to generate random");
        if (crypto_aes_expand_enc_key(key, sizeof(key), rwp_ae_key.data,
                                      sizeof(rwp_ae_key.data),
                                      &rwp_ae_key.rounds))
                panic("failed to expand key");

        if (!IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV))
                return TEE_SUCCESS;

        assert(tee_mm_sec_ddr.size && !(tee_mm_sec_ddr.size & SMALL_PAGE_MASK));

        num_pool_pages = tee_mm_sec_ddr.size / SMALL_PAGE_SIZE;
        num_fobj_pages = ROUNDUP(num_pool_pages * sizeof(*rwp_state_base),
                                 SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;

        /*
         * Each page in the pool needs a struct rwp_state.
         *
         * This isn't entirely true: the pages not used by
         * fobj_rw_paged_alloc() don't need any. A future optimization
         * may try to avoid allocating for such pages.
         */
        fobj = rwp_unpaged_iv_alloc(num_fobj_pages);
        if (!fobj)
                panic();

        rwp_state_base = (void *)tee_pager_init_iv_region(fobj);
        assert(rwp_state_base);

        rwp_store_base = phys_to_virt(tee_mm_sec_ddr.lo, MEM_AREA_TA_RAM,
                                      tee_mm_sec_ddr.size);
        assert(rwp_store_base);

        return TEE_SUCCESS;
}
driver_init_late(rwp_init);
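/*
 * Worked example for rwp_init() above, with illustrative sizes: a 4 MiB
 * tee_mm_sec_ddr pool gives num_pool_pages = 1024. With
 * sizeof(struct rwp_state_padded) = 32 the state array needs 32 KiB,
 * that is num_fobj_pages = 8 pages, backed by the unpaged-iv fobj that
 * tee_pager_init_iv_region() installs.
 */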

struct fobj *fobj_rw_paged_alloc(unsigned int num_pages)
{
        assert(num_pages);

        if (IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV))
                return rwp_paged_iv_alloc(num_pages);
        else
                return rwp_unpaged_iv_alloc(num_pages);
}

struct fobj_rop {
        uint8_t *hashes;
        uint8_t *store;
        struct fobj fobj;
};

const struct fobj_ops ops_ro_paged;

static void rop_init(struct fobj_rop *rop, const struct fobj_ops *ops,
                     unsigned int num_pages, void *hashes, void *store)
{
        rop->hashes = hashes;
        rop->store = store;
        fobj_init(&rop->fobj, ops, num_pages);
}

struct fobj *fobj_ro_paged_alloc(unsigned int num_pages, void *hashes,
                                 void *store)
{
        struct fobj_rop *rop = NULL;

        assert(num_pages && hashes && store);

        rop = calloc(1, sizeof(*rop));
        if (!rop)
                return NULL;

        rop_init(rop, &ops_ro_paged, num_pages, hashes, store);

        return &rop->fobj;
}

static struct fobj_rop *to_rop(struct fobj *fobj)
{
        assert(fobj->ops == &ops_ro_paged);

        return container_of(fobj, struct fobj_rop, fobj);
}

static void rop_uninit(struct fobj_rop *rop)
{
        fobj_uninit(&rop->fobj);
        tee_mm_free(tee_mm_find(&tee_mm_sec_ddr, virt_to_phys(rop->store)));
        free(rop->hashes);
}

static void rop_free(struct fobj *fobj)
{
        struct fobj_rop *rop = to_rop(fobj);

        rop_uninit(rop);
        free(rop);
}

static TEE_Result rop_load_page_helper(struct fobj_rop *rop,
                                       unsigned int page_idx, void *va)
{
        const uint8_t *hash = rop->hashes + page_idx * TEE_SHA256_HASH_SIZE;
        const uint8_t *src = rop->store + page_idx * SMALL_PAGE_SIZE;

        assert(refcount_val(&rop->fobj.refc));
        assert(page_idx < rop->fobj.num_pages);
        memcpy(va, src, SMALL_PAGE_SIZE);

        return hash_sha256_check(hash, va, SMALL_PAGE_SIZE);
}

static TEE_Result rop_load_page(struct fobj *fobj, unsigned int page_idx,
                                void *va)
{
        return rop_load_page_helper(to_rop(fobj), page_idx, va);
}
DECLARE_KEEP_PAGER(rop_load_page);

static TEE_Result rop_save_page(struct fobj *fobj __unused,
                                unsigned int page_idx __unused,
                                const void *va __unused)
{
        return TEE_ERROR_GENERIC;
}
DECLARE_KEEP_PAGER(rop_save_page);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_ro_paged
        __weak __relrodata_unpaged("ops_ro_paged") = {
        .free = rop_free,
        .load_page = rop_load_page,
        .save_page = rop_save_page,
};

#ifdef CFG_CORE_ASLR
/*
 * When using relocated pages the relocation information must be applied
 * before the pages can be used. With read-only paging the content is only
 * integrity protected, so relocation cannot be applied on pages in the less
 * secure "store" or the load_address selected by ASLR could be given away.
 * This means that each time a page has been loaded and verified it has to
 * have its relocation information applied before it can be used.
 *
 * Only the relative relocations are supported, this allows a rather compact
 * representation of the needed relocation information in this struct.
 * r_offset is replaced with the offset into the page that needs to be
 * updated, this number can never be larger than SMALL_PAGE_SIZE so a
 * uint16_t can be used to represent it.
 *
 * All relocations are converted and stored in @relocs. @page_reloc_idx is
 * an array of length @rop.fobj.num_pages with an entry for each page. If
 * @page_reloc_idx[page_idx] isn't UINT16_MAX it's an index into @relocs.
 */
struct fobj_ro_reloc_paged {
        uint16_t *page_reloc_idx;
        uint16_t *relocs;
        unsigned int num_relocs;
        struct fobj_rop rop;
};
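/*
 * Encoding example with made-up offsets, assuming 4 KiB pages: a
 * two-page fobj with relocations at fobj-relative offsets 0x30 and 0x38
 * in page 0 and 0x1008 in page 1 is stored as
 *   page_reloc_idx = { 0, 2 }
 *   relocs         = { 0x30, 0x38, 0x8 }
 * so rrp_load_page() applies relocs[0..1] after loading page 0 and
 * relocs[2] after loading page 1.
 */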

const struct fobj_ops ops_ro_reloc_paged;

static unsigned int get_num_rels(unsigned int num_pages,
                                 unsigned int reloc_offs,
                                 const uint32_t *reloc, unsigned int num_relocs)
{
        const unsigned int align_mask __maybe_unused = sizeof(long) - 1;
        unsigned int nrels = 0;
        unsigned int n = 0;
        vaddr_t offs = 0;

        /*
         * Count the number of relocations which are needed for these
         * pages. Also check that the data is well formed, only expected
         * relocations and sorted in order of address which it applies to.
         */
        for (; n < num_relocs; n++) {
                assert(IS_ALIGNED_WITH_TYPE(reloc[n], unsigned long));
                assert(offs < reloc[n]); /* check that it's sorted */
                offs = reloc[n];
                if (offs >= reloc_offs &&
                    offs <= reloc_offs + num_pages * SMALL_PAGE_SIZE)
                        nrels++;
        }

        return nrels;
}

static void init_rels(struct fobj_ro_reloc_paged *rrp, unsigned int reloc_offs,
                      const uint32_t *reloc, unsigned int num_relocs)
{
        unsigned int npg = rrp->rop.fobj.num_pages;
        unsigned int pg_idx = 0;
        unsigned int reln = 0;
        unsigned int n = 0;
        uint32_t r = 0;

        for (n = 0; n < npg; n++)
                rrp->page_reloc_idx[n] = UINT16_MAX;

        for (n = 0; n < num_relocs; n++) {
                if (reloc[n] < reloc_offs)
                        continue;

                /* r is the offset from beginning of this fobj */
                r = reloc[n] - reloc_offs;

                pg_idx = r / SMALL_PAGE_SIZE;
                if (pg_idx >= npg)
                        break;

                if (rrp->page_reloc_idx[pg_idx] == UINT16_MAX)
                        rrp->page_reloc_idx[pg_idx] = reln;
                rrp->relocs[reln] = r - pg_idx * SMALL_PAGE_SIZE;
                reln++;
        }

        assert(reln == rrp->num_relocs);
}

struct fobj *fobj_ro_reloc_paged_alloc(unsigned int num_pages, void *hashes,
                                       unsigned int reloc_offs,
                                       const void *reloc,
                                       unsigned int reloc_len, void *store)
{
        struct fobj_ro_reloc_paged *rrp = NULL;
        const unsigned int num_relocs = reloc_len / sizeof(uint32_t);
        unsigned int nrels = 0;

        assert(IS_ALIGNED_WITH_TYPE(reloc, uint32_t));
        assert(IS_ALIGNED_WITH_TYPE(reloc_len, uint32_t));
        assert(num_pages && hashes && store);
        if (!reloc_len) {
                assert(!reloc);
                return fobj_ro_paged_alloc(num_pages, hashes, store);
        }
        assert(reloc);

        nrels = get_num_rels(num_pages, reloc_offs, reloc, num_relocs);
        if (!nrels)
                return fobj_ro_paged_alloc(num_pages, hashes, store);

        rrp = calloc(1, sizeof(*rrp) + num_pages * sizeof(uint16_t) +
                        nrels * sizeof(uint16_t));
        if (!rrp)
                return NULL;
        rop_init(&rrp->rop, &ops_ro_reloc_paged, num_pages, hashes, store);
        rrp->page_reloc_idx = (uint16_t *)(rrp + 1);
        rrp->relocs = rrp->page_reloc_idx + num_pages;
        rrp->num_relocs = nrels;
        init_rels(rrp, reloc_offs, reloc, num_relocs);

        return &rrp->rop.fobj;
}
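/*
 * Note that the calloc() in fobj_ro_reloc_paged_alloc() above carves
 * both uint16_t arrays out of the same allocation as the struct itself:
 * page_reloc_idx[] starts right after the struct and relocs[] right
 * after page_reloc_idx[num_pages]. This is why freeing the fobj only
 * needs a single free() of the struct.
 */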

static struct fobj_ro_reloc_paged *to_rrp(struct fobj *fobj)
{
        assert(fobj->ops == &ops_ro_reloc_paged);

        return container_of(fobj, struct fobj_ro_reloc_paged, rop.fobj);
}

static void rrp_free(struct fobj *fobj)
{
        struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);

        rop_uninit(&rrp->rop);
        free(rrp);
}

static TEE_Result rrp_load_page(struct fobj *fobj, unsigned int page_idx,
                                void *va)
{
        struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);
        unsigned int end_rel = rrp->num_relocs;
        TEE_Result res = TEE_SUCCESS;
        unsigned long *where = NULL;
        unsigned int n = 0;

        res = rop_load_page_helper(&rrp->rop, page_idx, va);
        if (res)
                return res;

        /* Find the reloc index of the next page to tell when we're done */
        for (n = page_idx + 1; n < fobj->num_pages; n++) {
                if (rrp->page_reloc_idx[n] != UINT16_MAX) {
                        end_rel = rrp->page_reloc_idx[n];
                        break;
                }
        }

        for (n = rrp->page_reloc_idx[page_idx]; n < end_rel; n++) {
                where = (void *)((vaddr_t)va + rrp->relocs[n]);
                *where += boot_mmu_config.load_offset;
        }

        return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(rrp_load_page);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_ro_reloc_paged
        __weak __relrodata_unpaged("ops_ro_reloc_paged") = {
        .free = rrp_free,
        .load_page = rrp_load_page,
        .save_page = rop_save_page, /* Direct reuse */
};
#endif /*CFG_CORE_ASLR*/

const struct fobj_ops ops_locked_paged;
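/*
 * Locked-paged fobjs back pages which the pager is expected to keep
 * resident once populated: load_page() only supplies a zeroed page and
 * save_page() fails on purpose since such pages are never written back.
 */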

struct fobj *fobj_locked_paged_alloc(unsigned int num_pages)
{
        struct fobj *f = NULL;

        assert(num_pages);

        f = calloc(1, sizeof(*f));
        if (!f)
                return NULL;

        fobj_init(f, &ops_locked_paged, num_pages);

        return f;
}

static void lop_free(struct fobj *fobj)
{
        assert(fobj->ops == &ops_locked_paged);
        fobj_uninit(fobj);
        free(fobj);
}

static TEE_Result lop_load_page(struct fobj *fobj __maybe_unused,
                                unsigned int page_idx __maybe_unused,
                                void *va)
{
        assert(fobj->ops == &ops_locked_paged);
        assert(refcount_val(&fobj->refc));
        assert(page_idx < fobj->num_pages);

        memset(va, 0, SMALL_PAGE_SIZE);

        return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(lop_load_page);

static TEE_Result lop_save_page(struct fobj *fobj __unused,
                                unsigned int page_idx __unused,
                                const void *va __unused)
{
        return TEE_ERROR_GENERIC;
}
DECLARE_KEEP_PAGER(lop_save_page);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_locked_paged
        __weak __relrodata_unpaged("ops_locked_paged") = {
        .free = lop_free,
        .load_page = lop_load_page,
        .save_page = lop_save_page,
};
#endif /*CFG_WITH_PAGER*/

#ifndef CFG_PAGED_USER_TA

struct fobj_sec_mem {
        tee_mm_entry_t *mm;
        struct fobj fobj;
};

const struct fobj_ops ops_sec_mem;

struct fobj *fobj_sec_mem_alloc(unsigned int num_pages)
{
        struct fobj_sec_mem *f = calloc(1, sizeof(*f));
        size_t size = 0;
        void *va = NULL;

        if (!f)
                return NULL;

        if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
                goto err;

        f->mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
        if (!f->mm)
                goto err;

        va = phys_to_virt(tee_mm_get_smem(f->mm), MEM_AREA_TA_RAM, size);
        if (!va)
                goto err;

        memset(va, 0, size);
        f->fobj.ops = &ops_sec_mem;
        f->fobj.num_pages = num_pages;
        refcount_set(&f->fobj.refc, 1);

        return &f->fobj;
err:
        tee_mm_free(f->mm);
        free(f);

        return NULL;
}

static struct fobj_sec_mem *to_sec_mem(struct fobj *fobj)
{
        assert(fobj->ops == &ops_sec_mem);

        return container_of(fobj, struct fobj_sec_mem, fobj);
}

static void sec_mem_free(struct fobj *fobj)
{
        struct fobj_sec_mem *f = to_sec_mem(fobj);

        assert(!refcount_val(&fobj->refc));
        tee_mm_free(f->mm);
        free(f);
}

static paddr_t sec_mem_get_pa(struct fobj *fobj, unsigned int page_idx)
{
        struct fobj_sec_mem *f = to_sec_mem(fobj);

        assert(refcount_val(&fobj->refc));
        assert(page_idx < fobj->num_pages);

        return tee_mm_get_smem(f->mm) + page_idx * SMALL_PAGE_SIZE;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_sec_mem __weak __relrodata_unpaged("ops_sec_mem") = {
        .free = sec_mem_free,
        .get_pa = sec_mem_get_pa,
};

#endif /*!CFG_PAGED_USER_TA*/