// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2021, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm.h>
#include <assert.h>
#include <initcall.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tlb_helpers.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/pgt_cache.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu_types.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <sm/optee_smc.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_defines_extensions.h>
#include <tee_api_types.h>
#include <trace.h>
#include <types_ext.h>
#include <user_ta_header.h>
#include <util.h>

#ifdef CFG_PL310
#include <kernel/tee_l2cc_mutex.h>
#endif

#define TEE_MMU_UDATA_ATTR		(TEE_MATTR_VALID_BLOCK | \
					 TEE_MATTR_PRW | TEE_MATTR_URW | \
					 TEE_MATTR_SECURE)
#define TEE_MMU_UCODE_ATTR		(TEE_MATTR_VALID_BLOCK | \
					 TEE_MATTR_PRW | TEE_MATTR_URWX | \
					 TEE_MATTR_SECURE)

#define TEE_MMU_UCACHE_DEFAULT_ATTR	(TEE_MATTR_CACHE_CACHED << \
					 TEE_MATTR_CACHE_SHIFT)

static vaddr_t select_va_in_range(const struct vm_region *prev_reg,
				  const struct vm_region *next_reg,
				  const struct vm_region *reg,
				  size_t pad_begin, size_t pad_end,
				  size_t granul)
{
	const uint32_t f = VM_FLAG_EPHEMERAL | VM_FLAG_PERMANENT |
			   VM_FLAG_SHAREABLE;
	vaddr_t begin_va = 0;
	vaddr_t end_va = 0;
	size_t pad = 0;

	/*
	 * Insert an unmapped entry to separate regions with differing
	 * VM_FLAG_EPHEMERAL, VM_FLAG_PERMANENT or VM_FLAG_SHAREABLE
	 * bits, as such regions must never be contiguous with each other.
	 */
	if (prev_reg->flags && (prev_reg->flags & f) != (reg->flags & f))
		pad = SMALL_PAGE_SIZE;
	else
		pad = 0;

#ifndef CFG_WITH_LPAE
	if ((prev_reg->attr & TEE_MATTR_SECURE) !=
	    (reg->attr & TEE_MATTR_SECURE))
		granul = CORE_MMU_PGDIR_SIZE;
#endif

	if (ADD_OVERFLOW(prev_reg->va, prev_reg->size, &begin_va) ||
	    ADD_OVERFLOW(begin_va, pad_begin, &begin_va) ||
	    ADD_OVERFLOW(begin_va, pad, &begin_va) ||
	    ROUNDUP_OVERFLOW(begin_va, granul, &begin_va))
		return 0;

	if (reg->va) {
		if (reg->va < begin_va)
			return 0;
		begin_va = reg->va;
	}

	if (next_reg->flags && (next_reg->flags & f) != (reg->flags & f))
		pad = SMALL_PAGE_SIZE;
	else
		pad = 0;

#ifndef CFG_WITH_LPAE
	if ((next_reg->attr & TEE_MATTR_SECURE) !=
	    (reg->attr & TEE_MATTR_SECURE))
		granul = CORE_MMU_PGDIR_SIZE;
#endif

	if (ADD_OVERFLOW(begin_va, reg->size, &end_va) ||
	    ADD_OVERFLOW(end_va, pad_end, &end_va) ||
	    ADD_OVERFLOW(end_va, pad, &end_va) ||
	    ROUNDUP_OVERFLOW(end_va, granul, &end_va))
		return 0;

	if (end_va <= next_reg->va) {
		assert(!reg->va || reg->va == begin_va);
		return begin_va;
	}

	return 0;
}
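
/*
 * Worked example for select_va_in_range() (illustrative values only):
 * with prev_reg covering [0x1000, 0x2000), pad_begin = 0x1000, pad = 0
 * and granul = 0x4000, the candidate base is
 * ROUNDUP(0x2000 + 0x1000, 0x4000) = 0x4000. That address is returned
 * only if the region, pad_end included, still ends at or below
 * next_reg->va; otherwise 0 is returned and the caller tries the next
 * gap.
 */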

static size_t get_num_req_pgts(struct user_mode_ctx *uctx, vaddr_t *begin,
			       vaddr_t *end)
{
	vaddr_t b;
	vaddr_t e;

	if (TAILQ_EMPTY(&uctx->vm_info.regions)) {
		core_mmu_get_user_va_range(&b, NULL);
		e = b;
	} else {
		struct vm_region *r;

		b = TAILQ_FIRST(&uctx->vm_info.regions)->va;
		r = TAILQ_LAST(&uctx->vm_info.regions, vm_region_head);
		e = r->va + r->size;
		b = ROUNDDOWN(b, CORE_MMU_PGDIR_SIZE);
		e = ROUNDUP(e, CORE_MMU_PGDIR_SIZE);
	}

	if (begin)
		*begin = b;
	if (end)
		*end = e;
	return (e - b) >> CORE_MMU_PGDIR_SHIFT;
}

static TEE_Result alloc_pgt(struct user_mode_ctx *uctx)
{
	struct thread_specific_data *tsd __maybe_unused;
	vaddr_t b;
	vaddr_t e;
	size_t ntbl;

	ntbl = get_num_req_pgts(uctx, &b, &e);
	if (!pgt_check_avail(ntbl)) {
		EMSG("%zu page tables not available", ntbl);
		return TEE_ERROR_OUT_OF_MEMORY;
	}

#ifdef CFG_PAGED_USER_TA
	tsd = thread_get_tsd();
	if (uctx->ts_ctx == tsd->ctx) {
		/*
		 * The supplied uctx is the currently active uctx; allocate
		 * the page tables too, as the pager needs to use them soon.
		 */
		pgt_alloc(&tsd->pgt_cache, uctx->ts_ctx, b, e - 1);
	}
#endif

	return TEE_SUCCESS;
}
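
/*
 * Page-table accounting sketch (illustrative, assuming a 1 MiB
 * translation table granule, i.e. CORE_MMU_PGDIR_SIZE == 0x100000 as
 * with the non-LPAE short-descriptor format): a VA span from 0x103000
 * to 0x201000 is widened by get_num_req_pgts() to [0x100000, 0x300000),
 * so alloc_pgt() needs (0x300000 - 0x100000) >> CORE_MMU_PGDIR_SHIFT = 2
 * free page tables before the mapping may proceed.
 */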

static void rem_um_region(struct user_mode_ctx *uctx, struct vm_region *r)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct pgt_cache *pgt_cache = NULL;
	vaddr_t begin = ROUNDDOWN(r->va, CORE_MMU_PGDIR_SIZE);
	vaddr_t last = ROUNDUP(r->va + r->size, CORE_MMU_PGDIR_SIZE);
	struct vm_region *r2 = NULL;

	if (uctx->ts_ctx == tsd->ctx)
		pgt_cache = &tsd->pgt_cache;

	if (mobj_is_paged(r->mobj)) {
		tee_pager_rem_um_region(uctx, r->va, r->size);
	} else {
		pgt_clear_ctx_range(pgt_cache, uctx->ts_ctx, r->va,
				    r->va + r->size);
		tlbi_mva_range_asid(r->va, r->size, SMALL_PAGE_SIZE,
				    uctx->vm_info.asid);
	}

	r2 = TAILQ_NEXT(r, link);
	if (r2)
		last = MIN(last, ROUNDDOWN(r2->va, CORE_MMU_PGDIR_SIZE));

	r2 = TAILQ_PREV(r, vm_region_head, link);
	if (r2)
		begin = MAX(begin,
			    ROUNDUP(r2->va + r2->size, CORE_MMU_PGDIR_SIZE));

	/* If there are no unused page tables, there's nothing left to do */
	if (begin >= last)
		return;

	pgt_flush_ctx_range(pgt_cache, uctx->ts_ctx, r->va, r->va + r->size);
}

static TEE_Result umap_add_region(struct vm_info *vmi, struct vm_region *reg,
				  size_t pad_begin, size_t pad_end,
				  size_t align)
{
	struct vm_region dummy_first_reg = { };
	struct vm_region dummy_last_reg = { };
	struct vm_region *r = NULL;
	struct vm_region *prev_r = NULL;
	vaddr_t va_range_base = 0;
	size_t va_range_size = 0;
	size_t granul;
	vaddr_t va = 0;
	size_t offs_plus_size = 0;

	core_mmu_get_user_va_range(&va_range_base, &va_range_size);
	dummy_first_reg.va = va_range_base;
	dummy_last_reg.va = va_range_base + va_range_size;

	/* Check alignment, it has to be at least SMALL_PAGE based */
	if ((reg->va | reg->size | pad_begin | pad_end) & SMALL_PAGE_MASK)
		return TEE_ERROR_ACCESS_CONFLICT;

	/* Check that the mobj is defined for the entire range */
	if (ADD_OVERFLOW(reg->offset, reg->size, &offs_plus_size))
		return TEE_ERROR_BAD_PARAMETERS;
	if (offs_plus_size > ROUNDUP(reg->mobj->size, SMALL_PAGE_SIZE))
		return TEE_ERROR_BAD_PARAMETERS;

	granul = MAX(align, SMALL_PAGE_SIZE);
	if (!IS_POWER_OF_TWO(granul))
		return TEE_ERROR_BAD_PARAMETERS;

	prev_r = &dummy_first_reg;
	TAILQ_FOREACH(r, &vmi->regions, link) {
		va = select_va_in_range(prev_r, r, reg, pad_begin, pad_end,
					granul);
		if (va) {
			reg->va = va;
			TAILQ_INSERT_BEFORE(r, reg, link);
			return TEE_SUCCESS;
		}
		prev_r = r;
	}

	r = TAILQ_LAST(&vmi->regions, vm_region_head);
	if (!r)
		r = &dummy_first_reg;
	va = select_va_in_range(r, &dummy_last_reg, reg, pad_begin, pad_end,
				granul);
	if (va) {
		reg->va = va;
		TAILQ_INSERT_TAIL(&vmi->regions, reg, link);
		return TEE_SUCCESS;
	}

	return TEE_ERROR_ACCESS_CONFLICT;
}
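
/*
 * Usage sketch for vm_map_pad() below (illustrative only): callers
 * typically go through the vm_map() wrapper from <mm/vm.h>, which
 * passes zero padding and alignment. With *va == 0 on entry the
 * address is picked by umap_add_region(); a non-zero *va requests that
 * exact address.
 *
 *	vaddr_t va = 0;
 *
 *	res = vm_map(uctx, &va, SMALL_PAGE_SIZE,
 *		     TEE_MATTR_PRW | TEE_MATTR_URW, 0, mobj, 0);
 *
 * On success va holds the selected user VA.
 */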

TEE_Result vm_map_pad(struct user_mode_ctx *uctx, vaddr_t *va, size_t len,
		      uint32_t prot, uint32_t flags, struct mobj *mobj,
		      size_t offs, size_t pad_begin, size_t pad_end,
		      size_t align)
{
	TEE_Result res = TEE_SUCCESS;
	struct vm_region *reg = NULL;
	uint32_t attr = 0;

	if (prot & ~TEE_MATTR_PROT_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	reg = calloc(1, sizeof(*reg));
	if (!reg)
		return TEE_ERROR_OUT_OF_MEMORY;

	if (!mobj_is_paged(mobj)) {
		uint32_t cattr;

		res = mobj_get_cattr(mobj, &cattr);
		if (res)
			goto err_free_reg;
		attr |= cattr << TEE_MATTR_CACHE_SHIFT;
	}
	attr |= TEE_MATTR_VALID_BLOCK;
	if (mobj_is_secure(mobj))
		attr |= TEE_MATTR_SECURE;

	reg->mobj = mobj_get(mobj);
	reg->offset = offs;
	reg->va = *va;
	reg->size = ROUNDUP(len, SMALL_PAGE_SIZE);
	reg->attr = attr | prot;
	reg->flags = flags;

	res = umap_add_region(&uctx->vm_info, reg, pad_begin, pad_end, align);
	if (res)
		goto err_put_mobj;

	res = alloc_pgt(uctx);
	if (res)
		goto err_rem_reg;

	if (mobj_is_paged(mobj)) {
		struct fobj *fobj = mobj_get_fobj(mobj);

		if (!fobj) {
			res = TEE_ERROR_GENERIC;
			goto err_rem_reg;
		}

		res = tee_pager_add_um_region(uctx, reg->va, fobj, prot);
		fobj_put(fobj);
		if (res)
			goto err_rem_reg;
	}

	/*
	 * If the context is currently active, set it again to update
	 * the mapping.
	 */
	if (thread_get_tsd()->ctx == uctx->ts_ctx)
		vm_set_ctx(uctx->ts_ctx);

	*va = reg->va;

	return TEE_SUCCESS;

err_rem_reg:
	TAILQ_REMOVE(&uctx->vm_info.regions, reg, link);
err_put_mobj:
	mobj_put(reg->mobj);
err_free_reg:
	free(reg);
	return res;
}

static struct vm_region *find_vm_region(struct vm_info *vm_info, vaddr_t va)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &vm_info->regions, link)
		if (va >= r->va && va < r->va + r->size)
			return r;

	return NULL;
}

static bool va_range_is_contiguous(struct vm_region *r0, vaddr_t va,
				   size_t len,
				   bool (*cmp_regs)(const struct vm_region *r0,
						    const struct vm_region *r,
						    const struct vm_region *rn))
{
	struct vm_region *r = r0;
	vaddr_t end_va = 0;

	if (ADD_OVERFLOW(va, len, &end_va))
		return false;

	while (true) {
		struct vm_region *r_next = TAILQ_NEXT(r, link);
		vaddr_t r_end_va = r->va + r->size;

		if (r_end_va >= end_va)
			return true;
		if (!r_next)
			return false;
		if (r_end_va != r_next->va)
			return false;
		if (cmp_regs && !cmp_regs(r0, r, r_next))
			return false;
		r = r_next;
	}
}

static TEE_Result split_vm_region(struct user_mode_ctx *uctx,
				  struct vm_region *r, vaddr_t va)
{
	struct vm_region *r2 = NULL;
	size_t diff = va - r->va;

	assert(diff && diff < r->size);

	r2 = calloc(1, sizeof(*r2));
	if (!r2)
		return TEE_ERROR_OUT_OF_MEMORY;

	if (mobj_is_paged(r->mobj)) {
		TEE_Result res = tee_pager_split_um_region(uctx, va);

		if (res) {
			free(r2);
			return res;
		}
	}

	r2->mobj = mobj_get(r->mobj);
	r2->offset = r->offset + diff;
	r2->va = va;
	r2->size = r->size - diff;
	r2->attr = r->attr;
	r2->flags = r->flags;

	r->size = diff;

	TAILQ_INSERT_AFTER(&uctx->vm_info.regions, r, r2, link);

	return TEE_SUCCESS;
}
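
/*
 * Example of split_vm_region() (illustrative numbers): splitting a
 * three-page region r = { .va = 0x100000, .size = 0x3000, .offset = 0 }
 * at va = 0x101000 shrinks r to a single page and inserts
 * r2 = { .va = 0x101000, .size = 0x2000, .offset = 0x1000 } right after
 * it, sharing the same mobj, attr and flags.
 */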

static TEE_Result split_vm_range(struct user_mode_ctx *uctx, vaddr_t va,
				 size_t len,
				 bool (*cmp_regs)(const struct vm_region *r0,
						  const struct vm_region *r,
						  const struct vm_region *rn),
				 struct vm_region **r0_ret)
{
	TEE_Result res = TEE_SUCCESS;
	struct vm_region *r = NULL;
	vaddr_t end_va = 0;

	if ((va | len) & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	if (ADD_OVERFLOW(va, len, &end_va))
		return TEE_ERROR_BAD_PARAMETERS;

	/*
	 * Find the first vm_region in range and check that the entire
	 * range is contiguous.
	 */
	r = find_vm_region(&uctx->vm_info, va);
	if (!r || !va_range_is_contiguous(r, va, len, cmp_regs))
		return TEE_ERROR_BAD_PARAMETERS;

	/*
	 * If needed, split regions so that va and len cover only complete
	 * regions.
	 */
	if (va != r->va) {
		res = split_vm_region(uctx, r, va);
		if (res)
			return res;
		r = TAILQ_NEXT(r, link);
	}

	*r0_ret = r;
	r = find_vm_region(&uctx->vm_info, va + len - 1);
	if (!r)
		return TEE_ERROR_BAD_PARAMETERS;
	if (end_va != r->va + r->size) {
		res = split_vm_region(uctx, r, end_va);
		if (res)
			return res;
	}

	return TEE_SUCCESS;
}

static void merge_vm_range(struct user_mode_ctx *uctx, vaddr_t va, size_t len)
{
	struct vm_region *r_next = NULL;
	struct vm_region *r = NULL;
	vaddr_t end_va = 0;

	if (ADD_OVERFLOW(va, len, &end_va))
		return;

	tee_pager_merge_um_region(uctx, va, len);

	for (r = TAILQ_FIRST(&uctx->vm_info.regions);; r = r_next) {
		r_next = TAILQ_NEXT(r, link);
		if (!r_next)
			return;

		/* Try merging with the region just before va */
		if (r->va + r->size < va)
			continue;

		/*
		 * If r->va is well past our range we're done.
		 * Note that if it's just the page after our range we'll
		 * try to merge.
		 */
		if (r->va > end_va)
			return;

		if (r->va + r->size != r_next->va)
			continue;
		if (r->mobj != r_next->mobj ||
		    r->flags != r_next->flags ||
		    r->attr != r_next->attr)
			continue;
		if (r->offset + r->size != r_next->offset)
			continue;

		TAILQ_REMOVE(&uctx->vm_info.regions, r_next, link);
		r->size += r_next->size;
		mobj_put(r_next->mobj);
		free(r_next);
		r_next = r;
	}
}

static bool cmp_region_for_remap(const struct vm_region *r0,
				 const struct vm_region *r,
				 const struct vm_region *rn)
{
	/*
	 * All the essentials have to match for remap to make sense: mobj,
	 * attr and flags must be equal, and the offsets must be
	 * contiguous.
	 *
	 * Note that vm_remap() depends on mobj/fobj being the same.
	 */
	return r0->flags == r->flags && r0->attr == r->attr &&
	       r0->mobj == r->mobj && rn->offset == r->offset + r->size;
}
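
/*
 * Usage sketch for vm_remap() below (illustrative only): move a mapped
 * range to a new, kernel-selected address while keeping contents,
 * attributes and flags:
 *
 *	vaddr_t new_va = 0;
 *
 *	res = vm_remap(uctx, &new_va, old_va, len, 0, 0);
 *
 * On success [old_va, old_va + len) is unmapped and new_va holds the
 * start of the relocated range; on failure the original mapping is
 * restored, or the core panics if even that fails.
 */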

TEE_Result vm_remap(struct user_mode_ctx *uctx, vaddr_t *new_va, vaddr_t old_va,
		    size_t len, size_t pad_begin, size_t pad_end)
{
	struct vm_region_head regs = TAILQ_HEAD_INITIALIZER(regs);
	TEE_Result res = TEE_SUCCESS;
	struct vm_region *r0 = NULL;
	struct vm_region *r = NULL;
	struct vm_region *r_next = NULL;
	struct vm_region *r_last = NULL;
	struct vm_region *r_first = NULL;
	struct fobj *fobj = NULL;
	vaddr_t next_va = 0;

	assert(thread_get_tsd()->ctx == uctx->ts_ctx);

	if (!len || ((len | old_va) & SMALL_PAGE_MASK))
		return TEE_ERROR_BAD_PARAMETERS;

	res = split_vm_range(uctx, old_va, len, cmp_region_for_remap, &r0);
	if (res)
		return res;

	if (mobj_is_paged(r0->mobj)) {
		fobj = mobj_get_fobj(r0->mobj);
		if (!fobj)
			panic();
	}

	for (r = r0; r; r = r_next) {
		if (r->va + r->size > old_va + len)
			break;
		r_next = TAILQ_NEXT(r, link);
		rem_um_region(uctx, r);
		TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
		TAILQ_INSERT_TAIL(&regs, r, link);
	}

	/*
	 * Synchronize the change to the translation tables. Even though
	 * the pager case unmaps immediately we may still free a
	 * translation table.
	 */
	vm_set_ctx(uctx->ts_ctx);

	r_first = TAILQ_FIRST(&regs);
	while (!TAILQ_EMPTY(&regs)) {
		r = TAILQ_FIRST(&regs);
		TAILQ_REMOVE(&regs, r, link);
		if (r_last) {
			r->va = r_last->va + r_last->size;
			res = umap_add_region(&uctx->vm_info, r, 0, 0, 0);
		} else {
			r->va = *new_va;
			res = umap_add_region(&uctx->vm_info, r, pad_begin,
					      pad_end + len - r->size, 0);
		}
		if (!res)
			r_last = r;
		if (!res)
			res = alloc_pgt(uctx);
		if (fobj && !res)
			res = tee_pager_add_um_region(uctx, r->va, fobj,
						      r->attr);

		if (res) {
			/*
			 * Something went wrong; move all the recently added
			 * regions back to regs for later reinsertion at
			 * the original spot.
			 */
			struct vm_region *r_tmp = NULL;

			if (r != r_last) {
				/*
				 * umap_add_region() failed, move r back to
				 * regs before all the rest are moved back.
				 */
				TAILQ_INSERT_HEAD(&regs, r, link);
			}
			for (r = r_first; r_last && r != r_last; r = r_next) {
				r_next = TAILQ_NEXT(r, link);
				TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
				if (r_tmp)
					TAILQ_INSERT_AFTER(&regs, r_tmp, r,
							   link);
				else
					TAILQ_INSERT_HEAD(&regs, r, link);
				r_tmp = r;
			}

			goto err_restore_map;
		}
	}

	fobj_put(fobj);

	vm_set_ctx(uctx->ts_ctx);
	*new_va = r_first->va;

	return TEE_SUCCESS;

err_restore_map:
	next_va = old_va;
	while (!TAILQ_EMPTY(&regs)) {
		r = TAILQ_FIRST(&regs);
		TAILQ_REMOVE(&regs, r, link);
		r->va = next_va;
		next_va += r->size;
		if (umap_add_region(&uctx->vm_info, r, 0, 0, 0))
			panic("Cannot restore mapping");
		if (alloc_pgt(uctx))
			panic("Cannot restore mapping");
		if (fobj && tee_pager_add_um_region(uctx, r->va, fobj, r->attr))
			panic("Cannot restore mapping");
	}
	fobj_put(fobj);
	vm_set_ctx(uctx->ts_ctx);

	return res;
}

static bool cmp_region_for_get_flags(const struct vm_region *r0,
				     const struct vm_region *r,
				     const struct vm_region *rn __unused)
{
	return r0->flags == r->flags;
}

TEE_Result vm_get_flags(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
			uint32_t *flags)
{
	struct vm_region *r = NULL;

	if (!len || ((len | va) & SMALL_PAGE_MASK))
		return TEE_ERROR_BAD_PARAMETERS;

	r = find_vm_region(&uctx->vm_info, va);
	if (!r)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_flags))
		return TEE_ERROR_BAD_PARAMETERS;

	*flags = r->flags;

	return TEE_SUCCESS;
}

static bool cmp_region_for_get_prot(const struct vm_region *r0,
				    const struct vm_region *r,
				    const struct vm_region *rn __unused)
{
	return (r0->attr & TEE_MATTR_PROT_MASK) ==
	       (r->attr & TEE_MATTR_PROT_MASK);
}

TEE_Result vm_get_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
		       uint16_t *prot)
{
	struct vm_region *r = NULL;

	if (!len || ((len | va) & SMALL_PAGE_MASK))
		return TEE_ERROR_BAD_PARAMETERS;

	r = find_vm_region(&uctx->vm_info, va);
	if (!r)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_prot))
		return TEE_ERROR_BAD_PARAMETERS;

	*prot = r->attr & TEE_MATTR_PROT_MASK;

	return TEE_SUCCESS;
}
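
/*
 * Usage sketch for vm_set_prot() below (illustrative only): dropping
 * write access from a user mapping, for instance to make a loaded
 * segment read-only:
 *
 *	res = vm_set_prot(uctx, va, len, TEE_MATTR_UR | TEE_MATTR_PR);
 *
 * Regions are split as needed, caches are synchronized if the range
 * was writeable, and regions that have become identical again are
 * merged afterwards.
 */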

TEE_Result vm_set_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
		       uint32_t prot)
{
	TEE_Result res = TEE_SUCCESS;
	struct vm_region *r0 = NULL;
	struct vm_region *r = NULL;
	bool was_writeable = false;
	bool need_sync = false;

	assert(thread_get_tsd()->ctx == uctx->ts_ctx);

	if (prot & ~TEE_MATTR_PROT_MASK || !len)
		return TEE_ERROR_BAD_PARAMETERS;

	res = split_vm_range(uctx, va, len, NULL, &r0);
	if (res)
		return res;

	for (r = r0; r; r = TAILQ_NEXT(r, link)) {
		if (r->va + r->size > va + len)
			break;
		if (r->attr & (TEE_MATTR_UW | TEE_MATTR_PW))
			was_writeable = true;

		if (!mobj_is_paged(r->mobj))
			need_sync = true;

		r->attr &= ~TEE_MATTR_PROT_MASK;
		r->attr |= prot;
	}

	if (need_sync) {
		/* Synchronize changes to translation tables */
		vm_set_ctx(uctx->ts_ctx);
	}

	for (r = r0; r; r = TAILQ_NEXT(r, link)) {
		if (r->va + r->size > va + len)
			break;
		if (mobj_is_paged(r->mobj)) {
			if (!tee_pager_set_um_region_attr(uctx, r->va, r->size,
							  prot))
				panic();
		} else if (was_writeable) {
			cache_op_inner(DCACHE_AREA_CLEAN, (void *)r->va,
				       r->size);
		}
	}
	if (need_sync && was_writeable)
		cache_op_inner(ICACHE_INVALIDATE, NULL, 0);

	merge_vm_range(uctx, va, len);

	return TEE_SUCCESS;
}

static void umap_remove_region(struct vm_info *vmi, struct vm_region *reg)
{
	TAILQ_REMOVE(&vmi->regions, reg, link);
	mobj_put(reg->mobj);
	free(reg);
}

TEE_Result vm_unmap(struct user_mode_ctx *uctx, vaddr_t va, size_t len)
{
	TEE_Result res = TEE_SUCCESS;
	struct vm_region *r = NULL;
	struct vm_region *r_next = NULL;
	size_t end_va = 0;
	size_t unmap_end_va = 0;
	size_t l = 0;

	assert(thread_get_tsd()->ctx == uctx->ts_ctx);

	if (ROUNDUP_OVERFLOW(len, SMALL_PAGE_SIZE, &l))
		return TEE_ERROR_BAD_PARAMETERS;

	if (!l || (va & SMALL_PAGE_MASK))
		return TEE_ERROR_BAD_PARAMETERS;

	if (ADD_OVERFLOW(va, l, &end_va))
		return TEE_ERROR_BAD_PARAMETERS;

	res = split_vm_range(uctx, va, l, NULL, &r);
	if (res)
		return res;

	while (true) {
		r_next = TAILQ_NEXT(r, link);
		unmap_end_va = r->va + r->size;
		rem_um_region(uctx, r);
		umap_remove_region(&uctx->vm_info, r);
		if (!r_next || unmap_end_va == end_va)
			break;
		r = r_next;
	}

	return TEE_SUCCESS;
}

static TEE_Result map_kinit(struct user_mode_ctx *uctx)
{
	TEE_Result res;
	struct mobj *mobj;
	size_t offs;
	vaddr_t va;
	size_t sz;

	thread_get_user_kcode(&mobj, &offs, &va, &sz);
	if (sz) {
		res = vm_map(uctx, &va, sz, TEE_MATTR_PRX, VM_FLAG_PERMANENT,
			     mobj, offs);
		if (res)
			return res;
	}

	thread_get_user_kdata(&mobj, &offs, &va, &sz);
	if (sz)
		return vm_map(uctx, &va, sz, TEE_MATTR_PRW, VM_FLAG_PERMANENT,
			      mobj, offs);

	return TEE_SUCCESS;
}

TEE_Result vm_info_init(struct user_mode_ctx *uctx)
{
	TEE_Result res;
	uint32_t asid = asid_alloc();

	if (!asid) {
		DMSG("Failed to allocate ASID");
		return TEE_ERROR_GENERIC;
	}

	memset(&uctx->vm_info, 0, sizeof(uctx->vm_info));
	TAILQ_INIT(&uctx->vm_info.regions);
	uctx->vm_info.asid = asid;

	res = map_kinit(uctx);
	if (res)
		vm_info_final(uctx);
	return res;
}

void vm_clean_param(struct user_mode_ctx *uctx)
{
	struct vm_region *next_r;
	struct vm_region *r;

	TAILQ_FOREACH_SAFE(r, &uctx->vm_info.regions, link, next_r) {
		if (r->flags & VM_FLAG_EPHEMERAL) {
			rem_um_region(uctx, r);
			umap_remove_region(&uctx->vm_info, r);
		}
	}
}
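
/*
 * Parameter mappings carry VM_FLAG_EPHEMERAL and live only for the
 * duration of one invocation: vm_map_param() below establishes them
 * and vm_clean_param() above tears them down. Illustrative call order
 * (sketch, not taken from a real caller):
 *
 *	res = vm_map_param(uctx, param, param_va);
 *	(run the TA with the addresses in param_va[])
 *	vm_clean_param(uctx);
 */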

static void check_param_map_empty(struct user_mode_ctx *uctx __maybe_unused)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &uctx->vm_info.regions, link)
		assert(!(r->flags & VM_FLAG_EPHEMERAL));
}

static TEE_Result param_mem_to_user_va(struct user_mode_ctx *uctx,
				       struct param_mem *mem, void **user_va)
{
	struct vm_region *region = NULL;

	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
		vaddr_t va = 0;
		size_t phys_offs = 0;

		if (!(region->flags & VM_FLAG_EPHEMERAL))
			continue;
		if (mem->mobj != region->mobj)
			continue;

		phys_offs = mobj_get_phys_offs(mem->mobj,
					       CORE_MMU_USER_PARAM_SIZE);
		phys_offs += mem->offs;
		if (phys_offs < region->offset)
			continue;
		if (phys_offs >= (region->offset + region->size))
			continue;
		va = region->va + phys_offs - region->offset;
		*user_va = (void *)va;
		return TEE_SUCCESS;
	}
	return TEE_ERROR_GENERIC;
}

static int cmp_param_mem(const void *a0, const void *a1)
{
	const struct param_mem *m1 = a1;
	const struct param_mem *m0 = a0;
	int ret;

	/* Make sure that invalid param_mem entries are placed last */
	if (!m0->mobj && !m1->mobj)
		return 0;
	if (!m0->mobj)
		return 1;
	if (!m1->mobj)
		return -1;

	ret = CMP_TRILEAN(mobj_is_secure(m0->mobj), mobj_is_secure(m1->mobj));
	if (ret)
		return ret;

	ret = CMP_TRILEAN((vaddr_t)m0->mobj, (vaddr_t)m1->mobj);
	if (ret)
		return ret;

	ret = CMP_TRILEAN(m0->offs, m1->offs);
	if (ret)
		return ret;

	return CMP_TRILEAN(m0->size, m1->size);
}
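
/*
 * Sorting example for cmp_param_mem() (illustrative): the entries
 * { mobj = A, offs = 0x1000 }, { mobj = NULL } and { mobj = A,
 * offs = 0 } sort as { A/0, A/0x1000, NULL }, so the merge loop in
 * vm_map_param() below can coalesce the two references to mobj A when
 * they are adjacent or overlapping, while invalid entries stay at the
 * end of the array.
 */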

TEE_Result vm_map_param(struct user_mode_ctx *uctx, struct tee_ta_param *param,
			void *param_va[TEE_NUM_PARAMS])
{
	TEE_Result res = TEE_SUCCESS;
	size_t n;
	size_t m;
	struct param_mem mem[TEE_NUM_PARAMS];

	memset(mem, 0, sizeof(mem));
	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		size_t phys_offs;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		phys_offs = mobj_get_phys_offs(param->u[n].mem.mobj,
					       CORE_MMU_USER_PARAM_SIZE);
		mem[n].mobj = param->u[n].mem.mobj;
		mem[n].offs = ROUNDDOWN(phys_offs + param->u[n].mem.offs,
					CORE_MMU_USER_PARAM_SIZE);
		mem[n].size = ROUNDUP(phys_offs + param->u[n].mem.offs -
				      mem[n].offs + param->u[n].mem.size,
				      CORE_MMU_USER_PARAM_SIZE);
		/*
		 * For size 0 (raw pointer parameter), add a minimum size
		 * value to allow the address to be mapped.
		 */
		if (!mem[n].size)
			mem[n].size = CORE_MMU_USER_PARAM_SIZE;
	}

	/*
	 * Sort arguments so NULL mobj is last, secure mobjs first, then by
	 * mobj pointer value since those entries can't be merged either,
	 * finally by offset.
	 *
	 * This should result in a list where all mergeable entries are
	 * next to each other and unused/invalid entries are at the end.
	 */
	qsort(mem, TEE_NUM_PARAMS, sizeof(struct param_mem), cmp_param_mem);

	for (n = 1, m = 0; n < TEE_NUM_PARAMS && mem[n].mobj; n++) {
		if (mem[n].mobj == mem[m].mobj &&
		    (mem[n].offs == (mem[m].offs + mem[m].size) ||
		     core_is_buffer_intersect(mem[m].offs, mem[m].size,
					      mem[n].offs, mem[n].size))) {
			mem[m].size = mem[n].offs + mem[n].size - mem[m].offs;
			continue;
		}
		m++;
		if (n != m)
			mem[m] = mem[n];
	}
	/*
	 * We'd like 'm' to be the number of valid entries. Here 'm' is the
	 * index of the last valid entry if the first entry is valid, else
	 * 0.
	 */
	if (mem[0].mobj)
		m++;

	check_param_map_empty(uctx);

	for (n = 0; n < m; n++) {
		vaddr_t va = 0;

		res = vm_map(uctx, &va, mem[n].size,
			     TEE_MATTR_PRW | TEE_MATTR_URW,
			     VM_FLAG_EPHEMERAL | VM_FLAG_SHAREABLE,
			     mem[n].mobj, mem[n].offs);
		if (res)
			goto out;
	}

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (!param->u[n].mem.mobj)
			continue;

		res = param_mem_to_user_va(uctx, &param->u[n].mem,
					   param_va + n);
		if (res != TEE_SUCCESS)
			goto out;
	}

	res = alloc_pgt(uctx);
out:
	if (res)
		vm_clean_param(uctx);

	return res;
}

TEE_Result vm_add_rwmem(struct user_mode_ctx *uctx, struct mobj *mobj,
			vaddr_t *va)
{
	TEE_Result res;
	struct vm_region *reg = calloc(1, sizeof(*reg));

	if (!reg)
		return TEE_ERROR_OUT_OF_MEMORY;

	reg->mobj = mobj;
	reg->offset = 0;
	reg->va = 0;
	reg->size = ROUNDUP(mobj->size, SMALL_PAGE_SIZE);
	if (mobj_is_secure(mobj))
		reg->attr = TEE_MATTR_SECURE;
	else
		reg->attr = 0;

	res = umap_add_region(&uctx->vm_info, reg, 0, 0, 0);
	if (res) {
		free(reg);
		return res;
	}

	res = alloc_pgt(uctx);
	if (res)
		umap_remove_region(&uctx->vm_info, reg);
	else
		*va = reg->va;

	return res;
}

void vm_rem_rwmem(struct user_mode_ctx *uctx, struct mobj *mobj, vaddr_t va)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
		if (r->mobj == mobj && r->va == va) {
			rem_um_region(uctx, r);
			umap_remove_region(&uctx->vm_info, r);
			return;
		}
	}
}

void vm_info_final(struct user_mode_ctx *uctx)
{
	if (!uctx->vm_info.asid)
		return;

	/* Clear MMU entries to avoid clashes when the ASID is reused */
	tlbi_asid(uctx->vm_info.asid);

	asid_free(uctx->vm_info.asid);
	while (!TAILQ_EMPTY(&uctx->vm_info.regions))
		umap_remove_region(&uctx->vm_info,
				   TAILQ_FIRST(&uctx->vm_info.regions));
	memset(&uctx->vm_info, 0, sizeof(uctx->vm_info));
}
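
/*
 * The helpers below classify user buffers against TA private memory.
 * Regions mapped with any VM_FLAGS_NONPRIV bit set (for example
 * ephemeral parameter mappings) do not count as private. Illustrative
 * check (sketch only):
 *
 *	if (!vm_buf_is_inside_um_private(uctx, buf, sz))
 *		(buf is not entirely TA private)
 */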

/* Return true only if the buffer fits inside TA private memory */
bool vm_buf_is_inside_um_private(const struct user_mode_ctx *uctx,
				 const void *va, size_t size)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
		if (r->flags & VM_FLAGS_NONPRIV)
			continue;
		if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size))
			return true;
	}

	return false;
}

/* Return true only if the buffer intersects TA private memory */
bool vm_buf_intersects_um_private(const struct user_mode_ctx *uctx,
				  const void *va, size_t size)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
		/* VM_FLAGS_* live in flags, not in attr */
		if (r->flags & VM_FLAGS_NONPRIV)
			continue;
		if (core_is_buffer_intersect((vaddr_t)va, size, r->va,
					     r->size))
			return true;
	}

	return false;
}

TEE_Result vm_buf_to_mboj_offs(const struct user_mode_ctx *uctx,
			       const void *va, size_t size,
			       struct mobj **mobj, size_t *offs)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
		if (!r->mobj)
			continue;
		if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size)) {
			size_t poffs;

			poffs = mobj_get_phys_offs(r->mobj,
						   CORE_MMU_USER_PARAM_SIZE);
			*mobj = r->mobj;
			*offs = (vaddr_t)va - r->va + r->offset - poffs;
			return TEE_SUCCESS;
		}
	}

	return TEE_ERROR_BAD_PARAMETERS;
}

static TEE_Result tee_mmu_user_va2pa_attr(const struct user_mode_ctx *uctx,
					  void *ua, paddr_t *pa, uint32_t *attr)
{
	struct vm_region *region = NULL;

	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
		if (!core_is_buffer_inside((vaddr_t)ua, 1, region->va,
					   region->size))
			continue;

		if (pa) {
			TEE_Result res;
			paddr_t p;
			size_t offset;
			size_t granule;

			/*
			 * mobj and input user address may each include
			 * a specific offset-in-granule position.
			 * Drop both to get the target physical page base
			 * address, then apply only the user address
			 * offset-in-granule.
			 * The lowest mapping granule is the small page.
			 */
			granule = MAX(region->mobj->phys_granule,
				      (size_t)SMALL_PAGE_SIZE);
			assert(!granule || IS_POWER_OF_TWO(granule));

			offset = region->offset +
				 ROUNDDOWN((vaddr_t)ua - region->va, granule);

			res = mobj_get_pa(region->mobj, offset, granule, &p);
			if (res != TEE_SUCCESS)
				return res;

			*pa = p | ((vaddr_t)ua & (granule - 1));
		}
		if (attr)
			*attr = region->attr;

		return TEE_SUCCESS;
	}

	return TEE_ERROR_ACCESS_DENIED;
}

TEE_Result vm_va2pa(const struct user_mode_ctx *uctx, void *ua, paddr_t *pa)
{
	return tee_mmu_user_va2pa_attr(uctx, ua, pa, NULL);
}
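
/*
 * Worked example for tee_mmu_user_va2pa_attr()/vm_va2pa() above
 * (illustrative numbers): with a granule of SMALL_PAGE_SIZE (0x1000)
 * and ua = region->va + 0x1234 in a page-aligned region, the physical
 * page is looked up at mobj offset region->offset + 0x1000 and the
 * result is that page base OR'ed with the in-page offset 0x234.
 */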

void *vm_pa2va(const struct user_mode_ctx *uctx, paddr_t pa, size_t pa_size)
{
	paddr_t p = 0;
	struct vm_region *region = NULL;

	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
		size_t granule = 0;
		size_t size = 0;
		size_t ofs = 0;

		/* pa2va is expected only for memory tracked through mobj */
		if (!region->mobj)
			continue;

		/* Physically granulated memory object must be scanned */
		granule = region->mobj->phys_granule;
		assert(!granule || IS_POWER_OF_TWO(granule));

		for (ofs = region->offset; ofs < region->size; ofs += size) {
			if (granule) {
				/* From current offset to buffer/granule end */
				size = granule - (ofs & (granule - 1));

				if (size > (region->size - ofs))
					size = region->size - ofs;
			} else {
				size = region->size;
			}

			if (mobj_get_pa(region->mobj, ofs, granule, &p))
				continue;

			if (core_is_buffer_inside(pa, pa_size, p, size)) {
				/* Remove region offset (mobj phys offset) */
				ofs -= region->offset;
				/* Get offset-in-granule */
				p = pa - p;

				return (void *)(region->va + ofs + (vaddr_t)p);
			}
		}
	}

	return NULL;
}

TEE_Result vm_check_access_rights(const struct user_mode_ctx *uctx,
				  uint32_t flags, uaddr_t uaddr, size_t len)
{
	uaddr_t a = 0;
	uaddr_t end_addr = 0;
	size_t addr_incr = MIN(CORE_MMU_USER_CODE_SIZE,
			       CORE_MMU_USER_PARAM_SIZE);

	if (ADD_OVERFLOW(uaddr, len, &end_addr))
		return TEE_ERROR_ACCESS_DENIED;

	if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
	    (flags & TEE_MEMORY_ACCESS_SECURE))
		return TEE_ERROR_ACCESS_DENIED;

	/*
	 * Rely on the TA private memory test to check whether the address
	 * range is private to the TA or not.
	 */
	if (!(flags & TEE_MEMORY_ACCESS_ANY_OWNER) &&
	    !vm_buf_is_inside_um_private(uctx, (void *)uaddr, len))
		return TEE_ERROR_ACCESS_DENIED;

	for (a = ROUNDDOWN(uaddr, addr_incr); a < end_addr; a += addr_incr) {
		uint32_t attr;
		TEE_Result res;

		res = tee_mmu_user_va2pa_attr(uctx, (void *)a, NULL, &attr);
		if (res != TEE_SUCCESS)
			return res;

		if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
		    (attr & TEE_MATTR_SECURE))
			return TEE_ERROR_ACCESS_DENIED;

		if ((flags & TEE_MEMORY_ACCESS_SECURE) &&
		    !(attr & TEE_MATTR_SECURE))
			return TEE_ERROR_ACCESS_DENIED;

		if ((flags & TEE_MEMORY_ACCESS_WRITE) && !(attr & TEE_MATTR_UW))
			return TEE_ERROR_ACCESS_DENIED;
		if ((flags & TEE_MEMORY_ACCESS_READ) && !(attr & TEE_MATTR_UR))
			return TEE_ERROR_ACCESS_DENIED;
	}

	return TEE_SUCCESS;
}

void vm_set_ctx(struct ts_ctx *ctx)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	core_mmu_set_user_map(NULL);
	/*
	 * No matter what happens below, the current user TA will not be
	 * current any longer. Make sure the pager is in sync with that.
	 * This function has to be called before there's a chance that
	 * pgt_free_unlocked() is called.
	 *
	 * Save translation tables in a cache if it's a user TA.
	 */
	pgt_free(&tsd->pgt_cache, is_user_ta_ctx(tsd->ctx));

	if (is_user_mode_ctx(ctx)) {
		struct core_mmu_user_map map = { };
		struct user_mode_ctx *uctx = to_user_mode_ctx(ctx);

		core_mmu_create_user_map(uctx, &map);
		core_mmu_set_user_map(&map);
		tee_pager_assign_um_tables(uctx);
	}
	tsd->ctx = ctx;
}