// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2021, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2021, Arm Limited
 */

#include <assert.h>
#include <config.h>
#include <initcall.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tlb_helpers.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/pgt_cache.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu_types.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <stdlib.h>
#include <tee_api_defines_extensions.h>
#include <tee_api_types.h>
#include <trace.h>
#include <types_ext.h>
#include <user_ta_header.h>
#include <util.h>

#ifdef CFG_PL310
#include <kernel/tee_l2cc_mutex.h>
#endif

#define TEE_MMU_UDATA_ATTR		(TEE_MATTR_VALID_BLOCK | \
					 TEE_MATTR_PRW | TEE_MATTR_URW | \
					 TEE_MATTR_SECURE)
#define TEE_MMU_UCODE_ATTR		(TEE_MATTR_VALID_BLOCK | \
					 TEE_MATTR_PRW | TEE_MATTR_URWX | \
					 TEE_MATTR_SECURE)

#define TEE_MMU_UCACHE_DEFAULT_ATTR	(TEE_MATTR_MEM_TYPE_CACHED << \
					 TEE_MATTR_MEM_TYPE_SHIFT)

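/*
 * select_va_in_range() - pick a virtual address for @reg in the gap
 * between @prev_reg and @next_reg.
 *
 * A padding page is inserted between regions whose VM_FLAG_EPHEMERAL,
 * VM_FLAG_PERMANENT or VM_FLAG_SHAREABLE bits differ so that such regions
 * never become contiguous. Without LPAE, secure and non-secure regions are
 * additionally kept in separate translation table directories by raising
 * the granularity to CORE_MMU_PGDIR_SIZE. Returns the selected address, or
 * 0 if no suitable address fits in the gap.
 */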
static vaddr_t select_va_in_range(const struct vm_region *prev_reg,
				  const struct vm_region *next_reg,
				  const struct vm_region *reg,
				  size_t pad_begin, size_t pad_end,
				  size_t granul)
{
	const uint32_t f = VM_FLAG_EPHEMERAL | VM_FLAG_PERMANENT |
			   VM_FLAG_SHAREABLE;
	vaddr_t begin_va = 0;
	vaddr_t end_va = 0;
	size_t pad = 0;

	/*
	 * Insert an unmapped entry to separate regions with differing
	 * VM_FLAG_EPHEMERAL, VM_FLAG_PERMANENT or VM_FLAG_SHAREABLE
	 * bits as they must never be contiguous with another region.
	 */
	if (prev_reg->flags && (prev_reg->flags & f) != (reg->flags & f))
		pad = SMALL_PAGE_SIZE;
	else
		pad = 0;

#ifndef CFG_WITH_LPAE
	if ((prev_reg->attr & TEE_MATTR_SECURE) !=
	    (reg->attr & TEE_MATTR_SECURE))
		granul = CORE_MMU_PGDIR_SIZE;
#endif

	if (ADD_OVERFLOW(prev_reg->va, prev_reg->size, &begin_va) ||
	    ADD_OVERFLOW(begin_va, pad_begin, &begin_va) ||
	    ADD_OVERFLOW(begin_va, pad, &begin_va) ||
	    ROUNDUP_OVERFLOW(begin_va, granul, &begin_va))
		return 0;

	if (reg->va) {
		if (reg->va < begin_va)
			return 0;
		begin_va = reg->va;
	}

	if (next_reg->flags && (next_reg->flags & f) != (reg->flags & f))
		pad = SMALL_PAGE_SIZE;
	else
		pad = 0;

#ifndef CFG_WITH_LPAE
	if ((next_reg->attr & TEE_MATTR_SECURE) !=
	    (reg->attr & TEE_MATTR_SECURE))
		granul = CORE_MMU_PGDIR_SIZE;
#endif
	if (ADD_OVERFLOW(begin_va, reg->size, &end_va) ||
	    ADD_OVERFLOW(end_va, pad_end, &end_va) ||
	    ADD_OVERFLOW(end_va, pad, &end_va) ||
	    ROUNDUP_OVERFLOW(end_va, granul, &end_va))
		return 0;

	if (end_va <= next_reg->va) {
		assert(!reg->va || reg->va == begin_va);
		return begin_va;
	}

	return 0;
}

static size_t get_num_req_pgts(struct user_mode_ctx *uctx, vaddr_t *begin,
			       vaddr_t *end)
{
	vaddr_t b;
	vaddr_t e;

	if (TAILQ_EMPTY(&uctx->vm_info.regions)) {
		core_mmu_get_user_va_range(&b, NULL);
		e = b;
	} else {
		struct vm_region *r;

		b = TAILQ_FIRST(&uctx->vm_info.regions)->va;
		r = TAILQ_LAST(&uctx->vm_info.regions, vm_region_head);
		e = r->va + r->size;
		b = ROUNDDOWN(b, CORE_MMU_PGDIR_SIZE);
		e = ROUNDUP(e, CORE_MMU_PGDIR_SIZE);
	}

	if (begin)
		*begin = b;
	if (end)
		*end = e;
	return (e - b) >> CORE_MMU_PGDIR_SHIFT;
}

static TEE_Result alloc_pgt(struct user_mode_ctx *uctx)
{
	struct thread_specific_data *tsd __maybe_unused;
	vaddr_t b;
	vaddr_t e;
	size_t ntbl;

	ntbl = get_num_req_pgts(uctx, &b, &e);
	if (!pgt_check_avail(ntbl)) {
		EMSG("%zu page tables not available", ntbl);
		return TEE_ERROR_OUT_OF_MEMORY;
	}

#ifdef CFG_PAGED_USER_TA
	tsd = thread_get_tsd();
	if (uctx->ts_ctx == tsd->ctx) {
		/*
		 * The supplied uctx is the currently active uctx, allocate
		 * the page tables too as the pager needs to use them soon.
		 */
		pgt_alloc(&tsd->pgt_cache, uctx->ts_ctx, b, e - 1);
	}
#endif

	return TEE_SUCCESS;
}

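/*
 * rem_um_region() - tear down the mapping backing a region before it is
 * removed from the VM space.
 *
 * Paged regions are handed to the pager, other regions have their page
 * table entries cleared and the matching TLB entries invalidated by ASID.
 * Translation tables that are not shared with a neighbouring region are
 * flushed from the page table cache.
 */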
static void rem_um_region(struct user_mode_ctx *uctx, struct vm_region *r)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct pgt_cache *pgt_cache = NULL;
	vaddr_t begin = ROUNDDOWN(r->va, CORE_MMU_PGDIR_SIZE);
	vaddr_t last = ROUNDUP(r->va + r->size, CORE_MMU_PGDIR_SIZE);
	struct vm_region *r2 = NULL;

	if (uctx->ts_ctx == tsd->ctx)
		pgt_cache = &tsd->pgt_cache;

	if (mobj_is_paged(r->mobj)) {
		tee_pager_rem_um_region(uctx, r->va, r->size);
	} else {
		pgt_clear_ctx_range(pgt_cache, uctx->ts_ctx, r->va,
				    r->va + r->size);
		tlbi_mva_range_asid(r->va, r->size, SMALL_PAGE_SIZE,
				    uctx->vm_info.asid);
	}

	r2 = TAILQ_NEXT(r, link);
	if (r2)
		last = MIN(last, ROUNDDOWN(r2->va, CORE_MMU_PGDIR_SIZE));

	r2 = TAILQ_PREV(r, vm_region_head, link);
	if (r2)
		begin = MAX(begin,
			    ROUNDUP(r2->va + r2->size, CORE_MMU_PGDIR_SIZE));

	/* If there are no unused page tables, there's nothing left to do */
	if (begin >= last)
		return;

	pgt_flush_ctx_range(pgt_cache, uctx->ts_ctx, r->va, r->va + r->size);
}

static TEE_Result umap_add_region(struct vm_info *vmi, struct vm_region *reg,
				  size_t pad_begin, size_t pad_end,
				  size_t align)
{
	struct vm_region dummy_first_reg = { };
	struct vm_region dummy_last_reg = { };
	struct vm_region *r = NULL;
	struct vm_region *prev_r = NULL;
	vaddr_t va_range_base = 0;
	size_t va_range_size = 0;
	size_t granul;
	vaddr_t va = 0;
	size_t offs_plus_size = 0;

	core_mmu_get_user_va_range(&va_range_base, &va_range_size);
	dummy_first_reg.va = va_range_base;
	dummy_last_reg.va = va_range_base + va_range_size;

	/* Check alignment, it has to be at least SMALL_PAGE based */
	if ((reg->va | reg->size | pad_begin | pad_end) & SMALL_PAGE_MASK)
		return TEE_ERROR_ACCESS_CONFLICT;

	/* Check that the mobj is defined for the entire range */
	if (ADD_OVERFLOW(reg->offset, reg->size, &offs_plus_size))
		return TEE_ERROR_BAD_PARAMETERS;
	if (offs_plus_size > ROUNDUP(reg->mobj->size, SMALL_PAGE_SIZE))
		return TEE_ERROR_BAD_PARAMETERS;

	granul = MAX(align, SMALL_PAGE_SIZE);
	if (!IS_POWER_OF_TWO(granul))
		return TEE_ERROR_BAD_PARAMETERS;

	prev_r = &dummy_first_reg;
	TAILQ_FOREACH(r, &vmi->regions, link) {
		va = select_va_in_range(prev_r, r, reg, pad_begin, pad_end,
					granul);
		if (va) {
			reg->va = va;
			TAILQ_INSERT_BEFORE(r, reg, link);
			return TEE_SUCCESS;
		}
		prev_r = r;
	}

	r = TAILQ_LAST(&vmi->regions, vm_region_head);
	if (!r)
		r = &dummy_first_reg;
	va = select_va_in_range(r, &dummy_last_reg, reg, pad_begin, pad_end,
				granul);
	if (va) {
		reg->va = va;
		TAILQ_INSERT_TAIL(&vmi->regions, reg, link);
		return TEE_SUCCESS;
	}

	return TEE_ERROR_ACCESS_CONFLICT;
}

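/*
 * vm_map_pad() - map @mobj at @offs into the user VM space described by
 * @uctx.
 *
 * A free virtual address range is selected (honouring *@va when non-zero,
 * the requested padding and alignment), the new region is inserted into
 * the region list, the page tables it needs are reserved and, for paged
 * mobjs, the backing fobj is registered with the pager. If the context is
 * the currently active one the hardware mapping is refreshed before the
 * selected address is returned in *@va.
 */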
TEE_Result vm_map_pad(struct user_mode_ctx *uctx, vaddr_t *va, size_t len,
		      uint32_t prot, uint32_t flags, struct mobj *mobj,
		      size_t offs, size_t pad_begin, size_t pad_end,
		      size_t align)
{
	TEE_Result res = TEE_SUCCESS;
	struct vm_region *reg = NULL;
	uint32_t attr = 0;

	if (prot & ~TEE_MATTR_PROT_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	reg = calloc(1, sizeof(*reg));
	if (!reg)
		return TEE_ERROR_OUT_OF_MEMORY;

	if (!mobj_is_paged(mobj)) {
		uint32_t mem_type = 0;

		res = mobj_get_mem_type(mobj, &mem_type);
		if (res)
			goto err_free_reg;
		attr |= mem_type << TEE_MATTR_MEM_TYPE_SHIFT;
	}
	attr |= TEE_MATTR_VALID_BLOCK;
	if (mobj_is_secure(mobj))
		attr |= TEE_MATTR_SECURE;

	reg->mobj = mobj_get(mobj);
	reg->offset = offs;
	reg->va = *va;
	reg->size = ROUNDUP(len, SMALL_PAGE_SIZE);
	reg->attr = attr | prot;
	reg->flags = flags;

	res = umap_add_region(&uctx->vm_info, reg, pad_begin, pad_end, align);
	if (res)
		goto err_put_mobj;

	res = alloc_pgt(uctx);
	if (res)
		goto err_rem_reg;

	if (mobj_is_paged(mobj)) {
		struct fobj *fobj = mobj_get_fobj(mobj);

		if (!fobj) {
			res = TEE_ERROR_GENERIC;
			goto err_rem_reg;
		}

		res = tee_pager_add_um_region(uctx, reg->va, fobj, prot);
		fobj_put(fobj);
		if (res)
			goto err_rem_reg;
	}

	/*
	 * If the context currently is active, set it again to update
	 * the mapping.
	 */
	if (thread_get_tsd()->ctx == uctx->ts_ctx)
		vm_set_ctx(uctx->ts_ctx);

	*va = reg->va;

	return TEE_SUCCESS;

err_rem_reg:
	TAILQ_REMOVE(&uctx->vm_info.regions, reg, link);
err_put_mobj:
	mobj_put(reg->mobj);
err_free_reg:
	free(reg);
	return res;
}

static struct vm_region *find_vm_region(struct vm_info *vm_info, vaddr_t va)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &vm_info->regions, link)
		if (va >= r->va && va < r->va + r->size)
			return r;

	return NULL;
}

static bool va_range_is_contiguous(struct vm_region *r0, vaddr_t va,
				   size_t len,
				   bool (*cmp_regs)(const struct vm_region *r0,
						    const struct vm_region *r,
						    const struct vm_region *rn))
{
	struct vm_region *r = r0;
	vaddr_t end_va = 0;

	if (ADD_OVERFLOW(va, len, &end_va))
		return false;

	while (true) {
		struct vm_region *r_next = TAILQ_NEXT(r, link);
		vaddr_t r_end_va = r->va + r->size;

		if (r_end_va >= end_va)
			return true;
		if (!r_next)
			return false;
		if (r_end_va != r_next->va)
			return false;
		if (cmp_regs && !cmp_regs(r0, r, r_next))
			return false;
		r = r_next;
	}
}

static TEE_Result split_vm_region(struct user_mode_ctx *uctx,
				  struct vm_region *r, vaddr_t va)
{
	struct vm_region *r2 = NULL;
	size_t diff = va - r->va;

	assert(diff && diff < r->size);

	r2 = calloc(1, sizeof(*r2));
	if (!r2)
		return TEE_ERROR_OUT_OF_MEMORY;

	if (mobj_is_paged(r->mobj)) {
		TEE_Result res = tee_pager_split_um_region(uctx, va);

		if (res) {
			free(r2);
			return res;
		}
	}

	r2->mobj = mobj_get(r->mobj);
	r2->offset = r->offset + diff;
	r2->va = va;
	r2->size = r->size - diff;
	r2->attr = r->attr;
	r2->flags = r->flags;

	r->size = diff;

	TAILQ_INSERT_AFTER(&uctx->vm_info.regions, r, r2, link);

	return TEE_SUCCESS;
}

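/*
 * split_vm_range() - make sure [@va, @va + @len) is covered by complete
 * regions.
 *
 * The first and last regions touched by the range are split as needed so
 * that the range starts and ends on region boundaries. @cmp_regs, when
 * supplied, must accept each pair of adjacent regions for the range to be
 * considered contiguous. On success *@r0_ret points to the first region of
 * the range.
 */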
static TEE_Result split_vm_range(struct user_mode_ctx *uctx, vaddr_t va,
				 size_t len,
				 bool (*cmp_regs)(const struct vm_region *r0,
						  const struct vm_region *r,
						  const struct vm_region *rn),
				 struct vm_region **r0_ret)
{
	TEE_Result res = TEE_SUCCESS;
	struct vm_region *r = NULL;
	vaddr_t end_va = 0;

	if ((va | len) & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	if (ADD_OVERFLOW(va, len, &end_va))
		return TEE_ERROR_BAD_PARAMETERS;

	/*
	 * Find first vm_region in range and check that the entire range is
	 * contiguous.
	 */
	r = find_vm_region(&uctx->vm_info, va);
	if (!r || !va_range_is_contiguous(r, va, len, cmp_regs))
		return TEE_ERROR_BAD_PARAMETERS;

	/*
	 * If needed, split regions so that va and len cover only complete
	 * regions.
	 */
	if (va != r->va) {
		res = split_vm_region(uctx, r, va);
		if (res)
			return res;
		r = TAILQ_NEXT(r, link);
	}

	*r0_ret = r;
	r = find_vm_region(&uctx->vm_info, va + len - 1);
	if (!r)
		return TEE_ERROR_BAD_PARAMETERS;
	if (end_va != r->va + r->size) {
		res = split_vm_region(uctx, r, end_va);
		if (res)
			return res;
	}

	return TEE_SUCCESS;
}

static void merge_vm_range(struct user_mode_ctx *uctx, vaddr_t va, size_t len)
{
	struct vm_region *r_next = NULL;
	struct vm_region *r = NULL;
	vaddr_t end_va = 0;

	if (ADD_OVERFLOW(va, len, &end_va))
		return;

	tee_pager_merge_um_region(uctx, va, len);

	for (r = TAILQ_FIRST(&uctx->vm_info.regions);; r = r_next) {
		r_next = TAILQ_NEXT(r, link);
		if (!r_next)
			return;

		/* Try merging with the region just before va */
		if (r->va + r->size < va)
			continue;

		/*
		 * If r->va is well past our range we're done.
		 * Note that if it's just the page after our range we'll
		 * try to merge.
		 */
		if (r->va > end_va)
			return;

		if (r->va + r->size != r_next->va)
			continue;
		if (r->mobj != r_next->mobj ||
		    r->flags != r_next->flags ||
		    r->attr != r_next->attr)
			continue;
		if (r->offset + r->size != r_next->offset)
			continue;

		TAILQ_REMOVE(&uctx->vm_info.regions, r_next, link);
		r->size += r_next->size;
		mobj_put(r_next->mobj);
		free(r_next);
		r_next = r;
	}
}

static bool cmp_region_for_remap(const struct vm_region *r0,
				 const struct vm_region *r,
				 const struct vm_region *rn)
{
	/*
	 * All the essentials have to match for remap to make sense. The
	 * essentials are mobj/fobj, attr and flags, and the offsets must
	 * be contiguous.
	 *
	 * Note that vm_remap() depends on mobj/fobj to be the same.
	 */
	return r0->flags == r->flags && r0->attr == r->attr &&
	       r0->mobj == r->mobj && rn->offset == r->offset + r->size;
}

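/*
 * vm_remap() - move the mapped range [@old_va, @old_va + @len) to a new
 * virtual address.
 *
 * The range must be covered by regions that agree on mobj, attributes and
 * flags. The regions are unmapped, re-inserted at a free spot (honouring
 * *@new_va and the requested padding) and, for paged memory, re-registered
 * with the pager. If the move fails the regions are restored at their
 * original addresses, panicking if even that cannot be done.
 */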
TEE_Result vm_remap(struct user_mode_ctx *uctx, vaddr_t *new_va, vaddr_t old_va,
		    size_t len, size_t pad_begin, size_t pad_end)
{
	struct vm_region_head regs = TAILQ_HEAD_INITIALIZER(regs);
	TEE_Result res = TEE_SUCCESS;
	struct vm_region *r0 = NULL;
	struct vm_region *r = NULL;
	struct vm_region *r_next = NULL;
	struct vm_region *r_last = NULL;
	struct vm_region *r_first = NULL;
	struct fobj *fobj = NULL;
	vaddr_t next_va = 0;

	assert(thread_get_tsd()->ctx == uctx->ts_ctx);

	if (!len || ((len | old_va) & SMALL_PAGE_MASK))
		return TEE_ERROR_BAD_PARAMETERS;

	res = split_vm_range(uctx, old_va, len, cmp_region_for_remap, &r0);
	if (res)
		return res;

	if (mobj_is_paged(r0->mobj)) {
		fobj = mobj_get_fobj(r0->mobj);
		if (!fobj)
			panic();
	}

	for (r = r0; r; r = r_next) {
		if (r->va + r->size > old_va + len)
			break;
		r_next = TAILQ_NEXT(r, link);
		rem_um_region(uctx, r);
		TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
		TAILQ_INSERT_TAIL(&regs, r, link);
	}

	/*
	 * Synchronize change to translation tables. Even though the pager
	 * case unmaps immediately we may still free a translation table.
	 */
	vm_set_ctx(uctx->ts_ctx);

	r_first = TAILQ_FIRST(&regs);
	while (!TAILQ_EMPTY(&regs)) {
		r = TAILQ_FIRST(&regs);
		TAILQ_REMOVE(&regs, r, link);
		if (r_last) {
			r->va = r_last->va + r_last->size;
			res = umap_add_region(&uctx->vm_info, r, 0, 0, 0);
		} else {
			r->va = *new_va;
			res = umap_add_region(&uctx->vm_info, r, pad_begin,
					      pad_end + len - r->size, 0);
		}
		if (!res)
			r_last = r;
		if (!res)
			res = alloc_pgt(uctx);
		if (fobj && !res)
			res = tee_pager_add_um_region(uctx, r->va, fobj,
						      r->attr);

		if (res) {
			/*
			 * Something went wrong, move all the recently added
			 * regions back to regs for later reinsertion at
			 * the original spot.
			 */
			struct vm_region *r_tmp = NULL;
			struct vm_region *r_stop = NULL;

			if (r != r_last) {
				/*
				 * umap_add_region() failed, move r back to
				 * regs before all the rest are moved back.
				 */
				TAILQ_INSERT_HEAD(&regs, r, link);
			}
			if (r_last)
				r_stop = TAILQ_NEXT(r_last, link);
			for (r = r_first; r != r_stop; r = r_next) {
				r_next = TAILQ_NEXT(r, link);
				TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
				if (r_tmp)
					TAILQ_INSERT_AFTER(&regs, r_tmp, r,
							   link);
				else
					TAILQ_INSERT_HEAD(&regs, r, link);
				r_tmp = r;
			}

			goto err_restore_map;
		}
	}

	fobj_put(fobj);

	vm_set_ctx(uctx->ts_ctx);
	*new_va = r_first->va;

	return TEE_SUCCESS;

err_restore_map:
	next_va = old_va;
	while (!TAILQ_EMPTY(&regs)) {
		r = TAILQ_FIRST(&regs);
		TAILQ_REMOVE(&regs, r, link);
		r->va = next_va;
		next_va += r->size;
		if (umap_add_region(&uctx->vm_info, r, 0, 0, 0))
			panic("Cannot restore mapping");
		if (alloc_pgt(uctx))
			panic("Cannot restore mapping");
		if (fobj && tee_pager_add_um_region(uctx, r->va, fobj, r->attr))
			panic("Cannot restore mapping");
	}
	fobj_put(fobj);
	vm_set_ctx(uctx->ts_ctx);

	return res;
}

static bool cmp_region_for_get_flags(const struct vm_region *r0,
				     const struct vm_region *r,
				     const struct vm_region *rn __unused)
{
	return r0->flags == r->flags;
}

TEE_Result vm_get_flags(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
			uint32_t *flags)
{
	struct vm_region *r = NULL;

	if (!len || ((len | va) & SMALL_PAGE_MASK))
		return TEE_ERROR_BAD_PARAMETERS;

	r = find_vm_region(&uctx->vm_info, va);
	if (!r)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_flags))
		return TEE_ERROR_BAD_PARAMETERS;

	*flags = r->flags;

	return TEE_SUCCESS;
}

static bool cmp_region_for_get_prot(const struct vm_region *r0,
				    const struct vm_region *r,
				    const struct vm_region *rn __unused)
{
	return (r0->attr & TEE_MATTR_PROT_MASK) ==
	       (r->attr & TEE_MATTR_PROT_MASK);
}

TEE_Result vm_get_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
		       uint16_t *prot)
{
	struct vm_region *r = NULL;

	if (!len || ((len | va) & SMALL_PAGE_MASK))
		return TEE_ERROR_BAD_PARAMETERS;

	r = find_vm_region(&uctx->vm_info, va);
	if (!r)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_prot))
		return TEE_ERROR_BAD_PARAMETERS;

	*prot = r->attr & TEE_MATTR_PROT_MASK;

	return TEE_SUCCESS;
}

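/*
 * vm_set_prot() - change the protection bits of the range
 * [@va, @va + @len) to @prot.
 *
 * The range is split into complete regions, the new protection is applied
 * and the hardware mapping is synchronized for non-paged regions. If the
 * range was writeable, the data cache of the affected non-paged regions is
 * cleaned and the instruction cache invalidated, which is what typically
 * matters when write access is replaced by execute access. Adjacent
 * regions that end up identical are merged again.
 */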
TEE_Result vm_set_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
		       uint32_t prot)
{
	TEE_Result res = TEE_SUCCESS;
	struct vm_region *r0 = NULL;
	struct vm_region *r = NULL;
	bool was_writeable = false;
	bool need_sync = false;

	assert(thread_get_tsd()->ctx == uctx->ts_ctx);

	if (prot & ~TEE_MATTR_PROT_MASK || !len)
		return TEE_ERROR_BAD_PARAMETERS;

	res = split_vm_range(uctx, va, len, NULL, &r0);
	if (res)
		return res;

	for (r = r0; r; r = TAILQ_NEXT(r, link)) {
		if (r->va + r->size > va + len)
			break;
		if (r->attr & (TEE_MATTR_UW | TEE_MATTR_PW))
			was_writeable = true;

		if (!mobj_is_paged(r->mobj))
			need_sync = true;

		r->attr &= ~TEE_MATTR_PROT_MASK;
		r->attr |= prot;
	}

	if (need_sync) {
		/* Synchronize changes to translation tables */
		vm_set_ctx(uctx->ts_ctx);
	}

	for (r = r0; r; r = TAILQ_NEXT(r, link)) {
		if (r->va + r->size > va + len)
			break;
		if (mobj_is_paged(r->mobj)) {
			if (!tee_pager_set_um_region_attr(uctx, r->va, r->size,
							  prot))
				panic();
		} else if (was_writeable) {
			cache_op_inner(DCACHE_AREA_CLEAN, (void *)r->va,
				       r->size);
		}
	}
	if (need_sync && was_writeable)
		cache_op_inner(ICACHE_INVALIDATE, NULL, 0);

	merge_vm_range(uctx, va, len);

	return TEE_SUCCESS;
}

static void umap_remove_region(struct vm_info *vmi, struct vm_region *reg)
{
	TAILQ_REMOVE(&vmi->regions, reg, link);
	mobj_put(reg->mobj);
	free(reg);
}

TEE_Result vm_unmap(struct user_mode_ctx *uctx, vaddr_t va, size_t len)
{
	TEE_Result res = TEE_SUCCESS;
	struct vm_region *r = NULL;
	struct vm_region *r_next = NULL;
	size_t end_va = 0;
	size_t unmap_end_va = 0;
	size_t l = 0;

	assert(thread_get_tsd()->ctx == uctx->ts_ctx);

	if (ROUNDUP_OVERFLOW(len, SMALL_PAGE_SIZE, &l))
		return TEE_ERROR_BAD_PARAMETERS;

	if (!l || (va & SMALL_PAGE_MASK))
		return TEE_ERROR_BAD_PARAMETERS;

	if (ADD_OVERFLOW(va, l, &end_va))
		return TEE_ERROR_BAD_PARAMETERS;

	res = split_vm_range(uctx, va, l, NULL, &r);
	if (res)
		return res;

	while (true) {
		r_next = TAILQ_NEXT(r, link);
		unmap_end_va = r->va + r->size;
		rem_um_region(uctx, r);
		umap_remove_region(&uctx->vm_info, r);
		if (!r_next || unmap_end_va == end_va)
			break;
		r = r_next;
	}

	return TEE_SUCCESS;
}

static TEE_Result map_kinit(struct user_mode_ctx *uctx)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj *mobj = NULL;
	size_t offs = 0;
	vaddr_t va = 0;
	size_t sz = 0;
	uint32_t prot = 0;

	thread_get_user_kcode(&mobj, &offs, &va, &sz);
	if (sz) {
		prot = TEE_MATTR_PRX;
		if (IS_ENABLED(CFG_CORE_BTI))
			prot |= TEE_MATTR_GUARDED;
		res = vm_map(uctx, &va, sz, prot, VM_FLAG_PERMANENT,
			     mobj, offs);
		if (res)
			return res;
	}

	thread_get_user_kdata(&mobj, &offs, &va, &sz);
	if (sz)
		return vm_map(uctx, &va, sz, TEE_MATTR_PRW, VM_FLAG_PERMANENT,
			      mobj, offs);

	return TEE_SUCCESS;
}

TEE_Result vm_info_init(struct user_mode_ctx *uctx)
{
	TEE_Result res;
	uint32_t asid = asid_alloc();

	if (!asid) {
		DMSG("Failed to allocate ASID");
		return TEE_ERROR_GENERIC;
	}

	memset(&uctx->vm_info, 0, sizeof(uctx->vm_info));
	TAILQ_INIT(&uctx->vm_info.regions);
	uctx->vm_info.asid = asid;

	res = map_kinit(uctx);
	if (res)
		vm_info_final(uctx);
	return res;
}

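/*
 * vm_clean_param() - unmap all regions carrying VM_FLAG_EPHEMERAL, i.e.
 * the memref parameters that vm_map_param() mapped for the current
 * invocation.
 */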
void vm_clean_param(struct user_mode_ctx *uctx)
{
	struct vm_region *next_r;
	struct vm_region *r;

	TAILQ_FOREACH_SAFE(r, &uctx->vm_info.regions, link, next_r) {
		if (r->flags & VM_FLAG_EPHEMERAL) {
			rem_um_region(uctx, r);
			umap_remove_region(&uctx->vm_info, r);
		}
	}
}

static void check_param_map_empty(struct user_mode_ctx *uctx __maybe_unused)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &uctx->vm_info.regions, link)
		assert(!(r->flags & VM_FLAG_EPHEMERAL));
}

static TEE_Result param_mem_to_user_va(struct user_mode_ctx *uctx,
				       struct param_mem *mem, void **user_va)
{
	struct vm_region *region = NULL;

	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
		vaddr_t va = 0;
		size_t phys_offs = 0;

		if (!(region->flags & VM_FLAG_EPHEMERAL))
			continue;
		if (mem->mobj != region->mobj)
			continue;

		phys_offs = mobj_get_phys_offs(mem->mobj,
					       CORE_MMU_USER_PARAM_SIZE);
		phys_offs += mem->offs;
		if (phys_offs < region->offset)
			continue;
		if (phys_offs >= (region->offset + region->size))
			continue;
		va = region->va + phys_offs - region->offset;
		*user_va = (void *)va;
		return TEE_SUCCESS;
	}
	return TEE_ERROR_GENERIC;
}

static int cmp_param_mem(const void *a0, const void *a1)
{
	const struct param_mem *m1 = a1;
	const struct param_mem *m0 = a0;
	int ret;

	/* Make sure that invalid param_mem are placed last in the array */
	if (!m0->mobj && !m1->mobj)
		return 0;
	if (!m0->mobj)
		return 1;
	if (!m1->mobj)
		return -1;

	ret = CMP_TRILEAN(mobj_is_secure(m0->mobj), mobj_is_secure(m1->mobj));
	if (ret)
		return ret;

	ret = CMP_TRILEAN((vaddr_t)m0->mobj, (vaddr_t)m1->mobj);
	if (ret)
		return ret;

	ret = CMP_TRILEAN(m0->offs, m1->offs);
	if (ret)
		return ret;

	return CMP_TRILEAN(m0->size, m1->size);
}

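/*
 * vm_map_param() - map the memref parameters in @param and return their
 * user virtual addresses in @param_va.
 *
 * The parameters are normalized to CORE_MMU_USER_PARAM_SIZE granularity,
 * sorted with cmp_param_mem() and overlapping or adjacent entries backed
 * by the same mobj are merged, so each distinct buffer is mapped only
 * once. The regions are mapped with VM_FLAG_EPHEMERAL | VM_FLAG_SHAREABLE
 * and are expected to be removed again with vm_clean_param().
 */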
TEE_Result vm_map_param(struct user_mode_ctx *uctx, struct tee_ta_param *param,
			void *param_va[TEE_NUM_PARAMS])
{
	TEE_Result res = TEE_SUCCESS;
	size_t n;
	size_t m;
	struct param_mem mem[TEE_NUM_PARAMS];

	memset(mem, 0, sizeof(mem));
	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		size_t phys_offs;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		phys_offs = mobj_get_phys_offs(param->u[n].mem.mobj,
					       CORE_MMU_USER_PARAM_SIZE);
		mem[n].mobj = param->u[n].mem.mobj;
		mem[n].offs = ROUNDDOWN(phys_offs + param->u[n].mem.offs,
					CORE_MMU_USER_PARAM_SIZE);
		mem[n].size = ROUNDUP(phys_offs + param->u[n].mem.offs -
				      mem[n].offs + param->u[n].mem.size,
				      CORE_MMU_USER_PARAM_SIZE);
		/*
		 * For size 0 (raw pointer parameter), add minimum size
		 * value to allow address to be mapped
		 */
		if (!mem[n].size)
			mem[n].size = CORE_MMU_USER_PARAM_SIZE;
	}

	/*
	 * Sort arguments so NULL mobj is last, secure mobjs first, then by
	 * mobj pointer value since those entries can't be merged either,
	 * finally by offset.
	 *
	 * This should result in a list where all mergeable entries are
	 * next to each other and unused/invalid entries are at the end.
	 */
	qsort(mem, TEE_NUM_PARAMS, sizeof(struct param_mem), cmp_param_mem);

	for (n = 1, m = 0; n < TEE_NUM_PARAMS && mem[n].mobj; n++) {
		if (mem[n].mobj == mem[m].mobj &&
		    (mem[n].offs == (mem[m].offs + mem[m].size) ||
		     core_is_buffer_intersect(mem[m].offs, mem[m].size,
					      mem[n].offs, mem[n].size))) {
			mem[m].size = mem[n].offs + mem[n].size - mem[m].offs;
			continue;
		}
		m++;
		if (n != m)
			mem[m] = mem[n];
	}
	/*
	 * We'd like 'm' to be the number of valid entries. Here 'm' is the
	 * index of the last valid entry if the first entry is valid, else
	 * 0.
	 */
	if (mem[0].mobj)
		m++;

	check_param_map_empty(uctx);

	for (n = 0; n < m; n++) {
		vaddr_t va = 0;

		res = vm_map(uctx, &va, mem[n].size,
			     TEE_MATTR_PRW | TEE_MATTR_URW,
			     VM_FLAG_EPHEMERAL | VM_FLAG_SHAREABLE,
			     mem[n].mobj, mem[n].offs);
		if (res)
			goto out;
	}

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (!param->u[n].mem.mobj)
			continue;

		res = param_mem_to_user_va(uctx, &param->u[n].mem,
					   param_va + n);
		if (res != TEE_SUCCESS)
			goto out;
	}

	res = alloc_pgt(uctx);
out:
	if (res)
		vm_clean_param(uctx);

	return res;
}

TEE_Result vm_add_rwmem(struct user_mode_ctx *uctx, struct mobj *mobj,
			vaddr_t *va)
{
	TEE_Result res = TEE_SUCCESS;
	struct vm_region *reg = NULL;

	if (!mobj_is_secure(mobj) || !mobj_is_paged(mobj))
		return TEE_ERROR_BAD_PARAMETERS;

	reg = calloc(1, sizeof(*reg));
	if (!reg)
		return TEE_ERROR_OUT_OF_MEMORY;

	reg->mobj = mobj;
	reg->offset = 0;
	reg->va = 0;
	reg->size = ROUNDUP(mobj->size, SMALL_PAGE_SIZE);
	reg->attr = TEE_MATTR_SECURE;

	res = umap_add_region(&uctx->vm_info, reg, 0, 0, 0);
	if (res) {
		free(reg);
		return res;
	}

	res = alloc_pgt(uctx);
	if (res)
		umap_remove_region(&uctx->vm_info, reg);
	else
		*va = reg->va;

	return res;
}

void vm_rem_rwmem(struct user_mode_ctx *uctx, struct mobj *mobj, vaddr_t va)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
		if (r->mobj == mobj && r->va == va) {
			rem_um_region(uctx, r);
			umap_remove_region(&uctx->vm_info, r);
			return;
		}
	}
}

void vm_info_final(struct user_mode_ctx *uctx)
{
	if (!uctx->vm_info.asid)
		return;

	/* Clear MMU entries to avoid clash when asid is reused */
	tlbi_asid(uctx->vm_info.asid);

	asid_free(uctx->vm_info.asid);
	while (!TAILQ_EMPTY(&uctx->vm_info.regions))
		umap_remove_region(&uctx->vm_info,
				   TAILQ_FIRST(&uctx->vm_info.regions));
	memset(&uctx->vm_info, 0, sizeof(uctx->vm_info));
}

/* Return true only if buffer fits inside TA private memory */
bool vm_buf_is_inside_um_private(const struct user_mode_ctx *uctx,
				 const void *va, size_t size)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
		if (r->flags & VM_FLAGS_NONPRIV)
			continue;
		if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size))
			return true;
	}

	return false;
}

/* Return true only if buffer intersects TA private memory */
bool vm_buf_intersects_um_private(const struct user_mode_ctx *uctx,
				  const void *va, size_t size)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
		if (r->flags & VM_FLAGS_NONPRIV)
			continue;
		if (core_is_buffer_intersect((vaddr_t)va, size, r->va, r->size))
			return true;
	}

	return false;
}

TEE_Result vm_buf_to_mboj_offs(const struct user_mode_ctx *uctx,
			       const void *va, size_t size,
			       struct mobj **mobj, size_t *offs)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
		if (!r->mobj)
			continue;
		if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size)) {
			size_t poffs;

			poffs = mobj_get_phys_offs(r->mobj,
						   CORE_MMU_USER_PARAM_SIZE);
			*mobj = r->mobj;
			*offs = (vaddr_t)va - r->va + r->offset - poffs;
			return TEE_SUCCESS;
		}
	}

	return TEE_ERROR_BAD_PARAMETERS;
}

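/*
 * tee_mmu_user_va2pa_attr() - translate the user virtual address @ua into
 * a physical address and/or the attributes of the region mapping it.
 *
 * The lookup walks the region list; the physical address is resolved
 * through the backing mobj at the granule covering @ua (at least
 * SMALL_PAGE_SIZE) with the offset-in-granule of @ua added back. Returns
 * TEE_ERROR_ACCESS_DENIED if @ua is not mapped.
 */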
static TEE_Result tee_mmu_user_va2pa_attr(const struct user_mode_ctx *uctx,
					  void *ua, paddr_t *pa, uint32_t *attr)
{
	struct vm_region *region = NULL;

	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
		if (!core_is_buffer_inside((vaddr_t)ua, 1, region->va,
					   region->size))
			continue;

		if (pa) {
			TEE_Result res;
			paddr_t p;
			size_t offset;
			size_t granule;

			/*
			 * mobj and input user address may each include
			 * a specific offset-in-granule position.
			 * Drop both to get target physical page base
			 * address then apply only user address
			 * offset-in-granule.
			 * Mapping lowest granule is the small page.
			 */
			granule = MAX(region->mobj->phys_granule,
				      (size_t)SMALL_PAGE_SIZE);
			assert(!granule || IS_POWER_OF_TWO(granule));

			offset = region->offset +
				 ROUNDDOWN((vaddr_t)ua - region->va, granule);

			res = mobj_get_pa(region->mobj, offset, granule, &p);
			if (res != TEE_SUCCESS)
				return res;

			*pa = p | ((vaddr_t)ua & (granule - 1));
		}
		if (attr)
			*attr = region->attr;

		return TEE_SUCCESS;
	}

	return TEE_ERROR_ACCESS_DENIED;
}

TEE_Result vm_va2pa(const struct user_mode_ctx *uctx, void *ua, paddr_t *pa)
{
	return tee_mmu_user_va2pa_attr(uctx, ua, pa, NULL);
}

void *vm_pa2va(const struct user_mode_ctx *uctx, paddr_t pa, size_t pa_size)
{
	paddr_t p = 0;
	struct vm_region *region = NULL;

	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
		size_t granule = 0;
		size_t size = 0;
		size_t ofs = 0;

		/* pa2va is expected only for memory tracked through mobj */
		if (!region->mobj)
			continue;

		/* Physically granulated memory object must be scanned */
		granule = region->mobj->phys_granule;
		assert(!granule || IS_POWER_OF_TWO(granule));

		for (ofs = region->offset; ofs < region->size; ofs += size) {

			if (granule) {
				/* From current offset to buffer/granule end */
				size = granule - (ofs & (granule - 1));

				if (size > (region->size - ofs))
					size = region->size - ofs;
			} else {
				size = region->size;
			}

			if (mobj_get_pa(region->mobj, ofs, granule, &p))
				continue;

			if (core_is_buffer_inside(pa, pa_size, p, size)) {
				/* Remove region offset (mobj phys offset) */
				ofs -= region->offset;
				/* Get offset-in-granule */
				p = pa - p;

				return (void *)(region->va + ofs + (vaddr_t)p);
			}
		}
	}

	return NULL;
}

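/*
 * vm_check_access_rights() - verify that the user address range
 * [@uaddr, @uaddr + @len) is mapped with the access rights requested in
 * @flags (TEE_MEMORY_ACCESS_* bits).
 *
 * Unless TEE_MEMORY_ACCESS_ANY_OWNER is set the range must lie entirely in
 * TA private memory. The range is then checked at each mapping granule
 * against the mapping attributes: secure or non-secure as requested, user
 * read and user write permissions.
 */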
TEE_Result vm_check_access_rights(const struct user_mode_ctx *uctx,
				  uint32_t flags, uaddr_t uaddr, size_t len)
{
	uaddr_t a = 0;
	uaddr_t end_addr = 0;
	size_t addr_incr = MIN(CORE_MMU_USER_CODE_SIZE,
			       CORE_MMU_USER_PARAM_SIZE);

	if (ADD_OVERFLOW(uaddr, len, &end_addr))
		return TEE_ERROR_ACCESS_DENIED;

	if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
	    (flags & TEE_MEMORY_ACCESS_SECURE))
		return TEE_ERROR_ACCESS_DENIED;

	/*
	 * Rely on TA private memory test to check if address range is private
	 * to TA or not.
	 */
	if (!(flags & TEE_MEMORY_ACCESS_ANY_OWNER) &&
	    !vm_buf_is_inside_um_private(uctx, (void *)uaddr, len))
		return TEE_ERROR_ACCESS_DENIED;

	for (a = ROUNDDOWN(uaddr, addr_incr); a < end_addr; a += addr_incr) {
		uint32_t attr;
		TEE_Result res;

		res = tee_mmu_user_va2pa_attr(uctx, (void *)a, NULL, &attr);
		if (res != TEE_SUCCESS)
			return res;

		if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
		    (attr & TEE_MATTR_SECURE))
			return TEE_ERROR_ACCESS_DENIED;

		if ((flags & TEE_MEMORY_ACCESS_SECURE) &&
		    !(attr & TEE_MATTR_SECURE))
			return TEE_ERROR_ACCESS_DENIED;

		if ((flags & TEE_MEMORY_ACCESS_WRITE) && !(attr & TEE_MATTR_UW))
			return TEE_ERROR_ACCESS_DENIED;
		if ((flags & TEE_MEMORY_ACCESS_READ) && !(attr & TEE_MATTR_UR))
			return TEE_ERROR_ACCESS_DENIED;
	}

	return TEE_SUCCESS;
}

void vm_set_ctx(struct ts_ctx *ctx)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	core_mmu_set_user_map(NULL);
	/*
	 * No matter what happens below, the current user TA will not be
	 * current any longer. Make sure pager is in sync with that.
	 * This function has to be called before there's a chance that
	 * pgt_free_unlocked() is called.
	 *
	 * Save translation tables in a cache if it's a user TA.
	 */
	pgt_free(&tsd->pgt_cache, is_user_ta_ctx(tsd->ctx));

	if (is_user_mode_ctx(ctx)) {
		struct core_mmu_user_map map = { };
		struct user_mode_ctx *uctx = to_user_mode_ctx(ctx);

		core_mmu_create_user_map(uctx, &map);
		core_mmu_set_user_map(&map);
		tee_pager_assign_um_tables(uctx);
	}
	tsd->ctx = ctx;
}

struct mobj *vm_get_mobj(struct user_mode_ctx *uctx, vaddr_t va, size_t *len,
			 uint16_t *prot, size_t *offs)
{
	struct vm_region *r = NULL;
	size_t r_offs = 0;

	if (!len || ((*len | va) & SMALL_PAGE_MASK))
		return NULL;

	r = find_vm_region(&uctx->vm_info, va);
	if (!r)
		return NULL;

	r_offs = va - r->va;

	*len = MIN(r->size - r_offs, *len);
	*offs = r->offset + r_offs;
	*prot = r->attr & TEE_MATTR_PROT_MASK;
	return mobj_get(r->mobj);
}