// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2021, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2021, Arm Limited
 */

#include <assert.h>
#include <config.h>
#include <initcall.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tlb_helpers.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/pgt_cache.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu_types.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <stdlib.h>
#include <tee_api_defines_extensions.h>
#include <tee_api_types.h>
#include <trace.h>
#include <types_ext.h>
#include <user_ta_header.h>
#include <util.h>

#ifdef CFG_PL310
#include <kernel/tee_l2cc_mutex.h>
#endif

#define TEE_MMU_UDATA_ATTR		(TEE_MATTR_VALID_BLOCK | \
					 TEE_MATTR_PRW | TEE_MATTR_URW | \
					 TEE_MATTR_SECURE)
#define TEE_MMU_UCODE_ATTR		(TEE_MATTR_VALID_BLOCK | \
					 TEE_MATTR_PRW | TEE_MATTR_URWX | \
					 TEE_MATTR_SECURE)

#define TEE_MMU_UCACHE_DEFAULT_ATTR	(TEE_MATTR_MEM_TYPE_CACHED << \
					 TEE_MATTR_MEM_TYPE_SHIFT)

static vaddr_t select_va_in_range(const struct vm_region *prev_reg,
				  const struct vm_region *next_reg,
				  const struct vm_region *reg,
				  size_t pad_begin, size_t pad_end,
				  size_t granul)
{
	const uint32_t f = VM_FLAG_EPHEMERAL | VM_FLAG_PERMANENT |
			   VM_FLAG_SHAREABLE;
	vaddr_t begin_va = 0;
	vaddr_t end_va = 0;
	size_t pad = 0;

	/*
	 * Insert an unmapped entry to separate regions with differing
	 * VM_FLAG_EPHEMERAL, VM_FLAG_PERMANENT or VM_FLAG_SHAREABLE
	 * bits as they must never be contiguous with another region.
	 */
	if (prev_reg->flags && (prev_reg->flags & f) != (reg->flags & f))
		pad = SMALL_PAGE_SIZE;
	else
		pad = 0;

#ifndef CFG_WITH_LPAE
	if ((prev_reg->attr & TEE_MATTR_SECURE) !=
	    (reg->attr & TEE_MATTR_SECURE))
		granul = CORE_MMU_PGDIR_SIZE;
#endif

	if (ADD_OVERFLOW(prev_reg->va, prev_reg->size, &begin_va) ||
	    ADD_OVERFLOW(begin_va, pad_begin, &begin_va) ||
	    ADD_OVERFLOW(begin_va, pad, &begin_va) ||
	    ROUNDUP_OVERFLOW(begin_va, granul, &begin_va))
		return 0;

	if (reg->va) {
		if (reg->va < begin_va)
			return 0;
		begin_va = reg->va;
	}

	if (next_reg->flags && (next_reg->flags & f) != (reg->flags & f))
		pad = SMALL_PAGE_SIZE;
	else
		pad = 0;

#ifndef CFG_WITH_LPAE
	if ((next_reg->attr & TEE_MATTR_SECURE) !=
	    (reg->attr & TEE_MATTR_SECURE))
		granul = CORE_MMU_PGDIR_SIZE;
#endif
	if (ADD_OVERFLOW(begin_va, reg->size, &end_va) ||
	    ADD_OVERFLOW(end_va, pad_end, &end_va) ||
	    ADD_OVERFLOW(end_va, pad, &end_va) ||
	    ROUNDUP_OVERFLOW(end_va, granul, &end_va))
		return 0;

	if (end_va <= next_reg->va) {
		assert(!reg->va || reg->va == begin_va);
		return begin_va;
	}

	return 0;
}

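/*
 * alloc_pgt() - ensure translation tables are available for @uctx
 *
 * Returns TEE_ERROR_OUT_OF_MEMORY if the page table pool cannot cover the
 * context's current VA range. With CFG_PAGED_USER_TA the tables are also
 * populated right away when @uctx is the currently active context, since
 * the pager will need them shortly.
 */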
static TEE_Result alloc_pgt(struct user_mode_ctx *uctx)
{
	struct thread_specific_data *tsd __maybe_unused;

	if (!pgt_check_avail(uctx)) {
		EMSG("Page tables are not available");
		return TEE_ERROR_OUT_OF_MEMORY;
	}

#ifdef CFG_PAGED_USER_TA
	tsd = thread_get_tsd();
	if (uctx->ts_ctx == tsd->ctx) {
		/*
		 * The supplied uctx is the currently active context,
		 * allocate the page tables too as the pager needs to use
		 * them soon.
		 */
		pgt_get_all(uctx);
	}
#endif

	return TEE_SUCCESS;
}

static void rem_um_region(struct user_mode_ctx *uctx, struct vm_region *r)
{
	vaddr_t begin = ROUNDDOWN(r->va, CORE_MMU_PGDIR_SIZE);
	vaddr_t last = ROUNDUP(r->va + r->size, CORE_MMU_PGDIR_SIZE);
	struct vm_region *r2 = NULL;

	if (mobj_is_paged(r->mobj)) {
		tee_pager_rem_um_region(uctx, r->va, r->size);
	} else {
		pgt_clear_range(uctx, r->va, r->va + r->size);
		tlbi_va_range_asid(r->va, r->size, SMALL_PAGE_SIZE,
				   uctx->vm_info.asid);
	}

	/*
	 * Figure out how much virtual memory on a CORE_MMU_PGDIR_SIZE
	 * granularity can be freed. Only completely unused
	 * CORE_MMU_PGDIR_SIZE ranges can be supplied to pgt_flush_range().
	 *
	 * Note that there is no margin for error here, flushing either too
	 * many or too few translation tables can be fatal.
	 */
	r2 = TAILQ_NEXT(r, link);
	if (r2)
		last = MIN(last, ROUNDDOWN(r2->va, CORE_MMU_PGDIR_SIZE));

	r2 = TAILQ_PREV(r, vm_region_head, link);
	if (r2)
		begin = MAX(begin,
			    ROUNDUP(r2->va + r2->size, CORE_MMU_PGDIR_SIZE));

	if (begin < last)
		pgt_flush_range(uctx, begin, last);
}

static void set_pa_range(struct core_mmu_table_info *ti, vaddr_t va,
			 paddr_t pa, size_t size, uint32_t attr)
{
	unsigned int end = core_mmu_va2idx(ti, va + size);
	unsigned int idx = core_mmu_va2idx(ti, va);

	while (idx < end) {
		core_mmu_set_entry(ti, idx, pa, attr);
		idx++;
		pa += BIT64(ti->shift);
	}
}

static void set_reg_in_table(struct core_mmu_table_info *ti,
			     struct vm_region *r)
{
	vaddr_t va = MAX(r->va, ti->va_base);
	vaddr_t end = MIN(r->va + r->size, ti->va_base + CORE_MMU_PGDIR_SIZE);
	size_t sz = MIN(end - va, mobj_get_phys_granule(r->mobj));
	size_t granule = BIT(ti->shift);
	size_t offset = 0;
	paddr_t pa = 0;

	while (va < end) {
		offset = va - r->va + r->offset;
		if (mobj_get_pa(r->mobj, offset, granule, &pa))
			panic("Failed to get PA");
		set_pa_range(ti, va, pa, sz, r->attr);
		va += sz;
	}
}

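/*
 * set_um_region() - update translation tables for a non-paged region
 *
 * If the page tables of @uctx are all allocated they are updated in place,
 * otherwise only the tables found in the pgt cache list are updated.
 * Paged regions are handled by the pager instead.
 */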
static void set_um_region(struct user_mode_ctx *uctx, struct vm_region *r)
{
	struct pgt *p = SLIST_FIRST(&uctx->pgt_cache);
	struct core_mmu_table_info ti = { };

	assert(!mobj_is_paged(r->mobj));

	core_mmu_set_info_table(&ti, CORE_MMU_PGDIR_LEVEL, 0, NULL);

	if (p) {
		/* All the pgts are already allocated, update in place */
		do {
			ti.va_base = p->vabase;
			ti.table = p->tbl;
			set_reg_in_table(&ti, r);
			p = SLIST_NEXT(p, link);
		} while (p);
	} else {
		/*
		 * We may have a few pgts in the cache list, update the
		 * ones found.
		 */
		for (ti.va_base = ROUNDDOWN(r->va, CORE_MMU_PGDIR_SIZE);
		     ti.va_base < r->va + r->size;
		     ti.va_base += CORE_MMU_PGDIR_SIZE) {
			p = pgt_pop_from_cache_list(ti.va_base, uctx->ts_ctx);
			if (!p)
				continue;
			ti.table = p->tbl;
			set_reg_in_table(&ti, r);
			pgt_push_to_cache_list(p);
		}
	}
}

static TEE_Result umap_add_region(struct vm_info *vmi, struct vm_region *reg,
				  size_t pad_begin, size_t pad_end,
				  size_t align)
{
	struct vm_region dummy_first_reg = { };
	struct vm_region dummy_last_reg = { };
	struct vm_region *r = NULL;
	struct vm_region *prev_r = NULL;
	vaddr_t va_range_base = 0;
	size_t va_range_size = 0;
	size_t granul;
	vaddr_t va = 0;
	size_t offs_plus_size = 0;

	core_mmu_get_user_va_range(&va_range_base, &va_range_size);
	dummy_first_reg.va = va_range_base;
	dummy_last_reg.va = va_range_base + va_range_size;

	/* Check alignment, it has to be at least SMALL_PAGE based */
	if ((reg->va | reg->size | pad_begin | pad_end) & SMALL_PAGE_MASK)
		return TEE_ERROR_ACCESS_CONFLICT;

	/* Check that the mobj is defined for the entire range */
	if (ADD_OVERFLOW(reg->offset, reg->size, &offs_plus_size))
		return TEE_ERROR_BAD_PARAMETERS;
	if (offs_plus_size > ROUNDUP(reg->mobj->size, SMALL_PAGE_SIZE))
		return TEE_ERROR_BAD_PARAMETERS;

	granul = MAX(align, SMALL_PAGE_SIZE);
	if (!IS_POWER_OF_TWO(granul))
		return TEE_ERROR_BAD_PARAMETERS;

	prev_r = &dummy_first_reg;
	TAILQ_FOREACH(r, &vmi->regions, link) {
		va = select_va_in_range(prev_r, r, reg, pad_begin, pad_end,
					granul);
		if (va) {
			reg->va = va;
			TAILQ_INSERT_BEFORE(r, reg, link);
			return TEE_SUCCESS;
		}
		prev_r = r;
	}

	r = TAILQ_LAST(&vmi->regions, vm_region_head);
	if (!r)
		r = &dummy_first_reg;
	va = select_va_in_range(r, &dummy_last_reg, reg, pad_begin, pad_end,
				granul);
	if (va) {
		reg->va = va;
		TAILQ_INSERT_TAIL(&vmi->regions, reg, link);
		return TEE_SUCCESS;
	}

	return TEE_ERROR_ACCESS_CONFLICT;
}

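/*
 * vm_map_pad() - map a memory object into a user mode context
 *
 * If *@va is non-zero the region is placed at that address, otherwise a
 * free spot in the user VA range is selected, optionally surrounded by
 * @pad_begin/@pad_end bytes of unmapped space and aligned to @align. On
 * success *@va holds the selected virtual address.
 *
 * Illustrative sketch only (assumes 'm' is a valid struct mobj *): the
 * vm_map() wrapper can be used when no padding or alignment is needed:
 *
 *	vaddr_t va = 0;
 *
 *	res = vm_map(uctx, &va, 2 * SMALL_PAGE_SIZE,
 *		     TEE_MATTR_PRW | TEE_MATTR_URW, 0, m, 0);
 */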
TEE_Result vm_map_pad(struct user_mode_ctx *uctx, vaddr_t *va, size_t len,
		      uint32_t prot, uint32_t flags, struct mobj *mobj,
		      size_t offs, size_t pad_begin, size_t pad_end,
		      size_t align)
{
	TEE_Result res = TEE_SUCCESS;
	struct vm_region *reg = NULL;
	uint32_t attr = 0;

	if (prot & ~TEE_MATTR_PROT_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	reg = calloc(1, sizeof(*reg));
	if (!reg)
		return TEE_ERROR_OUT_OF_MEMORY;

	if (!mobj_is_paged(mobj)) {
		uint32_t mem_type = 0;

		res = mobj_get_mem_type(mobj, &mem_type);
		if (res)
			goto err_free_reg;
		attr |= mem_type << TEE_MATTR_MEM_TYPE_SHIFT;
	}
	attr |= TEE_MATTR_VALID_BLOCK;
	if (mobj_is_secure(mobj))
		attr |= TEE_MATTR_SECURE;

	reg->mobj = mobj_get(mobj);
	reg->offset = offs;
	reg->va = *va;
	reg->size = ROUNDUP(len, SMALL_PAGE_SIZE);
	reg->attr = attr | prot;
	reg->flags = flags;

	res = umap_add_region(&uctx->vm_info, reg, pad_begin, pad_end, align);
	if (res)
		goto err_put_mobj;

	res = alloc_pgt(uctx);
	if (res)
		goto err_rem_reg;

	if (mobj_is_paged(mobj)) {
		struct fobj *fobj = mobj_get_fobj(mobj);

		if (!fobj) {
			res = TEE_ERROR_GENERIC;
			goto err_rem_reg;
		}

		res = tee_pager_add_um_region(uctx, reg->va, fobj, prot);
		fobj_put(fobj);
		if (res)
			goto err_rem_reg;
	} else {
		set_um_region(uctx, reg);
	}

	/*
	 * If the context is currently active, set it again to update
	 * the mapping.
	 */
	if (thread_get_tsd()->ctx == uctx->ts_ctx)
		vm_set_ctx(uctx->ts_ctx);

	*va = reg->va;

	return TEE_SUCCESS;

err_rem_reg:
	TAILQ_REMOVE(&uctx->vm_info.regions, reg, link);
err_put_mobj:
	mobj_put(reg->mobj);
err_free_reg:
	free(reg);
	return res;
}

static struct vm_region *find_vm_region(struct vm_info *vm_info, vaddr_t va)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &vm_info->regions, link)
		if (va >= r->va && va < r->va + r->size)
			return r;

	return NULL;
}

static bool va_range_is_contiguous(struct vm_region *r0, vaddr_t va,
				   size_t len,
				   bool (*cmp_regs)(const struct vm_region *r0,
						    const struct vm_region *r,
						    const struct vm_region *rn))
{
	struct vm_region *r = r0;
	vaddr_t end_va = 0;

	if (ADD_OVERFLOW(va, len, &end_va))
		return false;

	while (true) {
		struct vm_region *r_next = TAILQ_NEXT(r, link);
		vaddr_t r_end_va = r->va + r->size;

		if (r_end_va >= end_va)
			return true;
		if (!r_next)
			return false;
		if (r_end_va != r_next->va)
			return false;
		if (cmp_regs && !cmp_regs(r0, r, r_next))
			return false;
		r = r_next;
	}
}

static TEE_Result split_vm_region(struct user_mode_ctx *uctx,
				  struct vm_region *r, vaddr_t va)
{
	struct vm_region *r2 = NULL;
	size_t diff = va - r->va;

	assert(diff && diff < r->size);

	r2 = calloc(1, sizeof(*r2));
	if (!r2)
		return TEE_ERROR_OUT_OF_MEMORY;

	if (mobj_is_paged(r->mobj)) {
		TEE_Result res = tee_pager_split_um_region(uctx, va);

		if (res) {
			free(r2);
			return res;
		}
	}

	r2->mobj = mobj_get(r->mobj);
	r2->offset = r->offset + diff;
	r2->va = va;
	r2->size = r->size - diff;
	r2->attr = r->attr;
	r2->flags = r->flags;

	r->size = diff;

	TAILQ_INSERT_AFTER(&uctx->vm_info.regions, r, r2, link);

	return TEE_SUCCESS;
}

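/*
 * split_vm_range() - prepare [@va, @va + @len) for region-wise operations
 *
 * Checks that the range is covered by contiguous regions (which must also
 * satisfy @cmp_regs when supplied) and splits the first and last region if
 * needed so that the range starts and ends on region boundaries. On
 * success *@r0_ret points to the first region of the range.
 */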
static TEE_Result split_vm_range(struct user_mode_ctx *uctx, vaddr_t va,
				 size_t len,
				 bool (*cmp_regs)(const struct vm_region *r0,
						  const struct vm_region *r,
						  const struct vm_region *rn),
				 struct vm_region **r0_ret)
{
	TEE_Result res = TEE_SUCCESS;
	struct vm_region *r = NULL;
	vaddr_t end_va = 0;

	if ((va | len) & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	if (ADD_OVERFLOW(va, len, &end_va))
		return TEE_ERROR_BAD_PARAMETERS;

	/*
	 * Find first vm_region in range and check that the entire range is
	 * contiguous.
	 */
	r = find_vm_region(&uctx->vm_info, va);
	if (!r || !va_range_is_contiguous(r, va, len, cmp_regs))
		return TEE_ERROR_BAD_PARAMETERS;

	/*
	 * If needed, split regions so that va and len cover only complete
	 * regions.
	 */
	if (va != r->va) {
		res = split_vm_region(uctx, r, va);
		if (res)
			return res;
		r = TAILQ_NEXT(r, link);
	}

	*r0_ret = r;
	r = find_vm_region(&uctx->vm_info, va + len - 1);
	if (!r)
		return TEE_ERROR_BAD_PARAMETERS;
	if (end_va != r->va + r->size) {
		res = split_vm_region(uctx, r, end_va);
		if (res)
			return res;
	}

	return TEE_SUCCESS;
}

static void merge_vm_range(struct user_mode_ctx *uctx, vaddr_t va, size_t len)
{
	struct vm_region *r_next = NULL;
	struct vm_region *r = NULL;
	vaddr_t end_va = 0;

	if (ADD_OVERFLOW(va, len, &end_va))
		return;

	tee_pager_merge_um_region(uctx, va, len);

	for (r = TAILQ_FIRST(&uctx->vm_info.regions);; r = r_next) {
		r_next = TAILQ_NEXT(r, link);
		if (!r_next)
			return;

		/* Try merging with the region just before va */
		if (r->va + r->size < va)
			continue;

		/*
		 * If r->va is well past our range we're done.
		 * Note that if it's just the page after our range we'll
		 * try to merge.
		 */
		if (r->va > end_va)
			return;

		if (r->va + r->size != r_next->va)
			continue;
		if (r->mobj != r_next->mobj ||
		    r->flags != r_next->flags ||
		    r->attr != r_next->attr)
			continue;
		if (r->offset + r->size != r_next->offset)
			continue;

		TAILQ_REMOVE(&uctx->vm_info.regions, r_next, link);
		r->size += r_next->size;
		mobj_put(r_next->mobj);
		free(r_next);
		r_next = r;
	}
}

static bool cmp_region_for_remap(const struct vm_region *r0,
				 const struct vm_region *r,
				 const struct vm_region *rn)
{
	/*
	 * All the essentials have to match for remap to make sense: the
	 * mobj/fobj, attr and flags must be equal and the offsets must be
	 * contiguous.
	 *
	 * Note that vm_remap() depends on mobj/fobj to be the same.
	 */
	return r0->flags == r->flags && r0->attr == r->attr &&
	       r0->mobj == r->mobj && rn->offset == r->offset + r->size;
}

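/*
 * vm_remap() - move the mapping of [@old_va, @old_va + @len) to a new VA
 *
 * The covered regions are unmapped and reinserted at *@new_va, or at a
 * suitable free address if *@new_va is 0, keeping their mobj/fobj backing
 * and attributes. If reinsertion fails the original mapping is restored
 * and an error is returned.
 */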
TEE_Result vm_remap(struct user_mode_ctx *uctx, vaddr_t *new_va, vaddr_t old_va,
		    size_t len, size_t pad_begin, size_t pad_end)
{
	struct vm_region_head regs = TAILQ_HEAD_INITIALIZER(regs);
	TEE_Result res = TEE_SUCCESS;
	struct vm_region *r0 = NULL;
	struct vm_region *r = NULL;
	struct vm_region *r_next = NULL;
	struct vm_region *r_last = NULL;
	struct vm_region *r_first = NULL;
	struct fobj *fobj = NULL;
	vaddr_t next_va = 0;

	assert(thread_get_tsd()->ctx == uctx->ts_ctx);

	if (!len || ((len | old_va) & SMALL_PAGE_MASK))
		return TEE_ERROR_BAD_PARAMETERS;

	res = split_vm_range(uctx, old_va, len, cmp_region_for_remap, &r0);
	if (res)
		return res;

	if (mobj_is_paged(r0->mobj)) {
		fobj = mobj_get_fobj(r0->mobj);
		if (!fobj)
			panic();
	}

	for (r = r0; r; r = r_next) {
		if (r->va + r->size > old_va + len)
			break;
		r_next = TAILQ_NEXT(r, link);
		rem_um_region(uctx, r);
		TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
		TAILQ_INSERT_TAIL(&regs, r, link);
	}

	/*
	 * Synchronize the change to the translation tables. Even though
	 * the pager case unmaps immediately we may still free a
	 * translation table.
	 */
	vm_set_ctx(uctx->ts_ctx);

	r_first = TAILQ_FIRST(&regs);
	while (!TAILQ_EMPTY(&regs)) {
		r = TAILQ_FIRST(&regs);
		TAILQ_REMOVE(&regs, r, link);
		if (r_last) {
			r->va = r_last->va + r_last->size;
			res = umap_add_region(&uctx->vm_info, r, 0, 0, 0);
		} else {
			r->va = *new_va;
			res = umap_add_region(&uctx->vm_info, r, pad_begin,
					      pad_end + len - r->size, 0);
		}
		if (!res) {
			r_last = r;
			res = alloc_pgt(uctx);
		}
		if (!res) {
			if (!fobj)
				set_um_region(uctx, r);
			else
				res = tee_pager_add_um_region(uctx, r->va, fobj,
							      r->attr);
		}

		if (res) {
			/*
			 * Something went wrong, move all the recently added
			 * regions back to regs for later reinsertion at
			 * the original spot.
			 */
			struct vm_region *r_tmp = NULL;
			struct vm_region *r_stop = NULL;

			if (r != r_last) {
				/*
				 * umap_add_region() failed, move r back to
				 * regs before all the rest are moved back.
				 */
				TAILQ_INSERT_HEAD(&regs, r, link);
			}
			if (r_last)
				r_stop = TAILQ_NEXT(r_last, link);
			for (r = r_first; r != r_stop; r = r_next) {
				r_next = TAILQ_NEXT(r, link);
				TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
				if (r_tmp)
					TAILQ_INSERT_AFTER(&regs, r_tmp, r,
							   link);
				else
					TAILQ_INSERT_HEAD(&regs, r, link);
				r_tmp = r;
			}

			goto err_restore_map;
		}
	}

	fobj_put(fobj);

	vm_set_ctx(uctx->ts_ctx);
	*new_va = r_first->va;

	return TEE_SUCCESS;

err_restore_map:
	next_va = old_va;
	while (!TAILQ_EMPTY(&regs)) {
		r = TAILQ_FIRST(&regs);
		TAILQ_REMOVE(&regs, r, link);
		r->va = next_va;
		next_va += r->size;
		if (umap_add_region(&uctx->vm_info, r, 0, 0, 0))
			panic("Cannot restore mapping");
		if (alloc_pgt(uctx))
			panic("Cannot restore mapping");
		if (fobj) {
			if (tee_pager_add_um_region(uctx, r->va, fobj, r->attr))
				panic("Cannot restore mapping");
		} else {
			set_um_region(uctx, r);
		}
	}
	fobj_put(fobj);
	vm_set_ctx(uctx->ts_ctx);

	return res;
}

static bool cmp_region_for_get_flags(const struct vm_region *r0,
				     const struct vm_region *r,
				     const struct vm_region *rn __unused)
{
	return r0->flags == r->flags;
}

TEE_Result vm_get_flags(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
			uint32_t *flags)
{
	struct vm_region *r = NULL;

	if (!len || ((len | va) & SMALL_PAGE_MASK))
		return TEE_ERROR_BAD_PARAMETERS;

	r = find_vm_region(&uctx->vm_info, va);
	if (!r)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_flags))
		return TEE_ERROR_BAD_PARAMETERS;

	*flags = r->flags;

	return TEE_SUCCESS;
}

static bool cmp_region_for_get_prot(const struct vm_region *r0,
				    const struct vm_region *r,
				    const struct vm_region *rn __unused)
{
	return (r0->attr & TEE_MATTR_PROT_MASK) ==
	       (r->attr & TEE_MATTR_PROT_MASK);
}

TEE_Result vm_get_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
		       uint16_t *prot)
{
	struct vm_region *r = NULL;

	if (!len || ((len | va) & SMALL_PAGE_MASK))
		return TEE_ERROR_BAD_PARAMETERS;

	r = find_vm_region(&uctx->vm_info, va);
	if (!r)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_prot))
		return TEE_ERROR_BAD_PARAMETERS;

	*prot = r->attr & TEE_MATTR_PROT_MASK;

	return TEE_SUCCESS;
}

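/*
 * vm_set_prot() - change the protection of [@va, @va + @len)
 *
 * The range is split into complete regions and the new protection bits are
 * applied, with TLB maintenance for non-paged regions. A second pass does
 * the cache maintenance needed when previously writable memory may now be
 * executed. Finally adjacent compatible regions are merged again.
 */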
TEE_Result vm_set_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
		       uint32_t prot)
{
	TEE_Result res = TEE_SUCCESS;
	struct vm_region *r0 = NULL;
	struct vm_region *r = NULL;
	bool was_writeable = false;
	bool need_sync = false;

	assert(thread_get_tsd()->ctx == uctx->ts_ctx);

	if (prot & ~TEE_MATTR_PROT_MASK || !len)
		return TEE_ERROR_BAD_PARAMETERS;

	res = split_vm_range(uctx, va, len, NULL, &r0);
	if (res)
		return res;

	for (r = r0; r; r = TAILQ_NEXT(r, link)) {
		if (r->va + r->size > va + len)
			break;
		if (r->attr & (TEE_MATTR_UW | TEE_MATTR_PW))
			was_writeable = true;

		r->attr &= ~TEE_MATTR_PROT_MASK;
		r->attr |= prot;

		if (!mobj_is_paged(r->mobj)) {
			need_sync = true;
			set_um_region(uctx, r);
			/*
			 * Normally when set_um_region() is called we
			 * change from no mapping to some mapping, but in
			 * this case we change the permissions on an
			 * already present mapping so some TLB invalidation
			 * is needed. We also depend on the dsb() performed
			 * as part of the TLB invalidation.
			 */
			tlbi_va_range_asid(r->va, r->size, SMALL_PAGE_SIZE,
					   uctx->vm_info.asid);
		}
	}

	for (r = r0; r; r = TAILQ_NEXT(r, link)) {
		if (r->va + r->size > va + len)
			break;
		if (mobj_is_paged(r->mobj)) {
			if (!tee_pager_set_um_region_attr(uctx, r->va, r->size,
							  prot))
				panic();
		} else if (was_writeable) {
			cache_op_inner(DCACHE_AREA_CLEAN, (void *)r->va,
				       r->size);
		}

	}
	if (need_sync && was_writeable)
		cache_op_inner(ICACHE_INVALIDATE, NULL, 0);

	merge_vm_range(uctx, va, len);

	return TEE_SUCCESS;
}

static void umap_remove_region(struct vm_info *vmi, struct vm_region *reg)
{
	TAILQ_REMOVE(&vmi->regions, reg, link);
	mobj_put(reg->mobj);
	free(reg);
}

TEE_Result vm_unmap(struct user_mode_ctx *uctx, vaddr_t va, size_t len)
{
	TEE_Result res = TEE_SUCCESS;
	struct vm_region *r = NULL;
	struct vm_region *r_next = NULL;
	size_t end_va = 0;
	size_t unmap_end_va = 0;
	size_t l = 0;

	assert(thread_get_tsd()->ctx == uctx->ts_ctx);

	if (ROUNDUP_OVERFLOW(len, SMALL_PAGE_SIZE, &l))
		return TEE_ERROR_BAD_PARAMETERS;

	if (!l || (va & SMALL_PAGE_MASK))
		return TEE_ERROR_BAD_PARAMETERS;

	if (ADD_OVERFLOW(va, l, &end_va))
		return TEE_ERROR_BAD_PARAMETERS;

	res = split_vm_range(uctx, va, l, NULL, &r);
	if (res)
		return res;

	while (true) {
		r_next = TAILQ_NEXT(r, link);
		unmap_end_va = r->va + r->size;
		rem_um_region(uctx, r);
		umap_remove_region(&uctx->vm_info, r);
		if (!r_next || unmap_end_va == end_va)
			break;
		r = r_next;
	}

	return TEE_SUCCESS;
}

static TEE_Result map_kinit(struct user_mode_ctx *uctx)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj *mobj = NULL;
	size_t offs = 0;
	vaddr_t va = 0;
	size_t sz = 0;
	uint32_t prot = 0;

	thread_get_user_kcode(&mobj, &offs, &va, &sz);
	if (sz) {
		prot = TEE_MATTR_PRX;
		if (IS_ENABLED(CFG_CORE_BTI))
			prot |= TEE_MATTR_GUARDED;
		res = vm_map(uctx, &va, sz, prot, VM_FLAG_PERMANENT,
			     mobj, offs);
		if (res)
			return res;
	}

	thread_get_user_kdata(&mobj, &offs, &va, &sz);
	if (sz)
		return vm_map(uctx, &va, sz, TEE_MATTR_PRW, VM_FLAG_PERMANENT,
			      mobj, offs);

	return TEE_SUCCESS;
}

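/*
 * vm_info_init() - initialize the VM state of a user mode context
 *
 * Allocates an ASID, resets the region list and pgt cache, and maps the
 * permanent kernel code/data needed to enter and leave user mode. On
 * failure the partially initialized state is released again.
 */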
TEE_Result vm_info_init(struct user_mode_ctx *uctx, struct ts_ctx *ts_ctx)
{
	TEE_Result res;
	uint32_t asid = asid_alloc();

	if (!asid) {
		DMSG("Failed to allocate ASID");
		return TEE_ERROR_GENERIC;
	}

	memset(uctx, 0, sizeof(*uctx));
	TAILQ_INIT(&uctx->vm_info.regions);
	SLIST_INIT(&uctx->pgt_cache);
	uctx->vm_info.asid = asid;
	uctx->ts_ctx = ts_ctx;

	res = map_kinit(uctx);
	if (res)
		vm_info_final(uctx);
	return res;
}

void vm_clean_param(struct user_mode_ctx *uctx)
{
	struct vm_region *next_r;
	struct vm_region *r;

	TAILQ_FOREACH_SAFE(r, &uctx->vm_info.regions, link, next_r) {
		if (r->flags & VM_FLAG_EPHEMERAL) {
			rem_um_region(uctx, r);
			umap_remove_region(&uctx->vm_info, r);
		}
	}
}

static void check_param_map_empty(struct user_mode_ctx *uctx __maybe_unused)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &uctx->vm_info.regions, link)
		assert(!(r->flags & VM_FLAG_EPHEMERAL));
}

static TEE_Result param_mem_to_user_va(struct user_mode_ctx *uctx,
				       struct param_mem *mem, void **user_va)
{
	struct vm_region *region = NULL;

	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
		vaddr_t va = 0;
		size_t phys_offs = 0;

		if (!(region->flags & VM_FLAG_EPHEMERAL))
			continue;
		if (mem->mobj != region->mobj)
			continue;

		phys_offs = mobj_get_phys_offs(mem->mobj,
					       CORE_MMU_USER_PARAM_SIZE);
		phys_offs += mem->offs;
		if (phys_offs < region->offset)
			continue;
		if (phys_offs >= (region->offset + region->size))
			continue;
		va = region->va + phys_offs - region->offset;
		*user_va = (void *)va;
		return TEE_SUCCESS;
	}
	return TEE_ERROR_GENERIC;
}

static int cmp_param_mem(const void *a0, const void *a1)
{
	const struct param_mem *m1 = a1;
	const struct param_mem *m0 = a0;
	int ret;

	/* Make sure that invalid param_mem are placed last in the array */
	if (!m0->mobj && !m1->mobj)
		return 0;
	if (!m0->mobj)
		return 1;
	if (!m1->mobj)
		return -1;

	ret = CMP_TRILEAN(mobj_is_secure(m0->mobj), mobj_is_secure(m1->mobj));
	if (ret)
		return ret;

	ret = CMP_TRILEAN((vaddr_t)m0->mobj, (vaddr_t)m1->mobj);
	if (ret)
		return ret;

	ret = CMP_TRILEAN(m0->offs, m1->offs);
	if (ret)
		return ret;

	return CMP_TRILEAN(m0->size, m1->size);
}

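/*
 * vm_map_param() - map the memref parameters of an invocation
 *
 * Each memref is rounded to CORE_MMU_USER_PARAM_SIZE granularity, adjacent
 * or overlapping entries backed by the same mobj are merged, and the
 * result is mapped as ephemeral, shareable read/write regions. The user
 * virtual address of each original parameter is returned in @param_va.
 */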
TEE_Result vm_map_param(struct user_mode_ctx *uctx, struct tee_ta_param *param,
			void *param_va[TEE_NUM_PARAMS])
{
	TEE_Result res = TEE_SUCCESS;
	size_t n;
	size_t m;
	struct param_mem mem[TEE_NUM_PARAMS];

	memset(mem, 0, sizeof(mem));
	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		size_t phys_offs;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		phys_offs = mobj_get_phys_offs(param->u[n].mem.mobj,
					       CORE_MMU_USER_PARAM_SIZE);
		mem[n].mobj = param->u[n].mem.mobj;
		mem[n].offs = ROUNDDOWN(phys_offs + param->u[n].mem.offs,
					CORE_MMU_USER_PARAM_SIZE);
		mem[n].size = ROUNDUP(phys_offs + param->u[n].mem.offs -
				      mem[n].offs + param->u[n].mem.size,
				      CORE_MMU_USER_PARAM_SIZE);
		/*
		 * For size 0 (raw pointer parameter), add minimum size
		 * value to allow address to be mapped
		 */
		if (!mem[n].size)
			mem[n].size = CORE_MMU_USER_PARAM_SIZE;
	}

	/*
	 * Sort arguments so NULL mobj is last, secure mobjs first, then by
	 * mobj pointer value since those entries can't be merged either,
	 * finally by offset.
	 *
	 * This should result in a list where all mergeable entries are
	 * next to each other and unused/invalid entries are at the end.
	 */
	qsort(mem, TEE_NUM_PARAMS, sizeof(struct param_mem), cmp_param_mem);

	for (n = 1, m = 0; n < TEE_NUM_PARAMS && mem[n].mobj; n++) {
		if (mem[n].mobj == mem[m].mobj &&
		    (mem[n].offs == (mem[m].offs + mem[m].size) ||
		     core_is_buffer_intersect(mem[m].offs, mem[m].size,
					      mem[n].offs, mem[n].size))) {
			mem[m].size = mem[n].offs + mem[n].size - mem[m].offs;
			continue;
		}
		m++;
		if (n != m)
			mem[m] = mem[n];
	}
	/*
	 * We'd like 'm' to be the number of valid entries. Here 'm' is the
	 * index of the last valid entry if the first entry is valid, else
	 * 0.
	 */
	if (mem[0].mobj)
		m++;

	check_param_map_empty(uctx);

	for (n = 0; n < m; n++) {
		vaddr_t va = 0;

		res = vm_map(uctx, &va, mem[n].size,
			     TEE_MATTR_PRW | TEE_MATTR_URW,
			     VM_FLAG_EPHEMERAL | VM_FLAG_SHAREABLE,
			     mem[n].mobj, mem[n].offs);
		if (res)
			goto out;
	}

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (!param->u[n].mem.mobj)
			continue;

		res = param_mem_to_user_va(uctx, &param->u[n].mem,
					   param_va + n);
		if (res != TEE_SUCCESS)
			goto out;
	}

	res = alloc_pgt(uctx);
out:
	if (res)
		vm_clean_param(uctx);

	return res;
}

TEE_Result vm_add_rwmem(struct user_mode_ctx *uctx, struct mobj *mobj,
			vaddr_t *va)
{
	TEE_Result res = TEE_SUCCESS;
	struct vm_region *reg = NULL;

	if (!mobj_is_secure(mobj) || !mobj_is_paged(mobj))
		return TEE_ERROR_BAD_PARAMETERS;

	reg = calloc(1, sizeof(*reg));
	if (!reg)
		return TEE_ERROR_OUT_OF_MEMORY;

	reg->mobj = mobj;
	reg->offset = 0;
	reg->va = 0;
	reg->size = ROUNDUP(mobj->size, SMALL_PAGE_SIZE);
	reg->attr = TEE_MATTR_SECURE;

	res = umap_add_region(&uctx->vm_info, reg, 0, 0, 0);
	if (res) {
		free(reg);
		return res;
	}

	res = alloc_pgt(uctx);
	if (res)
		umap_remove_region(&uctx->vm_info, reg);
	else
		*va = reg->va;

	return res;
}

void vm_rem_rwmem(struct user_mode_ctx *uctx, struct mobj *mobj, vaddr_t va)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
		if (r->mobj == mobj && r->va == va) {
			rem_um_region(uctx, r);
			umap_remove_region(&uctx->vm_info, r);
			return;
		}
	}
}

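/*
 * vm_info_final() - release the VM state of a user mode context
 *
 * Flushes the page tables and paged regions of @uctx, invalidates TLB
 * entries for its ASID before releasing it and frees all remaining
 * regions. Does nothing if no ASID was ever allocated.
 */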
void vm_info_final(struct user_mode_ctx *uctx)
{
	if (!uctx->vm_info.asid)
		return;

	pgt_flush(uctx);
	tee_pager_rem_um_regions(uctx);

	/* clear MMU entries to avoid clash when asid is reused */
	tlbi_asid(uctx->vm_info.asid);

	asid_free(uctx->vm_info.asid);
	uctx->vm_info.asid = 0;

	while (!TAILQ_EMPTY(&uctx->vm_info.regions))
		umap_remove_region(&uctx->vm_info,
				   TAILQ_FIRST(&uctx->vm_info.regions));
}

/* return true only if buffer fits inside TA private memory */
bool vm_buf_is_inside_um_private(const struct user_mode_ctx *uctx,
				 const void *va, size_t size)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
		if (r->flags & VM_FLAGS_NONPRIV)
			continue;
		if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size))
			return true;
	}

	return false;
}

/* return true only if buffer intersects TA private memory */
bool vm_buf_intersects_um_private(const struct user_mode_ctx *uctx,
				  const void *va, size_t size)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
		if (r->flags & VM_FLAGS_NONPRIV)
			continue;
		if (core_is_buffer_intersect((vaddr_t)va, size, r->va, r->size))
			return true;
	}

	return false;
}

TEE_Result vm_buf_to_mboj_offs(const struct user_mode_ctx *uctx,
			       const void *va, size_t size,
			       struct mobj **mobj, size_t *offs)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
		if (!r->mobj)
			continue;
		if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size)) {
			size_t poffs;

			poffs = mobj_get_phys_offs(r->mobj,
						   CORE_MMU_USER_PARAM_SIZE);
			*mobj = r->mobj;
			*offs = (vaddr_t)va - r->va + r->offset - poffs;
			return TEE_SUCCESS;
		}
	}

	return TEE_ERROR_BAD_PARAMETERS;
}

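/*
 * tee_mmu_user_va2pa_attr() - translate a user virtual address of @uctx
 *
 * Looks up the region covering @ua and, when requested, returns the
 * corresponding physical address in *@pa and the mapping attributes in
 * *@attr. Returns TEE_ERROR_ACCESS_DENIED if @ua is not mapped.
 */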
static TEE_Result tee_mmu_user_va2pa_attr(const struct user_mode_ctx *uctx,
					  void *ua, paddr_t *pa, uint32_t *attr)
{
	struct vm_region *region = NULL;

	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
		if (!core_is_buffer_inside((vaddr_t)ua, 1, region->va,
					   region->size))
			continue;

		if (pa) {
			TEE_Result res;
			paddr_t p;
			size_t offset;
			size_t granule;

			/*
			 * mobj and input user address may each include
			 * a specific offset-in-granule position.
			 * Drop both to get the target physical page base
			 * address, then apply only the user address
			 * offset-in-granule.
			 * The smallest mapping granule is the small page.
			 */
			granule = MAX(region->mobj->phys_granule,
				      (size_t)SMALL_PAGE_SIZE);
			assert(!granule || IS_POWER_OF_TWO(granule));

			offset = region->offset +
				 ROUNDDOWN((vaddr_t)ua - region->va, granule);

			res = mobj_get_pa(region->mobj, offset, granule, &p);
			if (res != TEE_SUCCESS)
				return res;

			*pa = p | ((vaddr_t)ua & (granule - 1));
		}
		if (attr)
			*attr = region->attr;

		return TEE_SUCCESS;
	}

	return TEE_ERROR_ACCESS_DENIED;
}

TEE_Result vm_va2pa(const struct user_mode_ctx *uctx, void *ua, paddr_t *pa)
{
	return tee_mmu_user_va2pa_attr(uctx, ua, pa, NULL);
}

void *vm_pa2va(const struct user_mode_ctx *uctx, paddr_t pa, size_t pa_size)
{
	paddr_t p = 0;
	struct vm_region *region = NULL;

	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
		size_t granule = 0;
		size_t size = 0;
		size_t ofs = 0;

		/* pa2va is expected only for memory tracked through mobj */
		if (!region->mobj)
			continue;

		/* Physically granulated memory object must be scanned */
		granule = region->mobj->phys_granule;
		assert(!granule || IS_POWER_OF_TWO(granule));

		for (ofs = region->offset; ofs < region->size; ofs += size) {

			if (granule) {
				/* From current offset to buffer/granule end */
				size = granule - (ofs & (granule - 1));

				if (size > (region->size - ofs))
					size = region->size - ofs;
			} else {
				size = region->size;
			}

			if (mobj_get_pa(region->mobj, ofs, granule, &p))
				continue;

			if (core_is_buffer_inside(pa, pa_size, p, size)) {
				/* Remove region offset (mobj phys offset) */
				ofs -= region->offset;
				/* Get offset-in-granule */
				p = pa - p;

				return (void *)(region->va + ofs + (vaddr_t)p);
			}
		}
	}

	return NULL;
}

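/*
 * vm_check_access_rights() - check that user space may access a buffer
 *
 * Verifies [@uaddr, @uaddr + @len) against the requested @flags
 * (TEE_MEMORY_ACCESS_READ/WRITE, SECURE/NONSECURE and ANY_OWNER). Unless
 * TEE_MEMORY_ACCESS_ANY_OWNER is set the buffer must also lie entirely
 * within TA private memory.
 *
 * Illustrative sketch only: a caller validating a user-supplied output
 * buffer could use something like
 *
 *	res = vm_check_access_rights(uctx, TEE_MEMORY_ACCESS_WRITE |
 *					   TEE_MEMORY_ACCESS_ANY_OWNER,
 *				     (uaddr_t)buf, size);
 */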
TEE_Result vm_check_access_rights(const struct user_mode_ctx *uctx,
				  uint32_t flags, uaddr_t uaddr, size_t len)
{
	uaddr_t a = 0;
	uaddr_t end_addr = 0;
	size_t addr_incr = MIN(CORE_MMU_USER_CODE_SIZE,
			       CORE_MMU_USER_PARAM_SIZE);

	if (ADD_OVERFLOW(uaddr, len, &end_addr))
		return TEE_ERROR_ACCESS_DENIED;

	if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
	    (flags & TEE_MEMORY_ACCESS_SECURE))
		return TEE_ERROR_ACCESS_DENIED;

	/*
	 * Rely on TA private memory test to check if address range is private
	 * to TA or not.
	 */
	if (!(flags & TEE_MEMORY_ACCESS_ANY_OWNER) &&
	    !vm_buf_is_inside_um_private(uctx, (void *)uaddr, len))
		return TEE_ERROR_ACCESS_DENIED;

	for (a = ROUNDDOWN(uaddr, addr_incr); a < end_addr; a += addr_incr) {
		uint32_t attr;
		TEE_Result res;

		res = tee_mmu_user_va2pa_attr(uctx, (void *)a, NULL, &attr);
		if (res != TEE_SUCCESS)
			return res;

		if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
		    (attr & TEE_MATTR_SECURE))
			return TEE_ERROR_ACCESS_DENIED;

		if ((flags & TEE_MEMORY_ACCESS_SECURE) &&
		    !(attr & TEE_MATTR_SECURE))
			return TEE_ERROR_ACCESS_DENIED;

		if ((flags & TEE_MEMORY_ACCESS_WRITE) && !(attr & TEE_MATTR_UW))
			return TEE_ERROR_ACCESS_DENIED;
		if ((flags & TEE_MEMORY_ACCESS_READ) && !(attr & TEE_MATTR_UR))
			return TEE_ERROR_ACCESS_DENIED;
	}

	return TEE_SUCCESS;
}

void vm_set_ctx(struct ts_ctx *ctx)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct user_mode_ctx *uctx = NULL;

	core_mmu_set_user_map(NULL);

	if (is_user_mode_ctx(tsd->ctx)) {
		/*
		 * We're coming from a user mode context so we must make
		 * the pgts available for reuse.
		 */
		uctx = to_user_mode_ctx(tsd->ctx);
		pgt_put_all(uctx);
	}

	if (is_user_mode_ctx(ctx)) {
		struct core_mmu_user_map map = { };

		uctx = to_user_mode_ctx(ctx);
		core_mmu_create_user_map(uctx, &map);
		core_mmu_set_user_map(&map);
		tee_pager_assign_um_tables(uctx);
	}
	tsd->ctx = ctx;
}

struct mobj *vm_get_mobj(struct user_mode_ctx *uctx, vaddr_t va, size_t *len,
			 uint16_t *prot, size_t *offs)
{
	struct vm_region *r = NULL;
	size_t r_offs = 0;

	if (!len || ((*len | va) & SMALL_PAGE_MASK))
		return NULL;

	r = find_vm_region(&uctx->vm_info, va);
	if (!r)
		return NULL;

	r_offs = va - r->va;

	*len = MIN(r->size - r_offs, *len);
	*offs = r->offset + r_offs;
	*prot = r->attr & TEE_MATTR_PROT_MASK;
	return mobj_get(r->mobj);
}