Lines matching references to: uctx
(Each entry shows the source line number, the matched line, and the enclosing function; a trailing "argument" or "local" marks the symbol's role in that function.)

111 static TEE_Result alloc_pgt(struct user_mode_ctx *uctx) in alloc_pgt() argument
115 if (!pgt_check_avail(uctx)) { in alloc_pgt()
122 if (uctx->ts_ctx == tsd->ctx) { in alloc_pgt()
127 pgt_get_all(uctx); in alloc_pgt()
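
From the matches above, alloc_pgt() reserves translation tables for the context and, when that context is the one currently active on this thread, takes references on them immediately. A minimal sketch of that control flow, reconstructed from the fragments; the tsd lookup and the exact error code are assumptions, not quoted source:

/* Sketch only -- reconstructed from the matched lines, not verbatim. */
static TEE_Result alloc_pgt(struct user_mode_ctx *uctx)
{
	struct thread_specific_data *tsd = thread_get_tsd(); /* assumed */

	/* Fail early if the page-table pool cannot cover the VA space. */
	if (!pgt_check_avail(uctx))
		return TEE_ERROR_OUT_OF_MEMORY; /* assumed error code */

	/* If this context is already active on the current thread,
	 * take references on all of its translation tables now. */
	if (uctx->ts_ctx == tsd->ctx)
		pgt_get_all(uctx);

	return TEE_SUCCESS;
}
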
134 static void rem_um_region(struct user_mode_ctx *uctx, struct vm_region *r) in rem_um_region() argument
141 tee_pager_rem_um_region(uctx, r->va, r->size); in rem_um_region()
143 pgt_clear_range(uctx, r->va, r->va + r->size); in rem_um_region()
145 uctx->vm_info.asid); in rem_um_region()
166 pgt_flush_range(uctx, begin, last); in rem_um_region()
201 static void set_um_region(struct user_mode_ctx *uctx, struct vm_region *r) in set_um_region() argument
203 struct pgt *p = SLIST_FIRST(&uctx->pgt_cache); in set_um_region()
226 p = pgt_pop_from_cache_list(ti.va_base, uctx->ts_ctx); in set_um_region()
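
The rem_um_region() matches suggest a fixed teardown order: pager bookkeeping first, then the page-table entries, then the TLB for this address space, and finally any cached translation tables; set_um_region() does the inverse bookkeeping, walking the context's pgt cache (an SLIST) and popping matching tables from the global cache list. A hedged sketch of the teardown order; the conditionals around pager support and the begin/last trimming are not shown in the listing:

/* Sketch of the teardown order visible above (not verbatim source). */
static void rem_um_region(struct user_mode_ctx *uctx, struct vm_region *r)
{
	/* 1. Drop the pager's bookkeeping for the range. */
	tee_pager_rem_um_region(uctx, r->va, r->size);
	/* 2. Clear the translation-table entries covering the region. */
	pgt_clear_range(uctx, r->va, r->va + r->size);
	/* 3. Invalidate stale translations for this ASID (assumed call,
	 *    inferred from the bare uctx->vm_info.asid fragment). */
	tlbi_asid(uctx->vm_info.asid);
	/* 4. Release cached tables that no longer back any region;
	 *    begin/last are bounds trimmed against neighbouring regions. */
	pgt_flush_range(uctx, begin, last);
}
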
294 TEE_Result vm_map_pad(struct user_mode_ctx *uctx, vaddr_t *va, size_t len, in vm_map_pad() argument
329 res = umap_add_region(&uctx->vm_info, reg, pad_begin, pad_end, align); in vm_map_pad()
333 res = alloc_pgt(uctx); in vm_map_pad()
345 res = tee_pager_add_um_region(uctx, reg->va, fobj, prot); in vm_map_pad()
350 set_um_region(uctx, reg); in vm_map_pad()
357 if (thread_get_tsd()->ctx == uctx->ts_ctx) in vm_map_pad()
358 vm_set_ctx(uctx->ts_ctx); in vm_map_pad()
365 TAILQ_REMOVE(&uctx->vm_info.regions, reg, link); in vm_map_pad()
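
vm_map_pad() strings the helpers above together: insert the region into vm_info (umap_add_region()), make sure translation tables exist (alloc_pgt()), register the range with the pager, seed the pgt cache (set_um_region()), and re-activate the mapping via vm_set_ctx() when the target context is the current one; on failure the region is unlinked again with TAILQ_REMOVE(). A caller-side sketch; the parameters after len are truncated in the listing, so the tail of the signature follows the common OP-TEE shape and should be treated as an assumption, as should uctx, sz and mobj:

/* Hypothetical caller: map a mobj-backed buffer into a user context. */
vaddr_t va = 0;	/* 0 lets the allocator pick a free VA */
TEE_Result res = TEE_SUCCESS;

res = vm_map_pad(uctx, &va, sz, TEE_MATTR_URW | TEE_MATTR_PRW,
		 0 /* flags */, mobj, 0 /* offs */,
		 0 /* pad_begin */, 0 /* pad_end */, 0 /* align */);
if (res)
	return res;
/* On success, va holds the user VA where the mobj is now mapped. */
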
412 static TEE_Result split_vm_region(struct user_mode_ctx *uctx, in split_vm_region() argument
425 TEE_Result res = tee_pager_split_um_region(uctx, va); in split_vm_region()
442 TAILQ_INSERT_AFTER(&uctx->vm_info.regions, r, r2, link); in split_vm_region()
447 static TEE_Result split_vm_range(struct user_mode_ctx *uctx, vaddr_t va, in split_vm_range() argument
468 r = find_vm_region(&uctx->vm_info, va); in split_vm_range()
477 res = split_vm_region(uctx, r, va); in split_vm_range()
484 r = find_vm_region(&uctx->vm_info, va + len - 1); in split_vm_range()
488 res = split_vm_region(uctx, r, end_va); in split_vm_range()
496 static void merge_vm_range(struct user_mode_ctx *uctx, vaddr_t va, size_t len) in merge_vm_range() argument
505 tee_pager_merge_um_region(uctx, va, len); in merge_vm_range()
507 for (r = TAILQ_FIRST(&uctx->vm_info.regions);; r = r_next) { in merge_vm_range()
533 TAILQ_REMOVE(&uctx->vm_info.regions, r_next, link); in merge_vm_range()
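
split_vm_region() and merge_vm_range() manage the region list with the BSD sys/queue.h macros seen above (TAILQ_INSERT_AFTER, TAILQ_REMOVE, TAILQ_FOREACH). The same pattern runs unchanged in user space; the self-contained sketch below, with hypothetical local types, splits a [0, 100) range at 40 and merges it back:

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct region {
	unsigned long va, size;
	TAILQ_ENTRY(region) link;
};
TAILQ_HEAD(region_head, region);

/* Split r at va, inserting the tail half right after it, as
 * split_vm_region() does with the kernel's vm_region list. */
static void split_region(struct region_head *h, struct region *r,
			 unsigned long va)
{
	struct region *r2 = calloc(1, sizeof(*r2));

	r2->va = va;
	r2->size = r->va + r->size - va;
	r->size = va - r->va;
	TAILQ_INSERT_AFTER(h, r, r2, link);
}

int main(void)
{
	struct region_head head = TAILQ_HEAD_INITIALIZER(head);
	struct region *r = calloc(1, sizeof(*r));
	struct region *it = NULL;
	struct region *next = NULL;

	r->va = 0;
	r->size = 100;
	TAILQ_INSERT_HEAD(&head, r, link);
	split_region(&head, r, 40);

	/* Coalesce adjacent regions again, as merge_vm_range() does. */
	TAILQ_FOREACH(it, &head, link) {
		while ((next = TAILQ_NEXT(it, link)) &&
		       it->va + it->size == next->va) {
			it->size += next->size;
			TAILQ_REMOVE(&head, next, link);
			free(next);
		}
	}
	TAILQ_FOREACH(it, &head, link)
		printf("region [%lu, %lu)\n", it->va, it->va + it->size);
	return 0;
}
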
556 TEE_Result vm_remap(struct user_mode_ctx *uctx, vaddr_t *new_va, vaddr_t old_va, in vm_remap() argument
569 assert(thread_get_tsd()->ctx == uctx->ts_ctx); in vm_remap()
574 res = split_vm_range(uctx, old_va, len, cmp_region_for_remap, &r0); in vm_remap()
588 rem_um_region(uctx, r); in vm_remap()
589 TAILQ_REMOVE(&uctx->vm_info.regions, r, link); in vm_remap()
597 vm_set_ctx(uctx->ts_ctx); in vm_remap()
605 res = umap_add_region(&uctx->vm_info, r, 0, 0, 0); in vm_remap()
608 res = umap_add_region(&uctx->vm_info, r, pad_begin, in vm_remap()
613 res = alloc_pgt(uctx); in vm_remap()
617 set_um_region(uctx, r); in vm_remap()
619 res = tee_pager_add_um_region(uctx, r->va, fobj, in vm_remap()
643 TAILQ_REMOVE(&uctx->vm_info.regions, r, link); in vm_remap()
658 vm_set_ctx(uctx->ts_ctx); in vm_remap()
670 if (umap_add_region(&uctx->vm_info, r, 0, 0, 0)) in vm_remap()
672 if (alloc_pgt(uctx)) in vm_remap()
675 if (tee_pager_add_um_region(uctx, r->va, fobj, r->attr)) in vm_remap()
678 set_um_region(uctx, r); in vm_remap()
682 vm_set_ctx(uctx->ts_ctx); in vm_remap()
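
vm_remap() reads as a move operation over those primitives: split out the old range, detach each region (rem_um_region() plus TAILQ_REMOVE()), re-insert it at the new address with the requested padding, and rebuild tables and pager state; the matches near the end look like a rollback path that restores the regions at their original address when re-insertion fails. An inferred skeleton; everything between the quoted calls is assumption:

/* Inferred shape of vm_remap(); connective code is assumed, not quoted. */
TEE_Result vm_remap(struct user_mode_ctx *uctx, vaddr_t *new_va,
		    vaddr_t old_va, size_t len, size_t pad_begin,
		    size_t pad_end)
{
	/* Only the currently active context may be remapped. */
	assert(thread_get_tsd()->ctx == uctx->ts_ctx);

	/* 1. Carve the old range into whole regions:
	 *      split_vm_range(uctx, old_va, len, ...)
	 * 2. Detach each region: rem_um_region() + TAILQ_REMOVE().
	 * 3. Re-insert at the new address: umap_add_region() with the
	 *    padding, then alloc_pgt(), set_um_region() and
	 *    tee_pager_add_um_region().
	 * 4. On failure, re-add every region at its old address and
	 *    restore table and pager state before returning the error.
	 * 5. Re-activate the mapping: vm_set_ctx(uctx->ts_ctx). */
	return TEE_SUCCESS; /* placeholder */
}
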
694 TEE_Result vm_get_flags(struct user_mode_ctx *uctx, vaddr_t va, size_t len, in vm_get_flags() argument
702 r = find_vm_region(&uctx->vm_info, va); in vm_get_flags()
722 TEE_Result vm_get_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len, in vm_get_prot() argument
730 r = find_vm_region(&uctx->vm_info, va); in vm_get_prot()
742 TEE_Result vm_set_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len, in vm_set_prot() argument
751 assert(thread_get_tsd()->ctx == uctx->ts_ctx); in vm_set_prot()
756 res = split_vm_range(uctx, va, len, NULL, &r0); in vm_set_prot()
771 set_um_region(uctx, r); in vm_set_prot()
781 uctx->vm_info.asid); in vm_set_prot()
789 if (!tee_pager_set_um_region_attr(uctx, r->va, r->size, in vm_set_prot()
801 merge_vm_range(uctx, va, len); in vm_set_prot()
813 TEE_Result vm_unmap(struct user_mode_ctx *uctx, vaddr_t va, size_t len) in vm_unmap() argument
822 assert(thread_get_tsd()->ctx == uctx->ts_ctx); in vm_unmap()
833 res = split_vm_range(uctx, va, l, NULL, &r); in vm_unmap()
840 rem_um_region(uctx, r); in vm_unmap()
841 umap_remove_region(&uctx->vm_info, r); in vm_unmap()
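
vm_set_prot() and vm_unmap() share the split-first discipline: split_vm_range() isolates the affected regions, the per-region work runs (attribute update plus TLB maintenance, or rem_um_region() plus umap_remove_region()), and vm_set_prot() finishes by re-coalescing with merge_vm_range(). Caller-side both are simple; in this hedged example the prot argument (truncated in the listing) and a page-aligned va/len are assumptions:

/* Make a mapped range read-only for user mode, then drop it later. */
res = vm_set_prot(uctx, va, len, TEE_MATTR_UR);
if (res)
	return res;
/* ... */
res = vm_unmap(uctx, va, len);
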
850 static TEE_Result map_kinit(struct user_mode_ctx *uctx) in map_kinit() argument
864 res = vm_map(uctx, &va, sz, prot, VM_FLAG_PERMANENT, in map_kinit()
872 return vm_map(uctx, &va, sz, TEE_MATTR_PRW, VM_FLAG_PERMANENT, in map_kinit()
878 TEE_Result vm_info_init(struct user_mode_ctx *uctx, struct ts_ctx *ts_ctx) in vm_info_init() argument
888 memset(uctx, 0, sizeof(*uctx)); in vm_info_init()
889 TAILQ_INIT(&uctx->vm_info.regions); in vm_info_init()
890 SLIST_INIT(&uctx->pgt_cache); in vm_info_init()
891 uctx->vm_info.asid = asid; in vm_info_init()
892 uctx->ts_ctx = ts_ctx; in vm_info_init()
894 res = map_kinit(uctx); in vm_info_init()
896 vm_info_final(uctx); in vm_info_init()
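
vm_info_init() zeroes the context, initialises the region TAILQ and the pgt-cache SLIST, records the ASID and owning ts_ctx, and maps the kernel-provided regions through map_kinit(); if that last step fails it unwinds itself with vm_info_final(). The typical pairing, sketched with assumed surrounding declarations:

/* Lifecycle sketch: create and destroy a user-mode address space. */
res = vm_info_init(uctx, ts_ctx);
if (res)
	return res;	/* vm_info_final() already ran on the error path */

/* ... map regions, run the context, unmap ... */

vm_info_final(uctx);	/* flushes tables, frees the ASID, drops regions */
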
900 void vm_clean_param(struct user_mode_ctx *uctx) in vm_clean_param() argument
905 TAILQ_FOREACH_SAFE(r, &uctx->vm_info.regions, link, next_r) { in vm_clean_param()
907 rem_um_region(uctx, r); in vm_clean_param()
908 umap_remove_region(&uctx->vm_info, r); in vm_clean_param()
913 static void check_param_map_empty(struct user_mode_ctx *uctx __maybe_unused) in check_param_map_empty()
917 TAILQ_FOREACH(r, &uctx->vm_info.regions, link) in check_param_map_empty()
921 static TEE_Result param_mem_to_user_va(struct user_mode_ctx *uctx, in param_mem_to_user_va() argument
926 TAILQ_FOREACH(region, &uctx->vm_info.regions, link) { in param_mem_to_user_va()
978 TEE_Result vm_map_param(struct user_mode_ctx *uctx, struct tee_ta_param *param, in vm_map_param() argument
1041 check_param_map_empty(uctx); in vm_map_param()
1046 res = vm_map(uctx, &va, mem[n].size, in vm_map_param()
1064 res = param_mem_to_user_va(uctx, &param->u[n].mem, in vm_map_param()
1070 res = alloc_pgt(uctx); in vm_map_param()
1073 vm_clean_param(uctx); in vm_map_param()
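
vm_map_param() expects no parameter regions to be present yet (check_param_map_empty()), maps each parameter memref with vm_map(), rewrites the memrefs to user VAs via param_mem_to_user_va(), and finishes with alloc_pgt(); vm_clean_param() serves as both the error path and the normal cleanup, removing each parameter region with rem_um_region() and umap_remove_region(). A hedged calling pattern; the third argument is truncated in the listing and assumed to be the per-parameter user-VA array:

/* Map TA invocation parameters; rollback happens inside on failure. */
res = vm_map_param(uctx, param, param_va);
if (res)
	return res;	/* vm_clean_param() already ran internally */

/* ... invoke the TA with the mapped parameters ... */

vm_clean_param(uctx);	/* drop the parameter mappings again */
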
1078 void vm_info_final(struct user_mode_ctx *uctx) in vm_info_final() argument
1080 if (!uctx->vm_info.asid) in vm_info_final()
1083 pgt_flush(uctx); in vm_info_final()
1084 tee_pager_rem_um_regions(uctx); in vm_info_final()
1087 tlbi_asid(uctx->vm_info.asid); in vm_info_final()
1089 asid_free(uctx->vm_info.asid); in vm_info_final()
1090 uctx->vm_info.asid = 0; in vm_info_final()
1092 while (!TAILQ_EMPTY(&uctx->vm_info.regions)) in vm_info_final()
1093 umap_remove_region(&uctx->vm_info, in vm_info_final()
1094 TAILQ_FIRST(&uctx->vm_info.regions)); in vm_info_final()
1098 bool vm_buf_is_inside_um_private(const struct user_mode_ctx *uctx, in vm_buf_is_inside_um_private() argument
1103 TAILQ_FOREACH(r, &uctx->vm_info.regions, link) { in vm_buf_is_inside_um_private()
1114 bool vm_buf_intersects_um_private(const struct user_mode_ctx *uctx, in vm_buf_intersects_um_private() argument
1119 TAILQ_FOREACH(r, &uctx->vm_info.regions, link) { in vm_buf_intersects_um_private()
1129 TEE_Result vm_buf_to_mboj_offs(const struct user_mode_ctx *uctx, in vm_buf_to_mboj_offs() argument
1135 TAILQ_FOREACH(r, &uctx->vm_info.regions, link) { in vm_buf_to_mboj_offs()
1152 static TEE_Result tee_mmu_user_va2pa_attr(const struct user_mode_ctx *uctx, in tee_mmu_user_va2pa_attr() argument
1157 TAILQ_FOREACH(region, &uctx->vm_info.regions, link) { in tee_mmu_user_va2pa_attr()
1198 TEE_Result vm_va2pa(const struct user_mode_ctx *uctx, void *ua, paddr_t *pa) in vm_va2pa() argument
1200 return tee_mmu_user_va2pa_attr(uctx, ua, pa, NULL); in vm_va2pa()
1203 void *vm_pa2va(const struct user_mode_ctx *uctx, paddr_t pa, size_t pa_size) in vm_pa2va() argument
1208 TAILQ_FOREACH(region, &uctx->vm_info.regions, link) { in vm_pa2va()
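
vm_va2pa() and vm_pa2va() are straight lookups over the region list, the former through the attribute-aware helper tee_mmu_user_va2pa_attr(). Both signatures are fully visible above; only the surrounding declarations and error handling below are assumptions:

paddr_t pa = 0;
void *va = NULL;

/* User VA -> PA; fails when ua is not mapped in uctx. */
if (vm_va2pa(uctx, ua, &pa))
	return TEE_ERROR_ACCESS_DENIED; /* assumed error handling */

/* PA -> user VA; NULL when no region covers [pa, pa + sz). */
va = vm_pa2va(uctx, pa, sz);
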
1250 TEE_Result vm_check_access_rights(const struct user_mode_ctx *uctx, in vm_check_access_rights() argument
1273 !vm_buf_is_inside_um_private(uctx, (void *)uaddr, len)) in vm_check_access_rights()
1280 res = tee_mmu_user_va2pa_attr(uctx, (void *)a, NULL, &attr); in vm_check_access_rights()
1304 struct user_mode_ctx *uctx = NULL; in vm_set_ctx() local
1313 uctx = to_user_mode_ctx(tsd->ctx); in vm_set_ctx()
1314 pgt_put_all(uctx); in vm_set_ctx()
1320 uctx = to_user_mode_ctx(ctx); in vm_set_ctx()
1321 core_mmu_create_user_map(uctx, &map); in vm_set_ctx()
1323 tee_pager_assign_um_tables(uctx); in vm_set_ctx()
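
vm_set_ctx() is the context-switch point: it releases the outgoing context's table references (pgt_put_all()), builds the incoming user map (core_mmu_create_user_map()), and hands the tables to the pager (tee_pager_assign_um_tables()). An inferred sequence; the predicate, the map-install call and the tsd bookkeeping are assumptions not present in the listing:

/* Inferred switch sequence (not verbatim source). */
void vm_set_ctx(struct ts_ctx *ctx)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct core_mmu_user_map map = { };

	/* 1. Drop table references held for the outgoing context. */
	if (is_user_mode_ctx(tsd->ctx))	/* assumed predicate */
		pgt_put_all(to_user_mode_ctx(tsd->ctx));

	/* 2. Build and install the incoming context's user map. */
	if (is_user_mode_ctx(ctx)) {
		struct user_mode_ctx *uctx = to_user_mode_ctx(ctx);

		core_mmu_create_user_map(uctx, &map);
		/* core_mmu_set_user_map(&map);  -- assumed install step */
		/* 3. Let the pager pick up the new tables. */
		tee_pager_assign_um_tables(uctx);
	}
	tsd->ctx = ctx;	/* assumed bookkeeping */
}
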
1328 struct mobj *vm_get_mobj(struct user_mode_ctx *uctx, vaddr_t va, size_t *len, in vm_get_mobj() argument
1337 r = find_vm_region(&uctx->vm_info, va); in vm_get_mobj()
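
Finally, vm_get_mobj() resolves a user VA back to the memory object backing its region via find_vm_region(). The parameter list is truncated in the listing; the out-parameters for protection and offset below follow the common shape and are assumptions:

size_t len = 0;
size_t offs = 0;
uint16_t prot = 0;
struct mobj *mobj = NULL;

/* Assumed full signature; only the first three params are shown above. */
mobj = vm_get_mobj(uctx, va, &len, &prot, &offs);
if (!mobj)
	return TEE_ERROR_BAD_PARAMETERS; /* assumed error handling */
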