Lines Matching refs:va
75 if (ADD_OVERFLOW(prev_reg->va, prev_reg->size, &begin_va) || in select_va_in_range()
81 if (reg->va) { in select_va_in_range()
82 if (reg->va < begin_va) in select_va_in_range()
84 begin_va = reg->va; in select_va_in_range()
103 if (end_va <= next_reg->va) { in select_va_in_range()
104 assert(!reg->va || reg->va == begin_va); in select_va_in_range()
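The select_va_in_range() lines above pick a start address in the gap between two neighbouring regions, guarding every addition against wrap-around before comparing the candidate end against the next region. A minimal, self-contained sketch of that idea follows; struct region, pick_va_in_gap() and the single pad parameter are simplified assumptions for illustration, not the OP-TEE API.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t vaddr_t;

struct region {
	vaddr_t va;
	size_t size;
};

static vaddr_t pick_va_in_gap(const struct region *prev,
			      const struct region *next,
			      size_t size, size_t pad)
{
	vaddr_t begin_va = 0;
	vaddr_t end_va = 0;

	/* Candidate start: end of the previous region plus padding */
	if (__builtin_add_overflow(prev->va, prev->size, &begin_va) ||
	    __builtin_add_overflow(begin_va, pad, &begin_va))
		return 0;

	/* Candidate end: start plus requested size plus padding */
	if (__builtin_add_overflow(begin_va, size, &end_va) ||
	    __builtin_add_overflow(end_va, pad, &end_va))
		return 0;

	/* The candidate fits only if it ends before the next region starts */
	if (end_va <= next->va)
		return begin_va;

	return 0;
}

int main(void)
{
	struct region prev = { .va = 0x1000, .size = 0x2000 };
	struct region next = { .va = 0x10000, .size = 0x1000 };

	printf("selected va: 0x%lx\n",
	       (unsigned long)pick_va_in_gap(&prev, &next, 0x4000, 0x1000));
	return 0;
}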
136 vaddr_t begin = ROUNDDOWN(r->va, CORE_MMU_PGDIR_SIZE); in rem_um_region()
137 vaddr_t last = ROUNDUP(r->va + r->size, CORE_MMU_PGDIR_SIZE); in rem_um_region()
141 tee_pager_rem_um_region(uctx, r->va, r->size); in rem_um_region()
143 pgt_clear_range(uctx, r->va, r->va + r->size); in rem_um_region()
144 tlbi_va_range_asid(r->va, r->size, SMALL_PAGE_SIZE, in rem_um_region()
158 last = MIN(last, ROUNDDOWN(r2->va, CORE_MMU_PGDIR_SIZE)); in rem_um_region()
163 ROUNDUP(r2->va + r2->size, CORE_MMU_PGDIR_SIZE)); in rem_um_region()
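rem_um_region() frees translation tables at page-directory granularity, so the lines above first round the removed range out to CORE_MMU_PGDIR_SIZE and then clamp it back wherever a neighbouring region still uses the same page directory. A standalone sketch of that rounding-and-clamping step, with locally defined helper macros and a 2 MiB directory size assumed purely for illustration:

#include <stdint.h>
#include <stdio.h>

typedef uintptr_t vaddr_t;

/* Assumed values and helpers for illustration only */
#define PGDIR_SIZE	0x200000UL	/* 2 MiB per page directory */
#define ROUNDDOWN(x, a)	((x) & ~((a) - 1))
#define ROUNDUP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define MIN(a, b)	((a) < (b) ? (a) : (b))
#define MAX(a, b)	((a) > (b) ? (a) : (b))

int main(void)
{
	/* Region being removed and its neighbours */
	vaddr_t r_va = 0x30a000, r_end = 0x30f000;
	vaddr_t prev_end = 0x1f0000;	/* end of the previous region */
	vaddr_t next_va = 0x600000;	/* start of the next region */

	/* Round the removed range out to whole page directories ... */
	vaddr_t begin = ROUNDDOWN(r_va, PGDIR_SIZE);
	vaddr_t last = ROUNDUP(r_end, PGDIR_SIZE);

	/* ... then keep any directory still shared with a neighbour */
	begin = MAX(begin, ROUNDUP(prev_end, PGDIR_SIZE));
	last = MIN(last, ROUNDDOWN(next_va, PGDIR_SIZE));

	if (begin < last)
		printf("directories [0x%lx, 0x%lx) can be freed\n",
		       (unsigned long)begin, (unsigned long)last);
	else
		printf("all directories in range are still shared\n");
	return 0;
}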
169 static void set_pa_range(struct core_mmu_table_info *ti, vaddr_t va, in set_pa_range() argument
172 unsigned int end = core_mmu_va2idx(ti, va + size); in set_pa_range()
173 unsigned int idx = core_mmu_va2idx(ti, va); in set_pa_range()
185 vaddr_t va = MAX(r->va, ti->va_base); in set_reg_in_table() local
186 vaddr_t end = MIN(r->va + r->size, ti->va_base + CORE_MMU_PGDIR_SIZE); in set_reg_in_table()
187 size_t sz = MIN(end - va, mobj_get_phys_granule(r->mobj)); in set_reg_in_table()
192 while (va < end) { in set_reg_in_table()
193 offset = va - r->va + r->offset; in set_reg_in_table()
196 set_pa_range(ti, va, pa, sz, r->attr); in set_reg_in_table()
197 va += sz; in set_reg_in_table()
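set_reg_in_table() clips the region to the window covered by one translation table and then walks it in steps of the backing object's physical granule (mobj_get_phys_granule() above), computing the object offset of each chunk as va - r->va + r->offset. A toy walk with hypothetical numbers; the granule value and the print-out are only illustrative:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t vaddr_t;

#define MIN(a, b)	((a) < (b) ? (a) : (b))
#define MAX(a, b)	((a) > (b) ? (a) : (b))

int main(void)
{
	/* Hypothetical region and table window */
	vaddr_t r_va = 0x101000;
	size_t r_size = 0x10000;
	size_t r_offset = 0x3000;	/* offset of the region into its mobj */
	vaddr_t tbl_base = 0x100000;
	size_t tbl_span = 0x200000;	/* one page-directory worth of VA */
	size_t granule = 0x4000;	/* assumed phys granule of the mobj */

	/* Clip the region to the part covered by this table */
	vaddr_t va = MAX(r_va, tbl_base);
	vaddr_t end = MIN(r_va + r_size, tbl_base + tbl_span);
	size_t sz = MIN((size_t)(end - va), granule);

	/* Walk the clipped range one granule at a time */
	while (va < end) {
		size_t offset = va - r_va + r_offset;

		printf("map va 0x%lx -> mobj offset 0x%zx (%zu bytes)\n",
		       (unsigned long)va, offset, sz);
		va += sz;
		sz = MIN((size_t)(end - va), granule);
	}
	return 0;
}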
223 for (ti.va_base = ROUNDDOWN(r->va, CORE_MMU_PGDIR_SIZE); in set_um_region()
224 ti.va_base < r->va + r->size; in set_um_region()
247 vaddr_t va = 0; in umap_add_region() local
251 dummy_first_reg.va = va_range_base; in umap_add_region()
252 dummy_last_reg.va = va_range_base + va_range_size; in umap_add_region()
255 if ((reg->va | reg->size | pad_begin | pad_end) & SMALL_PAGE_MASK) in umap_add_region()
270 va = select_va_in_range(prev_r, r, reg, pad_begin, pad_end, in umap_add_region()
272 if (va) { in umap_add_region()
273 reg->va = va; in umap_add_region()
283 va = select_va_in_range(r, &dummy_last_reg, reg, pad_begin, pad_end, in umap_add_region()
285 if (va) { in umap_add_region()
286 reg->va = va; in umap_add_region()
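umap_add_region() brackets the region list with two zero-sized dummy regions at the ends of the usable VA range, so every free gap has a region on both sides and the gap search needs no special cases at the edges. It also rejects unaligned requests by OR-ing together every value that must be page aligned and masking once. That alignment idiom in isolation, with the page size and function name assumed for the example:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t vaddr_t;

#define SMALL_PAGE_SIZE	0x1000UL
#define SMALL_PAGE_MASK	(SMALL_PAGE_SIZE - 1)

/* True if every value is a multiple of the small page size */
static bool all_page_aligned(vaddr_t va, size_t size, size_t pad_begin,
			     size_t pad_end)
{
	/* OR-ing first means a single mask test covers all four values */
	return !((va | size | pad_begin | pad_end) & SMALL_PAGE_MASK);
}

int main(void)
{
	printf("%d\n", all_page_aligned(0x20000, 0x3000, 0, 0x1000));	/* 1 */
	printf("%d\n", all_page_aligned(0x20800, 0x3000, 0, 0));	/* 0 */
	return 0;
}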
294 TEE_Result vm_map_pad(struct user_mode_ctx *uctx, vaddr_t *va, size_t len, in vm_map_pad() argument
324 reg->va = *va; in vm_map_pad()
345 res = tee_pager_add_um_region(uctx, reg->va, fobj, prot); in vm_map_pad()
360 *va = reg->va; in vm_map_pad()
373 static struct vm_region *find_vm_region(struct vm_info *vm_info, vaddr_t va) in find_vm_region() argument
378 if (va >= r->va && va < r->va + r->size) in find_vm_region()
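find_vm_region() is a plain linear scan of the sorted region list for the region whose half-open range [r->va, r->va + r->size) contains the lookup address. A stripped-down equivalent over an array; the array layout is just for the example, the real list is a queue:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t vaddr_t;

struct region {
	vaddr_t va;
	size_t size;
};

static const struct region *find_region(const struct region *regs, size_t n,
					vaddr_t va)
{
	for (size_t i = 0; i < n; i++)
		if (va >= regs[i].va && va < regs[i].va + regs[i].size)
			return regs + i;

	return NULL;
}

int main(void)
{
	const struct region regs[] = {
		{ .va = 0x10000, .size = 0x4000 },
		{ .va = 0x20000, .size = 0x1000 },
	};
	const struct region *r = find_region(regs, 2, 0x20800);

	printf("found region at 0x%lx\n", r ? (unsigned long)r->va : 0UL);
	return 0;
}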
384 static bool va_range_is_contiguous(struct vm_region *r0, vaddr_t va, in va_range_is_contiguous() argument
393 if (ADD_OVERFLOW(va, len, &end_va)) in va_range_is_contiguous()
398 vaddr_t r_end_va = r->va + r->size; in va_range_is_contiguous()
404 if (r_end_va != r_next->va) in va_range_is_contiguous()
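va_range_is_contiguous() starts from the region containing va and walks forward, requiring each region to end exactly where the next one begins until va + len is covered, after an overflow check on the end address. A simplified array-based version under the same assumptions as the previous sketch (the real code also runs a caller-supplied comparison on each pair of regions):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t vaddr_t;

struct region {
	vaddr_t va;
	size_t size;
};

/* regs[] is sorted by va; idx is the region known to contain va */
static bool range_is_contiguous(const struct region *regs, size_t n,
				size_t idx, vaddr_t va, size_t len)
{
	vaddr_t end_va = 0;

	if (__builtin_add_overflow(va, len, &end_va))
		return false;

	for (size_t i = idx; i < n; i++) {
		vaddr_t r_end = regs[i].va + regs[i].size;

		/* The requested range ends inside this region: done */
		if (end_va <= r_end)
			return true;

		/* Otherwise the next region must start exactly here */
		if (i + 1 == n || regs[i + 1].va != r_end)
			return false;
	}

	return false;
}

int main(void)
{
	const struct region regs[] = {
		{ .va = 0x10000, .size = 0x4000 },
		{ .va = 0x14000, .size = 0x2000 },
		{ .va = 0x20000, .size = 0x1000 },
	};

	printf("%d\n", range_is_contiguous(regs, 3, 0, 0x11000, 0x4000)); /* 1 */
	printf("%d\n", range_is_contiguous(regs, 3, 0, 0x11000, 0xf000)); /* 0 */
	return 0;
}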
413 struct vm_region *r, vaddr_t va) in split_vm_region() argument
416 size_t diff = va - r->va; in split_vm_region()
425 TEE_Result res = tee_pager_split_um_region(uctx, va); in split_vm_region()
435 r2->va = va; in split_vm_region()
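split_vm_region() turns one region into two at an interior address: the upper half gets the split address, the remaining size, and an offset advanced by the same distance, while the lower half is simply truncated. The bookkeeping in isolation, without the pager or allocator handling and with simplified names:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t vaddr_t;

struct region {
	vaddr_t va;
	size_t size;
	size_t offset;	/* offset into the backing object */
};

/* Split *r at va; the part at and above va ends up in *r2 */
static void split_region(struct region *r, struct region *r2, vaddr_t va)
{
	size_t diff = va - r->va;

	r2->va = va;
	r2->size = r->size - diff;
	r2->offset = r->offset + diff;

	r->size = diff;
}

int main(void)
{
	struct region r = { .va = 0x40000, .size = 0x8000, .offset = 0x1000 };
	struct region r2 = { 0 };

	split_region(&r, &r2, 0x43000);
	printf("low:  va 0x%lx size 0x%zx offs 0x%zx\n",
	       (unsigned long)r.va, r.size, r.offset);
	printf("high: va 0x%lx size 0x%zx offs 0x%zx\n",
	       (unsigned long)r2.va, r2.size, r2.offset);
	return 0;
}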
447 static TEE_Result split_vm_range(struct user_mode_ctx *uctx, vaddr_t va, in split_vm_range() argument
458 if ((va | len) & SMALL_PAGE_MASK) in split_vm_range()
461 if (ADD_OVERFLOW(va, len, &end_va)) in split_vm_range()
468 r = find_vm_region(&uctx->vm_info, va); in split_vm_range()
469 if (!r || !va_range_is_contiguous(r, va, len, cmp_regs)) in split_vm_range()
476 if (va != r->va) { in split_vm_range()
477 res = split_vm_region(uctx, r, va); in split_vm_range()
484 r = find_vm_region(&uctx->vm_info, va + len - 1); in split_vm_range()
487 if (end_va != r->va + r->size) { in split_vm_range()
496 static void merge_vm_range(struct user_mode_ctx *uctx, vaddr_t va, size_t len) in merge_vm_range() argument
502 if (ADD_OVERFLOW(va, len, &end_va)) in merge_vm_range()
505 tee_pager_merge_um_region(uctx, va, len); in merge_vm_range()
513 if (r->va + r->size < va) in merge_vm_range()
521 if (r->va > end_va) in merge_vm_range()
524 if (r->va + r->size != r_next->va) in merge_vm_range()
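merge_vm_range() is the inverse of the split: it skips regions that end before va or start after va + len, and folds a region into its successor whenever the two touch exactly and are otherwise compatible. A minimal array-based sketch that only checks adjacency; the real code also compares attributes, flags and backing objects before merging:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t vaddr_t;

struct region {
	vaddr_t va;
	size_t size;
};

/* Merge touching neighbours in a sorted array; returns the new count */
static size_t merge_touching(struct region *regs, size_t n)
{
	size_t out = 0;

	for (size_t i = 0; i < n; i++) {
		if (out && regs[out - 1].va + regs[out - 1].size == regs[i].va)
			regs[out - 1].size += regs[i].size; /* fold into predecessor */
		else
			regs[out++] = regs[i];
	}

	return out;
}

int main(void)
{
	struct region regs[] = {
		{ .va = 0x10000, .size = 0x2000 },
		{ .va = 0x12000, .size = 0x1000 },
		{ .va = 0x20000, .size = 0x1000 },
	};
	size_t n = merge_touching(regs, 3);

	for (size_t i = 0; i < n; i++)
		printf("region va 0x%lx size 0x%zx\n",
		       (unsigned long)regs[i].va, regs[i].size);
	return 0;
}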
585 if (r->va + r->size > old_va + len) in vm_remap()
604 r->va = r_last->va + r_last->size; in vm_remap()
607 r->va = *new_va; in vm_remap()
619 res = tee_pager_add_um_region(uctx, r->va, fobj, in vm_remap()
659 *new_va = r_first->va; in vm_remap()
668 r->va = next_va; in vm_remap()
675 if (tee_pager_add_um_region(uctx, r->va, fobj, r->attr)) in vm_remap()
694 TEE_Result vm_get_flags(struct user_mode_ctx *uctx, vaddr_t va, size_t len, in vm_get_flags() argument
699 if (!len || ((len | va) & SMALL_PAGE_MASK)) in vm_get_flags()
702 r = find_vm_region(&uctx->vm_info, va); in vm_get_flags()
706 if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_flags)) in vm_get_flags()
722 TEE_Result vm_get_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len, in vm_get_prot() argument
727 if (!len || ((len | va) & SMALL_PAGE_MASK)) in vm_get_prot()
730 r = find_vm_region(&uctx->vm_info, va); in vm_get_prot()
734 if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_prot)) in vm_get_prot()
742 TEE_Result vm_set_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len, in vm_set_prot() argument
756 res = split_vm_range(uctx, va, len, NULL, &r0); in vm_set_prot()
761 if (r->va + r->size > va + len) in vm_set_prot()
780 tlbi_va_range_asid(r->va, r->size, SMALL_PAGE_SIZE, in vm_set_prot()
786 if (r->va + r->size > va + len) in vm_set_prot()
789 if (!tee_pager_set_um_region_attr(uctx, r->va, r->size, in vm_set_prot()
793 cache_op_inner(DCACHE_AREA_CLEAN, (void *)r->va, in vm_set_prot()
801 merge_vm_range(uctx, va, len); in vm_set_prot()
813 TEE_Result vm_unmap(struct user_mode_ctx *uctx, vaddr_t va, size_t len) in vm_unmap() argument
827 if (!l || (va & SMALL_PAGE_MASK)) in vm_unmap()
830 if (ADD_OVERFLOW(va, l, &end_va)) in vm_unmap()
833 res = split_vm_range(uctx, va, l, NULL, &r); in vm_unmap()
839 unmap_end_va = r->va + r->size; in vm_unmap()
855 vaddr_t va = 0; in map_kinit() local
859 thread_get_user_kcode(&mobj, &offs, &va, &sz); in map_kinit()
864 res = vm_map(uctx, &va, sz, prot, VM_FLAG_PERMANENT, in map_kinit()
870 thread_get_user_kdata(&mobj, &offs, &va, &sz); in map_kinit()
872 return vm_map(uctx, &va, sz, TEE_MATTR_PRW, VM_FLAG_PERMANENT, in map_kinit()
927 vaddr_t va = 0; in param_mem_to_user_va() local
942 va = region->va + phys_offs - region->offset; in param_mem_to_user_va()
943 *user_va = (void *)va; in param_mem_to_user_va()
1044 vaddr_t va = 0; in vm_map_param() local
1046 res = vm_map(uctx, &va, mem[n].size, in vm_map_param()
1099 const void *va, size_t size) in vm_buf_is_inside_um_private() argument
1106 if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size)) in vm_buf_is_inside_um_private()
1115 const void *va, size_t size) in vm_buf_intersects_um_private() argument
1122 if (core_is_buffer_intersect((vaddr_t)va, size, r->va, r->size)) in vm_buf_intersects_um_private()
1130 const void *va, size_t size, in vm_buf_to_mboj_offs() argument
1138 if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size)) { in vm_buf_to_mboj_offs()
1144 *offs = (vaddr_t)va - r->va + r->offset - poffs; in vm_buf_to_mboj_offs()
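vm_buf_to_mboj_offs() translates a user buffer address into an offset within the region's backing mobj: the distance of va from the region start, plus the region's own offset into the object, minus the object's physical offset (poffs above). The arithmetic on its own, with made-up numbers:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t vaddr_t;

int main(void)
{
	/* Hypothetical region and backing-object parameters */
	vaddr_t buf_va = 0x10b200;	/* user buffer address */
	vaddr_t r_va = 0x108000;	/* region start */
	size_t r_offset = 0x4000;	/* region offset into the mobj */
	size_t poffs = 0x800;		/* mobj physical offset */

	size_t offs = buf_va - r_va + r_offset - poffs;

	printf("mobj offset: 0x%zx\n", offs);	/* 0x6a00 */
	return 0;
}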
1158 if (!core_is_buffer_inside((vaddr_t)ua, 1, region->va, in tee_mmu_user_va2pa_attr()
1181 ROUNDDOWN2((vaddr_t)ua - region->va, granule); in tee_mmu_user_va2pa_attr()
1242 return (void *)(region->va + ofs + (vaddr_t)p); in vm_pa2va()
1328 struct mobj *vm_get_mobj(struct user_mode_ctx *uctx, vaddr_t va, size_t *len, in vm_get_mobj() argument
1334 if (!len || ((*len | va) & SMALL_PAGE_MASK)) in vm_get_mobj()
1337 r = find_vm_region(&uctx->vm_info, va); in vm_get_mobj()
1341 r_offs = va - r->va; in vm_get_mobj()
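vm_get_mobj() checks that both the address and the requested length are page aligned, locates the containing region, and converts the VA into an offset from the region start (r_offs above) so the caller gets a reference to the backing mobj plus that offset. A sketch of the validation and offset arithmetic; clamping the length to what remains of the region is an assumption of this example, as are the simplified types:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t vaddr_t;

#define SMALL_PAGE_MASK	0xfffUL

struct region {
	vaddr_t va;
	size_t size;
};

int main(void)
{
	struct region r = { .va = 0x200000, .size = 0x10000 };
	vaddr_t va = 0x208000;
	size_t len = 0x20000;

	/* Both the address and the length must be page aligned */
	if ((len | va) & SMALL_PAGE_MASK) {
		printf("bad parameters\n");
		return 1;
	}

	/* Offset of va inside the region, length clamped to the region end */
	size_t r_offs = va - r.va;

	if (len > r.size - r_offs)
		len = r.size - r_offs;

	printf("offset 0x%zx, usable length 0x%zx\n", r_offs, len);
	return 0;
}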