Lines matching refs:vma in fs/userfaultfd.c
227 struct vm_area_struct *vma, in userfaultfd_huge_must_wait() argument
238 ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma)); in userfaultfd_huge_must_wait()
259 struct vm_area_struct *vma, in userfaultfd_huge_must_wait() argument
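The two parameter matches above (lines 227 and 259) are, by the usual layout of this file, the CONFIG_HUGETLB_PAGE implementation and its !CONFIG_HUGETLB_PAGE stub. A minimal sketch of the check around line 238, assuming the mainline body of this era; mm, vma, address and reason are the enclosing function's parameters:

	/* Sketch (assumption): core of the CONFIG_HUGETLB_PAGE variant.
	 * The fault must keep waiting while the huge PTE is absent, none,
	 * or write-protected for a VM_UFFD_WP fault. */
	pte_t *ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
	bool must_wait = true;

	if (ptep) {
		pte_t pte = huge_ptep_get(ptep);

		/* Lockless read: handle_userfault() re-checks on its wait
		 * queue, so a racing update is harmless. */
		must_wait = huge_pte_none(pte) ||
			    (!huge_pte_write(pte) && (reason & VM_UFFD_WP));
	}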
369 struct mm_struct *mm = vmf->vma->vm_mm; in handle_userfault()
396 ctx = vmf->vma->vm_userfaultfd_ctx.ctx; in handle_userfault()
505 if (!is_vm_hugetlb_page(vmf->vma)) in handle_userfault()
509 must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma, in handle_userfault()
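handle_userfault() is the fault-side half of the ABI: it queues the faulting thread on the context's wait queue until userspace resolves the page. A hedged userspace sketch of the resolving side; the uffd descriptor and page_size are assumed to have been set up already via the UFFDIO_API handshake:

	#include <linux/userfaultfd.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	static void serve_one_fault(int uffd, void *src_page, long page_size)
	{
		struct uffd_msg msg;

		/* Blocks until handle_userfault() queues an event. */
		if (read(uffd, &msg, sizeof(msg)) != sizeof(msg))
			return;
		if (msg.event != UFFD_EVENT_PAGEFAULT)
			return;

		struct uffdio_copy copy = {
			.dst = msg.arg.pagefault.address & ~(page_size - 1),
			.src = (unsigned long)src_page,
			.len = page_size,
			.mode = 0,
		};
		/* Installs the page and wakes the sleeper in handle_userfault(). */
		ioctl(uffd, UFFDIO_COPY, &copy);
	}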
607 struct vm_area_struct *vma; in userfaultfd_event_wait_completion() local
612 for (vma = mm->mmap; vma; vma = vma->vm_next) in userfaultfd_event_wait_completion()
613 if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) { in userfaultfd_event_wait_completion()
614 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; in userfaultfd_event_wait_completion()
615 vma->vm_flags &= ~__VM_UFFD_FLAGS; in userfaultfd_event_wait_completion()
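This cleanup runs when a fork event dies unread: the new context (release_new_ctx) is still wired into the child's vmas and has to be stripped. A sketch of the surrounding locking, assuming a v5.10+ tree:

	/* Sketch (assumption): the walk at lines 612-615 runs under the
	 * mmap write lock of the mm that still references the dead ctx. */
	mmap_write_lock(mm);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
			vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
			vma->vm_flags &= ~__VM_UFFD_FLAGS;
		}
	mmap_write_unlock(mm);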
639 int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs) in dup_userfaultfd() argument
644 octx = vma->vm_userfaultfd_ctx.ctx; in dup_userfaultfd()
646 vm_write_begin(vma); in dup_userfaultfd()
647 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; in dup_userfaultfd()
648 WRITE_ONCE(vma->vm_flags, in dup_userfaultfd()
649 vma->vm_flags & ~__VM_UFFD_FLAGS); in dup_userfaultfd()
650 vm_write_end(vma); in dup_userfaultfd()
676 ctx->mm = vma->vm_mm; in dup_userfaultfd()
686 vma->vm_userfaultfd_ctx.ctx = ctx; in dup_userfaultfd()
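dup_userfaultfd() runs at fork. The scrub at lines 646-650 is the no-feature path; the attach at lines 676/686 is the UFFD_FEATURE_EVENT_FORK path, where a fresh context is bound to the child mm. A sketch of the guard between the two, assuming the mainline condition:

	/* Sketch (assumption): select between scrubbing and duplicating. */
	octx = vma->vm_userfaultfd_ctx.ctx;
	if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
		/* Parent didn't ask for fork events: the child vma must not
		 * inherit the context, so clear it under the speculative-
		 * fault write sequence (vm_write_begin/vm_write_end). */
		vm_write_begin(vma);
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		WRITE_ONCE(vma->vm_flags, vma->vm_flags & ~__VM_UFFD_FLAGS);
		vm_write_end(vma);
		return 0;
	}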
714 void mremap_userfaultfd_prep(struct vm_area_struct *vma, in mremap_userfaultfd_prep() argument
719 ctx = vma->vm_userfaultfd_ctx.ctx; in mremap_userfaultfd_prep()
730 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; in mremap_userfaultfd_prep()
731 vma->vm_flags &= ~__VM_UFFD_FLAGS; in mremap_userfaultfd_prep()
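mremap_userfaultfd_prep() applies the same pattern to mremap: keep the context alive across the move when UFFD_FEATURE_EVENT_REMAP was requested, otherwise drop it (lines 730-731). A sketch, assuming the mainline shape; vm_ctx is taken to be the function's output parameter:

	/* Sketch (assumption): keep or drop the ctx depending on the
	 * UFFD_FEATURE_EVENT_REMAP feature bit. */
	if (ctx->features & UFFD_FEATURE_EVENT_REMAP) {
		vm_ctx->ctx = ctx;
		userfaultfd_ctx_get(ctx);
	} else {
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		vma->vm_flags &= ~__VM_UFFD_FLAGS;
	}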
760 bool userfaultfd_remove(struct vm_area_struct *vma, in userfaultfd_remove() argument
763 struct mm_struct *mm = vma->vm_mm; in userfaultfd_remove()
767 ctx = vma->vm_userfaultfd_ctx.ctx; in userfaultfd_remove()
799 int userfaultfd_unmap_prep(struct vm_area_struct *vma, in userfaultfd_unmap_prep() argument
803 for ( ; vma && vma->vm_start < end; vma = vma->vm_next) { in userfaultfd_unmap_prep()
805 struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx; in userfaultfd_unmap_prep()
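userfaultfd_unmap_prep() walks every vma in the munmap range (line 803) and collects contexts that want UFFD_EVENT_UNMAP notifications. A sketch of the per-vma filter, assuming the mainline condition; has_unmap_ctx() is a static helper in this file, inferred rather than shown in the listing:

	/* Sketch (assumption): skip vmas with no ctx, no unmap feature,
	 * or a ctx already queued for this range. */
	if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
	    has_unmap_ctx(ctx, unmaps, start, end))
		continue;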
849 struct vm_area_struct *vma, *prev; in userfaultfd_release() local
869 for (vma = mm->mmap; vma; vma = vma->vm_next) { in userfaultfd_release()
871 BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^ in userfaultfd_release()
872 !!(vma->vm_flags & __VM_UFFD_FLAGS)); in userfaultfd_release()
873 if (vma->vm_userfaultfd_ctx.ctx != ctx) { in userfaultfd_release()
874 prev = vma; in userfaultfd_release()
877 new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS; in userfaultfd_release()
878 prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end, in userfaultfd_release()
879 new_flags, vma->anon_vma, in userfaultfd_release()
880 vma->vm_file, vma->vm_pgoff, in userfaultfd_release()
881 vma_policy(vma), in userfaultfd_release()
883 vma_get_anon_name(vma)); in userfaultfd_release()
885 vma = prev; in userfaultfd_release()
887 prev = vma; in userfaultfd_release()
888 vm_write_begin(vma); in userfaultfd_release()
889 WRITE_ONCE(vma->vm_flags, new_flags); in userfaultfd_release()
890 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; in userfaultfd_release()
891 vm_write_end(vma); in userfaultfd_release()
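userfaultfd_release() detaches the context from every vma when the descriptor is closed: vmas owned by this ctx are merged back with their neighbours via vma_merge() (lines 878-884) or, failing that, rewritten in place under the write sequence (887-891). The BUG_ON at lines 871-872 asserts the file-wide invariant this whole listing illustrates; spelled out:

	/* The XOR at lines 871-872, unpacked: a vma carries a userfaultfd
	 * ctx if and only if it carries some __VM_UFFD_FLAGS bit. */
	bool has_ctx   = vma->vm_userfaultfd_ctx.ctx != NULL;
	bool has_flags = vma->vm_flags & __VM_UFFD_FLAGS;

	BUG_ON(has_ctx != has_flags);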
1259 static inline bool vma_can_userfault(struct vm_area_struct *vma, in vma_can_userfault() argument
1264 if (is_vm_hugetlb_page(vma) || vma_is_shmem(vma)) in vma_can_userfault()
1269 if (!(is_vm_hugetlb_page(vma) || vma_is_shmem(vma))) in vma_can_userfault()
1273 return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) || in vma_can_userfault()
1274 vma_is_shmem(vma); in vma_can_userfault()
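vma_can_userfault() gates both the register and unregister paths. A reconstruction of the full predicate from the matched lines, assuming the pre-PTE-marker mainline shape (the two vm_flags guards are inferred, not in the listing):

	static inline bool vma_can_userfault(struct vm_area_struct *vma,
					     unsigned long vm_flags)
	{
		/* Write-protect mode has no hugetlbfs/shmem support yet. */
		if (vm_flags & VM_UFFD_WP) {
			if (is_vm_hugetlb_page(vma) || vma_is_shmem(vma))
				return false;
		}

		/* Minor faults only exist for hugetlbfs and shmem. */
		if (vm_flags & VM_UFFD_MINOR) {
			if (!(is_vm_hugetlb_page(vma) || vma_is_shmem(vma)))
				return false;
		}

		return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
		       vma_is_shmem(vma);
	}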
1281 struct vm_area_struct *vma, *prev, *cur; in userfaultfd_register() local
1327 vma = find_vma_prev(mm, start, &prev); in userfaultfd_register()
1328 if (!vma) in userfaultfd_register()
1333 if (vma->vm_start >= end) in userfaultfd_register()
1340 if (is_vm_hugetlb_page(vma)) { in userfaultfd_register()
1341 unsigned long vma_hpagesize = vma_kernel_pagesize(vma); in userfaultfd_register()
1352 for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) { in userfaultfd_register()
1412 if (vma->vm_start < start) in userfaultfd_register()
1413 prev = vma; in userfaultfd_register()
1419 BUG_ON(!vma_can_userfault(vma, vm_flags)); in userfaultfd_register()
1420 BUG_ON(vma->vm_userfaultfd_ctx.ctx && in userfaultfd_register()
1421 vma->vm_userfaultfd_ctx.ctx != ctx); in userfaultfd_register()
1422 WARN_ON(!(vma->vm_flags & VM_MAYWRITE)); in userfaultfd_register()
1428 if (vma->vm_userfaultfd_ctx.ctx == ctx && in userfaultfd_register()
1429 (vma->vm_flags & vm_flags) == vm_flags) in userfaultfd_register()
1432 if (vma->vm_start > start) in userfaultfd_register()
1433 start = vma->vm_start; in userfaultfd_register()
1434 vma_end = min(end, vma->vm_end); in userfaultfd_register()
1436 new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags; in userfaultfd_register()
1438 vma->anon_vma, vma->vm_file, vma->vm_pgoff, in userfaultfd_register()
1439 vma_policy(vma), in userfaultfd_register()
1441 vma_get_anon_name(vma)); in userfaultfd_register()
1443 vma = prev; in userfaultfd_register()
1446 if (vma->vm_start < start) { in userfaultfd_register()
1447 ret = split_vma(mm, vma, start, 1); in userfaultfd_register()
1451 if (vma->vm_end > end) { in userfaultfd_register()
1452 ret = split_vma(mm, vma, end, 0); in userfaultfd_register()
1462 vm_write_begin(vma); in userfaultfd_register()
1463 WRITE_ONCE(vma->vm_flags, new_flags); in userfaultfd_register()
1464 vma->vm_userfaultfd_ctx.ctx = ctx; in userfaultfd_register()
1465 vm_write_end(vma); in userfaultfd_register()
1467 if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma)) in userfaultfd_register()
1468 hugetlb_unshare_all_pmds(vma); in userfaultfd_register()
1471 prev = vma; in userfaultfd_register()
1472 start = vma->vm_end; in userfaultfd_register()
1473 vma = vma->vm_next; in userfaultfd_register()
1474 } while (vma && vma->vm_start < end); in userfaultfd_register()
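The register loop above is driven from userspace by the UFFDIO_REGISTER ioctl. A hedged sketch of the caller's side; addr and len are illustrative:

	#include <linux/userfaultfd.h>
	#include <sys/ioctl.h>

	static int register_range(int uffd, void *addr, unsigned long len)
	{
		struct uffdio_register reg = {
			.range = { .start = (unsigned long)addr, .len = len },
			.mode = UFFDIO_REGISTER_MODE_MISSING,
		};

		/* Kernel side: find_vma_prev() (line 1327), per-vma checks
		 * with vma_can_userfault() (1419), then the vma_merge()/
		 * split_vma() loop ending at line 1474 that sets vm_flags
		 * and vm_userfaultfd_ctx.ctx on each covered vma. */
		return ioctl(uffd, UFFDIO_REGISTER, &reg);
	}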
1511 struct vm_area_struct *vma, *prev, *cur; in userfaultfd_unregister() local
1536 vma = find_vma_prev(mm, start, &prev); in userfaultfd_unregister()
1537 if (!vma) in userfaultfd_unregister()
1542 if (vma->vm_start >= end) in userfaultfd_unregister()
1549 if (is_vm_hugetlb_page(vma)) { in userfaultfd_unregister()
1550 unsigned long vma_hpagesize = vma_kernel_pagesize(vma); in userfaultfd_unregister()
1561 for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) { in userfaultfd_unregister()
1581 if (vma->vm_start < start) in userfaultfd_unregister()
1582 prev = vma; in userfaultfd_unregister()
1588 BUG_ON(!vma_can_userfault(vma, vma->vm_flags)); in userfaultfd_unregister()
1594 if (!vma->vm_userfaultfd_ctx.ctx) in userfaultfd_unregister()
1597 WARN_ON(!(vma->vm_flags & VM_MAYWRITE)); in userfaultfd_unregister()
1599 if (vma->vm_start > start) in userfaultfd_unregister()
1600 start = vma->vm_start; in userfaultfd_unregister()
1601 vma_end = min(end, vma->vm_end); in userfaultfd_unregister()
1603 if (userfaultfd_missing(vma)) { in userfaultfd_unregister()
1613 wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range); in userfaultfd_unregister()
1616 new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS; in userfaultfd_unregister()
1618 vma->anon_vma, vma->vm_file, vma->vm_pgoff, in userfaultfd_unregister()
1619 vma_policy(vma), in userfaultfd_unregister()
1621 vma_get_anon_name(vma)); in userfaultfd_unregister()
1623 vma = prev; in userfaultfd_unregister()
1626 if (vma->vm_start < start) { in userfaultfd_unregister()
1627 ret = split_vma(mm, vma, start, 1); in userfaultfd_unregister()
1631 if (vma->vm_end > end) { in userfaultfd_unregister()
1632 ret = split_vma(mm, vma, end, 0); in userfaultfd_unregister()
1642 vm_write_begin(vma); in userfaultfd_unregister()
1643 WRITE_ONCE(vma->vm_flags, new_flags); in userfaultfd_unregister()
1644 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; in userfaultfd_unregister()
1645 vm_write_end(vma); in userfaultfd_unregister()
1648 prev = vma; in userfaultfd_unregister()
1649 start = vma->vm_end; in userfaultfd_unregister()
1650 vma = vma->vm_next; in userfaultfd_unregister()
1651 } while (vma && vma->vm_start < end); in userfaultfd_unregister()
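userfaultfd_unregister() mirrors the register walk but clears flags and context, waking any faulter still parked on a missing page first (line 1613). A hedged sketch of the corresponding UFFDIO_UNREGISTER call, again with illustrative addr and len:

	#include <linux/userfaultfd.h>
	#include <sys/ioctl.h>

	static int unregister_range(int uffd, void *addr, unsigned long len)
	{
		struct uffdio_range range = {
			.start = (unsigned long)addr,
			.len = len,
		};

		/* Kernel side: merge/split each covered vma, then reset
		 * vm_userfaultfd_ctx to NULL_VM_UFFD_CTX under the write
		 * sequence (lines 1616-1651). */
		return ioctl(uffd, UFFDIO_UNREGISTER, &range);
	}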