Lines Matching refs:vma (mm/mlock.c)
381 struct vm_area_struct *vma, struct zone *zone, in __munlock_pagevec_fill() argument
392 pte = get_locked_pte(vma->vm_mm, start, &ptl); in __munlock_pagevec_fill()
405 page = vm_normal_page(vma, start, *pte); in __munlock_pagevec_fill()
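__munlock_pagevec_fill() walks the page tables of the VMA from start toward end, collecting the normal pages it finds into a fixed-size pagevec and returning the address at which the caller should resume (line 512 below feeds that return value back into start). What follows is a minimal standalone sketch of that fill-a-batch-and-report-progress pattern; the capacity, the fake page_present() predicate, and all names are illustrative, not the kernel's.

#include <stdio.h>

#define BATCH		14	/* stand-in for the pagevec capacity */
#define PAGE_SZ		4096UL

/* Pretend every other page in the range has a normal page behind it. */
static int page_present(unsigned long addr)
{
	return (addr / PAGE_SZ) % 2 == 0;
}

/*
 * Collect up to BATCH present pages starting at 'start' and return the
 * address to resume from, mirroring how __munlock_pagevec_fill() hands
 * the new start back to its caller.
 */
static unsigned long batch_fill(unsigned long start, unsigned long end,
				unsigned long *vec, int *nr)
{
	*nr = 0;
	while (start < end && *nr < BATCH) {
		if (page_present(start))
			vec[(*nr)++] = start;
		start += PAGE_SZ;
	}
	return start;
}

int main(void)
{
	unsigned long vec[BATCH];
	int nr;
	unsigned long next = batch_fill(0, 64 * PAGE_SZ, vec, &nr);

	printf("collected %d pages, resume at 0x%lx\n", nr, next);
	return 0;
}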
451 void munlock_vma_pages_range(struct vm_area_struct *vma, in munlock_vma_pages_range() argument
454 vm_write_begin(vma); in munlock_vma_pages_range()
455 WRITE_ONCE(vma->vm_flags, vma->vm_flags & VM_LOCKED_CLEAR_MASK); in munlock_vma_pages_range()
456 vm_write_end(vma); in munlock_vma_pages_range()
473 page = follow_page(vma, start, FOLL_GET | FOLL_DUMP); in munlock_vma_pages_range()
512 start = __munlock_pagevec_fill(&pvec, vma, in munlock_vma_pages_range()
534 static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, in mlock_fixup() argument
537 struct mm_struct *mm = vma->vm_mm; in mlock_fixup()
542 vm_flags_t old_flags = vma->vm_flags; in mlock_fixup()
544 if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) || in mlock_fixup()
545 is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) || in mlock_fixup()
546 vma_is_dax(vma)) in mlock_fixup()
550 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); in mlock_fixup()
551 *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma, in mlock_fixup()
552 vma->vm_file, pgoff, vma_policy(vma), in mlock_fixup()
553 vma->vm_userfaultfd_ctx, vma_get_anon_name(vma)); in mlock_fixup()
555 vma = *prev; in mlock_fixup()
559 if (start != vma->vm_start) { in mlock_fixup()
560 ret = split_vma(mm, vma, start, 1); in mlock_fixup()
565 if (end != vma->vm_end) { in mlock_fixup()
566 ret = split_vma(mm, vma, end, 0); in mlock_fixup()
588 vm_write_begin(vma); in mlock_fixup()
589 WRITE_ONCE(vma->vm_flags, newflags); in mlock_fixup()
590 vm_write_end(vma); in mlock_fixup()
592 munlock_vma_pages_range(vma, start, end); in mlock_fixup()
595 *prev = vma; in mlock_fixup()
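mlock_fixup() first tries to merge the affected range into a neighbouring VMA that already carries the new flags (lines 551-553); failing that, it splits the VMA wherever the requested [start, end) range does not line up with the VMA's own boundaries (lines 559-566), so the flag change touches only the requested pages. The following is a simplified, self-contained sketch of that boundary-splitting decision, using an illustrative struct rather than kernel types.

#include <stdio.h>

struct fake_vma {
	unsigned long vm_start;
	unsigned long vm_end;
};

/* Report where a VMA would have to be split so that new flags apply
 * only to [start, end), echoing the two split_vma() calls above. */
static void plan_splits(const struct fake_vma *vma,
			unsigned long start, unsigned long end)
{
	if (start != vma->vm_start)
		printf("split at 0x%lx, [0x%lx, 0x%lx) keeps the old flags\n",
		       start, vma->vm_start, start);
	if (end != vma->vm_end)
		printf("split at 0x%lx, [0x%lx, 0x%lx) keeps the old flags\n",
		       end, end, vma->vm_end);
	printf("apply the new flags to [0x%lx, 0x%lx)\n", start, end);
}

int main(void)
{
	struct fake_vma vma = { 0x1000, 0x9000 };

	/* A range strictly inside the VMA forces splits at both ends. */
	plan_splits(&vma, 0x3000, 0x7000);
	return 0;
}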
603 struct vm_area_struct * vma, * prev; in apply_vma_lock_flags() local
613 vma = find_vma(current->mm, start); in apply_vma_lock_flags()
614 if (!vma || vma->vm_start > start) in apply_vma_lock_flags()
617 prev = vma->vm_prev; in apply_vma_lock_flags()
618 if (start > vma->vm_start) in apply_vma_lock_flags()
619 prev = vma; in apply_vma_lock_flags()
622 vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; in apply_vma_lock_flags()
627 tmp = vma->vm_end; in apply_vma_lock_flags()
630 error = mlock_fixup(vma, &prev, nstart, tmp, newflags); in apply_vma_lock_flags()
639 vma = prev->vm_next; in apply_vma_lock_flags()
640 if (!vma || vma->vm_start != nstart) { in apply_vma_lock_flags()
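apply_vma_lock_flags() walks every VMA that intersects the requested range, recomputes the lock bits for each piece, and hands the pieces to mlock_fixup(); on a kernel carrying this code it is the worker behind the mlock(2) and munlock(2) system calls. A minimal userspace program exercising that path through the documented syscalls:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 4 * (size_t)sysconf(_SC_PAGESIZE);
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	/* Lock the range; may fail if RLIMIT_MEMLOCK is small. */
	if (mlock(buf, len) != 0)
		perror("mlock");

	memset(buf, 0, len);	/* pages stay resident while locked */

	/* Clear VM_LOCKED again for the same range. */
	if (munlock(buf, len) != 0)
		perror("munlock");

	munmap(buf, len);
	return EXIT_SUCCESS;
}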
658 struct vm_area_struct *vma; in count_mm_mlocked_page_nr() local
664 vma = find_vma(mm, start); in count_mm_mlocked_page_nr()
665 if (vma == NULL) in count_mm_mlocked_page_nr()
666 vma = mm->mmap; in count_mm_mlocked_page_nr()
668 for (; vma ; vma = vma->vm_next) { in count_mm_mlocked_page_nr()
669 if (start >= vma->vm_end) in count_mm_mlocked_page_nr()
671 if (start + len <= vma->vm_start) in count_mm_mlocked_page_nr()
673 if (vma->vm_flags & VM_LOCKED) { in count_mm_mlocked_page_nr()
674 if (start > vma->vm_start) in count_mm_mlocked_page_nr()
675 count -= (start - vma->vm_start); in count_mm_mlocked_page_nr()
676 if (start + len < vma->vm_end) { in count_mm_mlocked_page_nr()
677 count += start + len - vma->vm_start; in count_mm_mlocked_page_nr()
680 count += vma->vm_end - vma->vm_start; in count_mm_mlocked_page_nr()
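count_mm_mlocked_page_nr() accumulates, for every VM_LOCKED VMA that intersects [start, start + len), the bytes of overlap between the VMA and that window (the name indicates the byte total becomes a page count on return). The add/subtract sequence at lines 674-680 is equivalent to clamping the VMA to the query window; below is a standalone sketch of that intersection arithmetic, with illustrative names.

#include <stdio.h>

/* Bytes of a locked VMA [vm_start, vm_end) that fall inside the query
 * window [start, start + len); equivalent to the running count kept by
 * count_mm_mlocked_page_nr() above. */
static unsigned long locked_overlap(unsigned long vm_start, unsigned long vm_end,
				    unsigned long start, unsigned long len)
{
	unsigned long end = start + len;
	unsigned long lo = start > vm_start ? start : vm_start;
	unsigned long hi = end < vm_end ? end : vm_end;

	return hi > lo ? hi - lo : 0;
}

int main(void)
{
	/* Locked VMA [0x1000, 0x4000) against the window [0x2000, 0x6000). */
	printf("%lu bytes overlap\n",
	       locked_overlap(0x1000, 0x4000, 0x2000, 0x4000));
	return 0;
}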
781 struct vm_area_struct * vma, * prev = NULL; in apply_mlockall_flags() local
801 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) { in apply_mlockall_flags()
804 newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; in apply_mlockall_flags()
808 mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags); in apply_mlockall_flags()
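apply_mlockall_flags() loops over every VMA in the process (line 801) and feeds each one to mlock_fixup() with the recomputed flags; it sits behind the mlockall(2) and munlockall(2) system calls. A minimal userspace example of that interface:

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
	/* Lock everything mapped now and everything mapped later; may
	 * fail without CAP_IPC_LOCK or with a small RLIMIT_MEMLOCK. */
	if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0)
		perror("mlockall");

	/* ... latency-sensitive work with no major faults ... */

	if (munlockall() != 0)	/* drops VM_LOCKED from every VMA */
		perror("munlockall");

	return EXIT_SUCCESS;
}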