Lines matching refs:vma in mm/mprotect.c

38 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,  in change_pte_range()  argument
65 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in change_pte_range()
68 if (prot_numa && !(vma->vm_flags & VM_SHARED) && in change_pte_range()
69 atomic_read(&vma->vm_mm->mm_users) == 1) in change_pte_range()
72 flush_tlb_batched_pending(vma->vm_mm); in change_pte_range()
91 page = vm_normal_page(vma, addr, oldpte); in change_pte_range()
96 if (is_cow_mapping(vma->vm_flags) && in change_pte_range()
116 oldpte = ptep_modify_prot_start(vma, addr, pte); in change_pte_range()
137 !(vma->vm_flags & VM_SOFTDIRTY))) { in change_pte_range()
140 ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent); in change_pte_range()
176 set_pte_at(vma->vm_mm, addr, pte, newpte); in change_pte_range()
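
The matches above are in change_pte_range(): it takes the PTE lock with pte_offset_map_lock(), special-cases NUMA hinting (prot_numa) for private, single-user mappings, and rewrites each PTE via ptep_modify_prot_start()/ptep_modify_prot_commit(), taking CoW and soft-dirty state into account. A minimal userspace sketch of the visible effect (not kernel code; assumes Linux): once mprotect() drops PROT_WRITE, the rewritten PTEs make the next write fault.

/* Userspace illustration (not kernel code): the PTE rewrite performed by
 * change_pte_range() is what makes a write fault after mprotect() drops
 * PROT_WRITE.  The SIGSEGV handler only reports the fault and exits. */
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static void on_segv(int sig)
{
	(void)sig;
	write(STDOUT_FILENO, "write faulted after PROT_READ\n", 30);
	_exit(0);
}

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) { perror("mmap"); return 1; }

	p[0] = 'x';				/* populate a writable PTE */
	signal(SIGSEGV, on_segv);

	if (mprotect(p, psz, PROT_READ)) {	/* ends up in change_pte_range() */
		perror("mprotect");
		return 1;
	}
	p[0] = 'y';				/* faults: the PTE lost its write bit */
	return 1;				/* not reached */
}
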
212 static inline unsigned long change_pmd_range(struct vm_area_struct *vma, in change_pmd_range() argument
246 vma, vma->vm_mm, addr, end); in change_pmd_range()
252 __split_huge_pmd(vma, pmd, addr, false, NULL); in change_pmd_range()
254 int nr_ptes = change_huge_pmd(vma, pmd, addr, in change_pmd_range()
269 this_pages = change_pte_range(vma, pmd, addr, next, newprot, in change_pmd_range()
284 static inline unsigned long change_pud_range(struct vm_area_struct *vma, in change_pud_range() argument
297 pages += change_pmd_range(vma, pud, addr, next, newprot, in change_pud_range()
304 static inline unsigned long change_p4d_range(struct vm_area_struct *vma, in change_p4d_range() argument
317 pages += change_pud_range(vma, p4d, addr, next, newprot, in change_p4d_range()
324 static unsigned long change_protection_range(struct vm_area_struct *vma, in change_protection_range() argument
328 struct mm_struct *mm = vma->vm_mm; in change_protection_range()
336 flush_cache_range(vma, addr, end); in change_protection_range()
342 pages += change_p4d_range(vma, pgd, addr, next, newprot, in change_protection_range()
348 flush_tlb_range(vma, start, end); in change_protection_range()
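
change_protection_range() and the change_p4d/pud/pmd_range() helpers above all share one loop shape: step through the address range in chunks that end at the next page-table boundary (pgd_addr_end(), pud_addr_end(), ...), skip levels that are not present, and add up the pages changed. The sketch below is a standalone model of that loop shape only, not kernel code; chunk_end() is a hypothetical, simplified stand-in for the kernel's *_addr_end() helpers.

/* Standalone model of the range walk used by change_*_range() (not kernel
 * code).  chunk_end() plays the role of pmd_addr_end() and friends: it
 * returns the next table-sized boundary or the end of the range, whichever
 * comes first, so each iteration covers at most one page table's span. */
#include <stdio.h>

static unsigned long chunk_end(unsigned long addr, unsigned long end,
			       unsigned long table_span)
{
	unsigned long boundary = (addr + table_span) & ~(table_span - 1);
	return boundary < end ? boundary : end;
}

int main(void)
{
	/* Pretend one table spans 2 MiB, as a PMD does with 4 KiB pages. */
	const unsigned long span = 2UL << 20;
	unsigned long addr = 0x1ff000, end = 0x600000, next;

	do {
		next = chunk_end(addr, end, span);
		printf("walk [%#lx, %#lx)\n", addr, next);
		addr = next;
	} while (addr != end);
	return 0;
}
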
354 unsigned long change_protection(struct vm_area_struct *vma, unsigned long start, in change_protection() argument
362 if (is_vm_hugetlb_page(vma)) in change_protection()
363 pages = hugetlb_change_protection(vma, start, end, newprot); in change_protection()
365 pages = change_protection_range(vma, start, end, newprot, in change_protection()
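
change_protection() is the dispatch point: hugetlbfs VMAs go to hugetlb_change_protection(), everything else goes down the change_protection_range() walk above. The sketch below is a hedged userspace way to exercise the hugetlb branch; it assumes 2 MiB hugepages are available (e.g. vm.nr_hugepages > 0) and simply reports when they are not.

/* Exercises the hugetlb_change_protection() branch of change_protection()
 * from userspace.  Requires reserved hugepages; falls back gracefully. */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2UL << 20;			/* assume 2 MiB hugepage size */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");	/* no hugepages reserved */
		return 0;
	}
	p[0] = 'x';				/* fault the huge page in */
	if (mprotect(p, len, PROT_READ))	/* hugetlb_change_protection() path */
		perror("mprotect");
	else
		puts("hugetlb mapping downgraded to read-only");
	return 0;
}
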
399 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, in mprotect_fixup() argument
402 struct mm_struct *mm = vma->vm_mm; in mprotect_fixup()
403 unsigned long oldflags = vma->vm_flags; in mprotect_fixup()
411 *pprev = vma; in mprotect_fixup()
421 (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && in mprotect_fixup()
454 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); in mprotect_fixup()
456 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), in mprotect_fixup()
457 vma->vm_userfaultfd_ctx, vma_get_anon_name(vma)); in mprotect_fixup()
459 vma = *pprev; in mprotect_fixup()
460 VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY); in mprotect_fixup()
464 *pprev = vma; in mprotect_fixup()
466 if (start != vma->vm_start) { in mprotect_fixup()
467 error = split_vma(mm, vma, start, 1); in mprotect_fixup()
472 if (end != vma->vm_end) { in mprotect_fixup()
473 error = split_vma(mm, vma, end, 0); in mprotect_fixup()
483 vm_write_begin(vma); in mprotect_fixup()
484 WRITE_ONCE(vma->vm_flags, newflags); in mprotect_fixup()
485 dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot); in mprotect_fixup()
486 vma_set_page_prot(vma); in mprotect_fixup()
488 change_protection(vma, start, end, vma->vm_page_prot, in mprotect_fixup()
490 vm_write_end(vma); in mprotect_fixup()
498 populate_vma_page_range(vma, start, end, NULL); in mprotect_fixup()
503 perf_event_mmap(vma); in mprotect_fixup()
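
mprotect_fixup() first tries to fold the changed range into a neighbouring VMA (merging on anon_vma, file, pgoff, policy, userfaultfd context and anon name, per the arguments above) and otherwise calls split_vma() at start and/or end, so only whole VMAs receive the new vm_flags before change_protection() runs. The split is observable from userspace: protecting only the middle page of a three-page anonymous mapping leaves three VMAs where there was one. A minimal sketch, assuming Linux and a mounted /proc:

/* Demonstrates the split_vma() calls in mprotect_fixup(): changing only the
 * middle page of a 3-page anonymous mapping splits one VMA into three, so
 * /proc/<pid>/maps gains two entries.  Linux-only. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

static void count_maps(const char *tag)
{
	char cmd[96];
	printf("%s: ", tag);
	fflush(stdout);
	snprintf(cmd, sizeof(cmd), "wc -l < /proc/%d/maps", (int)getpid());
	if (system(cmd) != 0)
		puts("(could not read maps)");
}

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, 3 * psz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) { perror("mmap"); return 1; }

	count_maps("VMAs before mprotect");
	/* Only the middle page changes: mprotect_fixup() must split the VMA
	 * at both 'start' and 'end'. */
	if (mprotect(p + psz, psz, PROT_READ)) { perror("mprotect"); return 1; }
	count_maps("VMAs after mprotect of the middle page");
	return 0;
}
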
518 struct vm_area_struct *vma, *prev; in do_mprotect_pkey() local
554 vma = find_vma(current->mm, start); in do_mprotect_pkey()
556 if (!vma) in do_mprotect_pkey()
558 prev = vma->vm_prev; in do_mprotect_pkey()
560 if (vma->vm_start >= end) in do_mprotect_pkey()
562 start = vma->vm_start; in do_mprotect_pkey()
564 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_mprotect_pkey()
567 if (vma->vm_start > start) in do_mprotect_pkey()
570 end = vma->vm_end; in do_mprotect_pkey()
572 if (!(vma->vm_flags & VM_GROWSUP)) in do_mprotect_pkey()
576 if (start > vma->vm_start) in do_mprotect_pkey()
577 prev = vma; in do_mprotect_pkey()
587 if (rier && (vma->vm_flags & VM_MAYEXEC)) in do_mprotect_pkey()
598 new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey); in do_mprotect_pkey()
600 newflags |= (vma->vm_flags & ~mask_off_old_flags); in do_mprotect_pkey()
614 error = security_file_mprotect(vma, reqprot, prot); in do_mprotect_pkey()
618 tmp = vma->vm_end; in do_mprotect_pkey()
621 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags); in do_mprotect_pkey()
631 vma = prev->vm_next; in do_mprotect_pkey()
632 if (!vma || vma->vm_start != nstart) { in do_mprotect_pkey()
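
do_mprotect_pkey() drives everything one VMA at a time: find_vma() locates the first VMA, each iteration fixes up the flags and calls mprotect_fixup(), and the loop then moves on via prev->vm_next, giving up when the next VMA does not start exactly at nstart, i.e. when the requested range crosses an unmapped hole. From userspace that case surfaces as ENOMEM. A minimal sketch, assuming Linux:

/* Userspace view of the per-VMA loop in do_mprotect_pkey(): an mprotect()
 * range that crosses an unmapped hole fails with ENOMEM, because the loop
 * finds that the next VMA does not start where the previous one ended. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	/* Map three pages, then punch a hole in the middle one. */
	char *p = mmap(NULL, 3 * psz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) { perror("mmap"); return 1; }
	if (munmap(p + psz, psz)) { perror("munmap"); return 1; }

	/* The range now spans VMA / hole / VMA: the kernel's loop stops at
	 * the hole and the call fails. */
	if (mprotect(p, 3 * psz, PROT_READ))
		printf("mprotect over a hole: %s\n", strerror(errno));
	return 0;
}
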