Lines matching refs: vma
70 static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma, in alloc_new_pud() argument
84 static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, in alloc_new_pmd() argument
90 pud = alloc_new_pud(mm, vma, addr); in alloc_new_pmd()
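The two allocators above are layered: alloc_new_pmd() first calls alloc_new_pud() to make sure the parent level exists, and only then allocates the PMD itself. A minimal userspace sketch of that ensure-parent-then-child pattern, with illustrative types rather than the kernel's real page-table structures:

#include <stdlib.h>

struct pmd { unsigned long entries[512]; };     /* stand-in for a PMD page */
struct pud { struct pmd *pmd; };
struct pgd { struct pud *pud; };

static struct pud *alloc_new_pud(struct pgd *pgd)
{
        if (!pgd->pud)
                pgd->pud = calloc(1, sizeof(*pgd->pud));
        return pgd->pud;                /* NULL if allocation failed */
}

static struct pmd *alloc_new_pmd(struct pgd *pgd)
{
        struct pud *pud = alloc_new_pud(pgd);   /* ensure the parent level */

        if (!pud)
                return NULL;
        if (!pud->pmd)
                pud->pmd = calloc(1, sizeof(*pud->pmd));
        return pud->pmd;
}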
103 static void take_rmap_locks(struct vm_area_struct *vma) in take_rmap_locks() argument
105 if (vma->vm_file) in take_rmap_locks()
106 i_mmap_lock_write(vma->vm_file->f_mapping); in take_rmap_locks()
107 if (vma->anon_vma) in take_rmap_locks()
108 anon_vma_lock_write(vma->anon_vma); in take_rmap_locks()
111 static void drop_rmap_locks(struct vm_area_struct *vma) in drop_rmap_locks() argument
113 if (vma->anon_vma) in drop_rmap_locks()
114 anon_vma_unlock_write(vma->anon_vma); in drop_rmap_locks()
115 if (vma->vm_file) in drop_rmap_locks()
116 i_mmap_unlock_write(vma->vm_file->f_mapping); in drop_rmap_locks()
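take_rmap_locks() and drop_rmap_locks() acquire the two reverse-map locks in a fixed order (the file mapping's i_mmap lock first, then the anon_vma lock) and release them in the opposite order, the usual discipline for avoiding lock-order inversions. A userspace analogue with pthread mutexes standing in for the kernel locks; the fake_vma type and field names are illustrative:

#include <pthread.h>

struct fake_vma {
        pthread_mutex_t *file_lock;     /* i_mmap lock stand-in, may be NULL */
        pthread_mutex_t *anon_lock;     /* anon_vma lock stand-in, may be NULL */
};

static void take_rmap_locks(struct fake_vma *vma)
{
        if (vma->file_lock)
                pthread_mutex_lock(vma->file_lock);
        if (vma->anon_lock)
                pthread_mutex_lock(vma->anon_lock);
}

static void drop_rmap_locks(struct fake_vma *vma)
{
        /* release in the reverse of the acquisition order */
        if (vma->anon_lock)
                pthread_mutex_unlock(vma->anon_lock);
        if (vma->file_lock)
                pthread_mutex_unlock(vma->file_lock);
}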
134 static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, in move_ptes() argument
139 struct mm_struct *mm = vma->vm_mm; in move_ptes()
164 take_rmap_locks(vma); in move_ptes()
175 flush_tlb_batched_pending(vma->vm_mm); in move_ptes()
204 flush_tlb_range(vma, old_end - len, old_end); in move_ptes()
210 drop_rmap_locks(vma); in move_ptes()
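Within move_ptes() the rmap locks are only taken when the caller asks for them, pending batched TLB flushes are forced out (line 175) before entries move, and the flush at line 204 covers exactly the moved range: by that point old_addr has advanced to old_end, so old_end - len recovers the original start. A runnable sketch of that cursor arithmetic (names are illustrative, and the per-entry work is reduced to a comment):

#include <stdio.h>

#define PAGE_SIZE 4096UL

static void flush_range(unsigned long start, unsigned long end)
{
        printf("flush [%#lx, %#lx)\n", start, end);
}

static void move_ptes_shape(unsigned long old_addr, unsigned long old_end,
                            unsigned long new_addr)
{
        unsigned long len = old_end - old_addr;

        for (; old_addr < old_end;
             old_addr += PAGE_SIZE, new_addr += PAGE_SIZE) {
                /* kernel: clear the old PTE and install it at new_addr,
                 * under both page-table locks */
        }
        /* old_addr == old_end here, so [old_end - len, old_end) is the
         * range that was actually moved */
        flush_range(old_end - len, old_end);
}

int main(void)
{
        move_ptes_shape(0x1000, 0x5000, 0x9000);
        return 0;
}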
214 static inline bool trylock_vma_ref_count(struct vm_area_struct *vma) in trylock_vma_ref_count() argument
220 return atomic_cmpxchg(&vma->vm_ref_count, 1, -1) == 1; in trylock_vma_ref_count()
226 static inline void unlock_vma_ref_count(struct vm_area_struct *vma) in unlock_vma_ref_count() argument
232 VM_BUG_ON_VMA(atomic_cmpxchg(&vma->vm_ref_count, -1, 1) != -1, in unlock_vma_ref_count()
233 vma); in unlock_vma_ref_count()
236 static inline bool trylock_vma_ref_count(struct vm_area_struct *vma) in trylock_vma_ref_count() argument
240 static inline void unlock_vma_ref_count(struct vm_area_struct *vma) in unlock_vma_ref_count() argument
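The trylock_vma_ref_count()/unlock_vma_ref_count() pair (compiled out to the empty stubs at lines 236 and 240 when the ref-count machinery is absent) treats vm_ref_count == 1 as "no other users": the trylock swaps 1 to -1 to claim the VMA exclusively, and the unlock swaps it back, checking the cmpxchg result the way the VM_BUG_ON_VMA() at line 232 does. A userspace sketch with C11 atomics; this mirrors the idea, not the kernel API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

static bool trylock_ref_count(atomic_int *ref)
{
        int expected = 1;

        /* succeeds only if we are the sole reference holder */
        return atomic_compare_exchange_strong(ref, &expected, -1);
}

static void unlock_ref_count(atomic_int *ref)
{
        int expected = -1;

        /* the count must still be -1 here; anything else is a bug */
        if (!atomic_compare_exchange_strong(ref, &expected, 1))
                abort();
}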
246 static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr, in move_normal_pmd() argument
250 struct mm_struct *mm = vma->vm_mm; in move_normal_pmd()
284 if (!trylock_vma_ref_count(vma)) in move_normal_pmd()
291 old_ptl = pmd_lock(vma->vm_mm, old_pmd); in move_normal_pmd()
304 flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE); in move_normal_pmd()
309 unlock_vma_ref_count(vma); in move_normal_pmd()
313 static inline bool move_normal_pmd(struct vm_area_struct *vma, in move_normal_pmd() argument
322 static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr, in move_normal_pud() argument
326 struct mm_struct *mm = vma->vm_mm; in move_normal_pud()
341 if (!trylock_vma_ref_count(vma)) in move_normal_pud()
348 old_ptl = pud_lock(vma->vm_mm, old_pud); in move_normal_pud()
361 flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE); in move_normal_pud()
366 unlock_vma_ref_count(vma); in move_normal_pud()
370 static inline bool move_normal_pud(struct vm_area_struct *vma, in move_normal_pud() argument
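move_normal_pmd() and move_normal_pud() follow the same shape one level apart: bail out unless the vm_ref_count trylock succeeds, take the source page-table lock and then the destination lock, transplant the whole entry, flush the PMD- or PUD-sized range, and unlock in reverse. A combined userspace sketch with pthread mutexes standing in for pmd_lock()/pud_lock(); the table type is illustrative:

#include <pthread.h>
#include <stdbool.h>

struct table {
        pthread_mutex_t lock;
        unsigned long entry;            /* one PMD/PUD slot, simplified */
};

static bool move_table_entry(struct table *old, struct table *new)
{
        bool moved = false;

        pthread_mutex_lock(&old->lock);         /* old_ptl */
        if (new != old)
                pthread_mutex_lock(&new->lock); /* new_ptl, nested */

        if (!new->entry && old->entry) {
                new->entry = old->entry;        /* install at the new slot */
                old->entry = 0;                 /* clear the source slot */
                /* kernel: flush_tlb_range() over the whole extent here */
                moved = true;
        }

        if (new != old)
                pthread_mutex_unlock(&new->lock);
        pthread_mutex_unlock(&old->lock);
        return moved;
}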
425 static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma, in move_pgt_entry() argument
433 take_rmap_locks(vma); in move_pgt_entry()
437 moved = move_normal_pmd(vma, old_addr, new_addr, old_entry, in move_pgt_entry()
441 moved = move_normal_pud(vma, old_addr, new_addr, old_entry, in move_pgt_entry()
446 move_huge_pmd(vma, old_addr, new_addr, old_entry, in move_pgt_entry()
455 drop_rmap_locks(vma); in move_pgt_entry()
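move_pgt_entry() is a small dispatcher: it takes the rmap locks only when the caller needs them, routes to the PMD, PUD, or huge-PMD mover by entry type, and drops the locks again. A condensed re-sketch with stub movers standing in for the functions above:

#include <stdbool.h>

enum pgt_entry { NORMAL_PMD, HPAGE_PMD, NORMAL_PUD };

static bool move_pmd_stub(void)  { return true; }
static bool move_pud_stub(void)  { return true; }
static bool move_huge_stub(void) { return true; }
static void take_locks(void) { /* take_rmap_locks() stand-in */ }
static void drop_locks(void) { /* drop_rmap_locks() stand-in */ }

static bool move_pgt_entry(enum pgt_entry entry, bool need_rmap_locks)
{
        bool moved = false;

        if (need_rmap_locks)
                take_locks();

        switch (entry) {
        case NORMAL_PMD:
                moved = move_pmd_stub();
                break;
        case NORMAL_PUD:
                moved = move_pud_stub();
                break;
        case HPAGE_PMD:
                moved = move_huge_stub();
                break;
        }

        if (need_rmap_locks)
                drop_locks();
        return moved;
}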
460 unsigned long move_page_tables(struct vm_area_struct *vma, in move_page_tables() argument
473 flush_cache_range(vma, old_addr, old_end); in move_page_tables()
475 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm, in move_page_tables()
489 old_pud = get_old_pud(vma->vm_mm, old_addr); in move_page_tables()
492 new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr); in move_page_tables()
495 if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr, in move_page_tables()
501 old_pmd = get_old_pmd(vma->vm_mm, old_addr); in move_page_tables()
504 new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); in move_page_tables()
510 move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr, in move_page_tables()
513 split_huge_pmd(vma, old_pmd, old_addr); in move_page_tables()
522 if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr, in move_page_tables()
529 move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma, in move_page_tables()
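The move_page_tables() loop advances in the largest steps the page-table geometry allows: each iteration computes an extent capped by the next PMD (or PUD) boundary at both the old and new address and by the end of the range, tries a whole-entry move via move_pgt_entry(), and falls back to move_ptes() for the page-by-page case, splitting any huge PMD first (line 513). A runnable sketch of the extent computation, assuming x86-64's 2 MiB PMD_SIZE:

#include <stdio.h>

#define PMD_SIZE (2UL << 20)
#define PMD_MASK (~(PMD_SIZE - 1))

static unsigned long next_pmd_boundary(unsigned long addr)
{
        return (addr + PMD_SIZE) & PMD_MASK;
}

/* largest step that stays inside one PMD at both cursors and does not
 * run past old_end */
static unsigned long extent(unsigned long old_addr, unsigned long new_addr,
                            unsigned long old_end)
{
        unsigned long e = next_pmd_boundary(old_addr) - old_addr;

        if (e > old_end - old_addr)
                e = old_end - old_addr;
        if (e > next_pmd_boundary(new_addr) - new_addr)
                e = next_pmd_boundary(new_addr) - new_addr;
        return e;
}

int main(void)
{
        /* misaligned cursors: the nearer boundary limits the step */
        printf("%#lx\n", extent(0x200000, 0x310000, 0x800000));
        return 0;
}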
538 static unsigned long move_vma(struct vm_area_struct *vma, in move_vma() argument
544 struct mm_struct *mm = vma->vm_mm; in move_vma()
546 unsigned long vm_flags = vma->vm_flags; in move_vma()
569 err = ksm_madvise(vma, old_addr, old_addr + old_len, in move_vma()
574 new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT); in move_vma()
575 new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff, in move_vma()
585 if (vma != new_vma) in move_vma()
586 vm_write_begin(vma); in move_vma()
588 moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len, in move_vma()
592 } else if (vma->vm_ops && vma->vm_ops->mremap) { in move_vma()
593 err = vma->vm_ops->mremap(new_vma); in move_vma()
602 move_page_tables(new_vma, new_addr, vma, old_addr, moved_len, in move_vma()
604 if (vma != new_vma) in move_vma()
605 vm_write_end(vma); in move_vma()
606 vma = new_vma; in move_vma()
614 if (vma != new_vma) in move_vma()
615 vm_write_end(vma); in move_vma()
621 vma->vm_flags &= ~VM_ACCOUNT; in move_vma()
622 excess = vma->vm_end - vma->vm_start - old_len; in move_vma()
623 if (old_addr > vma->vm_start && in move_vma()
624 old_addr + old_len < vma->vm_end) in move_vma()
638 vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT); in move_vma()
641 if (unlikely(vma->vm_flags & VM_PFNMAP)) in move_vma()
642 untrack_pfn_moved(vma); in move_vma()
647 vma->vm_flags |= VM_ACCOUNT; in move_vma()
660 if (split && new_vma == vma) in move_vma()
664 vma->vm_flags &= VM_LOCKED_CLEAR_MASK; in move_vma()
685 vma->vm_flags |= VM_ACCOUNT; in move_vma()
687 vma->vm_next->vm_flags |= VM_ACCOUNT; in move_vma()
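move_vma() wires the pieces together: it computes the destination's file offset, copies the VMA, moves the page tables (rolling the move back at line 602 if it only partially succeeds), and then sorts out accounting, locked-memory state, and the unmap of the old range. The pgoff computation at line 574 is plain page arithmetic; a worked example with made-up addresses:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        unsigned long vm_start = 0x7f0000400000UL;
        unsigned long vm_pgoff = 0x10;          /* VMA begins at file page 0x10 */
        unsigned long old_addr = 0x7f0000500000UL;

        /* new_pgoff = vm_pgoff + ((old_addr - vm_start) >> PAGE_SHIFT) */
        unsigned long new_pgoff =
                vm_pgoff + ((old_addr - vm_start) >> PAGE_SHIFT);

        printf("new_pgoff = %#lx\n", new_pgoff); /* 0x10 + 0x100 = 0x110 */
        return 0;
}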
698 struct vm_area_struct *vma = find_vma(mm, addr); in vma_to_resize() local
701 if (!vma || vma->vm_start > addr) in vma_to_resize()
712 if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) { in vma_to_resize()
718 (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))) in vma_to_resize()
721 if (is_vm_hugetlb_page(vma)) in vma_to_resize()
725 if (old_len > vma->vm_end - addr) in vma_to_resize()
729 return vma; in vma_to_resize()
732 pgoff = (addr - vma->vm_start) >> PAGE_SHIFT; in vma_to_resize()
733 pgoff += vma->vm_pgoff; in vma_to_resize()
737 if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) in vma_to_resize()
740 if (vma->vm_flags & VM_LOCKED) { in vma_to_resize()
749 if (!may_expand_vm(mm, vma->vm_flags, in vma_to_resize()
753 if (vma->vm_flags & VM_ACCOUNT) { in vma_to_resize()
760 return vma; in vma_to_resize()
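vma_to_resize() is the common validation funnel for both mremap() paths: the VMA must exist and cover addr for old_len bytes, hugetlb mappings are rejected, and a mapping marked VM_DONTEXPAND or VM_PFNMAP may not grow; locked-memory and commit accounting checks follow. A condensed errno-style sketch under those assumptions (the fake_vma type and helper name are illustrative):

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

struct fake_vma {
        unsigned long vm_start, vm_end;
        bool hugetlb;
        bool dontexpand_or_pfnmap;
};

static long check_resize(const struct fake_vma *vma, unsigned long addr,
                         unsigned long old_len, unsigned long new_len)
{
        if (!vma || vma->vm_start > addr)
                return -EFAULT;         /* no VMA covering addr */
        if (vma->hugetlb)
                return -EINVAL;         /* hugetlb is handled elsewhere */
        if (old_len > vma->vm_end - addr)
                return -EFAULT;         /* range runs off the end of the VMA */
        if (new_len > old_len && vma->dontexpand_or_pfnmap)
                return -EFAULT;         /* these mappings must not grow */
        return 0;
}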
770 struct vm_area_struct *vma; in mremap_to() local
815 vma = vma_to_resize(addr, old_len, new_len, flags, &charged); in mremap_to()
816 if (IS_ERR(vma)) { in mremap_to()
817 ret = PTR_ERR(vma); in mremap_to()
823 !may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) { in mremap_to()
831 if (vma->vm_flags & VM_MAYSHARE) in mremap_to()
834 ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff + in mremap_to()
835 ((addr - vma->vm_start) >> PAGE_SHIFT), in mremap_to()
844 ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf, in mremap_to()
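mremap_to() handles the MREMAP_FIXED case: validate the VMA, clear out anything occupying the destination, re-run get_unmapped_area() at the requested address (line 834), and hand off to move_vma(). Seen from userspace, this is the mremap() variant that takes a fifth argument; MREMAP_FIXED must be combined with MREMAP_MAYMOVE:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 4 * 4096;
        void *src = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        /* reserve a destination address, then hand it to mremap() */
        void *dst = mmap(NULL, len, PROT_NONE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (src == MAP_FAILED || dst == MAP_FAILED)
                return 1;

        void *q = mremap(src, len, len, MREMAP_FIXED | MREMAP_MAYMOVE, dst);
        if (q == MAP_FAILED) {
                perror("mremap");
                return 1;
        }
        printf("moved to %p (requested %p)\n", q, dst);
        munmap(q, len);
        return 0;
}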
857 static int vma_expandable(struct vm_area_struct *vma, unsigned long delta) in vma_expandable() argument
859 unsigned long end = vma->vm_end + delta; in vma_expandable()
860 if (end < vma->vm_end) /* overflow */ in vma_expandable()
862 if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */ in vma_expandable()
864 if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start, in vma_expandable()
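vma_expandable() answers whether the VMA can simply grow in place by delta bytes: the new end must not wrap around the address space, must not run into the next VMA, and get_unmapped_area() must still accept the enlarged range. A userspace sketch of the first two tests (the third is elided; the fake_vma type is illustrative):

#include <stdbool.h>

struct fake_vma {
        unsigned long vm_start, vm_end;
        struct fake_vma *vm_next;
};

static bool expandable(const struct fake_vma *vma, unsigned long delta)
{
        unsigned long end = vma->vm_end + delta;

        if (end < vma->vm_end)
                return false;           /* wrapped past the top of memory */
        if (vma->vm_next && vma->vm_next->vm_start < end)
                return false;           /* would overlap the next mapping */
        return true;
}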
882 struct vm_area_struct *vma; in SYSCALL_DEFINE5() local
966 vma = vma_to_resize(addr, old_len, new_len, flags, &charged); in SYSCALL_DEFINE5()
967 if (IS_ERR(vma)) { in SYSCALL_DEFINE5()
968 ret = PTR_ERR(vma); in SYSCALL_DEFINE5()
974 if (old_len == vma->vm_end - addr) { in SYSCALL_DEFINE5()
976 if (vma_expandable(vma, new_len - old_len)) { in SYSCALL_DEFINE5()
979 if (vma_adjust(vma, vma->vm_start, addr + new_len, in SYSCALL_DEFINE5()
980 vma->vm_pgoff, NULL)) { in SYSCALL_DEFINE5()
985 vm_stat_account(mm, vma->vm_flags, pages); in SYSCALL_DEFINE5()
986 if (vma->vm_flags & VM_LOCKED) { in SYSCALL_DEFINE5()
1003 if (vma->vm_flags & VM_MAYSHARE) in SYSCALL_DEFINE5()
1006 new_addr = get_unmapped_area(vma->vm_file, 0, new_len, in SYSCALL_DEFINE5()
1007 vma->vm_pgoff + in SYSCALL_DEFINE5()
1008 ((addr - vma->vm_start) >> PAGE_SHIFT), in SYSCALL_DEFINE5()
1015 ret = move_vma(vma, addr, old_len, new_len, new_addr, in SYSCALL_DEFINE5()
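The syscall body ties the strategies together: a grow at the end of the VMA tries vma_expandable() and a simple vma_adjust() first (lines 974-980), and only when in-place growth is impossible does it pick a fresh area and call move_vma(). From userspace the whole decision is invisible; a small program that lets the kernel choose:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        size_t old_len = 2 * 4096, new_len = 8 * 4096;
        void *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;

        /* may grow in place (the vma_expandable() path) or relocate
         * the mapping (the move_vma() path) */
        void *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
        if (q == MAP_FAILED) {
                perror("mremap");
                return 1;
        }
        printf("old %p -> new %p (%s)\n", p, q,
               p == q ? "grown in place" : "moved");
        munmap(q, new_len);
        return 0;
}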