Lines Matching refs:vma

81 		struct vm_area_struct *vma, struct vm_area_struct *prev,
126 void vma_set_page_prot(struct vm_area_struct *vma) in vma_set_page_prot() argument
128 unsigned long vm_flags = vma->vm_flags; in vma_set_page_prot()
131 vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); in vma_set_page_prot()
132 if (vma_wants_writenotify(vma, vm_page_prot)) { in vma_set_page_prot()
137 WRITE_ONCE(vma->vm_page_prot, vm_page_prot); in vma_set_page_prot()
143 static void __remove_shared_vm_struct(struct vm_area_struct *vma, in __remove_shared_vm_struct() argument
146 if (vma->vm_flags & VM_DENYWRITE) in __remove_shared_vm_struct()
148 if (vma->vm_flags & VM_SHARED) in __remove_shared_vm_struct()
152 vma_interval_tree_remove(vma, &mapping->i_mmap); in __remove_shared_vm_struct()
160 void unlink_file_vma(struct vm_area_struct *vma) in unlink_file_vma() argument
162 struct file *file = vma->vm_file; in unlink_file_vma()
167 __remove_shared_vm_struct(vma, file, mapping); in unlink_file_vma()
172 static void __free_vma(struct vm_area_struct *vma) in __free_vma() argument
174 if (vma->vm_file) in __free_vma()
175 fput(vma->vm_file); in __free_vma()
176 mpol_put(vma_policy(vma)); in __free_vma()
177 vm_area_free(vma); in __free_vma()
181 void put_vma(struct vm_area_struct *vma) in put_vma() argument
183 if (atomic_dec_and_test(&vma->vm_ref_count)) in put_vma()
184 __free_vma(vma); in put_vma()
187 static inline void put_vma(struct vm_area_struct *vma) in put_vma() argument
189 __free_vma(vma); in put_vma()
196 static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) in remove_vma() argument
198 struct vm_area_struct *next = vma->vm_next; in remove_vma()
201 if (vma->vm_ops && vma->vm_ops->close) in remove_vma()
202 vma->vm_ops->close(vma); in remove_vma()
203 put_vma(vma); in remove_vma()
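
The two put_vma() definitions above correspond to a config-dependent build: one drops a vm_ref_count reference and frees only on the last put, the other frees unconditionally; remove_vma() then calls ->close() before dropping its reference. A minimal userspace sketch of the same last-reference-frees pattern, using C11 atomics and a made-up struct vma_model rather than the kernel's types:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for vm_area_struct; only the refcount matters here. */
struct vma_model {
	atomic_int ref_count;	/* plays the role of vm_ref_count */
};

static void free_vma(struct vma_model *vma)
{
	/* In the kernel this is where vm_file is put, the mempolicy is
	 * dropped, and the structure goes back to its slab cache. */
	free(vma);
}

/* Drop one reference; free only when the last reference goes away. */
static void put_vma(struct vma_model *vma)
{
	if (atomic_fetch_sub(&vma->ref_count, 1) == 1)
		free_vma(vma);
}

int main(void)
{
	struct vma_model *vma = malloc(sizeof(*vma));

	atomic_init(&vma->ref_count, 2);	/* e.g. the VMA list plus one lockless lookup */
	put_vma(vma);				/* still referenced elsewhere */
	put_vma(vma);				/* last reference: freed here */
	printf("both references dropped\n");
	return 0;
}
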
308 static inline unsigned long vma_compute_gap(struct vm_area_struct *vma) in vma_compute_gap() argument
318 gap = vm_start_gap(vma); in vma_compute_gap()
319 if (vma->vm_prev) { in vma_compute_gap()
320 prev_end = vm_end_gap(vma->vm_prev); in vma_compute_gap()
330 static unsigned long vma_compute_subtree_gap(struct vm_area_struct *vma) in vma_compute_subtree_gap() argument
332 unsigned long max = vma_compute_gap(vma), subtree_gap; in vma_compute_subtree_gap()
333 if (vma->vm_rb.rb_left) { in vma_compute_subtree_gap()
334 subtree_gap = rb_entry(vma->vm_rb.rb_left, in vma_compute_subtree_gap()
339 if (vma->vm_rb.rb_right) { in vma_compute_subtree_gap()
340 subtree_gap = rb_entry(vma->vm_rb.rb_right, in vma_compute_subtree_gap()
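
vma_compute_gap() measures the free space between a VMA's start (including its guard gap, via vm_start_gap()) and the end of its predecessor, and vma_compute_subtree_gap() takes the maximum of that gap and the cached gaps of the two rbtree children. A simplified userspace model over a plain binary tree, assuming no guard pages so vm_start_gap()/vm_end_gap() collapse to vm_start/vm_end:

#include <stdio.h>

/* Simplified node: address range, list predecessor, tree children. */
struct vma_node {
	unsigned long vm_start, vm_end;
	struct vma_node *vm_prev;		/* previous VMA by address */
	struct vma_node *left, *right;		/* rbtree children (shape only) */
	unsigned long rb_subtree_gap;		/* cached result */
};

/* Gap between this VMA and the one before it (0 for the first VMA). */
static unsigned long vma_compute_gap(const struct vma_node *vma)
{
	unsigned long prev_end = vma->vm_prev ? vma->vm_prev->vm_end : 0;

	return vma->vm_start > prev_end ? vma->vm_start - prev_end : 0;
}

/* Largest gap in the subtree rooted at @vma, using the children's caches. */
static unsigned long vma_compute_subtree_gap(struct vma_node *vma)
{
	unsigned long max = vma_compute_gap(vma);

	if (vma->left && vma->left->rb_subtree_gap > max)
		max = vma->left->rb_subtree_gap;
	if (vma->right && vma->right->rb_subtree_gap > max)
		max = vma->right->rb_subtree_gap;
	vma->rb_subtree_gap = max;
	return max;
}

int main(void)
{
	struct vma_node a = { .vm_start = 0x1000, .vm_end = 0x2000 };
	struct vma_node b = { .vm_start = 0x5000, .vm_end = 0x6000, .vm_prev = &a };
	struct vma_node c = { .vm_start = 0x6000, .vm_end = 0x7000, .vm_prev = &b };

	/* Compute the cached gaps bottom-up, as the augmented rbtree callbacks do. */
	vma_compute_subtree_gap(&a);
	vma_compute_subtree_gap(&c);
	b.left = &a;
	b.right = &c;
	vma_compute_subtree_gap(&b);

	printf("largest gap under root: 0x%lx\n", b.rb_subtree_gap);	/* 0x3000 */
	return 0;
}
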
356 struct vm_area_struct *vma; in browse_rb() local
357 vma = rb_entry(nd, struct vm_area_struct, vm_rb); in browse_rb()
358 if (vma->vm_start < prev) { in browse_rb()
360 vma->vm_start, prev); in browse_rb()
363 if (vma->vm_start < pend) { in browse_rb()
365 vma->vm_start, pend); in browse_rb()
368 if (vma->vm_start > vma->vm_end) { in browse_rb()
370 vma->vm_start, vma->vm_end); in browse_rb()
374 if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) { in browse_rb()
376 vma->rb_subtree_gap, in browse_rb()
377 vma_compute_subtree_gap(vma)); in browse_rb()
383 prev = vma->vm_start; in browse_rb()
384 pend = vma->vm_end; in browse_rb()
401 struct vm_area_struct *vma; in validate_mm_rb() local
402 vma = rb_entry(nd, struct vm_area_struct, vm_rb); in validate_mm_rb()
403 VM_BUG_ON_VMA(vma != ignore && in validate_mm_rb()
404 vma->rb_subtree_gap != vma_compute_subtree_gap(vma), in validate_mm_rb()
405 vma); in validate_mm_rb()
414 struct vm_area_struct *vma = mm->mmap; in validate_mm() local
416 while (vma) { in validate_mm()
417 struct anon_vma *anon_vma = vma->anon_vma; in validate_mm()
422 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in validate_mm()
427 highest_address = vm_end_gap(vma); in validate_mm()
428 vma = vma->vm_next; in validate_mm()
469 static void vma_gap_update(struct vm_area_struct *vma)
475 vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
478 static inline void vma_rb_insert(struct vm_area_struct *vma, in vma_rb_insert() argument
486 rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks); in vma_rb_insert()
489 static void __vma_rb_erase(struct vm_area_struct *vma, struct mm_struct *mm) in __vma_rb_erase() argument
498 rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks); in __vma_rb_erase()
505 RB_CLEAR_NODE(&vma->vm_rb); in __vma_rb_erase()
508 static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma, in vma_rb_erase_ignore() argument
523 __vma_rb_erase(vma, mm); in vma_rb_erase_ignore()
526 static __always_inline void vma_rb_erase(struct vm_area_struct *vma, in vma_rb_erase() argument
529 vma_rb_erase_ignore(vma, mm, vma); in vma_rb_erase()
547 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma) in anon_vma_interval_tree_pre_update_vma() argument
551 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in anon_vma_interval_tree_pre_update_vma()
556 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma) in anon_vma_interval_tree_post_update_vma() argument
560 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in anon_vma_interval_tree_post_update_vma()
608 struct vm_area_struct *vma) in vma_next()
610 if (!vma) in vma_next()
613 return vma->vm_next; in vma_next()
646 struct vm_area_struct *vma; in count_vma_pages_range() local
649 vma = find_vma_intersection(mm, addr, end); in count_vma_pages_range()
650 if (!vma) in count_vma_pages_range()
653 nr_pages = (min(end, vma->vm_end) - in count_vma_pages_range()
654 max(addr, vma->vm_start)) >> PAGE_SHIFT; in count_vma_pages_range()
657 for (vma = vma->vm_next; vma; vma = vma->vm_next) { in count_vma_pages_range()
660 if (vma->vm_start > end) in count_vma_pages_range()
663 overlap_len = min(end, vma->vm_end) - vma->vm_start; in count_vma_pages_range()
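
count_vma_pages_range() locates the first VMA intersecting [addr, end) with find_vma_intersection(), clips it against both bounds, then walks vm_next clipping only against end. A condensed model over a sorted, non-overlapping list of ranges, assuming 4 KiB pages; here the same clip formula is applied to every range, which is equivalent for a sorted list:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

struct range {
	unsigned long start, end;	/* [start, end), page aligned */
	struct range *next;		/* sorted by start, non-overlapping */
};

static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }
static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }

/* Pages of existing ranges overlapping [addr, end). */
static unsigned long count_pages_range(struct range *head,
				       unsigned long addr, unsigned long end)
{
	unsigned long nr_pages = 0;
	struct range *r;

	for (r = head; r; r = r->next) {
		if (r->end <= addr)	/* entirely before the window */
			continue;
		if (r->start >= end)	/* sorted: nothing later can overlap */
			break;
		nr_pages += (min_ul(end, r->end) - max_ul(addr, r->start)) >> PAGE_SHIFT;
	}
	return nr_pages;
}

int main(void)
{
	struct range c = { 0x9000, 0xb000, NULL };
	struct range b = { 0x5000, 0x7000, &c };
	struct range a = { 0x1000, 0x3000, &b };

	/* Window [0x2000, 0xa000): one page of a, two of b, one of c = 4. */
	printf("%lu pages already mapped\n", count_pages_range(&a, 0x2000, 0xa000));
	return 0;
}
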
670 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, in __vma_link_rb() argument
674 if (vma->vm_next) in __vma_link_rb()
675 vma_gap_update(vma->vm_next); in __vma_link_rb()
677 mm->highest_vm_end = vm_end_gap(vma); in __vma_link_rb()
689 rb_link_node(&vma->vm_rb, rb_parent, rb_link); in __vma_link_rb()
690 vma->rb_subtree_gap = 0; in __vma_link_rb()
691 vma_gap_update(vma); in __vma_link_rb()
692 vma_rb_insert(vma, mm); in __vma_link_rb()
696 static void __vma_link_file(struct vm_area_struct *vma) in __vma_link_file() argument
700 file = vma->vm_file; in __vma_link_file()
704 if (vma->vm_flags & VM_DENYWRITE) in __vma_link_file()
706 if (vma->vm_flags & VM_SHARED) in __vma_link_file()
710 vma_interval_tree_insert(vma, &mapping->i_mmap); in __vma_link_file()
716 __vma_link(struct mm_struct *mm, struct vm_area_struct *vma, in __vma_link() argument
720 __vma_link_list(mm, vma, prev); in __vma_link()
721 __vma_link_rb(mm, vma, rb_link, rb_parent); in __vma_link()
724 static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma, in vma_link() argument
730 if (vma->vm_file) { in vma_link()
731 mapping = vma->vm_file->f_mapping; in vma_link()
735 __vma_link(mm, vma, prev, rb_link, rb_parent); in vma_link()
736 __vma_link_file(vma); in vma_link()
749 static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) in __insert_vm_struct() argument
754 if (find_vma_links(mm, vma->vm_start, vma->vm_end, in __insert_vm_struct()
757 __vma_link(mm, vma, prev, rb_link, rb_parent); in __insert_vm_struct()
762 struct vm_area_struct *vma, in __vma_unlink() argument
765 vma_rb_erase_ignore(vma, mm, ignore); in __vma_unlink()
766 __vma_unlink_list(mm, vma); in __vma_unlink()
778 int __vma_adjust(struct vm_area_struct *vma, unsigned long start, in __vma_adjust() argument
782 struct mm_struct *mm = vma->vm_mm; in __vma_adjust()
783 struct vm_area_struct *next = vma->vm_next, *orig_vma = vma; in __vma_adjust()
787 struct file *file = vma->vm_file; in __vma_adjust()
792 vm_write_begin(vma); in __vma_adjust()
819 swap(vma, next); in __vma_adjust()
821 VM_WARN_ON(expand != vma); in __vma_adjust()
834 importer = vma; in __vma_adjust()
850 importer = vma; in __vma_adjust()
852 } else if (end < vma->vm_end) { in __vma_adjust()
858 adjust_next = -(vma->vm_end - end); in __vma_adjust()
859 exporter = vma; in __vma_adjust()
875 if (next && next != vma) in __vma_adjust()
877 vm_write_end(vma); in __vma_adjust()
888 uprobe_munmap(vma, vma->vm_start, vma->vm_end); in __vma_adjust()
905 anon_vma = vma->anon_vma; in __vma_adjust()
912 anon_vma_interval_tree_pre_update_vma(vma); in __vma_adjust()
919 vma_interval_tree_remove(vma, root); in __vma_adjust()
924 if (start != vma->vm_start) { in __vma_adjust()
925 WRITE_ONCE(vma->vm_start, start); in __vma_adjust()
928 if (end != vma->vm_end) { in __vma_adjust()
929 WRITE_ONCE(vma->vm_end, end); in __vma_adjust()
932 WRITE_ONCE(vma->vm_pgoff, pgoff); in __vma_adjust()
943 vma_interval_tree_insert(vma, root); in __vma_adjust()
964 __vma_unlink(mm, next, vma); in __vma_adjust()
976 vma_gap_update(vma); in __vma_adjust()
979 mm->highest_vm_end = vm_end_gap(vma); in __vma_adjust()
986 anon_vma_interval_tree_post_update_vma(vma); in __vma_adjust()
994 uprobe_mmap(vma); in __vma_adjust()
1004 anon_vma_merge(vma, next); in __vma_adjust()
1020 next = vma->vm_next; in __vma_adjust()
1034 next = vma; in __vma_adjust()
1063 VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma)); in __vma_adjust()
1069 if (next && next != vma) in __vma_adjust()
1072 vm_write_end(vma); in __vma_adjust()
1083 static inline int is_mergeable_vma(struct vm_area_struct *vma, in is_mergeable_vma() argument
1096 if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY) in is_mergeable_vma()
1098 if (vma->vm_file != file) in is_mergeable_vma()
1100 if (vma->vm_ops && vma->vm_ops->close) in is_mergeable_vma()
1102 if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx)) in is_mergeable_vma()
1104 if (vma_get_anon_name(vma) != anon_name) in is_mergeable_vma()
1111 struct vm_area_struct *vma) in is_mergeable_anon_vma() argument
1117 if ((!anon_vma1 || !anon_vma2) && (!vma || in is_mergeable_anon_vma()
1118 list_is_singular(&vma->anon_vma_chain))) in is_mergeable_anon_vma()
1135 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, in can_vma_merge_before() argument
1141 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) && in can_vma_merge_before()
1142 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { in can_vma_merge_before()
1143 if (vma->vm_pgoff == vm_pgoff) in can_vma_merge_before()
1157 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, in can_vma_merge_after() argument
1163 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) && in can_vma_merge_after()
1164 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { in can_vma_merge_after()
1166 vm_pglen = vma_pages(vma); in can_vma_merge_after()
1167 if (vma->vm_pgoff + vm_pglen == vm_pgoff) in can_vma_merge_after()
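
The merge predicates above reduce to a file-offset adjacency check: can_vma_merge_after() accepts a neighbour only when its vm_pgoff plus its length in pages lands exactly on the new region's pgoff, and can_vma_merge_before() checks the mirror condition at the other boundary. A small model of just that arithmetic (the flag, file and anon_vma checks are left out), assuming 4 KiB pages:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

struct mapping {
	unsigned long vm_start, vm_end;	/* virtual range, page aligned */
	unsigned long vm_pgoff;		/* file page backing vm_start */
};

static unsigned long mapping_pages(const struct mapping *m)
{
	return (m->vm_end - m->vm_start) >> PAGE_SHIFT;
}

/* Can @m absorb a new region that starts right after it at file page @vm_pgoff? */
static bool can_merge_after(const struct mapping *m, unsigned long vm_pgoff)
{
	return m->vm_pgoff + mapping_pages(m) == vm_pgoff;
}

/* Can @m absorb a new region of @new_pages pages that ends where @m starts? */
static bool can_merge_before(const struct mapping *m, unsigned long new_pages,
			     unsigned long vm_pgoff)
{
	return m->vm_pgoff == vm_pgoff + new_pages;
}

int main(void)
{
	/* 8 pages of a file mapped at 0x10000, starting at file page 4. */
	struct mapping m = { 0x10000, 0x18000, 4 };

	printf("merge after:  %d\n", can_merge_after(&m, 12));		/* 1 */
	printf("merge after:  %d\n", can_merge_after(&m, 13));		/* 0 */
	printf("merge before: %d\n", can_merge_before(&m, 4, 0));	/* 1 */
	return 0;
}
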
1376 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) in find_mergeable_anon_vma() argument
1381 if (vma->vm_next) { in find_mergeable_anon_vma()
1382 anon_vma = reusable_anon_vma(vma->vm_next, vma, vma->vm_next); in find_mergeable_anon_vma()
1388 if (vma->vm_prev) in find_mergeable_anon_vma()
1389 anon_vma = reusable_anon_vma(vma->vm_prev, vma->vm_prev, vma); in find_mergeable_anon_vma()
1522 struct vm_area_struct *vma = find_vma(mm, addr); in do_mmap() local
1524 if (vma && vma->vm_start < addr + len) in do_mmap()
1744 int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot) in vma_wants_writenotify() argument
1746 vm_flags_t vm_flags = vma->vm_flags; in vma_wants_writenotify()
1747 const struct vm_operations_struct *vm_ops = vma->vm_ops; in vma_wants_writenotify()
1768 !is_vm_hugetlb_page(vma)) in vma_wants_writenotify()
1776 return vma->vm_file && vma->vm_file->f_mapping && in vma_wants_writenotify()
1777 mapping_can_writeback(vma->vm_file->f_mapping); in vma_wants_writenotify()
1801 struct vm_area_struct *vma, *prev, *merge; in mmap_region() local
1837 vma = vma_merge(mm, prev, addr, addr + len, vm_flags, in mmap_region()
1839 if (vma) in mmap_region()
1847 vma = vm_area_alloc(mm); in mmap_region()
1848 if (!vma) { in mmap_region()
1853 vma->vm_start = addr; in mmap_region()
1854 vma->vm_end = addr + len; in mmap_region()
1855 vma->vm_flags = vm_flags; in mmap_region()
1856 vma->vm_page_prot = vm_get_page_prot(vm_flags); in mmap_region()
1857 vma->vm_pgoff = pgoff; in mmap_region()
1876 vma->vm_file = get_file(file); in mmap_region()
1877 error = call_mmap(file, vma); in mmap_region()
1888 WARN_ON_ONCE(addr != vma->vm_start); in mmap_region()
1890 addr = vma->vm_start; in mmap_region()
1895 if (unlikely(vm_flags != vma->vm_flags && prev)) { in mmap_region()
1896 merge = vma_merge(mm, prev, vma->vm_start, vma->vm_end, vma->vm_flags, in mmap_region()
1897 NULL, vma->vm_file, vma->vm_pgoff, NULL, NULL_VM_UFFD_CTX, in mmap_region()
1898 vma_get_anon_name(vma)); in mmap_region()
1904 fput(vma->vm_file); in mmap_region()
1905 vm_area_free(vma); in mmap_region()
1906 vma = merge; in mmap_region()
1908 vm_flags = vma->vm_flags; in mmap_region()
1913 vm_flags = vma->vm_flags; in mmap_region()
1915 error = shmem_zero_setup(vma); in mmap_region()
1919 vma_set_anonymous(vma); in mmap_region()
1923 if (!arch_validate_flags(vma->vm_flags)) { in mmap_region()
1931 vma_link(mm, vma, prev, rb_link, rb_parent); in mmap_region()
1940 file = vma->vm_file; in mmap_region()
1942 perf_event_mmap(vma); in mmap_region()
1944 vm_write_begin(vma); in mmap_region()
1947 if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) || in mmap_region()
1948 is_vm_hugetlb_page(vma) || in mmap_region()
1949 vma == get_gate_vma(current->mm)) in mmap_region()
1950 WRITE_ONCE(vma->vm_flags, in mmap_region()
1951 vma->vm_flags & VM_LOCKED_CLEAR_MASK); in mmap_region()
1957 uprobe_mmap(vma); in mmap_region()
1966 WRITE_ONCE(vma->vm_flags, vma->vm_flags | VM_SOFTDIRTY); in mmap_region()
1968 vma_set_page_prot(vma); in mmap_region()
1969 vm_write_end(vma); in mmap_region()
1971 trace_android_vh_mmap_region(vma, addr); in mmap_region()
1976 if (vma->vm_ops && vma->vm_ops->close) in mmap_region()
1977 vma->vm_ops->close(vma); in mmap_region()
1979 vma->vm_file = NULL; in mmap_region()
1983 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); in mmap_region()
1990 vm_area_free(vma); in mmap_region()
2008 struct vm_area_struct *vma; in unmapped_area() local
2028 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); in unmapped_area()
2029 if (vma->rb_subtree_gap < length) in unmapped_area()
2034 gap_end = vm_start_gap(vma); in unmapped_area()
2035 if (gap_end >= low_limit && vma->vm_rb.rb_left) { in unmapped_area()
2037 rb_entry(vma->vm_rb.rb_left, in unmapped_area()
2040 vma = left; in unmapped_area()
2045 gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; in unmapped_area()
2055 if (vma->vm_rb.rb_right) { in unmapped_area()
2057 rb_entry(vma->vm_rb.rb_right, in unmapped_area()
2060 vma = right; in unmapped_area()
2067 struct rb_node *prev = &vma->vm_rb; in unmapped_area()
2070 vma = rb_entry(rb_parent(prev), in unmapped_area()
2072 if (prev == vma->vm_rb.rb_left) { in unmapped_area()
2073 gap_start = vm_end_gap(vma->vm_prev); in unmapped_area()
2074 gap_end = vm_start_gap(vma); in unmapped_area()
2103 struct vm_area_struct *vma; in unmapped_area_topdown() local
2137 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); in unmapped_area_topdown()
2138 if (vma->rb_subtree_gap < length) in unmapped_area_topdown()
2143 gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; in unmapped_area_topdown()
2144 if (gap_start <= high_limit && vma->vm_rb.rb_right) { in unmapped_area_topdown()
2146 rb_entry(vma->vm_rb.rb_right, in unmapped_area_topdown()
2149 vma = right; in unmapped_area_topdown()
2156 gap_end = vm_start_gap(vma); in unmapped_area_topdown()
2164 if (vma->vm_rb.rb_left) { in unmapped_area_topdown()
2166 rb_entry(vma->vm_rb.rb_left, in unmapped_area_topdown()
2169 vma = left; in unmapped_area_topdown()
2176 struct rb_node *prev = &vma->vm_rb; in unmapped_area_topdown()
2179 vma = rb_entry(rb_parent(prev), in unmapped_area_topdown()
2181 if (prev == vma->vm_rb.rb_right) { in unmapped_area_topdown()
2182 gap_start = vma->vm_prev ? in unmapped_area_topdown()
2183 vm_end_gap(vma->vm_prev) : 0; in unmapped_area_topdown()
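
Both unmapped_area() and unmapped_area_topdown() descend the rbtree and use rb_subtree_gap to prune whole subtrees that cannot contain a gap of the requested length, returning the lowest or highest fitting address respectively. The sketch below drops the tree entirely and does the equivalent bottom-up search over a sorted array; it is only meant to show what "a gap of at least length between low_limit and high_limit" means, not the augmented-rbtree walk itself:

#include <stdio.h>

struct map { unsigned long start, end; };	/* sorted, non-overlapping */

/* Lowest address in [low_limit, high_limit) with @length free bytes, or 0. */
static unsigned long find_gap_bottomup(const struct map *maps, int n,
				       unsigned long length,
				       unsigned long low_limit,
				       unsigned long high_limit)
{
	unsigned long gap_start = low_limit;
	int i;

	for (i = 0; i <= n; i++) {
		/* The gap ends at the next mapping's start, or at the limit. */
		unsigned long gap_end = (i < n) ? maps[i].start : high_limit;

		if (gap_end > high_limit)
			gap_end = high_limit;
		if (gap_end >= gap_start && gap_end - gap_start >= length)
			return gap_start;
		if (i < n && maps[i].end > gap_start)
			gap_start = maps[i].end;
	}
	return 0;	/* no gap found (the kernel returns -ENOMEM) */
}

int main(void)
{
	const struct map maps[] = {
		{ 0x10000, 0x20000 },
		{ 0x22000, 0x30000 },
		{ 0x38000, 0x40000 },
	};

	/* First hole of 0x8000 bytes is between 0x30000 and 0x38000. */
	printf("gap at 0x%lx\n",
	       find_gap_bottomup(maps, 3, 0x8000, 0x10000, 0x100000));
	return 0;
}
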
2244 struct vm_area_struct *vma, *prev; in arch_get_unmapped_area() local
2256 vma = find_vma_prev(mm, addr, &prev); in arch_get_unmapped_area()
2258 (!vma || addr + len <= vm_start_gap(vma)) && in arch_get_unmapped_area()
2283 struct vm_area_struct *vma, *prev; in arch_get_unmapped_area_topdown() local
2298 vma = find_vma_prev(mm, addr, &prev); in arch_get_unmapped_area_topdown()
2300 (!vma || addr + len <= vm_start_gap(vma)) && in arch_get_unmapped_area_topdown()
2383 struct vm_area_struct *vma = NULL; in __find_vma() local
2393 vma = tmp; in __find_vma()
2401 return vma; in __find_vma()
2406 struct vm_area_struct *vma; in find_vma() local
2409 vma = vmacache_find(mm, addr); in find_vma()
2410 if (likely(vma)) in find_vma()
2411 return vma; in find_vma()
2413 vma = __find_vma(mm, addr); in find_vma()
2414 if (vma) in find_vma()
2415 vmacache_update(addr, vma); in find_vma()
2416 return vma; in find_vma()
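
find_vma() tries the per-thread vmacache first and only falls back to the rbtree walk in __find_vma(), caching whatever it finds. A toy model of that lookup-with-cache shape, with a one-slot cache and a sorted array standing in for the tree; as with vmacache_find(), the cache hit requires the address to actually fall inside the cached range:

#include <stdio.h>

struct map { unsigned long start, end; };	/* sorted by start */

static const struct map maps[] = {
	{ 0x1000, 0x3000 },
	{ 0x5000, 0x9000 },
	{ 0xa000, 0xb000 },
};
static const int nr_maps = 3;

static const struct map *cache;	/* models the per-thread vmacache */

/* Slow path: first mapping with end > addr (what __find_vma() computes). */
static const struct map *slow_find(unsigned long addr)
{
	for (int i = 0; i < nr_maps; i++)
		if (maps[i].end > addr)
			return &maps[i];
	return NULL;
}

static const struct map *find_map(unsigned long addr)
{
	/* Cache hit only if addr falls inside the cached mapping. */
	if (cache && addr >= cache->start && addr < cache->end)
		return cache;

	const struct map *m = slow_find(addr);
	if (m)
		cache = m;	/* models vmacache_update() */
	return m;
}

int main(void)
{
	const struct map *m = find_map(0x6000);	/* slow path, caches {0x5000, 0x9000} */
	printf("found [0x%lx, 0x%lx)\n", m->start, m->end);
	m = find_map(0x7000);			/* served from the cache */
	printf("found [0x%lx, 0x%lx)\n", m->start, m->end);
	return 0;
}
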
2423 struct vm_area_struct *vma = NULL; in get_vma() local
2426 vma = __find_vma(mm, addr); in get_vma()
2441 if (vma && !atomic_inc_unless_negative(&vma->vm_ref_count)) in get_vma()
2442 vma = NULL; in get_vma()
2445 return vma; in get_vma()
2456 struct vm_area_struct *vma; in find_vma_prev() local
2458 vma = find_vma(mm, addr); in find_vma_prev()
2459 if (vma) { in find_vma_prev()
2460 *pprev = vma->vm_prev; in find_vma_prev()
2466 return vma; in find_vma_prev()
2474 static int acct_stack_growth(struct vm_area_struct *vma, in acct_stack_growth() argument
2477 struct mm_struct *mm = vma->vm_mm; in acct_stack_growth()
2481 if (!may_expand_vm(mm, vma->vm_flags, grow)) in acct_stack_growth()
2489 if (vma->vm_flags & VM_LOCKED) { in acct_stack_growth()
2500 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : in acct_stack_growth()
2501 vma->vm_end - size; in acct_stack_growth()
2502 if (is_hugepage_only_range(vma->vm_mm, new_start, size)) in acct_stack_growth()
2520 int expand_upwards(struct vm_area_struct *vma, unsigned long address) in expand_upwards() argument
2522 struct mm_struct *mm = vma->vm_mm; in expand_upwards()
2527 if (!(vma->vm_flags & VM_GROWSUP)) in expand_upwards()
2543 next = vma->vm_next; in expand_upwards()
2551 if (unlikely(anon_vma_prepare(vma))) in expand_upwards()
2559 anon_vma_lock_write(vma->anon_vma); in expand_upwards()
2562 if (address > vma->vm_end) { in expand_upwards()
2565 size = address - vma->vm_start; in expand_upwards()
2566 grow = (address - vma->vm_end) >> PAGE_SHIFT; in expand_upwards()
2569 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { in expand_upwards()
2570 error = acct_stack_growth(vma, size, grow); in expand_upwards()
2584 if (vma->vm_flags & VM_LOCKED) in expand_upwards()
2586 vm_stat_account(mm, vma->vm_flags, grow); in expand_upwards()
2587 anon_vma_interval_tree_pre_update_vma(vma); in expand_upwards()
2588 vma->vm_end = address; in expand_upwards()
2589 anon_vma_interval_tree_post_update_vma(vma); in expand_upwards()
2590 if (vma->vm_next) in expand_upwards()
2591 vma_gap_update(vma->vm_next); in expand_upwards()
2593 mm->highest_vm_end = vm_end_gap(vma); in expand_upwards()
2596 perf_event_mmap(vma); in expand_upwards()
2600 anon_vma_unlock_write(vma->anon_vma); in expand_upwards()
2601 khugepaged_enter_vma_merge(vma, vma->vm_flags); in expand_upwards()
2610 int expand_downwards(struct vm_area_struct *vma, in expand_downwards() argument
2613 struct mm_struct *mm = vma->vm_mm; in expand_downwards()
2622 prev = vma->vm_prev; in expand_downwards()
2631 if (unlikely(anon_vma_prepare(vma))) in expand_downwards()
2639 anon_vma_lock_write(vma->anon_vma); in expand_downwards()
2642 if (address < vma->vm_start) { in expand_downwards()
2645 size = vma->vm_end - address; in expand_downwards()
2646 grow = (vma->vm_start - address) >> PAGE_SHIFT; in expand_downwards()
2649 if (grow <= vma->vm_pgoff) { in expand_downwards()
2650 error = acct_stack_growth(vma, size, grow); in expand_downwards()
2664 if (vma->vm_flags & VM_LOCKED) in expand_downwards()
2666 vm_stat_account(mm, vma->vm_flags, grow); in expand_downwards()
2667 anon_vma_interval_tree_pre_update_vma(vma); in expand_downwards()
2668 WRITE_ONCE(vma->vm_start, address); in expand_downwards()
2669 WRITE_ONCE(vma->vm_pgoff, vma->vm_pgoff - grow); in expand_downwards()
2670 anon_vma_interval_tree_post_update_vma(vma); in expand_downwards()
2671 vma_gap_update(vma); in expand_downwards()
2674 perf_event_mmap(vma); in expand_downwards()
2678 anon_vma_unlock_write(vma->anon_vma); in expand_downwards()
2679 khugepaged_enter_vma_merge(vma, vma->vm_flags); in expand_downwards()
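
expand_upwards() and expand_downwards() share the same bookkeeping: compute the growth in pages, refuse the change if vm_pgoff would wrap (growing up) or go negative (growing down), and only then, after acct_stack_growth() succeeds, move vm_end or vm_start/vm_pgoff. A small model of just those two checks, on a made-up struct and assuming 4 KiB pages:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

struct stack_vma {
	unsigned long vm_start, vm_end;
	unsigned long vm_pgoff;
};

/* Grow the end upwards to @address (page aligned, above vm_end). */
static bool grow_up(struct stack_vma *vma, unsigned long address)
{
	unsigned long size = address - vma->vm_start;
	unsigned long grow = (address - vma->vm_end) >> PAGE_SHIFT;

	/* Wrap check mirrored from expand_upwards(). */
	if (vma->vm_pgoff + (size >> PAGE_SHIFT) < vma->vm_pgoff)
		return false;
	vma->vm_end = address;
	printf("grew up by %lu pages\n", grow);
	return true;
}

/* Grow the start downwards to @address (page aligned, below vm_start). */
static bool grow_down(struct stack_vma *vma, unsigned long address)
{
	unsigned long grow = (vma->vm_start - address) >> PAGE_SHIFT;

	/* vm_pgoff must stay non-negative, as expand_downwards() requires. */
	if (grow > vma->vm_pgoff)
		return false;
	vma->vm_start = address;
	vma->vm_pgoff -= grow;
	printf("grew down by %lu pages\n", grow);
	return true;
}

int main(void)
{
	struct stack_vma stack = { 0x10000000, 0x10021000, 0x1000 };

	grow_up(&stack, 0x10023000);	/* 2 pages */
	grow_down(&stack, 0x0ffff000);	/* 1 page */
	return 0;
}
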
2701 int expand_stack(struct vm_area_struct *vma, unsigned long address) in expand_stack() argument
2703 return expand_upwards(vma, address); in expand_stack()
2709 struct vm_area_struct *vma, *prev; in find_extend_vma() local
2712 vma = find_vma_prev(mm, addr, &prev); in find_extend_vma()
2713 if (vma && (vma->vm_start <= addr)) in find_extend_vma()
2714 return vma; in find_extend_vma()
2723 int expand_stack(struct vm_area_struct *vma, unsigned long address) in expand_stack() argument
2725 return expand_downwards(vma, address); in expand_stack()
2731 struct vm_area_struct *vma; in find_extend_vma() local
2735 vma = find_vma(mm, addr); in find_extend_vma()
2736 if (!vma) in find_extend_vma()
2738 if (vma->vm_start <= addr) in find_extend_vma()
2739 return vma; in find_extend_vma()
2740 if (!(vma->vm_flags & VM_GROWSDOWN)) in find_extend_vma()
2742 start = vma->vm_start; in find_extend_vma()
2743 if (expand_stack(vma, addr)) in find_extend_vma()
2745 if (vma->vm_flags & VM_LOCKED) in find_extend_vma()
2746 populate_vma_page_range(vma, addr, start, NULL); in find_extend_vma()
2747 return vma; in find_extend_vma()
2759 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) in remove_vma_list() argument
2766 long nrpages = vma_pages(vma); in remove_vma_list()
2768 if (vma->vm_flags & VM_ACCOUNT) in remove_vma_list()
2770 vm_stat_account(mm, vma->vm_flags, -nrpages); in remove_vma_list()
2771 vma = remove_vma(vma); in remove_vma_list()
2772 } while (vma); in remove_vma_list()
2783 struct vm_area_struct *vma, struct vm_area_struct *prev, in unmap_region() argument
2793 unmap_vmas(&tlb, vma, start, end); in unmap_region()
2803 for (cur_vma = vma; cur_vma; cur_vma = cur_vma->vm_next) { in unmap_region()
2810 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, in unmap_region()
2820 detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, in detach_vmas_to_be_unmapped() argument
2827 vma->vm_prev = NULL; in detach_vmas_to_be_unmapped()
2829 vma_rb_erase(vma, mm); in detach_vmas_to_be_unmapped()
2831 tail_vma = vma; in detach_vmas_to_be_unmapped()
2832 vma = vma->vm_next; in detach_vmas_to_be_unmapped()
2833 } while (vma && vma->vm_start < end); in detach_vmas_to_be_unmapped()
2834 *insertion_point = vma; in detach_vmas_to_be_unmapped()
2835 if (vma) { in detach_vmas_to_be_unmapped()
2836 vma->vm_prev = prev; in detach_vmas_to_be_unmapped()
2837 vma_gap_update(vma); in detach_vmas_to_be_unmapped()
2850 if (vma && (vma->vm_flags & VM_GROWSDOWN)) in detach_vmas_to_be_unmapped()
2861 int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, in __split_vma() argument
2867 if (vma->vm_ops && vma->vm_ops->split) { in __split_vma()
2868 err = vma->vm_ops->split(vma, addr); in __split_vma()
2873 new = vm_area_dup(vma); in __split_vma()
2881 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); in __split_vma()
2884 err = vma_dup_policy(vma, new); in __split_vma()
2888 err = anon_vma_clone(new, vma); in __split_vma()
2899 err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff + in __split_vma()
2902 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); in __split_vma()
2925 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, in split_vma() argument
2931 return __split_vma(mm, vma, addr, new_below); in split_vma()
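
__split_vma() duplicates the VMA with vm_area_dup(), then uses vma_adjust() to trim the original and the copy so that they meet at addr, bumping the copy's vm_pgoff by the pages skipped. A minimal model of that bookkeeping for the new_below == 0 case (the copy takes the upper half), on a plain struct and assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

struct region {
	unsigned long vm_start, vm_end;
	unsigned long vm_pgoff;		/* file page backing vm_start */
};

/* Split @old at @addr; @old keeps the lower part, @new gets the upper part. */
static void split_region(struct region *old, struct region *new, unsigned long addr)
{
	*new = *old;					/* vm_area_dup() analogue */
	new->vm_start = addr;
	new->vm_pgoff += (addr - old->vm_start) >> PAGE_SHIFT;
	old->vm_end = addr;				/* vma_adjust() analogue */
}

int main(void)
{
	struct region old = { 0x40000000, 0x40010000, 8 };	/* 16 pages at file page 8 */
	struct region new;

	split_region(&old, &new, 0x40004000);

	printf("low:  [0x%lx, 0x%lx) pgoff %lu\n", old.vm_start, old.vm_end, old.vm_pgoff);
	printf("high: [0x%lx, 0x%lx) pgoff %lu\n", new.vm_start, new.vm_end, new.vm_pgoff);
	return 0;
}
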
2943 struct vm_area_struct *vma, *prev, *last; in __do_munmap() local
2961 vma = find_vma(mm, start); in __do_munmap()
2962 if (!vma) in __do_munmap()
2964 prev = vma->vm_prev; in __do_munmap()
2968 if (vma->vm_start >= end) in __do_munmap()
2978 if (start > vma->vm_start) { in __do_munmap()
2986 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count) in __do_munmap()
2989 error = __split_vma(mm, vma, start, 0); in __do_munmap()
2992 prev = vma; in __do_munmap()
3002 vma = vma_next(mm, prev); in __do_munmap()
3014 int error = userfaultfd_unmap_prep(vma, start, end, uf); in __do_munmap()
3023 struct vm_area_struct *tmp = vma; in __do_munmap()
3035 if (!detach_vmas_to_be_unmapped(mm, vma, prev, end)) in __do_munmap()
3041 unmap_region(mm, vma, prev, start, end); in __do_munmap()
3044 remove_vma_list(mm, vma); in __do_munmap()
3102 struct vm_area_struct *vma; in SYSCALL_DEFINE5() local
3125 vma = find_vma(mm, start); in SYSCALL_DEFINE5()
3127 if (!vma || !(vma->vm_flags & VM_SHARED)) in SYSCALL_DEFINE5()
3130 if (start < vma->vm_start) in SYSCALL_DEFINE5()
3133 if (start + size > vma->vm_end) { in SYSCALL_DEFINE5()
3136 for (next = vma->vm_next; next; next = next->vm_next) { in SYSCALL_DEFINE5()
3141 if (next->vm_file != vma->vm_file) in SYSCALL_DEFINE5()
3144 if (next->vm_flags != vma->vm_flags) in SYSCALL_DEFINE5()
3155 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; in SYSCALL_DEFINE5()
3156 prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0; in SYSCALL_DEFINE5()
3157 prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0; in SYSCALL_DEFINE5()
3161 if (vma->vm_flags & VM_LOCKED) { in SYSCALL_DEFINE5()
3166 for (tmp = vma; tmp->vm_start >= start + size; in SYSCALL_DEFINE5()
3180 file = get_file(vma->vm_file); in SYSCALL_DEFINE5()
3181 ret = do_mmap(vma->vm_file, start, size, in SYSCALL_DEFINE5()
3201 struct vm_area_struct *vma, *prev; in do_brk_flags() local
3235 vma = vma_merge(mm, prev, addr, addr + len, flags, in do_brk_flags()
3237 if (vma) in do_brk_flags()
3243 vma = vm_area_alloc(mm); in do_brk_flags()
3244 if (!vma) { in do_brk_flags()
3249 vma_set_anonymous(vma); in do_brk_flags()
3250 vma->vm_start = addr; in do_brk_flags()
3251 vma->vm_end = addr + len; in do_brk_flags()
3252 vma->vm_pgoff = pgoff; in do_brk_flags()
3253 vma->vm_flags = flags; in do_brk_flags()
3254 vma->vm_page_prot = vm_get_page_prot(flags); in do_brk_flags()
3255 vma_link(mm, vma, prev, rb_link, rb_parent); in do_brk_flags()
3257 perf_event_mmap(vma); in do_brk_flags()
3262 vma->vm_flags |= VM_SOFTDIRTY; in do_brk_flags()
3303 struct vm_area_struct *vma; in exit_mmap() local
3333 vma = mm->mmap; in exit_mmap()
3334 while (vma) { in exit_mmap()
3335 if (vma->vm_flags & VM_LOCKED) in exit_mmap()
3336 munlock_vma_pages_all(vma); in exit_mmap()
3337 vma = vma->vm_next; in exit_mmap()
3343 vma = mm->mmap; in exit_mmap()
3344 if (!vma) { in exit_mmap()
3355 unmap_vmas(&tlb, vma, 0, -1); in exit_mmap()
3356 free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING); in exit_mmap()
3360 while (vma) { in exit_mmap()
3361 if (vma->vm_flags & VM_ACCOUNT) in exit_mmap()
3362 nr_accounted += vma_pages(vma); in exit_mmap()
3363 vma = remove_vma(vma); in exit_mmap()
3375 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) in insert_vm_struct() argument
3380 if (find_vma_links(mm, vma->vm_start, vma->vm_end, in insert_vm_struct()
3383 if ((vma->vm_flags & VM_ACCOUNT) && in insert_vm_struct()
3384 security_vm_enough_memory_mm(mm, vma_pages(vma))) in insert_vm_struct()
3399 if (vma_is_anonymous(vma)) { in insert_vm_struct()
3400 BUG_ON(vma->anon_vma); in insert_vm_struct()
3401 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; in insert_vm_struct()
3404 vma_link(mm, vma, prev, rb_link, rb_parent); in insert_vm_struct()
3416 struct vm_area_struct *vma = *vmap; in copy_vma() local
3417 unsigned long vma_start = vma->vm_start; in copy_vma()
3418 struct mm_struct *mm = vma->vm_mm; in copy_vma()
3427 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) { in copy_vma()
3445 new_vma = __vma_merge(mm, prev, addr, addr + len, vma->vm_flags, in copy_vma()
3446 vma->anon_vma, vma->vm_file, pgoff, in copy_vma()
3447 vma_policy(vma), vma->vm_userfaultfd_ctx, in copy_vma()
3448 vma_get_anon_name(vma), true); in copy_vma()
3468 *vmap = vma = new_vma; in copy_vma()
3470 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); in copy_vma()
3472 new_vma = vm_area_dup(vma); in copy_vma()
3478 if (vma_dup_policy(vma, new_vma)) in copy_vma()
3480 if (anon_vma_clone(new_vma, vma)) in copy_vma()
3554 static void special_mapping_close(struct vm_area_struct *vma) in special_mapping_close() argument
3558 static const char *special_mapping_name(struct vm_area_struct *vma) in special_mapping_name() argument
3560 return ((struct vm_special_mapping *)vma->vm_private_data)->name; in special_mapping_name()
3592 struct vm_area_struct *vma = vmf->vma; in special_mapping_fault() local
3596 if (vma->vm_ops == &legacy_special_mapping_vmops) { in special_mapping_fault()
3597 pages = vma->vm_private_data; in special_mapping_fault()
3599 struct vm_special_mapping *sm = vma->vm_private_data; in special_mapping_fault()
3602 return sm->fault(sm, vmf->vma, vmf); in special_mapping_fault()
3627 struct vm_area_struct *vma; in __install_special_mapping() local
3629 vma = vm_area_alloc(mm); in __install_special_mapping()
3630 if (unlikely(vma == NULL)) in __install_special_mapping()
3633 vma->vm_start = addr; in __install_special_mapping()
3634 vma->vm_end = addr + len; in __install_special_mapping()
3636 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY; in __install_special_mapping()
3637 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); in __install_special_mapping()
3639 vma->vm_ops = ops; in __install_special_mapping()
3640 vma->vm_private_data = priv; in __install_special_mapping()
3642 ret = insert_vm_struct(mm, vma); in __install_special_mapping()
3646 vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT); in __install_special_mapping()
3648 perf_event_mmap(vma); in __install_special_mapping()
3650 return vma; in __install_special_mapping()
3653 vm_area_free(vma); in __install_special_mapping()
3657 bool vma_is_special_mapping(const struct vm_area_struct *vma, in vma_is_special_mapping() argument
3660 return vma->vm_private_data == sm && in vma_is_special_mapping()
3661 (vma->vm_ops == &special_mapping_vmops || in vma_is_special_mapping()
3662 vma->vm_ops == &legacy_special_mapping_vmops); in vma_is_special_mapping()
3687 struct vm_area_struct *vma = __install_special_mapping( in install_special_mapping() local
3691 return PTR_ERR_OR_ZERO(vma); in install_special_mapping()
3776 struct vm_area_struct *vma; in mm_take_all_locks() local
3783 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mm_take_all_locks()
3786 if (vma->vm_file && vma->vm_file->f_mapping && in mm_take_all_locks()
3787 is_vm_hugetlb_page(vma)) in mm_take_all_locks()
3788 vm_lock_mapping(mm, vma->vm_file->f_mapping); in mm_take_all_locks()
3791 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mm_take_all_locks()
3794 if (vma->vm_file && vma->vm_file->f_mapping && in mm_take_all_locks()
3795 !is_vm_hugetlb_page(vma)) in mm_take_all_locks()
3796 vm_lock_mapping(mm, vma->vm_file->f_mapping); in mm_take_all_locks()
3799 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mm_take_all_locks()
3802 if (vma->anon_vma) in mm_take_all_locks()
3803 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in mm_take_all_locks()
3856 struct vm_area_struct *vma; in mm_drop_all_locks() local
3862 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mm_drop_all_locks()
3863 if (vma->anon_vma) in mm_drop_all_locks()
3864 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in mm_drop_all_locks()
3866 if (vma->vm_file && vma->vm_file->f_mapping) in mm_drop_all_locks()
3867 vm_unlock_mapping(vma->vm_file->f_mapping); in mm_drop_all_locks()