Lines matching refs:mm — call sites and declarations referencing `mm`, apparently from the kernel's mm/mmap.c (Android common kernel variant). Each entry reads: <source line> <code fragment> in <enclosing function>, tagged `argument` or `local` where the tool classified the reference.

80 static void unmap_region(struct mm_struct *mm,
213 struct mm_struct *mm = current->mm; in SYSCALL_DEFINE1() local
220 if (mmap_write_lock_killable(mm)) in SYSCALL_DEFINE1()
223 origbrk = mm->brk; in SYSCALL_DEFINE1()
232 min_brk = mm->start_brk; in SYSCALL_DEFINE1()
234 min_brk = mm->end_data; in SYSCALL_DEFINE1()
236 min_brk = mm->start_brk; in SYSCALL_DEFINE1()
247 if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk, in SYSCALL_DEFINE1()
248 mm->end_data, mm->start_data)) in SYSCALL_DEFINE1()
252 oldbrk = PAGE_ALIGN(mm->brk); in SYSCALL_DEFINE1()
254 mm->brk = brk; in SYSCALL_DEFINE1()
262 if (brk <= mm->brk) { in SYSCALL_DEFINE1()
270 mm->brk = brk; in SYSCALL_DEFINE1()
271 ret = __do_munmap(mm, newbrk, oldbrk-newbrk, &uf, true); in SYSCALL_DEFINE1()
273 mm->brk = origbrk; in SYSCALL_DEFINE1()
282 next = find_vma(mm, oldbrk); in SYSCALL_DEFINE1()
289 mm->brk = brk; in SYSCALL_DEFINE1()
292 populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0; in SYSCALL_DEFINE1()
294 mmap_read_unlock(mm); in SYSCALL_DEFINE1()
296 mmap_write_unlock(mm); in SYSCALL_DEFINE1()
297 userfaultfd_unmap_complete(mm, &uf); in SYSCALL_DEFINE1()
304 mmap_write_unlock(mm); in SYSCALL_DEFINE1()
348 static int browse_rb(struct mm_struct *mm) in browse_rb() argument
350 struct rb_root *root = &mm->mm_rb; in browse_rb()
373 spin_lock(&mm->page_table_lock); in browse_rb()
380 spin_unlock(&mm->page_table_lock); in browse_rb()
409 static void validate_mm(struct mm_struct *mm) in validate_mm() argument
414 struct vm_area_struct *vma = mm->mmap; in validate_mm()
431 if (i != mm->map_count) { in validate_mm()
432 pr_emerg("map_count %d vm_next %d\n", mm->map_count, i); in validate_mm()
435 if (highest_address != mm->highest_vm_end) { in validate_mm()
437 mm->highest_vm_end, highest_address); in validate_mm()
440 i = browse_rb(mm); in validate_mm()
441 if (i != mm->map_count) { in validate_mm()
443 pr_emerg("map_count %d rb %d\n", mm->map_count, i); in validate_mm()
446 VM_BUG_ON_MM(bug, mm); in validate_mm()
450 #define validate_mm(mm) do { } while (0) argument
457 #define mm_rb_write_lock(mm) write_lock(&(mm)->mm_rb_lock) in RB_DECLARE_CALLBACKS_MAX() argument
458 #define mm_rb_write_unlock(mm) write_unlock(&(mm)->mm_rb_lock) in RB_DECLARE_CALLBACKS_MAX() argument
460 #define mm_rb_write_lock(mm) do { } while (0) in RB_DECLARE_CALLBACKS_MAX()
461 #define mm_rb_write_unlock(mm) do { } while (0)
479 struct mm_struct *mm) in vma_rb_insert() argument
481 struct rb_root *root = &mm->mm_rb; in vma_rb_insert()
489 static void __vma_rb_erase(struct vm_area_struct *vma, struct mm_struct *mm) in __vma_rb_erase() argument
491 struct rb_root *root = &mm->mm_rb; in __vma_rb_erase()
497 mm_rb_write_lock(mm); in __vma_rb_erase()
499 mm_rb_write_unlock(mm); /* wmb */ in __vma_rb_erase()
509 struct mm_struct *mm, in vma_rb_erase_ignore() argument
521 validate_mm_rb(&mm->mm_rb, ignore); in vma_rb_erase_ignore()
523 __vma_rb_erase(vma, mm); in vma_rb_erase_ignore()
527 struct mm_struct *mm) in vma_rb_erase() argument
529 vma_rb_erase_ignore(vma, mm, vma); in vma_rb_erase()
564 static int find_vma_links(struct mm_struct *mm, unsigned long addr, in find_vma_links() argument
570 __rb_link = &mm->mm_rb.rb_node; in find_vma_links()
607 static inline struct vm_area_struct *vma_next(struct mm_struct *mm, in vma_next() argument
611 return mm->mmap; in vma_next()
631 munmap_vma_range(struct mm_struct *mm, unsigned long start, unsigned long len, in munmap_vma_range() argument
636 while (find_vma_links(mm, start, start + len, pprev, link, parent)) in munmap_vma_range()
637 if (do_munmap(mm, start, len, uf)) in munmap_vma_range()
642 static unsigned long count_vma_pages_range(struct mm_struct *mm, in count_vma_pages_range() argument
649 vma = find_vma_intersection(mm, addr, end); in count_vma_pages_range()
670 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, in __vma_link_rb() argument
677 mm->highest_vm_end = vm_end_gap(vma); in __vma_link_rb()
688 mm_rb_write_lock(mm); in __vma_link_rb()
692 vma_rb_insert(vma, mm); in __vma_link_rb()
693 mm_rb_write_unlock(mm); in __vma_link_rb()
716 __vma_link(struct mm_struct *mm, struct vm_area_struct *vma, in __vma_link() argument
720 __vma_link_list(mm, vma, prev); in __vma_link()
721 __vma_link_rb(mm, vma, rb_link, rb_parent); in __vma_link()
724 static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma, in vma_link() argument
735 __vma_link(mm, vma, prev, rb_link, rb_parent); in vma_link()
741 mm->map_count++; in vma_link()
742 validate_mm(mm); in vma_link()
749 static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) in __insert_vm_struct() argument
754 if (find_vma_links(mm, vma->vm_start, vma->vm_end, in __insert_vm_struct()
757 __vma_link(mm, vma, prev, rb_link, rb_parent); in __insert_vm_struct()
758 mm->map_count++; in __insert_vm_struct()
761 static __always_inline void __vma_unlink(struct mm_struct *mm, in __vma_unlink() argument
765 vma_rb_erase_ignore(vma, mm, ignore); in __vma_unlink()
766 __vma_unlink_list(mm, vma); in __vma_unlink()
768 vmacache_invalidate(mm); in __vma_unlink()
782 struct mm_struct *mm = vma->vm_mm; in __vma_adjust() local
953 __vma_unlink(mm, next, next); in __vma_adjust()
964 __vma_unlink(mm, next, vma); in __vma_adjust()
973 __insert_vm_struct(mm, insert); in __vma_adjust()
979 mm->highest_vm_end = vm_end_gap(vma); in __vma_adjust()
1005 mm->map_count--; in __vma_adjust()
1063 VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma)); in __vma_adjust()
1074 validate_mm(mm); in __vma_adjust()
1216 struct vm_area_struct *__vma_merge(struct mm_struct *mm, in __vma_merge() argument
1235 next = vma_next(mm, prev); in __vma_merge()
1417 static inline int mlock_future_check(struct mm_struct *mm, in mlock_future_check() argument
1426 locked += mm->locked_vm; in mlock_future_check()
1475 struct mm_struct *mm = current->mm; in do_mmap() local
1511 if (mm->map_count > sysctl_max_map_count) in do_mmap()
1522 struct vm_area_struct *vma = find_vma(mm, addr); in do_mmap()
1529 pkey = execute_only_pkey(mm); in do_mmap()
1539 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; in do_mmap()
1545 if (mlock_future_check(mm, vm_flags, len)) in do_mmap()
1800 struct mm_struct *mm = current->mm; in mmap_region() local
1807 if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) { in mmap_region()
1814 nr_pages = count_vma_pages_range(mm, addr, addr + len); in mmap_region()
1816 if (!may_expand_vm(mm, vm_flags, in mmap_region()
1822 if (munmap_vma_range(mm, addr, len, &prev, &rb_link, &rb_parent, uf)) in mmap_region()
1829 if (security_vm_enough_memory_mm(mm, charged)) in mmap_region()
1837 vma = vma_merge(mm, prev, addr, addr + len, vm_flags, in mmap_region()
1847 vma = vm_area_alloc(mm); in mmap_region()
1896 merge = vma_merge(mm, prev, vma->vm_start, vma->vm_end, vma->vm_flags, in mmap_region()
1931 vma_link(mm, vma, prev, rb_link, rb_parent); in mmap_region()
1945 vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT); in mmap_region()
1949 vma == get_gate_vma(current->mm)) in mmap_region()
1953 mm->locked_vm += (len >> PAGE_SHIFT); in mmap_region()
1983 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); in mmap_region()
2007 struct mm_struct *mm = current->mm; in unmapped_area() local
2026 if (RB_EMPTY_ROOT(&mm->mm_rb)) in unmapped_area()
2028 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); in unmapped_area()
2082 gap_start = mm->highest_vm_end; in unmapped_area()
2102 struct mm_struct *mm = current->mm; in unmapped_area_topdown() local
2112 trace_android_vh_get_from_fragment_pool(mm, info, &addr); in unmapped_area_topdown()
2130 gap_start = mm->highest_vm_end; in unmapped_area_topdown()
2135 if (RB_EMPTY_ROOT(&mm->mm_rb)) in unmapped_area_topdown()
2137 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); in unmapped_area_topdown()
2243 struct mm_struct *mm = current->mm; in arch_get_unmapped_area() local
2256 vma = find_vma_prev(mm, addr, &prev); in arch_get_unmapped_area()
2265 info.low_limit = mm->mmap_base; in arch_get_unmapped_area()
2284 struct mm_struct *mm = current->mm; in arch_get_unmapped_area_topdown() local
2298 vma = find_vma_prev(mm, addr, &prev); in arch_get_unmapped_area_topdown()
2308 info.high_limit = arch_get_mmap_base(addr, mm->mmap_base); in arch_get_unmapped_area_topdown()
2311 trace_android_vh_exclude_reserved_zone(mm, &info); in arch_get_unmapped_area_topdown()
2328 trace_android_vh_include_reserved_zone(mm, &info, &addr); in arch_get_unmapped_area_topdown()
2349 get_area = current->mm->get_unmapped_area; in get_unmapped_area()
2379 static struct vm_area_struct *__find_vma(struct mm_struct *mm, in __find_vma() argument
2385 rb_node = mm->mm_rb.rb_node; in __find_vma()
2404 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) in find_vma() argument
2409 vma = vmacache_find(mm, addr); in find_vma()
2413 vma = __find_vma(mm, addr); in find_vma()
2421 struct vm_area_struct *get_vma(struct mm_struct *mm, unsigned long addr) in get_vma() argument
2425 read_lock(&mm->mm_rb_lock); in get_vma()
2426 vma = __find_vma(mm, addr); in get_vma()
2443 read_unlock(&mm->mm_rb_lock); in get_vma()
2453 find_vma_prev(struct mm_struct *mm, unsigned long addr, in find_vma_prev() argument
2458 vma = find_vma(mm, addr); in find_vma_prev()
2462 struct rb_node *rb_node = rb_last(&mm->mm_rb); in find_vma_prev()
2477 struct mm_struct *mm = vma->vm_mm; in acct_stack_growth() local
2481 if (!may_expand_vm(mm, vma->vm_flags, grow)) in acct_stack_growth()
2492 locked = mm->locked_vm + grow; in acct_stack_growth()
2509 if (security_vm_enough_memory_mm(mm, grow)) in acct_stack_growth()
2522 struct mm_struct *mm = vma->vm_mm; in expand_upwards() local
2583 spin_lock(&mm->page_table_lock); in expand_upwards()
2585 mm->locked_vm += grow; in expand_upwards()
2586 vm_stat_account(mm, vma->vm_flags, grow); in expand_upwards()
2593 mm->highest_vm_end = vm_end_gap(vma); in expand_upwards()
2594 spin_unlock(&mm->page_table_lock); in expand_upwards()
2602 validate_mm(mm); in expand_upwards()
2613 struct mm_struct *mm = vma->vm_mm; in expand_downwards() local
2663 spin_lock(&mm->page_table_lock); in expand_downwards()
2665 mm->locked_vm += grow; in expand_downwards()
2666 vm_stat_account(mm, vma->vm_flags, grow); in expand_downwards()
2672 spin_unlock(&mm->page_table_lock); in expand_downwards()
2680 validate_mm(mm); in expand_downwards()
2707 find_extend_vma(struct mm_struct *mm, unsigned long addr) in find_extend_vma() argument
2712 vma = find_vma_prev(mm, addr, &prev); in find_extend_vma()
2729 find_extend_vma(struct mm_struct *mm, unsigned long addr) in find_extend_vma() argument
2735 vma = find_vma(mm, addr); in find_extend_vma()
2759 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) in remove_vma_list() argument
2764 update_hiwater_vm(mm); in remove_vma_list()
2770 vm_stat_account(mm, vma->vm_flags, -nrpages); in remove_vma_list()
2774 validate_mm(mm); in remove_vma_list()
2782 static void unmap_region(struct mm_struct *mm, in unmap_region() argument
2786 struct vm_area_struct *next = vma_next(mm, prev); in unmap_region()
2791 tlb_gather_mmu(&tlb, mm, start, end); in unmap_region()
2792 update_hiwater_rss(mm); in unmap_region()
2820 detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, in detach_vmas_to_be_unmapped() argument
2826 insertion_point = (prev ? &prev->vm_next : &mm->mmap); in detach_vmas_to_be_unmapped()
2829 vma_rb_erase(vma, mm); in detach_vmas_to_be_unmapped()
2830 mm->map_count--; in detach_vmas_to_be_unmapped()
2839 mm->highest_vm_end = prev ? vm_end_gap(prev) : 0; in detach_vmas_to_be_unmapped()
2843 vmacache_invalidate(mm); in detach_vmas_to_be_unmapped()
2861 int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, in __split_vma() argument
2925 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, in split_vma() argument
2928 if (mm->map_count >= sysctl_max_map_count) in split_vma()
2931 return __split_vma(mm, vma, addr, new_below); in split_vma()
2939 int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, in __do_munmap() argument
2958 arch_unmap(mm, start, end); in __do_munmap()
2961 vma = find_vma(mm, start); in __do_munmap()
2986 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count) in __do_munmap()
2989 error = __split_vma(mm, vma, start, 0); in __do_munmap()
2996 last = find_vma(mm, end); in __do_munmap()
2998 int error = __split_vma(mm, last, end, 1); in __do_munmap()
3002 vma = vma_next(mm, prev); in __do_munmap()
3022 if (mm->locked_vm) { in __do_munmap()
3026 mm->locked_vm -= vma_pages(tmp); in __do_munmap()
3035 if (!detach_vmas_to_be_unmapped(mm, vma, prev, end)) in __do_munmap()
3039 mmap_write_downgrade(mm); in __do_munmap()
3041 unmap_region(mm, vma, prev, start, end); in __do_munmap()
3044 remove_vma_list(mm, vma); in __do_munmap()
3049 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, in do_munmap() argument
3052 return __do_munmap(mm, start, len, uf, false); in do_munmap()
3058 struct mm_struct *mm = current->mm; in __vm_munmap() local
3061 if (mmap_write_lock_killable(mm)) in __vm_munmap()
3064 ret = __do_munmap(mm, start, len, &uf, downgrade); in __vm_munmap()
3071 mmap_read_unlock(mm); in __vm_munmap()
3074 mmap_write_unlock(mm); in __vm_munmap()
3076 userfaultfd_unmap_complete(mm, &uf); in __vm_munmap()
3101 struct mm_struct *mm = current->mm; in SYSCALL_DEFINE5() local
3122 if (mmap_write_lock_killable(mm)) in SYSCALL_DEFINE5()
3125 vma = find_vma(mm, start); in SYSCALL_DEFINE5()
3185 mmap_write_unlock(mm); in SYSCALL_DEFINE5()
3200 struct mm_struct *mm = current->mm; in do_brk_flags() local
3210 flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; in do_brk_flags()
3216 error = mlock_future_check(mm, mm->def_flags, len); in do_brk_flags()
3221 if (munmap_vma_range(mm, addr, len, &prev, &rb_link, &rb_parent, uf)) in do_brk_flags()
3225 if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT)) in do_brk_flags()
3228 if (mm->map_count > sysctl_max_map_count) in do_brk_flags()
3231 if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT)) in do_brk_flags()
3235 vma = vma_merge(mm, prev, addr, addr + len, flags, in do_brk_flags()
3243 vma = vm_area_alloc(mm); in do_brk_flags()
3255 vma_link(mm, vma, prev, rb_link, rb_parent); in do_brk_flags()
3258 mm->total_vm += len >> PAGE_SHIFT; in do_brk_flags()
3259 mm->data_vm += len >> PAGE_SHIFT; in do_brk_flags()
3261 mm->locked_vm += (len >> PAGE_SHIFT); in do_brk_flags()
3268 struct mm_struct *mm = current->mm; in vm_brk_flags() local
3280 if (mmap_write_lock_killable(mm)) in vm_brk_flags()
3284 populate = ((mm->def_flags & VM_LOCKED) != 0); in vm_brk_flags()
3285 mmap_write_unlock(mm); in vm_brk_flags()
3286 userfaultfd_unmap_complete(mm, &uf); in vm_brk_flags()
3300 void exit_mmap(struct mm_struct *mm) in exit_mmap() argument
3307 mmu_notifier_release(mm); in exit_mmap()
3309 if (unlikely(mm_is_oom_victim(mm))) { in exit_mmap()
3326 (void)__oom_reap_task_mm(mm); in exit_mmap()
3328 set_bit(MMF_OOM_SKIP, &mm->flags); in exit_mmap()
3331 mmap_write_lock(mm); in exit_mmap()
3332 if (mm->locked_vm) { in exit_mmap()
3333 vma = mm->mmap; in exit_mmap()
3341 arch_exit_mmap(mm); in exit_mmap()
3343 vma = mm->mmap; in exit_mmap()
3346 mmap_write_unlock(mm); in exit_mmap()
3351 flush_cache_mm(mm); in exit_mmap()
3352 tlb_gather_mmu(&tlb, mm, 0, -1); in exit_mmap()
3366 mm->mmap = NULL; in exit_mmap()
3367 mmap_write_unlock(mm); in exit_mmap()
3375 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) in insert_vm_struct() argument
3380 if (find_vma_links(mm, vma->vm_start, vma->vm_end, in insert_vm_struct()
3384 security_vm_enough_memory_mm(mm, vma_pages(vma))) in insert_vm_struct()
3404 vma_link(mm, vma, prev, rb_link, rb_parent); in insert_vm_struct()
3418 struct mm_struct *mm = vma->vm_mm; in copy_vma() local
3432 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) in copy_vma()
3445 new_vma = __vma_merge(mm, prev, addr, addr + len, vma->vm_flags, in copy_vma()
3495 vma_link(mm, new_vma, prev, rb_link, rb_parent); in copy_vma()
3512 bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages) in may_expand_vm() argument
3514 if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT) in may_expand_vm()
3518 mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) { in may_expand_vm()
3521 mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT) in may_expand_vm()
3526 (mm->data_vm + npages) << PAGE_SHIFT, in may_expand_vm()
3537 void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages) in vm_stat_account() argument
3539 mm->total_vm += npages; in vm_stat_account()
3542 mm->exec_vm += npages; in vm_stat_account()
3544 mm->stack_vm += npages; in vm_stat_account()
3546 mm->data_vm += npages; in vm_stat_account()
3567 if (WARN_ON_ONCE(current->mm != new_vma->vm_mm)) in special_mapping_mremap()
3621 struct mm_struct *mm, in __install_special_mapping() argument
3629 vma = vm_area_alloc(mm); in __install_special_mapping()
3636 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY; in __install_special_mapping()
3642 ret = insert_vm_struct(mm, vma); in __install_special_mapping()
3646 vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT); in __install_special_mapping()
3675 struct mm_struct *mm, in _install_special_mapping() argument
3679 return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec, in _install_special_mapping()
3683 int install_special_mapping(struct mm_struct *mm, in install_special_mapping() argument
3688 mm, addr, len, vm_flags, (void *)pages, in install_special_mapping()
3696 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma) in vm_lock_anon_vma() argument
3703 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock); in vm_lock_anon_vma()
3719 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) in vm_lock_mapping() argument
3733 down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock); in vm_lock_mapping()
3774 int mm_take_all_locks(struct mm_struct *mm) in mm_take_all_locks() argument
3779 BUG_ON(mmap_read_trylock(mm)); in mm_take_all_locks()
3783 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mm_take_all_locks()
3788 vm_lock_mapping(mm, vma->vm_file->f_mapping); in mm_take_all_locks()
3791 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mm_take_all_locks()
3796 vm_lock_mapping(mm, vma->vm_file->f_mapping); in mm_take_all_locks()
3799 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mm_take_all_locks()
3804 vm_lock_anon_vma(mm, avc->anon_vma); in mm_take_all_locks()
3810 mm_drop_all_locks(mm); in mm_take_all_locks()
3854 void mm_drop_all_locks(struct mm_struct *mm) in mm_drop_all_locks() argument
3859 BUG_ON(mmap_read_trylock(mm)); in mm_drop_all_locks()
3862 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mm_drop_all_locks()
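
The references above repeatedly show the same mmap-lock pattern around the unmap paths: take the write lock with mmap_write_lock_killable(), call __do_munmap() (which returns 1 when it has already downgraded the lock), drop the read or write lock accordingly, then flush pending events with userfaultfd_unmap_complete(). Below is a minimal sketch of that pattern, reconstructed from the __vm_munmap() lines listed above; the wrapper name munmap_sketch() is illustrative and not taken from the listing.

/*
 * Sketch only: mirrors the lock/downgrade pattern visible in the
 * __vm_munmap() references above.  __do_munmap(), mmap_write_lock_killable(),
 * mmap_read_unlock(), mmap_write_unlock() and userfaultfd_unmap_complete()
 * are the kernel APIs named in the listing; munmap_sketch() itself is
 * hypothetical.
 */
#include <linux/mm.h>
#include <linux/mmap_lock.h>
#include <linux/userfaultfd_k.h>

static int munmap_sketch(struct mm_struct *mm, unsigned long start,
			 size_t len, bool downgrade)
{
	LIST_HEAD(uf);		/* pending userfaultfd unmap events */
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __do_munmap(mm, start, len, &uf, downgrade);
	if (ret == 1) {
		/* __do_munmap() downgraded the lock to a read lock itself. */
		mmap_read_unlock(mm);
		ret = 0;
	} else {
		mmap_write_unlock(mm);
	}

	userfaultfd_unmap_complete(mm, &uf);
	return ret;
}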