Lines Matching +full:performance +full:- +full:affecting
1 // SPDX-License-Identifier: GPL-2.0
13 #include <linux/page-isolation.h>
26 #include <linux/backing-dev.h>
45 * Any behaviour which results in changes to the vma->vm_flags needs to
73 struct mm_struct *mm = vma->vm_mm; in madvise_behavior()
76 unsigned long new_flags = vma->vm_flags; in madvise_behavior()
92 if (vma->vm_flags & VM_IO) { in madvise_behavior()
93 error = -EINVAL; in madvise_behavior()
100 if (vma->vm_file || vma->vm_flags & VM_SHARED) { in madvise_behavior()
101 error = -EINVAL; in madvise_behavior()
114 error = -EINVAL; in madvise_behavior()
133 if (new_flags == vma->vm_flags) { in madvise_behavior()
138 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); in madvise_behavior()
139 *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma, in madvise_behavior()
140 vma->vm_file, pgoff, vma_policy(vma), in madvise_behavior()
141 vma->vm_userfaultfd_ctx, vma_get_anon_name(vma)); in madvise_behavior()
149 if (start != vma->vm_start) { in madvise_behavior()
150 if (unlikely(mm->map_count >= sysctl_max_map_count)) { in madvise_behavior()
151 error = -ENOMEM; in madvise_behavior()
159 if (end != vma->vm_end) { in madvise_behavior()
160 if (unlikely(mm->map_count >= sysctl_max_map_count)) { in madvise_behavior()
161 error = -ENOMEM; in madvise_behavior()
174 WRITE_ONCE(vma->vm_flags, new_flags); in madvise_behavior()
182 if (error == -ENOMEM) in madvise_behavior()
183 error = -EAGAIN; in madvise_behavior()
193 struct vm_area_struct *vma = walk->private; in swapin_walk_pmd_entry()
205 orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); in swapin_walk_pmd_entry()
206 pte = *(orig_pte + ((index - start) / PAGE_SIZE)); in swapin_walk_pmd_entry()
232 XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start)); in force_shm_swapin_readahead()
233 pgoff_t end_index = linear_page_index(vma, end + PAGE_SIZE - 1); in force_shm_swapin_readahead()
266 struct mm_struct *mm = vma->vm_mm; in madvise_willneed()
267 struct file *file = vma->vm_file; in madvise_willneed()
273 walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma); in madvise_willneed()
278 if (shmem_mapping(file->f_mapping)) { in madvise_willneed()
280 file->f_mapping); in madvise_willneed()
285 return -EBADF; in madvise_willneed()
301 offset = (loff_t)(start - vma->vm_start) in madvise_willneed()
302 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); in madvise_willneed()
304 vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED); in madvise_willneed()
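
The matched lines above from madvise_willneed() (and the same pattern later in madvise_remove()) convert a user virtual address inside the VMA into a byte offset in the backing file: the distance of start from vm_start plus vm_pgoff, which is kept in pages and therefore shifted left by PAGE_SHIFT. A standalone sketch of that arithmetic follows; the vma_example struct, its values, and the 4 KiB page size are made up purely for illustration.

/* Illustration only: mirrors the offset arithmetic matched above.
 * vma_example and its field values are hypothetical, not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SHIFT 12                 /* assume 4 KiB pages */

struct vma_example {
    uint64_t vm_start;                   /* start address of the mapping */
    uint64_t vm_pgoff;                   /* file offset of the mapping, in pages */
};

int main(void)
{
    struct vma_example vma = { .vm_start = 0x7f0000200000ULL, .vm_pgoff = 16 };
    uint64_t start = 0x7f0000203000ULL;  /* address handed to madvise() */

    /* Same formula as madvise_willneed()/madvise_remove():
     * bytes into the VMA + the VMA's starting file offset in bytes. */
    uint64_t offset = (start - vma.vm_start) + (vma.vm_pgoff << EX_PAGE_SHIFT);

    printf("file offset = 0x%llx\n", (unsigned long long)offset);  /* 0x13000 */
    return 0;
}
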
314 struct madvise_walk_private *private = walk->private; in madvise_cold_or_pageout_pte_range()
315 struct mmu_gather *tlb = private->tlb; in madvise_cold_or_pageout_pte_range()
316 bool pageout = private->pageout; in madvise_cold_or_pageout_pte_range()
317 bool pageout_anon_only = pageout && !private->can_pageout_file; in madvise_cold_or_pageout_pte_range()
318 struct mm_struct *mm = tlb->mm; in madvise_cold_or_pageout_pte_range()
319 struct vm_area_struct *vma = walk->vma; in madvise_cold_or_pageout_pte_range()
327 return -EINTR; in madvise_cold_or_pageout_pte_range()
359 if (next - addr != HPAGE_PMD_SIZE) { in madvise_cold_or_pageout_pte_range()
388 list_add(&page->lru, &page_list); in madvise_cold_or_pageout_pte_range()
404 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in madvise_cold_or_pageout_pte_range()
444 pte--; in madvise_cold_or_pageout_pte_range()
445 addr -= PAGE_SIZE; in madvise_cold_or_pageout_pte_range()
451 * non-LRU page. in madvise_cold_or_pageout_pte_range()
463 tlb->fullmm); in madvise_cold_or_pageout_pte_range()
 472	 * As a side effect, it confuses idle-page tracking  in madvise_cold_or_pageout_pte_range()
482 list_add(&page->lru, &page_list); in madvise_cold_or_pageout_pte_range()
513 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private); in madvise_cold_page_range()
521 struct mm_struct *mm = vma->vm_mm; in madvise_cold()
526 return -EINVAL; in madvise_cold()
548 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private); in madvise_pageout_page_range()
554 if (!vma->vm_file) in can_do_file_pageout()
557 * paging out pagecache only for non-anonymous mappings that correspond in can_do_file_pageout()
559 * otherwise we'd be including shared non-exclusive mappings, which in can_do_file_pageout()
562 return inode_owner_or_capable(file_inode(vma->vm_file)) || in can_do_file_pageout()
563 inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0; in can_do_file_pageout()
570 struct mm_struct *mm = vma->vm_mm; in madvise_pageout()
576 return -EINVAL; in madvise_pageout()
598 struct mmu_gather *tlb = walk->private; in madvise_free_pte_range()
599 struct mm_struct *mm = tlb->mm; in madvise_free_pte_range()
600 struct vm_area_struct *vma = walk->vma; in madvise_free_pte_range()
 626	 * prevent swap-in, which is more expensive than  in madvise_free_pte_range()
635 nr_swap--; in madvise_free_pte_range()
637 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); in madvise_free_pte_range()
668 pte--; in madvise_free_pte_range()
669 addr -= PAGE_SIZE; in madvise_free_pte_range()
704 tlb->fullmm); in madvise_free_pte_range()
715 if (current->mm == mm) in madvise_free_pte_range()
734 struct mm_struct *mm = vma->vm_mm; in madvise_free_single_vma()
740 return -EINVAL; in madvise_free_single_vma()
742 range.start = max(vma->vm_start, start_addr); in madvise_free_single_vma()
743 if (range.start >= vma->vm_end) in madvise_free_single_vma()
744 return -EINVAL; in madvise_free_single_vma()
745 range.end = min(vma->vm_end, end_addr); in madvise_free_single_vma()
746 if (range.end <= vma->vm_start) in madvise_free_single_vma()
747 return -EINVAL; in madvise_free_single_vma()
757 walk_page_range(vma->vm_mm, range.start, range.end, in madvise_free_single_vma()
776 * as some implementations do. This has performance implications for
788 zap_page_range(vma, start, end - start); in madvise_dontneed_single_vma()
797 struct mm_struct *mm = vma->vm_mm; in madvise_dontneed_free()
801 return -EINVAL; in madvise_dontneed_free()
809 return -ENOMEM; in madvise_dontneed_free()
810 if (start < vma->vm_start) { in madvise_dontneed_free()
813 * with the lowest vma->vm_start where start in madvise_dontneed_free()
814 * is also < vma->vm_end. If start < in madvise_dontneed_free()
815 * vma->vm_start it means an hole materialized in madvise_dontneed_free()
820 return -ENOMEM; in madvise_dontneed_free()
823 return -EINVAL; in madvise_dontneed_free()
824 if (end > vma->vm_end) { in madvise_dontneed_free()
826 * Don't fail if end > vma->vm_end. If the old in madvise_dontneed_free()
834 * end-vma->vm_end range, but the manager can in madvise_dontneed_free()
837 end = vma->vm_end; in madvise_dontneed_free()
847 return -EINVAL; in madvise_dontneed_free()
861 struct mm_struct *mm = vma->vm_mm; in madvise_remove()
865 if (vma->vm_flags & VM_LOCKED) in madvise_remove()
866 return -EINVAL; in madvise_remove()
868 f = vma->vm_file; in madvise_remove()
870 if (!f || !f->f_mapping || !f->f_mapping->host) { in madvise_remove()
871 return -EINVAL; in madvise_remove()
874 if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE)) in madvise_remove()
875 return -EACCES; in madvise_remove()
877 offset = (loff_t)(start - vma->vm_start) in madvise_remove()
878 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); in madvise_remove()
893 offset, end - start); in madvise_remove()
910 return -EPERM; in madvise_inject_error()
944 /* Ensure that all poisoned pages are removed from per-cpu lists */ in madvise_inject_error()
1029 * use appropriate read-ahead and caching techniques. The information
1031 * kernel without affecting the correct operation of the application.
1034 * MADV_NORMAL - the default behavior is to read clusters. This
1035 * results in some read-ahead and read-behind.
1036 * MADV_RANDOM - the system should read the minimum amount of data
1037 * on any access, since it is unlikely that the appli-
1039 * MADV_SEQUENTIAL - pages in the given range will probably be accessed
1042 * MADV_WILLNEED - the application is notifying the system to read
1044 * MADV_DONTNEED - the application is finished with the given range,
1046 * MADV_FREE - the application marks pages in the given range as lazy free,
1048 * MADV_REMOVE - the application wants to free up the given range of
1050 * MADV_DONTFORK - omit this area from child's address space when forking:
1052 * MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
1053 * MADV_WIPEONFORK - present the child process with zero-filled memory in this
1055 * MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK
1056 * MADV_HWPOISON - trigger memory error handler as if the given memory range
1058 * MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
1059 * MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 1061 * MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
1062 * MADV_HUGEPAGE - the application wants to back the given range by transparent
1065 * MADV_NOHUGEPAGE - mark the given range as not worth being backed by
1068 * MADV_DONTDUMP - the application wants to prevent pages in the given range
1070 * MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
1071 * MADV_COLD - the application is not expected to use this memory soon,
1074 * MADV_PAGEOUT - the application is not expected to use this memory soon,
1078 * zero - success
1079 * -EINVAL - start + len < 0, start is not page-aligned,
1084 * -ENOMEM - addresses in the specified range are not currently
1086 * -EIO - an I/O error occurred while paging in data.
1087 * -EBADF - map exists, but area maps something that isn't a file.
1088 * -EAGAIN - a kernel resource was temporarily unavailable.
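
The block excerpted above is the kernel's own documentation of the madvise(2) advice values and return codes. As a quick illustration of the interface it describes, here is a hedged userspace sketch, not taken from this file: it maps anonymous memory, applies MADV_DONTNEED, and checks the documented effect that later reads of a private anonymous mapping see zero-filled pages; the mapping size is an arbitrary example.

/* Hedged userspace example of madvise(2); sizes are arbitrary. */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long page = sysconf(_SC_PAGESIZE);
    size_t len = 16 * (size_t)page;      /* arbitrary, page-aligned length */

    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    memset(p, 0xaa, len);                /* populate the pages */

    /* "The application is finished with the given range": the kernel may
     * drop the pages; later reads return zero-filled memory. */
    if (madvise(p, len, MADV_DONTNEED) != 0) {
        perror("madvise");
        return 1;
    }

    printf("first byte after MADV_DONTNEED: %d\n", p[0]);  /* prints 0 */

    munmap(p, len);
    return 0;
}
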
1095 int error = -EINVAL; in do_madvise()
1109 /* Check to see whether len was rounded up from small -ve to zero */ in do_madvise()
1129 return -EINTR; in do_madvise()
1136 * ranges, just ignore them, but return -ENOMEM at the end. in do_madvise()
1137 * - different from the way of handling in mlock etc. in do_madvise()
1140 if (vma && start > vma->vm_start) in do_madvise()
1146 error = -ENOMEM; in do_madvise()
1150 /* Here start < (end|vma->vm_end). */ in do_madvise()
1151 if (start < vma->vm_start) { in do_madvise()
1152 unmapped_error = -ENOMEM; in do_madvise()
1153 start = vma->vm_start; in do_madvise()
1158 /* Here vma->vm_start <= start < (end|vma->vm_end) */ in do_madvise()
1159 tmp = vma->vm_end; in do_madvise()
1163 /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */ in do_madvise()
1168 if (prev && start < prev->vm_end) in do_madvise()
1169 start = prev->vm_end; in do_madvise()
1174 vma = prev->vm_next; in do_madvise()
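
The do_madvise() fragments above implement what the comment at file lines 1136-1137 states: unmapped sub-ranges inside [start, end) are skipped, the hint is still applied to the mapped parts, and -ENOMEM is reported at the end, unlike mlock(). A small userspace sketch that provokes exactly that case; the three-page layout and the choice of MADV_DONTNEED are illustrative only.

/* Hedged example of the "-ENOMEM at the end" behaviour for ranges with a hole. */
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long page = sysconf(_SC_PAGESIZE);
    char *p = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    /* Punch a hole in the middle page so [p, p + 3*page) is partly unmapped. */
    munmap(p + page, page);

    /* The mapped first and last pages are still advised; the hole makes the
     * call report -ENOMEM, as documented above. */
    if (madvise(p, 3 * page, MADV_DONTNEED) != 0 && errno == ENOMEM)
        printf("madvise: ENOMEM for the unmapped part, as documented\n");

    munmap(p, page);
    munmap(p + 2 * page, page);
    return 0;
}
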
1190 return do_madvise(current->mm, start, len_in, behavior); in SYSCALL_DEFINE3()
1207 ret = -EINVAL; in SYSCALL_DEFINE5()
1223 ret = -ESRCH; in SYSCALL_DEFINE5()
1228 ret = -EINVAL; in SYSCALL_DEFINE5()
1235 ret = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH; in SYSCALL_DEFINE5()
1240 * Require CAP_SYS_NICE for influencing process performance. Note that in SYSCALL_DEFINE5()
1241 * only non-destructive hints are currently supported. in SYSCALL_DEFINE5()
1244 ret = -EPERM; in SYSCALL_DEFINE5()
1259 ret = (total_len - iov_iter_count(&iter)) ? : ret; in SYSCALL_DEFINE5()
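
The final group of matches comes from the process_madvise(2) entry point (SYSCALL_DEFINE5): the caller supplies a pidfd, an iovec array describing ranges in the target process, an advice value, and flags, and must hold CAP_SYS_NICE because the hint influences another process's performance. A hedged sketch of a caller follows; it uses syscall() directly in case the libc wrapper is missing, assumes headers new enough to define __NR_pidfd_open, __NR_process_madvise, and MADV_PAGEOUT (Linux 5.10+), and the pid and remote address range are placeholders a real caller would obtain from the target.

/* Hedged example of process_madvise(2); pid and addresses are placeholders. */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
    pid_t target_pid = 1234;                        /* placeholder pid */
    void *remote_addr = (void *)0x7f0000000000ULL;  /* placeholder remote range */
    size_t remote_len = 2UL * 1024 * 1024;

    int pidfd = syscall(__NR_pidfd_open, target_pid, 0);
    if (pidfd < 0) {
        perror("pidfd_open");
        return 1;
    }

    struct iovec iov = { .iov_base = remote_addr, .iov_len = remote_len };

    /* One of the non-destructive hints the CAP_SYS_NICE comment refers to. */
    long ret = syscall(__NR_process_madvise, pidfd, &iov, 1, MADV_PAGEOUT, 0);
    if (ret < 0)
        perror("process_madvise");
    else
        printf("advised %ld bytes\n", ret);

    close(pidfd);
    return 0;
}
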