// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/madvise.c
 *
 * Copyright (C) 1999 Linus Torvalds
 * Copyright (C) 2002 Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/page_idle.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/fadvise.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/uio.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/pagewalk.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/mmu_notifier.h>
#include <trace/hooks/mm.h>

#include <asm/tlb.h>

#include "internal.h"

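/*
 * Per-walk state shared by MADV_COLD and MADV_PAGEOUT: the mmu_gather used
 * for TLB invalidation, whether the walk should actually reclaim pages
 * (pageout) or merely deactivate them, and whether file-backed pages may be
 * paged out for this VMA (cached by madvise_pageout()).
 */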
struct madvise_walk_private {
	struct mmu_gather *tlb;
	bool pageout;
	bool can_pageout_file;
};

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_lock for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_FREE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_WIPEONFORK:
		/* MADV_WIPEONFORK is only supported on anonymous memory. */
		if (vma->vm_file || vma->vm_flags & VM_SHARED) {
			error = -EINVAL;
			goto out;
		}
		new_flags |= VM_WIPEONFORK;
		break;
	case MADV_KEEPONFORK:
		new_flags &= ~VM_WIPEONFORK;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out_convert_errno;
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error)
			goto out_convert_errno;
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, start, 1);
		if (error)
			goto out_convert_errno;
	}

	if (end != vma->vm_end) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, end, 0);
		if (error)
			goto out_convert_errno;
	}

success:
	/*
	 * vm_flags is protected by the mmap_lock held in write mode.
	 */
	vm_write_begin(vma);
	WRITE_ONCE(vma->vm_flags, new_flags);
	vm_write_end(vma);

out_convert_errno:
	/*
	 * madvise() returns EAGAIN if kernel resources, such as
	 * slab, are temporarily unavailable.
	 */
	if (error == -ENOMEM)
		error = -EAGAIN;
out:
	return error;
}

#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
	unsigned long end, struct mm_walk *walk)
{
	pte_t *orig_pte;
	struct vm_area_struct *vma = walk->private;
	unsigned long index;

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;

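	/*
	 * Walk the range one page at a time: sample each pte under the
	 * page table lock, drop the lock, and kick off an asynchronous
	 * read into the swap cache for any swapped-out entry.
	 */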
	for (index = start; index != end; index += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;
		spinlock_t *ptl;

		orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
		pte = *(orig_pte + ((index - start) / PAGE_SIZE));
		pte_unmap_unlock(orig_pte, ptl);

		if (pte_present(pte) || pte_none(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
					     vma, index, false);
		if (page)
			put_page(page);
	}

	return 0;
}

static const struct mm_walk_ops swapin_walk_ops = {
	.pmd_entry = swapin_walk_pmd_entry,
};

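/*
 * Walk the shmem mapping's page cache over the advised range and start
 * asynchronous swap-in for every entry that is currently swapped out
 * (stored in the XArray as a swap value).
 */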
static void force_shm_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
	pgoff_t end_index = linear_page_index(vma, end + PAGE_SIZE - 1);
	struct page *page;

	rcu_read_lock();
	xas_for_each(&xas, page, end_index) {
		swp_entry_t swap;

		if (!xa_is_value(page))
			continue;
		xas_pause(&xas);
		rcu_read_unlock();

		swap = radix_to_swp_entry(page);
		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
					     NULL, 0, false);
		if (page)
			put_page(page);

		rcu_read_lock();
	}
	rcu_read_unlock();

	lru_add_drain();	/* Push any new pages onto the LRU now */
}
#endif	/* CONFIG_SWAP */

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	loff_t offset;

	*prev = vma;
#ifdef CONFIG_SWAP
	if (!file) {
		walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);
		lru_add_drain(); /* Push any new pages onto the LRU now */
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		force_shm_swapin_readahead(vma, start, end,
					   file->f_mapping);
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	/*
	 * Filesystem's fadvise may need to take various locks.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_lock.
	 */
	*prev = NULL;	/* tell sys_madvise we drop mmap_lock */
	get_file(file);
	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	mmap_read_unlock(mm);
	vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
	fput(file);
	mmap_read_lock(mm);
	return 0;
}

static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	struct madvise_walk_private *private = walk->private;
	struct mmu_gather *tlb = private->tlb;
	bool pageout = private->pageout;
	bool pageout_anon_only = pageout && !private->can_pageout_file;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	pte_t *orig_pte, *pte, ptent;
	spinlock_t *ptl;
	struct page *page = NULL;
	LIST_HEAD(page_list);
	bool allow_shared = false;

	if (fatal_signal_pending(current))
		return -EINTR;

	trace_android_vh_madvise_cold_or_pageout(vma, &allow_shared);
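	/*
	 * Try to handle a whole transparent huge pmd in place.  If the
	 * advised range does not cover it, split the huge page and fall
	 * through to the regular per-pte path below.
	 */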
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge(*pmd)) {
		pmd_t orig_pmd;
		unsigned long next = pmd_addr_end(addr, end);

		tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
		ptl = pmd_trans_huge_lock(pmd, vma);
		if (!ptl)
			return 0;

		orig_pmd = *pmd;
		if (is_huge_zero_pmd(orig_pmd))
			goto huge_unlock;

		if (unlikely(!pmd_present(orig_pmd))) {
			VM_BUG_ON(thp_migration_supported() &&
					!is_pmd_migration_entry(orig_pmd));
			goto huge_unlock;
		}

		page = pmd_page(orig_pmd);

		/* Do not interfere with other mappings of this page */
		if (page_mapcount(page) != 1)
			goto huge_unlock;

		if (pageout_anon_only && !PageAnon(page))
			goto huge_unlock;

		if (next - addr != HPAGE_PMD_SIZE) {
			int err;

			get_page(page);
			spin_unlock(ptl);
			lock_page(page);
			err = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (!err)
				goto regular_page;
			return 0;
		}

		if (pmd_young(orig_pmd)) {
			pmdp_invalidate(vma, addr, pmd);
			orig_pmd = pmd_mkold(orig_pmd);

			set_pmd_at(mm, addr, pmd, orig_pmd);
			tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
		}

		ClearPageReferenced(page);
		test_and_clear_page_young(page);
		if (pageout) {
			if (!isolate_lru_page(page)) {
				if (PageUnevictable(page))
					putback_lru_page(page);
				else
					list_add(&page->lru, &page_list);
			}
		} else
			deactivate_page(page);
huge_unlock:
		spin_unlock(ptl);
		if (pageout)
			reclaim_pages(&page_list);
		return 0;
	}

regular_page:
	if (pmd_trans_unstable(pmd))
		return 0;
#endif
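	/*
	 * Regular (non-huge) path: age each mapped page and either
	 * deactivate it (MADV_COLD) or isolate it for immediate reclaim
	 * (MADV_PAGEOUT).
	 */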
	tlb_change_page_size(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr < end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;

		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/*
		 * Creating a THP page is expensive, so split it only if we
		 * are sure it's worth it. Split it if we are the only owner.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				break;
			if (pageout_anon_only && !PageAnon(page))
				break;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				break;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				pte_offset_map_lock(mm, pmd, addr, &ptl);
				break;
			}
			unlock_page(page);
			put_page(page);
			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		/*
		 * Do not interfere with other mappings of this page or
		 * with non-LRU pages.
		 */
		if (!allow_shared && (!PageLRU(page) || page_mapcount(page) != 1))
			continue;

		if (pageout_anon_only && !PageAnon(page))
			continue;

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (pte_young(ptent)) {
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			ptent = pte_mkold(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}

		/*
		 * We are deactivating a page to accelerate its reclaim.
		 * The VM cannot reclaim the page unless we clear PG_young.
		 * As a side effect, this confuses idle-page tracking,
		 * which will miss the recent reference history.
		 */
		ClearPageReferenced(page);
		test_and_clear_page_young(page);
		if (pageout) {
			if (!isolate_lru_page(page)) {
				if (PageUnevictable(page))
					putback_lru_page(page);
				else {
					list_add(&page->lru, &page_list);
					trace_android_vh_page_isolated_for_reclaim(mm, page);
				}
			}
		} else
			deactivate_page(page);
	}

	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	if (pageout)
		reclaim_pages(&page_list);
	cond_resched();

	return 0;
}

static const struct mm_walk_ops cold_walk_ops = {
	.pmd_entry = madvise_cold_or_pageout_pte_range,
};

static void madvise_cold_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct madvise_walk_private walk_private = {
		.pageout = false,
		.tlb = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
	tlb_end_vma(tlb, vma);
}

static long madvise_cold(struct vm_area_struct *vma,
			struct vm_area_struct **prev,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
	madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
	tlb_finish_mmu(&tlb, start_addr, end_addr);

	return 0;
}

static void madvise_pageout_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     bool can_pageout_file)
{
	struct madvise_walk_private walk_private = {
		.pageout = true,
		.tlb = tlb,
		.can_pageout_file = can_pageout_file,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
	tlb_end_vma(tlb, vma);
}

static inline bool can_do_file_pageout(struct vm_area_struct *vma)
{
	if (!vma->vm_file)
		return false;
	/*
	 * Page out the pagecache only for non-anonymous mappings that
	 * correspond to files the calling process could (if it tried) open
	 * for writing; otherwise we'd be including shared non-exclusive
	 * mappings, which opens a side channel.
	 */
	return inode_owner_or_capable(file_inode(vma->vm_file)) ||
		inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0;
}

static long madvise_pageout(struct vm_area_struct *vma,
			struct vm_area_struct **prev,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;
	bool can_pageout_file;

	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	/*
	 * If the VMA belongs to a private file mapping, there can be private
	 * dirty pages which can be paged out even if this process is neither
	 * owner nor write capable of the file. Cache the file access check
	 * here and use it later during the page walk.
	 */
	can_pageout_file = can_do_file_pageout(vma);

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
	madvise_pageout_page_range(&tlb, vma, start_addr, end_addr, can_pageout_file);
	tlb_finish_mmu(&tlb, start_addr, end_addr);

	return 0;
}

static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)

{
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte, *pte, ptent;
	struct page *page;
	int nr_swap = 0;
	unsigned long next;

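	/*
	 * Let madvise_free_huge_pmd() try to handle a transparent huge pmd
	 * in one go; fall through to the per-pte loop when it cannot.
	 */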
	next = pmd_addr_end(addr, end);
	if (pmd_trans_huge(*pmd))
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			goto next;

	if (pmd_trans_unstable(pmd))
		return 0;

	tlb_change_page_size(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;
		/*
		 * If the pte holds a swap entry, just clear the page table
		 * entry to prevent a swap-in, which would be more expensive
		 * than (page allocation + zeroing).
		 */
		if (!pte_present(ptent)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(ptent);
			if (non_swap_entry(entry))
				continue;
			nr_swap--;
			free_swap_and_cache(entry);
			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			continue;
		}

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/*
		 * If the pmd isn't transhuge but the page is a THP that is
		 * owned only by this process, split it and deactivate all
		 * of its pages.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				goto out;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				goto out;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				pte_offset_map_lock(mm, pmd, addr, &ptl);
				goto out;
			}
			unlock_page(page);
			put_page(page);
			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (PageSwapCache(page) || PageDirty(page)) {
			if (!trylock_page(page))
				continue;
			/*
			 * If the page is shared with others, we cannot clear
			 * its PG_dirty flag.
			 */
			if (page_mapcount(page) != 1) {
				unlock_page(page);
				continue;
			}

			if (PageSwapCache(page) && !try_to_free_swap(page)) {
				unlock_page(page);
				continue;
			}

			ClearPageDirty(page);
			unlock_page(page);
		}

		if (pte_young(ptent) || pte_dirty(ptent)) {
			/*
			 * Some architectures (e.g. PPC) don't update the TLB
			 * with set_pte_at and tlb_remove_tlb_entry, so for
			 * portability, remap the pte as old and clean after
			 * clearing it.
			 */
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);

			ptent = pte_mkold(ptent);
			ptent = pte_mkclean(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}
		mark_page_lazyfree(page);
	}
out:
	if (nr_swap) {
		if (current->mm == mm)
			sync_mm_rss(mm);

		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
next:
	return 0;
}

static const struct mm_walk_ops madvise_free_walk_ops = {
	.pmd_entry = madvise_free_pte_range,
};

static int madvise_free_single_vma(struct vm_area_struct *vma,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	/* MADV_FREE works only for anon vmas at the moment */
	if (!vma_is_anonymous(vma))
		return -EINVAL;

	range.start = max(vma->vm_start, start_addr);
	if (range.start >= vma->vm_end)
		return -EINVAL;
	range.end = min(vma->vm_end, end_addr);
	if (range.end <= vma->vm_start)
		return -EINVAL;
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
				range.start, range.end);

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, range.start, range.end);
	update_hiwater_rss(mm);

	mmu_notifier_invalidate_range_start(&range);
	tlb_start_vma(&tlb, vma);
	walk_page_range(vma->vm_mm, range.start, range.end,
			&madvise_free_walk_ops, &tlb);
	tlb_end_vma(&tlb, vma);
	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb, range.start, range.end);

	return 0;
}

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
{
	zap_page_range(vma, start, end - start);
	return 0;
}

static long madvise_dontneed_free(struct vm_area_struct *vma,
				  struct vm_area_struct **prev,
				  unsigned long start, unsigned long end,
				  int behavior)
{
	struct mm_struct *mm = vma->vm_mm;

	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	if (!userfaultfd_remove(vma, start, end)) {
		*prev = NULL; /* mmap_lock has been dropped, prev is stale */

		mmap_read_lock(mm);
		vma = find_vma(mm, start);
		if (!vma)
			return -ENOMEM;
		if (start < vma->vm_start) {
			/*
			 * This "vma" under revalidation is the one
			 * with the lowest vma->vm_start where start
			 * is also < vma->vm_end. If start <
			 * vma->vm_start it means a hole materialized
			 * in the user address space within the
			 * virtual range passed to MADV_DONTNEED
			 * or MADV_FREE.
			 */
			return -ENOMEM;
		}
		if (!can_madv_lru_vma(vma))
			return -EINVAL;
		if (end > vma->vm_end) {
			/*
			 * Don't fail if end > vma->vm_end. If the old
			 * vma was split while the mmap_lock was
			 * released, the effect of the concurrent
			 * operation may not cause madvise() to
			 * have an undefined result. There may be an
			 * adjacent next vma that we'll walk
			 * next. userfaultfd_remove() will generate an
			 * UFFD_EVENT_REMOVE repetition on the
			 * end-vma->vm_end range, but the manager can
			 * handle a repetition fine.
			 */
			end = vma->vm_end;
		}
		VM_WARN_ON(start >= end);
	}

	if (behavior == MADV_DONTNEED)
		return madvise_dontneed_single_vma(vma, start, end);
	else if (behavior == MADV_FREE)
		return madvise_free_single_vma(vma, start, end);
	else
		return -EINVAL;
}

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;
	struct mm_struct *mm = vma->vm_mm;

	*prev = NULL;	/* tell sys_madvise we drop mmap_lock */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host) {
		return -EINVAL;
	}

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_mutex.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_lock.
	 */
	get_file(f);
	if (userfaultfd_remove(vma, start, end)) {
		/* mmap_lock was not released by userfaultfd_remove() */
		mmap_read_unlock(mm);
	}
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	mmap_read_lock(mm);
	return error;
}

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_inject_error(int behavior,
		unsigned long start, unsigned long end)
{
	struct zone *zone;
	unsigned long size;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

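	/*
	 * Resolve each page in the range with get_user_pages_fast() and
	 * either soft-offline it or inject a hard memory failure,
	 * advancing by the (possibly compound) page size each iteration.
	 */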
	for (; start < end; start += size) {
		unsigned long pfn;
		struct page *page;
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &page);
		if (ret != 1)
			return ret;
		pfn = page_to_pfn(page);

		/*
		 * When soft offlining hugepages, after migrating the page
		 * we dissolve it, therefore in the second loop "page" will
		 * no longer be a compound page.
		 */
		size = page_size(compound_head(page));

		if (behavior == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
				 pfn, start);
			ret = soft_offline_page(pfn, MF_COUNT_INCREASED);
		} else {
			pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
				 pfn, start);
			ret = memory_failure(pfn, MF_COUNT_INCREASED);
		}

		if (ret)
			return ret;
	}

	/* Ensure that all poisoned pages are removed from per-cpu lists */
	for_each_populated_zone(zone)
		drain_all_pages(zone);

	return 0;
}
#endif

static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_COLD:
		return madvise_cold(vma, prev, start, end);
	case MADV_PAGEOUT:
		return madvise_pageout(vma, prev, start, end);
	case MADV_FREE:
	case MADV_DONTNEED:
		return madvise_dontneed_free(vma, prev, start, end, behavior);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
	case MADV_COLD:
	case MADV_PAGEOUT:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
	case MADV_WIPEONFORK:
	case MADV_KEEPONFORK:
#ifdef CONFIG_MEMORY_FAILURE
	case MADV_SOFT_OFFLINE:
	case MADV_HWPOISON:
#endif
		return true;

	default:
		return false;
	}
}

static bool
process_madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_WILLNEED:
		return true;
	default:
		return false;
	}
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the appli-
 *		cation will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_FREE - the application marks pages in the given range as lazy free,
 *		where actual purges are postponed until memory pressure happens.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_WIPEONFORK - present the child process with zero-filled memory in this
 *		range after a fork.
 *  MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK
 *  MADV_HWPOISON - trigger memory error handler as if the given memory range
 *		were corrupted by unrecoverable hardware memory failure.
 *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future. Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *  MADV_COLD - the application is not expected to use this memory soon,
 *		deactivate pages in this range so that they can be reclaimed
 *		easily if memory pressure happens.
 *  MADV_PAGEOUT - the application is not expected to use this memory soon,
 *		page out the pages in this range immediately.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages,
 *		or the specified address range includes file, Huge TLB,
 *		MAP_SHARED or VMPFNMAP range.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;
	struct blk_plug plug;

	start = untagged_addr(start);

	if (!madvise_behavior_valid(behavior))
		return error;

	if (!PAGE_ALIGNED(start))
		return error;
	len = PAGE_ALIGN(len_in);

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return error;

	end = start + len;
	if (end < start)
		return error;

	error = 0;
	if (end == start)
		return error;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_inject_error(behavior, start, start + len_in);
#endif

	write = madvise_need_mmap_write(behavior);
	if (write) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
	} else {
		mmap_read_lock(mm);
	}

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	blk_start_plug(&plug);
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_lock */
			vma = find_vma(mm, start);
	}
out:
	blk_finish_plug(&plug);
	if (write)
		mmap_write_unlock(mm);
	else
		mmap_read_unlock(mm);

	return error;
}

SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	return do_madvise(current->mm, start, len_in, behavior);
}

SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
		size_t, vlen, int, behavior, unsigned int, flags)
{
	ssize_t ret;
	struct iovec iovstack[UIO_FASTIOV], iovec;
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	struct pid *pid;
	struct task_struct *task;
	struct mm_struct *mm;
	size_t total_len;
	unsigned int f_flags;

	if (flags != 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = import_iovec(READ, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
	if (ret < 0)
		goto out;

	pid = pidfd_get_pid(pidfd, &f_flags);
	if (IS_ERR(pid)) {
		ret = PTR_ERR(pid);
		goto free_iov;
	}

	task = get_pid_task(pid, PIDTYPE_PID);
	if (!task) {
		ret = -ESRCH;
		goto put_pid;
	}

	if (!process_madvise_behavior_valid(behavior)) {
		ret = -EINVAL;
		goto release_task;
	}

	/* Require PTRACE_MODE_READ to avoid leaking ASLR metadata. */
	mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
	if (IS_ERR_OR_NULL(mm)) {
		ret = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
		goto release_task;
	}

	/*
	 * Require CAP_SYS_NICE for influencing process performance. Note that
	 * only non-destructive hints are currently supported.
	 */
	if (!capable(CAP_SYS_NICE)) {
		ret = -EPERM;
		goto release_mm;
	}

	total_len = iov_iter_count(&iter);

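	/*
	 * Apply the hint to each iovec range in turn.  If any range was
	 * processed, report the number of bytes handled so far; otherwise
	 * return the first error.
	 */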
	while (iov_iter_count(&iter)) {
		iovec = iov_iter_iovec(&iter);
		ret = do_madvise(mm, (unsigned long)iovec.iov_base,
					iovec.iov_len, behavior);
		if (ret < 0)
			break;
		iov_iter_advance(&iter, iovec.iov_len);
	}

	ret = (total_len - iov_iter_count(&iter)) ? : ret;

release_mm:
	mmput(mm);
release_task:
	put_task_struct(task);
put_pid:
	put_pid(pid);
free_iov:
	kfree(iov);
out:
	return ret;
}