xref: /OK3568_Linux_fs/kernel/mm/madvise.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *	linux/mm/madvise.c
4  *
5  * Copyright (C) 1999  Linus Torvalds
6  * Copyright (C) 2002  Christoph Hellwig
7  */
8 
9 #include <linux/mman.h>
10 #include <linux/pagemap.h>
11 #include <linux/syscalls.h>
12 #include <linux/mempolicy.h>
13 #include <linux/page-isolation.h>
14 #include <linux/page_idle.h>
15 #include <linux/userfaultfd_k.h>
16 #include <linux/hugetlb.h>
17 #include <linux/falloc.h>
18 #include <linux/fadvise.h>
19 #include <linux/sched.h>
20 #include <linux/sched/mm.h>
21 #include <linux/uio.h>
22 #include <linux/ksm.h>
23 #include <linux/fs.h>
24 #include <linux/file.h>
25 #include <linux/blkdev.h>
26 #include <linux/backing-dev.h>
27 #include <linux/pagewalk.h>
28 #include <linux/swap.h>
29 #include <linux/swapops.h>
30 #include <linux/shmem_fs.h>
31 #include <linux/mmu_notifier.h>
32 #include <trace/hooks/mm.h>
33 
34 #include <asm/tlb.h>
35 
36 #include "internal.h"
37 
38 struct madvise_walk_private {
39 	struct mmu_gather *tlb;
40 	bool pageout;
41 	bool can_pageout_file;
42 };
43 
44 /*
45  * Any behaviour which results in changes to the vma->vm_flags needs to
46  * take mmap_lock for writing. Others, which simply traverse vmas, need
47  * to only take it for reading.
48  */
49 static int madvise_need_mmap_write(int behavior)
50 {
51 	switch (behavior) {
52 	case MADV_REMOVE:
53 	case MADV_WILLNEED:
54 	case MADV_DONTNEED:
55 	case MADV_COLD:
56 	case MADV_PAGEOUT:
57 	case MADV_FREE:
58 		return 0;
59 	default:
60 		/* be safe, default to 1. list exceptions explicitly */
61 		return 1;
62 	}
63 }
64 
65 /*
66  * We can potentially split a vm area into separate
67  * areas, each area with its own behavior.
68  */
69 static long madvise_behavior(struct vm_area_struct *vma,
70 		     struct vm_area_struct **prev,
71 		     unsigned long start, unsigned long end, int behavior)
72 {
73 	struct mm_struct *mm = vma->vm_mm;
74 	int error = 0;
75 	pgoff_t pgoff;
76 	unsigned long new_flags = vma->vm_flags;
77 
78 	switch (behavior) {
79 	case MADV_NORMAL:
80 		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
81 		break;
82 	case MADV_SEQUENTIAL:
83 		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
84 		break;
85 	case MADV_RANDOM:
86 		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
87 		break;
88 	case MADV_DONTFORK:
89 		new_flags |= VM_DONTCOPY;
90 		break;
91 	case MADV_DOFORK:
92 		if (vma->vm_flags & VM_IO) {
93 			error = -EINVAL;
94 			goto out;
95 		}
96 		new_flags &= ~VM_DONTCOPY;
97 		break;
98 	case MADV_WIPEONFORK:
99 		/* MADV_WIPEONFORK is only supported on anonymous memory. */
100 		if (vma->vm_file || vma->vm_flags & VM_SHARED) {
101 			error = -EINVAL;
102 			goto out;
103 		}
104 		new_flags |= VM_WIPEONFORK;
105 		break;
106 	case MADV_KEEPONFORK:
107 		new_flags &= ~VM_WIPEONFORK;
108 		break;
109 	case MADV_DONTDUMP:
110 		new_flags |= VM_DONTDUMP;
111 		break;
112 	case MADV_DODUMP:
113 		if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {
114 			error = -EINVAL;
115 			goto out;
116 		}
117 		new_flags &= ~VM_DONTDUMP;
118 		break;
119 	case MADV_MERGEABLE:
120 	case MADV_UNMERGEABLE:
121 		error = ksm_madvise(vma, start, end, behavior, &new_flags);
122 		if (error)
123 			goto out_convert_errno;
124 		break;
125 	case MADV_HUGEPAGE:
126 	case MADV_NOHUGEPAGE:
127 		error = hugepage_madvise(vma, &new_flags, behavior);
128 		if (error)
129 			goto out_convert_errno;
130 		break;
131 	}
132 
133 	if (new_flags == vma->vm_flags) {
134 		*prev = vma;
135 		goto out;
136 	}
137 
138 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
139 	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
140 			  vma->vm_file, pgoff, vma_policy(vma),
141 			  vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
142 	if (*prev) {
143 		vma = *prev;
144 		goto success;
145 	}
146 
147 	*prev = vma;
148 
149 	if (start != vma->vm_start) {
150 		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
151 			error = -ENOMEM;
152 			goto out;
153 		}
154 		error = __split_vma(mm, vma, start, 1);
155 		if (error)
156 			goto out_convert_errno;
157 	}
158 
159 	if (end != vma->vm_end) {
160 		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
161 			error = -ENOMEM;
162 			goto out;
163 		}
164 		error = __split_vma(mm, vma, end, 0);
165 		if (error)
166 			goto out_convert_errno;
167 	}
168 
169 success:
170 	/*
171 	 * vm_flags is protected by the mmap_lock held in write mode.
172 	 */
173 	vm_write_begin(vma);
174 	WRITE_ONCE(vma->vm_flags, new_flags);
175 	vm_write_end(vma);
176 
177 out_convert_errno:
178 	/*
179 	 * madvise() returns EAGAIN if kernel resources, such as
180 	 * slab, are temporarily unavailable.
181 	 */
182 	if (error == -ENOMEM)
183 		error = -EAGAIN;
184 out:
185 	return error;
186 }
187 
188 #ifdef CONFIG_SWAP
189 static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
190 	unsigned long end, struct mm_walk *walk)
191 {
192 	pte_t *orig_pte;
193 	struct vm_area_struct *vma = walk->private;
194 	unsigned long index;
195 
196 	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
197 		return 0;
198 
199 	for (index = start; index != end; index += PAGE_SIZE) {
200 		pte_t pte;
201 		swp_entry_t entry;
202 		struct page *page;
203 		spinlock_t *ptl;
204 
205 		orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
206 		pte = *(orig_pte + ((index - start) / PAGE_SIZE));
207 		pte_unmap_unlock(orig_pte, ptl);
208 
209 		if (pte_present(pte) || pte_none(pte))
210 			continue;
211 		entry = pte_to_swp_entry(pte);
212 		if (unlikely(non_swap_entry(entry)))
213 			continue;
214 
215 		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
216 							vma, index, false);
217 		if (page)
218 			put_page(page);
219 	}
220 
221 	return 0;
222 }
223 
224 static const struct mm_walk_ops swapin_walk_ops = {
225 	.pmd_entry		= swapin_walk_pmd_entry,
226 };
227 
228 static void force_shm_swapin_readahead(struct vm_area_struct *vma,
229 		unsigned long start, unsigned long end,
230 		struct address_space *mapping)
231 {
232 	XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
233 	pgoff_t end_index = linear_page_index(vma, end + PAGE_SIZE - 1);
234 	struct page *page;
235 
236 	rcu_read_lock();
237 	xas_for_each(&xas, page, end_index) {
238 		swp_entry_t swap;
239 
240 		if (!xa_is_value(page))
241 			continue;
242 		xas_pause(&xas);
243 		rcu_read_unlock();
244 
245 		swap = radix_to_swp_entry(page);
246 		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
247 							NULL, 0, false);
248 		if (page)
249 			put_page(page);
250 
251 		rcu_read_lock();
252 	}
253 	rcu_read_unlock();
254 
255 	lru_add_drain();	/* Push any new pages onto the LRU now */
256 }
257 #endif		/* CONFIG_SWAP */
258 
259 /*
260  * Schedule all required I/O operations.  Do not wait for completion.
261  */
262 static long madvise_willneed(struct vm_area_struct *vma,
263 			     struct vm_area_struct **prev,
264 			     unsigned long start, unsigned long end)
265 {
266 	struct mm_struct *mm = vma->vm_mm;
267 	struct file *file = vma->vm_file;
268 	loff_t offset;
269 
270 	*prev = vma;
271 #ifdef CONFIG_SWAP
272 	if (!file) {
273 		walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);
274 		lru_add_drain(); /* Push any new pages onto the LRU now */
275 		return 0;
276 	}
277 
278 	if (shmem_mapping(file->f_mapping)) {
279 		force_shm_swapin_readahead(vma, start, end,
280 					file->f_mapping);
281 		return 0;
282 	}
283 #else
284 	if (!file)
285 		return -EBADF;
286 #endif
287 
288 	if (IS_DAX(file_inode(file))) {
289 		/* no bad return value, but ignore advice */
290 		return 0;
291 	}
292 
293 	/*
294 	 * Filesystem's fadvise may need to take various locks.  We need to
295 	 * explicitly grab a reference because the vma (and hence the
296 	 * vma's reference to the file) can go away as soon as we drop
297 	 * mmap_lock.
298 	 */
299 	*prev = NULL;	/* tell sys_madvise we drop mmap_lock */
300 	get_file(file);
301 	offset = (loff_t)(start - vma->vm_start)
302 			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
303 	mmap_read_unlock(mm);
304 	vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
305 	fput(file);
306 	mmap_read_lock(mm);
307 	return 0;
308 }
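/*
 * Illustrative userspace sketch (not part of this file): after a region has
 * been swapped out under memory pressure, MADV_WILLNEED lets an application
 * start the swap-ins asynchronously before touching the data, instead of
 * taking a blocking major fault per page. buf, len, do_other_work() and
 * process() are hypothetical.
 *
 *	madvise(buf, len, MADV_WILLNEED);	// kick off readahead/swap-in
 *	do_other_work();			// overlap the I/O with other work
 *	process(buf, len);			// pages are likely resident now
 */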
309 
310 static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
311 				unsigned long addr, unsigned long end,
312 				struct mm_walk *walk)
313 {
314 	struct madvise_walk_private *private = walk->private;
315 	struct mmu_gather *tlb = private->tlb;
316 	bool pageout = private->pageout;
317 	bool pageout_anon_only = pageout && !private->can_pageout_file;
318 	struct mm_struct *mm = tlb->mm;
319 	struct vm_area_struct *vma = walk->vma;
320 	pte_t *orig_pte, *pte, ptent;
321 	spinlock_t *ptl;
322 	struct page *page = NULL;
323 	LIST_HEAD(page_list);
324 	bool allow_shared = false;
325 
326 	if (fatal_signal_pending(current))
327 		return -EINTR;
328 
329 	trace_android_vh_madvise_cold_or_pageout(vma, &allow_shared);
330 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
331 	if (pmd_trans_huge(*pmd)) {
332 		pmd_t orig_pmd;
333 		unsigned long next = pmd_addr_end(addr, end);
334 
335 		tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
336 		ptl = pmd_trans_huge_lock(pmd, vma);
337 		if (!ptl)
338 			return 0;
339 
340 		orig_pmd = *pmd;
341 		if (is_huge_zero_pmd(orig_pmd))
342 			goto huge_unlock;
343 
344 		if (unlikely(!pmd_present(orig_pmd))) {
345 			VM_BUG_ON(thp_migration_supported() &&
346 					!is_pmd_migration_entry(orig_pmd));
347 			goto huge_unlock;
348 		}
349 
350 		page = pmd_page(orig_pmd);
351 
352 		/* Do not interfere with other mappings of this page */
353 		if (page_mapcount(page) != 1)
354 			goto huge_unlock;
355 
356 		if (pageout_anon_only && !PageAnon(page))
357 			goto huge_unlock;
358 
359 		if (next - addr != HPAGE_PMD_SIZE) {
360 			int err;
361 
362 			get_page(page);
363 			spin_unlock(ptl);
364 			lock_page(page);
365 			err = split_huge_page(page);
366 			unlock_page(page);
367 			put_page(page);
368 			if (!err)
369 				goto regular_page;
370 			return 0;
371 		}
372 
373 		if (pmd_young(orig_pmd)) {
374 			pmdp_invalidate(vma, addr, pmd);
375 			orig_pmd = pmd_mkold(orig_pmd);
376 
377 			set_pmd_at(mm, addr, pmd, orig_pmd);
378 			tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
379 		}
380 
381 		ClearPageReferenced(page);
382 		test_and_clear_page_young(page);
383 		if (pageout) {
384 			if (!isolate_lru_page(page)) {
385 				if (PageUnevictable(page))
386 					putback_lru_page(page);
387 				else
388 					list_add(&page->lru, &page_list);
389 			}
390 		} else
391 			deactivate_page(page);
392 huge_unlock:
393 		spin_unlock(ptl);
394 		if (pageout)
395 			reclaim_pages(&page_list);
396 		return 0;
397 	}
398 
399 regular_page:
400 	if (pmd_trans_unstable(pmd))
401 		return 0;
402 #endif
403 	tlb_change_page_size(tlb, PAGE_SIZE);
404 	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
405 	flush_tlb_batched_pending(mm);
406 	arch_enter_lazy_mmu_mode();
407 	for (; addr < end; pte++, addr += PAGE_SIZE) {
408 		ptent = *pte;
409 
410 		if (pte_none(ptent))
411 			continue;
412 
413 		if (!pte_present(ptent))
414 			continue;
415 
416 		page = vm_normal_page(vma, addr, ptent);
417 		if (!page)
418 			continue;
419 
420 		/*
421 		 * Creating a THP page is expensive, so split it only if we
422 		 * are sure it's worth it. Split it if we are the only owner.
423 		 */
424 		if (PageTransCompound(page)) {
425 			if (page_mapcount(page) != 1)
426 				break;
427 			if (pageout_anon_only && !PageAnon(page))
428 				break;
429 			get_page(page);
430 			if (!trylock_page(page)) {
431 				put_page(page);
432 				break;
433 			}
434 			pte_unmap_unlock(orig_pte, ptl);
435 			if (split_huge_page(page)) {
436 				unlock_page(page);
437 				put_page(page);
438 				pte_offset_map_lock(mm, pmd, addr, &ptl);
439 				break;
440 			}
441 			unlock_page(page);
442 			put_page(page);
443 			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
444 			pte--;
445 			addr -= PAGE_SIZE;
446 			continue;
447 		}
448 
449 		/*
450 		 * Do not interfere with other mappings of this page, and
451 		 * do not touch non-LRU pages.
452 		 */
453 		if (!allow_shared && (!PageLRU(page) || page_mapcount(page) != 1))
454 			continue;
455 
456 		if (pageout_anon_only && !PageAnon(page))
457 			continue;
458 
459 		VM_BUG_ON_PAGE(PageTransCompound(page), page);
460 
461 		if (pte_young(ptent)) {
462 			ptent = ptep_get_and_clear_full(mm, addr, pte,
463 							tlb->fullmm);
464 			ptent = pte_mkold(ptent);
465 			set_pte_at(mm, addr, pte, ptent);
466 			tlb_remove_tlb_entry(tlb, pte, addr);
467 		}
468 
469 		/*
470 		 * We are deactivating a page to accelerate its reclaim.
471 		 * The VM cannot reclaim the page unless we clear PG_young.
472 		 * As a side effect, this confuses idle-page tracking,
473 		 * which will miss the recent reference history.
474 		 */
475 		ClearPageReferenced(page);
476 		test_and_clear_page_young(page);
477 		if (pageout) {
478 			if (!isolate_lru_page(page)) {
479 				if (PageUnevictable(page))
480 					putback_lru_page(page);
481 				else {
482 					list_add(&page->lru, &page_list);
483 					trace_android_vh_page_isolated_for_reclaim(mm, page);
484 				}
485 			}
486 		} else
487 			deactivate_page(page);
488 	}
489 
490 	arch_leave_lazy_mmu_mode();
491 	pte_unmap_unlock(orig_pte, ptl);
492 	if (pageout)
493 		reclaim_pages(&page_list);
494 	cond_resched();
495 
496 	return 0;
497 }
498 
499 static const struct mm_walk_ops cold_walk_ops = {
500 	.pmd_entry = madvise_cold_or_pageout_pte_range,
501 };
502 
503 static void madvise_cold_page_range(struct mmu_gather *tlb,
504 			     struct vm_area_struct *vma,
505 			     unsigned long addr, unsigned long end)
506 {
507 	struct madvise_walk_private walk_private = {
508 		.pageout = false,
509 		.tlb = tlb,
510 	};
511 
512 	tlb_start_vma(tlb, vma);
513 	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
514 	tlb_end_vma(tlb, vma);
515 }
516 
517 static long madvise_cold(struct vm_area_struct *vma,
518 			struct vm_area_struct **prev,
519 			unsigned long start_addr, unsigned long end_addr)
520 {
521 	struct mm_struct *mm = vma->vm_mm;
522 	struct mmu_gather tlb;
523 
524 	*prev = vma;
525 	if (!can_madv_lru_vma(vma))
526 		return -EINVAL;
527 
528 	lru_add_drain();
529 	tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
530 	madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
531 	tlb_finish_mmu(&tlb, start_addr, end_addr);
532 
533 	return 0;
534 }
535 
536 static void madvise_pageout_page_range(struct mmu_gather *tlb,
537 			     struct vm_area_struct *vma,
538 			     unsigned long addr, unsigned long end,
539 			     bool can_pageout_file)
540 {
541 	struct madvise_walk_private walk_private = {
542 		.pageout = true,
543 		.tlb = tlb,
544 		.can_pageout_file = can_pageout_file,
545 	};
546 
547 	tlb_start_vma(tlb, vma);
548 	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
549 	tlb_end_vma(tlb, vma);
550 }
551 
552 static inline bool can_do_file_pageout(struct vm_area_struct *vma)
553 {
554 	if (!vma->vm_file)
555 		return false;
556 	/*
557 	 * Page out pagecache only for non-anonymous mappings that correspond
558 	 * to files the calling process could (if it tried) open for writing;
559 	 * otherwise we'd be including shared non-exclusive mappings, which
560 	 * opens a side channel.
561 	 */
562 	return inode_owner_or_capable(file_inode(vma->vm_file)) ||
563 		inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0;
564 }
565 
566 static long madvise_pageout(struct vm_area_struct *vma,
567 			struct vm_area_struct **prev,
568 			unsigned long start_addr, unsigned long end_addr)
569 {
570 	struct mm_struct *mm = vma->vm_mm;
571 	struct mmu_gather tlb;
572 	bool can_pageout_file;
573 
574 	*prev = vma;
575 	if (!can_madv_lru_vma(vma))
576 		return -EINVAL;
577 
578 	/*
579 	 * If the VMA belongs to a private file mapping, there can be private
580 	 * dirty pages which can be paged out even if this process is neither
581 	 * the owner of nor write-capable on the file. Cache the file access check
582 	 * here and use it later during page walk.
583 	 */
584 	can_pageout_file = can_do_file_pageout(vma);
585 
586 	lru_add_drain();
587 	tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
588 	madvise_pageout_page_range(&tlb, vma, start_addr, end_addr, can_pageout_file);
589 	tlb_finish_mmu(&tlb, start_addr, end_addr);
590 
591 	return 0;
592 }
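/*
 * Illustrative userspace sketch (not part of this file): an application done
 * with a large cache can hint reclaim itself. These are alternatives:
 * MADV_COLD merely deactivates the pages so reclaim prefers them later,
 * while MADV_PAGEOUT reclaims them right away (file pages only subject to
 * the can_do_file_pageout() check above). "cache" and "cache_len" are
 * hypothetical.
 *
 *	madvise(cache, cache_len, MADV_COLD);	 // cheap: just age the pages
 *	madvise(cache, cache_len, MADV_PAGEOUT); // swap out / write back now
 */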
593 
594 static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
595 				unsigned long end, struct mm_walk *walk)
596 
597 {
598 	struct mmu_gather *tlb = walk->private;
599 	struct mm_struct *mm = tlb->mm;
600 	struct vm_area_struct *vma = walk->vma;
601 	spinlock_t *ptl;
602 	pte_t *orig_pte, *pte, ptent;
603 	struct page *page;
604 	int nr_swap = 0;
605 	unsigned long next;
606 
607 	next = pmd_addr_end(addr, end);
608 	if (pmd_trans_huge(*pmd))
609 		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
610 			goto next;
611 
612 	if (pmd_trans_unstable(pmd))
613 		return 0;
614 
615 	tlb_change_page_size(tlb, PAGE_SIZE);
616 	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
617 	flush_tlb_batched_pending(mm);
618 	arch_enter_lazy_mmu_mode();
619 	for (; addr != end; pte++, addr += PAGE_SIZE) {
620 		ptent = *pte;
621 
622 		if (pte_none(ptent))
623 			continue;
624 		/*
625 		 * If the pte holds a swap entry, just clear the page table
626 		 * entry: swapping the page back in would be more expensive
627 		 * than (page allocation + zeroing).
628 		 */
629 		if (!pte_present(ptent)) {
630 			swp_entry_t entry;
631 
632 			entry = pte_to_swp_entry(ptent);
633 			if (non_swap_entry(entry))
634 				continue;
635 			nr_swap--;
636 			free_swap_and_cache(entry);
637 			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
638 			continue;
639 		}
640 
641 		page = vm_normal_page(vma, addr, ptent);
642 		if (!page)
643 			continue;
644 
645 		/*
646 		 * If the pmd isn't transhuge but the page is a THP
647 		 * owned only by this process, split it and
648 		 * deactivate all pages.
649 		 */
650 		if (PageTransCompound(page)) {
651 			if (page_mapcount(page) != 1)
652 				goto out;
653 			get_page(page);
654 			if (!trylock_page(page)) {
655 				put_page(page);
656 				goto out;
657 			}
658 			pte_unmap_unlock(orig_pte, ptl);
659 			if (split_huge_page(page)) {
660 				unlock_page(page);
661 				put_page(page);
662 				pte_offset_map_lock(mm, pmd, addr, &ptl);
663 				goto out;
664 			}
665 			unlock_page(page);
666 			put_page(page);
667 			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
668 			pte--;
669 			addr -= PAGE_SIZE;
670 			continue;
671 		}
672 
673 		VM_BUG_ON_PAGE(PageTransCompound(page), page);
674 
675 		if (PageSwapCache(page) || PageDirty(page)) {
676 			if (!trylock_page(page))
677 				continue;
678 			/*
679 			 * If the page is shared with others, we cannot clear
680 			 * its PG_dirty bit.
681 			 */
682 			if (page_mapcount(page) != 1) {
683 				unlock_page(page);
684 				continue;
685 			}
686 
687 			if (PageSwapCache(page) && !try_to_free_swap(page)) {
688 				unlock_page(page);
689 				continue;
690 			}
691 
692 			ClearPageDirty(page);
693 			unlock_page(page);
694 		}
695 
696 		if (pte_young(ptent) || pte_dirty(ptent)) {
697 			/*
698 			 * Some architectures (e.g. PPC) don't update the TLB
699 			 * with set_pte_at and tlb_remove_tlb_entry, so for
700 			 * portability, re-install the pte as old|clean
701 			 * after clearing it.
702 			 */
703 			ptent = ptep_get_and_clear_full(mm, addr, pte,
704 							tlb->fullmm);
705 
706 			ptent = pte_mkold(ptent);
707 			ptent = pte_mkclean(ptent);
708 			set_pte_at(mm, addr, pte, ptent);
709 			tlb_remove_tlb_entry(tlb, pte, addr);
710 		}
711 		mark_page_lazyfree(page);
712 	}
713 out:
714 	if (nr_swap) {
715 		if (current->mm == mm)
716 			sync_mm_rss(mm);
717 
718 		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
719 	}
720 	arch_leave_lazy_mmu_mode();
721 	pte_unmap_unlock(orig_pte, ptl);
722 	cond_resched();
723 next:
724 	return 0;
725 }
726 
727 static const struct mm_walk_ops madvise_free_walk_ops = {
728 	.pmd_entry		= madvise_free_pte_range,
729 };
730 
731 static int madvise_free_single_vma(struct vm_area_struct *vma,
732 			unsigned long start_addr, unsigned long end_addr)
733 {
734 	struct mm_struct *mm = vma->vm_mm;
735 	struct mmu_notifier_range range;
736 	struct mmu_gather tlb;
737 
738 	/* MADV_FREE works for only anon vma at the moment */
739 	if (!vma_is_anonymous(vma))
740 		return -EINVAL;
741 
742 	range.start = max(vma->vm_start, start_addr);
743 	if (range.start >= vma->vm_end)
744 		return -EINVAL;
745 	range.end = min(vma->vm_end, end_addr);
746 	if (range.end <= vma->vm_start)
747 		return -EINVAL;
748 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
749 				range.start, range.end);
750 
751 	lru_add_drain();
752 	tlb_gather_mmu(&tlb, mm, range.start, range.end);
753 	update_hiwater_rss(mm);
754 
755 	mmu_notifier_invalidate_range_start(&range);
756 	tlb_start_vma(&tlb, vma);
757 	walk_page_range(vma->vm_mm, range.start, range.end,
758 			&madvise_free_walk_ops, &tlb);
759 	tlb_end_vma(&tlb, vma);
760 	mmu_notifier_invalidate_range_end(&range);
761 	tlb_finish_mmu(&tlb, range.start, range.end);
762 
763 	return 0;
764 }
765 
766 /*
767  * Application no longer needs these pages.  If the pages are dirty,
768  * it's OK to just throw them away.  The app will be more careful about
769  * data it wants to keep.  Be sure to free swap resources too.  The
770  * zap_page_range call sets things up for shrink_active_list to actually free
771  * these pages later if no one else has touched them in the meantime,
772  * although we could add these pages to a global reuse list for
773  * shrink_active_list to pick up before reclaiming other pages.
774  *
775  * NB: This interface discards data rather than pushes it out to swap,
776  * as some implementations do.  This has performance implications for
777  * applications like large transactional databases which want to discard
778  * pages in anonymous maps after committing to backing store the data
779  * that was kept in them.  There is no reason to write this data out to
780  * the swap area if the application is discarding it.
781  *
782  * An interface that causes the system to free clean pages and flush
783  * dirty pages is already available as msync(MS_INVALIDATE).
784  */
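/*
 * Illustrative userspace sketch (not part of this file) of the discard
 * semantics described above, for a private anonymous mapping "buf"
 * (hypothetical):
 *
 *	buf[0] = 42;
 *	madvise(buf, len, MADV_DONTNEED);
 *	assert(buf[0] == 0);	// pages dropped immediately; reads fault
 *				// in fresh zero-filled pages
 *
 *	buf[0] = 42;
 *	madvise(buf, len, MADV_FREE);
 *	// buf[0] may still read 42: pages are freed lazily, only if
 *	// reclaim gets to them first; a later write cancels the hint.
 */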
785 static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
786 					unsigned long start, unsigned long end)
787 {
788 	zap_page_range(vma, start, end - start);
789 	return 0;
790 }
791 
792 static long madvise_dontneed_free(struct vm_area_struct *vma,
793 				  struct vm_area_struct **prev,
794 				  unsigned long start, unsigned long end,
795 				  int behavior)
796 {
797 	struct mm_struct *mm = vma->vm_mm;
798 
799 	*prev = vma;
800 	if (!can_madv_lru_vma(vma))
801 		return -EINVAL;
802 
803 	if (!userfaultfd_remove(vma, start, end)) {
804 		*prev = NULL; /* mmap_lock has been dropped, prev is stale */
805 
806 		mmap_read_lock(mm);
807 		vma = find_vma(mm, start);
808 		if (!vma)
809 			return -ENOMEM;
810 		if (start < vma->vm_start) {
811 			/*
812 			 * This "vma" under revalidation is the one
813 			 * with the lowest vma->vm_start where start
814 			 * is also < vma->vm_end. If start <
815 			 * vma->vm_start it means a hole materialized
816 			 * in the user address space within the
817 			 * virtual range passed to MADV_DONTNEED
818 			 * or MADV_FREE.
819 			 */
820 			return -ENOMEM;
821 		}
822 		if (!can_madv_lru_vma(vma))
823 			return -EINVAL;
824 		if (end > vma->vm_end) {
825 			/*
826 			 * Don't fail if end > vma->vm_end. If the old
827 			 * vma was split while the mmap_lock was
828 			 * released, the effect of the concurrent
829 			 * operation should not leave madvise() with
830 			 * an undefined result. There may be an
831 			 * adjacent next vma that we'll walk
832 			 * next. userfaultfd_remove() will generate an
833 			 * UFFD_EVENT_REMOVE repetition on the
834 			 * end-vma->vm_end range, but the manager can
835 			 * handle a repetition fine.
836 			 */
837 			end = vma->vm_end;
838 		}
839 		VM_WARN_ON(start >= end);
840 	}
841 
842 	if (behavior == MADV_DONTNEED)
843 		return madvise_dontneed_single_vma(vma, start, end);
844 	else if (behavior == MADV_FREE)
845 		return madvise_free_single_vma(vma, start, end);
846 	else
847 		return -EINVAL;
848 }
849 
850 /*
851  * Application wants to free up the pages and associated backing store.
852  * This is effectively punching a hole into the middle of a file.
853  */
854 static long madvise_remove(struct vm_area_struct *vma,
855 				struct vm_area_struct **prev,
856 				unsigned long start, unsigned long end)
857 {
858 	loff_t offset;
859 	int error;
860 	struct file *f;
861 	struct mm_struct *mm = vma->vm_mm;
862 
863 	*prev = NULL;	/* tell sys_madvise we drop mmap_lock */
864 
865 	if (vma->vm_flags & VM_LOCKED)
866 		return -EINVAL;
867 
868 	f = vma->vm_file;
869 
870 	if (!f || !f->f_mapping || !f->f_mapping->host) {
871 		return -EINVAL;
872 	}
873 
874 	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
875 		return -EACCES;
876 
877 	offset = (loff_t)(start - vma->vm_start)
878 			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
879 
880 	/*
881 	 * Filesystem's fallocate may need to take i_mutex.  We need to
882 	 * explicitly grab a reference because the vma (and hence the
883 	 * vma's reference to the file) can go away as soon as we drop
884 	 * mmap_lock.
885 	 */
886 	get_file(f);
887 	if (userfaultfd_remove(vma, start, end)) {
888 		/* mmap_lock was not released by userfaultfd_remove() */
889 		mmap_read_unlock(mm);
890 	}
891 	error = vfs_fallocate(f,
892 				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
893 				offset, end - start);
894 	fput(f);
895 	mmap_read_lock(mm);
896 	return error;
897 }
898 
899 #ifdef CONFIG_MEMORY_FAILURE
900 /*
901  * Error injection support for memory error handling.
902  */
903 static int madvise_inject_error(int behavior,
904 		unsigned long start, unsigned long end)
905 {
906 	struct zone *zone;
907 	unsigned long size;
908 
909 	if (!capable(CAP_SYS_ADMIN))
910 		return -EPERM;
911 
912 
913 	for (; start < end; start += size) {
914 		unsigned long pfn;
915 		struct page *page;
916 		int ret;
917 
918 		ret = get_user_pages_fast(start, 1, 0, &page);
919 		if (ret != 1)
920 			return ret;
921 		pfn = page_to_pfn(page);
922 
923 		/*
924 		 * When soft offlining hugepages, after migrating the page
925 		 * we dissolve it, therefore in the second loop "page" will
926 		 * no longer be a compound page.
927 		 */
928 		size = page_size(compound_head(page));
929 
930 		if (behavior == MADV_SOFT_OFFLINE) {
931 			pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
932 				 pfn, start);
933 			ret = soft_offline_page(pfn, MF_COUNT_INCREASED);
934 		} else {
935 			pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
936 				 pfn, start);
937 			ret = memory_failure(pfn, MF_COUNT_INCREASED);
938 		}
939 
940 		if (ret)
941 			return ret;
942 	}
943 
944 	/* Ensure that all poisoned pages are removed from per-cpu lists */
945 	for_each_populated_zone(zone)
946 		drain_all_pages(zone);
947 
948 	return 0;
949 }
950 #endif
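/*
 * Illustrative userspace sketch (not part of this file): how a test might
 * exercise the injection above. Requires CAP_SYS_ADMIN and a kernel built
 * with CONFIG_MEMORY_FAILURE; "page" and "pagesize" are hypothetical and
 * must describe a page-aligned range in a valid mapping.
 *
 *	if (madvise(page, pagesize, MADV_HWPOISON))	// fake an uncorrectable
 *		perror("MADV_HWPOISON");		// hardware error here
 *	if (madvise(page, pagesize, MADV_SOFT_OFFLINE))	// migrate contents,
 *		perror("MADV_SOFT_OFFLINE");		// then offline the page
 */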
951 
952 static long
953 madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
954 		unsigned long start, unsigned long end, int behavior)
955 {
956 	switch (behavior) {
957 	case MADV_REMOVE:
958 		return madvise_remove(vma, prev, start, end);
959 	case MADV_WILLNEED:
960 		return madvise_willneed(vma, prev, start, end);
961 	case MADV_COLD:
962 		return madvise_cold(vma, prev, start, end);
963 	case MADV_PAGEOUT:
964 		return madvise_pageout(vma, prev, start, end);
965 	case MADV_FREE:
966 	case MADV_DONTNEED:
967 		return madvise_dontneed_free(vma, prev, start, end, behavior);
968 	default:
969 		return madvise_behavior(vma, prev, start, end, behavior);
970 	}
971 }
972 
973 static bool
974 madvise_behavior_valid(int behavior)
975 {
976 	switch (behavior) {
977 	case MADV_DOFORK:
978 	case MADV_DONTFORK:
979 	case MADV_NORMAL:
980 	case MADV_SEQUENTIAL:
981 	case MADV_RANDOM:
982 	case MADV_REMOVE:
983 	case MADV_WILLNEED:
984 	case MADV_DONTNEED:
985 	case MADV_FREE:
986 	case MADV_COLD:
987 	case MADV_PAGEOUT:
988 #ifdef CONFIG_KSM
989 	case MADV_MERGEABLE:
990 	case MADV_UNMERGEABLE:
991 #endif
992 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
993 	case MADV_HUGEPAGE:
994 	case MADV_NOHUGEPAGE:
995 #endif
996 	case MADV_DONTDUMP:
997 	case MADV_DODUMP:
998 	case MADV_WIPEONFORK:
999 	case MADV_KEEPONFORK:
1000 #ifdef CONFIG_MEMORY_FAILURE
1001 	case MADV_SOFT_OFFLINE:
1002 	case MADV_HWPOISON:
1003 #endif
1004 		return true;
1005 
1006 	default:
1007 		return false;
1008 	}
1009 }
1010 
1011 static bool
1012 process_madvise_behavior_valid(int behavior)
1013 {
1014 	switch (behavior) {
1015 	case MADV_COLD:
1016 	case MADV_PAGEOUT:
1017 	case MADV_WILLNEED:
1018 		return true;
1019 	default:
1020 		return false;
1021 	}
1022 }
1023 
1024 /*
1025  * The madvise(2) system call.
1026  *
1027  * Applications can use madvise() to advise the kernel how it should
1028  * handle paging I/O in this VM area.  The idea is to help the kernel
1029  * use appropriate read-ahead and caching techniques.  The information
1030  * provided is advisory only, and can be safely disregarded by the
1031  * kernel without affecting the correct operation of the application.
1032  *
1033  * behavior values:
1034  *  MADV_NORMAL - the default behavior is to read clusters.  This
1035  *		results in some read-ahead and read-behind.
1036  *  MADV_RANDOM - the system should read the minimum amount of data
1037  *		on any access, since it is unlikely that the
1038  *		application will need more than what it asks for.
1039  *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
1040  *		once, so they can be aggressively read ahead, and
1041  *		can be freed soon after they are accessed.
1042  *  MADV_WILLNEED - the application is notifying the system to read
1043  *		some pages ahead.
1044  *  MADV_DONTNEED - the application is finished with the given range,
1045  *		so the kernel can free resources associated with it.
1046  *  MADV_FREE - the application marks pages in the given range as lazy free,
1047  *		where actual purges are postponed until memory pressure happens.
1048  *  MADV_REMOVE - the application wants to free up the given range of
1049  *		pages and associated backing store.
1050  *  MADV_DONTFORK - omit this area from child's address space when forking:
1051  *		typically, to avoid COWing pages pinned by get_user_pages().
1052  *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
1053  *  MADV_WIPEONFORK - present the child process with zero-filled memory in this
1054  *              range after a fork.
1055  *  MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK
1056  *  MADV_HWPOISON - trigger memory error handler as if the given memory range
1057  *		were corrupted by unrecoverable hardware memory failure.
1058  *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
1059  *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
1060  *		this area with pages of identical content from other such areas.
1061  *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
1062  *  MADV_HUGEPAGE - the application wants to back the given range by transparent
1063  *		huge pages in the future. Existing pages might be coalesced and
1064  *		new pages might be allocated as THP.
1065  *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
1066  *		transparent huge pages so the existing pages will not be
1067  *		coalesced into THP and new pages will not be allocated as THP.
1068  *  MADV_DONTDUMP - the application wants to prevent pages in the given range
1069  *		from being included in its core dump.
1070  *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
1071  *  MADV_COLD - the application is not expected to use this memory soon,
1072  *		deactivate pages in this range so that they can be reclaimed
1073  *		easily if memory pressure happens.
1074  *  MADV_PAGEOUT - the application is not expected to use this memory soon,
1075  *		page out the pages in this range immediately.
1076  *
1077  * return values:
1078  *  zero    - success
1079  *  -EINVAL - start + len < 0, start is not page-aligned,
1080  *		"behavior" is not a valid value, or application
1081  *		is attempting to release locked or shared pages,
1082  *		or the specified address range includes file, Huge TLB,
1083  *		MAP_SHARED or VM_PFNMAP range.
1084  *  -ENOMEM - addresses in the specified range are not currently
1085  *		mapped, or are outside the AS of the process.
1086  *  -EIO    - an I/O error occurred while paging in data.
1087  *  -EBADF  - map exists, but area maps something that isn't a file.
1088  *  -EAGAIN - a kernel resource was temporarily unavailable.
1089  */
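/*
 * Illustrative userspace sketch (not part of this file): a minimal,
 * plausible caller of madvise(2). The size and access pattern are made up
 * for the example; only the madvise() calls are the point.
 *
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 64UL << 12;	// 64 pages, assuming 4K pages
 *		char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		if (buf == MAP_FAILED)
 *			return 1;
 *		madvise(buf, len, MADV_SEQUENTIAL);	// hint streaming access
 *		for (size_t i = 0; i < len; i++)	// ... use the buffer ...
 *			buf[i] = 1;
 *		madvise(buf, len, MADV_DONTNEED);	// done: let the kernel
 *							// drop the pages now
 *		return !!munmap(buf, len);
 *	}
 */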
1090 int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior)
1091 {
1092 	unsigned long end, tmp;
1093 	struct vm_area_struct *vma, *prev;
1094 	int unmapped_error = 0;
1095 	int error = -EINVAL;
1096 	int write;
1097 	size_t len;
1098 	struct blk_plug plug;
1099 
1100 	start = untagged_addr(start);
1101 
1102 	if (!madvise_behavior_valid(behavior))
1103 		return error;
1104 
1105 	if (!PAGE_ALIGNED(start))
1106 		return error;
1107 	len = PAGE_ALIGN(len_in);
1108 
1109 	/* Check to see whether len was rounded up from a small negative to zero */
1110 	if (len_in && !len)
1111 		return error;
1112 
1113 	end = start + len;
1114 	if (end < start)
1115 		return error;
1116 
1117 	error = 0;
1118 	if (end == start)
1119 		return error;
1120 
1121 #ifdef CONFIG_MEMORY_FAILURE
1122 	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
1123 		return madvise_inject_error(behavior, start, start + len_in);
1124 #endif
1125 
1126 	write = madvise_need_mmap_write(behavior);
1127 	if (write) {
1128 		if (mmap_write_lock_killable(mm))
1129 			return -EINTR;
1130 	} else {
1131 		mmap_read_lock(mm);
1132 	}
1133 
1134 	/*
1135 	 * If the interval [start,end) covers some unmapped address
1136 	 * ranges, just ignore them, but return -ENOMEM at the end.
1137 	 * - different from the way of handling in mlock etc.
1138 	 */
1139 	vma = find_vma_prev(mm, start, &prev);
1140 	if (vma && start > vma->vm_start)
1141 		prev = vma;
1142 
1143 	blk_start_plug(&plug);
1144 	for (;;) {
1145 		/* Still start < end. */
1146 		error = -ENOMEM;
1147 		if (!vma)
1148 			goto out;
1149 
1150 		/* Here start < (end|vma->vm_end). */
1151 		if (start < vma->vm_start) {
1152 			unmapped_error = -ENOMEM;
1153 			start = vma->vm_start;
1154 			if (start >= end)
1155 				goto out;
1156 		}
1157 
1158 		/* Here vma->vm_start <= start < (end|vma->vm_end) */
1159 		tmp = vma->vm_end;
1160 		if (end < tmp)
1161 			tmp = end;
1162 
1163 		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
1164 		error = madvise_vma(vma, &prev, start, tmp, behavior);
1165 		if (error)
1166 			goto out;
1167 		start = tmp;
1168 		if (prev && start < prev->vm_end)
1169 			start = prev->vm_end;
1170 		error = unmapped_error;
1171 		if (start >= end)
1172 			goto out;
1173 		if (prev)
1174 			vma = prev->vm_next;
1175 		else	/* madvise_remove dropped mmap_lock */
1176 			vma = find_vma(mm, start);
1177 	}
1178 out:
1179 	blk_finish_plug(&plug);
1180 	if (write)
1181 		mmap_write_unlock(mm);
1182 	else
1183 		mmap_read_unlock(mm);
1184 
1185 	return error;
1186 }
1187 
1188 SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
1189 {
1190 	return do_madvise(current->mm, start, len_in, behavior);
1191 }
1192 
1193 SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
1194 		size_t, vlen, int, behavior, unsigned int, flags)
1195 {
1196 	ssize_t ret;
1197 	struct iovec iovstack[UIO_FASTIOV], iovec;
1198 	struct iovec *iov = iovstack;
1199 	struct iov_iter iter;
1200 	struct pid *pid;
1201 	struct task_struct *task;
1202 	struct mm_struct *mm;
1203 	size_t total_len;
1204 	unsigned int f_flags;
1205 
1206 	if (flags != 0) {
1207 		ret = -EINVAL;
1208 		goto out;
1209 	}
1210 
1211 	ret = import_iovec(READ, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
1212 	if (ret < 0)
1213 		goto out;
1214 
1215 	pid = pidfd_get_pid(pidfd, &f_flags);
1216 	if (IS_ERR(pid)) {
1217 		ret = PTR_ERR(pid);
1218 		goto free_iov;
1219 	}
1220 
1221 	task = get_pid_task(pid, PIDTYPE_PID);
1222 	if (!task) {
1223 		ret = -ESRCH;
1224 		goto put_pid;
1225 	}
1226 
1227 	if (!process_madvise_behavior_valid(behavior)) {
1228 		ret = -EINVAL;
1229 		goto release_task;
1230 	}
1231 
1232 	/* Require PTRACE_MODE_READ to avoid leaking ASLR metadata. */
1233 	mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
1234 	if (IS_ERR_OR_NULL(mm)) {
1235 		ret = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
1236 		goto release_task;
1237 	}
1238 
1239 	/*
1240 	 * Require CAP_SYS_NICE for influencing process performance. Note that
1241 	 * only non-destructive hints are currently supported.
1242 	 */
1243 	if (!capable(CAP_SYS_NICE)) {
1244 		ret = -EPERM;
1245 		goto release_mm;
1246 	}
1247 
1248 	total_len = iov_iter_count(&iter);
1249 
1250 	while (iov_iter_count(&iter)) {
1251 		iovec = iov_iter_iovec(&iter);
1252 		ret = do_madvise(mm, (unsigned long)iovec.iov_base,
1253 					iovec.iov_len, behavior);
1254 		if (ret < 0)
1255 			break;
1256 		iov_iter_advance(&iter, iovec.iov_len);
1257 	}
1258 
1259 	ret = (total_len - iov_iter_count(&iter)) ? : ret;
1260 
1261 release_mm:
1262 	mmput(mm);
1263 release_task:
1264 	put_task_struct(task);
1265 put_pid:
1266 	put_pid(pid);
1267 free_iov:
1268 	kfree(iov);
1269 out:
1270 	return ret;
1271 }
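/*
 * Illustrative userspace sketch (not part of this file): driving
 * process_madvise(2) through syscall(2), since a libc wrapper may not be
 * available. The caller needs PTRACE_MODE_READ access to the target plus
 * CAP_SYS_NICE, as checked above. pidfd, addr and len are hypothetical and
 * must refer to a valid range in the target's address space.
 *
 *	#include <sys/mman.h>
 *	#include <sys/syscall.h>
 *	#include <sys/uio.h>
 *	#include <unistd.h>
 *
 *	static ssize_t pageout_remote(int pidfd, void *addr, size_t len)
 *	{
 *		struct iovec iov = { .iov_base = addr, .iov_len = len };
 *
 *		// returns the number of bytes advised, or -1 on error
 *		return syscall(__NR_process_madvise, pidfd, &iov, 1,
 *			       MADV_PAGEOUT, 0);
 *	}
 */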
1272