xref: /OK3568_Linux_fs/kernel/mm/mremap.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *	mm/mremap.c
4  *
5  *	(C) Copyright 1996 Linus Torvalds
6  *
7  *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
8  *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
9  */
10 
11 #include <linux/mm.h>
12 #include <linux/hugetlb.h>
13 #include <linux/shm.h>
14 #include <linux/ksm.h>
15 #include <linux/mman.h>
16 #include <linux/swap.h>
17 #include <linux/capability.h>
18 #include <linux/fs.h>
19 #include <linux/swapops.h>
20 #include <linux/highmem.h>
21 #include <linux/security.h>
22 #include <linux/syscalls.h>
23 #include <linux/mmu_notifier.h>
24 #include <linux/uaccess.h>
25 #include <linux/mm-arch-hooks.h>
26 #include <linux/userfaultfd_k.h>
27 
28 #include <asm/cacheflush.h>
29 #include <asm/tlbflush.h>
30 
31 #include "internal.h"
32 
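/*
 * Page-table walk helpers for the source (old) side: get_old_pud() and
 * get_old_pmd() return the existing pud/pmd entry covering @addr, or NULL
 * if any intermediate level is not present, in which case there is nothing
 * to move for that range.
 */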
33 static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
34 {
35 	pgd_t *pgd;
36 	p4d_t *p4d;
37 	pud_t *pud;
38 
39 	pgd = pgd_offset(mm, addr);
40 	if (pgd_none_or_clear_bad(pgd))
41 		return NULL;
42 
43 	p4d = p4d_offset(pgd, addr);
44 	if (p4d_none_or_clear_bad(p4d))
45 		return NULL;
46 
47 	pud = pud_offset(p4d, addr);
48 	if (pud_none_or_clear_bad(pud))
49 		return NULL;
50 
51 	return pud;
52 }
53 
54 static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
55 {
56 	pud_t *pud;
57 	pmd_t *pmd;
58 
59 	pud = get_old_pud(mm, addr);
60 	if (!pud)
61 		return NULL;
62 
63 	pmd = pmd_offset(pud, addr);
64 	if (pmd_none(*pmd))
65 		return NULL;
66 
67 	return pmd;
68 }
69 
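/*
 * Destination-side counterparts: alloc_new_pud() and alloc_new_pmd()
 * allocate any missing page-table levels for the new address and return
 * NULL only on allocation failure.
 */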
70 static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,
71 			    unsigned long addr)
72 {
73 	pgd_t *pgd;
74 	p4d_t *p4d;
75 
76 	pgd = pgd_offset(mm, addr);
77 	p4d = p4d_alloc(mm, pgd, addr);
78 	if (!p4d)
79 		return NULL;
80 
81 	return pud_alloc(mm, p4d, addr);
82 }
83 
84 static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
85 			    unsigned long addr)
86 {
87 	pud_t *pud;
88 	pmd_t *pmd;
89 
90 	pud = alloc_new_pud(mm, vma, addr);
91 	if (!pud)
92 		return NULL;
93 
94 	pmd = pmd_alloc(mm, pud, addr);
95 	if (!pmd)
96 		return NULL;
97 
98 	VM_BUG_ON(pmd_trans_huge(*pmd));
99 
100 	return pmd;
101 }
102 
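/*
 * take_rmap_locks()/drop_rmap_locks() grab (and release, in reverse order)
 * the file rmap lock and the anon_vma lock for @vma, so that rmap walks
 * always observe either the old or the new ptes while entries are moved.
 */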
103 static void take_rmap_locks(struct vm_area_struct *vma)
104 {
105 	if (vma->vm_file)
106 		i_mmap_lock_write(vma->vm_file->f_mapping);
107 	if (vma->anon_vma)
108 		anon_vma_lock_write(vma->anon_vma);
109 }
110 
111 static void drop_rmap_locks(struct vm_area_struct *vma)
112 {
113 	if (vma->anon_vma)
114 		anon_vma_unlock_write(vma->anon_vma);
115 	if (vma->vm_file)
116 		i_mmap_unlock_write(vma->vm_file->f_mapping);
117 }
118 
119 static pte_t move_soft_dirty_pte(pte_t pte)
120 {
121 	/*
122 	 * Set the soft-dirty bit so userspace can
123 	 * notice that the ptes were moved.
124 	 */
125 #ifdef CONFIG_MEM_SOFT_DIRTY
126 	if (pte_present(pte))
127 		pte = pte_mksoft_dirty(pte);
128 	else if (is_swap_pte(pte))
129 		pte = pte_swp_mksoft_dirty(pte);
130 #endif
131 	return pte;
132 }
133 
134 static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
135 		unsigned long old_addr, unsigned long old_end,
136 		struct vm_area_struct *new_vma, pmd_t *new_pmd,
137 		unsigned long new_addr, bool need_rmap_locks)
138 {
139 	struct mm_struct *mm = vma->vm_mm;
140 	pte_t *old_pte, *new_pte, pte;
141 	spinlock_t *old_ptl, *new_ptl;
142 	bool force_flush = false;
143 	unsigned long len = old_end - old_addr;
144 
145 	/*
146 	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
147 	 * locks to ensure that rmap will always observe either the old or the
148 	 * new ptes. This is the easiest way to avoid races with
149 	 * truncate_pagecache(), page migration, etc...
150 	 *
151 	 * When need_rmap_locks is false, we use other ways to avoid
152 	 * such races:
153 	 *
154 	 * - During exec() shift_arg_pages(), we use a specially tagged vma
155 	 *   which rmap call sites look for using vma_is_temporary_stack().
156 	 *
157 	 * - During mremap(), new_vma is often known to be placed after vma
158 	 *   in rmap traversal order. This ensures rmap will always observe
159 	 *   either the old pte, or the new pte, or both (the page table locks
160 	 *   serialize access to individual ptes, but only rmap traversal
161 	 *   order guarantees that we won't miss both the old and new ptes).
162 	 */
163 	if (need_rmap_locks)
164 		take_rmap_locks(vma);
165 
166 	/*
167 	 * We don't have to worry about the ordering of src and dst
168 	 * pte locks because exclusive mmap_lock prevents deadlock.
169 	 */
170 	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
171 	new_pte = pte_offset_map(new_pmd, new_addr);
172 	new_ptl = pte_lockptr(mm, new_pmd);
173 	if (new_ptl != old_ptl)
174 		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
175 	flush_tlb_batched_pending(vma->vm_mm);
176 	arch_enter_lazy_mmu_mode();
177 
178 	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
179 				   new_pte++, new_addr += PAGE_SIZE) {
180 		if (pte_none(*old_pte))
181 			continue;
182 
183 		pte = ptep_get_and_clear(mm, old_addr, old_pte);
184 		/*
185 		 * If we are remapping a valid PTE, make sure
186 		 * to flush TLB before we drop the PTL for the
187 		 * PTE.
188 		 *
189 		 * NOTE! Both old and new PTL matter: the old one
190 		 * for racing with page_mkclean(), the new one to
191 		 * make sure the physical page stays valid until
192 		 * the TLB entry for the old mapping has been
193 		 * flushed.
194 		 */
195 		if (pte_present(pte))
196 			force_flush = true;
197 		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
198 		pte = move_soft_dirty_pte(pte);
199 		set_pte_at(mm, new_addr, new_pte, pte);
200 	}
201 
202 	arch_leave_lazy_mmu_mode();
203 	if (force_flush)
204 		flush_tlb_range(vma, old_end - len, old_end);
205 	if (new_ptl != old_ptl)
206 		spin_unlock(new_ptl);
207 	pte_unmap(new_pte - 1);
208 	pte_unmap_unlock(old_pte - 1, old_ptl);
209 	if (need_rmap_locks)
210 		drop_rmap_locks(vma);
211 }
212 
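/*
 * With CONFIG_SPECULATIVE_PAGE_FAULT, the fast PMD/PUD moves below must
 * also exclude speculative faults, which take a VMA reference via
 * get_vma(). trylock_vma_ref_count() does that by swapping the refcount
 * to -1 for the duration of the move; without SPF it is a no-op.
 */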
213 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
214 static inline bool trylock_vma_ref_count(struct vm_area_struct *vma)
215 {
216 	/*
217 	 * If we have the only reference, swap the refcount to -1. This
218 	 * will prevent other concurrent references by get_vma() for SPFs.
219 	 */
220 	return atomic_cmpxchg(&vma->vm_ref_count, 1, -1) == 1;
221 }
222 
223 /*
224  * Restore the VMA reference count to 1 after a fast mremap.
225  */
226 static inline void unlock_vma_ref_count(struct vm_area_struct *vma)
227 {
228 	/*
229 	 * This should only be called after a corresponding,
230 	 * successful trylock_vma_ref_count().
231 	 */
232 	VM_BUG_ON_VMA(atomic_cmpxchg(&vma->vm_ref_count, -1, 1) != -1,
233 		      vma);
234 }
235 #else	/* !CONFIG_SPECULATIVE_PAGE_FAULT */
236 static inline bool trylock_vma_ref_count(struct vm_area_struct *vma)
237 {
238 	return true;
239 }
240 static inline void unlock_vma_ref_count(struct vm_area_struct *vma)
241 {
242 }
243 #endif	/* CONFIG_SPECULATIVE_PAGE_FAULT */
244 
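/*
 * When CONFIG_HAVE_MOVE_PMD/PUD are available, a fully aligned extent can
 * be moved by relocating the page-table entry itself (move_normal_pmd()/
 * move_normal_pud()) instead of copying every pte, which is much cheaper
 * for large remaps.
 */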
245 #ifdef CONFIG_HAVE_MOVE_PMD
246 static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
247 		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
248 {
249 	spinlock_t *old_ptl, *new_ptl;
250 	struct mm_struct *mm = vma->vm_mm;
251 	pmd_t pmd;
252 
253 	/*
254 	 * The destination pmd shouldn't be established, free_pgtables()
255 	 * should have released it.
256 	 *
257 	 * However, there's a case during execve() where we use mremap
258 	 * to move the initial stack, and in that case the target area
259 	 * may overlap the source area (always moving down).
260 	 *
261 	 * If everything is PMD-aligned, that works fine, as moving
262 	 * each pmd down will clear the source pmd. But if we first
263 	 * have a few 4kB-only pages that get moved down, and then
264 	 * hit the "now the rest is PMD-aligned, let's do everything
265 	 * one pmd at a time", we will still have the old (now empty
266 	 * of any 4kB pages, but still there) PMD in the page table
267 	 * tree.
268 	 *
269 	 * Warn on it once - because we really should try to figure
270 	 * out how to do this better - but then say "I won't move
271 	 * this pmd".
272 	 *
273 	 * One alternative might be to just unmap the target pmd at
274 	 * this point, and verify that it really is empty. We'll see.
275 	 */
276 	if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
277 		return false;
278 
279 	/*
280 	 * We hold both exclusive mmap_lock and rmap_lock at this point and
281 	 * cannot block. If we cannot immediately take exclusive ownership
282 	 * of the VMA fallback to the move_ptes().
283 	 * of the VMA, fall back to move_ptes().
284 	if (!trylock_vma_ref_count(vma))
285 		return false;
286 
287 	/*
288 	 * We don't have to worry about the ordering of src and dst
289 	 * ptlocks because exclusive mmap_lock prevents deadlock.
290 	 */
291 	old_ptl = pmd_lock(vma->vm_mm, old_pmd);
292 	new_ptl = pmd_lockptr(mm, new_pmd);
293 	if (new_ptl != old_ptl)
294 		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
295 
296 	/* Clear the pmd */
297 	pmd = *old_pmd;
298 	pmd_clear(old_pmd);
299 
300 	VM_BUG_ON(!pmd_none(*new_pmd));
301 
302 	/* Set the new pmd */
303 	set_pmd_at(mm, new_addr, new_pmd, pmd);
304 	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
305 	if (new_ptl != old_ptl)
306 		spin_unlock(new_ptl);
307 	spin_unlock(old_ptl);
308 
309 	unlock_vma_ref_count(vma);
310 	return true;
311 }
312 #else
313 static inline bool move_normal_pmd(struct vm_area_struct *vma,
314 		unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd,
315 		pmd_t *new_pmd)
316 {
317 	return false;
318 }
319 #endif
320 
321 #ifdef CONFIG_HAVE_MOVE_PUD
322 static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
323 		  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
324 {
325 	spinlock_t *old_ptl, *new_ptl;
326 	struct mm_struct *mm = vma->vm_mm;
327 	pud_t pud;
328 
329 	/*
330 	 * The destination pud shouldn't be established, free_pgtables()
331 	 * should have released it.
332 	 */
333 	if (WARN_ON_ONCE(!pud_none(*new_pud)))
334 		return false;
335 
336 	/*
337 	 * We hold both exclusive mmap_lock and rmap_lock at this point and
338 	 * cannot block. If we cannot immediately take exclusive ownership
339 	 * of the VMA, fall back to move_ptes().
340 	 */
341 	if (!trylock_vma_ref_count(vma))
342 		return false;
343 
344 	/*
345 	 * We don't have to worry about the ordering of src and dst
346 	 * ptlocks because exclusive mmap_lock prevents deadlock.
347 	 */
348 	old_ptl = pud_lock(vma->vm_mm, old_pud);
349 	new_ptl = pud_lockptr(mm, new_pud);
350 	if (new_ptl != old_ptl)
351 		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
352 
353 	/* Clear the pud */
354 	pud = *old_pud;
355 	pud_clear(old_pud);
356 
357 	VM_BUG_ON(!pud_none(*new_pud));
358 
359 	/* Set the new pud */
360 	set_pud_at(mm, new_addr, new_pud, pud);
361 	flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE);
362 	if (new_ptl != old_ptl)
363 		spin_unlock(new_ptl);
364 	spin_unlock(old_ptl);
365 
366 	unlock_vma_ref_count(vma);
367 	return true;
368 }
369 #else
370 static inline bool move_normal_pud(struct vm_area_struct *vma,
371 		unsigned long old_addr, unsigned long new_addr, pud_t *old_pud,
372 		pud_t *new_pud)
373 {
374 	return false;
375 }
376 #endif
377 
378 enum pgt_entry {
379 	NORMAL_PMD,
380 	HPAGE_PMD,
381 	NORMAL_PUD,
382 };
383 
384 /*
385  * Returns an extent of the corresponding size for the pgt_entry specified if
386  * valid. Else returns a smaller extent bounded by the end of the source and
387  * destination pgt_entry.
388  */
389 static __always_inline unsigned long get_extent(enum pgt_entry entry,
390 			unsigned long old_addr, unsigned long old_end,
391 			unsigned long new_addr)
392 {
393 	unsigned long next, extent, mask, size;
394 
395 	switch (entry) {
396 	case HPAGE_PMD:
397 	case NORMAL_PMD:
398 		mask = PMD_MASK;
399 		size = PMD_SIZE;
400 		break;
401 	case NORMAL_PUD:
402 		mask = PUD_MASK;
403 		size = PUD_SIZE;
404 		break;
405 	default:
406 		BUILD_BUG();
407 		break;
408 	}
409 
410 	next = (old_addr + size) & mask;
411 	/* even if next overflowed, extent below will be ok */
412 	extent = next - old_addr;
413 	if (extent > old_end - old_addr)
414 		extent = old_end - old_addr;
415 	next = (new_addr + size) & mask;
416 	if (extent > next - new_addr)
417 		extent = next - new_addr;
418 	return extent;
419 }
420 
421 /*
422  * Attempts to speedup the move by moving entry at the level corresponding to
423  * pgt_entry. Returns true if the move was successful, else false.
424  */
425 static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
426 			unsigned long old_addr, unsigned long new_addr,
427 			void *old_entry, void *new_entry, bool need_rmap_locks)
428 {
429 	bool moved = false;
430 
431 	/* See comment in move_ptes() */
432 	if (need_rmap_locks)
433 		take_rmap_locks(vma);
434 
435 	switch (entry) {
436 	case NORMAL_PMD:
437 		moved = move_normal_pmd(vma, old_addr, new_addr, old_entry,
438 					new_entry);
439 		break;
440 	case NORMAL_PUD:
441 		moved = move_normal_pud(vma, old_addr, new_addr, old_entry,
442 					new_entry);
443 		break;
444 	case HPAGE_PMD:
445 		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
446 			move_huge_pmd(vma, old_addr, new_addr, old_entry,
447 				      new_entry);
448 		break;
449 	default:
450 		WARN_ON_ONCE(1);
451 		break;
452 	}
453 
454 	if (need_rmap_locks)
455 		drop_rmap_locks(vma);
456 
457 	return moved;
458 }
459 
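/*
 * Move up to @len bytes of page-table entries from @old_addr in @vma to
 * @new_addr in @new_vma. Returns the number of bytes actually moved; a
 * return value smaller than @len means a page-table allocation failed, and
 * the caller (see move_vma() below) moves the entries back on error.
 */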
460 unsigned long move_page_tables(struct vm_area_struct *vma,
461 		unsigned long old_addr, struct vm_area_struct *new_vma,
462 		unsigned long new_addr, unsigned long len,
463 		bool need_rmap_locks)
464 {
465 	unsigned long extent, old_end;
466 	struct mmu_notifier_range range;
467 	pmd_t *old_pmd, *new_pmd;
468 
469 	if (!len)
470 		return 0;
471 
472 	old_end = old_addr + len;
473 	flush_cache_range(vma, old_addr, old_end);
474 
475 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
476 				old_addr, old_end);
477 	mmu_notifier_invalidate_range_start(&range);
478 
479 	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
480 		cond_resched();
481 		/*
482 		 * If the extent is PUD-sized, try to speed up the move by moving
483 		 * at the PUD level if possible.
484 		 */
485 		extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr);
486 		if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
487 			pud_t *old_pud, *new_pud;
488 
489 			old_pud = get_old_pud(vma->vm_mm, old_addr);
490 			if (!old_pud)
491 				continue;
492 			new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
493 			if (!new_pud)
494 				break;
495 			if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
496 					   old_pud, new_pud, true))
497 				continue;
498 		}
499 
500 		extent = get_extent(NORMAL_PMD, old_addr, old_end, new_addr);
501 		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
502 		if (!old_pmd)
503 			continue;
504 		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
505 		if (!new_pmd)
506 			break;
507 		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) ||
508 		    pmd_devmap(*old_pmd)) {
509 			if (extent == HPAGE_PMD_SIZE &&
510 			    move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr,
511 					   old_pmd, new_pmd, need_rmap_locks))
512 				continue;
513 			split_huge_pmd(vma, old_pmd, old_addr);
514 			if (pmd_trans_unstable(old_pmd))
515 				continue;
516 		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) &&
517 			   extent == PMD_SIZE) {
518 			/*
519 			 * If the extent is PMD-sized, try to speed up the move by
520 			 * moving at the PMD level if possible.
521 			 */
522 			if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr,
523 					   old_pmd, new_pmd, true))
524 				continue;
525 		}
526 
527 		if (pte_alloc(new_vma->vm_mm, new_pmd))
528 			break;
529 		move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
530 			  new_pmd, new_addr, need_rmap_locks);
531 	}
532 
533 	mmu_notifier_invalidate_range_end(&range);
534 
535 	return len + old_addr - old_end;	/* how much done */
536 }
537 
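/*
 * move_vma() does the actual relocation for mremap(): it sets up a new VMA
 * at @new_addr via copy_vma(), moves the page tables across, fixes up
 * accounting and locked-memory statistics, and (unless MREMAP_DONTUNMAP
 * was requested) unmaps the old range.
 */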
538 static unsigned long move_vma(struct vm_area_struct *vma,
539 		unsigned long old_addr, unsigned long old_len,
540 		unsigned long new_len, unsigned long new_addr,
541 		bool *locked, unsigned long flags,
542 		struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)
543 {
544 	struct mm_struct *mm = vma->vm_mm;
545 	struct vm_area_struct *new_vma;
546 	unsigned long vm_flags = vma->vm_flags;
547 	unsigned long new_pgoff;
548 	unsigned long moved_len;
549 	unsigned long excess = 0;
550 	unsigned long hiwater_vm;
551 	int split = 0;
552 	int err;
553 	bool need_rmap_locks;
554 
555 	/*
556 	 * We'd prefer to avoid failure later on in do_munmap(),
557 	 * which may split one vma into three before unmapping.
558 	 */
559 	if (mm->map_count >= sysctl_max_map_count - 3)
560 		return -ENOMEM;
561 
562 	/*
563 	 * Advise KSM to break any KSM pages in the area to be moved:
564 	 * it would be confusing if they were to turn up at the new
565 	 * location, where they happen to coincide with different KSM
566 	 * pages recently unmapped.  But leave vma->vm_flags as it was,
567 	 * so KSM can come around to merge on vma and new_vma afterwards.
568 	 */
569 	err = ksm_madvise(vma, old_addr, old_addr + old_len,
570 						MADV_UNMERGEABLE, &vm_flags);
571 	if (err)
572 		return err;
573 
574 	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
575 	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
576 			   &need_rmap_locks);
577 	if (!new_vma)
578 		return -ENOMEM;
579 
580 	/* new_vma is returned protected by copy_vma, to prevent speculative
581 	 * page faults from being handled in the destination area before we move the ptes.
582 	 * Now, we must also protect the source VMA since we don't want pages
583 	 * to be mapped behind our back while we are copying the PTEs.
584 	 */
585 	if (vma != new_vma)
586 		vm_write_begin(vma);
587 
588 	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
589 				     need_rmap_locks);
590 	if (moved_len < old_len) {
591 		err = -ENOMEM;
592 	} else if (vma->vm_ops && vma->vm_ops->mremap) {
593 		err = vma->vm_ops->mremap(new_vma);
594 	}
595 
596 	if (unlikely(err)) {
597 		/*
598 		 * On error, move entries back from new area to old,
599 		 * which will succeed since page tables still there,
600 		 * and then proceed to unmap new area instead of old.
601 		 */
602 		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
603 				 true);
604 		if (vma != new_vma)
605 			vm_write_end(vma);
606 		vma = new_vma;
607 		old_len = new_len;
608 		old_addr = new_addr;
609 		new_addr = err;
610 	} else {
611 		mremap_userfaultfd_prep(new_vma, uf);
612 		arch_remap(mm, old_addr, old_addr + old_len,
613 			   new_addr, new_addr + new_len);
614 		if (vma != new_vma)
615 			vm_write_end(vma);
616 	}
617 	vm_write_end(new_vma);
618 
619 	/* Conceal VM_ACCOUNT so old reservation is not undone */
620 	if (vm_flags & VM_ACCOUNT) {
621 		vma->vm_flags &= ~VM_ACCOUNT;
622 		excess = vma->vm_end - vma->vm_start - old_len;
623 		if (old_addr > vma->vm_start &&
624 		    old_addr + old_len < vma->vm_end)
625 			split = 1;
626 	}
627 
628 	/*
629 	 * If we failed to move page tables we still do total_vm increment
630 	 * since do_munmap() will decrement it by old_len == new_len.
631 	 *
632 	 * Since total_vm is about to be raised artificially high for a
633 	 * moment, we need to restore high watermark afterwards: if stats
634 	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
635 	 * If this were a serious issue, we'd add a flag to do_munmap().
636 	 */
637 	hiwater_vm = mm->hiwater_vm;
638 	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);
639 
640 	/* Tell the pfn tracking code that the mapping has moved from this vma */
641 	if (unlikely(vma->vm_flags & VM_PFNMAP))
642 		untrack_pfn_moved(vma);
643 
644 	if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) {
645 		if (vm_flags & VM_ACCOUNT) {
646 			/* Always put back VM_ACCOUNT since we won't unmap */
647 			vma->vm_flags |= VM_ACCOUNT;
648 
649 			vm_acct_memory(new_len >> PAGE_SHIFT);
650 		}
651 
652 		/*
653 		 * VMAs can actually be merged back together in copy_vma
654 	 * calling vma_merge(). This can happen with anonymous vmas
655 		 * which have not yet been faulted, so if we were to consider
656 		 * this VMA split we'll end up adding VM_ACCOUNT on the
657 		 * next VMA, which is completely unrelated if this VMA
658 		 * was re-merged.
659 		 */
660 		if (split && new_vma == vma)
661 			split = 0;
662 
663 		/* We always clear VM_LOCKED[ONFAULT] on the old vma */
664 		vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
665 
666 		/* Because we won't unmap we don't need to touch locked_vm */
667 		goto out;
668 	}
669 
670 	if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
671 		/* OOM: unable to split vma, just get accounts right */
672 		vm_unacct_memory(excess >> PAGE_SHIFT);
673 		excess = 0;
674 	}
675 
676 	if (vm_flags & VM_LOCKED) {
677 		mm->locked_vm += new_len >> PAGE_SHIFT;
678 		*locked = true;
679 	}
680 out:
681 	mm->hiwater_vm = hiwater_vm;
682 
683 	/* Restore VM_ACCOUNT if one or two pieces of vma left */
684 	if (excess) {
685 		vma->vm_flags |= VM_ACCOUNT;
686 		if (split)
687 			vma->vm_next->vm_flags |= VM_ACCOUNT;
688 	}
689 
690 	return new_addr;
691 }
692 
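/*
 * Look up and validate the VMA backing [addr, addr + old_len) for a resize
 * to new_len: the range must lie within a single VMA, hugetlb mappings are
 * rejected, growth of VM_DONTEXPAND/VM_PFNMAP mappings is refused, and the
 * mlock and address-space limits are enforced. When the VMA is VM_ACCOUNT,
 * the number of newly charged pages is returned through *p.
 */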
693 static struct vm_area_struct *vma_to_resize(unsigned long addr,
694 	unsigned long old_len, unsigned long new_len, unsigned long flags,
695 	unsigned long *p)
696 {
697 	struct mm_struct *mm = current->mm;
698 	struct vm_area_struct *vma = find_vma(mm, addr);
699 	unsigned long pgoff;
700 
701 	if (!vma || vma->vm_start > addr)
702 		return ERR_PTR(-EFAULT);
703 
704 	/*
705 	 * !old_len is a special case where an attempt is made to 'duplicate'
706 	 * a mapping.  This makes no sense for private mappings as it will
707 	 * instead create a fresh/new mapping unrelated to the original.  This
708 	 * is contrary to the basic idea of mremap which creates new mappings
709 	 * based on the original.  There are no known use cases for this
710 	 * behavior.  As a result, fail such attempts.
711 	 */
712 	if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
713 		pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap.  This is not supported.\n", current->comm, current->pid);
714 		return ERR_PTR(-EINVAL);
715 	}
716 
717 	if ((flags & MREMAP_DONTUNMAP) &&
718 			(vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)))
719 		return ERR_PTR(-EINVAL);
720 
721 	if (is_vm_hugetlb_page(vma))
722 		return ERR_PTR(-EINVAL);
723 
724 	/* We can't remap across vm area boundaries */
725 	if (old_len > vma->vm_end - addr)
726 		return ERR_PTR(-EFAULT);
727 
728 	if (new_len == old_len)
729 		return vma;
730 
731 	/* Need to be careful about a growing mapping */
732 	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
733 	pgoff += vma->vm_pgoff;
734 	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
735 		return ERR_PTR(-EINVAL);
736 
737 	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
738 		return ERR_PTR(-EFAULT);
739 
740 	if (vma->vm_flags & VM_LOCKED) {
741 		unsigned long locked, lock_limit;
742 		locked = mm->locked_vm << PAGE_SHIFT;
743 		lock_limit = rlimit(RLIMIT_MEMLOCK);
744 		locked += new_len - old_len;
745 		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
746 			return ERR_PTR(-EAGAIN);
747 	}
748 
749 	if (!may_expand_vm(mm, vma->vm_flags,
750 				(new_len - old_len) >> PAGE_SHIFT))
751 		return ERR_PTR(-ENOMEM);
752 
753 	if (vma->vm_flags & VM_ACCOUNT) {
754 		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
755 		if (security_vm_enough_memory_mm(mm, charged))
756 			return ERR_PTR(-ENOMEM);
757 		*p = charged;
758 	}
759 
760 	return vma;
761 }
762 
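/*
 * Handle the MREMAP_FIXED / MREMAP_DONTUNMAP case: move (and possibly
 * resize) the mapping to a caller-supplied new_addr, first unmapping
 * whatever is currently there when MREMAP_FIXED is set.
 */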
763 static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
764 		unsigned long new_addr, unsigned long new_len, bool *locked,
765 		unsigned long flags, struct vm_userfaultfd_ctx *uf,
766 		struct list_head *uf_unmap_early,
767 		struct list_head *uf_unmap)
768 {
769 	struct mm_struct *mm = current->mm;
770 	struct vm_area_struct *vma;
771 	unsigned long ret = -EINVAL;
772 	unsigned long charged = 0;
773 	unsigned long map_flags = 0;
774 
775 	if (offset_in_page(new_addr))
776 		goto out;
777 
778 	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
779 		goto out;
780 
781 	/* Ensure the old/new locations do not overlap */
782 	if (addr + old_len > new_addr && new_addr + new_len > addr)
783 		goto out;
784 
785 	/*
786 	 * move_vma() needs us to stay 4 maps below the threshold, otherwise
787 	 * it will bail out at the very beginning.
788 	 * That is a problem if we have already unmapped the regions here
789 	 * (new_addr and old_addr), because userspace will not know the
790 	 * state of the vmas after it gets -ENOMEM.
791 	 * So, to avoid such a scenario we can pre-compute whether the whole
792 	 * operation has a high chance of succeeding map-wise.
793 	 * The worst case is when both vmas (new_addr and old_addr) get
794 	 * split in 3 before unmapping them.
795 	 * That means 2 more maps (1 for each) on top of the ones we already hold.
796 	 * Check whether the current map count plus 2 still leaves us 4 maps below
797 	 * the threshold, otherwise return -ENOMEM here to be safe.
798 	 */
799 	if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
800 		return -ENOMEM;
801 
802 	if (flags & MREMAP_FIXED) {
803 		ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
804 		if (ret)
805 			goto out;
806 	}
807 
808 	if (old_len >= new_len) {
809 		ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
810 		if (ret && old_len != new_len)
811 			goto out;
812 		old_len = new_len;
813 	}
814 
815 	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
816 	if (IS_ERR(vma)) {
817 		ret = PTR_ERR(vma);
818 		goto out;
819 	}
820 
821 	/* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
822 	if (flags & MREMAP_DONTUNMAP &&
823 		!may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) {
824 		ret = -ENOMEM;
825 		goto out;
826 	}
827 
828 	if (flags & MREMAP_FIXED)
829 		map_flags |= MAP_FIXED;
830 
831 	if (vma->vm_flags & VM_MAYSHARE)
832 		map_flags |= MAP_SHARED;
833 
834 	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
835 				((addr - vma->vm_start) >> PAGE_SHIFT),
836 				map_flags);
837 	if (IS_ERR_VALUE(ret))
838 		goto out1;
839 
840 	/* We got a new mapping */
841 	if (!(flags & MREMAP_FIXED))
842 		new_addr = ret;
843 
844 	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
845 		       uf_unmap);
846 
847 	if (!(offset_in_page(ret)))
848 		goto out;
849 
850 out1:
851 	vm_unacct_memory(charged);
852 
853 out:
854 	return ret;
855 }
856 
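/*
 * Check whether @vma can be grown in place by @delta bytes: the new end
 * must not overflow, must not run into the next VMA, and must still be an
 * acceptable fixed mapping for the architecture's get_unmapped_area().
 */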
857 static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
858 {
859 	unsigned long end = vma->vm_end + delta;
860 	if (end < vma->vm_end) /* overflow */
861 		return 0;
862 	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
863 		return 0;
864 	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
865 			      0, MAP_FIXED) & ~PAGE_MASK)
866 		return 0;
867 	return 1;
868 }
869 
870 /*
871  * Expand (or shrink) an existing mapping, potentially moving it at the
872  * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
873  *
874  * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
875  * This option implies MREMAP_MAYMOVE.
876  */
877 SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
878 		unsigned long, new_len, unsigned long, flags,
879 		unsigned long, new_addr)
880 {
881 	struct mm_struct *mm = current->mm;
882 	struct vm_area_struct *vma;
883 	unsigned long ret = -EINVAL;
884 	unsigned long charged = 0;
885 	bool locked = false;
886 	bool downgraded = false;
887 	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
888 	LIST_HEAD(uf_unmap_early);
889 	LIST_HEAD(uf_unmap);
890 
891 	/*
892 	 * There is a deliberate asymmetry here: we strip the pointer tag
893 	 * from the old address but leave the new address alone. This is
894 	 * for consistency with mmap(), where we prevent the creation of
895 	 * aliasing mappings in userspace by leaving the tag bits of the
896 	 * mapping address intact. A non-zero tag will cause the subsequent
897 	 * range checks to reject the address as invalid.
898 	 *
899 	 * See Documentation/arm64/tagged-address-abi.rst for more information.
900 	 */
901 	addr = untagged_addr(addr);
902 
903 	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
904 		return ret;
905 
906 	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
907 		return ret;
908 
909 	/*
910 	 * MREMAP_DONTUNMAP is always a move and it does not allow resizing
911 	 * in the process.
912 	 */
913 	if (flags & MREMAP_DONTUNMAP &&
914 			(!(flags & MREMAP_MAYMOVE) || old_len != new_len))
915 		return ret;
916 
917 
918 	if (offset_in_page(addr))
919 		return ret;
920 
921 	old_len = PAGE_ALIGN(old_len);
922 	new_len = PAGE_ALIGN(new_len);
923 
924 	/*
925 	 * We allow a zero old-len as a special case
926 	 * for the DOS-emu "duplicate shm area" thing. But
927 	 * a zero new-len is nonsensical.
928 	 */
929 	if (!new_len)
930 		return ret;
931 
932 	if (mmap_write_lock_killable(current->mm))
933 		return -EINTR;
934 
935 	if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) {
936 		ret = mremap_to(addr, old_len, new_addr, new_len,
937 				&locked, flags, &uf, &uf_unmap_early,
938 				&uf_unmap);
939 		goto out;
940 	}
941 
942 	/*
943 	 * Always allow a shrinking remap: that just unmaps
944 	 * the unnecessary pages..
945 	 * __do_munmap does all the needed commit accounting, and
946 	 * downgrades mmap_lock to read if so directed.
947 	 */
948 	if (old_len >= new_len) {
949 		int retval;
950 
951 		retval = __do_munmap(mm, addr+new_len, old_len - new_len,
952 				  &uf_unmap, true);
953 		if (retval < 0 && old_len != new_len) {
954 			ret = retval;
955 			goto out;
956 		/* Returning 1 indicates mmap_lock is downgraded to read. */
957 		} else if (retval == 1)
958 			downgraded = true;
959 		ret = addr;
960 		goto out;
961 	}
962 
963 	/*
964 	 * Ok, we need to grow..
965 	 */
966 	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
967 	if (IS_ERR(vma)) {
968 		ret = PTR_ERR(vma);
969 		goto out;
970 	}
971 
972 	/* old_len exactly to the end of the area..
973 	 */
974 	if (old_len == vma->vm_end - addr) {
975 		/* can we just expand the current mapping? */
976 		if (vma_expandable(vma, new_len - old_len)) {
977 			int pages = (new_len - old_len) >> PAGE_SHIFT;
978 
979 			if (vma_adjust(vma, vma->vm_start, addr + new_len,
980 				       vma->vm_pgoff, NULL)) {
981 				ret = -ENOMEM;
982 				goto out;
983 			}
984 
985 			vm_stat_account(mm, vma->vm_flags, pages);
986 			if (vma->vm_flags & VM_LOCKED) {
987 				mm->locked_vm += pages;
988 				locked = true;
989 				new_addr = addr;
990 			}
991 			ret = addr;
992 			goto out;
993 		}
994 	}
995 
996 	/*
997 	 * We weren't able to just expand or shrink the area,
998 	 * we need to create a new one and move it..
999 	 */
1000 	ret = -ENOMEM;
1001 	if (flags & MREMAP_MAYMOVE) {
1002 		unsigned long map_flags = 0;
1003 		if (vma->vm_flags & VM_MAYSHARE)
1004 			map_flags |= MAP_SHARED;
1005 
1006 		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
1007 					vma->vm_pgoff +
1008 					((addr - vma->vm_start) >> PAGE_SHIFT),
1009 					map_flags);
1010 		if (IS_ERR_VALUE(new_addr)) {
1011 			ret = new_addr;
1012 			goto out;
1013 		}
1014 
1015 		ret = move_vma(vma, addr, old_len, new_len, new_addr,
1016 			       &locked, flags, &uf, &uf_unmap);
1017 	}
1018 out:
1019 	if (offset_in_page(ret)) {
1020 		vm_unacct_memory(charged);
1021 		locked = false;
1022 	}
1023 	if (downgraded)
1024 		mmap_read_unlock(current->mm);
1025 	else
1026 		mmap_write_unlock(current->mm);
1027 	if (locked && new_len > old_len)
1028 		mm_populate(new_addr + old_len, new_len - old_len);
1029 	userfaultfd_unmap_complete(mm, &uf_unmap_early);
1030 	mremap_userfaultfd_complete(&uf, addr, ret, old_len);
1031 	userfaultfd_unmap_complete(mm, &uf_unmap);
1032 	return ret;
1033 }
1034