// SPDX-License-Identifier: GPL-2.0
/*
 * mm/mremap.c
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * Address space accounting code <alan@lxorguk.ukuu.org.uk>
 * (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/mm-arch-hooks.h>
#include <linux/userfaultfd_k.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	return pud;
}

static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = get_old_pud(mm, addr);
	if (!pud)
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;

	return pud_alloc(mm, p4d, addr);
}

static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = alloc_new_pud(mm, vma, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}
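
/*
 * Note on the helpers above: they walk the generic five-level hierarchy
 * pgd -> p4d -> pud -> pmd (the pte level is handled by the callers).
 * On architectures configured with fewer paging levels the intermediate
 * steps are folded away at compile time; e.g. with a folded p4d,
 * p4d_offset(pgd, addr) simply hands back the pgd entry recast as a p4d,
 * so the extra walk steps cost nothing.
 */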

static void take_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set the soft dirty bit so that userspace can notice
	 * that the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
#endif
	return pte;
}
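
/*
 * For context, a sketch of how userspace consumes the soft-dirty bit set
 * above (see Documentation/admin-guide/mm/soft-dirty.rst): a tracker
 * writes "4" to /proc/<pid>/clear_refs to clear the bits, lets the task
 * run, then reads /proc/<pid>/pagemap, where bit 55 of each 64-bit entry
 * reports the page as soft-dirty. Marking moved ptes soft-dirty here
 * keeps such trackers from missing pages that merely changed address.
 */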

static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;
	bool force_flush = false;
	unsigned long len = old_end - old_addr;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using vma_is_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_lock prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;

		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		/*
		 * If we are remapping a valid PTE, make sure
		 * to flush TLB before we drop the PTL for the
		 * PTE.
		 *
		 * NOTE! Both old and new PTL matter: the old one
		 * for racing with page_mkclean(), the new one to
		 * make sure the physical page stays valid until
		 * the TLB entry for the old mapping has been
		 * flushed.
		 */
		if (pte_present(pte))
			force_flush = true;
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (force_flush)
		flush_tlb_range(vma, old_end - len, old_end);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (need_rmap_locks)
		drop_rmap_locks(vma);
}

#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
static inline bool trylock_vma_ref_count(struct vm_area_struct *vma)
{
	/*
	 * If we have the only reference, swap the refcount to -1. This
	 * will prevent other concurrent references by get_vma() for SPFs.
	 */
	return atomic_cmpxchg(&vma->vm_ref_count, 1, -1) == 1;
}

/*
 * Restore the VMA reference count to 1 after a fast mremap.
 */
static inline void unlock_vma_ref_count(struct vm_area_struct *vma)
{
	/*
	 * This should only be called after a corresponding,
	 * successful trylock_vma_ref_count().
	 */
	VM_BUG_ON_VMA(atomic_cmpxchg(&vma->vm_ref_count, -1, 1) != -1,
		      vma);
}
#else	/* !CONFIG_SPECULATIVE_PAGE_FAULT */
static inline bool trylock_vma_ref_count(struct vm_area_struct *vma)
{
	return true;
}
static inline void unlock_vma_ref_count(struct vm_area_struct *vma)
{
}
#endif	/* CONFIG_SPECULATIVE_PAGE_FAULT */
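
/*
 * For example: with no speculative fault in flight, vm_ref_count is 1
 * and the cmpxchg above atomically publishes -1, locking out get_vma()
 * until unlock_vma_ref_count() runs. If a speculative fault already
 * holds a reference (vm_ref_count >= 2), the cmpxchg fails, the trylock
 * returns false, and the callers below simply fall back to the slower
 * move_ptes() path instead of blocking.
 */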

#ifdef CONFIG_HAVE_MOVE_PMD
static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pmd_t pmd;

	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
	 *
	 * However, there's a case during execve() where we use mremap
	 * to move the initial stack, and in that case the target area
	 * may overlap the source area (always moving down).
	 *
	 * If everything is PMD-aligned, that works fine, as moving
	 * each pmd down will clear the source pmd. But if we first
	 * have a few 4kB-only pages that get moved down, and then
	 * hit the "now the rest is PMD-aligned, let's do everything
	 * one pmd at a time", we will still have the old (now empty
	 * of any 4kB pages, but still there) PMD in the page table
	 * tree.
	 *
	 * Warn on it once - because we really should try to figure
	 * out how to do this better - but then say "I won't move
	 * this pmd".
	 *
	 * One alternative might be to just unmap the target pmd at
	 * this point, and verify that it really is empty. We'll see.
	 */
	if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
		return false;

	/*
	 * We hold both exclusive mmap_lock and rmap_lock at this point and
	 * cannot block. If we cannot immediately take exclusive ownership
	 * of the VMA, fall back to move_ptes().
	 */
	if (!trylock_vma_ref_count(vma))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pmd_lock(vma->vm_mm, old_pmd);
	new_ptl = pmd_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pmd */
	pmd = *old_pmd;
	pmd_clear(old_pmd);

	VM_BUG_ON(!pmd_none(*new_pmd));

	/* Set the new pmd */
	set_pmd_at(mm, new_addr, new_pmd, pmd);
	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	unlock_vma_ref_count(vma);
	return true;
}
#else
static inline bool move_normal_pmd(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd,
		pmd_t *new_pmd)
{
	return false;
}
#endif

#ifdef CONFIG_HAVE_MOVE_PUD
static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/*
	 * We hold both exclusive mmap_lock and rmap_lock at this point and
	 * cannot block. If we cannot immediately take exclusive ownership
	 * of the VMA, fall back to move_ptes().
	 */
	if (!trylock_vma_ref_count(vma))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(vma->vm_mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	/* Set the new pud */
	set_pud_at(mm, new_addr, new_pud, pud);
	flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	unlock_vma_ref_count(vma);
	return true;
}
#else
static inline bool move_normal_pud(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pud_t *old_pud,
		pud_t *new_pud)
{
	return false;
}
#endif

enum pgt_entry {
	NORMAL_PMD,
	HPAGE_PMD,
	NORMAL_PUD,
};

/*
 * Returns an extent of the corresponding size for the pgt_entry specified if
 * valid. Else returns a smaller extent bounded by the end of the source and
 * destination pgt_entry.
 */
static __always_inline unsigned long get_extent(enum pgt_entry entry,
			unsigned long old_addr, unsigned long old_end,
			unsigned long new_addr)
{
	unsigned long next, extent, mask, size;

	switch (entry) {
	case HPAGE_PMD:
	case NORMAL_PMD:
		mask = PMD_MASK;
		size = PMD_SIZE;
		break;
	case NORMAL_PUD:
		mask = PUD_MASK;
		size = PUD_SIZE;
		break;
	default:
		BUILD_BUG();
		break;
	}

	next = (old_addr + size) & mask;
	/* even if next overflowed, extent below will be ok */
	extent = next - old_addr;
	if (extent > old_end - old_addr)
		extent = old_end - old_addr;
	next = (new_addr + size) & mask;
	if (extent > next - new_addr)
		extent = next - new_addr;
	return extent;
}
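
/*
 * Worked example (illustrative numbers, assuming a 2MB PMD_SIZE): for
 * entry == NORMAL_PMD with old_addr = 0x1234000, old_end = 0x1600000
 * and new_addr = 0x7654000:
 *
 *	next   = (0x1234000 + 0x200000) & PMD_MASK = 0x1400000
 *	extent = 0x1400000 - 0x1234000             = 0x1cc000
 *
 * which is below old_end - old_addr (0x3cc000), so it stands; then the
 * destination boundary gives 0x7800000 - 0x7654000 = 0x1ac000, which is
 * smaller still, so 0x1ac000 is returned: the largest step that crosses
 * neither the source nor the destination PMD boundary.
 */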

/*
 * Attempts to speedup the move by moving entry at the level corresponding to
 * pgt_entry. Returns true if the move was successful, else false.
 */
static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
			unsigned long old_addr, unsigned long new_addr,
			void *old_entry, void *new_entry, bool need_rmap_locks)
{
	bool moved = false;

	/* See comment in move_ptes() */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	switch (entry) {
	case NORMAL_PMD:
		moved = move_normal_pmd(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case NORMAL_PUD:
		moved = move_normal_pud(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case HPAGE_PMD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pmd(vma, old_addr, new_addr, old_entry,
				      new_entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	if (need_rmap_locks)
		drop_rmap_locks(vma);

	return moved;
}

unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks)
{
	unsigned long extent, old_end;
	struct mmu_notifier_range range;
	pmd_t *old_pmd, *new_pmd;

	if (!len)
		return 0;

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
				old_addr, old_end);
	mmu_notifier_invalidate_range_start(&range);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		/*
		 * If extent is PUD-sized try to speed up the move by moving
		 * at the PUD level if possible.
		 */
		extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr);
		if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
			pud_t *old_pud, *new_pud;

			old_pud = get_old_pud(vma->vm_mm, old_addr);
			if (!old_pud)
				continue;
			new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
			if (!new_pud)
				break;
			if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
					   old_pud, new_pud, true))
				continue;
		}

		extent = get_extent(NORMAL_PMD, old_addr, old_end, new_addr);
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) ||
		    pmd_devmap(*old_pmd)) {
			if (extent == HPAGE_PMD_SIZE &&
			    move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, need_rmap_locks))
				continue;
			split_huge_pmd(vma, old_pmd, old_addr);
			if (pmd_trans_unstable(old_pmd))
				continue;
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) &&
			   extent == PMD_SIZE) {
			/*
			 * If the extent is PMD-sized, try to speed the move by
			 * moving at the PMD level if possible.
			 */
			if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, true))
				continue;
		}

		if (pte_alloc(new_vma->vm_mm, new_pmd))
			break;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
			  new_pmd, new_addr, need_rmap_locks);
	}

	mmu_notifier_invalidate_range_end(&range);

	return len + old_addr - old_end;	/* how much done */
}
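
/*
 * Note the return contract above: the function returns the number of
 * bytes successfully moved, which equals len only when every extent was
 * moved. move_vma() below relies on this: if moved_len < old_len it
 * calls move_page_tables() again in the opposite direction to put the
 * already-moved entries back before failing with -ENOMEM.
 */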

static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr,
		bool *locked, unsigned long flags,
		struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err;
	bool need_rmap_locks;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped. But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
			  MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma)
		return -ENOMEM;

	/*
	 * new_vma is returned protected by copy_vma, to prevent speculative
	 * page faults from being handled in the destination area before we
	 * move the ptes. Now we must also protect the source VMA, since we
	 * don't want pages to be mapped behind our back while we are copying
	 * the PTEs.
	 */
	if (vma != new_vma)
		vm_write_begin(vma);

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len) {
		err = -ENOMEM;
	} else if (vma->vm_ops && vma->vm_ops->mremap) {
		err = vma->vm_ops->mremap(new_vma);
	}

	if (unlikely(err)) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		if (vma != new_vma)
			vm_write_end(vma);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = err;
	} else {
		mremap_userfaultfd_prep(new_vma, uf);
		arch_remap(mm, old_addr, old_addr + old_len,
			   new_addr, new_addr + new_len);
		if (vma != new_vma)
			vm_write_end(vma);
	}
	vm_write_end(new_vma);

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);

	/* Tell pfnmap has moved from this vma */
	if (unlikely(vma->vm_flags & VM_PFNMAP))
		untrack_pfn_moved(vma);

	if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) {
		if (vm_flags & VM_ACCOUNT) {
			/* Always put back VM_ACCOUNT since we won't unmap */
			vma->vm_flags |= VM_ACCOUNT;

			vm_acct_memory(new_len >> PAGE_SHIFT);
		}

		/*
		 * VMAs can actually be merged back together in copy_vma
		 * calling vma_merge(). This can happen with anonymous vmas
		 * which have not yet been faulted, so if we were to consider
		 * this VMA split we'll end up adding VM_ACCOUNT on the
		 * next VMA, which is completely unrelated if this VMA
		 * was re-merged.
		 */
		if (split && new_vma == vma)
			split = 0;

		/* We always clear VM_LOCKED[ONFAULT] on the old vma */
		vma->vm_flags &= VM_LOCKED_CLEAR_MASK;

		/* Because we won't unmap we don't need to touch locked_vm */
		goto out;
	}

	if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}
out:
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	return new_addr;
}

static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long flags,
	unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);
	unsigned long pgoff;

	if (!vma || vma->vm_start > addr)
		return ERR_PTR(-EFAULT);

	/*
	 * !old_len is a special case where an attempt is made to 'duplicate'
	 * a mapping. This makes no sense for private mappings as it will
	 * instead create a fresh/new mapping unrelated to the original. This
	 * is contrary to the basic idea of mremap which creates new mappings
	 * based on the original. There are no known use cases for this
	 * behavior. As a result, fail such attempts.
	 */
	if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
		pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap. This is not supported.\n",
			     current->comm, current->pid);
		return ERR_PTR(-EINVAL);
	}

	if ((flags & MREMAP_DONTUNMAP) &&
			(vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)))
		return ERR_PTR(-EINVAL);

	if (is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		return ERR_PTR(-EFAULT);

	if (new_len == old_len)
		return vma;

	/* Need to be careful about a growing mapping */
	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
		return ERR_PTR(-EINVAL);

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
		return ERR_PTR(-EFAULT);

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;

		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return ERR_PTR(-EAGAIN);
	}

	if (!may_expand_vm(mm, vma->vm_flags,
				(new_len - old_len) >> PAGE_SHIFT))
		return ERR_PTR(-ENOMEM);

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;

		if (security_vm_enough_memory_mm(mm, charged))
			return ERR_PTR(-ENOMEM);
		*p = charged;
	}

	return vma;
}
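
/*
 * The "growing mapping" check above guards the file offset, in pages,
 * against wraparound: if pgoff is already close to ULONG_MAX, adding
 * new_len >> PAGE_SHIFT can wrap past zero, making the comparison
 * pgoff + (new_len >> PAGE_SHIFT) < pgoff true and failing the resize
 * with -EINVAL rather than creating a mapping with nonsensical offsets.
 */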

static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked,
		unsigned long flags, struct vm_userfaultfd_ctx *uf,
		struct list_head *uf_unmap_early,
		struct list_head *uf_unmap)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags = 0;

	if (offset_in_page(new_addr))
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Ensure the old/new locations do not overlap */
	if (addr + old_len > new_addr && new_addr + new_len > addr)
		goto out;

	/*
	 * move_vma() needs us to stay 4 maps below the threshold, otherwise
	 * it will bail out at the very beginning.
	 * That is a problem if we have already unmapped the regions here
	 * (new_addr, and old_addr), because userspace will not know the
	 * state of the vmas after it gets -ENOMEM.
	 * So, to avoid such a scenario we can pre-compute whether the whole
	 * operation has a high chance of succeeding map-wise.
	 * The worst-case scenario is when both vmas (new_addr and old_addr)
	 * get split in 3 before unmapping them.
	 * That means 2 more maps (1 for each) to the ones we already hold.
	 * Check whether the current map count plus 2 still leaves us 4 maps
	 * below the threshold, otherwise return -ENOMEM here to be safe.
	 */
	if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (flags & MREMAP_FIXED) {
		ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
		if (ret)
			goto out;
	}

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
	if (flags & MREMAP_DONTUNMAP &&
		!may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) {
		ret = -ENOMEM;
		goto out;
	}

	if (flags & MREMAP_FIXED)
		map_flags |= MAP_FIXED;

	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (IS_ERR_VALUE(ret))
		goto out1;

	/* We got a new mapping */
	if (!(flags & MREMAP_FIXED))
		new_addr = ret;

	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
		       uf_unmap);

	if (!(offset_in_page(ret)))
		goto out;

out1:
	vm_unacct_memory(charged);

out:
	return ret;
}

static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;

	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}
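
/*
 * For example: a vma covering [0x600000, 0x800000) asked to grow by
 * 0x200000 is expandable only if no following vma starts below 0xa00000
 * and get_unmapped_area() confirms that a MAP_FIXED mapping over
 * [0x600000, 0xa00000) is acceptable to the architecture. If any check
 * above fails, mremap() falls back to moving the vma instead of growing
 * it in place.
 */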

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	bool locked = false;
	bool downgraded = false;
	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
	LIST_HEAD(uf_unmap_early);
	LIST_HEAD(uf_unmap);

	/*
	 * There is a deliberate asymmetry here: we strip the pointer tag
	 * from the old address but leave the new address alone. This is
	 * for consistency with mmap(), where we prevent the creation of
	 * aliasing mappings in userspace by leaving the tag bits of the
	 * mapping address intact. A non-zero tag will cause the subsequent
	 * range checks to reject the address as invalid.
	 *
	 * See Documentation/arm64/tagged-address-abi.rst for more information.
	 */
	addr = untagged_addr(addr);

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	/*
	 * MREMAP_DONTUNMAP is always a move and it does not allow resizing
	 * in the process.
	 */
	if (flags & MREMAP_DONTUNMAP &&
			(!(flags & MREMAP_MAYMOVE) || old_len != new_len))
		return ret;

	if (offset_in_page(addr))
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		return ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked, flags, &uf, &uf_unmap_early,
				&uf_unmap);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * __do_munmap does all the needed commit accounting, and
	 * downgrades mmap_lock to read if so directed.
	 */
	if (old_len >= new_len) {
		int retval;

		retval = __do_munmap(mm, addr+new_len, old_len - new_len,
				     &uf_unmap, true);
		if (retval < 0 && old_len != new_len) {
			ret = retval;
			goto out;
		/* Returning 1 indicates mmap_lock is downgraded to read. */
		} else if (retval == 1)
			downgraded = true;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area.. */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;

		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (IS_ERR_VALUE(new_addr)) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr,
			       &locked, flags, &uf, &uf_unmap);
	}
out:
	if (offset_in_page(ret)) {
		vm_unacct_memory(charged);
		locked = false;
	}
	if (downgraded)
		mmap_read_unlock(current->mm);
	else
		mmap_write_unlock(current->mm);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap_early);
	mremap_userfaultfd_complete(&uf, addr, ret, old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap);
	return ret;
}
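
/*
 * Userspace view, as an illustrative sketch (not part of this file):
 * growing an anonymous mapping, in place when possible and by moving it
 * otherwise, exercises the expand/move logic above.
 *
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	void *q = mremap(p, 4096, 2 * 4096, MREMAP_MAYMOVE);
 *	if (q == MAP_FAILED)
 *		perror("mremap");
 *
 * With MREMAP_MAYMOVE set, the syscall first tries vma_expandable()
 * above and only then falls back to move_vma(); without the flag, an
 * expansion that cannot happen in place fails with -ENOMEM.
 */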