// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include "internal.h"
#ifndef __GENKSYMS__
#include <trace/hooks/syscall_check.h>
#endif

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

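/*
 * Illustrative sketch (not part of this file): a typical kstrdup() caller
 * duplicates a caller-supplied string so it can outlive the caller's
 * buffer. The names example_dev and example_set_label are hypothetical.
 *
 *	static int example_set_label(struct example_dev *dev, const char *label)
 *	{
 *		char *copy = kstrdup(label, GFP_KERNEL);
 *
 *		if (!copy)
 *			return -ENOMEM;
 *		kfree(dev->label);
 *		dev->label = copy;
 *		return 0;
 *	}
 */
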
/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
 * must not be passed to krealloc().
 *
 * Return: source string if it is in the .rodata section, otherwise a
 * newly allocated copy made with kstrdup().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);

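/*
 * Illustrative sketch (not part of this file): kstrdup_const() pairs with
 * kfree_const(), so a field that is usually set to a string literal in
 * .rodata avoids an allocation entirely. struct example_obj is hypothetical.
 *
 *	static int example_obj_set_name(struct example_obj *obj, const char *name)
 *	{
 *		const char *copy = kstrdup_const(name, GFP_KERNEL);
 *
 *		if (!copy)
 *			return -ENOMEM;
 *		kfree_const(obj->name);
 *		obj->name = copy;
 *		return 0;
 *	}
 */
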
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);

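/*
 * Illustrative sketch (not part of this file): kmemdup_nul() turns an
 * unterminated byte range, e.g. the key half of a "key=value" buffer,
 * into a proper C string in one step. example_key_from_pair is hypothetical.
 *
 *	static char *example_key_from_pair(const char *pair, size_t pair_len)
 *	{
 *		const char *eq = memchr(pair, '=', pair_len);
 *
 *		if (!eq)
 *			return NULL;
 *		return kmemdup_nul(pair, eq - pair, GFP_KERNEL);
 *	}
 */
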
/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);

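/*
 * Illustrative sketch (not part of this file): a typical memdup_user()
 * caller in an ioctl handler, propagating the ERR_PTR() encoded error.
 * struct example_args and example_ioctl_setargs are hypothetical.
 *
 *	static long example_ioctl_setargs(const void __user *uptr)
 *	{
 *		struct example_args *args;
 *
 *		args = memdup_user(uptr, sizeof(*args));
 *		if (IS_ERR(args))
 *			return PTR_ERR(args);
 *		... use args ...
 *		kfree(args);
 *		return 0;
 *	}
 */
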
/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result may be not
 * physically contiguous. Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);

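/*
 * Illustrative sketch (not part of this file): memdup_user_nul() is the
 * usual way a procfs/sysfs-style write handler copies the user buffer so
 * string parsers can safely run on it. example_write is hypothetical.
 *
 *	static ssize_t example_write(struct file *file, const char __user *ubuf,
 *				     size_t count, loff_t *ppos)
 *	{
 *		char *kbuf = memdup_user_nul(ubuf, count);
 *
 *		if (IS_ERR(kbuf))
 *			return PTR_ERR(kbuf);
 *		... parse kbuf with sscanf()/kstrtoul() ...
 *		kfree(kbuf);
 *		return count;
 *	}
 */
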
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		     struct vm_area_struct *prev)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		next = mm->mmap;
		mm->mmap = vma;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev, *next;

	next = vma->vm_next;
	prev = vma->vm_prev;
	if (prev)
		prev->vm_next = next;
	else
		mm->mmap = next;
	if (next)
		next->vm_prev = prev;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

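/*
 * Worked example (illustrative, assuming 4K pages): STACK_RND_MASK is
 * 0x7ff, so the random offset above is at most 0x7ff << 12, i.e.
 * 8MiB - 4KiB below (or above, with CONFIG_STACK_GROWSUP) the
 * page-aligned stack top.
 */
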
/**
 * randomize_page - Generate a random, page aligned address
 * @start: The smallest acceptable address the caller will take.
 * @range: The size of the area, starting at @start, within which the
 *         random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned. We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range). On error,
 * @start is returned.
 */
unsigned long randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}

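/*
 * Illustrative sketch (not part of this file): picking a randomized,
 * page-aligned address within a hypothetical 16MiB window above the
 * default unmapped base.
 *
 *	unsigned long base = randomize_page(TASK_UNMAPPED_BASE, SZ_16M);
 */
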
#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32bit ? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}
EXPORT_SYMBOL_GPL(arch_mmap_rnd);

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP (SZ_128M)
#define MAX_GAP (STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 * @task: task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0 on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	mmap_assert_write_locked(mm);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against, may be NULL
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0 on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	mmap_write_lock(mm);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	mmap_write_unlock(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);

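/*
 * Illustrative sketch (not part of this file): a driver that pins user
 * pages accounts them first and unaccounts them on release. The names
 * example_pin and example_unpin are hypothetical.
 *
 *	static int example_pin(struct mm_struct *mm, unsigned long npages)
 *	{
 *		int ret = account_locked_vm(mm, npages, true);
 *
 *		if (ret)
 *			return ret;	(would exceed RLIMIT_MEMLOCK)
 *		... pin the pages ...
 *		return 0;
 *	}
 *
 *	static void example_unpin(struct mm_struct *mm, unsigned long npages)
 *	{
 *		account_locked_vm(mm, npages, false);
 *	}
 */
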
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
		ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
			      &uf);
		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	trace_android_vh_check_mmap_file(file, prot, flag, ret);
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Please note that any use of gfp flags outside of GFP_KERNEL is careful to not
 * fall back to vmalloc.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g. page
	 * tables), so the given set of flags has to be compatible.
	 */
	if ((flags & GFP_KERNEL) != GFP_KERNEL)
		return kmalloc_node(size, flags, node);

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contributes less to long-term fragmentation than the vmalloc
	 * fallback. However make sure that larger requests are not too
	 * disruptive - no OOM killer and no allocation failure warnings as we
	 * have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fallback to vmalloc for sub page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	/* Don't even allow crazy sizes */
	if (unlikely(size > INT_MAX)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	return __vmalloc_node(size, 1, flags, node,
			      __builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);

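/*
 * Illustrative sketch (not part of this file): a table whose size may be
 * a few bytes or many megabytes is a natural kvmalloc() user; kvfree()
 * works whichever allocator satisfied the request. struct example_entry
 * and example_load_table are hypothetical.
 *
 *	static int example_load_table(size_t nentries)
 *	{
 *		struct example_entry *tbl;
 *
 *		tbl = kvmalloc_array(nentries, sizeof(*tbl), GFP_KERNEL);
 *		if (!tbl)
 *			return -ENOMEM;
 *		... fill and use tbl ...
 *		kvfree(tbl);
 *		return 0;
 *	}
 */
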
/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
	if (likely(!ZERO_OR_NULL_PTR(addr))) {
		memzero_explicit((void *)addr, len);
		kvfree(addr);
	}
}
EXPORT_SYMBOL(kvfree_sensitive);

void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
{
	void *newp;

	if (oldsize >= newsize)
		return (void *)p;
	newp = kvmalloc(newsize, flags);
	if (!newp)
		return NULL;
	memcpy(newp, p, oldsize);
	kvfree(p);
	return newp;
}
EXPORT_SYMBOL(kvrealloc);

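/*
 * Illustrative sketch (not part of this file): growing a kvmalloc'ed
 * buffer with kvrealloc(). Both the old and the new size are passed,
 * since kvrealloc() does not look up the old size itself, and on failure
 * the old buffer is left intact and still owned by the caller.
 *
 *	newbuf = kvrealloc(buf, oldsize, newsize, GFP_KERNEL);
 *	if (!newbuf)
 *		return -ENOMEM;	(buf is still valid here)
 *	buf = newbuf;
 *	oldsize = newsize;
 */
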
static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For compound page it returns true if any subpage of compound page is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < compound_nr(page); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
struct address_space *page_mapping_file(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return NULL;
	return page_mapping(page);
}

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP page->_mapcount contains total number of mapping
	 * of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
			     size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
	percpu_counter_sync(&vm_committed_as);
}

int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
			      size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int new_policy = -1;
	int ret;

	/*
	 * The deviation of vm_committed_as can be big with a loose policy
	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing to the
	 * strict OVERCOMMIT_NEVER policy, we need to reduce that deviation
	 * to comply with the strict "NEVER", and to avoid a possible race
	 * condition (even though users rarely switch to OVERCOMMIT_NEVER),
	 * the switch is done in the following order:
	 *	1. changing the batch
	 *	2. sync percpu count on each CPU
	 *	3. switch the policy
	 */
	if (write) {
		t = *table;
		t.data = &new_policy;
		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
		if (ret || new_policy == -1)
			return ret;

		mm_compute_batch(new_policy);
		if (new_policy == OVERCOMMIT_NEVER)
			schedule_on_each_cpu(sync_overcommit_as);
		sysctl_overcommit_memory = new_policy;
	} else {
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	}

	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
			      size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

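/*
 * Worked example (illustrative): with 8GiB of RAM (no hugetlb pages),
 * 2GiB of swap, overcommit_kbytes unset and overcommit_ratio = 50, the
 * commit limit enforced under OVERCOMMIT_NEVER is
 * 8GiB * 50 / 100 + 2GiB = 6GiB.
 */
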
/*
 * Make sure vm_committed_as is in one cacheline and not cacheline shared
 * with other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 *
 * The time cost of this is very low for small platforms, and for a big
 * platform like a 2S/36C/72T Skylake server, in the worst case where
 * vm_committed_as's spinlock is under severe contention, the time cost
 * could be about 30~40 microseconds.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task: the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 * to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy is
 * not guaranteed to be NUL-terminated.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;

	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}

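/*
 * Illustrative sketch (not part of this file): since get_cmdline() does
 * not guarantee NUL-termination, callers reserve a byte and terminate
 * the result themselves.
 *
 *	char buf[256];
 *	int n = get_cmdline(task, buf, sizeof(buf) - 1);
 *
 *	buf[n] = '\0';
 */
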
int __weak memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1);
	addr2 = kmap_atomic(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2);
	kunmap_atomic(addr1);
	return ret;
}