// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include "internal.h"
#ifndef __GENKSYMS__
#include <trace/hooks/syscall_check.h>
#endif

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
 * must not be passed to krealloc().
 *
 * Return: source string if it is in the .rodata section, otherwise fall back
 * to kstrdup().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
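
/*
 * Illustrative sketch (not part of the original file): the intended pairing of
 * kstrdup_const() with kfree_const() described above. The demo struct, field
 * and helper names are hypothetical.
 */
struct demo_attr {
	const char *name;
};

static __maybe_unused int demo_attr_set_name(struct demo_attr *attr,
					     const char *name)
{
	/* Reuses .rodata strings as-is, duplicates everything else. */
	attr->name = kstrdup_const(name, GFP_KERNEL);
	return attr->name ? 0 : -ENOMEM;
}

static __maybe_unused void demo_attr_release(struct demo_attr *attr)
{
	/* kfree_const() only frees when the string was really allocated. */
	kfree_const(attr->name);
}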

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);
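
/*
 * Illustrative sketch (not part of the original file): turning a counted,
 * unterminated byte range into a C string with kmemdup_nul(), as the
 * kstrndup() note above recommends when the length is already known.
 * The helper name is hypothetical.
 */
static __maybe_unused char *demo_token_to_string(const char *start, size_t len)
{
	/* Copies exactly @len bytes and appends the terminating NUL. */
	return kmemdup_nul(start, len, GFP_KERNEL);
}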

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
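
/*
 * Illustrative sketch (not part of the original file): the usual ERR_PTR
 * handling around memdup_user(). The ioctl-style helper is hypothetical.
 */
static __maybe_unused int demo_copy_blob(const void __user *ubuf, size_t len)
{
	void *blob;

	blob = memdup_user(ubuf, len);
	if (IS_ERR(blob))
		return PTR_ERR(blob);	/* -ENOMEM or -EFAULT */

	/* ... consume the physically contiguous copy here ... */
	kfree(blob);
	return 0;
}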

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result may not be
 * physically contiguous. Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a page fault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);
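
/*
 * Illustrative sketch (not part of the original file): the common write-handler
 * pattern that memdup_user_nul() serves, where user data must become a
 * NUL-terminated string before parsing. The handler is hypothetical.
 */
static __maybe_unused ssize_t demo_write(const char __user *ubuf, size_t count)
{
	char *kbuf;

	kbuf = memdup_user_nul(ubuf, count);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	/* kbuf is NUL-terminated, so str*() parsing helpers are safe here. */
	kfree(kbuf);
	return count;
}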

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		next = mm->mmap;
		mm->mmap = vma;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev, *next;

	next = vma->vm_next;
	prev = vma->vm_prev;
	if (prev)
		prev->vm_next = next;
	else
		mm->mmap = next;
	if (next)
		next->vm_prev = prev;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

/**
 * randomize_page - Generate a random, page aligned address
 * @start: The smallest acceptable address the caller will take.
 * @range: The size of the area, starting at @start, within which the
 *         random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned. We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range). On error,
 * @start is returned.
 */
unsigned long randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}

#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32bit ? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}
EXPORT_SYMBOL_GPL(arch_mmap_rnd);

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP (SZ_128M)
#define MAX_GAP (STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 * @task: task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	mmap_assert_write_locked(mm);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against, may be NULL
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	mmap_write_lock(mm);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	mmap_write_unlock(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);
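
/*
 * Illustrative sketch (not part of the original file): how a driver that pins
 * user pages might charge and uncharge them against RLIMIT_MEMLOCK via
 * account_locked_vm(). The pinning itself is elided; names are hypothetical.
 */
static __maybe_unused int demo_pin_charge(struct mm_struct *mm,
					  unsigned long npages)
{
	/* Fails with -ENOMEM if the MEMLOCK rlimit would be exceeded. */
	return account_locked_vm(mm, npages, true);
}

static __maybe_unused void demo_pin_uncharge(struct mm_struct *mm,
					     unsigned long npages)
{
	account_locked_vm(mm, npages, false);
}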

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
		ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
			      &uf);
		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	trace_android_vh_check_mmap_file(file, prot, flag, ret);
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
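
/*
 * Illustrative sketch (not part of the original file): an in-kernel mapping of
 * a file via vm_mmap(), checking the error-encoded return value the way its
 * callers do. The wrapper itself is hypothetical.
 */
static __maybe_unused unsigned long demo_map_file_ro(struct file *file,
						     size_t len)
{
	unsigned long addr;

	addr = vm_mmap(file, 0, len, PROT_READ, MAP_SHARED, 0);
	if (IS_ERR_VALUE(addr))
		pr_debug("demo mapping failed: %ld\n", (long)addr);
	return addr;
}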

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Please note that requests with gfp flags that are not a superset of
 * GFP_KERNEL deliberately do not fall back to vmalloc.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g. page tables)
	 * so the given set of flags has to be compatible.
	 */
	if ((flags & GFP_KERNEL) != GFP_KERNEL)
		return kmalloc_node(size, flags, node);

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contributes less to long-term fragmentation than the vmalloc fallback.
	 * However make sure that larger requests are not too disruptive - no
	 * OOM killer and no allocation failure warnings as we have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fall back to vmalloc for sub page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	/* Don't even allow crazy sizes */
	if (unlikely(size > INT_MAX)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	return __vmalloc_node(size, 1, flags, node,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);
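
/*
 * Illustrative sketch (not part of the original file): allocating a
 * possibly-large table with kvmalloc_array() and releasing it with kvfree(),
 * as the comment above prescribes. The helper names are hypothetical.
 */
static __maybe_unused unsigned long *demo_alloc_table(size_t nr_entries)
{
	/* Transparently falls back to vmalloc when the request is large. */
	return kvmalloc_array(nr_entries, sizeof(unsigned long), GFP_KERNEL);
}

static __maybe_unused void demo_free_table(unsigned long *table)
{
	/* Works for both the kmalloc and the vmalloc case. */
	kvfree(table);
}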

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
	if (likely(!ZERO_OR_NULL_PTR(addr))) {
		memzero_explicit((void *)addr, len);
		kvfree(addr);
	}
}
EXPORT_SYMBOL(kvfree_sensitive);

void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
{
	void *newp;

	if (oldsize >= newsize)
		return (void *)p;
	newp = kvmalloc(newsize, flags);
	if (!newp)
		return NULL;
	memcpy(newp, p, oldsize);
	kvfree(p);
	return newp;
}
EXPORT_SYMBOL(kvrealloc);
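
/*
 * Illustrative sketch (not part of the original file): growing a kvmalloc'ed
 * buffer with kvrealloc(). As implemented above, the old buffer is freed on
 * success and left untouched on failure, so the caller still owns it when
 * %NULL is returned. The helper name is hypothetical.
 */
static __maybe_unused void *demo_grow_buffer(void *buf, size_t oldsize,
					     size_t newsize)
{
	void *nbuf = kvrealloc(buf, oldsize, newsize, GFP_KERNEL);

	if (!nbuf)
		kvfree(buf);	/* this caller chooses to drop the old copy */
	return nbuf;
}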

static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For compound page it returns true if any subpage of compound page is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < compound_nr(page); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
struct address_space *page_mapping_file(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return NULL;
	return page_mapping(page);
}
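
/*
 * Illustrative sketch (not part of the original file): distinguishing
 * file-backed pages from anonymous or swap-cache pages with
 * page_mapping_file(). The helper name is hypothetical.
 */
static __maybe_unused bool demo_page_is_file_backed(struct page *page)
{
	/* NULL for anonymous and swap-cache pages, as described above. */
	return page_mapping_file(page) != NULL;
}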

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP page->_mapcount contains total number of mapping
	 * of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
	percpu_counter_sync(&vm_committed_as);
}

int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int new_policy = -1;
	int ret;

	/*
	 * The deviation of sync_overcommit_as could be big with loose policy
	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to
	 * strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
	 * with the strict "NEVER", and to avoid a possible race condition
	 * (even though users usually won't switch to OVERCOMMIT_NEVER very
	 * frequently), the switch is done in the following order:
	 *	1. changing the batch
	 *	2. sync percpu count on each CPU
	 *	3. switch the policy
	 */
	if (write) {
		t = *table;
		t.data = &new_policy;
		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
		if (ret || new_policy == -1)
			return ret;

		mm_compute_batch(new_policy);
		if (new_policy == OVERCOMMIT_NEVER)
			schedule_on_each_cpu(sync_overcommit_as);
		sysctl_overcommit_memory = new_policy;
	} else {
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	}

	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

/*
 * Make sure vm_committed_as is on its own cacheline and not shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 *
 * The time cost of this is very low for small platforms, and for a big
 * platform like a 2S/36C/72T Skylake server, in the worst case where
 * vm_committed_as's spinlock is under severe contention, the time cost
 * could be about 30~40 microseconds.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}
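
/*
 * Illustrative sketch (not part of the original file): how an LSM-style hook
 * might defer to this helper, charging @pages up front and relying on the
 * helper to unaccount them on failure. The wrapper is hypothetical, and the
 * capability it checks is only an assumed example.
 */
static __maybe_unused int demo_vm_enough_memory(struct mm_struct *mm,
						long pages)
{
	return __vm_enough_memory(mm, pages, capable(CAP_SYS_ADMIN));
}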

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:   the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NULL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}
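
/*
 * Illustrative sketch (not part of the original file): reading another task's
 * command line with get_cmdline(), remembering that the copied data is not
 * guaranteed to be NUL-terminated. The helper name is hypothetical.
 */
static __maybe_unused void demo_log_cmdline(struct task_struct *task)
{
	char buf[128];
	int len = get_cmdline(task, buf, sizeof(buf) - 1);

	buf[len] = '\0';	/* terminate explicitly before printing */
	pr_debug("cmdline of pid %d: %s\n", task->pid, buf);
}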

int __weak memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1);
	addr2 = kmap_atomic(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2);
	kunmap_atomic(addr1);
	return ret;
}