1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun #include <linux/kernel.h>
3*4882a593Smuzhiyun #include <linux/string.h>
4*4882a593Smuzhiyun #include <linux/mm.h>
5*4882a593Smuzhiyun #include <linux/highmem.h>
6*4882a593Smuzhiyun #include <linux/page_ext.h>
7*4882a593Smuzhiyun #include <linux/poison.h>
8*4882a593Smuzhiyun #include <linux/ratelimit.h>
9*4882a593Smuzhiyun #include <linux/kasan.h>
10*4882a593Smuzhiyun
/*
 * Set from the "page_poison" early parameter before jump labels are
 * usable; mirrored into the static key below once init is far enough.
 */
bool _page_poisoning_enabled_early;
EXPORT_SYMBOL(_page_poisoning_enabled_early);
/* Static-branch gate consulted on every page alloc/free fast path. */
DEFINE_STATIC_KEY_FALSE(_page_poisoning_enabled);
EXPORT_SYMBOL(_page_poisoning_enabled);
15*4882a593Smuzhiyun
/*
 * Parse the "page_poison" boot parameter (any kstrtobool-accepted
 * value: "on"/"off", "1"/"0", "y"/"n") into the early enable flag.
 * Returns 0 on success or a negative errno for unparseable input.
 */
static int __init early_page_poison_param(char *buf)
{
	return kstrtobool(buf, &_page_poisoning_enabled_early);
}
early_param("page_poison", early_page_poison_param);
21*4882a593Smuzhiyun
/*
 * Fill a single page with the PAGE_POISON byte pattern.  The page is
 * mapped with kmap_atomic() so highmem pages work too; the mapping is
 * dropped before returning.
 */
static void poison_page(struct page *page)
{
	void *addr = kmap_atomic(page);

	/* KASAN still think the page is in-use, so skip it. */
	kasan_disable_current();
	/* Reset the KASAN tag so the write isn't flagged as a bad access. */
	memset(kasan_reset_tag(addr), PAGE_POISON, PAGE_SIZE);
	kasan_enable_current();
	kunmap_atomic(addr);
}
32*4882a593Smuzhiyun
__kernel_poison_pages(struct page * page,int n)33*4882a593Smuzhiyun void __kernel_poison_pages(struct page *page, int n)
34*4882a593Smuzhiyun {
35*4882a593Smuzhiyun int i;
36*4882a593Smuzhiyun
37*4882a593Smuzhiyun for (i = 0; i < n; i++)
38*4882a593Smuzhiyun poison_page(page + i);
39*4882a593Smuzhiyun }
40*4882a593Smuzhiyun
/*
 * Return true when @a and @b differ in exactly one bit position,
 * i.e. the corruption looks like a single-bit memory error rather
 * than a wider overwrite.
 */
static bool single_bit_flip(unsigned char a, unsigned char b)
{
	unsigned char diff = a ^ b;
	unsigned int nbits = 0;

	/* Count set bits by repeatedly clearing the lowest one. */
	while (diff) {
		diff &= diff - 1;
		nbits++;
	}
	return nbits == 1;
}
47*4882a593Smuzhiyun
/*
 * Verify that @bytes bytes at @mem still hold the PAGE_POISON pattern.
 * On a mismatch, report (rate-limited) whether the damage looks like a
 * single bit flip or broader corruption, hex-dump the damaged span and
 * dump the stack.  Silent when the region is intact.
 */
static void check_poison_mem(unsigned char *mem, size_t bytes)
{
	/* At most 10 reports per 5 seconds to avoid log flooding. */
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
	unsigned char *start;
	unsigned char *end;

	/* First byte that deviates from the poison pattern, if any. */
	start = memchr_inv(mem, PAGE_POISON, bytes);
	if (!start)
		return;

	/* Walk back from the tail to find the last corrupted byte. */
	for (end = mem + bytes - 1; end > start; end--) {
		if (*end != PAGE_POISON)
			break;
	}

	if (!__ratelimit(&ratelimit))
		return;
	else if (start == end && single_bit_flip(*start, PAGE_POISON))
		pr_err("pagealloc: single bit error\n");
	else
		pr_err("pagealloc: memory corruption\n");

	/* Dump only the corrupted span [start, end]. */
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
			end - start + 1, 1);
	dump_stack();
}
74*4882a593Smuzhiyun
/*
 * Check a single page's poison pattern as it is handed back out by the
 * allocator, reporting any corruption that occurred while it was free.
 */
static void unpoison_page(struct page *page)
{
	void *addr;

	addr = kmap_atomic(page);
	/* KASAN would flag reads of a page it still considers freed. */
	kasan_disable_current();
	/*
	 * Page poisoning when enabled poisons each and every page
	 * that is freed to buddy. Thus no extra check is done to
	 * see if a page was poisoned.
	 */
	check_poison_mem(kasan_reset_tag(addr), PAGE_SIZE);
	kasan_enable_current();
	kunmap_atomic(addr);
}
90*4882a593Smuzhiyun
__kernel_unpoison_pages(struct page * page,int n)91*4882a593Smuzhiyun void __kernel_unpoison_pages(struct page *page, int n)
92*4882a593Smuzhiyun {
93*4882a593Smuzhiyun int i;
94*4882a593Smuzhiyun
95*4882a593Smuzhiyun for (i = 0; i < n; i++)
96*4882a593Smuzhiyun unpoison_page(page + i);
97*4882a593Smuzhiyun }
98*4882a593Smuzhiyun
#ifndef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
/*
 * Stub for architectures without real DEBUG_PAGEALLOC page-table
 * manipulation: with page poisoning the poison/unpoison paths already
 * do all the work, so mapping changes are unnecessary.
 */
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	/* This function does nothing, all work is done via poison pages */
}
#endif
105