// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 */

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). All the early functions are called too
 * early to use lm_alias so __p*d_populate functions must be used to populate
 * with the physical address from __pa_symbol.
 */

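/*
 * Shadow page allocators: the "zeroed" variant returns memory already
 * cleared by memblock, while the "raw" variant skips that clearing so the
 * caller can initialize the page with KASAN_SHADOW_INIT instead.
 */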
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
	void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
					      __pa(MAX_DMA_ADDRESS),
					      MEMBLOCK_ALLOC_KASAN, node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE, node,
		      __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}

static phys_addr_t __init kasan_alloc_raw_page(int node)
{
	void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
						__pa(MAX_DMA_ADDRESS),
						MEMBLOCK_ALLOC_KASAN, node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE, node,
		      __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}

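/*
 * Next-level table lookup helpers. In the "early" case a missing level is
 * populated with the statically allocated kasan_early_shadow_* tables via
 * __pa_symbol (see the comment above); later calls allocate fresh zeroed
 * pages from memblock instead.
 */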
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
				      bool early)
{
	if (pmd_none(READ_ONCE(*pmdp))) {
		phys_addr_t pte_phys = early ?
				__pa_symbol(kasan_early_shadow_pte)
					: kasan_alloc_zeroed_page(node);
		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
	}

	return early ? pte_offset_kimg(pmdp, addr)
		     : pte_offset_kernel(pmdp, addr);
}

static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
				      bool early)
{
	if (pud_none(READ_ONCE(*pudp))) {
		phys_addr_t pmd_phys = early ?
				__pa_symbol(kasan_early_shadow_pmd)
					: kasan_alloc_zeroed_page(node);
		__pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE);
	}

	return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
}

static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node,
				      bool early)
{
	if (p4d_none(READ_ONCE(*p4dp))) {
		phys_addr_t pud_phys = early ?
				__pa_symbol(kasan_early_shadow_pud)
					: kasan_alloc_zeroed_page(node);
		__p4d_populate(p4dp, pud_phys, PMD_TYPE_TABLE);
	}

	return early ? pud_offset_kimg(p4dp, addr) : pud_offset(p4dp, addr);
}

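/*
 * The populate loops below stop as soon as the *next* entry is already
 * present (the p*_none() checks in the loop conditions), so ranges whose
 * shadow was set up by an earlier call are not walked again.
 */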
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);

	do {
		phys_addr_t page_phys = early ?
				__pa_symbol(kasan_early_shadow_page)
					: kasan_alloc_raw_page(node);
		if (!early)
			memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
		next = addr + PAGE_SIZE;
		set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
	} while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
}

static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);

	do {
		next = pmd_addr_end(addr, end);
		kasan_pte_populate(pmdp, addr, next, node, early);
	} while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
}

static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);

	do {
		next = pud_addr_end(addr, end);
		kasan_pmd_populate(pudp, addr, next, node, early);
	} while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
}

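/*
 * arm64 folds the p4d level into the pgd (no fifth translation level), so
 * p4d_offset() below effectively hands back the pgd entry itself.
 */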
static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	p4d_t *p4dp = p4d_offset(pgdp, addr);

	do {
		next = p4d_addr_end(addr, end);
		kasan_pud_populate(p4dp, addr, next, node, early);
	} while (p4dp++, addr = next, addr != end);
}

static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
				      int node, bool early)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_p4d_populate(pgdp, addr, next, node, early);
	} while (pgdp++, addr = next, addr != end);
}

/* The early shadow maps everything to a single page of zeroes */
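/*
 * kasan_mem_to_shadow() maps an address to
 * (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET. The first
 * BUILD_BUG_ON below checks that the shadow of the very top of the address
 * space lands exactly at KASAN_SHADOW_END; the rest ensure the shadow
 * region is PGDIR-aligned so clear_pgds() and kasan_copy_shadow() can
 * operate on whole pgd entries.
 */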
asmlinkage void __init kasan_early_init(void)
{
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
			   true);
}

/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
				      int node)
{
	kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}

/*
 * Copy the current shadow region into a new pgdir.
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
	pgd_t *pgdp, *pgdp_new, *pgdp_end;

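	/*
	 * Only the top-level (pgd) entries are copied, so the new pgdir
	 * shares the lower-level shadow tables with swapper_pg_dir.
	 */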
	pgdp = pgd_offset_k(KASAN_SHADOW_START);
	pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
	pgdp_new = pgd_offset_pgd(pgdir, KASAN_SHADOW_START);
	do {
		set_pgd(pgdp_new, READ_ONCE(*pgdp));
	} while (pgdp++, pgdp_new++, pgdp != pgdp_end);
}

static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	/*
	 * Remove references to kasan page tables from
	 * swapper_pg_dir. pgd_clear() can't be used
	 * here because it's a nop on 2- and 3-level pagetable setups.
	 */
	for (; start < end; start += PGDIR_SIZE)
		set_pgd(pgd_offset_k(start), __pgd(0));
}

static void __init kasan_init_shadow(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start, mod_shadow_end;
	u64 vmalloc_shadow_end;
	phys_addr_t pa_start, pa_end;
	u64 i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(KERNEL_START) & PAGE_MASK;
	kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(KERNEL_END));

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

	vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);

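	/*
	 * Shadow layout, roughly: the kernel image and all memblock memory
	 * get writable shadow pages (kasan_map_populate() below), while
	 * ranges that can never hold instrumented objects are backed by
	 * the shared read-only zero shadow (kasan_populate_early_shadow()).
	 */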
	/*
	 * We are going to perform proper setup of shadow memory.
	 * First we must unmap the early shadow (the clear_pgds() call
	 * below). However, instrumented code can't execute without shadow
	 * memory, so tmp_pg_dir is used to keep the early shadow mapped
	 * until the full shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
			   early_pfn_to_nid(virt_to_pfn(lm_alias(KERNEL_START))));

	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
				   (void *)mod_shadow_start);

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		BUILD_BUG_ON(VMALLOC_START != MODULES_END);
		kasan_populate_early_shadow((void *)vmalloc_shadow_end,
					    (void *)KASAN_SHADOW_END);
	} else {
		kasan_populate_early_shadow((void *)kimg_shadow_end,
					    (void *)KASAN_SHADOW_END);
		if (kimg_shadow_start > mod_shadow_end)
			kasan_populate_early_shadow((void *)mod_shadow_end,
						    (void *)kimg_shadow_start);
	}

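	/*
	 * With CONFIG_KASAN_VMALLOC, shadow for the vmalloc area is
	 * allocated on demand by the vmalloc core, so only the range past
	 * vmalloc_shadow_end was given zero-shadow backing above.
	 */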
	for_each_mem_range(i, &pa_start, &pa_end) {
		void *start = (void *)__phys_to_virt(pa_start);
		void *end = (void *)__phys_to_virt(pa_end);

		if (start >= end)
			break;

		kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
				   (unsigned long)kasan_mem_to_shadow(end),
				   early_pfn_to_nid(virt_to_pfn(start)));
	}

	/*
	 * KAsan may reuse the contents of kasan_early_shadow_pte directly,
	 * so we should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			pfn_pte(sym_to_pfn(kasan_early_shadow_page),
				PAGE_KERNEL_RO));
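	/*
	 * KASAN_SHADOW_INIT is 0 for generic KASAN and the match-all tag
	 * 0xFF for sw-tags, so after the memset below the shared shadow
	 * page reads as "accessible" for every address it covers.
	 */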
	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
}

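/*
 * init_task.kasan_depth starts out as 1 so that reports are suppressed
 * while the shadow is still being set up; clearing it here arms KASAN
 * reporting for the init task and everything forked from it.
 */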
static void __init kasan_init_depth(void)
{
	init_task.kasan_depth = 0;
}

void __init kasan_init(void)
{
	kasan_init_shadow();
	kasan_init_depth();
#if defined(CONFIG_KASAN_GENERIC)
	/* CONFIG_KASAN_SW_TAGS also requires kasan_init_sw_tags(). */
	pr_info("KernelAddressSanitizer initialized\n");
#endif
}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */