// SPDX-License-Identifier: GPL-2.0
/* arch/x86/mm/kasan_init_64.c */
#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt

/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/memblock.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/e820/types.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/cpu_entry_area.h>

extern struct range pfn_mapped[E820_MAX_ENTRIES];

static p4d_t tmp_p4d_table[MAX_PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);

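/*
 * Allocate @size bytes of @size-aligned boot memory above MAX_DMA_ADDRESS
 * from memblock. Callers that have a fallback path pass
 * should_panic=false and handle a NULL return themselves.
 */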
static __init void *early_alloc(size_t size, int nid, bool should_panic)
{
	void *ptr = memblock_alloc_try_nid(size, size,
			__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);

	if (!ptr && should_panic)
		panic("%pS: Failed to allocate page, nid=%d from=%lx\n",
		      (void *)_RET_IP_, nid, __pa(MAX_DMA_ADDRESS));

	return ptr;
}

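/*
 * Populate one PMD worth of shadow memory. If the range covers exactly
 * one properly aligned PMD and the CPU supports 2M pages (PSE), try to
 * map it with a single huge page; otherwise fall back to 4K PTEs.
 */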
static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
				      unsigned long end, int nid)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_PSE) &&
		    ((end - addr) == PMD_SIZE) &&
		    IS_ALIGNED(addr, PMD_SIZE)) {
			p = early_alloc(PMD_SIZE, nid, false);
			if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
				return;
			else if (p)
				memblock_free(__pa(p), PMD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid, true);
		pmd_populate_kernel(&init_mm, pmd, p);
	}

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t entry;
		void *p;

		if (!pte_none(*pte))
			continue;

		p = early_alloc(PAGE_SIZE, nid, true);
		entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

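/*
 * Same idea one level up: use a 1G page (X86_FEATURE_GBPAGES) for a
 * whole, aligned PUD when possible, else recurse into the PMD level.
 */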
static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
				      unsigned long end, int nid)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none(*pud)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
		    ((end - addr) == PUD_SIZE) &&
		    IS_ALIGNED(addr, PUD_SIZE)) {
			p = early_alloc(PUD_SIZE, nid, false);
			if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
				return;
			else if (p)
				memblock_free(__pa(p), PUD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid, true);
		pud_populate(&init_mm, pud, p);
	}

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_large(*pmd))
			kasan_populate_pmd(pmd, addr, next, nid);
	} while (pmd++, addr = next, addr != end);
}

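/*
 * Allocate a PUD table for an empty p4d entry, then populate each PUD
 * in the range, skipping entries already mapped with a huge page.
 */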
static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
				      unsigned long end, int nid)
{
	pud_t *pud;
	unsigned long next;

	if (p4d_none(*p4d)) {
		void *p = early_alloc(PAGE_SIZE, nid, true);

		p4d_populate(&init_mm, p4d, p);
	}

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_large(*pud))
			kasan_populate_pud(pud, addr, next, nid);
	} while (pud++, addr = next, addr != end);
}

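/*
 * Top of the recursion: make sure the pgd entry points to a p4d table
 * and populate every p4d in the range.
 */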
static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
				      unsigned long end, int nid)
{
	void *p;
	p4d_t *p4d;
	unsigned long next;

	if (pgd_none(*pgd)) {
		p = early_alloc(PAGE_SIZE, nid, true);
		pgd_populate(&init_mm, pgd, p);
	}

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		kasan_populate_p4d(p4d, addr, next, nid);
	} while (p4d++, addr = next, addr != end);
}

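/*
 * Populate real (non-zero) shadow memory for [addr, end), rounding the
 * range out to page boundaries first.
 */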
static void __init kasan_populate_shadow(unsigned long addr, unsigned long end,
					 int nid)
{
	pgd_t *pgd;
	unsigned long next;

	addr = addr & PAGE_MASK;
	end = round_up(end, PAGE_SIZE);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_populate_pgd(pgd, addr, next, nid);
	} while (pgd++, addr = next, addr != end);
}

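/*
 * Populate the shadow for one range of mapped physical memory,
 * allocating the backing pages on the range's own NUMA node.
 */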
static void __init map_range(struct range *range)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

	kasan_populate_shadow(start, end, early_pfn_to_nid(range->start));
}

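/*
 * Unmap the early shadow for [start, end) so the range can be
 * repopulated with real shadow memory. KASAN_SHADOW_END is not
 * PGD-aligned under 5-level paging, so the tail is cleared one P4D
 * at a time.
 */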
static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	pgd_t *pgd;
	/* See comment in kasan_init() */
	unsigned long pgd_end = end & PGDIR_MASK;

	for (; start < pgd_end; start += PGDIR_SIZE) {
		pgd = pgd_offset_k(start);
		/*
		 * With folded p4d, pgd_clear() is a nop: use p4d_clear()
		 * instead.
		 */
		if (pgtable_l5_enabled())
			pgd_clear(pgd);
		else
			p4d_clear(p4d_offset(pgd, start));
	}

	pgd = pgd_offset_k(start);
	for (; start < end; start += P4D_SIZE)
		p4d_clear(p4d_offset(pgd, start));
}

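/*
 * Like p4d_offset(), but usable before the direct mapping is set up:
 * translate the physical address stored in the pgd through the kernel
 * text mapping (__START_KERNEL_map - phys_base) rather than __va().
 */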
static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
{
	unsigned long p4d;

	if (!pgtable_l5_enabled())
		return (p4d_t *)pgd;

	p4d = pgd_val(*pgd) & PTE_PFN_MASK;
	p4d += __START_KERNEL_map - phys_base;
	return (p4d_t *)p4d + p4d_index(addr);
}

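/*
 * Point every empty entry in the range at the statically allocated
 * early shadow tables, so that all shadow reads hit
 * kasan_early_shadow_page until the real shadow is built.
 */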
static void __init kasan_early_p4d_populate(pgd_t *pgd,
		unsigned long addr,
		unsigned long end)
{
	pgd_t pgd_entry;
	p4d_t *p4d, p4d_entry;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pgd_entry = __pgd(_KERNPG_TABLE |
					__pa_nodebug(kasan_early_shadow_p4d));
		set_pgd(pgd, pgd_entry);
	}

	p4d = early_p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		if (!p4d_none(*p4d))
			continue;

		p4d_entry = __p4d(_KERNPG_TABLE |
					__pa_nodebug(kasan_early_shadow_pud));
		set_p4d(p4d, p4d_entry);
	} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
}

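/*
 * Map the entire [KASAN_SHADOW_START, KASAN_SHADOW_END) region to the
 * static early shadow tables in the given top-level page table.
 */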
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	/* See comment in kasan_init() */
	unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_early_p4d_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

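/*
 * Allocate just the p4d-level tables for the vmalloc shadow; the lower
 * levels are populated on demand as vmalloc shadow is needed.
 */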
static void __init kasan_shallow_populate_p4ds(pgd_t *pgd,
					       unsigned long addr,
					       unsigned long end)
{
	p4d_t *p4d;
	unsigned long next;
	void *p;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		if (p4d_none(*p4d)) {
			p = early_alloc(PAGE_SIZE, NUMA_NO_NODE, true);
			p4d_populate(&init_mm, p4d, p);
		}
	} while (p4d++, addr = next, addr != end);
}

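/*
 * CONFIG_KASAN_VMALLOC: populate only the top page-table levels of the
 * vmalloc shadow so the entries get synced into every pgd; the rest is
 * filled in lazily as vmalloc memory is used.
 */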
static void __init kasan_shallow_populate_pgds(void *start, void *end)
{
	unsigned long addr, next;
	pgd_t *pgd;
	void *p;

	addr = (unsigned long)start;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, (unsigned long)end);

		if (pgd_none(*pgd)) {
			p = early_alloc(PAGE_SIZE, NUMA_NO_NODE, true);
			pgd_populate(&init_mm, pgd, p);
		}

		/*
		 * We need to populate p4ds so they are synced when running
		 * in four-level mode - see sync_global_pgds_l4().
		 */
		kasan_shallow_populate_p4ds(pgd, addr, next);
	} while (pgd++, addr = next, addr != (unsigned long)end);
}

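/*
 * Called very early in boot: build the static early shadow tables,
 * where every level points at a single zero page, and hook them into
 * both the early and the final top-level page tables. Until
 * kasan_init() runs, every shadow lookup therefore reads zero, i.e.
 * "not poisoned".
 */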
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_early_shadow_page) |
				__PAGE_KERNEL | _PAGE_ENC;
	pmdval_t pmd_val = __pa_nodebug(kasan_early_shadow_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_early_shadow_pmd) | _KERNPG_TABLE;
	p4dval_t p4d_val = __pa_nodebug(kasan_early_shadow_pud) | _KERNPG_TABLE;

	/* Mask out unsupported __PAGE_KERNEL bits: */
	pte_val &= __default_kernel_pte_mask;
	pmd_val &= __default_kernel_pte_mask;
	pud_val &= __default_kernel_pte_mask;
	p4d_val &= __default_kernel_pte_mask;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_early_shadow_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_early_shadow_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_early_shadow_pud[i] = __pud(pud_val);

	for (i = 0; pgtable_l5_enabled() && i < PTRS_PER_P4D; i++)
		kasan_early_shadow_p4d[i] = __p4d(p4d_val);

	kasan_map_early_shadow(early_top_pgt);
	kasan_map_early_shadow(init_top_pgt);
}

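/*
 * Build the real shadow mapping. With generic KASAN each byte of
 * shadow covers 8 bytes of address space, roughly:
 *
 *	shadow = (void *)((addr >> 3) + KASAN_SHADOW_OFFSET);
 *
 * (see kasan_mem_to_shadow()). Regions that are never legitimately
 * accessed keep the write-protected zero shadow page; regions that
 * back real memory get freshly allocated shadow pages.
 */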
void __init kasan_init(void)
{
	int i;
	void *shadow_cpu_entry_begin, *shadow_cpu_entry_end;

	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));

	/*
	 * We use the same shadow offset for 4- and 5-level paging to
	 * facilitate boot-time switching between paging modes.
	 * As a result, in 5-level paging mode KASAN_SHADOW_START and
	 * KASAN_SHADOW_END are not aligned to the PGD boundary.
	 *
	 * KASAN_SHADOW_START doesn't share its PGD with anything else.
	 * We claim the whole PGD entry to make things easier.
	 *
	 * KASAN_SHADOW_END lands in the last PGD entry and collides with
	 * a bunch of things like kernel code, modules, the EFI mapping, etc.
	 * We need to take extra steps to not overwrite them.
	 */
	if (pgtable_l5_enabled()) {
		void *ptr;

		ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
		memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table));
		set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)],
				__pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE));
	}

	load_cr3(early_top_pgt);
	__flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);

	kasan_populate_early_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

	for (i = 0; i < E820_MAX_ENTRIES; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		map_range(&pfn_mapped[i]);
	}

	shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
	shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
	shadow_cpu_entry_begin = (void *)round_down(
			(unsigned long)shadow_cpu_entry_begin, PAGE_SIZE);

	shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
					CPU_ENTRY_AREA_MAP_SIZE);
	shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
	shadow_cpu_entry_end = (void *)round_up(
			(unsigned long)shadow_cpu_entry_end, PAGE_SIZE);

	kasan_populate_early_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		kasan_mem_to_shadow((void *)VMALLOC_START));

	/*
	 * If we're in full vmalloc mode, don't back vmalloc space with early
	 * shadow pages. Instead, prepopulate pgds/p4ds so they are synced to
	 * the global table and we can populate the lower levels on demand.
	 */
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_shallow_populate_pgds(
			kasan_mem_to_shadow((void *)VMALLOC_START),
			kasan_mem_to_shadow((void *)VMALLOC_END));
	else
		kasan_populate_early_shadow(
			kasan_mem_to_shadow((void *)VMALLOC_START),
			kasan_mem_to_shadow((void *)VMALLOC_END));

	kasan_populate_early_shadow(
		kasan_mem_to_shadow((void *)VMALLOC_END + 1),
		shadow_cpu_entry_begin);

	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
			      (unsigned long)shadow_cpu_entry_end, 0);

	kasan_populate_early_shadow(shadow_cpu_entry_end,
			kasan_mem_to_shadow((void *)__START_KERNEL_map));

	kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
			      (unsigned long)kasan_mem_to_shadow(_end),
			      early_pfn_to_nid(__pa(_stext)));

	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)MODULES_END),
					(void *)KASAN_SHADOW_END);

	load_cr3(init_top_pgt);
	__flush_tlb_all();

	/*
	 * kasan_early_shadow_page has been used as early shadow memory, thus
	 * it may contain some garbage. Now we can clear and write protect it,
	 * since after the TLB flush no one should write to it.
	 */
	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte;
		pgprot_t prot;

		prot = __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC);
		pgprot_val(prot) &= __default_kernel_pte_mask;

		pte = __pte(__pa(kasan_early_shadow_page) | pgprot_val(prot));
		set_pte(&kasan_early_shadow_pte[i], pte);
	}
	/* Flush TLBs again to be sure that the write protection is applied. */
	__flush_tlb_all();

	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}