/*
 * Xtensa KASAN shadow map initialization
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2017 Cadence Design Systems Inc.
 */
10*4882a593Smuzhiyun
11*4882a593Smuzhiyun #include <linux/memblock.h>
12*4882a593Smuzhiyun #include <linux/init_task.h>
13*4882a593Smuzhiyun #include <linux/kasan.h>
14*4882a593Smuzhiyun #include <linux/kernel.h>
15*4882a593Smuzhiyun #include <asm/initialize_mmu.h>
16*4882a593Smuzhiyun #include <asm/tlbflush.h>
17*4882a593Smuzhiyun #include <asm/traps.h>
18*4882a593Smuzhiyun
/*
 * kasan_early_init - build a minimal KASAN shadow mapping for early boot.
 *
 * Every page of the shadow region is mapped (writable, for now) to the
 * single shared kasan_early_shadow_page via one shared PTE table
 * (kasan_early_shadow_pte), so that shadow accesses made before
 * kasan_init() runs hit valid memory instead of faulting.
 */
void __init kasan_early_init(void)
{
	unsigned long vaddr = KASAN_SHADOW_START;
	pmd_t *pmd = pmd_off_k(vaddr);
	int i;

	/* Point every slot of the early PTE table at the shared zero page. */
	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
				PAGE_KERNEL));

	/*
	 * Install that one PTE table into every PMD entry covering the
	 * shadow region. The entries must still be empty at this point.
	 */
	for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) {
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd((unsigned long)kasan_early_shadow_pte));
	}
	/*
	 * NOTE(review): presumably safe to set up exception handling only
	 * once the shadow map exists — confirm against early_trap_init().
	 */
	early_trap_init();
}
36*4882a593Smuzhiyun
/*
 * populate - back the shadow range [start, end) with real, writable pages.
 *
 * Allocates one contiguous array of PTEs covering the whole range, backs
 * each shadow page with a freshly allocated physical page, installs the
 * PTE tables into the kernel PMDs, then zero-fills the new shadow memory.
 * start/end are assumed to be PMD-aligned (n_pages is consumed in whole
 * PTRS_PER_PTE groups) — TODO confirm with callers.
 */
static void __init populate(void *start, void *end)
{
	unsigned long n_pages = (end - start) / PAGE_SIZE;
	unsigned long n_pmds = n_pages / PTRS_PER_PTE;
	unsigned long i, j;
	unsigned long vaddr = (unsigned long)start;
	pmd_t *pmd = pmd_off_k(vaddr);
	/* One flat allocation holds the PTE tables for all n_pmds PMDs. */
	pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);

	if (!pte)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, n_pages * sizeof(pte_t), PAGE_SIZE);

	pr_debug("%s: %p - %p\n", __func__, start, end);

	/* Allocate a backing physical page for every shadow PTE. */
	for (i = j = 0; i < n_pmds; ++i) {
		int k;

		for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
			phys_addr_t phys =
				memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE,
							  0,
							  MEMBLOCK_ALLOC_ANYWHERE);

			if (!phys)
				panic("Failed to allocate page table page\n");

			set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
		}
	}

	/* Hook each PTRS_PER_PTE-sized slice of the PTE array into its PMD. */
	for (i = 0; i < n_pmds ; ++i, pte += PTRS_PER_PTE)
		set_pmd(pmd + i, __pmd((unsigned long)pte));

	/*
	 * Flush stale (early shadow) translations before touching the range
	 * through the new mappings, then zero the fresh shadow memory.
	 */
	local_flush_tlb_all();
	memset(start, 0, end - start);
}
74*4882a593Smuzhiyun
/*
 * kasan_init - switch from the early shared shadow page to the real
 * per-page shadow map and enable KASAN error reporting.
 */
void __init kasan_init(void)
{
	int i;

	/* Sanity-check the compile-time shadow-layout constants. */
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_START -
		     (KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT));
	BUILD_BUG_ON(VMALLOC_START < KASAN_START_VADDR);

	/*
	 * Replace shadow map pages that cover addresses from VMALLOC area
	 * start to the end of KSEG with clean writable pages.
	 */
	populate(kasan_mem_to_shadow((void *)VMALLOC_START),
		 kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR));

	/*
	 * Write protect kasan_early_shadow_page and zero-initialize it again.
	 */
	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
				PAGE_KERNEL_RO));

	/* Drop stale writable translations before the final zeroing. */
	local_flush_tlb_all();
	memset(kasan_early_shadow_page, 0, PAGE_SIZE);

	/* At this point kasan is fully initialized. Enable error messages. */
	current->kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}
105