// SPDX-License-Identifier: GPL-2.0

#define DISABLE_BRANCH_PROFILING

#include <linux/kasan.h>
#include <linux/memblock.h>
#include <mm/mmu_decl.h>

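/*
 * Set up the KASAN shadow mapping for the region [start, start + size):
 * cover as much of the shadow as possible with BAT block mappings and
 * map whatever is left page by page.
 */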
int __init kasan_init_region(void *start, size_t size)
{
	unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
	unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
	unsigned long k_nobat = k_start;
	unsigned long k_cur;
	phys_addr_t phys;
	int ret;

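	/*
	 * Cover as much of the shadow as possible with BAT mappings. Each
	 * block is backed by freshly allocated, naturally aligned physical
	 * memory and must be at least 128K, the minimum BAT block size.
	 */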
	while (k_nobat < k_end) {
		unsigned int k_size = bat_block_size(k_nobat, k_end);
		int idx = find_free_bat();

		if (idx == -1)
			break;
		if (k_size < SZ_128K)
			break;
		phys = memblock_phys_alloc_range(k_size, k_size, 0,
						 MEMBLOCK_ALLOC_ANYWHERE);
		if (!phys)
			break;

		setbat(idx, k_nobat, phys, k_size, PAGE_KERNEL);
		k_nobat += k_size;
	}
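	/* If any BAT was set up, make the new entries effective */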
	if (k_nobat != k_start)
		update_bats();

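	/*
	 * Allocate backing memory for the part of the shadow that no BAT
	 * could cover; it is mapped with page tables further down.
	 */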
	if (k_nobat < k_end) {
		phys = memblock_phys_alloc_range(k_end - k_nobat, PAGE_SIZE, 0,
						 MEMBLOCK_ALLOC_ANYWHERE);
		if (!phys)
			return -ENOMEM;
	}

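	/* Allocate page tables covering the whole shadow range */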
	ret = kasan_init_shadow_page_tables(k_start, k_end);
	if (ret)
		return ret;

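	/* The BAT-covered part needs no PTEs: drop the early shadow ones */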
	kasan_update_early_region(k_start, k_nobat, __pte(0));

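	/* Map the remaining shadow pages onto the memory allocated above */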
	for (k_cur = k_nobat; k_cur < k_end; k_cur += PAGE_SIZE) {
		pmd_t *pmd = pmd_off_k(k_cur);
		pte_t pte = pfn_pte(PHYS_PFN(phys + k_cur - k_nobat), PAGE_KERNEL);

		__set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
	}
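	/* Flush stale translations and zero the new shadow (nothing poisoned) */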
	flush_tlb_kernel_range(k_start, k_end);
	memset(kasan_mem_to_shadow(start), 0, k_end - k_start);

	return 0;
}