/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel page table mapping
 *
 * Copyright (C) 2015 ARM Ltd.
 */

#ifndef __ASM_KERNEL_PGTABLE_H
#define __ASM_KERNEL_PGTABLE_H

#include <asm/pgtable-hwdef.h>
#include <asm/sparsemem.h>

/*
 * The linear mapping and the start of memory are both 2M aligned (per
 * the arm64 booting.txt requirements). Hence we can use section mapping
 * with 4K (section size = 2M) but not with 16K (section size = 32M) or
 * 64K (section size = 512M).
 */
#ifdef CONFIG_ARM64_4K_PAGES
#define ARM64_SWAPPER_USES_SECTION_MAPS 1
#else
#define ARM64_SWAPPER_USES_SECTION_MAPS 0
#endif

/*
 * The idmap and swapper page tables need some space reserved in the kernel
 * image. Both require pgd, pud (4 levels only) and pmd tables to (section)
 * map the kernel. With the 64K page configuration, swapper and idmap need to
 * map to pte level. The swapper also maps the FDT (see __create_page_tables
 * for more information). Note that the number of ID map translation levels
 * could be increased on the fly if system RAM is out of reach for the default
 * VA range, so pages required to map highest possible PA are reserved in all
 * cases.
 */
#if ARM64_SWAPPER_USES_SECTION_MAPS
#define SWAPPER_PGTABLE_LEVELS	(CONFIG_PGTABLE_LEVELS - 1)
#define IDMAP_PGTABLE_LEVELS	(ARM64_HW_PGTABLE_LEVELS(PHYS_MASK_SHIFT) - 1)
#else
#define SWAPPER_PGTABLE_LEVELS	(CONFIG_PGTABLE_LEVELS)
#define IDMAP_PGTABLE_LEVELS	(ARM64_HW_PGTABLE_LEVELS(PHYS_MASK_SHIFT))
#endif

/*
 * If KASLR is enabled, then an offset K is added to the kernel address
 * space. The bottom 21 bits of this offset are zero to guarantee 2MB
 * alignment for PA and VA.
 *
 * For each pagetable level of the swapper, we know that the shift will
 * be larger than 21 (for the 4KB granule case we use section maps thus
 * the smallest shift is actually 30) thus there is the possibility that
 * KASLR can increase the number of pagetable entries by 1, so we make
 * room for this extra entry.
 *
 * Note KASLR cannot increase the number of required entries for a level
 * by more than one because it increments both the virtual start and end
 * addresses equally (the extra entry comes from the case where the end
 * address is just pushed over a boundary and the start address isn't).
 */
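/*
 * Editorial illustration (not part of the original header; values assume
 * the common 4K-page, 48-bit VA configuration): PGDIR_SHIFT is 39, so
 * each pgd entry spans 512GB, and an image at [vstart, vend) needs
 * ((vend - 1) >> 39) - (vstart >> 39) + 1 pgd entries. A KASLR offset
 * moves vstart and vend by the same 2MB-aligned amount, so it can push
 * (vend - 1) across a 512GB boundary that vstart does not cross, adding
 * at most one entry per level - hence the "+ EARLY_KASLR" term in
 * EARLY_ENTRIES() below.
 */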
#ifdef CONFIG_RANDOMIZE_BASE
#define EARLY_KASLR	(1)
#else
#define EARLY_KASLR	(0)
#endif

#define EARLY_ENTRIES(vstart, vend, shift) \
	((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + EARLY_KASLR)

#define EARLY_PGDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT))

#if SWAPPER_PGTABLE_LEVELS > 3
#define EARLY_PUDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PUD_SHIFT))
#else
#define EARLY_PUDS(vstart, vend) (0)
#endif

#if SWAPPER_PGTABLE_LEVELS > 2
#define EARLY_PMDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, SWAPPER_TABLE_SHIFT))
#else
#define EARLY_PMDS(vstart, vend) (0)
#endif

#define EARLY_PAGES(vstart, vend) (1			/* PGDIR page */				\
			+ EARLY_PGDS((vstart), (vend))	/* each PGDIR needs a next level page table */	\
			+ EARLY_PUDS((vstart), (vend))	/* each PUD needs a next level page table */	\
			+ EARLY_PMDS((vstart), (vend)))	/* each PMD needs a next level page table */
#define INIT_DIR_SIZE	(PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end))
#define IDMAP_DIR_SIZE	(IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
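/*
 * Editorial illustration (not part of the original header; assumes the
 * common 4K-page, 48-bit VA configuration without KASLR): section maps
 * give SWAPPER_PGTABLE_LEVELS = 3, so an image crossing no 512GB (pgd)
 * or 1GB (pud) boundary needs EARLY_PGDS = 1 and EARLY_PMDS = 1, while
 * EARLY_PUDS contributes 0. EARLY_PAGES = 1 + 1 + 0 + 1 = 3, making
 * INIT_DIR_SIZE three 4K pages (12K). With CONFIG_RANDOMIZE_BASE,
 * EARLY_KASLR adds one entry per level: 1 + 2 + 0 + 2 = 5 pages.
 */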
/* Initial memory map size */
#if ARM64_SWAPPER_USES_SECTION_MAPS
#define SWAPPER_BLOCK_SHIFT	SECTION_SHIFT
#define SWAPPER_BLOCK_SIZE	SECTION_SIZE
#define SWAPPER_TABLE_SHIFT	PUD_SHIFT
#else
#define SWAPPER_BLOCK_SHIFT	PAGE_SHIFT
#define SWAPPER_BLOCK_SIZE	PAGE_SIZE
#define SWAPPER_TABLE_SHIFT	PMD_SHIFT
#endif

/* The size of the initial kernel direct mapping */
#define SWAPPER_INIT_MAP_SIZE	(_AC(1, UL) << SWAPPER_TABLE_SHIFT)

/*
 * Initial memory map attributes.
 */
#define SWAPPER_PTE_FLAGS	(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define SWAPPER_PMD_FLAGS	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)

#if ARM64_SWAPPER_USES_SECTION_MAPS
#define SWAPPER_MM_MMUFLAGS	(PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
#else
#define SWAPPER_MM_MMUFLAGS	(PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
#endif

/*
 * To make optimal use of block mappings when laying out the linear
 * mapping, round down the base of physical memory to a size that can
 * be mapped efficiently, i.e., either PUD_SIZE (4k granule) or PMD_SIZE
 * (64k granule), or a multiple that can be mapped using contiguous bits
 * in the page tables: 32 * PMD_SIZE (16k granule)
 */
#if defined(CONFIG_ARM64_4K_PAGES)
#define ARM64_MEMSTART_SHIFT		PUD_SHIFT
#elif defined(CONFIG_ARM64_16K_PAGES)
#define ARM64_MEMSTART_SHIFT		(PMD_SHIFT + 5)
#else
#define ARM64_MEMSTART_SHIFT		PMD_SHIFT
#endif

/*
 * sparsemem vmemmap imposes an additional requirement on the alignment of
 * memstart_addr, due to the fact that the base of the vmemmap region
 * has a direct correspondence with the base of physical memory, and so
 * needs to appear sufficiently aligned in the virtual address space.
 */
#if defined(CONFIG_SPARSEMEM_VMEMMAP) && ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
#define ARM64_MEMSTART_ALIGN	(1UL << SECTION_SIZE_BITS)
#else
#define ARM64_MEMSTART_ALIGN	(1UL << ARM64_MEMSTART_SHIFT)
#endif

#endif /* __ASM_KERNEL_PGTABLE_H */
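/*
 * Editorial illustration (not part of the original header; the concrete
 * shift values are assumptions for the default VA configurations): a 4K
 * granule gives ARM64_MEMSTART_SHIFT = PUD_SHIFT = 30 (1GB rounding); a
 * 64K granule gives PMD_SHIFT = 29 (512MB); a 16K granule gives
 * PMD_SHIFT + 5 = 25 + 5 = 30, i.e. 32 * PMD_SIZE = 32 * 32MB = 1GB,
 * which a contiguous-bit mapping of 32 adjacent PMD entries can cover.
 * Assuming SECTION_SIZE_BITS is 30, the vmemmap clause above only raises
 * the alignment for configurations whose shift is below 30 (e.g. the
 * 64K-granule case).
 */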