/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2016 ARM Ltd.
 */
#ifndef __ASM_PGTABLE_PROT_H
#define __ASM_PGTABLE_PROT_H

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

#include <linux/const.h>

/*
 * Software defined PTE bits definition.
 */
#define PTE_WRITE		(PTE_DBM)		 /* same as DBM (51) */
#define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
#define PTE_DEVMAP		(_AT(pteval_t, 1) << 57)
#define PTE_PROT_NONE		(_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */

/*
 * This bit indicates that the entry is present i.e. pmd_page()
 * still points to a valid huge page in memory even if the pmd
 * has been invalidated.
 */
#define PMD_PRESENT_INVALID	(_AT(pteval_t, 1) << 59) /* only when !PMD_SECT_VALID */

#ifndef __ASSEMBLY__

#include <asm/cpufeature.h>
#include <asm/pgtable-types.h>

extern bool arm64_use_ng_mappings;

#define _PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define _PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)

#define PTE_MAYBE_NG		(arm64_use_ng_mappings ? PTE_NG : 0)
#define PMD_MAYBE_NG		(arm64_use_ng_mappings ? PMD_SECT_NG : 0)

/*
 * If we have userspace only BTI we don't want to mark kernel pages
 * guarded even if the system does support BTI.
 */
#ifdef CONFIG_ARM64_BTI_KERNEL
#define PTE_MAYBE_GP		(system_supports_bti() ? PTE_GP : 0)
#else
#define PTE_MAYBE_GP		0
#endif

#define PROT_DEFAULT		(_PROT_DEFAULT | PTE_MAYBE_NG)
#define PROT_SECT_DEFAULT	(_PROT_SECT_DEFAULT | PMD_MAYBE_NG)

#define PROT_DEVICE_nGnRnE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
#define PROT_NORMAL_WT		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
#define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
#define PROT_NORMAL_TAGGED	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_TAGGED))

#define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
#define PROT_SECT_NORMAL_EXEC	(PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))

#define _PAGE_DEFAULT		(_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))

#define PAGE_KERNEL		__pgprot(PROT_NORMAL)
#define PAGE_KERNEL_RO		__pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
#define PAGE_KERNEL_ROX		__pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
#define PAGE_KERNEL_EXEC	__pgprot(PROT_NORMAL & ~PTE_PXN)
#define PAGE_KERNEL_EXEC_CONT	__pgprot((PROT_NORMAL & ~PTE_PXN) | PTE_CONT)

#define PAGE_S2_MEMATTR(attr, has_fwb)					\
	({								\
		u64 __val;						\
		if (has_fwb)						\
			__val = PTE_S2_MEMATTR(MT_S2_FWB_ ## attr);	\
		else							\
			__val = PTE_S2_MEMATTR(MT_S2_ ## attr);		\
		__val;							\
	})

#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
/* shared+writable pages are clean by default, hence PTE_RDONLY|PTE_WRITE */
#define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
#define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
#define PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)

#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_READONLY
#define __P011  PAGE_READONLY
#define __P100  PAGE_READONLY_EXEC
#define __P101  PAGE_READONLY_EXEC
#define __P110  PAGE_READONLY_EXEC
#define __P111  PAGE_READONLY_EXEC

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY_EXEC
#define __S101  PAGE_READONLY_EXEC
#define __S110  PAGE_SHARED_EXEC
#define __S111  PAGE_SHARED_EXEC

#endif /* __ASSEMBLY__ */

#endif /* __ASM_PGTABLE_PROT_H */
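
/*
 * Illustrative sketch, not part of the original header: how the __P*/__S*
 * entries above are typically consumed.  In kernels of this generation the
 * generic mm code (roughly mm/mmap.c) keeps a 16-entry table indexed by the
 * low VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits of vma->vm_flags:
 *
 *	pgprot_t protection_map[16] = {
 *		__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
 *		__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
 *	};
 *
 * and vm_get_page_prot() is essentially a lookup of
 * protection_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)].
 * That is why a private writable mapping resolves to PAGE_READONLY (__P011):
 * the page is mapped read-only and copied on the first write fault, whereas
 * a shared writable mapping resolves to PAGE_SHARED, which carries
 * PTE_RDONLY | PTE_WRITE so the pte starts out clean and the DBM/dirty
 * machinery defined at the top of this file makes it writable once dirtied.
 */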