/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/mm/proc.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/smp.h>
#include <asm/sysreg.h>

#ifdef CONFIG_ARM64_64K_PAGES
#define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
#elif defined(CONFIG_ARM64_16K_PAGES)
#define TCR_TG_FLAGS	TCR_TG0_16K | TCR_TG1_16K
#else /* CONFIG_ARM64_4K_PAGES */
#define TCR_TG_FLAGS	TCR_TG0_4K | TCR_TG1_4K
#endif

#ifdef CONFIG_RANDOMIZE_BASE
#define TCR_KASLR_FLAGS	TCR_NFD1
#else
#define TCR_KASLR_FLAGS	0
#endif

#define TCR_SMP_FLAGS	TCR_SHARED

/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA

#ifdef CONFIG_KASAN_SW_TAGS
#define TCR_KASAN_SW_FLAGS	TCR_TBI1 | TCR_TBID1
#else
#define TCR_KASAN_SW_FLAGS	0
#endif

#ifdef CONFIG_KASAN_HW_TAGS
#define TCR_MTE_FLAGS	SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1
#else
/*
 * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
 * TBI being enabled at EL1.
 */
#define TCR_MTE_FLAGS	TCR_TBI1 | TCR_TBID1
#endif

/*
 * Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and
 * changed during __cpu_setup to Normal Tagged if the system supports MTE.
 */
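/*
 * Layout note (a reading aid, not new behaviour): MAIR_EL1 is an array of
 * eight 8-bit attribute fields, with Attr<n> at bits [8n+7:8n].
 * MAIR_ATTRIDX() shifts an 8-bit attribute encoding into the field selected
 * by the MT_* memory type index, so the OR-chain below populates one field
 * per memory type.
 */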
#define MAIR_EL1_SET							\
	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |			\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_iNC_oWB, MT_NORMAL_iNC_oWB))

#ifdef CONFIG_CPU_PM
/**
 * cpu_do_suspend - save CPU registers context
 *
 * x0: virtual address of context pointer
 *
 * This must be kept in sync with struct cpu_suspend_ctx in <asm/suspend.h>.
 */
SYM_FUNC_START(cpu_do_suspend)
	mrs	x2, tpidr_el0
	mrs	x3, tpidrro_el0
	mrs	x4, contextidr_el1
	mrs	x5, osdlr_el1
	mrs	x6, cpacr_el1
	mrs	x7, tcr_el1
	mrs	x8, vbar_el1
	mrs	x9, mdscr_el1
	mrs	x10, oslsr_el1
	mrs	x11, sctlr_el1
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	x12, tpidr_el1
alternative_else
	mrs	x12, tpidr_el2
alternative_endif
	mrs	x13, sp_el0
	stp	x2, x3, [x0]
	stp	x4, x5, [x0, #16]
	stp	x6, x7, [x0, #32]
	stp	x8, x9, [x0, #48]
	stp	x10, x11, [x0, #64]
	stp	x12, x13, [x0, #80]
	/*
	 * Save x18 as it may be used as a platform register, e.g. by shadow
	 * call stack.
	 */
	str	x18, [x0, #96]
	ret
SYM_FUNC_END(cpu_do_suspend)
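/*
 * Layout sketch of the context saved above, assuming the current
 * <asm/suspend.h> definition (that header remains authoritative): thirteen
 * consecutive u64 slots at offsets 0..96, i.e. x2..x13 stored in pairs
 * followed by x18.
 *
 *	struct cpu_suspend_ctx {
 *		u64 ctx_regs[NR_CTX_REGS];	// NR_CTX_REGS == 13 here
 *		u64 sp;
 *	} __aligned(16);
 */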
/**
 * cpu_do_resume - restore CPU register context
 *
 * x0: Address of context pointer
 */
	.pushsection ".idmap.text", "awx"
SYM_FUNC_START(cpu_do_resume)
	ldp	x2, x3, [x0]
	ldp	x4, x5, [x0, #16]
	ldp	x6, x8, [x0, #32]
	ldp	x9, x10, [x0, #48]
	ldp	x11, x12, [x0, #64]
	ldp	x13, x14, [x0, #80]
	/*
	 * Restore x18, as it may be used as a platform register, and clear
	 * the buffer to minimize the risk of exposure when used for shadow
	 * call stack.
	 */
	ldr	x18, [x0, #96]
	str	xzr, [x0, #96]
	msr	tpidr_el0, x2
	msr	tpidrro_el0, x3
	msr	contextidr_el1, x4
	msr	cpacr_el1, x6

	/* Don't change t0sz here, mask those bits when restoring */
	mrs	x7, tcr_el1
	bfi	x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH

	msr	tcr_el1, x8
	msr	vbar_el1, x9

	/*
	 * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
	 * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
	 * exception. Mask them until local_daif_restore() in cpu_suspend()
	 * resets them.
	 */
	disable_daif
	msr	mdscr_el1, x10

	msr	sctlr_el1, x12
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	msr	tpidr_el1, x13
alternative_else
	msr	tpidr_el2, x13
alternative_endif
	msr	sp_el0, x14
	/*
	 * Restore oslsr_el1 by writing oslar_el1
	 */
	msr	osdlr_el1, x5
	ubfx	x11, x11, #1, #1
	msr	oslar_el1, x11
	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
	reset_amuserenr_el0 x0			// Disable AMU access from EL0

alternative_if ARM64_HAS_RAS_EXTN
	msr_s	SYS_DISR_EL1, xzr
alternative_else_nop_endif

	ptrauth_keys_install_kernel_nosync x14, x1, x2, x3
	isb
	ret
SYM_FUNC_END(cpu_do_resume)
	.popsection
#endif

	.pushsection ".idmap.text", "awx"

.macro	__idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
	adrp	\tmp1, reserved_pg_dir
	phys_to_ttbr \tmp2, \tmp1
	offset_ttbr1 \tmp2, \tmp1
	msr	ttbr1_el1, \tmp2
	isb
	tlbi	vmalle1
	dsb	nsh
	isb
.endm
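/*
 * Why the macro above is safe: once TTBR1 points at the empty
 * reserved_pg_dir, no new TTBR1 walks can allocate TLB entries, so the
 * local "tlbi vmalle1; dsb nsh" leaves this CPU with no stale kernel
 * mappings while the tables are swapped underneath it.
 */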
/*
 * void idmap_cpu_replace_ttbr1(phys_addr_t ttbr1)
 *
 * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
 * called by anything else. It can only be executed from a TTBR0 mapping.
 */
SYM_FUNC_START(idmap_cpu_replace_ttbr1)
	save_and_disable_daif flags=x2

	__idmap_cpu_set_reserved_ttbr1 x1, x3

	offset_ttbr1 x0, x3
	msr	ttbr1_el1, x0
	isb

	restore_daif x2

	ret
SYM_FUNC_END(idmap_cpu_replace_ttbr1)
	.popsection

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	.pushsection ".idmap.text", "awx"

	.macro	__idmap_kpti_get_pgtable_ent, type
	dc	cvac, cur_\()\type\()p		// Ensure any existing dirty
	dmb	sy				// lines are written back before
	ldr	\type, [cur_\()\type\()p]	// loading the entry
	tbz	\type, #0, skip_\()\type	// Skip invalid and
	tbnz	\type, #11, skip_\()\type	// non-global entries
	.endm

	.macro __idmap_kpti_put_pgtable_ent_ng, type
	orr	\type, \type, #PTE_NG		// Same bit for blocks and pages
	str	\type, [cur_\()\type\()p]	// Update the entry and ensure
	dmb	sy				// that it is visible to all
	dc	civac, cur_\()\type\()p		// CPUs.
	.endm

/*
 * void __kpti_install_ng_mappings(int cpu, int num_cpus, phys_addr_t swapper)
 *
 * Called exactly once from stop_machine context by each CPU found during boot.
 */
__idmap_kpti_flag:
	.long	1
SYM_FUNC_START(idmap_kpti_install_ng_mappings)
	cpu		.req	w0
	num_cpus	.req	w1
	swapper_pa	.req	x2
	swapper_ttb	.req	x3
	flag_ptr	.req	x4
	cur_pgdp	.req	x5
	end_pgdp	.req	x6
	pgd		.req	x7
	cur_pudp	.req	x8
	end_pudp	.req	x9
	pud		.req	x10
	cur_pmdp	.req	x11
	end_pmdp	.req	x12
	pmd		.req	x13
	cur_ptep	.req	x14
	end_ptep	.req	x15
	pte		.req	x16

	mrs	swapper_ttb, ttbr1_el1
	restore_ttbr1	swapper_ttb
	adr	flag_ptr, __idmap_kpti_flag

	cbnz	cpu, __idmap_kpti_secondary

	/* We're the boot CPU. Wait for the others to catch up */
	sevl
1:	wfe
	ldaxr	w17, [flag_ptr]
	eor	w17, w17, num_cpus
	cbnz	w17, 1b
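	/*
	 * Rendezvous note: each secondary increments the flag once it has
	 * parked in the idmap, so the eor above reads zero exactly when
	 * flag == num_cpus, i.e. when every CPU has uninstalled swapper and
	 * it is safe to rewrite it.
	 */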
	/* We need to walk swapper, so turn off the MMU. */
	pre_disable_mmu_workaround
	mrs	x17, sctlr_el1
	bic	x17, x17, #SCTLR_ELx_M
	msr	sctlr_el1, x17
	isb

	/* Everybody is enjoying the idmap, so we can rewrite swapper. */
	/* PGD */
	mov	cur_pgdp, swapper_pa
	add	end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8)
do_pgd:	__idmap_kpti_get_pgtable_ent	pgd
	tbnz	pgd, #1, walk_puds
next_pgd:
	__idmap_kpti_put_pgtable_ent_ng	pgd
skip_pgd:
	add	cur_pgdp, cur_pgdp, #8
	cmp	cur_pgdp, end_pgdp
	b.ne	do_pgd

	/* Publish the updated tables and nuke all the TLBs */
	dsb	sy
	tlbi	vmalle1is
	dsb	ish
	isb

	/* We're done: fire up the MMU again */
	mrs	x17, sctlr_el1
	orr	x17, x17, #SCTLR_ELx_M
	set_sctlr_el1	x17

	/* Set the flag to zero to indicate that we're all done */
	str	wzr, [flag_ptr]
	ret

	/* PUD */
walk_puds:
	.if CONFIG_PGTABLE_LEVELS > 3
	pte_to_phys	cur_pudp, pgd
	add	end_pudp, cur_pudp, #(PTRS_PER_PUD * 8)
do_pud:	__idmap_kpti_get_pgtable_ent	pud
	tbnz	pud, #1, walk_pmds
next_pud:
	__idmap_kpti_put_pgtable_ent_ng	pud
skip_pud:
	add	cur_pudp, cur_pudp, #8
	cmp	cur_pudp, end_pudp
	b.ne	do_pud
	b	next_pgd
	.else /* CONFIG_PGTABLE_LEVELS <= 3 */
	mov	pud, pgd
	b	walk_pmds
next_pud:
	b	next_pgd
	.endif

	/* PMD */
walk_pmds:
	.if CONFIG_PGTABLE_LEVELS > 2
	pte_to_phys	cur_pmdp, pud
	add	end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8)
do_pmd:	__idmap_kpti_get_pgtable_ent	pmd
	tbnz	pmd, #1, walk_ptes
next_pmd:
	__idmap_kpti_put_pgtable_ent_ng	pmd
skip_pmd:
	add	cur_pmdp, cur_pmdp, #8
	cmp	cur_pmdp, end_pmdp
	b.ne	do_pmd
	b	next_pud
	.else /* CONFIG_PGTABLE_LEVELS <= 2 */
	mov	pmd, pud
	b	walk_ptes
next_pmd:
	b	next_pud
	.endif

	/* PTE */
walk_ptes:
	pte_to_phys	cur_ptep, pmd
	add	end_ptep, cur_ptep, #(PTRS_PER_PTE * 8)
do_pte:	__idmap_kpti_get_pgtable_ent	pte
	__idmap_kpti_put_pgtable_ent_ng	pte
skip_pte:
	add	cur_ptep, cur_ptep, #8
	cmp	cur_ptep, end_ptep
	b.ne	do_pte
	b	next_pmd
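	/*
	 * Walk structure, for reference: at each level, descriptor bit #1
	 * separates table pointers (descend a level) from block/page
	 * mappings (set nG in place); invalid (bit #0 clear) and
	 * already-non-global (bit #11 set) entries were filtered out by
	 * __idmap_kpti_get_pgtable_ent, so only live global entries are
	 * rewritten.
	 */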
	.unreq	cpu
	.unreq	num_cpus
	.unreq	swapper_pa
	.unreq	cur_pgdp
	.unreq	end_pgdp
	.unreq	pgd
	.unreq	cur_pudp
	.unreq	end_pudp
	.unreq	pud
	.unreq	cur_pmdp
	.unreq	end_pmdp
	.unreq	pmd
	.unreq	cur_ptep
	.unreq	end_ptep
	.unreq	pte

	/* Secondary CPUs end up here */
__idmap_kpti_secondary:
	/* Uninstall swapper before surgery begins */
	__idmap_cpu_set_reserved_ttbr1 x16, x17

	/* Increment the flag to let the boot CPU know we're ready */
1:	ldxr	w16, [flag_ptr]
	add	w16, w16, #1
	stxr	w17, w16, [flag_ptr]
	cbnz	w17, 1b

	/* Wait for the boot CPU to finish messing around with swapper */
	sevl
1:	wfe
	ldxr	w16, [flag_ptr]
	cbnz	w16, 1b

	/* All done, act like nothing happened */
	offset_ttbr1 swapper_ttb, x16
	msr	ttbr1_el1, swapper_ttb
	isb
	ret

	.unreq	swapper_ttb
	.unreq	flag_ptr
SYM_FUNC_END(idmap_kpti_install_ng_mappings)
	.popsection
#endif

/*
 *	__cpu_setup
 *
 *	Initialise the processor for turning the MMU on.
 *
 * Output:
 *	Return in x0 the value of the SCTLR_EL1 register.
 */
	.pushsection ".idmap.text", "awx"
SYM_FUNC_START(__cpu_setup)
	tlbi	vmalle1				// Invalidate local TLB
	dsb	nsh

	mov	x1, #3 << 20
	msr	cpacr_el1, x1			// Enable FP/ASIMD
	mov	x1, #1 << 12			// Reset mdscr_el1 and disable
	msr	mdscr_el1, x1			// access to the DCC from EL0
	isb					// Unmask debug exceptions now,
	enable_dbg				// since this is per-cpu
	reset_pmuserenr_el0 x1			// Disable PMU access from EL0
	reset_amuserenr_el0 x1			// Disable AMU access from EL0

	/*
	 * Memory region attributes
	 */
	mov_q	x5, MAIR_EL1_SET
#ifdef CONFIG_ARM64_MTE
	mte_tcr	.req	x20

	mov	mte_tcr, #0
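	/*
	 * mte_tcr (x20) accumulates the MTE-dependent TCR_EL1 bits across
	 * the feature probe below; it stays zero on CPUs without MTE, so
	 * ORing it into the TCR value later is a no-op there.
	 */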
	/*
	 * Update MAIR_EL1, GCR_EL1 and TFSR*_EL1 if MTE is supported
	 * (ID_AA64PFR1_EL1[11:8] > 1).
	 */
	mrs	x10, ID_AA64PFR1_EL1
	ubfx	x10, x10, #ID_AA64PFR1_MTE_SHIFT, #4
	cmp	x10, #ID_AA64PFR1_MTE
	b.lt	1f

	/* Normal Tagged memory type at the corresponding MAIR index */
	mov	x10, #MAIR_ATTR_NORMAL_TAGGED
	bfi	x5, x10, #(8 * MT_NORMAL_TAGGED), #8

	mov	x10, #KERNEL_GCR_EL1
	msr_s	SYS_GCR_EL1, x10

	/*
	 * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
	 * RGSR_EL1.SEED must be non-zero for IRG to produce
	 * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
	 * must initialize it.
	 */
	mrs	x10, CNTVCT_EL0
	ands	x10, x10, #SYS_RGSR_EL1_SEED_MASK
	csinc	x10, x10, xzr, ne
	lsl	x10, x10, #SYS_RGSR_EL1_SEED_SHIFT
	msr_s	SYS_RGSR_EL1, x10

	/* clear any pending tag check faults in TFSR*_EL1 */
	msr_s	SYS_TFSR_EL1, xzr
	msr_s	SYS_TFSRE0_EL1, xzr

	/* set the TCR_EL1 bits */
	mov_q	mte_tcr, TCR_MTE_FLAGS
1:
#endif
	msr	mair_el1, x5
	/*
	 * Set/prepare TCR and TTBR. TCR_EL1.T1SZ gets further
	 * adjusted if the kernel is compiled with 52bit VA support.
	 */
	mov_q	x10, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
			TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
			TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS
#ifdef CONFIG_ARM64_MTE
	orr	x10, x10, mte_tcr
	.unreq	mte_tcr
#endif
	tcr_clear_errata_bits x10, x9, x5

#ifdef CONFIG_ARM64_VA_BITS_52
	ldr_l	x9, vabits_actual
	sub	x9, xzr, x9
	add	x9, x9, #64
	tcr_set_t1sz	x10, x9
#else
	ldr_l	x9, idmap_t0sz
#endif
	tcr_set_t0sz	x10, x9
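	/*
	 * TxSZ recap: TCR_EL1.TxSZ encodes 64 - (VA bits). With 52-bit VA
	 * support, the sub/add pair above computes 64 - vabits_actual at
	 * runtime and programs both T1SZ and T0SZ from it; otherwise T0SZ
	 * comes from idmap_t0sz, sized to cover the identity map.
	 */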
	/*
	 * Set the IPS bits in TCR_EL1.
	 */
	tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6
#ifdef CONFIG_ARM64_HW_AFDBM
	/*
	 * Enable hardware update of the Access Flags bit.
	 * Hardware dirty bit management is enabled later,
	 * via capabilities.
	 */
	mrs	x9, ID_AA64MMFR1_EL1
	and	x9, x9, #0xf
	cbz	x9, 1f
	orr	x10, x10, #TCR_HA		// hardware Access flag update
1:
#endif	/* CONFIG_ARM64_HW_AFDBM */
	msr	tcr_el1, x10
	/*
	 * Prepare SCTLR
	 */
	mov_q	x0, INIT_SCTLR_EL1_MMU_ON
	ret					// return to head.S
SYM_FUNC_END(__cpu_setup)