/*
 * include/asm-arm/macro.h
 *
 * Copyright (C) 2009 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#ifndef __ASM_ARM_MACRO_H__
#define __ASM_ARM_MACRO_H__

#ifdef CONFIG_ARM64
#include <asm/system.h>
#endif

#ifdef __ASSEMBLY__

/*
 * These macros provide a convenient way to write 8, 16 and 32 bit data
 * to any address.
 * Registers r4 and r5 are used, any data in these registers are
 * overwritten by the macros.
 * The macros are valid for any ARM architecture, they do not implement
 * any memory barriers so caution is recommended when using these when the
 * caches are enabled or on a multi-core system.
 */

.macro	write32, addr, data
	ldr	r4, =\addr
	ldr	r5, =\data
	str	r5, [r4]
.endm

.macro	write16, addr, data
	ldr	r4, =\addr
	ldr	r5, =\data	/* "ldr Rt, =expr" is the only literal-pool
				 * pseudo-op GNU as provides; "ldrh Rt, =expr"
				 * does not assemble. Width is applied by the
				 * store below. */
	strh	r5, [r4]
.endm

.macro	write8, addr, data
	ldr	r4, =\addr
	ldr	r5, =\data	/* likewise, "ldrb Rt, =expr" is invalid */
	strb	r5, [r4]
.endm

/*
 * This macro generates a loop that can be used for delays in the code.
 * Register r4 is used, any data in this register is overwritten by the
 * macro.
 * The macro is valid for any ARM architecture. The actual time spent in the
 * loop will vary from CPU to CPU though.
 */

.macro	wait_timer, time
	ldr	r4, =\time
1:
	nop
	subs	r4, r4, #1	/* carry stays set until the count wraps */
	bcs	1b
.endm

#ifdef CONFIG_ARM64
/*
 * Register aliases.
 */
lr	.req	x30

/*
 * Branch according to exception level
 */
.macro	switch_el, xreg, el3_label, el2_label, el1_label
	mrs	\xreg, CurrentEL
	cmp	\xreg, 0xc		/* CurrentEL[3:2] == 3 -> EL3 */
	b.eq	\el3_label
	cmp	\xreg, 0x8		/* EL2 */
	b.eq	\el2_label
	cmp	\xreg, 0x4		/* EL1 */
	b.eq	\el1_label
.endm

/*
 * Branch if current processor is a Cortex-A35 core.
 */
.macro	branch_if_a35_core, xreg, a35_label
	mrs	\xreg, midr_el1
	lsr	\xreg, \xreg, #4	/* shift out Revision, keep PartNum */
	and	\xreg, \xreg, #0x00000FFF
	cmp	\xreg, #0xD04		/* Cortex-A35 MPCore processor. */
	b.eq	\a35_label
.endm

/*
 * Branch if current processor is a Cortex-A57 core.
 */
.macro	branch_if_a57_core, xreg, a57_label
	mrs	\xreg, midr_el1
	lsr	\xreg, \xreg, #4	/* shift out Revision, keep PartNum */
	and	\xreg, \xreg, #0x00000FFF
	cmp	\xreg, #0xD07		/* Cortex-A57 MPCore processor. */
	b.eq	\a57_label
.endm

/*
 * Branch if current processor is a Cortex-A53 core.
 */
.macro	branch_if_a53_core, xreg, a53_label
	mrs	\xreg, midr_el1
	lsr	\xreg, \xreg, #4	/* shift out Revision, keep PartNum */
	and	\xreg, \xreg, #0x00000FFF
	cmp	\xreg, #0xD03		/* Cortex-A53 MPCore processor. */
	b.eq	\a53_label
.endm

/*
 * Branch if current processor is a slave,
 * choose processor with all zero affinity value as the master.
 */
.macro	branch_if_slave, xreg, slave_label
#ifdef CONFIG_ARMV8_MULTIENTRY
	/* NOTE: MPIDR handling will be erroneous on multi-cluster machines */
	mrs	\xreg, mpidr_el1
	tst	\xreg, #0xff		/* Test Affinity 0 */
	b.ne	\slave_label
	lsr	\xreg, \xreg, #8
	tst	\xreg, #0xff		/* Test Affinity 1 */
	b.ne	\slave_label
	lsr	\xreg, \xreg, #8
	tst	\xreg, #0xff		/* Test Affinity 2 */
	b.ne	\slave_label
	lsr	\xreg, \xreg, #16	/* skip bits [31:24]; Aff3 lives at [39:32] */
	tst	\xreg, #0xff		/* Test Affinity 3 */
	b.ne	\slave_label
#endif
.endm

/*
 * Branch if current processor is a master,
 * choose processor with all zero affinity value as the master.
139 */ 140 .macro branch_if_master, xreg1, xreg2, master_label 141 #ifdef CONFIG_ARMV8_MULTIENTRY 142 /* NOTE: MPIDR handling will be erroneous on multi-cluster machines */ 143 mrs \xreg1, mpidr_el1 144 lsr \xreg2, \xreg1, #32 145 lsl \xreg2, \xreg2, #32 146 lsl \xreg1, \xreg1, #40 147 lsr \xreg1, \xreg1, #40 148 orr \xreg1, \xreg1, \xreg2 149 cbz \xreg1, \master_label 150 #else 151 b \master_label 152 #endif 153 .endm 154 155 /* 156 * Switch from EL3 to EL2 for ARMv8 157 * @ep: kernel entry point 158 * @flag: The execution state flag for lower exception 159 * level, ES_TO_AARCH64 or ES_TO_AARCH32 160 * @tmp: temporary register 161 * 162 * For loading 32-bit OS, x1 is machine nr and x2 is ftaddr. 163 * For loading 64-bit OS, x0 is physical address to the FDT blob. 164 * They will be passed to the guest. 165 */ 166 .macro armv8_switch_to_el2_m, ep, flag, tmp 167 msr cptr_el3, xzr /* Disable coprocessor traps to EL3 */ 168 mov \tmp, #CPTR_EL2_RES1 169 msr cptr_el2, \tmp /* Disable coprocessor traps to EL2 */ 170 171 /* Initialize Generic Timers */ 172 msr cntvoff_el2, xzr 173 174 /* Initialize SCTLR_EL2 175 * 176 * setting RES1 bits (29,28,23,22,18,16,11,5,4) to 1 177 * and RES0 bits (31,30,27,26,24,21,20,17,15-13,10-6) + 178 * EE,WXN,I,SA,C,A,M to 0 179 */ 180 ldr \tmp, =(SCTLR_EL2_RES1 | SCTLR_EL2_EE_LE |\ 181 SCTLR_EL2_WXN_DIS | SCTLR_EL2_ICACHE_DIS |\ 182 SCTLR_EL2_SA_DIS | SCTLR_EL2_DCACHE_DIS |\ 183 SCTLR_EL2_ALIGN_DIS | SCTLR_EL2_MMU_DIS) 184 msr sctlr_el2, \tmp 185 186 mov \tmp, sp 187 msr sp_el2, \tmp /* Migrate SP */ 188 mrs \tmp, vbar_el3 189 msr vbar_el2, \tmp /* Migrate VBAR */ 190 191 /* Check switch to AArch64 EL2 or AArch32 Hypervisor mode */ 192 cmp \flag, #ES_TO_AARCH32 193 b.eq 1f 194 195 /* 196 * The next lower exception level is AArch64, 64bit EL2 | HCE | 197 * RES1 (Bits[5:4]) | Non-secure EL0/EL1. 198 * and the SMD depends on requirements. 
199 */ 200 #ifdef CONFIG_ARMV8_PSCI 201 ldr \tmp, =(SCR_EL3_RW_AARCH64 | SCR_EL3_HCE_EN |\ 202 SCR_EL3_RES1 | SCR_EL3_NS_EN) 203 #else 204 ldr \tmp, =(SCR_EL3_RW_AARCH64 | SCR_EL3_HCE_EN |\ 205 SCR_EL3_SMD_DIS | SCR_EL3_RES1 |\ 206 SCR_EL3_NS_EN) 207 #endif 208 msr scr_el3, \tmp 209 210 /* Return to the EL2_SP2 mode from EL3 */ 211 ldr \tmp, =(SPSR_EL_DEBUG_MASK | SPSR_EL_SERR_MASK |\ 212 SPSR_EL_IRQ_MASK | SPSR_EL_FIQ_MASK |\ 213 SPSR_EL_M_AARCH64 | SPSR_EL_M_EL2H) 214 msr spsr_el3, \tmp 215 msr elr_el3, \ep 216 eret 217 218 1: 219 /* 220 * The next lower exception level is AArch32, 32bit EL2 | HCE | 221 * SMD | RES1 (Bits[5:4]) | Non-secure EL0/EL1. 222 */ 223 ldr \tmp, =(SCR_EL3_RW_AARCH32 | SCR_EL3_HCE_EN |\ 224 SCR_EL3_SMD_DIS | SCR_EL3_RES1 |\ 225 SCR_EL3_NS_EN) 226 msr scr_el3, \tmp 227 228 /* Return to AArch32 Hypervisor mode */ 229 ldr \tmp, =(SPSR_EL_END_LE | SPSR_EL_ASYN_MASK |\ 230 SPSR_EL_IRQ_MASK | SPSR_EL_FIQ_MASK |\ 231 SPSR_EL_T_A32 | SPSR_EL_M_AARCH32 |\ 232 SPSR_EL_M_HYP) 233 msr spsr_el3, \tmp 234 msr elr_el3, \ep 235 eret 236 .endm 237 238 /* 239 * Switch from EL2 to EL1 for ARMv8 240 * @ep: kernel entry point 241 * @flag: The execution state flag for lower exception 242 * level, ES_TO_AARCH64 or ES_TO_AARCH32 243 * @tmp: temporary register 244 * 245 * For loading 32-bit OS, x1 is machine nr and x2 is ftaddr. 246 * For loading 64-bit OS, x0 is physical address to the FDT blob. 247 * They will be passed to the guest. 
248 */ 249 .macro armv8_switch_to_el1_m, ep, flag, tmp 250 /* Initialize Generic Timers */ 251 mrs \tmp, cnthctl_el2 252 /* Enable EL1 access to timers */ 253 orr \tmp, \tmp, #(CNTHCTL_EL2_EL1PCEN_EN |\ 254 CNTHCTL_EL2_EL1PCTEN_EN) 255 msr cnthctl_el2, \tmp 256 msr cntvoff_el2, xzr 257 258 /* Initilize MPID/MPIDR registers */ 259 mrs \tmp, midr_el1 260 msr vpidr_el2, \tmp 261 mrs \tmp, mpidr_el1 262 msr vmpidr_el2, \tmp 263 264 /* Disable coprocessor traps */ 265 mov \tmp, #CPTR_EL2_RES1 266 msr cptr_el2, \tmp /* Disable coprocessor traps to EL2 */ 267 msr hstr_el2, xzr /* Disable coprocessor traps to EL2 */ 268 mov \tmp, #CPACR_EL1_FPEN_EN 269 msr cpacr_el1, \tmp /* Enable FP/SIMD at EL1 */ 270 271 /* SCTLR_EL1 initialization 272 * 273 * setting RES1 bits (29,28,23,22,20,11) to 1 274 * and RES0 bits (31,30,27,21,17,13,10,6) + 275 * UCI,EE,EOE,WXN,nTWE,nTWI,UCT,DZE,I,UMA,SED,ITD, 276 * CP15BEN,SA0,SA,C,A,M to 0 277 */ 278 ldr \tmp, =(SCTLR_EL1_RES1 | SCTLR_EL1_UCI_DIS |\ 279 SCTLR_EL1_EE_LE | SCTLR_EL1_WXN_DIS |\ 280 SCTLR_EL1_NTWE_DIS | SCTLR_EL1_NTWI_DIS |\ 281 SCTLR_EL1_UCT_DIS | SCTLR_EL1_DZE_DIS |\ 282 SCTLR_EL1_ICACHE_DIS | SCTLR_EL1_UMA_DIS |\ 283 SCTLR_EL1_SED_EN | SCTLR_EL1_ITD_EN |\ 284 SCTLR_EL1_CP15BEN_DIS | SCTLR_EL1_SA0_DIS |\ 285 SCTLR_EL1_SA_DIS | SCTLR_EL1_DCACHE_DIS |\ 286 SCTLR_EL1_ALIGN_DIS | SCTLR_EL1_MMU_DIS) 287 msr sctlr_el1, \tmp 288 289 mov \tmp, sp 290 msr sp_el1, \tmp /* Migrate SP */ 291 mrs \tmp, vbar_el2 292 msr vbar_el1, \tmp /* Migrate VBAR */ 293 294 /* Check switch to AArch64 EL1 or AArch32 Supervisor mode */ 295 cmp \flag, #ES_TO_AARCH32 296 b.eq 1f 297 298 /* Initialize HCR_EL2 */ 299 ldr \tmp, =(HCR_EL2_RW_AARCH64 | HCR_EL2_HCD_DIS) 300 msr hcr_el2, \tmp 301 302 /* Return to the EL1_SP1 mode from EL2 */ 303 ldr \tmp, =(SPSR_EL_DEBUG_MASK | SPSR_EL_SERR_MASK |\ 304 SPSR_EL_IRQ_MASK | SPSR_EL_FIQ_MASK |\ 305 SPSR_EL_M_AARCH64 | SPSR_EL_M_EL1H) 306 msr spsr_el2, \tmp 307 msr elr_el2, \ep 308 eret 309 310 1: 311 /* Initialize 
HCR_EL2 */ 312 ldr \tmp, =(HCR_EL2_RW_AARCH32 | HCR_EL2_HCD_DIS) 313 msr hcr_el2, \tmp 314 315 /* Return to AArch32 Supervisor mode from EL2 */ 316 ldr \tmp, =(SPSR_EL_END_LE | SPSR_EL_ASYN_MASK |\ 317 SPSR_EL_IRQ_MASK | SPSR_EL_FIQ_MASK |\ 318 SPSR_EL_T_A32 | SPSR_EL_M_AARCH32 |\ 319 SPSR_EL_M_SVC) 320 msr spsr_el2, \tmp 321 msr elr_el2, \ep 322 eret 323 .endm 324 325 #if defined(CONFIG_GICV3) 326 .macro gic_wait_for_interrupt_m xreg1 327 0 : wfi 328 mrs \xreg1, ICC_IAR1_EL1 329 msr ICC_EOIR1_EL1, \xreg1 330 cbnz \xreg1, 0b 331 .endm 332 #elif defined(CONFIG_GICV2) 333 .macro gic_wait_for_interrupt_m xreg1, wreg2 334 0 : wfi 335 ldr \wreg2, [\xreg1, GICC_AIAR] 336 str \wreg2, [\xreg1, GICC_AEOIR] 337 and \wreg2, \wreg2, #0x3ff 338 cbnz \wreg2, 0b 339 .endm 340 #endif 341 342 #endif /* CONFIG_ARM64 */ 343 344 #endif /* __ASSEMBLY__ */ 345 #endif /* __ASM_ARM_MACRO_H__ */ 346