/*
 * include/asm-arm/macro.h
 *
 * Copyright (C) 2009 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#ifndef __ASM_ARM_MACRO_H__
#define __ASM_ARM_MACRO_H__
#ifdef __ASSEMBLY__

/*
 * These macros provide a convenient way to write 8, 16 and 32 bit data
 * to any address.
 * Registers r4 and r5 are used, any data in these registers are
 * overwritten by the macros.
 * The macros are valid for any ARM architecture, they do not implement
 * any memory barriers so caution is recommended when using these when the
 * caches are enabled or on a multi-core system.
 */

.macro	write32, addr, data
	ldr	r4, =\addr
	ldr	r5, =\data
	str	r5, [r4]
.endm

/*
 * NOTE: the "=expr" literal-pool pseudo-instruction exists only for the
 * full-word ldr; ldrh/ldrb have no such form.  Load the constant with a
 * plain ldr and let the narrow store truncate to the target width.
 */
.macro	write16, addr, data
	ldr	r4, =\addr
	ldr	r5, =\data
	strh	r5, [r4]		/* stores the low 16 bits of r5 */
.endm

.macro	write8, addr, data
	ldr	r4, =\addr
	ldr	r5, =\data
	strb	r5, [r4]		/* stores the low 8 bits of r5 */
.endm

/*
 * This macro generates a loop that can be used for delays in the code.
 * Register r4 is used, any data in this register is overwritten by the
 * macro.
 * The macro is valid for any ARM architecture. The actual time spent in the
 * loop will vary from CPU to CPU though.
 */

.macro	wait_timer, time
	ldr	r4, =\time
1:
	nop
	subs	r4, r4, #1		/* loop runs until the subtraction borrows */
	bcs	1b
.endm

#ifdef CONFIG_ARM64
/*
 * Register aliases.
 */
lr	.req	x30

/*
 * Branch according to exception level
 */
.macro	switch_el, xreg, el3_label, el2_label, el1_label
	mrs	\xreg, CurrentEL
	cmp	\xreg, 0xc		/* CurrentEL encodes EL in bits [3:2] */
	b.eq	\el3_label
	cmp	\xreg, 0x8
	b.eq	\el2_label
	cmp	\xreg, 0x4
	b.eq	\el1_label
.endm

/*
 * Branch if current processor is a slave,
 * choose processor with all zero affinity value as the master.
 */
.macro	branch_if_slave, xreg, slave_label
#ifdef CONFIG_ARMV8_MULTIENTRY
	/* NOTE: MPIDR handling will be erroneous on multi-cluster machines */
	mrs	\xreg, mpidr_el1
	tst	\xreg, #0xff		/* Test Affinity 0 */
	b.ne	\slave_label
	lsr	\xreg, \xreg, #8
	tst	\xreg, #0xff		/* Test Affinity 1 */
	b.ne	\slave_label
	lsr	\xreg, \xreg, #8
	tst	\xreg, #0xff		/* Test Affinity 2 */
	b.ne	\slave_label
	lsr	\xreg, \xreg, #16	/* Aff3 lives at MPIDR[39:32] */
	tst	\xreg, #0xff		/* Test Affinity 3 */
	b.ne	\slave_label
#endif
.endm

/*
 * Branch if current processor is a master,
 * choose processor with all zero affinity value as the master.
 */
.macro	branch_if_master, xreg1, xreg2, master_label
#ifdef CONFIG_ARMV8_MULTIENTRY
	/* NOTE: MPIDR handling will be erroneous on multi-cluster machines */
	mrs	\xreg1, mpidr_el1
	lsr	\xreg2, \xreg1, #32	/* isolate Aff3 */
	lsl	\xreg1, \xreg1, #40	/* strip everything above Aff0..Aff2 */
	lsr	\xreg1, \xreg1, #40
	orr	\xreg1, \xreg1, \xreg2	/* combined affinity == 0 => master */
	cbz	\xreg1, \master_label
#else
	b	\master_label
#endif
.endm

.macro	armv8_switch_to_el2_m, xreg1
	/* 64bit EL2 | HCE | SMD | RES1 (Bits[5:4]) | Non-secure EL0/EL1 */
	mov	\xreg1, #0x5b1
	msr	scr_el3, \xreg1
	msr	cptr_el3, xzr		/* Disable coprocessor traps to EL3 */
	mov	\xreg1, #0x33ff
	msr	cptr_el2, \xreg1	/* Disable coprocessor traps to EL2 */

	/* Initialize SCTLR_EL2
	 *
	 * setting RES1 bits (29,28,23,22,18,16,11,5,4) to 1
	 * and RES0 bits (31,30,27,26,24,21,20,17,15-13,10-6) +
	 * EE,WXN,I,SA,C,A,M to 0
	 */
	mov	\xreg1, #0x0830
	movk	\xreg1, #0x30C5, lsl #16
	msr	sctlr_el2, \xreg1

	/* Return to the EL2_SP2 mode from EL3 */
	mov	\xreg1, sp
	msr	sp_el2, \xreg1		/* Migrate SP */
	mrs	\xreg1, vbar_el3
	msr	vbar_el2, \xreg1	/* Migrate VBAR */
	mov	\xreg1, #0x3c9
	msr	spsr_el3, \xreg1	/* EL2_SP2 | D | A | I | F */
	msr	elr_el3, lr
	eret
.endm

.macro	armv8_switch_to_el1_m, xreg1, xreg2
	/* Initialize Generic Timers */
	mrs	\xreg1, cnthctl_el2
	orr	\xreg1, \xreg1, #0x3	/* Enable EL1 access to timers */
	msr	cnthctl_el2, \xreg1
	msr	cntvoff_el2, xzr

	/* Initialize MPID/MPIDR registers */
	mrs	\xreg1, midr_el1
	mrs	\xreg2, mpidr_el1
	msr	vpidr_el2, \xreg1
	msr	vmpidr_el2, \xreg2

	/* Disable coprocessor traps */
	mov	\xreg1, #0x33ff
	msr	cptr_el2, \xreg1	/* Disable coprocessor traps to EL2 */
	msr	hstr_el2, xzr		/* Disable coprocessor traps to EL2 */
	mov	\xreg1, #3 << 20
	msr	cpacr_el1, \xreg1	/* Enable FP/SIMD at EL1 */

	/* Initialize HCR_EL2 */
	mov	\xreg1, #(1 << 31)	/* 64bit EL1 */
	orr	\xreg1, \xreg1, #(1 << 29) /* Disable HVC */
	msr	hcr_el2, \xreg1

	/* SCTLR_EL1 initialization
	 *
	 * setting RES1 bits (29,28,23,22,20,11) to 1
	 * and RES0 bits (31,30,27,21,17,13,10,6) +
	 * UCI,EE,EOE,WXN,nTWE,nTWI,UCT,DZE,I,UMA,SED,ITD,
	 * CP15BEN,SA0,SA,C,A,M to 0
	 */
	mov	\xreg1, #0x0800
	movk	\xreg1, #0x30d0, lsl #16
	msr	sctlr_el1, \xreg1

	/* Return to the EL1_SP1 mode from EL2 */
	mov	\xreg1, sp
	msr	sp_el1, \xreg1		/* Migrate SP */
	mrs	\xreg1, vbar_el2
	msr	vbar_el1, \xreg1	/* Migrate VBAR */
	mov	\xreg1, #0x3c5
	msr	spsr_el2, \xreg1	/* EL1_SP1 | D | A | I | F */
	msr	elr_el2, lr
	eret
.endm

#if defined(CONFIG_GICV3)
.macro	gic_wait_for_interrupt_m xreg1
0 :	wfi
	mrs	\xreg1, ICC_IAR1_EL1	/* acknowledge the interrupt */
	msr	ICC_EOIR1_EL1, \xreg1	/* signal end of interrupt */
	cbnz	\xreg1, 0b		/* loop until a non-spurious INTID */
.endm
#elif defined(CONFIG_GICV2)
.macro	gic_wait_for_interrupt_m xreg1, wreg2
0 :	wfi
	ldr	\wreg2, [\xreg1, GICC_AIAR]	/* acknowledge the interrupt */
	str	\wreg2, [\xreg1, GICC_AEOIR]	/* signal end of interrupt */
	and	\wreg2, \wreg2, #0x3ff		/* mask down to the INTID */
	cbnz	\wreg2, 0b
.endm
#endif

#endif /* CONFIG_ARM64 */

#endif /* __ASSEMBLY__ */
#endif /* __ASM_ARM_MACRO_H__ */