/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/unwind.h>
#include <sm/optee_smc.h>
#include <sm/sm.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>
#include <util.h>

#define SM_CTX_SEC_END	(SM_CTX_SEC + SM_CTX_SEC_SIZE)

	.macro save_regs mode
	cps	\mode
	mrs	r2, spsr
	str	r2, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4
	.endm

/* Saves the mode specific registers to the struct pointed to by r0 */
FUNC sm_save_unbanked_regs , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* User mode registers have to be saved from system mode */
	cps	#CPSR_MODE_SYS
	str	sp, [r0], #4
	str	lr, [r0], #4

	save_regs	#CPSR_MODE_IRQ
	save_regs	#CPSR_MODE_FIQ
	save_regs	#CPSR_MODE_SVC
	save_regs	#CPSR_MODE_ABT
	save_regs	#CPSR_MODE_UND

#ifdef CFG_SM_NO_CYCLE_COUNTING
	read_pmcr r2
	stm	r0!, {r2}
#endif
	cps	#CPSR_MODE_MON
	bx	lr
UNWIND(	.fnend)
END_FUNC sm_save_unbanked_regs

	.macro restore_regs mode
	cps	\mode
	ldr	r2, [r0], #4
	ldr	sp, [r0], #4
	ldr	lr, [r0], #4
	msr	spsr_fsxc, r2
	.endm

/* Restores the mode specific registers */
FUNC sm_restore_unbanked_regs , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* User mode registers have to be restored from system mode */
	cps	#CPSR_MODE_SYS
	ldr	sp, [r0], #4
	ldr	lr, [r0], #4

	restore_regs	#CPSR_MODE_IRQ
	restore_regs	#CPSR_MODE_FIQ
	restore_regs	#CPSR_MODE_SVC
	restore_regs	#CPSR_MODE_ABT
	restore_regs	#CPSR_MODE_UND

#ifdef CFG_SM_NO_CYCLE_COUNTING
	ldm	r0!, {r2}
	write_pmcr r2
#endif
	cps	#CPSR_MODE_MON
	bx	lr
UNWIND(	.fnend)
END_FUNC sm_restore_unbanked_regs

/*
 * stack_tmp is used as stack, the top of the stack is reserved to hold
 * struct sm_ctx, everything below is for normal stack usage. As several
 * different CPU modes are using the same stack it's important that a
 * switch of CPU mode isn't done until the current mode is done with the
 * stack. This means that FIQ, IRQ and Async abort have to be masked
 * while using stack_tmp.
 */
LOCAL_FUNC sm_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}

	clrex	/* Clear the exclusive monitor */

	/* Find out if we're doing a secure or non-secure entry */
	read_scr r1
	tst	r1, #SCR_NS
	bne	.smc_from_nsec

	/*
	 * As we're coming from secure world (NS bit cleared) the stack
	 * pointer points to sm_ctx.sec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

	/* Save secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_save_unbanked_regs

	/*
	 * On FIQ exit we're restoring the non-secure context unchanged, on
	 * all other exits we're shifting r1-r4 from secure context into
	 * r0-r3 in non-secure context.
	 */
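	/*
	 * The return value convention assumed here (from
	 * sm/teesmc_opteed.h): secure world returns to the monitor with
	 * an SMC where r0 holds a TEESMC_OPTEED_RETURN_* code. For all
	 * codes except TEESMC_OPTEED_RETURN_FIQ_DONE, r1-r4 carry the
	 * values normal world expects to find in r0-r3 when its original
	 * SMC completes.
	 */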
	add	r8, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)
	ldm	r8, {r0-r4}
	mov_imm	r9, TEESMC_OPTEED_RETURN_FIQ_DONE
	cmp	r0, r9
	addne	r8, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	stmne	r8, {r1-r4}

	/* Restore non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_restore_unbanked_regs

.sm_ret_to_nsec:
	/*
	 * Return to non-secure world
	 */
	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	ldm	r0, {r8-r12}

	/* Update SCR */
	read_scr r0
	orr	r0, r0, #(SCR_NS | SCR_FIQ) /* Set NS and FIQ bit in SCR */
	write_scr r0
	/*
	 * An isb is not needed since we're doing an exception return
	 * below, which has no dependency on the changes to SCR above.
	 */

	add	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	b	.sm_exit

.smc_from_nsec:
	/*
	 * As we're coming from non-secure world (NS bit set) the stack
	 * pointer points to sm_ctx.nsec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bit in SCR */
	write_scr r1
	isb

	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	stm	r0, {r8-r12}

	mov	r0, sp
	bl	sm_from_nsec
	cmp	r0, #SM_EXIT_TO_NON_SECURE
	beq	.sm_ret_to_nsec

	/*
	 * Continue into secure world
	 */
	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

.sm_exit:
	pop	{r0-r7}
	rfefd	sp!
UNWIND(	.fnend)
END_FUNC sm_smc_entry

/*
 * FIQ handling
 *
 * Saves CPU context in the same way as sm_smc_entry() above. The CPU
 * context will later be restored by sm_smc_entry() when handling a return
 * from FIQ.
 */
LOCAL_FUNC sm_fiq_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* FIQ has a +4 offset for lr compared to preferred return address */
	sub	lr, lr, #4
	/* sp points just past struct sm_sec_ctx */
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}

	clrex	/* Clear the exclusive monitor */

	/*
	 * As we're coming from non-secure world the stack pointer points
	 * to sm_ctx.nsec.r0 at this stage. After the instruction below the
	 * stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	/* Update SCR */
	read_scr r1
	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bit in SCR */
	write_scr r1
	isb

	/* Save non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_save_unbanked_regs
	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	stm	r0!, {r8-r12}

	/* Set FIQ entry */
	ldr	r0, =(thread_vector_table + THREAD_VECTOR_TABLE_FIQ_ENTRY)
	str	r0, [sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)]

	/* Restore secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_restore_unbanked_regs

	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)

	rfefd	sp!
UNWIND(	.fnend)
END_FUNC sm_fiq_entry
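/*
 * Monitor vector tables below: sm_init() points MVBAR at one of them.
 * The layout is the standard Armv7-A exception vector layout, eight
 * 4-byte entries, one per exception type, and .align 5 gives the 32-byte
 * alignment MVBAR requires. With SCR.FIQ set while executing in normal
 * world, FIQs trap to monitor mode and end up in sm_fiq_entry(), and SMCs
 * land in sm_smc_entry(). The remaining exceptions aren't expected in
 * monitor mode and simply spin.
 */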
	.section .text.sm_vect_table
	.align	5
LOCAL_FUNC sm_vect_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	.		/* Reset			*/
	b	.		/* Undefined instruction	*/
	b	sm_smc_entry	/* Secure monitor call		*/
	b	.		/* Prefetch abort		*/
	b	.		/* Data abort			*/
	b	.		/* Reserved			*/
	b	.		/* IRQ				*/
	b	sm_fiq_entry	/* FIQ				*/

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP
	.macro vector_prologue_spectre
		/*
		 * This depends on SP being 8 byte aligned, that is, the
		 * lowest three bits in SP are zero.
		 *
		 * The idea is to form a specific bit pattern in the lowest
		 * three bits of SP depending on which entry in the vector
		 * we enter via. This is done by adding 1 to SP in each
		 * entry but the last.
		 */
		add	sp, sp, #1	/* 7:111 Reset			*/
		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
		add	sp, sp, #1	/* 3:011 Data abort		*/
		add	sp, sp, #1	/* 2:010 Reserved		*/
		add	sp, sp, #1	/* 1:001 IRQ			*/
		nop			/* 0:000 FIQ			*/
	.endm

	.align	5
sm_vect_table_a15:
	vector_prologue_spectre
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f

	.align	5
sm_vect_table_bpiall:
	vector_prologue_spectre
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

1:
	/*
	 * Only two exceptions normally occur, SMC and FIQ. For all other
	 * exceptions it's good enough to just spin; the lowest bits of SP
	 * still tell which exception we're stuck with when attaching a
	 * debugger.
	 */

	/* Test for FIQ, all the lowest bits of SP are supposed to be 0 */
	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
	beq	sm_fiq_entry

	/*
	 * Test for SMC: the SMC entry leaves the pattern 0b101 in the
	 * lowest bits of SP, so XORing with 0b101 clears them back to 0.
	 */
	eor	sp, sp, #(BIT(0) | BIT(2))
	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
	beq	sm_smc_entry

	/* Unhandled exception */
	b	.
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP*/
UNWIND(	.fnend)
END_FUNC sm_vect_table
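/*
 * sm_init() is called during boot with the top of this CPU's temporary
 * stack in r0. It reserves room for struct sm_ctx at the top of that
 * stack (mon_sp is left pointing just beyond sm_ctx.sec), optionally
 * zeroes CNTVOFF (CFG_INIT_CNTVOFF) and sets PMCR.DP so the cycle
 * counter doesn't count in secure state (CFG_SM_NO_CYCLE_COUNTING), and
 * finally selects and installs a monitor vector table in MVBAR.
 */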
/* void sm_init(vaddr_t stack_pointer); */
FUNC sm_init , :
UNWIND(	.fnstart)
	/* Set monitor stack */
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	/* Point just beyond sm_ctx.sec */
	sub	sp, r0, #(SM_CTX_SIZE - SM_CTX_SEC_END)

#ifdef CFG_INIT_CNTVOFF
	read_scr r0
	orr	r0, r0, #SCR_NS /* Set NS bit in SCR */
	write_scr r0
	isb

	/*
	 * Accessing CNTVOFF:
	 * If the implementation includes the Virtualization Extensions
	 * this is a RW register, accessible from Hyp mode, and
	 * from Monitor mode when SCR.NS is set to 1.
	 * If the implementation includes the Security Extensions
	 * but not the Virtualization Extensions, an MCRR or MRRC to
	 * the CNTVOFF encoding is UNPREDICTABLE if executed in Monitor
	 * mode, regardless of the value of SCR.NS.
	 */
	read_id_pfr1 r2
	mov	r3, r2
	ands	r3, r3, #IDPFR1_GENTIMER_MASK
	beq	.no_gentimer
	ands	r2, r2, #IDPFR1_VIRT_MASK
	beq	.no_gentimer
	mov	r2, #0
	write_cntvoff r2, r2

.no_gentimer:
	bic	r0, r0, #SCR_NS /* Clear NS bit in SCR */
	write_scr r0
	isb
#endif
#ifdef CFG_SM_NO_CYCLE_COUNTING
	read_pmcr r0
	orr	r0, #PMCR_DP
	write_pmcr r0
#endif
	msr	cpsr, r1

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP
	/*
	 * For unrecognized CPUs we fall back to the vector used for
	 * unaffected CPUs. Cortex-A15 has special treatment compared to
	 * the other affected Cortex CPUs.
	 */
	read_midr r1
	ubfx	r2, r1, #MIDR_IMPLEMENTER_SHIFT, #MIDR_IMPLEMENTER_WIDTH
	cmp	r2, #MIDR_IMPLEMENTER_ARM
	bne	1f

	ubfx	r2, r1, #MIDR_PRIMARY_PART_NUM_SHIFT, \
			#MIDR_PRIMARY_PART_NUM_WIDTH

	movw	r3, #CORTEX_A8_PART_NUM
	cmp	r2, r3
	movwne	r3, #CORTEX_A9_PART_NUM
	cmpne	r2, r3
	movwne	r3, #CORTEX_A17_PART_NUM
	cmpne	r2, r3
	ldreq	r0, =sm_vect_table_bpiall
	beq	2f

	movw	r3, #CORTEX_A15_PART_NUM
	cmp	r2, r3
	ldreq	r0, =sm_vect_table_a15
	beq	2f
#endif
	/* Set monitor vector (MVBAR) */
1:	ldr	r0, =sm_vect_table
2:	write_mvbar r0

	bx	lr
END_FUNC sm_init
KEEP_PAGER sm_init


/* struct sm_nsec_ctx *sm_get_nsec_ctx(void); */
FUNC sm_get_nsec_ctx , :
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	/*
	 * As we're in secure mode mon_sp points just beyond sm_ctx.sec,
	 * which allows us to calculate the address of sm_ctx.nsec.
	 */
	add	r0, sp, #(SM_CTX_NSEC - SM_CTX_SEC_END)
	msr	cpsr, r1

	bx	lr
END_FUNC sm_get_nsec_ctx
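/*
 * For reference, a minimal sketch (not part of this file) of how boot
 * code can use sm_get_nsec_ctx() to set up the first return to normal
 * world; mon_lr/mon_spsr are the fields consumed by the rfefd in
 * sm_smc_entry(), and nsec_entry_address is a hypothetical placeholder
 * for the normal world entry point:
 *
 *	struct sm_nsec_ctx *nsec_ctx = sm_get_nsec_ctx();
 *
 *	nsec_ctx->mon_lr = nsec_entry_address;
 *	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
 */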