/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/unwind.h>
#include <sm/optee_smc.h>
#include <sm/sm.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>
#include <util.h>

#define SM_CTX_SEC_END	(SM_CTX_SEC + SM_CTX_SEC_SIZE)

	.macro save_regs mode
	cps	\mode
	mrs	r2, spsr
	str	r2, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4
	.endm

FUNC sm_save_unbanked_regs , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* User mode registers have to be saved from system mode */
	cps	#CPSR_MODE_SYS
	str	sp, [r0], #4
	str	lr, [r0], #4

	save_regs	#CPSR_MODE_IRQ
	save_regs	#CPSR_MODE_FIQ
	save_regs	#CPSR_MODE_SVC
	save_regs	#CPSR_MODE_ABT
	save_regs	#CPSR_MODE_UND

#ifdef CFG_SM_NO_CYCLE_COUNTING
	read_pmcr r2
	stm	r0!, {r2}
#endif

#ifdef CFG_FTRACE_SUPPORT
	read_cntkctl r2
	stm	r0!, {r2}
#endif
	cps	#CPSR_MODE_MON
	bx	lr
UNWIND(	.fnend)
END_FUNC sm_save_unbanked_regs

	.macro restore_regs mode
	cps	\mode
	ldr	r2, [r0], #4
	ldr	sp, [r0], #4
	ldr	lr, [r0], #4
	msr	spsr_fsxc, r2
	.endm

/* Restores the mode specific registers */
FUNC sm_restore_unbanked_regs , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* User mode registers have to be restored from system mode */
	cps	#CPSR_MODE_SYS
	ldr	sp, [r0], #4
	ldr	lr, [r0], #4

	restore_regs	#CPSR_MODE_IRQ
	restore_regs	#CPSR_MODE_FIQ
	restore_regs	#CPSR_MODE_SVC
	restore_regs	#CPSR_MODE_ABT
	restore_regs	#CPSR_MODE_UND

#ifdef CFG_SM_NO_CYCLE_COUNTING
	ldm	r0!, {r2}
	write_pmcr r2
#endif

#ifdef CFG_FTRACE_SUPPORT
	ldm	r0!, {r2}
	write_cntkctl r2
#endif
	cps	#CPSR_MODE_MON
	bx	lr
UNWIND(	.fnend)
END_FUNC sm_restore_unbanked_regs
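/*
 * The store/load sequences above define the layout of the unbanked
 * register area inside struct sm_sec_ctx and struct sm_nsec_ctx. The
 * authoritative definitions live in <sm/sm.h> and the offsets used in
 * this file come from <generated/asm-defines.h>; the C sketch below is
 * only an illustration of the save order implemented above, with
 * hypothetical field names:
 *
 *	struct sm_unbanked_regs {
 *		uint32_t usr_sp;
 *		uint32_t usr_lr;
 *		uint32_t irq_spsr, irq_sp, irq_lr;
 *		uint32_t fiq_spsr, fiq_sp, fiq_lr;
 *		uint32_t svc_spsr, svc_sp, svc_lr;
 *		uint32_t abt_spsr, abt_sp, abt_lr;
 *		uint32_t und_spsr, und_sp, und_lr;
 *	#ifdef CFG_SM_NO_CYCLE_COUNTING
 *		uint32_t pmcr;
 *	#endif
 *	#ifdef CFG_FTRACE_SUPPORT
 *		uint32_t cntkctl;
 *	#endif
 *	};
 */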
/*
 * stack_tmp is used as stack, the top of the stack is reserved to hold
 * struct sm_ctx, everything below is for normal stack usage. As several
 * different CPU modes are using the same stack it's important that a
 * switch of CPU mode isn't done until one mode is done with the stack.
 * This means FIQ, IRQ and async aborts have to be masked while using
 * stack_tmp.
 */
LOCAL_FUNC sm_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}

	clrex		/* Clear the exclusive monitor */

	/* Find out if we're doing a secure or non-secure entry */
	read_scr r1
	tst	r1, #SCR_NS
	bne	.smc_from_nsec

	/*
	 * As we're coming from secure world (NS bit cleared) the stack
	 * pointer points to sm_ctx.sec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

	/* Save secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_save_unbanked_regs

	/*
	 * On FIQ exit we're restoring the non-secure context unchanged, on
	 * all other exits we're shifting r1-r4 from secure context into
	 * r0-r3 in non-secure context.
	 */
	add	r8, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)
	ldm	r8, {r0-r4}
	mov_imm	r9, TEESMC_OPTEED_RETURN_FIQ_DONE
	cmp	r0, r9
	addne	r8, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	stmne	r8, {r1-r4}

	/* Restore non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_restore_unbanked_regs

.sm_ret_to_nsec:
	/*
	 * Return to non-secure world
	 */
	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	ldm	r0, {r8-r12}

#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
	/*
	 * Prevent leaking information about which code has been executed.
	 * This is required to be used together with
	 * CFG_CORE_WORKAROUND_SPECTRE_BP to protect Cortex-A15 CPUs too.
	 *
	 * CFG_CORE_WORKAROUND_SPECTRE_BP also invalidates the branch
	 * predictor on affected CPUs. In the cases where an alternative
	 * vector has been installed the branch predictor is already
	 * invalidated so invalidating here again would be redundant, but
	 * testing for that is more trouble than it's worth.
	 */
	write_bpiall
#endif

	/* Update SCR */
	read_scr r0
	orr	r0, r0, #(SCR_NS | SCR_FIQ)	/* Set NS and FIQ bit in SCR */
	write_scr r0
	/*
	 * isb not needed since we're doing an exception return below
	 * without any dependency on the changes to SCR before that.
	 */

	add	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	b	.sm_exit

.smc_from_nsec:
	/*
	 * As we're coming from non-secure world (NS bit set) the stack
	 * pointer points to sm_ctx.nsec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	bic	r1, r1, #(SCR_NS | SCR_FIQ)	/* Clear NS and FIQ bit in SCR */
	write_scr r1
	isb

	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	stm	r0, {r8-r12}

	mov	r0, sp
	bl	sm_from_nsec
	cmp	r0, #SM_EXIT_TO_NON_SECURE
	beq	.sm_ret_to_nsec

	/*
	 * Continue into secure world
	 */
	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

.sm_exit:
	pop	{r0-r7}
	rfefd	sp!
UNWIND(	.fnend)
END_FUNC sm_smc_entry
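/*
 * The r1-r4 to r0-r3 shift in sm_smc_entry() above implements the return
 * convention between secure and non-secure world: when secure world exits
 * with anything other than TEESMC_OPTEED_RETURN_FIQ_DONE in r0, the values
 * it left in r1-r4 become r0-r3 of the resumed non-secure context. A rough
 * C-level sketch of the ldm/stm pair above, assuming r0..r4 fields in the
 * per-world context structs (illustrative only):
 *
 *	if (ctx->sec.r0 != TEESMC_OPTEED_RETURN_FIQ_DONE) {
 *		ctx->nsec.r0 = ctx->sec.r1;
 *		ctx->nsec.r1 = ctx->sec.r2;
 *		ctx->nsec.r2 = ctx->sec.r3;
 *		ctx->nsec.r3 = ctx->sec.r4;
 *	}
 */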
/*
 * FIQ handling
 *
 * Saves CPU context in the same way as sm_smc_entry() above. The CPU
 * context will later be restored by sm_smc_entry() when handling a return
 * from FIQ.
 */
LOCAL_FUNC sm_fiq_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* FIQ has a +4 offset for lr compared to preferred return address */
	sub	lr, lr, #4
	/* sp points just past struct sm_sec_ctx */
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}

	clrex		/* Clear the exclusive monitor */

	/*
	 * As we're coming from non-secure world the stack pointer points
	 * to sm_ctx.nsec.r0 at this stage. After the instruction below the
	 * stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	/* Update SCR */
	read_scr r1
	bic	r1, r1, #(SCR_NS | SCR_FIQ)	/* Clear NS and FIQ bit in SCR */
	write_scr r1
	isb

	/* Save non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_save_unbanked_regs
	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	stm	r0!, {r8-r12}

	/* Set FIQ entry */
	ldr	r0, =(thread_vector_table + THREAD_VECTOR_TABLE_FIQ_ENTRY)
	str	r0, [sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)]

	/* Restore secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_restore_unbanked_regs

	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)

	rfefd	sp!
UNWIND(	.fnend)
END_FUNC sm_fiq_entry

	.align	5
LOCAL_FUNC sm_vect_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	.		/* Reset			*/
	b	.		/* Undefined instruction	*/
	b	sm_smc_entry	/* Secure monitor call		*/
	b	.		/* Prefetch abort		*/
	b	.		/* Data abort			*/
	b	.		/* Reserved			*/
	b	.		/* IRQ				*/
	b	sm_fiq_entry	/* FIQ				*/

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP
	.macro vector_prologue_spectre
	/*
	 * This depends on SP being 8 byte aligned, that is, the
	 * lowest three bits in SP are zero.
	 *
	 * The idea is to form a specific bit pattern in the lowest
	 * three bits of SP depending on which entry in the vector
	 * we enter via. This is done by adding 1 to SP in each
	 * entry but the last.
	 */
	add	sp, sp, #1	/* 7:111 Reset			*/
	add	sp, sp, #1	/* 6:110 Undefined instruction	*/
	add	sp, sp, #1	/* 5:101 Secure monitor call	*/
	add	sp, sp, #1	/* 4:100 Prefetch abort		*/
	add	sp, sp, #1	/* 3:011 Data abort		*/
	add	sp, sp, #1	/* 2:010 Reserved		*/
	add	sp, sp, #1	/* 1:001 IRQ			*/
	nop			/* 0:000 FIQ			*/
	.endm

	.align	5
sm_vect_table_a15:
	vector_prologue_spectre
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f

	.align	5
sm_vect_table_bpiall:
	vector_prologue_spectre
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

1:
	/*
	 * Only two exceptions, SMC and FIQ, normally occur. For all
	 * other exceptions it's good enough to just spin; the lowest bits
	 * of SP still tell which exception we're stuck with when attaching
	 * a debugger.
	 */

	/* Test for FIQ, all the lowest bits of SP are supposed to be 0 */
	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
	beq	sm_fiq_entry

	/* Test for SMC, xor the lowest bits of SP to be 0 */
	eor	sp, sp, #(BIT(0) | BIT(2))
	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
	beq	sm_smc_entry

	/* unhandled exception */
	b	.
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP*/
UNWIND(	.fnend)
END_FUNC sm_vect_table
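/*
 * Worked example of the SP bit-pattern dispatch above, assuming SP is
 * 8-byte aligned on entry (low three bits 000):
 *
 * - An SMC traps to the third vector entry, so five "add sp, sp, #1"
 *   instructions execute and the low bits of SP become 101. The FIQ
 *   test (tst sp, #7) fails, the eor with BIT(0) | BIT(2) clears those
 *   bits again (restoring the original SP value), and the second test
 *   branches to sm_smc_entry.
 *
 * - An FIQ traps to the last entry, no add executes, the low bits stay
 *   000 and the first test branches straight to sm_fiq_entry.
 *
 * Any other exception leaves a non-zero pattern after the eor, so the
 * code falls through to the "b ." spin loop with a pattern in SP that
 * still uniquely identifies the exception for a debugger.
 */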
/* void sm_init(vaddr_t stack_pointer); */
FUNC sm_init , :
UNWIND(	.fnstart)
	/* Set monitor stack */
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	/* Point just beyond sm_ctx.sec */
	sub	sp, r0, #(SM_CTX_SIZE - SM_CTX_SEC_END)

#ifdef CFG_INIT_CNTVOFF
	read_scr r0
	orr	r0, r0, #SCR_NS	/* Set NS bit in SCR */
	write_scr r0
	isb

	/*
	 * Accessing CNTVOFF:
	 * If the implementation includes the Virtualization Extensions
	 * this is a RW register, accessible from Hyp mode, and
	 * from Monitor mode when SCR.NS is set to 1.
	 * If the implementation includes the Security Extensions
	 * but not the Virtualization Extensions, an MCRR or MRRC to
	 * the CNTVOFF encoding is UNPREDICTABLE if executed in Monitor
	 * mode, regardless of the value of SCR.NS.
	 */
	read_id_pfr1 r2
	mov	r3, r2
	ands	r3, r3, #IDPFR1_GENTIMER_MASK
	beq	.no_gentimer
	ands	r2, r2, #IDPFR1_VIRT_MASK
	beq	.no_gentimer
	mov	r2, #0
	write_cntvoff r2, r2

.no_gentimer:
	bic	r0, r0, #SCR_NS	/* Clear NS bit in SCR */
	write_scr r0
	isb
#endif
#ifdef CFG_SM_NO_CYCLE_COUNTING
	read_pmcr r0
	orr	r0, #PMCR_DP
	write_pmcr r0
#endif
	msr	cpsr, r1

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP
	/*
	 * For unrecognized CPUs we fall back to the vector used for
	 * unaffected CPUs. Cortex-A15 gets special treatment compared to
	 * the other affected Cortex CPUs.
	 */
	read_midr r1
	ubfx	r2, r1, #MIDR_IMPLEMENTER_SHIFT, #MIDR_IMPLEMENTER_WIDTH
	cmp	r2, #MIDR_IMPLEMENTER_ARM
	bne	1f

	ubfx	r2, r1, #MIDR_PRIMARY_PART_NUM_SHIFT, \
			#MIDR_PRIMARY_PART_NUM_WIDTH

	movw	r3, #CORTEX_A8_PART_NUM
	cmp	r2, r3
	movwne	r3, #CORTEX_A9_PART_NUM
	cmpne	r2, r3
	movwne	r3, #CORTEX_A17_PART_NUM
	cmpne	r2, r3
	ldreq	r0, =sm_vect_table_bpiall
	beq	2f

	movw	r3, #CORTEX_A15_PART_NUM
	cmp	r2, r3
	ldreq	r0, =sm_vect_table_a15
	beq	2f
#endif
	/* Set monitor vector (MVBAR) */
1:	ldr	r0, =sm_vect_table
2:	write_mvbar r0

	bx	lr
UNWIND(	.fnend)
END_FUNC sm_init
KEEP_PAGER sm_init


/* struct sm_nsec_ctx *sm_get_nsec_ctx(void); */
FUNC sm_get_nsec_ctx , :
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	/*
	 * As we're in secure mode mon_sp points just beyond sm_ctx.sec,
	 * which allows us to calculate the address of sm_ctx.nsec.
	 */
	add	r0, sp, #(SM_CTX_NSEC - SM_CTX_SEC_END)
	msr	cpsr, r1

	bx	lr
END_FUNC sm_get_nsec_ctx
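/*
 * Typical use of sm_get_nsec_ctx() from C during boot is to prime the
 * saved non-secure context before the first return to normal world.
 * A minimal sketch, assuming mon_lr/mon_spsr fields in struct
 * sm_nsec_ctx and a hypothetical nsec_entry address; the real caller
 * lives in the boot code, not in this file:
 *
 *	#include <arm.h>
 *	#include <sm/sm.h>
 *
 *	static void init_sec_mon(unsigned long nsec_entry)
 *	{
 *		struct sm_nsec_ctx *nsec_ctx = sm_get_nsec_ctx();
 *
 *		// Return address and mode for the first switch to normal
 *		// world, with IRQs masked until normal world unmasks them.
 *		nsec_ctx->mon_lr = nsec_entry;
 *		nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
 *	}
 */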