/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/unwind.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>
#include <util.h>

	.section .text.sm_asm

FUNC sm_save_unbanked_regs , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* User mode registers have to be saved from system mode */
	cps	#CPSR_MODE_SYS
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_IRQ
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_FIQ
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_ABT
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_UND
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

#ifdef CFG_SM_NO_CYCLE_COUNTING
	read_pmcr r2
	stm	r0!, {r2}
#endif
	cps	#CPSR_MODE_MON
	bx	lr
UNWIND(	.fnend)
END_FUNC sm_save_unbanked_regs

/* Restores the mode-specific registers */
FUNC sm_restore_unbanked_regs , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* User mode registers have to be restored from system mode */
	cps	#CPSR_MODE_SYS
	ldm	r0!, {sp, lr}

	cps	#CPSR_MODE_IRQ
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_FIQ
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_SVC
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_ABT
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_UND
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

#ifdef CFG_SM_NO_CYCLE_COUNTING
	ldm	r0!, {r2}
	write_pmcr r2
#endif
	cps	#CPSR_MODE_MON
	bx	lr
UNWIND(	.fnend)
END_FUNC sm_restore_unbanked_regs
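/*
 * For reference, the store order above implies an unbanked-registers
 * layout like the sketch below. This is illustrative only: the
 * authoritative definitions live in <sm/sm.h>, and the SM_*_CTX_*
 * offsets used in this file are generated from them into
 * <generated/asm-defines.h>.
 *
 *	struct sm_unbanked_regs {
 *		uint32_t usr_sp, usr_lr;
 *		uint32_t irq_spsr, irq_sp, irq_lr;
 *		uint32_t fiq_spsr, fiq_sp, fiq_lr;
 *		uint32_t svc_spsr, svc_sp, svc_lr;
 *		uint32_t abt_spsr, abt_sp, abt_lr;
 *		uint32_t und_spsr, und_sp, und_lr;
 *	#ifdef CFG_SM_NO_CYCLE_COUNTING
 *		uint32_t pmcr;
 *	#endif
 *	};
 */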
/*
 * stack_tmp is used as stack, the top of the stack is reserved to hold
 * struct sm_ctx, everything below is for normal stack usage. As several
 * different CPU modes are using the same stack it's important that a
 * switch of CPU mode isn't done until one mode is done with the stack.
 * This means FIQ, IRQ and asynchronous aborts have to be masked while
 * using stack_tmp.
 */
LOCAL_FUNC sm_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}

	clrex		/* Clear the exclusive monitor */

	/* Find out if we're doing a secure or non-secure entry */
	read_scr r1
	tst	r1, #SCR_NS
	bne	.smc_from_nsec

	/*
	 * As we're coming from secure world (NS bit cleared) the stack
	 * pointer points to sm_ctx.sec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

	/* Save secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_save_unbanked_regs

	/*
	 * On FIQ exit we're restoring the non-secure context unchanged;
	 * on all other exits we're shifting r1-r4 from the secure context
	 * into r0-r3 in the non-secure context.
	 */
	add	r8, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)
	ldm	r8, {r0-r4}
	mov_imm	r9, TEESMC_OPTEED_RETURN_FIQ_DONE
	cmp	r0, r9
	addne	r8, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	stmne	r8, {r1-r4}

	/* Restore non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_restore_unbanked_regs

.sm_ret_to_nsec:
	/*
	 * Return to non-secure world
	 */
	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	ldm	r0, {r8-r12}

	/* Update SCR */
	read_scr r0
	orr	r0, r0, #(SCR_NS | SCR_FIQ) /* Set NS and FIQ bit in SCR */
	write_scr r0
	/*
	 * isb not needed since we're doing an exception return below
	 * without any dependency on the changes to SCR before it.
	 */

	add	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	b	.sm_exit

.smc_from_nsec:
	/*
	 * As we're coming from non-secure world (NS bit set) the stack
	 * pointer points to sm_ctx.nsec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bit in SCR */
	write_scr r1
	isb

	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	stm	r0, {r8-r12}

	mov	r0, sp
	bl	sm_from_nsec
	cmp	r0, #0
	beq	.sm_ret_to_nsec

	/*
	 * Continue into secure world
	 */
	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

.sm_exit:
	pop	{r0-r7}
	rfefd	sp!
UNWIND(	.fnend)
END_FUNC sm_smc_entry

/*
 * FIQ handling
 *
 * Saves the CPU context in the same way as sm_smc_entry() above. The CPU
 * context will later be restored by sm_smc_entry() when handling a return
 * from FIQ.
 */
LOCAL_FUNC sm_fiq_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* FIQ has a +4 offset for lr compared to preferred return address */
	sub	lr, lr, #4
	/* sp points just past struct sm_sec_ctx */
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}

	clrex		/* Clear the exclusive monitor */

	/*
	 * As we're coming from non-secure world the stack pointer points
	 * to sm_ctx.nsec.r0 at this stage. After the instruction below the
	 * stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	/* Update SCR */
	read_scr r1
	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bit in SCR */
	write_scr r1
	isb

	/* Save non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_save_unbanked_regs
	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	stm	r0!, {r8-r12}

	/* Set FIQ entry */
	ldr	r0, =(thread_vector_table + THREAD_VECTOR_TABLE_FIQ_ENTRY)
	str	r0, [sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)]

	/* Restore secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_restore_unbanked_regs

	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)

	rfefd	sp!
UNWIND(	.fnend)
END_FUNC sm_fiq_entry
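/*
 * Illustrative sketch (not code from this file) of how the secure
 * world is expected to hand a handled FIQ back, matching the
 * TEESMC_OPTEED_RETURN_FIQ_DONE test in sm_smc_entry above:
 *
 *	mov_imm	r0, TEESMC_OPTEED_RETURN_FIQ_DONE
 *	smc	#0
 *
 * sm_smc_entry then sees r0 == TEESMC_OPTEED_RETURN_FIQ_DONE and
 * restores the saved non-secure context unchanged; for any other
 * return value it shifts r1-r4 of the secure context into r0-r3 of
 * the non-secure context.
 */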
	.section .text.sm_vect_table
	.align	5
LOCAL_FUNC sm_vect_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	.		/* Reset */
	b	.		/* Undefined instruction */
	b	sm_smc_entry	/* Secure monitor call */
	b	.		/* Prefetch abort */
	b	.		/* Data abort */
	b	.		/* Reserved */
	b	.		/* IRQ */
	b	sm_fiq_entry	/* FIQ */

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP
	.macro vector_prologue_spectre
	/*
	 * This depends on SP being 8 byte aligned, that is, the
	 * lowest three bits in SP are zero.
	 *
	 * The idea is to form a specific bit pattern in the lowest
	 * three bits of SP depending on which entry in the vector
	 * we enter via. This is done by adding 1 to SP in each
	 * entry but the last.
	 */
	add	sp, sp, #1	/* 7:111 Reset */
	add	sp, sp, #1	/* 6:110 Undefined instruction */
	add	sp, sp, #1	/* 5:101 Secure monitor call */
	add	sp, sp, #1	/* 4:100 Prefetch abort */
	add	sp, sp, #1	/* 3:011 Data abort */
	add	sp, sp, #1	/* 2:010 Reserved */
	add	sp, sp, #1	/* 1:001 IRQ */
	nop			/* 0:000 FIQ */
	.endm

	.align	5
sm_vect_table_a15:
	vector_prologue_spectre
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f

	.align	5
sm_vect_table_bpiall:
	vector_prologue_spectre
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

1:
	/*
	 * Only two exceptions normally occur, SMC and FIQ. For all other
	 * exceptions it's good enough to just spin; the lowest bits of SP
	 * still tell which exception we're stuck in when attaching a
	 * debugger.
	 */

	/* Test for FIQ, all the lowest bits of SP are supposed to be 0 */
	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
	beq	sm_fiq_entry

	/* Test for SMC, XOR with the SMC pattern so the lowest bits become 0 */
	eor	sp, sp, #(BIT(0) | BIT(2))
	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
	beq	sm_smc_entry

	/* Unhandled exception */
	b	.
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP*/
UNWIND(	.fnend)
END_FUNC sm_vect_table
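/*
 * Worked example for the Spectre vector prologue above (illustrative):
 * an SMC enters at the third vector slot and falls through the
 * remaining "add sp, sp, #1" instructions, so SP is incremented five
 * times. SP was 8 byte aligned on entry, so its lowest three bits now
 * read 0b101, matching the "5:101" annotation. The dispatch code after
 * the branch predictor invalidation decodes this: 0b000 means FIQ, and
 * XORing with 0b101 turns the SMC pattern into 0b000. Any other
 * pattern spins at "b .", leaving the exception number in the low bits
 * of SP for a debugger to inspect.
 */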
/* void sm_init(vaddr_t stack_pointer); */
FUNC sm_init , :
UNWIND(	.fnstart)
	/* Set monitor stack */
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	/* Point just beyond sm_ctx.sec */
	sub	sp, r0, #(SM_CTX_SIZE - SM_CTX_NSEC)

#ifdef CFG_INIT_CNTVOFF
	read_scr r0
	orr	r0, r0, #SCR_NS	/* Set NS bit in SCR */
	write_scr r0
	isb

	/*
	 * Accessing CNTVOFF:
	 * If the implementation includes the Virtualization Extensions
	 * this is a RW register, accessible from Hyp mode, and
	 * from Monitor mode when SCR.NS is set to 1.
	 * If the implementation includes the Security Extensions
	 * but not the Virtualization Extensions, an MCRR or MRRC to
	 * the CNTVOFF encoding is UNPREDICTABLE if executed in Monitor
	 * mode, regardless of the value of SCR.NS.
	 */
	read_idpfr1 r2
	mov	r3, r2
	ands	r3, r3, #IDPFR1_GENTIMER_MASK
	beq	.no_gentimer
	ands	r2, r2, #IDPFR1_VIRT_MASK
	beq	.no_gentimer
	mov	r2, #0
	write_cntvoff r2, r2

.no_gentimer:
	bic	r0, r0, #SCR_NS	/* Clear NS bit in SCR */
	write_scr r0
	isb
#endif
#ifdef CFG_SM_NO_CYCLE_COUNTING
	read_pmcr r0
	orr	r0, #PMCR_DP
	write_pmcr r0
#endif
	msr	cpsr, r1

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP
	/*
	 * For unrecognized CPUs we fall back to the vector used for
	 * unaffected CPUs. Cortex-A15 has special treatment compared to
	 * the other affected Cortex CPUs.
	 */
	read_midr r1
	ubfx	r2, r1, #MIDR_IMPLEMENTER_SHIFT, #MIDR_IMPLEMENTER_WIDTH
	cmp	r2, #MIDR_IMPLEMENTER_ARM
	bne	1f

	ubfx	r2, r1, #MIDR_PRIMARY_PART_NUM_SHIFT, \
			#MIDR_PRIMARY_PART_NUM_WIDTH

	movw	r3, #CORTEX_A8_PART_NUM
	cmp	r2, r3
	movwne	r3, #CORTEX_A9_PART_NUM
	cmpne	r2, r3
	movwne	r3, #CORTEX_A17_PART_NUM
	cmpne	r2, r3
	ldreq	r0, =sm_vect_table_bpiall
	beq	2f

	movw	r3, #CORTEX_A15_PART_NUM
	cmp	r2, r3
	ldreq	r0, =sm_vect_table_a15
	beq	2f
#endif
	/* Set monitor vector (MVBAR) */
1:	ldr	r0, =sm_vect_table
2:	write_mvbar r0

	bx	lr
UNWIND(	.fnend)
END_FUNC sm_init
KEEP_PAGER sm_init

/* struct sm_nsec_ctx *sm_get_nsec_ctx(void); */
FUNC sm_get_nsec_ctx , :
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	mov	r0, sp
	msr	cpsr, r1

	/*
	 * As we're in secure mode, the monitor sp points just beyond
	 * sm_ctx.sec, which is where sm_ctx.nsec begins.
	 */
	bx	lr
END_FUNC sm_get_nsec_ctx
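/*
 * Illustrative C usage of the two functions above (a hedged sketch;
 * stack_tmp_top and nsec_entry are placeholder names, not defined in
 * this file):
 *
 *	sm_init(stack_tmp_top);
 *	struct sm_nsec_ctx *nsec_ctx = sm_get_nsec_ctx();
 *	nsec_ctx->mon_lr = nsec_entry;
 *	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_A | CPSR_I | CPSR_F;
 *
 * mon_lr/mon_spsr set up this way are what the RFE at the end of
 * sm_smc_entry consumes the first time the monitor returns to the
 * non-secure world.
 */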