/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm.S>
#include <arm.h>
#include <arm32_macros.S>
#include <sm/teesmc.h>
#include <sm/teesmc_opteed_macros.h>
#include <sm/teesmc_opteed.h>
#include <kernel/thread_defs.h>

	.section .text.thread_asm

LOCAL_FUNC vector_std_smc_entry , :
	push	{r0-r7}
	mov	r0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (an error was detected) it will do a
	 * normal "C" return.
	 */
	pop	{r1-r8}
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_std_smc_entry

LOCAL_FUNC vector_fast_smc_entry , :
	push	{r0-r7}
	mov	r0, sp
	bl	thread_handle_fast_smc
	pop	{r1-r8}
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fast_smc_entry
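
/*
 * Both SMC entries above hand the eight argument registers over to C
 * code by pushing r0-r7 and passing sp as a struct pointer. A minimal
 * sketch of the layout this relies on follows; the authoritative
 * definition is the struct thread_smc_args in the kernel headers, and
 * the comments here are only illustrative:
 *
 *	struct thread_smc_args {
 *		uint32_t a0;	// SMC function ID on entry, result on return
 *		uint32_t a1;	// Parameter on entry, result on return
 *		uint32_t a2;	// Parameter on entry, result on return
 *		uint32_t a3;	// Parameter on entry, result on return
 *		uint32_t a4;	// Parameter on entry
 *		uint32_t a5;	// Parameter on entry
 *		uint32_t a6;	// Parameter on entry
 *		uint32_t a7;	// Parameter on entry
 *	};
 *
 * Since the struct mirrors the push {r0-r7} layout word for word, the
 * handler can update the members in place and the pop {r1-r8} above
 * moves the results into the registers used when issuing the SMC back
 * to the normal world.
 */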

LOCAL_FUNC vector_fiq_entry , :
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	ldr	lr, =thread_fiq_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fiq_entry

LOCAL_FUNC vector_cpu_on_entry , :
	ldr	lr, =thread_cpu_on_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
	ldr	lr, =thread_cpu_off_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
	ldr	lr, =thread_cpu_suspend_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
	ldr	lr, =thread_cpu_resume_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
	ldr	lr, =thread_system_off_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
	ldr	lr, =thread_system_reset_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization. Also used when compiled with the internal monitor,
 * but the cpu_*_entry and system_*_entry branches are not used then.
 *
 * Note that ARM-TF depends on the layout of this vector table; any
 * change in layout has to be synced with ARM-TF. This layout must also
 * be kept in sync with sm_entry_vector in sm.c.
 */
FUNC thread_vector_table , :
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
END_FUNC thread_vector_table
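
/*
 * ARM-TF locates each handler at a fixed offset from thread_vector_table:
 * every entry is a single 4-byte branch instruction, so entry N sits at
 * byte offset 4 * N. As a hedged sketch, the dispatcher side in ARM-TF
 * (opteed) mirrors this layout with a structure along the following
 * lines; the exact type and field names on the ARM-TF side may differ:
 *
 *	struct optee_vectors {
 *		uint32_t std_smc_entry;		// offset  0
 *		uint32_t fast_smc_entry;	// offset  4
 *		uint32_t cpu_on_entry;		// offset  8
 *		uint32_t cpu_off_entry;		// offset 12
 *		uint32_t cpu_resume_entry;	// offset 16
 *		uint32_t cpu_suspend_entry;	// offset 20
 *		uint32_t fiq_entry;		// offset 24
 *		uint32_t system_off_entry;	// offset 28
 *		uint32_t system_reset_entry;	// offset 32
 *	};
 *
 * This is why inserting or reordering branches above silently breaks
 * the contract with ARM-TF and with sm_entry_vector in sm.c.
 */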

FUNC thread_set_abt_sp , :
	mrs	r1, cpsr
	cps	#CPSR_MODE_ABT
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_abt_sp

FUNC thread_set_irq_sp , :
	mrs	r1, cpsr
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_irq_sp

FUNC thread_set_fiq_sp , :
	mrs	r1, cpsr
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_fiq_sp

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */

	cps	#CPSR_MODE_SYS
	ldm	r12!, {sp, lr}		/* Restore user mode sp and lr */

	cps	#CPSR_MODE_SVC
	ldm	r12!, {r1, sp, lr}	/* Restore SVC mode spsr, sp and lr */
	msr	spsr_fsxc, r1

	cps	#CPSR_MODE_SVC
	ldm	r12, {r1, r2}		/* Push pc and cpsr for rfefd below */
	push	{r1, r2}

	ldm	r0, {r0-r12}

	/* Restore CPSR and jump to the instruction to resume at */
	rfefd	sp!
END_FUNC thread_resume

/*
 * Disables asynchronous aborts, IRQ and FIQ and saves the state of the
 * thread; returns the original CPSR.
 */
LOCAL_FUNC thread_save_state , :
	push	{r12, lr}
	/*
	 * Uses the stack for temporary storage while storing needed
	 * context in the thread context struct.
	 */

	mrs	r12, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mov	r5, r12			/* Save CPSR in a preserved register */
	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}
	stm	r0!, {r8-r11}

	pop	{r12, lr}
	stm	r0!, {r12}

	cps	#CPSR_MODE_SYS
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	stm	r0!, {r1, sp, lr}

	msr	cpsr, r6		/* Restore mode */

	mov	r0, r5			/* Return original CPSR */
	bx	lr
END_FUNC thread_save_state
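
/*
 * thread_save_state() and thread_resume() agree on the layout of struct
 * thread_ctx_regs purely through the store/load order used above. A
 * sketch of the layout implied by that order; the real definition lives
 * in the kernel headers and the field names here are only illustrative:
 *
 *	struct thread_ctx_regs {
 *		uint32_t r[13];		// r0-r12, 13 words
 *		uint32_t usr_sp;	// sp and lr banked in SYS/user mode
 *		uint32_t usr_lr;
 *		uint32_t svc_spsr;	// SVC mode spsr, sp and lr
 *		uint32_t svc_sp;
 *		uint32_t svc_lr;
 *		uint32_t pc;		// Resume address, consumed by rfefd
 *		uint32_t cpsr;		// Resume cpsr, consumed by rfefd
 *	};
 *
 * thread_resume() skips the first 13 words with "add r12, r0, #(13 * 4)"
 * and restores r0-r12 last, once no other scratch registers are needed.
 */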

FUNC thread_std_smc_entry , :
	/* Pass r0-r7 in a struct thread_smc_args */
	push	{r0-r7}
	mov	r0, sp
	bl	__thread_std_smc_entry
	/*
	 * Load the returned r0-r3 into preserved registers and skip the
	 * "returned" r4-r7 since they will not be returned to normal
	 * world.
	 */
	pop	{r4-r7}
	add	sp, #(4 * 4)

	/* Disable interrupts before switching to temporary stack */
	cpsid	if
	bl	thread_get_tmp_sp
	mov	sp, r0

	bl	thread_state_free

	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	r1, r4
	mov	r2, r5
	mov	r3, r6
	mov	r4, r7
	smc	#0
	b	.	/* SMC should not return */
END_FUNC thread_std_smc_entry

/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
	push	{lr}
	push	{r0}

	bl	thread_save_state
	mov	r4, r0			/* Save original CPSR */

	/*
	 * Switch to temporary stack and SVC mode. Save CPSR to resume into.
	 */
	bl	thread_get_tmp_sp
	ldr	r5, [sp]		/* Get pointer to rv[] */
	cps	#CPSR_MODE_SVC		/* Change to SVC mode */
	mov	sp, r0			/* Switch to tmp stack */

	mov	r0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	mov	r1, r4			/* CPSR to restore */
	ldr	r2, =.thread_rpc_return
	bl	thread_state_suspend
	mov	r4, r0			/* Supply thread index */
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldm	r5, {r1-r3}		/* Load rv[] into r1-r3 */
	smc	#0
	b	.	/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * it had when thread_save_state() was called above.
	 *
	 * Execution jumps here from thread_resume above when the RPC has
	 * returned. The IRQ and FIQ bits are restored to what they were
	 * when this function was originally entered.
	 */
	pop	{r12}			/* Get pointer to rv[] */
	stm	r12, {r0-r2}		/* Store r0-r2 into rv[] */
	pop	{pc}
END_FUNC thread_rpc

LOCAL_FUNC thread_fiq_handler , :
	/* FIQ has a +4 offset for lr compared to preferred return address */
	sub	lr, lr, #4
	push	{r0-r12, lr}
	bl	thread_check_canaries
	ldr	lr, =thread_fiq_handler_ptr
	ldr	lr, [lr]
	blx	lr
	pop	{r0-r12, lr}
	movs	pc, lr
END_FUNC thread_fiq_handler

LOCAL_FUNC thread_irq_handler , :
	/*
	 * IRQ mode is set up to use the tmp stack so FIQ has to be
	 * disabled before touching the stack. We can also assign
	 * SVC sp from IRQ sp to get SVC mode into the state we
	 * need when doing the SMC below.
	 */
	cpsid	f			/* Disable FIQ also */
	sub	lr, lr, #4
	push	{lr}
	push	{r12}

	bl	thread_save_state

	mov	r0, #THREAD_FLAGS_EXIT_ON_IRQ
	mrs	r1, spsr
	pop	{r12}
	pop	{r2}
	blx	thread_state_suspend
	mov	r4, r0			/* Supply thread index */

	/*
	 * Switch to SVC mode and copy the current stack pointer, as it
	 * already is the tmp stack.
	 */
	mov	r0, sp
	cps	#CPSR_MODE_SVC
	mov	sp, r0

	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	r1, =TEESMC_RETURN_RPC_IRQ
	mov	r2, #0
	mov	r3, #0
	/* r4 is already filled in above */
	smc	#0
	b	.	/* SMC should not return */
END_FUNC thread_irq_handler

FUNC thread_init_vbar , :
	/* Set vector base address register (VBAR) */
	ldr	r0, =thread_vect_table
	write_vbar r0
	bx	lr
END_FUNC thread_init_vbar

/*
 * Below are low level routines handling entry and return from user mode.
 *
 * thread_enter_user_mode() saves all registers that user mode can change
 * so that kernel mode can restore needed registers when resuming
 * execution after the call to thread_enter_user_mode() has returned.
 * thread_enter_user_mode() doesn't return directly since it enters user
 * mode instead; it's thread_unwind_user_mode() that does the returning
 * by restoring the registers saved by thread_enter_user_mode().
 *
 * There are three ways for thread_enter_user_mode() to return to its
 * caller: the user TA calls utee_return, the user TA calls utee_panic,
 * or an abort occurs. A usage sketch follows below.
 *
 * Calls to utee_return or utee_panic are handled as:
 * thread_svc_handler() -> tee_svc_handler() -> tee_svc_do_call(), which
 * calls tee_svc_sys_return() or tee_svc_sys_panic().
 *
 * These function calls return normally, except thread_svc_handler()
 * which is an exception handling routine and therefore reads the return
 * address and SPSR to restore from the stack. tee_svc_sys_return() and
 * tee_svc_sys_panic() change the return address and SPSR used by
 * thread_svc_handler() so that, instead of returning into user mode as
 * with other syscalls, it returns into thread_unwind_user_mode() in
 * kernel mode. When thread_svc_handler() returns, the stack pointer is
 * at the point where thread_enter_user_mode() left it, so this is where
 * thread_unwind_user_mode() can operate.
 *
 * Aborts are handled in a similar way, but by thread_abort_handler()
 * instead: when the pager sees that it's an abort from user mode that
 * can't be handled, it updates the SPSR and return address used by
 * thread_abort_handler() to return into thread_unwind_user_mode()
 * instead.
 */
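
/*
 * A hedged sketch of how a caller in kernel mode is expected to pair
 * the two routines below. The variable names are illustrative only;
 * see thread.h for the real prototypes:
 *
 *	uint32_t panicked = 0;
 *	uint32_t panic_code = 0;
 *	TEE_Result res;
 *
 *	res = thread_enter_user_mode(a0, a1, a2, a3, usr_stack,
 *				     usr_entry_func,
 *				     &panicked, &panic_code);
 *	// Control arrives here only via thread_unwind_user_mode(),
 *	// entered from the syscall or abort paths described above.
 *	if (panicked)
 *		...;	// TA panicked, panic_code has the details
 *
 * The "ret" argument of thread_unwind_user_mode() becomes the return
 * value of thread_enter_user_mode(), while exit_status0/exit_status1
 * are stored through the panicked/panic_code pointers that were saved
 * on the stack at entry.
 */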

/*
 * TEE_Result thread_enter_user_mode(
 *		uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3,
 *		tee_uaddr_t sp, tee_uaddr_t user_func,
 *		uint32_t *panicked, uint32_t *panic_code);
 * See description in thread.h
 */
FUNC thread_enter_user_mode , :
	/*
	 * Save all registers to allow tee_svc_sys_return() to
	 * resume execution as if this function had returned. This is
	 * also used in tee_svc_sys_panic().
	 *
	 * If the stack usage of this function is changed
	 * thread_unwind_user_mode() has to be updated.
	 */
	push	{r4-r12, lr}

	ldr	r4, [sp, #(10 * 0x4)]	/* user stack pointer */
	ldr	r5, [sp, #(11 * 0x4)]	/* user function */

	/*
	 * Save old user sp and set new user sp.
	 */
	cps	#CPSR_MODE_SYS
	mov	r6, sp
	mov	sp, r4
	cps	#CPSR_MODE_SVC
	push	{r6}

	/*
	 * Set the Saved Program Status Register (SPSR) to user mode to
	 * allow entry of user mode through the movs below. Also set the
	 * Thumb bit explicitly since movs, unlike bx, doesn't derive it
	 * from bit 0 of the target address.
	 */
	mrs	r6, cpsr
	bic	r6, #CPSR_MODE_MASK
	orr	r6, #CPSR_MODE_USR
	tst	r5, #1			/* If it's odd we should switch to Thumb mode */
	orrne	r6, #CPSR_T		/* Enable Thumb mode */
	biceq	r6, #CPSR_T		/* Disable Thumb mode */
	bicne	r6, #CPSR_IT_MASK1	/* Clear IT state for Thumb mode */
	bicne	r6, #CPSR_IT_MASK2	/* Clear IT state for Thumb mode */
	msr	spsr_cxsf, r6

	/*
	 * Don't allow return from this function; the return is done
	 * through thread_unwind_user_mode() below.
	 */
	mov	lr, #0
	/* Call the user function with its arguments */
	movs	pc, r5
END_FUNC thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	ldr	ip, [sp, #(13 * 0x4)]	/* &ctx->panicked */
	str	r1, [ip]
	ldr	ip, [sp, #(14 * 0x4)]	/* &ctx->panic_code */
	str	r2, [ip]

	/* Restore old user sp */
	pop	{r4}
	cps	#CPSR_MODE_SYS
	mov	sp, r4
	cps	#CPSR_MODE_SVC

	pop	{r4-r12, pc}	/* Match the push in thread_enter_user_mode() */
END_FUNC thread_unwind_user_mode

LOCAL_FUNC thread_abort_handler , :
thread_abort_handler:
thread_und_handler:
	/*
	 * Switch to abort mode to use that stack instead.
	 */
	cps	#CPSR_MODE_ABT
	push	{r0-r11, ip}
	cps	#CPSR_MODE_UND
	sub	r1, lr, #4	/* Calculate address of aborted instruction */
	mrs	r0, spsr
	cps	#CPSR_MODE_ABT
	push	{r0, r1}
	msr	spsr_fsxc, r0	/* In case some code reads spsr directly */
	mov	r0, #THREAD_ABORT_UNDEF
	b	.thread_abort_generic

thread_dabort_handler:
	push	{r0-r11, ip}
	sub	r1, lr, #8	/* Calculate address of aborted instruction */
	mrs	r0, spsr
	push	{r0, r1}
	mov	r0, #THREAD_ABORT_DATA
	b	.thread_abort_generic

thread_pabort_handler:
	push	{r0-r11, ip}
	sub	r1, lr, #4	/* Calculate address of aborted instruction */
	mrs	r0, spsr
	push	{r0, r1}
	mov	r0, #THREAD_ABORT_PREFETCH
	b	.thread_abort_generic

.thread_abort_generic:
	cps	#CPSR_MODE_SYS
	mov	r1, sp
	mov	r2, lr
	cps	#CPSR_MODE_ABT
	push	{r1-r3}
	mov	r1, sp
	bl	thread_handle_abort
	pop	{r1-r3}
	cps	#CPSR_MODE_SYS
	mov	sp, r1
	mov	lr, r2
	cps	#CPSR_MODE_ABT
	pop	{r0, r1}
	mov	lr, r1
	msr	spsr_fsxc, r0
	pop	{r0-r11, ip}
	movs	pc, lr
END_FUNC thread_abort_handler

LOCAL_FUNC thread_svc_handler , :
	push	{r0-r7, lr}
	mrs	r0, spsr
	push	{r0}
	mov	r0, sp		/* Pass pointer to saved spsr and r0-r7 */
	ldr	lr, =thread_svc_handler_ptr
	ldr	lr, [lr]
	blx	lr
	pop	{r0}
	msr	spsr_fsxc, r0
	pop	{r0-r7, lr}
	movs	pc, lr
END_FUNC thread_svc_handler

	.align	5
LOCAL_FUNC thread_vect_table , :
	b	.			/* Reset			*/
	b	thread_und_handler	/* Undefined instruction	*/
	b	thread_svc_handler	/* System call			*/
	b	thread_pabort_handler	/* Prefetch abort		*/
	b	thread_dabort_handler	/* Data abort			*/
	b	.			/* Reserved			*/
	b	thread_irq_handler	/* IRQ				*/
	b	thread_fiq_handler	/* FIQ				*/
END_FUNC thread_vect_table