/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm.S>
#include <arm.h>
#include <arm32_macros.S>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed_macros.h>
#include <sm/teesmc_opteed.h>
#include <kernel/abort.h>
#include <kernel/thread_defs.h>
#include <kernel/unwind.h>

        .section .text.thread_asm

LOCAL_FUNC vector_std_smc_entry , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        push    {r0-r7}
        mov     r0, sp
        bl      thread_handle_std_smc
        /*
         * Normally thread_handle_std_smc() should return via
         * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
         * hasn't switched stacks (an error was detected) it does a
         * normal "C" return.
         */
        pop     {r1-r8}
        ldr     r0, =TEESMC_OPTEED_RETURN_CALL_DONE
        smc     #0
        b       .       /* SMC should not return */
UNWIND( .fnend)
END_FUNC vector_std_smc_entry

LOCAL_FUNC vector_fast_smc_entry , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        push    {r0-r7}
        mov     r0, sp
        bl      thread_handle_fast_smc
        pop     {r1-r8}
        ldr     r0, =TEESMC_OPTEED_RETURN_CALL_DONE
        smc     #0
        b       .       /* SMC should not return */
UNWIND( .fnend)
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        /* The secure monitor received a FIQ and passed control to us. */
        bl      thread_check_canaries
        ldr     lr, =thread_fiq_handler_ptr
        ldr     lr, [lr]
        blx     lr
        mov     r1, r0
        ldr     r0, =TEESMC_OPTEED_RETURN_FIQ_DONE
        smc     #0
        b       .       /* SMC should not return */
UNWIND( .fnend)
END_FUNC vector_fiq_entry

LOCAL_FUNC vector_cpu_on_entry , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        ldr     lr, =thread_cpu_on_handler_ptr
        ldr     lr, [lr]
        blx     lr
        mov     r1, r0
        ldr     r0, =TEESMC_OPTEED_RETURN_ON_DONE
        smc     #0
        b       .       /* SMC should not return */
UNWIND( .fnend)
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        ldr     lr, =thread_cpu_off_handler_ptr
        ldr     lr, [lr]
        blx     lr
        mov     r1, r0
        ldr     r0, =TEESMC_OPTEED_RETURN_OFF_DONE
        smc     #0
        b       .       /* SMC should not return */
UNWIND( .fnend)
END_FUNC vector_cpu_off_entry

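/*
 * The vector_cpu_*_entry and vector_system_*_entry routines above and
 * below all share one dispatch pattern: fetch a handler from a global
 * function pointer, call it, and report the result to the secure
 * monitor with an SMC. A rough C equivalent of, for example,
 * vector_cpu_on_entry (a hedged sketch; the exact handler signatures
 * and pointer declarations live in the C parts of the thread code):
 *
 *      uint32_t (*thread_cpu_on_handler_ptr)(uint32_t a0, uint32_t a1);
 *
 *      res = thread_cpu_on_handler_ptr(a0, a1);
 *      smc(TEESMC_OPTEED_RETURN_ON_DONE, res);    does not return
 */
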
LOCAL_FUNC vector_cpu_suspend_entry , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        ldr     lr, =thread_cpu_suspend_handler_ptr
        ldr     lr, [lr]
        blx     lr
        mov     r1, r0
        ldr     r0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
        smc     #0
        b       .       /* SMC should not return */
UNWIND( .fnend)
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        ldr     lr, =thread_cpu_resume_handler_ptr
        ldr     lr, [lr]
        blx     lr
        mov     r1, r0
        ldr     r0, =TEESMC_OPTEED_RETURN_RESUME_DONE
        smc     #0
        b       .       /* SMC should not return */
UNWIND( .fnend)
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        ldr     lr, =thread_system_off_handler_ptr
        ldr     lr, [lr]
        blx     lr
        mov     r1, r0
        ldr     r0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
        smc     #0
        b       .       /* SMC should not return */
UNWIND( .fnend)
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        ldr     lr, =thread_system_reset_handler_ptr
        ldr     lr, [lr]
        blx     lr
        mov     r1, r0
        ldr     r0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
        smc     #0
        b       .       /* SMC should not return */
UNWIND( .fnend)
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization. Also used when compiled with the internal monitor,
 * but the cpu_*_entry and system_*_entry are not used then.
 *
 * Note that ARM-TF depends on the layout of this vector table; any
 * change in layout has to be synced with ARM-TF. This layout must also
 * be kept in sync with sm_entry_vector in sm.c.
 */
FUNC thread_vector_table , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        b       vector_std_smc_entry
        b       vector_fast_smc_entry
        b       vector_cpu_on_entry
        b       vector_cpu_off_entry
        b       vector_cpu_resume_entry
        b       vector_cpu_suspend_entry
        b       vector_fiq_entry
        b       vector_system_off_entry
        b       vector_system_reset_entry
UNWIND( .fnend)
END_FUNC thread_vector_table

FUNC thread_set_abt_sp , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        mrs     r1, cpsr
        cps     #CPSR_MODE_ABT
        mov     sp, r0
        msr     cpsr, r1
        bx      lr
UNWIND( .fnend)
END_FUNC thread_set_abt_sp

FUNC thread_set_irq_sp , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        mrs     r1, cpsr
        cps     #CPSR_MODE_IRQ
        mov     sp, r0
        msr     cpsr, r1
        bx      lr
UNWIND( .fnend)
END_FUNC thread_set_irq_sp

FUNC thread_set_fiq_sp , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        mrs     r1, cpsr
        cps     #CPSR_MODE_FIQ
        mov     sp, r0
        msr     cpsr, r1
        bx      lr
UNWIND( .fnend)
END_FUNC thread_set_fiq_sp

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        add     r12, r0, #(13 * 4)      /* Restore registers r0-r12 later */

        cps     #CPSR_MODE_SYS
        ldm     r12!, {sp, lr}

        cps     #CPSR_MODE_SVC
        ldm     r12!, {r1, sp, lr}
        msr     spsr_fsxc, r1

        cps     #CPSR_MODE_SVC
        ldm     r12, {r1, r2}
        push    {r1, r2}

        ldm     r0, {r0-r12}

        /* Restore CPSR and jump to the instruction to resume at */
        rfefd   sp!
UNWIND( .fnend)
END_FUNC thread_resume

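/*
 * thread_resume() above and thread_save_state() below walk struct
 * thread_ctx_regs by byte offset. A hedged sketch of the layout those
 * offsets imply (the authoritative definition is in the thread headers;
 * field names here are illustrative):
 *
 *      struct thread_ctx_regs {
 *              uint32_t r0_r12[13];    restored last with ldm r0, {r0-r12}
 *              uint32_t usr_sp;        at offset 13 * 4, hence the add
 *              uint32_t usr_lr;
 *              uint32_t svc_spsr;
 *              uint32_t svc_sp;
 *              uint32_t svc_lr;
 *              uint32_t pc;            pushed, then consumed by rfefd sp!
 *              uint32_t cpsr;
 *      };
 */
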
/*
 * Disables IRQ and FIQ and saves the state of the thread, returns the
 * original CPSR.
 */
LOCAL_FUNC thread_save_state , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        push    {r12, lr}
        /*
         * Uses the stack for temporary storage, while storing the
         * needed context in the thread context struct.
         */

        mrs     r12, cpsr

        cpsid   aif             /* Disable Async abort, IRQ and FIQ */

        push    {r4-r7}
        push    {r0-r3}

        mov     r5, r12         /* Save CPSR in a preserved register */
        mrs     r6, cpsr        /* Save current CPSR */

        bl      thread_get_ctx_regs

        pop     {r1-r4}         /* r0-r3 pushed above */
        stm     r0!, {r1-r4}
        pop     {r1-r4}         /* r4-r7 pushed above */
        stm     r0!, {r1-r4}
        stm     r0!, {r8-r11}

        pop     {r12, lr}
        stm     r0!, {r12}

        cps     #CPSR_MODE_SYS
        stm     r0!, {sp, lr}

        cps     #CPSR_MODE_SVC
        mrs     r1, spsr
        stm     r0!, {r1, sp, lr}

        orr     r6, r6, #ARM32_CPSR_FIA /* Disable Async abort, IRQ and FIQ */
        msr     cpsr, r6        /* Restore mode */

        mov     r0, r5          /* Return original CPSR */
        bx      lr
UNWIND( .fnend)
END_FUNC thread_save_state

FUNC thread_std_smc_entry , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        /* Pass r0-r7 in a struct thread_smc_args */
        push    {r0-r7}
        mov     r0, sp
        bl      __thread_std_smc_entry
        /*
         * Load the returned r0-r3 into preserved registers and skip the
         * "returned" r4-r7 since they will not be returned to normal
         * world.
         */
        pop     {r4-r7}
        add     sp, #(4 * 4)

        /* Disable interrupts before switching to the temporary stack */
        cpsid   aif
        bl      thread_get_tmp_sp
        mov     sp, r0

        bl      thread_state_free

        ldr     r0, =TEESMC_OPTEED_RETURN_CALL_DONE
        mov     r1, r4
        mov     r2, r5
        mov     r3, r6
        mov     r4, r7
        smc     #0
        b       .       /* SMC should not return */
UNWIND( .fnend)
END_FUNC thread_std_smc_entry

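/*
 * vector_std_smc_entry and vector_fast_smc_entry earlier in this file,
 * and thread_std_smc_entry just above, all hand the SMC arguments to C
 * the same way: push r0-r7 and pass sp as a struct thread_smc_args
 * pointer. A hedged sketch of what that implies, eight consecutive
 * 32-bit words with a0 at the lowest address (the real definition is
 * in the thread headers):
 *
 *      struct thread_smc_args {
 *              uint32_t a0;    SMC function ID, return status on exit
 *              uint32_t a1;
 *              ...
 *              uint32_t a7;
 *      };
 */
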
/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
/*
 * r0-r2 are used to pass parameters to normal world.
 * r0-r5 are used to pass the return value back from normal world.
 *
 * Note that r3 is used to pass "resume information", that is, which
 * thread it is that should resume.
 *
 * Since this function follows the AAPCS we need to preserve r4-r5,
 * which are otherwise modified when returning from normal world.
 */
UNWIND( .fnstart)
        push    {r4-r5, lr}
UNWIND( .save {r4-r5, lr})
        push    {r0}
UNWIND( .save {r0})

        bl      thread_save_state
        mov     r4, r0                  /* Save original CPSR */

        /*
         * Switch to the temporary stack and SVC mode. Save CPSR to
         * resume into.
         */
        bl      thread_get_tmp_sp
        ldr     r5, [sp]                /* Get pointer to rv[] */
        cps     #CPSR_MODE_SVC          /* Change to SVC mode */
        mov     sp, r0                  /* Switch to tmp stack */

        mov     r0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
        mov     r1, r4                  /* CPSR to restore */
        ldr     r2, =.thread_rpc_return
        bl      thread_state_suspend
        mov     r4, r0                  /* Supply thread index */
        ldr     r0, =TEESMC_OPTEED_RETURN_CALL_DONE
        ldm     r5, {r1-r3}             /* Load rv[] into r1-r3 */
        smc     #0
        b       .       /* SMC should not return */

.thread_rpc_return:
        /*
         * At this point the stack pointer has been restored to the
         * value it had when thread_save_state() was called above.
         *
         * Jumps here from thread_resume() above when the RPC has
         * returned. The IRQ and FIQ bits are restored to what they
         * were when this function was originally entered.
         */
        pop     {r12}                   /* Get pointer to rv[] */
        stm     r12, {r0-r5}            /* Store r0-r5 into rv[] */
        pop     {r4-r5, pc}
UNWIND( .fnend)
END_FUNC thread_rpc

LOCAL_FUNC thread_fiq_handler , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        /* FIQ has a +4 offset for lr compared to the preferred return address */
        sub     lr, lr, #4
        /*
         * We're saving {r0-r3} and the banked FIQ registers {r8-r12}.
         * The banked FIQ registers need to be saved because the secure
         * monitor doesn't save those. The treatment of the banked FIQ
         * registers is somewhat analogous to the lazy save of VFP
         * registers.
         */
        push    {r0-r3, r8-r12, lr}
        bl      thread_check_canaries
        ldr     lr, =thread_fiq_handler_ptr
        ldr     lr, [lr]
        blx     lr
        pop     {r0-r3, r8-r12, lr}
        movs    pc, lr
UNWIND( .fnend)
END_FUNC thread_fiq_handler

LOCAL_FUNC thread_irq_handler , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        /*
         * IRQ mode is set up to use the tmp stack so FIQ has to be
         * disabled before touching the stack. We can also assign
         * SVC sp from IRQ sp to get SVC mode into the state we
         * need when doing the SMC below.
         */
        cpsid   f               /* Disable FIQ too */
        sub     lr, lr, #4
        push    {lr}
        push    {r12}

        bl      thread_save_state

        mov     r0, #THREAD_FLAGS_EXIT_ON_IRQ
        mrs     r1, spsr
        pop     {r12}
        pop     {r2}
        blx     thread_state_suspend
        mov     r4, r0          /* Supply thread index */

        /*
         * Switch to SVC mode and copy the current stack pointer as it
         * already is the tmp stack.
         */
        mov     r0, sp
        cps     #CPSR_MODE_SVC
        mov     sp, r0

        ldr     r0, =TEESMC_OPTEED_RETURN_CALL_DONE
        ldr     r1, =OPTEE_SMC_RETURN_RPC_IRQ
        mov     r2, #0
        mov     r3, #0
        /* r4 is already filled in above */
        smc     #0
        b       .       /* SMC should not return */
UNWIND( .fnend)
END_FUNC thread_irq_handler

FUNC thread_init_vbar , :
UNWIND( .fnstart)
        /* Set vector (VBAR) */
        ldr     r0, =thread_vect_table
        write_vbar r0
        bx      lr
UNWIND( .fnend)
END_FUNC thread_init_vbar

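/*
 * Note on thread_init_vbar() above: the low five bits of VBAR are
 * reserved, so the vector table it points at must be 32-byte aligned.
 * That is why thread_vect_table at the end of this file is preceded by
 * ".align 5".
 */
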
/*
 * Below are the low level routines handling entry to and return from
 * user mode.
 *
 * thread_enter_user_mode() saves all the registers that user mode can
 * change so kernel mode can restore the needed registers when resuming
 * execution after the call to thread_enter_user_mode() has returned.
 * thread_enter_user_mode() doesn't return directly since it enters user
 * mode instead; it's thread_unwind_user_mode() that does the returning
 * by restoring the registers saved by thread_enter_user_mode().
 *
 * There are three ways for thread_enter_user_mode() to return to its
 * caller: the user TA calls utee_return, the user TA calls utee_panic,
 * or through an abort.
 *
 * Calls to utee_return or utee_panic are handled as:
 * thread_svc_handler() -> tee_svc_handler() -> tee_svc_do_call(), which
 * calls syscall_return() or syscall_panic().
 *
 * These function calls return normally, except thread_svc_handler(),
 * which is an exception handling routine and so reads the return
 * address and SPSR to restore from the stack. syscall_return() and
 * syscall_panic() change the return address and SPSR used by
 * thread_svc_handler() so that, instead of returning into user mode as
 * with other syscalls, it returns into thread_unwind_user_mode() in
 * kernel mode. When thread_svc_handler() returns, the stack pointer is
 * at the point where thread_enter_user_mode() left it, which is where
 * thread_unwind_user_mode() can operate.
 *
 * Aborts are handled in a similar way, but by thread_abort_handler()
 * instead: when the pager sees that it's an abort from user mode that
 * can't be handled, it updates the SPSR and return address used by
 * thread_abort_handler() to return into thread_unwind_user_mode()
 * instead.
 */

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *              unsigned long a2, unsigned long a3, unsigned long user_sp,
 *              unsigned long user_func, unsigned long spsr,
 *              uint32_t *exit_status0, uint32_t *exit_status1)
 */
FUNC __thread_enter_user_mode , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        /*
         * Save all registers to allow syscall_return() to resume
         * execution as if this function had returned. This is also
         * used in syscall_panic().
         *
         * If the stack usage of this function is changed,
         * thread_unwind_user_mode() has to be updated.
         */
        push    {r4-r12, lr}

        ldr     r4, [sp, #(10 * 0x4)]   /* user stack pointer */
        ldr     r5, [sp, #(11 * 0x4)]   /* user function */
        ldr     r6, [sp, #(12 * 0x4)]   /* spsr */

        /*
         * Set the Saved Program Status Register to user mode to allow
         * entry of user mode through the movs below.
         */
        msr     spsr_cxsf, r6

        /* Save the old user sp and set the new user sp. */
        cps     #CPSR_MODE_SYS
        mov     r6, sp
        mov     sp, r4
        cps     #CPSR_MODE_SVC
        push    {r6, r7}

        /*
         * Don't allow return from this function; the return is done
         * through thread_unwind_user_mode() below.
         */
        mov     lr, #0
        /* Call the user function with its arguments */
        movs    pc, r5
UNWIND( .fnend)
END_FUNC __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *              uint32_t exit_status1);
 * See the description in thread.h.
 */
FUNC thread_unwind_user_mode , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        ldr     ip, [sp, #(15 * 0x4)]   /* &ctx->panicked */
        str     r1, [ip]
        ldr     ip, [sp, #(16 * 0x4)]   /* &ctx->panic_code */
        str     r2, [ip]

        /* Restore the old user sp */
        pop     {r4, r7}
        cps     #CPSR_MODE_SYS
        mov     sp, r4
        cps     #CPSR_MODE_SVC

        pop     {r4-r12, pc}    /* Match the push in __thread_enter_user_mode() */
UNWIND( .fnend)
END_FUNC thread_unwind_user_mode

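/*
 * Shared stack frame contract between __thread_enter_user_mode() and
 * thread_unwind_user_mode(), in 32-bit words from the stack pointer as
 * thread_unwind_user_mode() sees it (derived from the pushes above):
 *
 *      sp + 0:         saved user sp (the r6 pushed last)
 *      sp + 1:         r7
 *      sp + 2..11:     r4-r12 and lr from the first push
 *      sp + 12..14:    user_sp, user_func and spsr arguments
 *      sp + 15:        exit_status0 argument, i.e. &ctx->panicked
 *      sp + 16:        exit_status1 argument, i.e. &ctx->panic_code
 *
 * This is what the #(15 * 0x4) and #(16 * 0x4) loads in
 * thread_unwind_user_mode() rely on, and why the comment in
 * __thread_enter_user_mode() warns that any change in stack usage must
 * be mirrored there.
 */
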
LOCAL_FUNC thread_abort_handler , :
thread_und_handler:
UNWIND( .fnstart)
UNWIND( .cantunwind)
        /* Switch to abort mode to use that stack instead. */
        cps     #CPSR_MODE_ABT
        push    {r0-r11, ip}
        cps     #CPSR_MODE_UND
        mrs     r0, spsr
        tst     r0, #CPSR_T
        subne   r1, lr, #2
        subeq   r1, lr, #4
        cps     #CPSR_MODE_ABT
        push    {r0, r1}
        msr     spsr_fsxc, r0   /* In case some code reads spsr directly */
        mov     r0, #ABORT_TYPE_UNDEF
        b       .thread_abort_generic

thread_dabort_handler:
        push    {r0-r11, ip}
        sub     r1, lr, #8
        mrs     r0, spsr
        push    {r0, r1}
        mov     r0, #ABORT_TYPE_DATA
        b       .thread_abort_generic

thread_pabort_handler:
        push    {r0-r11, ip}
        sub     r1, lr, #4
        mrs     r0, spsr
        push    {r0, r1}
        mov     r0, #ABORT_TYPE_PREFETCH
        b       .thread_abort_generic

.thread_abort_generic:
        cps     #CPSR_MODE_SYS
        mov     r1, sp
        mov     r2, lr
        cps     #CPSR_MODE_ABT
        push    {r1-r3}
        mov     r1, sp
        bl      abort_handler
        pop     {r1-r3}
        cps     #CPSR_MODE_SYS
        mov     sp, r1
        mov     lr, r2
        cps     #CPSR_MODE_ABT
        pop     {r0, r1}
        mov     lr, r1
        msr     spsr_fsxc, r0
        pop     {r0-r11, ip}
        movs    pc, lr
UNWIND( .fnend)
END_FUNC thread_abort_handler

LOCAL_FUNC thread_svc_handler , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        push    {r0-r7, lr}
        mrs     r0, spsr
        push    {r0}
        mov     r0, sp
        bl      tee_svc_handler
        pop     {r0}
        msr     spsr_fsxc, r0
        pop     {r0-r7, lr}
        movs    pc, lr
UNWIND( .fnend)
END_FUNC thread_svc_handler

        .align  5
LOCAL_FUNC thread_vect_table , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        b       .                       /* Reset                        */
        b       thread_und_handler      /* Undefined instruction        */
        b       thread_svc_handler      /* System call                  */
        b       thread_pabort_handler   /* Prefetch abort               */
        b       thread_dabort_handler   /* Data abort                   */
        b       .                       /* Reserved                     */
        b       thread_irq_handler      /* IRQ                          */
        b       thread_fiq_handler      /* FIQ                          */
UNWIND( .fnend)
END_FUNC thread_vect_table
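
/*
 * thread_svc_handler() above passes tee_svc_handler() a pointer to the
 * frame it just pushed. A hedged sketch of the layout that frame
 * implies (the authoritative struct thread_svc_regs is defined in the
 * thread headers; field names here are illustrative):
 *
 *      struct thread_svc_regs {
 *              uint32_t spsr;          pushed last, lowest address
 *              uint32_t r0_r7[8];      syscall number and arguments
 *              uint32_t lr;            user space return address
 *      };
 */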