/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2015-2017, Linaro Limited
 */

#include <arm.h>
#include <arm64_macros.S>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/thread_defs.h>
#include <mm/core_mmu.h>
#include <smccc.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

#include "thread_private.h"

	.macro get_thread_ctx core_local, res, tmp0, tmp1
		ldr	w\tmp0, [\core_local, \
				#THREAD_CORE_LOCAL_CURR_THREAD]
		adr	x\res, threads
		mov	x\tmp1, #THREAD_CTX_SIZE
		madd	x\res, x\tmp0, x\tmp1, x\res
	.endm

	.macro b_if_spsr_is_el0 reg, label
		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
		b.eq	\label
	.endm

LOCAL_FUNC vector_std_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (an error was detected) it will do a
	 * normal "C" return.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_std_smc_entry

LOCAL_FUNC vector_fast_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_fast_smc
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	adr	x16, thread_nintr_handler_ptr
	ldr	x16, [x16]
	blr	x16
	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fiq_entry

LOCAL_FUNC vector_cpu_on_entry , :
	adr	x16, thread_cpu_on_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
	adr	x16, thread_cpu_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
	adr	x16, thread_cpu_suspend_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
	adr	x16, thread_cpu_resume_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
	adr	x16, thread_system_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_off_entry
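
/*
 * The vector_cpu_*_entry and vector_system_*_entry functions in this
 * group all follow the same pattern. Roughly, as an illustrative sketch
 * (not real code, only here to visualize the flow):
 *
 *	uint32_t ret = (*thread_xxx_handler_ptr)();
 *	smc(TEESMC_OPTEED_RETURN_XXX_DONE, ret);
 *	// control goes back to the secure monitor and is not expected
 *	// to return here
 */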

LOCAL_FUNC vector_system_reset_entry , :
	adr	x16, thread_system_reset_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.
 *
 * Note that ARM-TF depends on the layout of this vector table, any change
 * in layout has to be synced with ARM-TF.
 */
FUNC thread_vector_table , :
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
END_FUNC thread_vector_table
KEEP_PAGER thread_vector_table


/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	mov	sp, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3

	b_if_spsr_is_el0 w3, 1f

	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]
	eret

1:	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC thread_resume

FUNC thread_std_smc_entry , :
	/* pass x0-x7 in a struct thread_smc_args */
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp

	/* Call the registered handler */
	bl	__thread_std_smc_entry

	/*
	 * Load the returned x0-x3 into preserved registers and skip the
	 * "returned" x4-x7 since they will not be returned to normal
	 * world.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 20, 23
	add	sp, sp, #THREAD_SMC_ARGS_SIZE

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	bl	thread_get_tmp_sp
	mov	sp, x0

	bl	thread_state_free

	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	x1, x20
	mov	x2, x21
	mov	x3, x22
	mov	x4, x23
	smc	#0
	b	.	/* SMC should not return */
END_FUNC thread_std_smc_entry
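
/*
 * Overview of the RPC round trip implemented by thread_rpc() below,
 * roughly sketched (illustrative only, not real code):
 *
 *	suspend current thread with resume PC = .thread_rpc_return;
 *	smc(TEESMC_OPTEED_RETURN_CALL_DONE,	// w0
 *	    rv[0], rv[1], rv[2],		// w1-w3, the RPC arguments
 *	    thread_index);			// w4
 *	// Normal world services the RPC and eventually resumes the
 *	// thread; execution then continues at .thread_rpc_return where
 *	// w0-w5 passed back from normal world are stored into rv[0..5].
 */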

/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
	/* Read daif and create an SPSR */
	mrs	x1, daif
	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	push	x0, xzr
	push	x1, x30
	bl	thread_get_ctx_regs
	ldr	x30, [sp, #8]
	store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
	mov	x19, x0

	bl	thread_get_tmp_sp
	pop	x1, xzr		/* Match "push x1, x30" above */
	mov	x2, sp
	str	x2, [x19, #THREAD_CTX_REGS_SP]
	ldr	x20, [sp]	/* Get pointer to rv[] */
	mov	sp, x0		/* Switch to tmp stack */

	adr	x2, .thread_rpc_return
	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	bl	thread_state_suspend
	mov	x4, x0		/* Supply thread index */
	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	load_wregs x20, 0, 1, 3	/* Load rv[] into w1-w3 */
	smc	#0
	b	.		/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * stored in THREAD_CTX above.
	 *
	 * Jumps here from thread_resume above when RPC has returned. The
	 * IRQ and FIQ bits are restored to what they were when this
	 * function was originally entered.
	 */
	pop	x16, xzr	/* Get pointer to rv[] */
	store_wregs x16, 0, 0, 5	/* Store w0-w5 into rv[] */
	ret
END_FUNC thread_rpc
KEEP_PAGER thread_rpc

FUNC thread_smc , :
	smc	#0
	ret
END_FUNC thread_smc

FUNC thread_init_vbar , :
	msr	vbar_el1, x0
	ret
END_FUNC thread_init_vbar
KEEP_PAGER thread_init_vbar

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *		unsigned long a2, unsigned long a3, unsigned long user_sp,
 *		unsigned long user_func, unsigned long spsr,
 *		uint32_t *exit_status0, uint32_t *exit_status1)
 *
 */
FUNC __thread_enter_user_mode , :
	ldr	x8, [sp]
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR, 7, 8
	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30

	/*
	 * Switch to SP_EL1
	 * Disable exceptions
	 * Save kern sp in x19
	 */
	msr	daifset, #DAIFBIT_ALL
	mov	x19, sp
	msr	spsel, #1

	/*
	 * Save the kernel stack pointer in the thread context
	 */
	/* get pointer to current thread context */
	get_thread_ctx sp, 21, 20, 22
	/*
	 * Save kernel stack pointer to ensure that el0_svc() uses
	 * correct stack pointer
	 */
	str	x19, [x21, #THREAD_CTX_KERN_SP]

	/*
	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
	 */
	msr	spsr_el1, x6
	/* Set user sp */
	mov	x13, x4		/* Used when running TA in AArch32 */
	msr	sp_el0, x4	/* Used when running TA in AArch64 */
	/* Set user function */
	msr	elr_el1, x5
	/* Set frame pointer (user stack can't be unwound past this point) */
	mov	x29, #0

	/* Jump into user mode */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC __thread_enter_user_mode
KEEP_PAGER __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR]
	str	w1, [x3]
	str	w2, [x4]
	/* Restore x19..x30 */
	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode
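
/*
 * How __thread_enter_user_mode() and thread_unwind_user_mode() pair up,
 * roughly sketched (illustrative only, not real code):
 *
 *	__thread_enter_user_mode():
 *		push a struct thread_user_mode_rec (x19..x30 and the
 *		exit_status0/1 pointers) on the kernel stack;
 *		eret to the user function;	// does not return directly
 *
 *	thread_unwind_user_mode(ret, es0, es1):
 *		store es0/es1 through the saved pointers;
 *		pop the record, restoring x19..x30 (including lr);
 *		return ret to the original caller of
 *		__thread_enter_user_mode();
 */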

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. This is meant to be placed
	 * immediately after the last instruction in the vector. It takes the
	 * vector entry as the parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm

	.macro restore_mapping
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/* Temporarily save x0, x1 */
		msr	tpidr_el1, x0
		msr	tpidrro_el0, x1

		/* Update the mapping to use the full kernel mapping */
		mrs	x0, ttbr0_el1
		sub	x0, x0, #CORE_MMU_L1_TBL_OFFSET
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb

		/* Jump into the full mapping and continue execution */
		ldr	x0, =1f
		br	x0
	1:

		/* Point to the vector into the full mapping */
		adr	x0, thread_user_kcode_offset
		ldr	x0, [x0]
		mrs	x1, vbar_el1
		add	x1, x1, x0
		msr	vbar_el1, x1
		isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
		/*
		 * Update the SP with thread_user_kdata_sp_offset as
		 * described in init_user_kcode().
		 */
		adr	x0, thread_user_kdata_sp_offset
		ldr	x0, [x0]
		add	sp, sp, x0
#endif

		/* Restore x0, x1 */
		mrs	x0, tpidr_el1
		mrs	x1, tpidrro_el0
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
#else
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mrs	x0, ttbr0_el1
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	.endm

#define INV_INSN	0
	.section .text.thread_excp_vect
	.align	11, INV_INSN
FUNC thread_excp_vect , :
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el1_sync_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	el1_sync_abort
	check_vector_size el1_sync_sp0

	.align	7, INV_INSN
el1_irq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size el1_irq_sp0

	.align	7, INV_INSN
el1_fiq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size el1_fiq_sp0

	.align	7, INV_INSN
el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el1_sync_sp1:
	b	el1_sync_sp1
	check_vector_size el1_sync_sp1

	.align	7, INV_INSN
el1_irq_sp1:
	b	el1_irq_sp1
	check_vector_size el1_irq_sp1

	.align	7, INV_INSN
el1_fiq_sp1:
	b	el1_fiq_sp1
	check_vector_size el1_fiq_sp1

	.align	7, INV_INSN
el1_serror_sp1:
	b	el1_serror_sp1
	check_vector_size el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el0_sync_a64:
	restore_mapping

	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH64_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a64

	.align	7, INV_INSN
el0_irq_a64:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a64

	.align	7, INV_INSN
el0_fiq_a64:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a64

	.align	7, INV_INSN
el0_serror_a64:
	b	el0_serror_a64
	check_vector_size el0_serror_a64
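
	/*
	 * The el0_sync_a64 entry above and el0_sync_a32 entry below
	 * dispatch on the exception class, roughly (illustrative only):
	 *
	 *	if ((esr_el1 >> ESR_EC_SHIFT) == ESR_EC_AARCH64_SVC)
	 *		el0_svc();	// or the AArch32 equivalent
	 *	else
	 *		el0_sync_abort();
	 */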

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el0_sync_a32:
	restore_mapping

	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH32_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a32

	.align	7, INV_INSN
el0_irq_a32:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a32

	.align	7, INV_INSN
el0_fiq_a32:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a32

	.align	7, INV_INSN
el0_serror_a32:
	b	el0_serror_a32
	check_vector_size el0_serror_a32

#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC)
	.macro invalidate_branch_predictor
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mov_imm	x0, SMCCC_ARCH_WORKAROUND_1
		smc	#0
		load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	.endm

	.align	11, INV_INSN
	.global thread_excp_vect_workaround
thread_excp_vect_workaround:
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el1_sync_sp0:
	b	el1_sync_sp0
	check_vector_size workaround_el1_sync_sp0

	.align	7, INV_INSN
workaround_el1_irq_sp0:
	b	el1_irq_sp0
	check_vector_size workaround_el1_irq_sp0

	.align	7, INV_INSN
workaround_el1_fiq_sp0:
	b	el1_fiq_sp0
	check_vector_size workaround_el1_fiq_sp0

	.align	7, INV_INSN
workaround_el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size workaround_el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el1_sync_sp1:
	b	workaround_el1_sync_sp1
	check_vector_size workaround_el1_sync_sp1

	.align	7, INV_INSN
workaround_el1_irq_sp1:
	b	workaround_el1_irq_sp1
	check_vector_size workaround_el1_irq_sp1

	.align	7, INV_INSN
workaround_el1_fiq_sp1:
	b	workaround_el1_fiq_sp1
	check_vector_size workaround_el1_fiq_sp1

	.align	7, INV_INSN
workaround_el1_serror_sp1:
	b	workaround_el1_serror_sp1
	check_vector_size workaround_el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el0_sync_a64:
	invalidate_branch_predictor
	b	el0_sync_a64
	check_vector_size workaround_el0_sync_a64

	.align	7, INV_INSN
workaround_el0_irq_a64:
	invalidate_branch_predictor
	b	el0_irq_a64
	check_vector_size workaround_el0_irq_a64

	.align	7, INV_INSN
workaround_el0_fiq_a64:
	invalidate_branch_predictor
	b	el0_fiq_a64
	check_vector_size workaround_el0_fiq_a64

	.align	7, INV_INSN
workaround_el0_serror_a64:
	b	workaround_el0_serror_a64
	check_vector_size workaround_el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el0_sync_a32:
	invalidate_branch_predictor
	b	el0_sync_a32
	check_vector_size workaround_el0_sync_a32

	.align	7, INV_INSN
workaround_el0_irq_a32:
	invalidate_branch_predictor
	b	el0_irq_a32
	check_vector_size workaround_el0_irq_a32
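
/*
 * When CFG_CORE_WORKAROUND_SPECTRE_BP_SEC is enabled the table above is
 * intended to be installed in VBAR_EL1 (via thread_init_vbar()) in place
 * of thread_excp_vect on cores needing the mitigation. Each lower EL
 * entry roughly does (illustrative only):
 *
 *	smc(SMCCC_ARCH_WORKAROUND_1);	// let EL3 invalidate the branch
 *					// predictor
 *	goto the_corresponding_entry_in_thread_excp_vect;
 */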

	.align	7, INV_INSN
workaround_el0_fiq_a32:
	invalidate_branch_predictor
	b	el0_fiq_a32
	check_vector_size workaround_el0_fiq_a32

	.align	7, INV_INSN
workaround_el0_serror_a32:
	b	workaround_el0_serror_a32
	check_vector_size workaround_el0_serror_a32
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

/*
 * We're keeping this code in the same section as the vector to make sure
 * that it's always available.
 */
eret_to_el0:

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Point to the vector into the reduced mapping */
	adr	x0, thread_user_kcode_offset
	ldr	x0, [x0]
	mrs	x1, vbar_el1
	sub	x1, x1, x0
	msr	vbar_el1, x1
	isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	/* Store the SP offset in tpidr_el1 to be used below to update SP */
	adr	x1, thread_user_kdata_sp_offset
	ldr	x1, [x1]
	msr	tpidr_el1, x1
#endif

	/* Jump into the reduced mapping and continue execution */
	ldr	x1, =1f
	sub	x1, x1, x0
	br	x1
1:

	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	msr	tpidrro_el0, x0

	/* Update the mapping to exclude the full kernel mapping */
	mrs	x0, ttbr0_el1
	add	x0, x0, #CORE_MMU_L1_TBL_OFFSET
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT)	/* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	/*
	 * Update the SP with thread_user_kdata_sp_offset as described in
	 * init_user_kcode().
	 */
	mrs	x0, tpidr_el1
	sub	sp, sp, x0
#endif

	mrs	x0, tpidrro_el0
#else
	mrs	x0, ttbr0_el1
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT)	/* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	eret

	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect
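
/*
 * eret_to_el0 above and the restore_mapping macro are the two halves of
 * the CFG_CORE_UNMAP_CORE_AT_EL0 handling, roughly (illustrative only):
 *
 *	eret_to_el0:		vbar_el1 -= thread_user_kcode_offset;
 *				ttbr0_el1 += CORE_MMU_L1_TBL_OFFSET and
 *				switch to the user ASID; // reduced mapping
 *				eret;			 // enter EL0
 *
 *	restore_mapping:	ttbr0_el1 -= CORE_MMU_L1_TBL_OFFSET and
 *				switch to the kernel ASID; // full mapping
 *				vbar_el1 += thread_user_kcode_offset;
 */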

LOCAL_FUNC el0_svc , :
	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* load saved kernel sp */
	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
	/* Keep pointer to initial record in x1 */
	mov	x1, sp
	/* Switch to SP_EL0 and restore kernel sp */
	msr	spsel, #0
	mov	x2, sp	/* Save SP_EL0 */
	mov	sp, x0

	/* Make room for struct thread_svc_regs */
	sub	sp, sp, #THREAD_SVC_REG_SIZE
	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]

	/* Restore x0-x3 */
	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]

	/* Prepare the argument for the handler */
	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
	mrs	x0, elr_el1
	mrs	x1, spsr_el1
	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	mov	x0, sp

	/*
	 * Unmask native interrupts, SError, and debug exceptions since we
	 * have nothing left in sp_el1. Note that the SVC handler is
	 * expected to re-enable foreign interrupts by itself.
	 */
#if defined(CFG_ARM_GICV3)
	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
#else
	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
#endif

	/* Call the handler */
	bl	tee_svc_handler

	/* Mask all maskable exceptions since we're switching back to sp_el1 */
	msr	daifset, #DAIFBIT_ALL

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This matters when this TA has called another TA, because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	msr	spsel, #1
	get_thread_ctx sp, 0, 1, 2
	msr	spsel, #0
	add	x1, sp, #THREAD_SVC_REG_SIZE
	str	x1, [x0, #THREAD_CTX_KERN_SP]

	/* Restore registers to the required state and return */
	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	msr	elr_el1, x0
	msr	spsr_el1, x1
	load_xregs sp, THREAD_SVC_REG_X2, 2, 14
	mov	x30, sp
	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
	mov	sp, x0
	b_if_spsr_is_el0 w1, 1f
	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	eret

1:	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC el0_svc

LOCAL_FUNC el1_sync_abort , :
	mov	x0, sp
	msr	spsel, #0
	mov	x3, sp		/* Save original sp */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
			.Lsel_tmp_sp

	/* Select abort stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	b	.Lset_sp

.Lsel_tmp_sp:
	/* Select tmp stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

.Lset_sp:
	mov	sp, x2
	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	eret
END_FUNC el1_sync_abort
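
/*
 * The THREAD_CLF_SAVED_SHIFT shuffling in the abort and interrupt
 * handlers keeps a small per-core history of "what is this core doing"
 * flags, roughly (illustrative only):
 *
 *	on entry:	flags = (flags << THREAD_CLF_SAVED_SHIFT) |
 *				THREAD_CLF_ABORT;
 *			// el1_sync_abort uses the saved copy to detect a
 *			// nested abort and then selects the tmp stack
 *			// instead of the abort stack
 *	on exit:	flags >>= THREAD_CLF_SAVED_SHIFT;
 */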

	/* sp_el0 in x3 */
LOCAL_FUNC el0_sync_abort , :
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */

	/* load abt_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x2 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3

	b_if_spsr_is_el0 w0, 1f

	/* Restore x0 to x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	eret
1:	b	eret_to_el0
END_FUNC el0_sync_abort

/* The handler of foreign interrupts. */
.macro foreign_intr_handler	mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0

	/* Store original sp_el0 */
	str	x2, [x0, #THREAD_CTX_REGS_SP]
	/* store x4..x30 */
	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	/* Load original x0..x3 into x10..x13 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
	/* Save original x0..x3 */
	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Mark current thread as suspended
	 */
	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	x1, spsr_el1
	mrs	x2, elr_el1
	bl	thread_state_suspend
	mov	w4, w0		/* Supply thread index */

	/* Update core local flags */
	/* Switch to SP_EL1 */
	msr	spsel, #1
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	msr	spsel, #0

	/*
	 * Note that we're exiting with SP_EL0 selected since the entry
	 * functions expect to have SP_EL0 selected with the tmp stack
	 * set.
	 */

	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	w1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
	mov	w2, #0
	mov	w3, #0
	/* w4 is already filled in above */
	smc	#0
	b	.		/* SMC should not return */
.endm
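
/*
 * Summary of the foreign interrupt path above (illustrative only): the
 * interrupted thread is suspended and the core is handed back to normal
 * world as an RPC, roughly:
 *
 *	save x0..x30 and sp_el0 in the thread context;
 *	thread_state_suspend(THREAD_FLAGS_EXIT_ON_FOREIGN_INTR, spsr, elr);
 *	smc(TEESMC_OPTEED_RETURN_CALL_DONE,
 *	    OPTEE_SMC_RETURN_RPC_FOREIGN_INTR, 0, 0, thread_index);
 *	// normal world takes the interrupt and later resumes the thread
 */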

/*
 * This struct is never used from C, it's only here to visualize the
 * layout.
 *
 * struct elx_nintr_rec {
 *	uint64_t x[19 - 4];	x4..x18
 *	uint64_t lr;
 *	uint64_t sp_el0;
 * };
 */
#define ELX_NINTR_REC_X(x)	(8 * ((x) - 4))
#define ELX_NINTR_REC_LR	(8 + ELX_NINTR_REC_X(19))
#define ELX_NINTR_REC_SP_EL0	(8 + ELX_NINTR_REC_LR)
#define ELX_NINTR_REC_SIZE	(8 + ELX_NINTR_REC_SP_EL0)

/* The handler of native interrupts. */
.macro native_intr_handler	mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Save registers on stack that can be corrupted by a call to
	 * a C function
	 */
	/* Make room for struct elx_nintr_rec */
	sub	sp, sp, #ELX_NINTR_REC_SIZE
	/* Store x4..x18 */
	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Store lr and original sp_el0 */
	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]

	bl	thread_check_canaries
	adr	x16, thread_nintr_handler_ptr
	ldr	x16, [x16]
	blr	x16

	/*
	 * Restore registers
	 */
	/* Restore x4..x18 */
	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Load lr and original sp_el0 */
	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
	/* Restore SP_EL0 */
	mov	sp, x2
	/* Switch back to SP_EL1 */
	msr	spsel, #1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	mrs	x0, spsr_el1
	/* Restore x2..x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
	b_if_spsr_is_el0 w0, 1f

	/* Restore x0..x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	eret
1:	b	eret_to_el0
.endm

LOCAL_FUNC elx_irq , :
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
END_FUNC elx_irq

LOCAL_FUNC elx_fiq , :
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
END_FUNC elx_fiq
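
/*
 * Note on the selection above: as the #if encodes, with CFG_ARM_GICV3
 * native (secure) interrupts are taken as IRQ and a FIQ is treated as a
 * foreign interrupt to be passed to normal world, while without it the
 * roles are swapped and FIQ is the native interrupt.
 */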