/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2015-2017, Linaro Limited
 */

#include <arm.h>
#include <arm64_macros.S>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/thread_defs.h>
#include <mm/core_mmu.h>
#include <smccc.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

#include "thread_private.h"

	.macro get_thread_ctx core_local, res, tmp0, tmp1
		ldr	w\tmp0, [\core_local, \
				#THREAD_CORE_LOCAL_CURR_THREAD]
		ldr	x\res, =threads
		mov	x\tmp1, #THREAD_CTX_SIZE
		madd	x\res, x\tmp0, x\tmp1, x\res
	.endm

	.macro b_if_spsr_is_el0 reg, label
		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
		b.eq	\label
	.endm

LOCAL_FUNC vector_std_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (error detected) it will do a normal "C"
	 * return.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_std_smc_entry

LOCAL_FUNC vector_fast_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_fast_smc
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	adr	x16, thread_nintr_handler_ptr
	ldr	x16, [x16]
	blr	x16
	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fiq_entry

LOCAL_FUNC vector_cpu_on_entry , :
	adr	x16, thread_cpu_on_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
	adr	x16, thread_cpu_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
	adr	x16, thread_cpu_suspend_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
	adr	x16, thread_cpu_resume_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
	adr	x16, thread_system_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_off_entry
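
/*
 * Note on the vector_*_entry handlers above and below: each one calls the
 * registered handler through a function pointer, moves the handler's
 * return value (when there is one) into x1 and loads the matching
 * TEESMC_OPTEED_RETURN_*_DONE function ID into x0 before issuing the SMC
 * that hands control back to the secure monitor. That SMC is not expected
 * to return, hence the "b ." after each one.
 */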

LOCAL_FUNC vector_system_reset_entry , :
	adr	x16, thread_system_reset_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.
 *
 * Note that ARM-TF depends on the layout of this vector table; any change
 * in layout has to be synced with ARM-TF.
 */
FUNC thread_vector_table , :
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
END_FUNC thread_vector_table
KEEP_PAGER thread_vector_table


/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	mov	sp, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3

	b_if_spsr_is_el0 w3, 1f

	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]
	eret

1:	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC thread_resume

FUNC thread_std_smc_entry , :
	/* pass x0-x7 in a struct thread_smc_args */
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp

	/* Call the registered handler */
	bl	__thread_std_smc_entry

	/*
	 * Load the returned x0-x3 into preserved registers and skip the
	 * "returned" x4-x7 since they will not be returned to normal
	 * world.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 20, 23
	add	sp, sp, #THREAD_SMC_ARGS_SIZE

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	bl	thread_get_tmp_sp
	mov	sp, x0

	bl	thread_state_free

	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	x1, x20
	mov	x2, x21
	mov	x3, x22
	mov	x4, x23
	smc	#0
	b	.	/* SMC should not return */
END_FUNC thread_std_smc_entry

/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
	/* Read daif and create an SPSR */
	mrs	x1, daif
	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	push	x0, xzr
	push	x1, x30
	bl	thread_get_ctx_regs
	ldr	x30, [sp, #8]
	store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
	mov	x19, x0

	bl	thread_get_tmp_sp
	pop	x1, xzr		/* Match "push x1, x30" above */
	mov	x2, sp
	str	x2, [x19, #THREAD_CTX_REGS_SP]
	ldr	x20, [sp]	/* Get pointer to rv[] */
	mov	sp, x0		/* Switch to tmp stack */
	/*
	 * We need to read rv[] early, because thread_state_suspend
	 * can invoke virt_unset_guest() which will unmap pages,
	 * where rv[] resides
	 */
	load_wregs x20, 0, 21, 23	/* Load rv[] into w21-w23 */

	adr	x2, .thread_rpc_return
	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	bl	thread_state_suspend
	mov	x4, x0		/* Supply thread index */
	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	x1, x21
	mov	x2, x22
	mov	x3, x23
	smc	#0
	b	.	/* SMC should not return */
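	/*
	 * For the SMC above: x1..x3 hold rv[0..2] (the OPTEE_SMC_RETURN_RPC_*
	 * code and its arguments) and x4 holds the index of the suspended
	 * thread so that normal world can resume it once the RPC has been
	 * served.
	 */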

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * stored in THREAD_CTX above.
	 *
	 * Jumps here from thread_resume above when RPC has returned. The
	 * IRQ and FIQ bits are restored to what they were when this
	 * function was originally entered.
	 */
	pop	x16, xzr	/* Get pointer to rv[] */
	store_wregs x16, 0, 0, 5	/* Store w0-w5 into rv[] */
	ret
END_FUNC thread_rpc
KEEP_PAGER thread_rpc

FUNC thread_smc , :
	smc	#0
	ret
END_FUNC thread_smc

FUNC thread_init_vbar , :
	msr	vbar_el1, x0
	ret
END_FUNC thread_init_vbar
KEEP_PAGER thread_init_vbar

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *		unsigned long a2, unsigned long a3, unsigned long user_sp,
 *		unsigned long user_func, unsigned long spsr,
 *		uint32_t *exit_status0, uint32_t *exit_status1)
 *
 */
FUNC __thread_enter_user_mode , :
	ldr	x8, [sp]
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR, 7, 8
	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30

	/*
	 * Switch to SP_EL1
	 * Disable exceptions
	 * Save kern sp in x19
	 */
	msr	daifset, #DAIFBIT_ALL
	mov	x19, sp
	msr	spsel, #1

	/*
	 * Save the kernel stack pointer in the thread context
	 */
	/* get pointer to current thread context */
	get_thread_ctx sp, 21, 20, 22
	/*
	 * Save kernel stack pointer to ensure that el0_svc() uses the
	 * correct stack pointer
	 */
	str	x19, [x21, #THREAD_CTX_KERN_SP]

	/*
	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
	 */
	msr	spsr_el1, x6
	/* Set user sp */
	mov	x13, x4		/* Used when running TA in Aarch32 */
	msr	sp_el0, x4	/* Used when running TA in Aarch64 */
	/* Set user function */
	msr	elr_el1, x5
	/* Set frame pointer (user stack can't be unwound past this point) */
	mov	x29, #0

	/* Jump into user mode */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC __thread_enter_user_mode
KEEP_PAGER __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR]
	str	w1, [x3]
	str	w2, [x4]
	/* Restore x19..x30 */
	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. This is meant to be placed
	 * immediately after the last instruction in the vector. It takes the
	 * vector entry as the parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm
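
	/*
	 * Background for the check above: each entry in an AArch64
	 * exception vector table is 0x80 bytes, i.e. at most 32 4-byte
	 * instructions, which is the limit check_vector_size enforces.
	 */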

	.macro restore_mapping
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/* Temporarily save x0, x1 */
		msr	tpidr_el1, x0
		msr	tpidrro_el0, x1

		/* Update the mapping to use the full kernel mapping */
		mrs	x0, ttbr0_el1
		sub	x0, x0, #CORE_MMU_L1_TBL_OFFSET
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb

		/* Jump into the full mapping and continue execution */
		ldr	x0, =1f
		br	x0
	1:

		/* Point to the vector into the full mapping */
		adr	x0, thread_user_kcode_offset
		ldr	x0, [x0]
		mrs	x1, vbar_el1
		add	x1, x1, x0
		msr	vbar_el1, x1
		isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
		/*
		 * Update the SP with thread_user_kdata_sp_offset as
		 * described in init_user_kcode().
		 */
		adr	x0, thread_user_kdata_sp_offset
		ldr	x0, [x0]
		add	sp, sp, x0
#endif

		/* Restore x0, x1 */
		mrs	x0, tpidr_el1
		mrs	x1, tpidrro_el0
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
#else
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mrs	x0, ttbr0_el1
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	.endm

#define INV_INSN	0
	.section .text.thread_excp_vect
	.align	11, INV_INSN
FUNC thread_excp_vect , :
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el1_sync_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	el1_sync_abort
	check_vector_size el1_sync_sp0

	.align	7, INV_INSN
el1_irq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size el1_irq_sp0

	.align	7, INV_INSN
el1_fiq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size el1_fiq_sp0

	.align	7, INV_INSN
el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el1_sync_sp1:
	b	el1_sync_sp1
	check_vector_size el1_sync_sp1

	.align	7, INV_INSN
el1_irq_sp1:
	b	el1_irq_sp1
	check_vector_size el1_irq_sp1

	.align	7, INV_INSN
el1_fiq_sp1:
	b	el1_fiq_sp1
	check_vector_size el1_fiq_sp1

	.align	7, INV_INSN
el1_serror_sp1:
	b	el1_serror_sp1
	check_vector_size el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el0_sync_a64:
	restore_mapping

	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH64_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a64

	.align	7, INV_INSN
el0_irq_a64:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a64

	.align	7, INV_INSN
el0_fiq_a64:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a64

	.align	7, INV_INSN
el0_serror_a64:
	b	el0_serror_a64
	check_vector_size el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
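	/*
	 * These entries mirror the "Lower EL using AArch64" entries above;
	 * el0_sync_a32 checks for ESR_EC_AARCH32_SVC instead, i.e. system
	 * calls from a TA running in AArch32.
	 */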
	.align	7, INV_INSN
el0_sync_a32:
	restore_mapping

	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH32_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a32

	.align	7, INV_INSN
el0_irq_a32:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a32

	.align	7, INV_INSN
el0_fiq_a32:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a32

	.align	7, INV_INSN
el0_serror_a32:
	b	el0_serror_a32
	check_vector_size el0_serror_a32

#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC)
	.macro invalidate_branch_predictor
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mov_imm	x0, SMCCC_ARCH_WORKAROUND_1
		smc	#0
		load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	.endm

	.align	11, INV_INSN
	.global thread_excp_vect_workaround
thread_excp_vect_workaround:
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el1_sync_sp0:
	b	el1_sync_sp0
	check_vector_size workaround_el1_sync_sp0

	.align	7, INV_INSN
workaround_el1_irq_sp0:
	b	el1_irq_sp0
	check_vector_size workaround_el1_irq_sp0

	.align	7, INV_INSN
workaround_el1_fiq_sp0:
	b	el1_fiq_sp0
	check_vector_size workaround_el1_fiq_sp0

	.align	7, INV_INSN
workaround_el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size workaround_el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el1_sync_sp1:
	b	workaround_el1_sync_sp1
	check_vector_size workaround_el1_sync_sp1

	.align	7, INV_INSN
workaround_el1_irq_sp1:
	b	workaround_el1_irq_sp1
	check_vector_size workaround_el1_irq_sp1

	.align	7, INV_INSN
workaround_el1_fiq_sp1:
	b	workaround_el1_fiq_sp1
	check_vector_size workaround_el1_fiq_sp1

	.align	7, INV_INSN
workaround_el1_serror_sp1:
	b	workaround_el1_serror_sp1
	check_vector_size workaround_el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el0_sync_a64:
	invalidate_branch_predictor
	b	el0_sync_a64
	check_vector_size workaround_el0_sync_a64

	.align	7, INV_INSN
workaround_el0_irq_a64:
	invalidate_branch_predictor
	b	el0_irq_a64
	check_vector_size workaround_el0_irq_a64

	.align	7, INV_INSN
workaround_el0_fiq_a64:
	invalidate_branch_predictor
	b	el0_fiq_a64
	check_vector_size workaround_el0_fiq_a64

	.align	7, INV_INSN
workaround_el0_serror_a64:
	b	workaround_el0_serror_a64
	check_vector_size workaround_el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el0_sync_a32:
	invalidate_branch_predictor
	b	el0_sync_a32
	check_vector_size workaround_el0_sync_a32

	.align	7, INV_INSN
workaround_el0_irq_a32:
	invalidate_branch_predictor
	b	el0_irq_a32
	check_vector_size workaround_el0_irq_a32

	.align	7, INV_INSN
workaround_el0_fiq_a32:
	invalidate_branch_predictor
	b	el0_fiq_a32
	check_vector_size workaround_el0_fiq_a32

	.align	7, INV_INSN
workaround_el0_serror_a32:
	b	workaround_el0_serror_a32
	check_vector_size workaround_el0_serror_a32
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/
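
/*
 * Rationale for the workaround table above (sketch): only the lower EL
 * entries invalidate the branch predictor (SMCCC_ARCH_WORKAROUND_1) since
 * those are the entries that can be reached on behalf of less privileged,
 * potentially attacker controlled, code. Exceptions taken from EL1 itself
 * just branch to the ordinary handlers.
 */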

/*
 * We're keeping this code in the same section as the vector to make sure
 * that it's always available.
 */
eret_to_el0:

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Point to the vector into the reduced mapping */
	adr	x0, thread_user_kcode_offset
	ldr	x0, [x0]
	mrs	x1, vbar_el1
	sub	x1, x1, x0
	msr	vbar_el1, x1
	isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	/* Store the SP offset in tpidr_el1 to be used below to update SP */
	adr	x1, thread_user_kdata_sp_offset
	ldr	x1, [x1]
	msr	tpidr_el1, x1
#endif

	/* Jump into the reduced mapping and continue execution */
	ldr	x1, =1f
	sub	x1, x1, x0
	br	x1
1:

	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	msr	tpidrro_el0, x0

	/* Update the mapping to exclude the full kernel mapping */
	mrs	x0, ttbr0_el1
	add	x0, x0, #CORE_MMU_L1_TBL_OFFSET
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	/*
	 * Update the SP with thread_user_kdata_sp_offset as described in
	 * init_user_kcode().
	 */
	mrs	x0, tpidr_el1
	sub	sp, sp, x0
#endif

	mrs	x0, tpidrro_el0
#else
	mrs	x0, ttbr0_el1
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	eret
	/*
	 * Make sure that literals are placed before the
	 * thread_excp_vect_end label.
	 */
	.pool
	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect
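
/*
 * For reference only (not used from C here): el0_svc below saves the
 * caller's state in a struct thread_svc_regs on the kernel stack. Judging
 * from the THREAD_SVC_REG_* offsets used, the record holds at least
 * x0..x14, elr_el1, spsr_el1, x30 and sp_el0; the authoritative layout is
 * the C struct behind the generated asm-defines, not this comment.
 */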

LOCAL_FUNC el0_svc , :
	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* load saved kernel sp */
	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
	/* Keep pointer to initial record in x1 */
	mov	x1, sp
	/* Switch to SP_EL0 and restore kernel sp */
	msr	spsel, #0
	mov	x2, sp	/* Save SP_EL0 */
	mov	sp, x0

	/* Make room for struct thread_svc_regs */
	sub	sp, sp, #THREAD_SVC_REG_SIZE
	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]

	/* Restore x0-x3 */
	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]

	/* Prepare the argument for the handler */
	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
	mrs	x0, elr_el1
	mrs	x1, spsr_el1
	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	mov	x0, sp

	/*
	 * Unmask native interrupts, Serror, and debug exceptions since we
	 * have nothing left in sp_el1. Note that the SVC handler is expected
	 * to re-enable foreign interrupts by itself.
	 */
#if defined(CFG_ARM_GICV3)
	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
#else
	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
#endif

	/* Call the handler */
	bl	tee_svc_handler

	/* Mask all maskable exceptions since we're switching back to sp_el1 */
	msr	daifset, #DAIFBIT_ALL

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This is needed when this TA has called another TA because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	msr	spsel, #1
	get_thread_ctx sp, 0, 1, 2
	msr	spsel, #0
	add	x1, sp, #THREAD_SVC_REG_SIZE
	str	x1, [x0, #THREAD_CTX_KERN_SP]

	/* Restore registers to the required state and return */
	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	msr	elr_el1, x0
	msr	spsr_el1, x1
	load_xregs sp, THREAD_SVC_REG_X2, 2, 14
	mov	x30, sp
	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
	mov	sp, x0
	b_if_spsr_is_el0 w1, 1f
	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	eret

1:	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC el0_svc

LOCAL_FUNC el1_sync_abort , :
	mov	x0, sp
	msr	spsel, #0
	mov	x3, sp		/* Save original sp */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
			.Lsel_tmp_sp

	/* Select abort stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	b	.Lset_sp

.Lsel_tmp_sp:
	/* Select tmp stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

.Lset_sp:
	mov	sp, x2
	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	eret
END_FUNC el1_sync_abort
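
/*
 * Both abort handlers above and below keep a small history of the core
 * local flags: on entry the current flags are shifted up by
 * THREAD_CLF_SAVED_SHIFT before the new state bits are OR:ed in, and on
 * exit they are shifted back down, restoring whatever state was active
 * when the abort hit.
 */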

	/* sp_el0 in x3 */
LOCAL_FUNC el0_sync_abort , :
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */

	/* load abt_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in EL1_REC */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x2 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3

	b_if_spsr_is_el0 w0, 1f

	/* Restore x0 to x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	eret
1:	b	eret_to_el0
END_FUNC el0_sync_abort

/* Handler of foreign interrupts */
.macro foreign_intr_handler	mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0

	/* Store original sp_el0 */
	str	x2, [x0, #THREAD_CTX_REGS_SP]
	/* store x4..x30 */
	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	/* Load original x0..x3 into x10..x13 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
	/* Save original x0..x3 */
	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Mark current thread as suspended
	 */
	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	x1, spsr_el1
	mrs	x2, elr_el1
	bl	thread_state_suspend
	mov	w4, w0		/* Supply thread index */

	/* Update core local flags */
	/* Switch to SP_EL1 */
	msr	spsel, #1
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	msr	spsel, #0

	/*
	 * Note that we're exiting with SP_EL0 selected since the entry
	 * functions expect to have SP_EL0 selected with the tmp stack
	 * set.
	 */

	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	w1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
	mov	w2, #0
	mov	w3, #0
	/* w4 is already filled in above */
	smc	#0
	b	.	/* SMC should not return */
.endm
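
/*
 * A foreign interrupt thus suspends the current thread and returns to
 * normal world with OPTEE_SMC_RETURN_RPC_FOREIGN_INTR so that the normal
 * world OS can service its interrupt; the thread index passed in w4 lets
 * it resume the suspended thread afterwards.
 */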

/*
 * This struct is never used from C; it's only here to visualize the
 * layout.
 *
 * struct elx_nintr_rec {
 *	uint64_t x[19 - 4];	x4..x18
 *	uint64_t lr;
 *	uint64_t sp_el0;
 * };
 */
#define ELX_NINTR_REC_X(x)	(8 * ((x) - 4))
#define ELX_NINTR_REC_LR	(8 + ELX_NINTR_REC_X(19))
#define ELX_NINTR_REC_SP_EL0	(8 + ELX_NINTR_REC_LR)
#define ELX_NINTR_REC_SIZE	(8 + ELX_NINTR_REC_SP_EL0)

/* Handler of native interrupts */
.macro native_intr_handler	mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Save registers on stack that can be corrupted by a call to
	 * a C function
	 */
	/* Make room for struct elx_nintr_rec */
	sub	sp, sp, #ELX_NINTR_REC_SIZE
	/* Store x4..x18 */
	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Store lr and original sp_el0 */
	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]

	bl	thread_check_canaries
	adr	x16, thread_nintr_handler_ptr
	ldr	x16, [x16]
	blr	x16

	/*
	 * Restore registers
	 */
	/* Restore x4..x18 */
	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Load lr and original sp_el0 */
	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
	/* Restore SP_EL0 */
	mov	sp, x2
	/* Switch back to SP_EL1 */
	msr	spsel, #1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	mrs	x0, spsr_el1
	/* Restore x2..x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
	b_if_spsr_is_el0 w0, 1f

	/* Restore x0..x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	eret
1:	b	eret_to_el0
.endm

LOCAL_FUNC elx_irq , :
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
END_FUNC elx_irq

LOCAL_FUNC elx_fiq , :
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
END_FUNC elx_fiq
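
/*
 * Note on the two functions above: with CFG_ARM_GICV3 secure world
 * receives its native (secure) interrupts as IRQ and foreign (normal
 * world) interrupts as FIQ, while the legacy GIC configuration delivers
 * them the other way around, which is why elx_irq and elx_fiq swap
 * handlers depending on the configuration.
 */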