/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2015-2017, Linaro Limited
 */

#include <arm.h>
#include <arm64_macros.S>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/thread_defs.h>
#include <mm/core_mmu.h>
#include <smccc.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

#include "thread_private.h"

	.macro get_thread_ctx core_local, res, tmp0, tmp1
		ldr	w\tmp0, [\core_local, \
				#THREAD_CORE_LOCAL_CURR_THREAD]
		adr	x\res, threads
		mov	x\tmp1, #THREAD_CTX_SIZE
		madd	x\res, x\tmp0, x\tmp1, x\res
	.endm

	.macro b_if_spsr_is_el0 reg, label
		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
		b.eq	\label
	.endm

LOCAL_FUNC vector_std_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (error detected) it will do a normal "C"
	 * return.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_std_smc_entry

LOCAL_FUNC vector_fast_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_fast_smc
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	adr	x16, thread_nintr_handler_ptr
	ldr	x16, [x16]
	blr	x16
	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fiq_entry

LOCAL_FUNC vector_cpu_on_entry , :
	adr	x16, thread_cpu_on_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
	adr	x16, thread_cpu_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
	adr	x16, thread_cpu_suspend_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
	adr	x16, thread_cpu_resume_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
	adr	x16, thread_system_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
	adr	x16, thread_system_reset_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_reset_entry
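
/*
 * All vector_*_entry handlers above follow the same pattern. An
 * illustrative C-like sketch of that pattern (nothing below is compiled;
 * only names already used in this file, smc() stands for the smc #0
 * instruction):
 *
 *	void vector_xxx_entry(void)
 *	{
 *		unsigned long res = (*thread_xxx_handler_ptr)();
 *
 *		// Report completion to the secure monitor, which
 *		// returns to normal world. The SMC does not return.
 *		smc(TEESMC_OPTEED_RETURN_XXX_DONE, res);
 *	}
 */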

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.
 *
 * Note that ARM-TF depends on the layout of this vector table; any
 * change in layout has to be synced with ARM-TF.
 */
FUNC thread_vector_table , :
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
END_FUNC thread_vector_table
KEEP_PAGER thread_vector_table


/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	mov	sp, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3

	b_if_spsr_is_el0 w3, 1f

	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]
	eret

1:	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC thread_resume

FUNC thread_std_smc_entry , :
	/* pass x0-x7 in a struct thread_smc_args */
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp

	/* Call the registered handler */
	bl	__thread_std_smc_entry

	/*
	 * Load the returned x0-x3 into preserved registers and skip the
	 * "returned" x4-x7 since they will not be returned to normal
	 * world.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 20, 23
	add	sp, sp, #THREAD_SMC_ARGS_SIZE

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	bl	thread_get_tmp_sp
	mov	sp, x0

	bl	thread_state_free

	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	x1, x20
	mov	x2, x21
	mov	x3, x22
	mov	x4, x23
	smc	#0
	b	.	/* SMC should not return */
END_FUNC thread_std_smc_entry
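
/*
 * thread_rpc() below suspends the current thread and issues an RPC to
 * normal world; thread_resume() later jumps back to .thread_rpc_return.
 * An illustrative C-like sketch of the round trip (not compiled; only
 * names already used in this file):
 *
 *	void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS])
 *	{
 *		// Save callee-saved registers and the stack pointer in
 *		// the thread context, then suspend the thread with
 *		// THREAD_FLAGS_COPY_ARGS_ON_RETURN and resume address
 *		// .thread_rpc_return.
 *		//
 *		// rv[0..2] are passed to normal world in w1-w3 together
 *		// with TEESMC_OPTEED_RETURN_CALL_DONE in w0 and the
 *		// suspended thread index in w4.
 *		//
 *		// When the thread is resumed, w0-w5 provided by normal
 *		// world are stored back into rv[] before returning to
 *		// the caller.
 *	}
 */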

/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
	/* Read daif and create an SPSR */
	mrs	x1, daif
	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	push	x0, xzr
	push	x1, x30
	bl	thread_get_ctx_regs
	ldr	x30, [sp, #8]
	store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
	mov	x19, x0

	bl	thread_get_tmp_sp
	pop	x1, xzr		/* Match "push x1, x30" above */
	mov	x2, sp
	str	x2, [x19, #THREAD_CTX_REGS_SP]
	ldr	x20, [sp]	/* Get pointer to rv[] */
	mov	sp, x0		/* Switch to tmp stack */

	adr	x2, .thread_rpc_return
	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	bl	thread_state_suspend
	mov	x4, x0		/* Supply thread index */
	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	load_wregs x20, 0, 1, 3	/* Load rv[0..2] into w1-w3 */
	smc	#0
	b	.		/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * stored in THREAD_CTX above.
	 *
	 * Jumps here from thread_resume above when RPC has returned. The
	 * IRQ and FIQ bits are restored to what they were when this
	 * function was originally entered.
	 */
	pop	x16, xzr	/* Get pointer to rv[] */
	store_wregs x16, 0, 0, 5	/* Store w0-w5 into rv[] */
	ret
END_FUNC thread_rpc
KEEP_PAGER thread_rpc

FUNC thread_smc , :
	smc	#0
	ret
END_FUNC thread_smc

FUNC thread_init_vbar , :
	msr	vbar_el1, x0
	ret
END_FUNC thread_init_vbar
KEEP_PAGER thread_init_vbar

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *		unsigned long a2, unsigned long a3, unsigned long user_sp,
 *		unsigned long user_func, unsigned long spsr,
 *		uint32_t *exit_status0, uint32_t *exit_status1)
 *
 */
FUNC __thread_enter_user_mode , :
	ldr	x8, [sp]
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR, 7, 8
	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30

	/*
	 * Switch to SP_EL1
	 * Disable exceptions
	 * Save kern sp in x19
	 */
	msr	daifset, #DAIFBIT_ALL
	mov	x19, sp
	msr	spsel, #1

	/*
	 * Save the kernel stack pointer in the thread context
	 */
	/* get pointer to current thread context */
	get_thread_ctx sp, 21, 20, 22
	/*
	 * Save kernel stack pointer to ensure that el0_svc() uses
	 * correct stack pointer
	 */
	str	x19, [x21, #THREAD_CTX_KERN_SP]

	/*
	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
	 */
	msr	spsr_el1, x6
	/* Set user sp */
	mov	x13, x4		/* Used when running TA in AArch32 */
	msr	sp_el0, x4	/* Used when running TA in AArch64 */
	/* Set user function */
	msr	elr_el1, x5
	/* Set frame pointer (user stack can't be unwound past this point) */
	mov	x29, #0

	/* Jump into user mode */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR]
	str	w1, [x3]
	str	w2, [x4]
	/* Restore x19..x30 */
	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode
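
/*
 * __thread_enter_user_mode() and thread_unwind_user_mode() above work as
 * a pair. An illustrative C-like sketch of the contract, where enter()
 * is a hypothetical caller (not compiled; the other names are those used
 * in the prototypes above):
 *
 *	uint32_t enter(void)
 *	{
 *		uint32_t exit_status0 = 0;
 *		uint32_t exit_status1 = 0;
 *
 *		// Builds a struct thread_user_mode_rec on the kernel
 *		// stack and erets into the TA.
 *		return __thread_enter_user_mode(a0, a1, a2, a3, user_sp,
 *						user_func, spsr,
 *						&exit_status0,
 *						&exit_status1);
 *		// The TA never returns here directly. Instead an
 *		// exception handler eventually calls
 *		// thread_unwind_user_mode(ret, s0, s1), which writes s0
 *		// and s1 through the saved pointers and makes
 *		// __thread_enter_user_mode() return "ret".
 *	}
 */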

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. This is meant to be placed
	 * immediately after the last instruction in the vector. It takes the
	 * vector entry as the parameter
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm

	.macro restore_mapping
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/* Temporarily save x0, x1 */
		msr	tpidr_el1, x0
		msr	tpidrro_el0, x1

		/* Update the mapping to use the full kernel mapping */
		mrs	x0, ttbr0_el1
		sub	x0, x0, #CORE_MMU_L1_TBL_OFFSET
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb

		/* Jump into the full mapping and continue execution */
		ldr	x0, =1f
		br	x0
	1:

		/* Point to the vector into the full mapping */
		adr	x0, thread_user_kcode_offset
		ldr	x0, [x0]
		mrs	x1, vbar_el1
		add	x1, x1, x0
		msr	vbar_el1, x1
		isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
		/*
		 * Update the SP with thread_user_kdata_sp_offset as
		 * described in init_user_kcode().
		 */
		adr	x0, thread_user_kdata_sp_offset
		ldr	x0, [x0]
		add	sp, sp, x0
#endif

		/* Restore x0, x1 */
		mrs	x0, tpidr_el1
		mrs	x1, tpidrro_el0
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
#else
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mrs	x0, ttbr0_el1
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	.endm
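
/*
 * With CFG_CORE_UNMAP_CORE_AT_EL0 the vector executes in a reduced
 * mapping while user mode runs, and restore_mapping above switches back
 * on exception entry. Illustrative C-like pseudo code of the TTBR0 part
 * (not compiled; read_ttbr0()/write_ttbr0() are pseudo helpers standing
 * in for the mrs/msr instructions):
 *
 *	uint64_t ttbr0 = read_ttbr0();
 *
 *	ttbr0 -= CORE_MMU_L1_TBL_OFFSET;   // back to the full kernel L1 table
 *	ttbr0 &= ~BIT(TTBR_ASID_SHIFT);    // back to the kernel mode ASID
 *	write_ttbr0(ttbr0);
 *	isb();
 *	// ...after which VBAR_EL1 (and, with the Spectre workaround, SP)
 *	// are adjusted by thread_user_kcode_offset and
 *	// thread_user_kdata_sp_offset so execution continues in the full
 *	// mapping.
 */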

#define INV_INSN	0
	.section .text.thread_excp_vect
	.align	11, INV_INSN
FUNC thread_excp_vect , :
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el1_sync_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	el1_sync_abort
	check_vector_size el1_sync_sp0

	.align	7, INV_INSN
el1_irq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size el1_irq_sp0

	.align	7, INV_INSN
el1_fiq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size el1_fiq_sp0

	.align	7, INV_INSN
el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el1_sync_sp1:
	b	el1_sync_sp1
	check_vector_size el1_sync_sp1

	.align	7, INV_INSN
el1_irq_sp1:
	b	el1_irq_sp1
	check_vector_size el1_irq_sp1

	.align	7, INV_INSN
el1_fiq_sp1:
	b	el1_fiq_sp1
	check_vector_size el1_fiq_sp1

	.align	7, INV_INSN
el1_serror_sp1:
	b	el1_serror_sp1
	check_vector_size el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el0_sync_a64:
	restore_mapping

	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH64_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a64

	.align	7, INV_INSN
el0_irq_a64:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a64

	.align	7, INV_INSN
el0_fiq_a64:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a64

	.align	7, INV_INSN
el0_serror_a64:
	b	el0_serror_a64
	check_vector_size el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el0_sync_a32:
	restore_mapping

	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH32_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a32

	.align	7, INV_INSN
el0_irq_a32:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a32

	.align	7, INV_INSN
el0_fiq_a32:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a32

	.align	7, INV_INSN
el0_serror_a32:
	b	el0_serror_a32
	check_vector_size el0_serror_a32
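
/*
 * When CFG_CORE_WORKAROUND_SPECTRE_BP_SEC is enabled, the alternative
 * vector table below mirrors thread_excp_vect but, for exceptions taken
 * from a lower EL, first invalidates the branch predictor with the
 * SMCCC_ARCH_WORKAROUND_1 fast call before branching to the regular
 * handler. Illustrative pseudo code of a lower-EL workaround entry (not
 * compiled):
 *
 *	workaround_el0_xxx:
 *		save x0-x3 in the core local record
 *		smc(SMCCC_ARCH_WORKAROUND_1)	// firmware BP invalidate
 *		restore x0-x3
 *		b el0_xxx			// regular handler above
 */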

#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC)
	.macro invalidate_branch_predictor
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mov_imm	x0, SMCCC_ARCH_WORKAROUND_1
		smc	#0
		load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	.endm

	.align	11, INV_INSN
	.global thread_excp_vect_workaround
thread_excp_vect_workaround:
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el1_sync_sp0:
	b	el1_sync_sp0
	check_vector_size workaround_el1_sync_sp0

	.align	7, INV_INSN
workaround_el1_irq_sp0:
	b	el1_irq_sp0
	check_vector_size workaround_el1_irq_sp0

	.align	7, INV_INSN
workaround_el1_fiq_sp0:
	b	el1_fiq_sp0
	check_vector_size workaround_el1_fiq_sp0

	.align	7, INV_INSN
workaround_el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size workaround_el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el1_sync_sp1:
	b	workaround_el1_sync_sp1
	check_vector_size workaround_el1_sync_sp1

	.align	7, INV_INSN
workaround_el1_irq_sp1:
	b	workaround_el1_irq_sp1
	check_vector_size workaround_el1_irq_sp1

	.align	7, INV_INSN
workaround_el1_fiq_sp1:
	b	workaround_el1_fiq_sp1
	check_vector_size workaround_el1_fiq_sp1

	.align	7, INV_INSN
workaround_el1_serror_sp1:
	b	workaround_el1_serror_sp1
	check_vector_size workaround_el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el0_sync_a64:
	invalidate_branch_predictor
	b	el0_sync_a64
	check_vector_size workaround_el0_sync_a64

	.align	7, INV_INSN
workaround_el0_irq_a64:
	invalidate_branch_predictor
	b	el0_irq_a64
	check_vector_size workaround_el0_irq_a64

	.align	7, INV_INSN
workaround_el0_fiq_a64:
	invalidate_branch_predictor
	b	el0_fiq_a64
	check_vector_size workaround_el0_fiq_a64

	.align	7, INV_INSN
workaround_el0_serror_a64:
	b	workaround_el0_serror_a64
	check_vector_size workaround_el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el0_sync_a32:
	invalidate_branch_predictor
	b	el0_sync_a32
	check_vector_size workaround_el0_sync_a32

	.align	7, INV_INSN
workaround_el0_irq_a32:
	invalidate_branch_predictor
	b	el0_irq_a32
	check_vector_size workaround_el0_irq_a32

	.align	7, INV_INSN
workaround_el0_fiq_a32:
	invalidate_branch_predictor
	b	el0_fiq_a32
	check_vector_size workaround_el0_fiq_a32

	.align	7, INV_INSN
workaround_el0_serror_a32:
	b	workaround_el0_serror_a32
	check_vector_size workaround_el0_serror_a32
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

/*
 * We're keeping this code in the same section as the vector to make sure
 * that it's always available.
 */
eret_to_el0:

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Point to the vector into the reduced mapping */
	adr	x0, thread_user_kcode_offset
	ldr	x0, [x0]
	mrs	x1, vbar_el1
	sub	x1, x1, x0
	msr	vbar_el1, x1
	isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	/* Store the SP offset in tpidr_el1 to be used below to update SP */
	adr	x1, thread_user_kdata_sp_offset
	ldr	x1, [x1]
	msr	tpidr_el1, x1
#endif

	/* Jump into the reduced mapping and continue execution */
	ldr	x1, =1f
	sub	x1, x1, x0
	br	x1
1:

	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	msr	tpidrro_el0, x0

	/* Update the mapping to exclude the full kernel mapping */
	mrs	x0, ttbr0_el1
	add	x0, x0, #CORE_MMU_L1_TBL_OFFSET
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	/*
	 * Update the SP with thread_user_kdata_sp_offset as described in
	 * init_user_kcode().
	 */
	mrs	x0, tpidr_el1
	sub	sp, sp, x0
#endif

	mrs	x0, tpidrro_el0
#else
	mrs	x0, ttbr0_el1
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	eret

	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect
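
/*
 * el0_svc below is entered from el0_sync_a64/el0_sync_a32 when the
 * exception class is an SVC. It switches to the kernel stack saved in
 * THREAD_CTX_KERN_SP, builds a struct thread_svc_regs there and calls
 * tee_svc_handler(). Illustrative C-like pseudo code (not compiled; only
 * names already used in this file):
 *
 *	void el0_svc(void)
 *	{
 *		struct thread_svc_regs regs;
 *
 *		// x0-x14, elr_el1, spsr_el1, sp_el0 and x30 of the
 *		// interrupted context are captured in regs.
 *		tee_svc_handler(&regs);
 *		// regs, possibly modified by the handler, is used to
 *		// restore the context before eret (or eret_to_el0 when
 *		// returning to user mode).
 *	}
 */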

LOCAL_FUNC el0_svc , :
	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* load saved kernel sp */
	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
	/* Keep pointer to initial record in x1 */
	mov	x1, sp
	/* Switch to SP_EL0 and restore kernel sp */
	msr	spsel, #0
	mov	x2, sp	/* Save SP_EL0 */
	mov	sp, x0

	/* Make room for struct thread_svc_regs */
	sub	sp, sp, #THREAD_SVC_REG_SIZE
	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]

	/* Restore x0-x3 */
	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]

	/* Prepare the argument for the handler */
	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
	mrs	x0, elr_el1
	mrs	x1, spsr_el1
	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	mov	x0, sp

	/*
	 * Unmask native interrupts, SError, and debug exceptions since we
	 * have nothing left in sp_el1. Note that the SVC handler is expected
	 * to re-enable foreign interrupts by itself.
	 */
#if defined(CFG_ARM_GICV3)
	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
#else
	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
#endif

	/* Call the handler */
	bl	tee_svc_handler

	/* Mask all maskable exceptions since we're switching back to sp_el1 */
	msr	daifset, #DAIFBIT_ALL

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This is needed when this TA has called another TA because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	msr	spsel, #1
	get_thread_ctx sp, 0, 1, 2
	msr	spsel, #0
	add	x1, sp, #THREAD_SVC_REG_SIZE
	str	x1, [x0, #THREAD_CTX_KERN_SP]

	/* Restore registers to the required state and return */
	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	msr	elr_el1, x0
	msr	spsr_el1, x1
	load_xregs sp, THREAD_SVC_REG_X2, 2, 14
	mov	x30, sp
	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
	mov	sp, x0
	b_if_spsr_is_el0 w1, 1f
	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	eret

1:	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC el0_svc

LOCAL_FUNC el1_sync_abort , :
	mov	x0, sp
	msr	spsel, #0
	mov	x3, sp		/* Save original sp */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
		.Lsel_tmp_sp

	/* Select abort stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	b	.Lset_sp

.Lsel_tmp_sp:
	/* Select tmp stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

.Lset_sp:
	mov	sp, x2
	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	eret
END_FUNC el1_sync_abort

	/* sp_el0 in x3 */
LOCAL_FUNC el0_sync_abort , :
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */

	/* load abt_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x2 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3

	b_if_spsr_is_el0 w0, 1f

	/* Restore x0 to x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	eret
1:	b	eret_to_el0
END_FUNC el0_sync_abort
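
/*
 * Two interrupt handling strategies follow. A foreign interrupt suspends
 * the current thread and returns to normal world through the monitor so
 * that the normal world OS can service it; a native interrupt is handled
 * right here on the tmp stack. Illustrative C-like pseudo code of the
 * foreign case (not compiled; only names already used in this file,
 * smc() stands for the smc #0 instruction):
 *
 *	void foreign_intr(void)
 *	{
 *		int idx = thread_state_suspend(
 *				THREAD_FLAGS_EXIT_ON_FOREIGN_INTR,
 *				spsr_el1, elr_el1);
 *
 *		smc(TEESMC_OPTEED_RETURN_CALL_DONE,
 *		    OPTEE_SMC_RETURN_RPC_FOREIGN_INTR, 0, 0, idx);
 *	}
 */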

/* The handler of foreign interrupts. */
.macro foreign_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0

	/* Store original sp_el0 */
	str	x2, [x0, #THREAD_CTX_REGS_SP]
	/* store x4..x30 */
	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	/* Load original x0..x3 into x10..x13 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
	/* Save original x0..x3 */
	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Mark current thread as suspended
	 */
	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	x1, spsr_el1
	mrs	x2, elr_el1
	bl	thread_state_suspend
	mov	w4, w0		/* Supply thread index */

	/* Update core local flags */
	/* Switch to SP_EL1 */
	msr	spsel, #1
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	msr	spsel, #0

	/*
	 * Note that we're exiting with SP_EL0 selected since the entry
	 * functions expect to have SP_EL0 selected with the tmp stack
	 * set.
	 */

	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	w1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
	mov	w2, #0
	mov	w3, #0
	/* w4 is already filled in above */
	smc	#0
	b	.		/* SMC should not return */
.endm

/*
 * This struct is never used from C, it's only here to visualize the
 * layout.
 *
 * struct elx_nintr_rec {
 *	uint64_t x[19 - 4];	x4..x18
 *	uint64_t lr;
 *	uint64_t sp_el0;
 * };
 */
#define ELX_NINTR_REC_X(x)	(8 * ((x) - 4))
#define ELX_NINTR_REC_LR	(8 + ELX_NINTR_REC_X(19))
#define ELX_NINTR_REC_SP_EL0	(8 + ELX_NINTR_REC_LR)
#define ELX_NINTR_REC_SIZE	(8 + ELX_NINTR_REC_SP_EL0)

/* The handler of native interrupts. */
.macro native_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Save registers on stack that can be corrupted by a call to
	 * a C function
	 */
	/* Make room for struct elx_nintr_rec */
	sub	sp, sp, #ELX_NINTR_REC_SIZE
	/* Store x4..x18 */
	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Store lr and original sp_el0 */
	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]

	bl	thread_check_canaries
	adr	x16, thread_nintr_handler_ptr
	ldr	x16, [x16]
	blr	x16

	/*
	 * Restore registers
	 */
	/* Restore x4..x18 */
	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Load lr and original sp_el0 */
	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
	/* Restore SP_EL0 */
	mov	sp, x2
	/* Switch back to SP_EL1 */
	msr	spsel, #1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	mrs	x0, spsr_el1
	/* Restore x2..x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
	b_if_spsr_is_el0 w0, 1f

	/* Restore x0..x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	eret
1:	b	eret_to_el0
.endm
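
/*
 * Which physical interrupt type is "native" to OP-TEE depends on the GIC
 * configuration: with CFG_ARM_GICV3 native interrupts arrive as IRQ and
 * foreign ones as FIQ, otherwise the roles are swapped. The selection
 * done by elx_irq/elx_fiq below, as illustrative pseudo code (not
 * compiled):
 *
 *	#if defined(CFG_ARM_GICV3)
 *		elx_irq -> native_intr_handler
 *		elx_fiq -> foreign_intr_handler
 *	#else
 *		elx_irq -> foreign_intr_handler
 *		elx_fiq -> native_intr_handler
 *	#endif
 */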

LOCAL_FUNC elx_irq , :
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
END_FUNC elx_irq

LOCAL_FUNC elx_fiq , :
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
END_FUNC elx_fiq