/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2015-2017, Linaro Limited
 */

#include <arm64_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/thread_defs.h>
#include <mm/core_mmu.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

#include "thread_private.h"

	.macro get_thread_ctx core_local, res, tmp0, tmp1
		ldr	w\tmp0, [\core_local, \
				#THREAD_CORE_LOCAL_CURR_THREAD]
		adr	x\res, threads
		mov	x\tmp1, #THREAD_CTX_SIZE
		madd	x\res, x\tmp0, x\tmp1, x\res
	.endm

	.macro b_if_spsr_is_el0 reg, label
		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
		b.eq	\label
	.endm

LOCAL_FUNC vector_std_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stacks (an error was detected) it does a normal
	 * "C" return.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_std_smc_entry

LOCAL_FUNC vector_fast_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_fast_smc
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	adr	x16, thread_nintr_handler_ptr
	ldr	x16, [x16]
	blr	x16
	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fiq_entry

LOCAL_FUNC vector_cpu_on_entry , :
	adr	x16, thread_cpu_on_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
	adr	x16, thread_cpu_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
	adr	x16, thread_cpu_suspend_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
	adr	x16, thread_cpu_resume_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
	adr	x16, thread_system_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_off_entry
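
/*
 * Note on the CPU and system state entries above and below: they all
 * follow the same pattern. The registered handler is called through its
 * *_handler_ptr, the handler's return value is moved from x0 to x1, the
 * matching TEESMC_OPTEED_RETURN_*_DONE code is placed in x0 and an SMC
 * hands control back to the secure monitor. The SMC is not expected to
 * return.
 */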
LOCAL_FUNC vector_system_reset_entry , :
	adr	x16, thread_system_reset_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.
 *
 * Note that ARM-TF depends on the layout of this vector table; any change
 * in layout has to be synced with ARM-TF.
 */
FUNC thread_vector_table , :
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
END_FUNC thread_vector_table
KEEP_PAGER thread_vector_table


/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	mov	sp, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3

	b_if_spsr_is_el0 w3, 1f

	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]
	eret

1:	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC thread_resume

FUNC thread_std_smc_entry , :
	/* pass x0-x7 in a struct thread_smc_args */
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp

	/* Call the registered handler */
	bl	__thread_std_smc_entry

	/*
	 * Load the returned x0-x3 into preserved registers and skip the
	 * "returned" x4-x7 since they will not be returned to normal
	 * world.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 20, 23
	add	sp, sp, #THREAD_SMC_ARGS_SIZE

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	bl	thread_get_tmp_sp
	mov	sp, x0

	bl	thread_state_free

	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	x1, x20
	mov	x2, x21
	mov	x3, x22
	mov	x4, x23
	smc	#0
	b	.	/* SMC should not return */
END_FUNC thread_std_smc_entry
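
/*
 * Note on the exit sequence in thread_std_smc_entry() above: the first
 * four words returned by __thread_std_smc_entry() are kept in the
 * callee-saved registers x20-x23 across the calls to thread_get_tmp_sp()
 * and thread_state_free(), and are then handed back to the normal world
 * as x1-x4 of the TEESMC_OPTEED_RETURN_CALL_DONE SMC.
 */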
/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
	/* Read daif and create an SPSR */
	mrs	x1, daif
	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	push	x0, xzr
	push	x1, x30
	bl	thread_get_ctx_regs
	ldr	x30, [sp, #8]
	store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
	mov	x19, x0

	bl	thread_get_tmp_sp
	pop	x1, xzr		/* Match "push x1, x30" above */
	mov	x2, sp
	str	x2, [x19, #THREAD_CTX_REGS_SP]
	ldr	x20, [sp]	/* Get pointer to rv[] */
	mov	sp, x0		/* Switch to tmp stack */

	adr	x2, .thread_rpc_return
	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	bl	thread_state_suspend
	mov	x4, x0		/* Supply thread index */
	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	load_wregs x20, 0, 1, 3	/* Load rv[] into w1-w3 */
	smc	#0
	b	.	/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * stored in THREAD_CTX above.
	 *
	 * Jumps here from thread_resume above when RPC has returned. The
	 * IRQ and FIQ bits are restored to what they were when this
	 * function was originally entered.
	 */
	pop	x16, xzr	/* Get pointer to rv[] */
	store_wregs x16, 0, 0, 5	/* Store w0-w5 into rv[] */
	ret
END_FUNC thread_rpc
KEEP_PAGER thread_rpc

FUNC thread_smc , :
	smc	#0
	ret
END_FUNC thread_smc

FUNC thread_init_vbar , :
	msr	vbar_el1, x0
	ret
END_FUNC thread_init_vbar
KEEP_PAGER thread_init_vbar

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *		unsigned long a2, unsigned long a3, unsigned long user_sp,
 *		unsigned long user_func, unsigned long spsr,
 *		uint32_t *exit_status0, uint32_t *exit_status1)
 */
FUNC __thread_enter_user_mode , :
	ldr	x8, [sp]
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR, 7, 8
	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30

	/*
	 * Switch to SP_EL1
	 * Disable exceptions
	 * Save kern sp in x19
	 */
	msr	daifset, #DAIFBIT_ALL
	mov	x19, sp
	msr	spsel, #1

	/*
	 * Save the kernel stack pointer in the thread context
	 */
	/* get pointer to current thread context */
	get_thread_ctx sp, 21, 20, 22
	/*
	 * Save kernel stack pointer to ensure that el0_svc() uses
	 * correct stack pointer
	 */
	str	x19, [x21, #THREAD_CTX_KERN_SP]

	/*
	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
	 */
	msr	spsr_el1, x6
	/* Set user sp */
	mov	x13, x4		/* Used when running TA in Aarch32 */
	msr	sp_el0, x4	/* Used when running TA in Aarch64 */
	/* Set user function */
	msr	elr_el1, x5
	/* Set frame pointer (user stack can't be unwound past this point) */
	mov	x29, #0

	/* Jump into user mode */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR]
	str	w1, [x3]
	str	w2, [x4]
	/* Restore x19..x30 */
	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode
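
/*
 * Rough C-level view of how the two functions above pair up (for
 * illustration only, the real callers live in the C parts of the thread
 * machinery):
 *
 *	ret = __thread_enter_user_mode(a0, a1, a2, a3, user_sp, user_func,
 *				       spsr, &exit_status0, &exit_status1);
 *
 * only returns once kernel code later calls
 * thread_unwind_user_mode(ret, exit_status0, exit_status1), which restores
 * the x19-x30 saved in struct thread_user_mode_rec and makes the original
 * call return "ret".
 */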
	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. It is meant to be placed
	 * immediately after the last instruction in the vector and takes the
	 * vector entry as the parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm

	.macro restore_mapping
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/* Temporarily save x0, x1 */
		msr	tpidr_el1, x0
		msr	tpidrro_el0, x1

		/* Update the mapping to use the full kernel mapping */
		mrs	x0, ttbr0_el1
		sub	x0, x0, #CORE_MMU_L1_TBL_OFFSET
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb

		/* Jump into the full mapping and continue execution */
		ldr	x0, =1f
		br	x0
	1:

		/* Point to the vector into the full mapping */
		adr	x0, thread_user_kcode_offset
		ldr	x0, [x0]
		mrs	x1, vbar_el1
		add	x1, x1, x0
		msr	vbar_el1, x1
		isb

		/* Restore x0, x1 */
		mrs	x0, tpidr_el1
		mrs	x1, tpidrro_el0
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
#else
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mrs	x0, ttbr0_el1
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	.endm

#define INV_INSN	0
	.section .text.thread_excp_vect
	.align	11, INV_INSN
FUNC thread_excp_vect , :
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el1_sync_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	el1_sync_abort
	check_vector_size el1_sync_sp0

	.align	7, INV_INSN
el1_irq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size el1_irq_sp0

	.align	7, INV_INSN
el1_fiq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size el1_fiq_sp0

	.align	7, INV_INSN
el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el1_sync_sp1:
	b	el1_sync_sp1
	check_vector_size el1_sync_sp1

	.align	7, INV_INSN
el1_irq_sp1:
	b	el1_irq_sp1
	check_vector_size el1_irq_sp1

	.align	7, INV_INSN
el1_fiq_sp1:
	b	el1_fiq_sp1
	check_vector_size el1_fiq_sp1

	.align	7, INV_INSN
el1_serror_sp1:
	b	el1_serror_sp1
	check_vector_size el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el0_sync_a64:
	restore_mapping

	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH64_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a64

	.align	7, INV_INSN
el0_irq_a64:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a64

	.align	7, INV_INSN
el0_fiq_a64:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a64

	.align	7, INV_INSN
el0_serror_a64:
	b	el0_serror_a64
	check_vector_size el0_serror_a64
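
	/*
	 * The lower EL synchronous entries above and below dispatch on the
	 * exception class in ESR_EL1: an SVC (ESR_EC_AARCH64_SVC or
	 * ESR_EC_AARCH32_SVC) is routed to el0_svc, anything else goes to
	 * el0_sync_abort, which expects the saved sp_el0 in x3.
	 */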
	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el0_sync_a32:
	restore_mapping

	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH32_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a32

	.align	7, INV_INSN
el0_irq_a32:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a32

	.align	7, INV_INSN
el0_fiq_a32:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a32

	.align	7, INV_INSN
el0_serror_a32:
	b	el0_serror_a32
	check_vector_size el0_serror_a32

#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC)
	.macro invalidate_branch_predictor
		ic	iallu
		isb
	.endm

	.align	11, INV_INSN
	.global thread_excp_vect_workaround
thread_excp_vect_workaround:
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el1_sync_sp0:
	b	el1_sync_sp0
	check_vector_size workaround_el1_sync_sp0

	.align	7, INV_INSN
workaround_el1_irq_sp0:
	b	el1_irq_sp0
	check_vector_size workaround_el1_irq_sp0

	.align	7, INV_INSN
workaround_el1_fiq_sp0:
	b	el1_fiq_sp0
	check_vector_size workaround_el1_fiq_sp0

	.align	7, INV_INSN
workaround_el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size workaround_el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el1_sync_sp1:
	b	workaround_el1_sync_sp1
	check_vector_size workaround_el1_sync_sp1

	.align	7, INV_INSN
workaround_el1_irq_sp1:
	b	workaround_el1_irq_sp1
	check_vector_size workaround_el1_irq_sp1

	.align	7, INV_INSN
workaround_el1_fiq_sp1:
	b	workaround_el1_fiq_sp1
	check_vector_size workaround_el1_fiq_sp1

	.align	7, INV_INSN
workaround_el1_serror_sp1:
	b	workaround_el1_serror_sp1
	check_vector_size workaround_el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el0_sync_a64:
	invalidate_branch_predictor
	b	el0_sync_a64
	check_vector_size workaround_el0_sync_a64

	.align	7, INV_INSN
workaround_el0_irq_a64:
	invalidate_branch_predictor
	b	el0_irq_a64
	check_vector_size workaround_el0_irq_a64

	.align	7, INV_INSN
workaround_el0_fiq_a64:
	invalidate_branch_predictor
	b	el0_fiq_a64
	check_vector_size workaround_el0_fiq_a64

	.align	7, INV_INSN
workaround_el0_serror_a64:
	b	workaround_el0_serror_a64
	check_vector_size workaround_el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el0_sync_a32:
	invalidate_branch_predictor
	b	el0_sync_a32
	check_vector_size workaround_el0_sync_a32

	.align	7, INV_INSN
workaround_el0_irq_a32:
	invalidate_branch_predictor
	b	el0_irq_a32
	check_vector_size workaround_el0_irq_a32

	.align	7, INV_INSN
workaround_el0_fiq_a32:
	invalidate_branch_predictor
	b	el0_fiq_a32
	check_vector_size workaround_el0_fiq_a32

	.align	7, INV_INSN
workaround_el0_serror_a32:
	b	workaround_el0_serror_a32
	check_vector_size workaround_el0_serror_a32
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/
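
/*
 * When CFG_CORE_WORKAROUND_SPECTRE_BP_SEC is enabled, the
 * thread_excp_vect_workaround table above can be installed in VBAR_EL1 in
 * place of thread_excp_vect (typically only on affected CPUs): entries
 * taken from a lower exception level first invalidate the branch
 * predictor (ic iallu + isb) and then branch to the corresponding regular
 * handler, while the EL1-with-SP0 entries simply alias the regular ones
 * and the SP1 entries hang like their regular counterparts.
 */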
/*
 * We're keeping this code in the same section as the vector to make sure
 * that it's always available.
 */
eret_to_el0:

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Point to the vector into the reduced mapping */
	adr	x0, thread_user_kcode_offset
	ldr	x0, [x0]
	mrs	x1, vbar_el1
	sub	x1, x1, x0
	msr	vbar_el1, x1
	isb

	/* Jump into the reduced mapping and continue execution */
	ldr	x1, =1f
	sub	x1, x1, x0
	br	x1
1:

	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	msr	tpidr_el1, x0

	/* Update the mapping to exclude the full kernel mapping */
	mrs	x0, ttbr0_el1
	add	x0, x0, #CORE_MMU_L1_TBL_OFFSET
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT)	/* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb

	mrs	x0, tpidr_el1
#else
	mrs	x0, ttbr0_el1
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT)	/* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	eret

END_FUNC thread_excp_vect

LOCAL_FUNC el0_svc , :
	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* load saved kernel sp */
	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
	/* Keep pointer to initial record in x1 */
	mov	x1, sp
	/* Switch to SP_EL0 and restore kernel sp */
	msr	spsel, #0
	mov	x2, sp	/* Save SP_EL0 */
	mov	sp, x0

	/* Make room for struct thread_svc_regs */
	sub	sp, sp, #THREAD_SVC_REG_SIZE
	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]

	/* Restore x0-x3 */
	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]

	/* Prepare the argument for the handler */
	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
	mrs	x0, elr_el1
	mrs	x1, spsr_el1
	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	mov	x0, sp

	/*
	 * Unmask native interrupts, SError, and debug exceptions since we
	 * have nothing left in sp_el1. Note that the SVC handler is
	 * expected to re-enable foreign interrupts by itself.
	 */
#if defined(CFG_ARM_GICV3)
	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
#else
	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
#endif

	/* Call the handler */
	bl	tee_svc_handler

	/* Mask all maskable exceptions since we're switching back to sp_el1 */
	msr	daifset, #DAIFBIT_ALL

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This is needed when this TA has called another TA because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	msr	spsel, #1
	get_thread_ctx sp, 0, 1, 2
	msr	spsel, #0
	add	x1, sp, #THREAD_SVC_REG_SIZE
	str	x1, [x0, #THREAD_CTX_KERN_SP]

	/* Restore registers to the required state and return */
	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	msr	elr_el1, x0
	msr	spsr_el1, x1
	load_xregs sp, THREAD_SVC_REG_X2, 2, 14
	mov	x30, sp
	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
	mov	sp, x0
	b_if_spsr_is_el0 w1, 1f
	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	eret

1:	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC el0_svc
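
/*
 * The exception return paths in this file pick between a plain eret and
 * eret_to_el0 using b_if_spsr_is_el0: if the saved SPSR says we are
 * returning to EL0, the exit goes through eret_to_el0 above, which
 * switches TTBR0 back to the user mode ASID (and, with
 * CFG_CORE_UNMAP_CORE_AT_EL0, to the reduced mapping) before the eret;
 * returns to EL1 keep the kernel mapping and eret directly.
 */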
LOCAL_FUNC el1_sync_abort , :
	mov	x0, sp
	msr	spsel, #0
	mov	x3, sp		/* Save original sp */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
			.Lsel_tmp_sp

	/* Select abort stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	b	.Lset_sp

.Lsel_tmp_sp:
	/* Select tmp stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

.Lset_sp:
	mov	sp, x2
	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	eret
END_FUNC el1_sync_abort
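
/*
 * The abort handlers above and below keep a small history in the core
 * local flags, roughly:
 *
 *	on entry:	flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
 *	on exit:	flags >>= THREAD_CLF_SAVED_SHIFT;
 *
 * In el1_sync_abort this also decides the stack: if the previously saved
 * flags already had THREAD_CLF_ABORT set (a nested abort), the tmp stack
 * is selected instead of the abort stack and THREAD_CLF_TMP is set.
 */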
	/* sp_el0 in x3 */
LOCAL_FUNC el0_sync_abort , :
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */

	/* load abt_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in EL1_REC */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x2 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3

	b_if_spsr_is_el0 w0, 1f

	/* Restore x0 to x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	eret
1:	b	eret_to_el0
END_FUNC el0_sync_abort

/* The handler of foreign interrupts. */
.macro foreign_intr_handler	mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0

	/* Store original sp_el0 */
	str	x2, [x0, #THREAD_CTX_REGS_SP]
	/* store x4..x30 */
	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	/* Load original x0..x3 into x10..x13 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
	/* Save original x0..x3 */
	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Mark current thread as suspended
	 */
	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	x1, spsr_el1
	mrs	x2, elr_el1
	bl	thread_state_suspend
	mov	w4, w0		/* Supply thread index */

	/* Update core local flags */
	/* Switch to SP_EL1 */
	msr	spsel, #1
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	msr	spsel, #0

	/*
	 * Note that we're exiting with SP_EL0 selected since the entry
	 * functions expect to have SP_EL0 selected with the tmp stack
	 * set.
	 */

	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	w1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
	mov	w2, #0
	mov	w3, #0
	/* w4 is already filled in above */
	smc	#0
	b	.	/* SMC should not return */
.endm

/*
 * This struct is never used from C; it's only here to visualize the
 * layout.
 *
 * struct elx_nintr_rec {
 *	uint64_t x[19 - 4];	x4..x18
 *	uint64_t lr;
 *	uint64_t sp_el0;
 * };
 */
#define ELX_NINTR_REC_X(x)	(8 * ((x) - 4))
#define ELX_NINTR_REC_LR	(8 + ELX_NINTR_REC_X(19))
#define ELX_NINTR_REC_SP_EL0	(8 + ELX_NINTR_REC_LR)
#define ELX_NINTR_REC_SIZE	(8 + ELX_NINTR_REC_SP_EL0)
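
/*
 * The offsets above leave 8 unused bytes between x18 and lr; presumably
 * this padding keeps ELX_NINTR_REC_SIZE a multiple of 16 so that sp stays
 * 16-byte aligned when the record is pushed in native_intr_handler below.
 */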
/* The handler of native interrupts. */
.macro native_intr_handler	mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Save registers on stack that can be corrupted by a call to
	 * a C function
	 */
	/* Make room for struct elx_nintr_rec */
	sub	sp, sp, #ELX_NINTR_REC_SIZE
	/* Store x4..x18 */
	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Store lr and original sp_el0 */
	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]

	bl	thread_check_canaries
	adr	x16, thread_nintr_handler_ptr
	ldr	x16, [x16]
	blr	x16

	/*
	 * Restore registers
	 */
	/* Restore x4..x18 */
	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Load lr and original sp_el0 */
	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
	/* Restore SP_EL0 */
	mov	sp, x2
	/* Switch back to SP_EL1 */
	msr	spsel, #1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	mrs	x0, spsr_el1
	/* Restore x2..x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
	b_if_spsr_is_el0 w0, 1f

	/* Restore x0..x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	eret
1:	b	eret_to_el0
.endm

LOCAL_FUNC elx_irq , :
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
END_FUNC elx_irq

LOCAL_FUNC elx_fiq , :
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
END_FUNC elx_fiq
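
/*
 * As reflected in the two functions above: with CFG_ARM_GICV3 native
 * (secure) interrupts arrive as IRQs and foreign (normal world)
 * interrupts as FIQs, so elx_irq uses the native handler and elx_fiq the
 * foreign one; without GICv3 the mapping is the other way around.
 */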