/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2015-2017, Linaro Limited
 */

#include <arm.h>
#include <arm64_macros.S>
#include <asm-defines.h>
#include <asm.S>
#include <keep.h>
#include <kernel/thread_defs.h>
#include <mm/core_mmu.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

#include "thread_private.h"

	.macro get_thread_ctx core_local, res, tmp0, tmp1
		ldr	w\tmp0, [\core_local, \
				#THREAD_CORE_LOCAL_CURR_THREAD]
		adr	x\res, threads
		mov	x\tmp1, #THREAD_CTX_SIZE
		madd	x\res, x\tmp0, x\tmp1, x\res
	.endm

	.macro b_if_spsr_is_el0 reg, label
		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
		b.eq	\label
	.endm
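
/*
 * The SMC entry points below stash x0-x7 in a struct thread_smc_args on
 * the current stack and pass a pointer to it to the C handlers. The
 * assembly only relies on the THREAD_SMC_ARGS_* offsets; as a rough
 * sketch (field names here are illustrative, see the C headers for the
 * real definition) the struct is just eight consecutive 64-bit words:
 *
 * struct thread_smc_args {
 *	uint64_t a0;	(THREAD_SMC_ARGS_X0)
 *	...
 *	uint64_t a7;
 * };
 */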

LOCAL_FUNC vector_std_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (error detected) it will do a normal "C"
	 * return.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_std_smc_entry

LOCAL_FUNC vector_fast_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_fast_smc
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	adr	x16, thread_nintr_handler_ptr
	ldr	x16, [x16]
	blr	x16
	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fiq_entry

LOCAL_FUNC vector_cpu_on_entry , :
	adr	x16, thread_cpu_on_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
	adr	x16, thread_cpu_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
	adr	x16, thread_cpu_suspend_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
	adr	x16, thread_cpu_resume_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
	adr	x16, thread_system_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
	adr	x16, thread_system_reset_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.
 *
 * Note that ARM-TF depends on the layout of this vector table; any
 * change in layout has to be synced with ARM-TF.
 */
FUNC thread_vector_table , :
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
END_FUNC thread_vector_table
KEEP_PAGER thread_vector_table

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	mov	sp, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3

	b_if_spsr_is_el0 w3, 1f

	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]
	eret

1:	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC thread_resume

FUNC thread_std_smc_entry , :
	/* pass x0-x7 in a struct thread_smc_args */
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp

	/* Call the registered handler */
	bl	__thread_std_smc_entry

	/*
	 * Load the returned x0-x3 into preserved registers and skip the
	 * "returned" x4-x7 since they will not be returned to normal
	 * world.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 20, 23
	add	sp, sp, #THREAD_SMC_ARGS_SIZE

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	bl	thread_get_tmp_sp
	mov	sp, x0

	bl	thread_state_free

	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	x1, x20
	mov	x2, x21
	mov	x3, x22
	mov	x4, x23
	smc	#0
	b	.	/* SMC should not return */
END_FUNC thread_std_smc_entry
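
/*
 * thread_rpc() below suspends the current thread and exits to the
 * normal world with an RPC request described by rv[]. When the normal
 * world returns from the RPC, the thread is resumed via thread_resume()
 * at .thread_rpc_return, where the values delivered in w0-w5 are copied
 * back into the caller's rv[] array.
 */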

/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
	/* Read daif and create an SPSR */
	mrs	x1, daif
	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	push	x0, xzr
	push	x1, x30
	bl	thread_get_ctx_regs
	ldr	x30, [sp, #8]
	store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
	mov	x19, x0

	bl	thread_get_tmp_sp
	pop	x1, xzr		/* Match "push x1, x30" above */
	mov	x2, sp
	str	x2, [x19, #THREAD_CTX_REGS_SP]
	ldr	x20, [sp]	/* Get pointer to rv[] */
	mov	sp, x0		/* Switch to tmp stack */

	adr	x2, .thread_rpc_return
	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	bl	thread_state_suspend
	mov	x4, x0		/* Supply thread index */
	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	load_wregs x20, 0, 1, 3	/* Load rv[] into w1-w3 */
	smc	#0
	b	.	/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * stored in THREAD_CTX above.
	 *
	 * Jumps here from thread_resume above when RPC has returned. The
	 * IRQ and FIQ bits are restored to what they were when this
	 * function was originally entered.
	 */
	pop	x16, xzr	/* Get pointer to rv[] */
	store_wregs x16, 0, 0, 5	/* Store w0-w5 into rv[] */
	ret
END_FUNC thread_rpc
KEEP_PAGER thread_rpc

FUNC thread_init_vbar , :
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	/*
	 * For unrecognized CPUs we fall back to the vector table used for
	 * unaffected CPUs.
	 */
	mrs	x1, midr_el1
	ubfx	x2, x1, #MIDR_IMPLEMENTER_SHIFT, #MIDR_IMPLEMENTER_WIDTH
	cmp	x2, #MIDR_IMPLEMENTER_ARM
	b.ne	1f

	adr	x0, workaround_vect_table
	ubfx	x2, x1, #MIDR_PRIMARY_PART_NUM_SHIFT, \
		#MIDR_PRIMARY_PART_NUM_WIDTH
	cmp	x2, #CORTEX_A57_PART_NUM
	b.eq	2f
	cmp	x2, #CORTEX_A72_PART_NUM
	b.eq	2f
	cmp	x2, #CORTEX_A73_PART_NUM
	b.eq	2f
	cmp	x2, #CORTEX_A75_PART_NUM
	b.eq	2f
#endif
1:	adr	x0, thread_vect_table
2:	msr	vbar_el1, x0
	ret
END_FUNC thread_init_vbar
KEEP_PAGER thread_init_vbar

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *		unsigned long a2, unsigned long a3, unsigned long user_sp,
 *		unsigned long user_func, unsigned long spsr,
 *		uint32_t *exit_status0, uint32_t *exit_status1)
 */
FUNC __thread_enter_user_mode , :
	ldr	x8, [sp]
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR, 7, 8
	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30

	/*
	 * Switch to SP_EL1
	 * Disable exceptions
	 * Save kern sp in x19
	 */
	msr	daifset, #DAIFBIT_ALL
	mov	x19, sp
	msr	spsel, #1

	/*
	 * Save the kernel stack pointer in the thread context
	 */
	/* get pointer to current thread context */
	get_thread_ctx sp, 21, 20, 22
	/*
	 * Save kernel stack pointer to ensure that el0_svc() uses
	 * the correct stack pointer
	 */
	str	x19, [x21, #THREAD_CTX_KERN_SP]

	/*
	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
	 */
	msr	spsr_el1, x6
	/* Set user sp */
	mov	x13, x4		/* Used when running TA in Aarch32 */
	msr	sp_el0, x4	/* Used when running TA in Aarch64 */
	/* Set user function */
	msr	elr_el1, x5
	/* Set frame pointer (user stack can't be unwound past this point) */
	mov	x29, #0

	/* Jump into user mode */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR]
	str	w1, [x3]
	str	w2, [x4]
	/* Restore x19..x30 */
	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. It is meant to be placed
	 * immediately after the last instruction in the vector and takes
	 * the vector entry label as the parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm
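
/*
 * With CFG_CORE_UNMAP_CORE_AT_EL0 the TEE core runs with a reduced
 * mapping while executing at EL0. The restore_mapping macro below is
 * used at the start of the lower-EL exception vectors to switch back to
 * the full kernel mapping: ttbr0_el1 is moved back by
 * CORE_MMU_L1_TBL_OFFSET, the kernel mode ASID is selected and vbar_el1
 * is adjusted by thread_user_kcode_offset into the full mapping.
 * Without that configuration the macro only saves x0-x3 in the core
 * local area and selects the kernel mode ASID.
 */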
	.macro restore_mapping
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/* Temporarily save x0, x1 */
		msr	tpidr_el1, x0
		msr	tpidrro_el0, x1

		/* Update the mapping to use the full kernel mapping */
		mrs	x0, ttbr0_el1
		sub	x0, x0, #CORE_MMU_L1_TBL_OFFSET
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb

		/* Jump into the full mapping and continue execution */
		ldr	x0, =1f
		br	x0
	1:

		/* Point to the vector into the full mapping */
		adr	x0, thread_user_kcode_offset
		ldr	x0, [x0]
		mrs	x1, vbar_el1
		add	x1, x1, x0
		msr	vbar_el1, x1
		isb

		/* Restore x0, x1 */
		mrs	x0, tpidr_el1
		mrs	x1, tpidrro_el0
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
#else
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mrs	x0, ttbr0_el1
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	.endm

#define INV_INSN	0
	.section .text.thread_vect_table
	.align	11, INV_INSN
FUNC thread_vect_table , :
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el1_sync_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	el1_sync_abort
	check_vector_size el1_sync_sp0

	.align	7, INV_INSN
el1_irq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size el1_irq_sp0

	.align	7, INV_INSN
el1_fiq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size el1_fiq_sp0

	.align	7, INV_INSN
el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el1_sync_sp1:
	b	el1_sync_sp1
	check_vector_size el1_sync_sp1

	.align	7, INV_INSN
el1_irq_sp1:
	b	el1_irq_sp1
	check_vector_size el1_irq_sp1

	.align	7, INV_INSN
el1_fiq_sp1:
	b	el1_fiq_sp1
	check_vector_size el1_fiq_sp1

	.align	7, INV_INSN
el1_serror_sp1:
	b	el1_serror_sp1
	check_vector_size el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el0_sync_a64:
	restore_mapping

	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH64_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a64

	.align	7, INV_INSN
el0_irq_a64:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a64

	.align	7, INV_INSN
el0_fiq_a64:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a64

	.align	7, INV_INSN
el0_serror_a64:
	b	el0_serror_a64
	check_vector_size el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el0_sync_a32:
	restore_mapping

	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH32_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a32

	.align	7, INV_INSN
el0_irq_a32:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a32

	.align	7, INV_INSN
el0_fiq_a32:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a32

	.align	7, INV_INSN
el0_serror_a32:
	b	el0_serror_a32
	check_vector_size el0_serror_a32
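
/*
 * Alternate vector table used when CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
 * is enabled; thread_init_vbar() above installs it on the affected
 * Cortex-A cores. For exceptions taken from a lower exception level the
 * branch predictor is invalidated (ic iallu) before branching to the
 * ordinary handlers, mitigating branch predictor injection (Spectre
 * variant 2) from less privileged contexts.
 */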
#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC)
	.macro invalidate_branch_predictor
		ic	iallu
		isb
	.endm

	.align	11, INV_INSN
workaround_vect_table:
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el1_sync_sp0:
	b	el1_sync_sp0
	check_vector_size workaround_el1_sync_sp0

	.align	7, INV_INSN
workaround_el1_irq_sp0:
	b	el1_irq_sp0
	check_vector_size workaround_el1_irq_sp0

	.align	7, INV_INSN
workaround_el1_fiq_sp0:
	b	el1_fiq_sp0
	check_vector_size workaround_el1_fiq_sp0

	.align	7, INV_INSN
workaround_el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size workaround_el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el1_sync_sp1:
	b	workaround_el1_sync_sp1
	check_vector_size workaround_el1_sync_sp1

	.align	7, INV_INSN
workaround_el1_irq_sp1:
	b	workaround_el1_irq_sp1
	check_vector_size workaround_el1_irq_sp1

	.align	7, INV_INSN
workaround_el1_fiq_sp1:
	b	workaround_el1_fiq_sp1
	check_vector_size workaround_el1_fiq_sp1

	.align	7, INV_INSN
workaround_el1_serror_sp1:
	b	workaround_el1_serror_sp1
	check_vector_size workaround_el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el0_sync_a64:
	invalidate_branch_predictor
	b	el0_sync_a64
	check_vector_size workaround_el0_sync_a64

	.align	7, INV_INSN
workaround_el0_irq_a64:
	invalidate_branch_predictor
	b	el0_irq_a64
	check_vector_size workaround_el0_irq_a64

	.align	7, INV_INSN
workaround_el0_fiq_a64:
	invalidate_branch_predictor
	b	el0_fiq_a64
	check_vector_size workaround_el0_fiq_a64

	.align	7, INV_INSN
workaround_el0_serror_a64:
	b	workaround_el0_serror_a64
	check_vector_size workaround_el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el0_sync_a32:
	invalidate_branch_predictor
	b	el0_sync_a32
	check_vector_size workaround_el0_sync_a32

	.align	7, INV_INSN
workaround_el0_irq_a32:
	invalidate_branch_predictor
	b	el0_irq_a32
	check_vector_size workaround_el0_irq_a32

	.align	7, INV_INSN
workaround_el0_fiq_a32:
	invalidate_branch_predictor
	b	el0_fiq_a32
	check_vector_size workaround_el0_fiq_a32

	.align	7, INV_INSN
workaround_el0_serror_a32:
	b	workaround_el0_serror_a32
	check_vector_size workaround_el0_serror_a32
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/
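
/*
 * eret_to_el0 is the common exit path to user mode. With
 * CFG_CORE_UNMAP_CORE_AT_EL0 it moves vbar_el1 and execution into the
 * reduced mapping and switches ttbr0_el1 to the user mapping and user
 * mode ASID before issuing eret; otherwise it only selects the user
 * mode ASID. In both cases x0 and x1 are reloaded from the core local
 * area where the caller saved them.
 */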

/*
 * We're keeping this code in the same section as the vector table to
 * make sure that it's always available.
 */
eret_to_el0:

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Point to the vector into the reduced mapping */
	adr	x0, thread_user_kcode_offset
	ldr	x0, [x0]
	mrs	x1, vbar_el1
	sub	x1, x1, x0
	msr	vbar_el1, x1
	isb

	/* Jump into the reduced mapping and continue execution */
	ldr	x1, =1f
	sub	x1, x1, x0
	br	x1
1:

	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	msr	tpidr_el1, x0

	/* Update the mapping to exclude the full kernel mapping */
	mrs	x0, ttbr0_el1
	add	x0, x0, #CORE_MMU_L1_TBL_OFFSET
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT)	/* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb

	mrs	x0, tpidr_el1
#else
	mrs	x0, ttbr0_el1
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT)	/* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	eret

END_FUNC thread_vect_table
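
/*
 * el0_svc handles system calls (SVC) from user mode. It switches from
 * the core local SP_EL1 area to the kernel stack saved in
 * THREAD_CTX_KERN_SP, builds a struct thread_svc_regs frame there,
 * unmasks native interrupts and calls tee_svc_handler(). On return the
 * saved state is restored and the exception return is done either
 * directly or, when returning to EL0, via eret_to_el0.
 */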

LOCAL_FUNC el0_svc , :
	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* load saved kernel sp */
	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
	/* Keep pointer to initial record in x1 */
	mov	x1, sp
	/* Switch to SP_EL0 and restore kernel sp */
	msr	spsel, #0
	mov	x2, sp	/* Save SP_EL0 */
	mov	sp, x0

	/* Make room for struct thread_svc_regs */
	sub	sp, sp, #THREAD_SVC_REG_SIZE
	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]

	/* Restore x0-x3 */
	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]

	/* Prepare the argument for the handler */
	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
	mrs	x0, elr_el1
	mrs	x1, spsr_el1
	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	mov	x0, sp

	/*
	 * Unmask native interrupts, Serror, and debug exceptions since we
	 * have nothing left in sp_el1. Note that the SVC handler is
	 * expected to re-enable foreign interrupts by itself.
	 */
#if defined(CFG_ARM_GICV3)
	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
#else
	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
#endif

	/* Call the handler */
	bl	tee_svc_handler

	/* Mask all maskable exceptions since we're switching back to sp_el1 */
	msr	daifset, #DAIFBIT_ALL

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This is needed when this TA has called another TA because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	msr	spsel, #1
	get_thread_ctx sp, 0, 1, 2
	msr	spsel, #0
	add	x1, sp, #THREAD_SVC_REG_SIZE
	str	x1, [x0, #THREAD_CTX_KERN_SP]

	/* Restore registers to the required state and return */
	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	msr	elr_el1, x0
	msr	spsr_el1, x1
	load_xregs sp, THREAD_SVC_REG_X2, 2, 14
	mov	x30, sp
	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
	mov	sp, x0
	b_if_spsr_is_el0 w1, 1f
	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	eret

1:	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC el0_svc
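
/*
 * el1_sync_abort and el0_sync_abort below save the interrupted register
 * state in an abort register frame (the THREAD_ABT_REG_* offsets) and
 * call abort_handler(). el0_sync_abort always uses the abort stack;
 * el1_sync_abort falls back to the tmp stack when an abort is taken
 * while one is already being handled. The THREAD_CLF_* flags in the
 * core local area keep track of which stacks are in use so that such
 * nesting can be detected.
 */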

LOCAL_FUNC el1_sync_abort , :
	mov	x0, sp
	msr	spsel, #0
	mov	x3, sp		/* Save original sp */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
			.Lsel_tmp_sp

	/* Select abort stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	b	.Lset_sp

.Lsel_tmp_sp:
	/* Select tmp stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

.Lset_sp:
	mov	sp, x2
	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	eret
END_FUNC el1_sync_abort

	/* sp_el0 in x3 */
LOCAL_FUNC el0_sync_abort , :
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */

	/* load abt_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x2 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3

	b_if_spsr_is_el0 w0, 1f

	/* Restore x0 to x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	eret
1:	b	eret_to_el0
END_FUNC el0_sync_abort
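
/*
 * Interrupts are either "native", handled by OP-TEE itself, or
 * "foreign", forwarded to the normal world. With CFG_ARM_GICV3 the
 * native interrupt is IRQ and the foreign one is FIQ; without it the
 * roles are swapped (see elx_irq and elx_fiq at the end of this file).
 * A foreign interrupt suspends the current thread and exits to the
 * normal world with OPTEE_SMC_RETURN_RPC_FOREIGN_INTR; a native
 * interrupt is handled on the tmp stack through thread_nintr_handler_ptr.
 */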

/* The handler of foreign interrupts. */
.macro foreign_intr_handler	mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0

	/* Store original sp_el0 */
	str	x2, [x0, #THREAD_CTX_REGS_SP]
	/* store x4..x30 */
	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	/* Load original x0..x3 into x10..x13 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
	/* Save original x0..x3 */
	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Mark current thread as suspended
	 */
	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	x1, spsr_el1
	mrs	x2, elr_el1
	bl	thread_state_suspend
	mov	w4, w0		/* Supply thread index */

	/* Update core local flags */
	/* Switch to SP_EL1 */
	msr	spsel, #1
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	msr	spsel, #0

	/*
	 * Note that we're exiting with SP_EL0 selected since the entry
	 * functions expect to have SP_EL0 selected with the tmp stack
	 * set.
	 */

	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	w1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
	mov	w2, #0
	mov	w3, #0
	/* w4 is already filled in above */
	smc	#0
	b	.	/* SMC should not return */
.endm

/*
 * This struct is never used from C; it's only here to visualize the
 * layout.
 *
 * struct elx_nintr_rec {
 *	uint64_t x[19 - 4];	x4..x18
 *	uint64_t lr;
 *	uint64_t sp_el0;
 * };
 */
#define ELX_NINTR_REC_X(x)	(8 * ((x) - 4))
#define ELX_NINTR_REC_LR	(8 + ELX_NINTR_REC_X(19))
#define ELX_NINTR_REC_SP_EL0	(8 + ELX_NINTR_REC_LR)
#define ELX_NINTR_REC_SIZE	(8 + ELX_NINTR_REC_SP_EL0)

/* The handler of native interrupts. */
.macro native_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Save registers on stack that can be corrupted by a call to
	 * a C function
	 */
	/* Make room for struct elx_nintr_rec */
	sub	sp, sp, #ELX_NINTR_REC_SIZE
	/* Store x4..x18 */
	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Store lr and original sp_el0 */
	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]

	bl	thread_check_canaries
	adr	x16, thread_nintr_handler_ptr
	ldr	x16, [x16]
	blr	x16

	/*
	 * Restore registers
	 */
	/* Restore x4..x18 */
	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Load lr and original sp_el0 */
	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
	/* Restore SP_EL0 */
	mov	sp, x2
	/* Switch back to SP_EL1 */
	msr	spsel, #1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	mrs	x0, spsr_el1
	/* Restore x2..x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
	b_if_spsr_is_el0 w0, 1f

	/* Restore x0..x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	eret
1:	b	eret_to_el0
.endm

LOCAL_FUNC elx_irq , :
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
END_FUNC elx_irq

LOCAL_FUNC elx_fiq , :
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
END_FUNC elx_fiq