/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2015-2020, Linaro Limited
 */

#include <arm64_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/cache_helpers.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/core_mmu.h>
#include <smccc.h>

	.macro get_thread_ctx core_local, res, tmp0, tmp1
		ldrh	w\tmp0, [\core_local, \
				#THREAD_CORE_LOCAL_CURR_THREAD]
		adr_l	x\res, threads
		mov	x\tmp1, #THREAD_CTX_SIZE
		madd	x\res, x\tmp0, x\tmp1, x\res
	.endm

	.macro return_from_exception
		eret
		/* Guard against speculation past ERET */
		dsb	nsh
		isb
	.endm

	.macro b_if_spsr_is_el0 reg, label
		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
		b.eq	\label
	.endm

	.macro disable_pauth reg
#ifdef CFG_TA_PAUTH
		mrs	\reg, sctlr_el1
		bic	\reg, \reg, #SCTLR_ENIA
#ifdef CFG_TA_BTI
		orr	\reg, \reg, #(SCTLR_BT0 | SCTLR_BT1)
#endif
		msr	sctlr_el1, \reg
#endif
	.endm

	.macro enable_pauth reg
#ifdef CFG_TA_PAUTH
		mrs	\reg, sctlr_el1
		orr	\reg, \reg, #SCTLR_ENIA
#ifdef CFG_TA_BTI
		bic	\reg, \reg, #(SCTLR_BT0 | SCTLR_BT1)
#endif
		msr	sctlr_el1, \reg
#endif
	.endm

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	mov	sp, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3
	ldr	x1, [x0, THREAD_CTX_REGS_TPIDR_EL0]
	msr	tpidr_el0, x1

	b_if_spsr_is_el0 w3, 1f

	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]
	return_from_exception

1:
#ifdef CFG_TA_PAUTH
	/* Restore the PAC keys before returning to EL0 */
	load_xregs x0, THREAD_CTX_REGS_APIAKEY_HI, 1, 2
	write_apiakeyhi	x1
	write_apiakeylo	x2
#endif

	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC thread_resume

FUNC thread_smc , :
	smc	#0
	ret
END_FUNC thread_smc

/* void thread_smccc(struct thread_smc_args *arg_res) */
FUNC thread_smccc , :
	push	x0, xzr
	mov	x8, x0
	load_xregs x8, 0, 0, 7
#ifdef CFG_CORE_SEL2_SPMC
	hvc	#0
#else
	smc	#0
#endif
	pop	x8, xzr
	store_xregs x8, 0, 0, 7
	ret
END_FUNC thread_smccc

FUNC thread_init_vbar , :
	msr	vbar_el1, x0
	ret
END_FUNC thread_init_vbar
DECLARE_KEEP_PAGER thread_init_vbar

/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *				     uint32_t *exit_status0,
 *				     uint32_t *exit_status1);
 *
 * This function depends on being called with exceptions masked.
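 *
 * It saves the callee-saved registers and the exit status pointers in a
 * struct thread_user_mode_rec on the kernel stack, records the kernel
 * stack pointer in the current thread context, loads the user register
 * state from regs and enters user mode via eret_to_el0. The matching
 * return path back to the caller is thread_unwind_user_mode().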
 */
FUNC __thread_enter_user_mode , :
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 0, 2
	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30

	/*
	 * Save the kernel sp in x19
	 * Switch to SP_EL1
	 */
	mov	x19, sp
	msr	spsel, #1

	/*
	 * Save the kernel stack pointer in the thread context
	 */
	/* get pointer to current thread context */
	get_thread_ctx sp, 21, 20, 22
	/*
	 * Save the kernel stack pointer to ensure that el0_svc() uses
	 * the correct stack pointer
	 */
	str	x19, [x21, #THREAD_CTX_KERN_SP]

	/*
	 * Initialize SPSR, ELR_EL1 and SP_EL0 to enter user mode
	 */
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	msr	sp_el0, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3

#ifdef CFG_TA_PAUTH
	/* Load APIAKEY */
	load_xregs x0, THREAD_CTX_REGS_APIAKEY_HI, 1, 2
	write_apiakeyhi	x1
	write_apiakeylo	x2
#endif

	/*
	 * Save the values for x0 and x1 in struct thread_core_local to be
	 * restored later just before the eret.
	 */
	load_xregs x0, THREAD_CTX_REGS_X0, 1, 2
	store_xregs sp, THREAD_CORE_LOCAL_X0, 1, 2

	/* Load the rest of the general purpose registers */
	load_xregs x0, THREAD_CTX_REGS_X2, 2, 30

	/* Jump into user mode */
	b	eret_to_el0
END_FUNC __thread_enter_user_mode
DECLARE_KEEP_PAGER __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	load_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 3, 5
	str	w1, [x4]
	str	w2, [x5]
	/* Save x19..x30 */
	store_xregs x3, THREAD_CTX_REGS_X19, 19, 30
	/* Restore x19..x30 */
	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. It is meant to be placed
	 * immediately after the last instruction in the vector and takes
	 * the vector entry as its parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm

	.macro restore_mapping
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/* Temporarily save x0, x1 */
		msr	tpidr_el1, x0
		msr	tpidrro_el0, x1

		/* Update the mapping to use the full kernel mapping */
		mrs	x0, ttbr0_el1
		sub	x0, x0, #CORE_MMU_BASE_TABLE_OFFSET
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb

		/* Jump into the full mapping and continue execution */
		ldr	x0, =1f
		br	x0
	1:
BTI(		bti	j)
		/* Point to the vector in the full mapping */
		adr_l	x0, thread_user_kcode_offset
		ldr	x0, [x0]
		mrs	x1, vbar_el1
		add	x1, x1, x0
		msr	vbar_el1, x1
		isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
		/*
		 * Update the SP with thread_user_kdata_sp_offset as
		 * described in init_user_kcode().
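		 * At this point SP_EL1 still holds the address of struct
		 * thread_core_local as seen in the user-visible mapping;
		 * adding the offset converts it to the address in the full
		 * kernel mapping.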
		 */
		adr_l	x0, thread_user_kdata_sp_offset
		ldr	x0, [x0]
		add	sp, sp, x0
#endif

		/* Restore x0, x1 */
		mrs	x0, tpidr_el1
		mrs	x1, tpidrro_el0
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
#else
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mrs	x0, ttbr0_el1
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	.endm

#define INV_INSN	0
FUNC thread_excp_vect , : , default, 2048, nobti
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
el1_sync_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	el1_sync_abort
	check_vector_size el1_sync_sp0

	.balign	128, INV_INSN
el1_irq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size el1_irq_sp0

	.balign	128, INV_INSN
el1_fiq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size el1_fiq_sp0

	.balign	128, INV_INSN
el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1 : 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
el1_sync_sp1:
	b	el1_sync_sp1
	check_vector_size el1_sync_sp1

	.balign	128, INV_INSN
el1_irq_sp1:
	b	el1_irq_sp1
	check_vector_size el1_irq_sp1

	.balign	128, INV_INSN
el1_fiq_sp1:
	b	el1_fiq_sp1
	check_vector_size el1_fiq_sp1

	.balign	128, INV_INSN
el1_serror_sp1:
	b	el1_serror_sp1
	check_vector_size el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
el0_sync_a64:
	restore_mapping
	/* PAuth is disabled later, otherwise check_vector_size would fail */

	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH64_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a64

	.balign	128, INV_INSN
el0_irq_a64:
	restore_mapping
	disable_pauth x1

	b	elx_irq
	check_vector_size el0_irq_a64

	.balign	128, INV_INSN
el0_fiq_a64:
	restore_mapping
	disable_pauth x1

	b	elx_fiq
	check_vector_size el0_fiq_a64

	.balign	128, INV_INSN
el0_serror_a64:
	b	el0_serror_a64
	check_vector_size el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
el0_sync_a32:
	restore_mapping

	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH32_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a32

	.balign	128, INV_INSN
el0_irq_a32:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a32

	.balign	128, INV_INSN
el0_fiq_a32:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a32

	.balign	128, INV_INSN
el0_serror_a32:
	b	el0_serror_a32
	check_vector_size el0_serror_a32

#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC)
	.macro invalidate_branch_predictor
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
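		/*
		 * SMCCC_ARCH_WORKAROUND_1: ask the firmware at EL3 to
		 * invalidate the branch predictor (mitigation for
		 * Spectre variant 2, CVE-2017-5715).
		 */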
		mov_imm	x0, SMCCC_ARCH_WORKAROUND_1
		smc	#0
		load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	.endm

	.balign	2048, INV_INSN
	.global thread_excp_vect_workaround
thread_excp_vect_workaround:
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
workaround_el1_sync_sp0:
	b	el1_sync_sp0
	check_vector_size workaround_el1_sync_sp0

	.balign	128, INV_INSN
workaround_el1_irq_sp0:
	b	el1_irq_sp0
	check_vector_size workaround_el1_irq_sp0

	.balign	128, INV_INSN
workaround_el1_fiq_sp0:
	b	el1_fiq_sp0
	check_vector_size workaround_el1_fiq_sp0

	.balign	128, INV_INSN
workaround_el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size workaround_el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1 : 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
workaround_el1_sync_sp1:
	b	workaround_el1_sync_sp1
	check_vector_size workaround_el1_sync_sp1

	.balign	128, INV_INSN
workaround_el1_irq_sp1:
	b	workaround_el1_irq_sp1
	check_vector_size workaround_el1_irq_sp1

	.balign	128, INV_INSN
workaround_el1_fiq_sp1:
	b	workaround_el1_fiq_sp1
	check_vector_size workaround_el1_fiq_sp1

	.balign	128, INV_INSN
workaround_el1_serror_sp1:
	b	workaround_el1_serror_sp1
	check_vector_size workaround_el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
workaround_el0_sync_a64:
	invalidate_branch_predictor
	b	el0_sync_a64
	check_vector_size workaround_el0_sync_a64

	.balign	128, INV_INSN
workaround_el0_irq_a64:
	invalidate_branch_predictor
	b	el0_irq_a64
	check_vector_size workaround_el0_irq_a64

	.balign	128, INV_INSN
workaround_el0_fiq_a64:
	invalidate_branch_predictor
	b	el0_fiq_a64
	check_vector_size workaround_el0_fiq_a64

	.balign	128, INV_INSN
workaround_el0_serror_a64:
	b	workaround_el0_serror_a64
	check_vector_size workaround_el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
workaround_el0_sync_a32:
	invalidate_branch_predictor
	b	el0_sync_a32
	check_vector_size workaround_el0_sync_a32

	.balign	128, INV_INSN
workaround_el0_irq_a32:
	invalidate_branch_predictor
	b	el0_irq_a32
	check_vector_size workaround_el0_irq_a32

	.balign	128, INV_INSN
workaround_el0_fiq_a32:
	invalidate_branch_predictor
	b	el0_fiq_a32
	check_vector_size workaround_el0_fiq_a32

	.balign	128, INV_INSN
workaround_el0_serror_a32:
	b	workaround_el0_serror_a32
	check_vector_size workaround_el0_serror_a32
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

/*
 * We're keeping this code in the same section as the vector to make sure
 * that it's always available.
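 *
 * eret_to_el0 is the common exit path to user mode: it re-enables PAuth
 * for the TA and, with CFG_CORE_UNMAP_CORE_AT_EL0, switches VBAR_EL1, the
 * translation table base and the ASID over to the reduced user-visible
 * mapping before the final eret.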
 */
eret_to_el0:
	enable_pauth x1

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Point to the vector in the reduced mapping */
	adr_l	x0, thread_user_kcode_offset
	ldr	x0, [x0]
	mrs	x1, vbar_el1
	sub	x1, x1, x0
	msr	vbar_el1, x1
	isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	/* Store the SP offset in tpidr_el1 to be used below to update SP */
	adr_l	x1, thread_user_kdata_sp_offset
	ldr	x1, [x1]
	msr	tpidr_el1, x1
#endif

	/* Jump into the reduced mapping and continue execution */
	adr_l	x1, 1f
	sub	x1, x1, x0
	br	x1
1:
BTI(	bti	j)
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	msr	tpidrro_el0, x0

	/* Update the mapping to exclude the full kernel mapping */
	mrs	x0, ttbr0_el1
	add	x0, x0, #CORE_MMU_BASE_TABLE_OFFSET
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT)	/* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	/*
	 * Update the SP with thread_user_kdata_sp_offset as described in
	 * init_user_kcode().
	 */
	mrs	x0, tpidr_el1
	sub	sp, sp, x0
#endif

	mrs	x0, tpidrro_el0
#else
	mrs	x0, ttbr0_el1
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT)	/* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	return_from_exception

	/*
	 * void icache_inv_user_range(void *addr, size_t size);
	 *
	 * This function has to execute with the user space ASID active,
	 * which means executing with the reduced mapping, so the code
	 * needs to be located here together with the vector.
	 */
	.global icache_inv_user_range
	.type icache_inv_user_range , %function
icache_inv_user_range:
	/* Mask all exceptions */
	mrs	x6, daif	/* this register must be preserved */
	msr	daifset, #DAIFBIT_ALL

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Point to the vector in the reduced mapping */
	adr_l	x7, thread_user_kcode_offset
	ldr	x7, [x7]	/* this register must be preserved */
	mrs	x4, vbar_el1	/* this register must be preserved */
	sub	x3, x4, x7
	msr	vbar_el1, x3
	isb

	/* Jump into the reduced mapping and continue execution */
	adr	x3, 1f
	sub	x3, x3, x7
	br	x3
1:
BTI(	bti	j)
	/* Update the mapping to exclude the full kernel mapping */
	mrs	x5, ttbr0_el1	/* this register must be preserved */
	add	x2, x5, #CORE_MMU_BASE_TABLE_OFFSET
	orr	x2, x2, #BIT(TTBR_ASID_SHIFT)	/* switch to user mode ASID */
	msr	ttbr0_el1, x2
	isb

#else
	mrs	x5, ttbr0_el1	/* this register must be preserved */
	orr	x2, x5, #BIT(TTBR_ASID_SHIFT)	/* switch to user mode ASID */
	msr	ttbr0_el1, x2
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	/*
	 * Do the actual icache invalidation
	 */

	/* Calculate minimum icache line size, result in x2 */
	mrs	x3, ctr_el0
	and	x3, x3, #CTR_IMINLINE_MASK
	mov	x2, #CTR_WORD_SIZE
	lsl	x2, x2, x3

	add	x1, x0, x1
	sub	x3, x2, #1
	bic	x0, x0, x3
1:
	ic	ivau, x0
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	ish

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Update the mapping to use the full kernel mapping and ASID */
	msr	ttbr0_el1, x5
	isb

	/* Jump into the full mapping and continue execution */
	adr	x0, 1f
	add	x0, x0, x7
	br	x0
1:
BTI(	bti	j)
	/* Point to the vector in the full mapping */
	msr	vbar_el1, x4
	isb
#else
	/* switch to kernel mode ASID */
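	/* x5 still holds the original TTBR0_EL1 value saved above */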
	msr	ttbr0_el1, x5
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	msr	daif, x6	/* restore exceptions */
	ret	/* End of icache_inv_user_range() */

	/*
	 * Make sure that literals are placed before the
	 * thread_excp_vect_end label.
	 */
	.pool
	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect

LOCAL_FUNC el0_svc , :
	disable_pauth x1
	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	mrs	x1, tpidr_el0
	str	x1, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
	/* load saved kernel sp */
	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
	/* Keep pointer to initial record in x1 */
	mov	x1, sp
	/* Switch to SP_EL0 and restore kernel sp */
	msr	spsel, #0
	mov	x2, sp	/* Save SP_EL0 */
	mov	sp, x0

	/* Make room for struct thread_svc_regs */
	sub	sp, sp, #THREAD_SVC_REG_SIZE
	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]

	/* Restore x0-x3 */
	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]

	/* Prepare the argument for the handler */
	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
	mrs	x0, elr_el1
	mrs	x1, spsr_el1
	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1

#ifdef CFG_TA_PAUTH
	/* Save APIAKEY */
	read_apiakeyhi	x0
	read_apiakeylo	x1
	store_xregs sp, THREAD_SVC_REG_APIAKEY_HI, 0, 1
#endif

	mov	x0, sp

	/*
	 * Unmask native interrupts, SError, and debug exceptions since we
	 * have nothing left in sp_el1. Note that the SVC handler is
	 * expected to re-enable foreign interrupts by itself.
	 */
#if defined(CFG_ARM_GICV3)
	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
#else
	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
#endif

	/* Call the handler */
	bl	thread_svc_handler

	/* Mask all maskable exceptions since we're switching back to sp_el1 */
	msr	daifset, #DAIFBIT_ALL

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This matters when this TA has called another TA, because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	msr	spsel, #1
	get_thread_ctx sp, 0, 1, 2
	msr	spsel, #0
	add	x1, sp, #THREAD_SVC_REG_SIZE
	str	x1, [x0, #THREAD_CTX_KERN_SP]

	/* Restore registers to the required state and return */
	ldr	x1, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
	msr	tpidr_el0, x1
	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	msr	elr_el1, x0
	msr	spsr_el1, x1
	load_xregs sp, THREAD_SVC_REG_X2, 2, 14
	mov	x30, sp
	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
	mov	sp, x0
	b_if_spsr_is_el0 w1, 1f
	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	return_from_exception

1:
#ifdef CFG_TA_PAUTH
	/* Restore APIAKEY */
	load_xregs x30, THREAD_SVC_REG_APIAKEY_HI, 0, 1
	write_apiakeyhi	x0
	write_apiakeylo	x1
#endif

	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC el0_svc

LOCAL_FUNC el1_sync_abort , :
	mov	x0, sp
	msr	spsel, #0
	mov	x3, sp	/* Save original sp */

	/*
	 * Update core local flags.
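	 * The old flags are shifted up so that a nested abort (an abort
	 * taken while already handling an abort) can be detected below, in
	 * which case the tmp stack is selected instead of the abort stack.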
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
			.Lsel_tmp_sp

	/* Select abort stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	b	.Lset_sp

.Lsel_tmp_sp:
	/* Select tmp stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

.Lset_sp:
	mov	sp, x2
	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	return_from_exception
END_FUNC el1_sync_abort

	/* sp_el0 in x3 */
LOCAL_FUNC el0_sync_abort , :
	disable_pauth x1
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */

	/* load abt_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
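	/* x0 now holds the saved SPSR, x1 the saved SP_EL0 */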
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x2 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3

	b_if_spsr_is_el0 w0, 1f

	/* Restore x0 to x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	return_from_exception
1:	b	eret_to_el0
END_FUNC el0_sync_abort

/* The handler of foreign interrupts. */
.macro foreign_intr_handler	mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0

	/* Store original sp_el0 */
	str	x2, [x0, #THREAD_CTX_REGS_SP]
	/* Store tpidr_el0 */
	mrs	x2, tpidr_el0
	str	x2, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
	/* Store x4..x30 */
	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	/* Load original x0..x3 into x10..x13 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
	/* Save original x0..x3 */
	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
	/*
	 * Prevent leaking information about which cache entries have been
	 * used. We're relying on the dispatcher in TF-A to take care of
	 * the BTB.
	 */
	mov	x0, #DCACHE_OP_CLEAN_INV
	bl	dcache_op_louis
	ic	iallu
#endif
	/*
	 * Mark current thread as suspended
	 */
	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	x1, spsr_el1
	mrs	x2, elr_el1
	bl	thread_state_suspend

	/* Update core local flags */
	/* Switch to SP_EL1 */
	msr	spsel, #1
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	msr	spsel, #0

	/*
	 * Note that we're exiting with SP_EL0 selected since the entry
	 * functions expect to have SP_EL0 selected with the tmp stack
	 * set.
	 */

	/* Passing thread index in w0 */
	b	thread_foreign_intr_exit
.endm

/*
 * This struct is never used from C; it's only here to visualize the
 * layout.
 *
 * struct elx_nintr_rec {
 *	uint64_t x[19 - 4];	x4..x18
 *	uint64_t lr;
 *	uint64_t sp_el0;
 * };
 */
#define ELX_NINTR_REC_X(x)	(8 * ((x) - 4))
#define ELX_NINTR_REC_LR	(8 + ELX_NINTR_REC_X(19))
#define ELX_NINTR_REC_SP_EL0	(8 + ELX_NINTR_REC_LR)
#define ELX_NINTR_REC_SIZE	(8 + ELX_NINTR_REC_SP_EL0)

/*
 * The handler of native interrupts.
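 *
 * Native interrupts are handled right away on the per-core tmp stack:
 * the registers that the C handlers may clobber are saved in a struct
 * elx_nintr_rec, itr_core_handler() is called and the interrupted
 * context is then restored.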
 */
.macro native_intr_handler	mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Save the registers on the stack that can be clobbered by a call
	 * to a C function
	 */
	/* Make room for struct elx_nintr_rec */
	sub	sp, sp, #ELX_NINTR_REC_SIZE
	/* Store x4..x18 */
	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Store lr and original sp_el0 */
	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]

	bl	thread_check_canaries
	bl	itr_core_handler

	/*
	 * Restore registers
	 */
	/* Restore x4..x18 */
	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Load lr and original sp_el0 */
	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
	/* Restore SP_EL0 */
	mov	sp, x2
	/* Switch back to SP_EL1 */
	msr	spsel, #1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	mrs	x0, spsr_el1
	/* Restore x2..x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
	b_if_spsr_is_el0 w0, 1f

	/* Restore x0..x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	return_from_exception
1:	b	eret_to_el0
.endm

LOCAL_FUNC elx_irq , :
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
END_FUNC elx_irq

LOCAL_FUNC elx_fiq , :
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
END_FUNC elx_fiq

BTI(emit_aarch64_feature_1_and GNU_PROPERTY_AARCH64_FEATURE_1_BTI)