/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/cache_helpers.h>
#include <kernel/thread_defs.h>
#include <kernel/unwind.h>
#include <mm/core_mmu.h>

#include "thread_private.h"

	.syntax unified
	.arch_extension sec

	.macro cmp_spsr_user_mode reg:req
	/*
	 * We're only testing the lower 4 bits as the 0x10 bit
	 * is always set.
	 */
	tst	\reg, #0x0f
	.endm

FUNC thread_set_abt_sp , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_ABT
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND( .fnend)
END_FUNC thread_set_abt_sp

FUNC thread_set_und_sp , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_UND
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND( .fnend)
END_FUNC thread_set_und_sp

FUNC thread_set_irq_sp , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND( .fnend)
END_FUNC thread_set_irq_sp

FUNC thread_set_fiq_sp , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND( .fnend)
END_FUNC thread_set_fiq_sp

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */

	cps	#CPSR_MODE_SYS
	ldr	sp, [r12], #4
	ldr	lr, [r12], #4

	cps	#CPSR_MODE_SVC
	ldr	r1, [r12], #4
	ldr	sp, [r12], #4
	ldr	lr, [r12], #4
	msr	spsr_fsxc, r1

	ldm	r12, {r1, r2}

	/*
	 * Switch to a mode other than SVC since we need to set spsr in
	 * order to return into the old state properly, and it may be SVC
	 * mode we're returning to.
	 */
	cps	#CPSR_MODE_ABT
	cmp_spsr_user_mode r2
	mov	lr, r1
	msr	spsr_fsxc, r2
	ldm	r0, {r0-r12}
	movsne	pc, lr
	b	eret_to_user_mode
UNWIND( .fnend)
END_FUNC thread_resume

/*
 * Disables IRQ and FIQ and saves the state of a thread from FIQ mode,
 * which has banked r8-r12 registers; returns the original CPSR.
 */
LOCAL_FUNC thread_save_state_fiq , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
	mov	r9, lr

	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r8, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}

	cps	#CPSR_MODE_SYS
	stm	r0!, {r8-r12}
	str	sp, [r0], #4
	str	lr, [r0], #4

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	str	r1, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4

	/* back to fiq mode */
	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r8			/* Return original CPSR */
	bx	r9
UNWIND( .fnend)
END_FUNC thread_save_state_fiq

/*
 * Disables IRQ and FIQ and saves the state of the thread, returns the
 * original CPSR.
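 *
 * The state is stored in the struct thread_ctx_regs returned by
 * thread_get_ctx_regs(). Asynchronous abort, IRQ and FIQ are left
 * masked when this function returns; the caller gets the original
 * CPSR back in r0.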
 */
FUNC thread_save_state , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
	push	{r12, lr}
	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r12, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mov	r5, r12			/* Save CPSR in a preserved register */
	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}
	stm	r0!, {r8-r11}

	pop	{r12, lr}
	stm	r0!, {r12}

	cps	#CPSR_MODE_SYS
	str	sp, [r0], #4
	str	lr, [r0], #4

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	str	r1, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4

	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r5			/* Return original CPSR */
	bx	lr
UNWIND( .fnend)
END_FUNC thread_save_state

/*
 * unsigned long thread_smc(unsigned long func_id, unsigned long a1,
 *			    unsigned long a2, unsigned long a3)
 */
FUNC thread_smc , :
UNWIND( .fnstart)
	smc	#0
	bx	lr
UNWIND( .fnend)
END_FUNC thread_smc

FUNC thread_init_vbar , :
UNWIND( .fnstart)
	/* Set vector (VBAR) */
	write_vbar r0
	bx	lr
UNWIND( .fnend)
END_FUNC thread_init_vbar
DECLARE_KEEP_PAGER thread_init_vbar

/*
 * Below are the low level routines handling entry to and return from
 * user mode.
 *
 * thread_enter_user_mode() saves all the registers that user mode can
 * change so that kernel mode can restore the needed registers when
 * resuming execution after the call to thread_enter_user_mode() has
 * returned. thread_enter_user_mode() doesn't return directly since it
 * enters user mode instead, it's thread_unwind_user_mode() that does
 * the returning by restoring the registers saved by
 * thread_enter_user_mode().
 *
 * There are three ways for thread_enter_user_mode() to return to its
 * caller: the user TA calls _utee_return, the user TA calls
 * _utee_panic, or an abort occurs.
 *
 * Calls to _utee_return or _utee_panic are handled as:
 * __thread_svc_handler() -> thread_svc_handler() -> tee_svc_do_call()
 * which calls syscall_return() or syscall_panic().
 *
 * These function calls return normally except thread_svc_handler(),
 * which is an exception handling routine and therefore reads the
 * return address and SPSR to restore from the stack. syscall_return()
 * and syscall_panic() change the return address and SPSR used by
 * thread_svc_handler() so that, instead of returning into user mode as
 * with other syscalls, it returns into thread_unwind_user_mode() in
 * kernel mode. When thread_svc_handler() returns, the stack pointer is
 * at the point where thread_enter_user_mode() left it, so this is
 * where thread_unwind_user_mode() can operate.
 *
 * Aborts are handled in a similar way, but by thread_abort_handler()
 * instead: when the pager sees that it's an abort from user mode that
 * can't be handled, it updates the SPSR and return address used by
 * thread_abort_handler() to return into thread_unwind_user_mode()
 * instead.
 */

/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *				     uint32_t *exit_status0,
 *				     uint32_t *exit_status1);
 *
 * This function depends on being called with exceptions masked.
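 *
 * The stack frame pushed below, and consumed again by
 * thread_unwind_user_mode(), holds (from the final stack pointer and
 * up): the exit_status0 and exit_status1 pointers, the saved user
 * mode sp, the caller's r5, and finally the r4-r12 and lr saved by
 * the first push.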
 */
FUNC __thread_enter_user_mode , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
	/*
	 * Save all registers to allow syscall_return() to resume execution
	 * as if this function had returned. This is also used in
	 * syscall_panic().
	 *
	 * If the stack usage of this function is changed
	 * thread_unwind_user_mode() has to be updated.
	 */
	push	{r4-r12,lr}

	/*
	 * Save old user sp and set new user sp.
	 */
	cps	#CPSR_MODE_SYS
	mov	r4, sp
	ldr	sp, [r0, #THREAD_CTX_REGS_USR_SP]
	cps	#CPSR_MODE_SVC

	push	{r1, r2, r4, r5}

	/* Prepare user mode entry via eret_to_user_mode */
	ldr	lr, [r0, #THREAD_CTX_REGS_PC]
	ldr	r4, [r0, #THREAD_CTX_REGS_CPSR]
	msr	spsr_fsxc, r4

	ldm	r0, {r0-r12}

	b	eret_to_user_mode
UNWIND( .fnend)
END_FUNC __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *			        uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
	/* Match push {r1, r2, r4, r5} in __thread_enter_user_mode() */
	pop	{r4-r7}
	str	r1, [r4]
	str	r2, [r5]

	/* Restore old user sp */
	cps	#CPSR_MODE_SYS
	mov	sp, r6
	cps	#CPSR_MODE_SVC

	/* Match push {r4-r12,lr} in __thread_enter_user_mode() */
	pop	{r4-r12,pc}
UNWIND( .fnend)
END_FUNC thread_unwind_user_mode

	.macro maybe_restore_mapping
	/*
	 * This macro is a bit hard to read due to all the ifdefs,
	 * we're testing for two different configs which makes four
	 * different combinations.
	 *
	 * - With LPAE, and then some extra code if with
	 *   CFG_CORE_UNMAP_CORE_AT_EL0
	 * - Without LPAE, and then some extra code if with
	 *   CFG_CORE_UNMAP_CORE_AT_EL0
	 */

	/*
	 * At this point we can't rely on any memory being writable
	 * yet, so we're using TPIDRPRW to store r0 and, when LPAE or
	 * CFG_CORE_UNMAP_CORE_AT_EL0 is enabled, TPIDRURO to store r1
	 * too.
	 */
	write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	write_tpidruro r1
#endif

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r0, r1
	tst	r1, #BIT(TTBR_ASID_SHIFT - 32)
	beq	11f

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/*
	 * Update the mapping to use the full kernel mode mapping.
	 * Since the translation table could reside above 4GB we'll
	 * have to use 64-bit arithmetic.
	 */
	subs	r0, r0, #CORE_MMU_L1_TBL_OFFSET
	sbc	r1, r1, #0
#endif
	bic	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
	write_ttbr0_64bit r0, r1
	isb

#else /*!CFG_WITH_LPAE*/
	read_contextidr r0
	tst	r0, #1
	beq	11f

	/* Update the mapping to use the full kernel mode mapping. */
	bic	r0, r0, #1
	write_contextidr r0
	isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbcr r0
	bic	r0, r0, #TTBCR_PD1
	write_ttbcr r0
	isb
#endif

#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r0, =thread_user_kcode_offset
	ldr	r0, [r0]
	read_vbar r1
	add	r1, r1, r0
	write_vbar r1
	isb

	11: /*
	 * The PC is adjusted unconditionally to guard against the
	 * case where there was an FIQ just before we did the
	 * "cpsid aif".
	 */
	ldr	r0, =22f
	bx	r0
	22:
#else
	11:
#endif
	read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	read_tpidruro r1
#endif
	.endm

/* The handler of native interrupts. */
.macro native_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	/*
	 * FIQ and IRQ have a +4 offset for lr compared to the preferred
	 * return address.
	 */
	sub	lr, lr, #4

	/*
	 * We're always saving {r0-r3}. In IRQ mode we're saving r12 as
	 * well. In FIQ mode we're also saving the banked FIQ registers
	 * {r8-r12} because the secure monitor doesn't save those. The
	 * treatment of the banked FIQ registers is somewhat analogous
	 * to the lazy save of VFP registers.
	 */
	.ifc	\mode\(),fiq
	push	{r0-r3, r8-r12, lr}
	.else
	push	{r0-r3, r12, lr}
	.endif

	bl	thread_check_canaries
	bl	itr_core_handler

	mrs	r0, spsr
	cmp_spsr_user_mode r0

	.ifc	\mode\(),fiq
	pop	{r0-r3, r8-r12, lr}
	.else
	pop	{r0-r3, r12, lr}
	.endif

	movsne	pc, lr
	b	eret_to_user_mode
.endm

/* The handler of foreign interrupts. */
.macro foreign_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	sub	lr, lr, #4
	push	{r12}

	.ifc	\mode\(),fiq
	/*
	 * If a foreign (non-secure) interrupt is received as an FIQ we
	 * need to check whether we're in a state where it can be saved,
	 * or whether we need to mask the interrupt to be handled later.
	 *
	 * The window when this is needed is quite narrow, it's between
	 * entering the exception vector and until the "cpsid" instruction
	 * of the handler has been executed.
	 *
	 * Currently we can save the state properly if the FIQ is received
	 * while in user or svc (kernel) mode.
	 *
	 * If we're returning to abort, undef or irq mode we're returning
	 * with the mapping restored. This is OK since the reduced mapping
	 * will be restored again before the handler we're returning to
	 * eventually returns to user mode.
	 */
	mrs	r12, spsr
	and	r12, r12, #ARM32_CPSR_MODE_MASK
	cmp	r12, #ARM32_CPSR_MODE_USR
	cmpne	r12, #ARM32_CPSR_MODE_SVC
	beq	1f
	mrs	r12, spsr
	orr	r12, r12, #ARM32_CPSR_F
	msr	spsr_fsxc, r12
	pop	{r12}
	movs	pc, lr
1:
	.endif

	push	{lr}

	.ifc	\mode\(),fiq
	bl	thread_save_state_fiq
	.else
	bl	thread_save_state
	.endif

#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
	/*
	 * Prevent leaking information about which cache entries have
	 * been used. We're relying on the secure monitor/dispatcher to
	 * take care of the BTB.
	 */
	mov	r0, #DCACHE_OP_CLEAN_INV
	bl	dcache_op_louis
	write_iciallu
#endif

	/*
	 * Use SP_abt to update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_TMP;
	 */
	cps	#CPSR_MODE_ABT
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr	r1, r1, #THREAD_CLF_TMP
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	.ifc	\mode\(),fiq
	cps	#CPSR_MODE_FIQ
	.else
	cps	#CPSR_MODE_IRQ
	.endif

	mov	r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	r1, spsr
	pop	{r2}
	pop	{r12}
	blx	thread_state_suspend

	/*
	 * Switch to SVC mode and copy the current stack pointer as it
	 * already is the tmp stack.
	 */
	mov	r1, sp
	cps	#CPSR_MODE_SVC
	mov	sp, r1

	/* Passing thread index in r0 */
	b	thread_foreign_intr_exit
.endm

	.align	5
FUNC thread_excp_vect , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
	b	.			/* Reset */
	b	__thread_und_handler	/* Undefined instruction */
	b	__thread_svc_handler	/* System call */
	b	__thread_pabort_handler	/* Prefetch abort */
	b	__thread_dabort_handler	/* Data abort */
	b	.			/* Reserved */
	b	__thread_irq_handler	/* IRQ */
	b	__thread_fiq_handler	/* FIQ */
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	.macro vector_prologue_spectre
	/*
	 * This depends on SP being 8-byte aligned, that is, the
	 * lowest three bits in SP are zero.
	 *
	 * To avoid unexpected speculation we need to invalidate
	 * the branch predictor before we do the first branch. It
	 * doesn't matter if it's a conditional or an unconditional
	 * branch, speculation can still occur.
	 *
	 * The idea is to form a specific bit pattern in the lowest
	 * three bits of SP depending on which entry in the vector
	 * we enter via. This is done by adding 1 to SP in each
	 * entry but the last.
	 */
	add	sp, sp, #1	/* 7:111 Reset */
	add	sp, sp, #1	/* 6:110 Undefined instruction */
	add	sp, sp, #1	/* 5:101 Secure monitor call */
	add	sp, sp, #1	/* 4:100 Prefetch abort */
	add	sp, sp, #1	/* 3:011 Data abort */
	add	sp, sp, #1	/* 2:010 Reserved */
	add	sp, sp, #1	/* 1:001 IRQ */
	cpsid	aif		/* 0:000 FIQ */
	.endm

	.align	5
	.global thread_excp_vect_workaround_a15
thread_excp_vect_workaround_a15:
	vector_prologue_spectre
	write_tpidrprw r0
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f

	.align	5
	.global thread_excp_vect_workaround
thread_excp_vect_workaround:
	vector_prologue_spectre
	write_tpidrprw r0
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

1:	and	r0, sp, #(BIT(0) | BIT(1) | BIT(2))
	bic	sp, sp, #(BIT(0) | BIT(1) | BIT(2))
	add	pc, pc, r0, LSL #3
	nop

	read_tpidrprw r0
	b	__thread_fiq_handler	/* FIQ */
	read_tpidrprw r0
	b	__thread_irq_handler	/* IRQ */
	read_tpidrprw r0
	b	.			/* Reserved */
	read_tpidrprw r0
	b	__thread_dabort_handler	/* Data abort */
	read_tpidrprw r0
	b	__thread_pabort_handler	/* Prefetch abort */
	read_tpidrprw r0
	b	__thread_svc_handler	/* System call */
	read_tpidrprw r0
	b	__thread_und_handler	/* Undefined instruction */
	read_tpidrprw r0
	b	.			/* Reset */
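	/*
	 * The branch table above is in reverse order compared with the
	 * vector since the value formed in the low bits of SP is 0 for
	 * an FIQ entry and 7 for a Reset entry. The "add pc, pc, r0,
	 * LSL #3" above steps through the table in pairs of
	 * instructions (8 bytes), with PC reading as the address of
	 * that add plus 8.
	 */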
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

__thread_und_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	mrs	r1, spsr
	tst	r1, #CPSR_T
	subne	lr, lr, #2
	subeq	lr, lr, #4
	mov	r0, #ABORT_TYPE_UNDEF
	b	__thread_abort_common

__thread_dabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #8
	mov	r0, #ABORT_TYPE_DATA
	b	__thread_abort_common

__thread_pabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #4
	mov	r0, #ABORT_TYPE_PREFETCH

__thread_abort_common:
	/*
	 * At this label:
	 * cpsr is in undef or abort mode
	 * sp is still pointing to the struct thread_core_local
	 * belonging to this core.
	 * {r0, r1} are saved in the struct thread_core_local pointed
	 * to by sp
	 * {r2-r11, ip} are untouched.
	 * r0 holds the first argument for abort_handler()
	 */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr	r1, r1, #THREAD_CLF_ABORT

	/*
	 * Select stack and update flags accordingly.
	 *
	 * Normal case:
	 * If the abort stack is unused select that.
	 *
	 * Fatal error handling:
	 * If we're already using the abort stack, as noted by bit
	 * (THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT) in the flags
	 * field, we're selecting the temporary stack instead to be able
	 * to make a stack trace of the abort in abort mode.
	 *
	 * r1 is used as a temporary stack pointer until we've switched
	 * to system mode.
	 */
	tst	r1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	orrne	r1, r1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	ldrne	r1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	ldreq	r1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]

	/*
	 * Store registers on the stack, fitting struct thread_abort_regs
	 * and starting from the end of the struct:
	 * {r2-r11, ip}
	 * Then load the previously saved {r0-r1} and store them too,
	 * up to the pad field.
	 * After this only {usr_sp, usr_lr} are missing in the struct.
	 */
	stmdb	r1!, {r2-r11, ip}	/* Push on the selected stack */
	ldrd	r2, r3, [sp, #THREAD_CORE_LOCAL_R0]
	/* Push the original {r0-r1} on the selected stack */
	stmdb	r1!, {r2-r3}
	mrs	r3, spsr
	/* Push {pad, spsr, elr} on the selected stack */
	stmdb	r1!, {r2, r3, lr}

	cps	#CPSR_MODE_SYS
	str	lr, [r1, #-4]!
	str	sp, [r1, #-4]!
	mov	sp, r1

	bl	abort_handler

	mov	ip, sp
	ldr	sp, [ip], #4
	ldr	lr, [ip], #4

	/*
	 * Even if we entered via CPSR_MODE_UND, we are returning via
	 * CPSR_MODE_ABT. It doesn't matter as lr and spsr are assigned
	 * here.
	 */
	cps	#CPSR_MODE_ABT
	ldm	ip!, {r0, r1, lr}	/* r0 is pad */
	msr	spsr_fsxc, r1

	/* Update core local flags */
	ldr	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	r0, r0, #THREAD_CLF_SAVED_SHIFT
	str	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	cmp_spsr_user_mode r1
	ldm	ip, {r0-r11, ip}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_abort_common */

__thread_svc_handler:
	cpsid	aif

	maybe_restore_mapping

	push	{r0-r7, lr}
	mrs	r0, spsr
	push	{r0}
	mov	r0, sp
	bl	thread_svc_handler
	cpsid	aif	/* In case something was unmasked */
	pop	{r0}
	msr	spsr_fsxc, r0
	cmp_spsr_user_mode r0
	pop	{r0-r7, lr}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_svc_handler */

__thread_fiq_handler:
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
	/* end thread_fiq_handler */

__thread_irq_handler:
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
	/* end thread_irq_handler */

	/*
	 * Returns to user mode.
	 * Expects to be jumped to with lr pointing to the user space
	 * address to jump to and spsr holding the desired cpsr. Async
	 * abort, irq and fiq should be masked.
	 */
eret_to_user_mode:
	write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	write_tpidruro r1
#endif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r0, =thread_user_kcode_offset
	ldr	r0, [r0]
	read_vbar r1
	sub	r1, r1, r0
	write_vbar r1
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r1, =1f
	sub	r1, r1, r0
	bx	r1
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r0, r1
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r0, r0, #CORE_MMU_L1_TBL_OFFSET
#endif
	/* Switch to the user ASID */
	orr	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
	write_ttbr0_64bit r0, r1
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbcr r0
	orr	r0, r0, #TTBCR_PD1
	write_ttbcr r0
	isb
#endif
	read_contextidr r0
	orr	r0, r0, #BIT(0)
	write_contextidr r0
	isb
#endif /*!CFG_WITH_LPAE*/

	read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	read_tpidruro r1
#endif

	movs	pc, lr

	/*
	 * void icache_inv_user_range(void *addr, size_t size);
	 *
	 * This function has to execute with the user space ASID active,
	 * which means executing with the reduced mapping, so the code
	 * needs to be located here together with the vector.
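	 *
	 * r0 holds the start address and r1 the size in bytes of the
	 * range to invalidate. The translation regime changes made
	 * below and the exception mask are restored before returning.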
	 */
	.global icache_inv_user_range
	.type icache_inv_user_range , %function
icache_inv_user_range:
	push	{r4-r7}

	/* Mask all exceptions */
	mrs	r4, cpsr	/* This register must be preserved */
	cpsid	aif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r2, =thread_user_kcode_offset
	ldr	r2, [r2]
	read_vbar r5		/* This register must be preserved */
	sub	r3, r5, r2
	write_vbar r3
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r3, =1f
	sub	r3, r3, r2
	bx	r3
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r6, r7	/* These registers must be preserved */
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r2, r6, #CORE_MMU_L1_TBL_OFFSET
#endif
	/* Switch to the user ASID */
	orr	r3, r7, #BIT(TTBR_ASID_SHIFT - 32)
	write_ttbr0_64bit r2, r3
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbcr r6		/* This register must be preserved */
	orr	r2, r6, #TTBCR_PD1
	write_ttbcr r2
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	read_contextidr r7	/* This register must be preserved */
	orr	r2, r7, #BIT(0)
	write_contextidr r2
	isb
#endif /*!CFG_WITH_LPAE*/

	/*
	 * Do the actual icache invalidation
	 */

	/* Calculate minimum icache line size, result in r2 */
	read_ctr r3
	and	r3, r3, #CTR_IMINLINE_MASK
	mov	r2, #CTR_WORD_SIZE
	lsl	r2, r2, r3

	add	r1, r0, r1
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	write_icimvau r0
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b

	/* Invalidate entire branch predictor array inner shareable */
	write_bpiallis

	dsb	ishst
	isb

#ifdef CFG_WITH_LPAE
	write_ttbr0_64bit r6, r7
	isb
#else /*!CFG_WITH_LPAE*/
	write_contextidr r7
	isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_ttbcr r6
	isb
#endif
#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_vbar r5
	isb
	/*
	 * The PC is adjusted unconditionally to guard against the
	 * case where there was an FIQ just before we did the
	 * "cpsid aif".
	 */
	ldr	r0, =1f
	bx	r0
1:
#endif

	msr	cpsr_fsxc, r4	/* Restore exceptions */
	pop	{r4-r7}
	bx	lr		/* End of icache_inv_user_range() */

	/*
	 * Make sure that literals are placed before the
	 * thread_excp_vect_end label.
	 */
	.pool
UNWIND( .fnend)
	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect