/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/cache_helpers.h>
#include <kernel/thread_defs.h>
#include <kernel/unwind.h>
#include <mm/core_mmu.h>

#include "thread_private.h"

	.syntax unified
	.arch_extension sec

	.macro cmp_spsr_user_mode reg:req
		/*
		 * We're only testing the lower 4 bits as the fifth mode
		 * bit (0x10) is always set.
		 */
		tst	\reg, #0x0f
	.endm

FUNC thread_set_abt_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_ABT
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_abt_sp

FUNC thread_set_und_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_UND
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_und_sp

FUNC thread_set_irq_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_irq_sp

FUNC thread_set_fiq_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_fiq_sp

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */

	cps	#CPSR_MODE_SYS
	ldr	sp, [r12], #4
	ldr	lr, [r12], #4

	cps	#CPSR_MODE_SVC
	ldr	r1, [r12], #4
	ldr	sp, [r12], #4
	ldr	lr, [r12], #4
	msr	spsr_fsxc, r1

	ldm	r12, {r1, r2}

	/*
	 * Switch to a mode other than SVC since we need to set spsr in
	 * order to return into the old state properly, and the mode
	 * we're returning to may itself be SVC.
	 */
	cps	#CPSR_MODE_ABT
	cmp_spsr_user_mode r2
	mov	lr, r1
	msr	spsr_fsxc, r2
	ldm	r0, {r0-r12}
	movsne	pc, lr
	b	eret_to_user_mode
UNWIND(	.fnend)
END_FUNC thread_resume

/*
 * Disables IRQ and FIQ and saves the state of the thread in FIQ mode,
 * which has the banked r8-r12 registers. Returns the original CPSR.
 */
LOCAL_FUNC thread_save_state_fiq , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mov	r9, lr

	/*
	 * Uses the stack for temporary storage, while storing the needed
	 * context in the thread context struct.
	 */

	mrs	r8, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}

	cps	#CPSR_MODE_SYS
	stm	r0!, {r8-r12}
	str	sp, [r0], #4
	str	lr, [r0], #4

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	str	r1, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4

	/* back to fiq mode */
	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r8			/* Return original CPSR */
	bx	r9
UNWIND(	.fnend)
END_FUNC thread_save_state_fiq

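/*
 * Illustration only: the save/restore order used by thread_resume and
 * thread_save_state_fiq above, and by thread_save_state below, implies a
 * struct thread_ctx_regs layout roughly like the sketch here. This is a
 * guess derived from the access pattern in this file; the authoritative
 * definition is the C struct used to generate the THREAD_CTX_REGS_*
 * offsets in <generated/asm-defines.h>.
 *
 *	struct thread_ctx_regs {
 *		uint32_t r0, r1, r2, r3, r4, r5, r6;
 *		uint32_t r7, r8, r9, r10, r11, r12;
 *		uint32_t usr_sp;
 *		uint32_t usr_lr;
 *		uint32_t svc_spsr;
 *		uint32_t svc_sp;
 *		uint32_t svc_lr;
 *		uint32_t pc;
 *		uint32_t cpsr;
 *	};
 */
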
/*
 * Disables IRQ and FIQ and saves the state of the thread, returns the
 * original CPSR.
 */
FUNC thread_save_state , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r12, lr}
	/*
	 * Uses the stack for temporary storage, while storing the needed
	 * context in the thread context struct.
	 */

	mrs	r12, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mov	r5, r12			/* Save CPSR in a preserved register */
	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}
	stm	r0!, {r8-r11}

	pop	{r12, lr}
	stm	r0!, {r12}

	cps	#CPSR_MODE_SYS
	str	sp, [r0], #4
	str	lr, [r0], #4

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	str	r1, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4

	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r5			/* Return original CPSR */
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_save_state

/*
 * unsigned long thread_smc(unsigned long func_id, unsigned long a1,
 *			    unsigned long a2, unsigned long a3)
 */
FUNC thread_smc , :
UNWIND(	.fnstart)
	smc	#0
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_smc

FUNC thread_init_vbar , :
UNWIND(	.fnstart)
	/* Set vector (VBAR) */
	write_vbar r0
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_init_vbar
KEEP_PAGER thread_init_vbar

/*
 * Below are low level routines handling entry and return from user mode.
 *
 * thread_enter_user_mode() saves all the registers that user mode can
 * change so kernel mode can restore the needed registers when resuming
 * execution after the call to thread_enter_user_mode() has returned.
 * thread_enter_user_mode() doesn't return directly since it enters user
 * mode instead; it's thread_unwind_user_mode() that does the returning,
 * by restoring the registers saved by thread_enter_user_mode().
 *
 * There are three ways for thread_enter_user_mode() to return to its
 * caller: the user TA calls utee_return, the user TA calls utee_panic,
 * or through an abort.
 *
 * Calls to utee_return or utee_panic are handled as:
 * __thread_svc_handler() -> thread_svc_handler() -> tee_svc_do_call(),
 * which calls syscall_return() or syscall_panic().
 *
 * These function calls return normally, except thread_svc_handler(),
 * which is an exception handling routine and therefore reads the return
 * address and SPSR to restore from the stack. syscall_return() and
 * syscall_panic() change the return address and SPSR used by
 * thread_svc_handler() so that, instead of returning into user mode as
 * with other syscalls, it returns into thread_unwind_user_mode() in
 * kernel mode. When thread_svc_handler() returns, the stack pointer is
 * at the point where thread_enter_user_mode() left it, so this is where
 * thread_unwind_user_mode() can operate.
 *
 * Aborts are handled in a similar way, but by thread_abort_handler()
 * instead: when the pager sees that it's an abort from user mode that
 * can't be handled, it updates the SPSR and return address used by
 * thread_abort_handler() to return into thread_unwind_user_mode()
 * instead.
 */

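/*
 * Illustrative C-level sketch of the flow described above, assuming a
 * caller in thread.c along the lines of thread_enter_user_mode() (names
 * and locals here are for illustration only, not the actual
 * implementation):
 *
 *	uint32_t exit_status0 = 0;
 *	uint32_t exit_status1 = 0;
 *	uint32_t ret;
 *
 *	ret = __thread_enter_user_mode(regs, &exit_status0, &exit_status1);
 *	// Execution only gets back here once syscall_return(),
 *	// syscall_panic() or the abort path has redirected the saved
 *	// return state into thread_unwind_user_mode(), which pops the
 *	// registers pushed in __thread_enter_user_mode() and makes the
 *	// call above return with the value passed as its first argument.
 */
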
/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *				     uint32_t *exit_status0,
 *				     uint32_t *exit_status1);
 *
 * This function depends on being called with exceptions masked.
 */
FUNC __thread_enter_user_mode , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/*
	 * Save all registers to allow syscall_return() to resume execution
	 * as if this function had returned. This is also used in
	 * syscall_panic().
	 *
	 * If the stack usage of this function is changed
	 * thread_unwind_user_mode() has to be updated.
	 */
	push	{r4-r12,lr}

	/*
	 * Save old user sp and set new user sp.
	 */
	cps	#CPSR_MODE_SYS
	mov	r4, sp
	ldr	sp, [r0, #THREAD_CTX_REGS_USR_SP]
	cps	#CPSR_MODE_SVC

	push	{r1, r2, r4, r5}

	/* Prepare user mode entry via eret_to_user_mode */
	ldr	lr, [r0, #THREAD_CTX_REGS_PC]
	ldr	r4, [r0, #THREAD_CTX_REGS_CPSR]
	msr	spsr_fsxc, r4

	ldm	r0, {r0-r12}

	b	eret_to_user_mode
UNWIND(	.fnend)
END_FUNC __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *				uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* Match push {r1, r2, r4, r5} in __thread_enter_user_mode() */
	pop	{r4-r7}
	str	r1, [r4]
	str	r2, [r5]

	/* Restore old user sp */
	cps	#CPSR_MODE_SYS
	mov	sp, r6
	cps	#CPSR_MODE_SVC

	/* Match push {r4-r12,lr} in __thread_enter_user_mode() */
	pop	{r4-r12,pc}
UNWIND(	.fnend)
END_FUNC thread_unwind_user_mode

	.macro maybe_restore_mapping
		/*
		 * This macro is a bit hard to read due to all the ifdefs,
		 * we're testing for two different configs which makes four
		 * different combinations.
		 *
		 * - With LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 * - Without LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 */

		/*
		 * At this point we can't rely on any memory being writable
		 * yet, so we're using TPIDRPRW to store r0, and if
		 * configured with LPAE or CFG_CORE_UNMAP_CORE_AT_EL0,
		 * TPIDRURO to store r1 too.
		 */
		write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		write_tpidruro r1
#endif

#ifdef CFG_WITH_LPAE
		read_ttbr0_64bit r0, r1
		tst	r1, #BIT(TTBR_ASID_SHIFT - 32)
		beq	11f

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/*
		 * Update the mapping to use the full kernel mode mapping.
		 * Since the translation table could reside above 4GB we'll
		 * have to use 64-bit arithmetic.
		 */
		subs	r0, r0, #CORE_MMU_L1_TBL_OFFSET
		sbc	r1, r1, #0
#endif
		bic	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
		write_ttbr0_64bit r0, r1
		isb

#else /*!CFG_WITH_LPAE*/
		read_contextidr r0
		tst	r0, #1
		beq	11f

		/* Update the mapping to use the full kernel mode mapping. */
		bic	r0, r0, #1
		write_contextidr r0
		isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		read_ttbcr r0
		bic	r0, r0, #TTBCR_PD1
		write_ttbcr r0
		isb
#endif

#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		ldr	r0, =thread_user_kcode_offset
		ldr	r0, [r0]
		read_vbar r1
		add	r1, r1, r0
		write_vbar r1
		isb

	11:	/*
		 * The PC is adjusted unconditionally to guard against the
		 * case where there was an FIQ just before we did the
		 * "cpsid aif".
		 */
		ldr	r0, =22f
		bx	r0
	22:
#else
	11:
#endif
		read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		read_tpidruro r1
#endif
	.endm

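/*
 * Note (added explanation, derived from the code in this file): the
 * convention assumed by maybe_restore_mapping above and eret_to_user_mode
 * below appears to be that, while user mode code runs, the CPU uses the
 * reduced ("user") mapping, indicated by the ASID bit in the high word of
 * TTBR0 (LPAE) or by bit 0 of CONTEXTIDR (non-LPAE), and, with
 * CFG_CORE_UNMAP_CORE_AT_EL0, with VBAR pointing at the reduced mapping's
 * alias of this vector. On exception entry the macro detects that state
 * and switches back to the full kernel mapping before any memory is
 * written.
 */
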
/* The handler of native interrupts. */
.macro native_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	/*
	 * FIQ and IRQ have a +4 offset for lr compared to the preferred
	 * return address.
	 */
	sub	lr, lr, #4

	/*
	 * We're always saving {r0-r3}. In IRQ mode we're saving r12 also.
	 * In FIQ mode we're saving the banked fiq registers {r8-r12}_fiq
	 * because the secure monitor doesn't save those. The treatment of
	 * the banked fiq registers is somewhat analogous to the lazy save
	 * of VFP registers.
	 */
	.ifc	\mode\(),fiq
	push	{r0-r3, r8-r12, lr}
	.else
	push	{r0-r3, r12, lr}
	.endif

	bl	thread_check_canaries
	bl	itr_core_handler

	mrs	r0, spsr
	cmp_spsr_user_mode r0

	.ifc	\mode\(),fiq
	pop	{r0-r3, r8-r12, lr}
	.else
	pop	{r0-r3, r12, lr}
	.endif

	movsne	pc, lr
	b	eret_to_user_mode
.endm

/* The handler of foreign interrupts. */
.macro foreign_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	sub	lr, lr, #4
	push	{r12}

	.ifc	\mode\(),fiq
	/*
	 * If a foreign (non-secure) interrupt is received as an FIQ we
	 * need to check whether we're in a saveable state or whether we
	 * need to mask the interrupt to be handled later.
	 *
	 * The window when this is needed is quite narrow: it's between
	 * entering the exception vector and until the "cpsid" instruction
	 * of the handler has been executed.
	 *
	 * Currently we can save the state properly if the FIQ is received
	 * while in user or svc (kernel) mode.
	 *
	 * If we're returning to abort, undef or irq mode we're returning
	 * with the mapping restored. This is OK since, before the handler
	 * we're returning to eventually returns to user mode, the reduced
	 * mapping will be restored.
	 */
	mrs	r12, spsr
	and	r12, r12, #ARM32_CPSR_MODE_MASK
	cmp	r12, #ARM32_CPSR_MODE_USR
	cmpne	r12, #ARM32_CPSR_MODE_SVC
	beq	1f
	mrs	r12, spsr
	orr	r12, r12, #ARM32_CPSR_F
	msr	spsr_fsxc, r12
	pop	{r12}
	movs	pc, lr
1:
	.endif

	push	{lr}

	.ifc	\mode\(),fiq
	bl	thread_save_state_fiq
	.else
	bl	thread_save_state
	.endif

#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
	/*
	 * Prevent leaking information about which entries have been used
	 * in the cache. We're relying on the secure monitor/dispatcher to
	 * take care of the BTB.
	 */
	mov	r0, #DCACHE_OP_CLEAN_INV
	bl	dcache_op_louis
	write_iciallu
#endif

	mov	r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	r1, spsr
	pop	{r2}
	pop	{r12}
	blx	thread_state_suspend

	/*
	 * Switch to SVC mode and copy the current stack pointer as it
	 * already is the tmp stack.
	 */
	mov	r1, sp
	cps	#CPSR_MODE_SVC
	mov	sp, r1

	/* Passing thread index in r0 */
	b	thread_foreign_intr_exit
.endm

	.align	5
FUNC thread_excp_vect , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	.			/* Reset */
	b	__thread_und_handler	/* Undefined instruction */
	b	__thread_svc_handler	/* System call */
	b	__thread_pabort_handler	/* Prefetch abort */
	b	__thread_dabort_handler	/* Data abort */
	b	.			/* Reserved */
	b	__thread_irq_handler	/* IRQ */
	b	__thread_fiq_handler	/* FIQ */
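
/*
 * Added explanation of the dispatch used by the Spectre workaround
 * vectors below: vector_prologue_spectre encodes the entry number in the
 * low three bits of SP (every entry but the last adds 1 to SP). The
 * decode at the shared label then does:
 *
 *	and	r0, sp, #7		; recover the entry number
 *	bic	sp, sp, #7		; restore SP
 *	add	pc, pc, r0, LSL #3	; jump into the slot table
 *
 * Each slot is two instructions (8 bytes, hence LSL #3) and the ARM-state
 * PC reads as the address of the first slot, so e.g. an IRQ entry (one
 * "add sp, sp, #1", r0 == 1) lands on the second slot, which branches to
 * __thread_irq_handler.
 */
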
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	.macro vector_prologue_spectre
		/*
		 * This depends on SP being 8 byte aligned, that is, the
		 * lowest three bits in SP are zero.
		 *
		 * To avoid unexpected speculation we need to invalidate
		 * the branch predictor before we do the first branch. It
		 * doesn't matter if it's a conditional or an unconditional
		 * branch, speculation can still occur.
		 *
		 * The idea is to form a specific bit pattern in the lowest
		 * three bits of SP depending on which entry in the vector
		 * table we enter via. This is done by adding 1 to SP in
		 * each entry but the last.
		 */
		add	sp, sp, #1	/* 7:111 Reset */
		add	sp, sp, #1	/* 6:110 Undefined instruction */
		add	sp, sp, #1	/* 5:101 Secure monitor call */
		add	sp, sp, #1	/* 4:100 Prefetch abort */
		add	sp, sp, #1	/* 3:011 Data abort */
		add	sp, sp, #1	/* 2:010 Reserved */
		add	sp, sp, #1	/* 1:001 IRQ */
		cpsid	aif		/* 0:000 FIQ */
	.endm

	.align	5
	.global thread_excp_vect_workaround_a15
thread_excp_vect_workaround_a15:
	vector_prologue_spectre
	write_tpidrprw r0
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f

	.align	5
	.global thread_excp_vect_workaround
thread_excp_vect_workaround:
	vector_prologue_spectre
	write_tpidrprw r0
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

1:	and	r0, sp, #(BIT(0) | BIT(1) | BIT(2))
	bic	sp, sp, #(BIT(0) | BIT(1) | BIT(2))
	add	pc, pc, r0, LSL #3
	nop

	read_tpidrprw r0
	b	__thread_fiq_handler	/* FIQ */
	read_tpidrprw r0
	b	__thread_irq_handler	/* IRQ */
	read_tpidrprw r0
	b	.			/* Reserved */
	read_tpidrprw r0
	b	__thread_dabort_handler	/* Data abort */
	read_tpidrprw r0
	b	__thread_pabort_handler	/* Prefetch abort */
	read_tpidrprw r0
	b	__thread_svc_handler	/* System call */
	read_tpidrprw r0
	b	__thread_und_handler	/* Undefined instruction */
	read_tpidrprw r0
	b	.			/* Reset */
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

__thread_und_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	mrs	r1, spsr
	tst	r1, #CPSR_T
	subne	lr, lr, #2
	subeq	lr, lr, #4
	mov	r0, #ABORT_TYPE_UNDEF
	b	__thread_abort_common

__thread_dabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #8
	mov	r0, #ABORT_TYPE_DATA
	b	__thread_abort_common

__thread_pabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #4
	mov	r0, #ABORT_TYPE_PREFETCH

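/*
 * Illustration only: the stores in __thread_abort_common below build a
 * struct thread_abort_regs on the selected stack. The store order
 * suggests a layout roughly like this sketch; the authoritative
 * definition is the C struct in thread.h.
 *
 *	struct thread_abort_regs {
 *		uint32_t usr_sp;
 *		uint32_t usr_lr;
 *		uint32_t pad;
 *		uint32_t spsr;
 *		uint32_t elr;
 *		uint32_t r0, r1, r2, r3, r4, r5;
 *		uint32_t r6, r7, r8, r9, r10, r11;
 *		uint32_t ip;
 *	};
 */
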
__thread_abort_common:
	/*
	 * At this label:
	 * cpsr is in mode undef or abort
	 * sp is still pointing to the struct thread_core_local belonging
	 * to this core.
	 * The original {r0, r1} are saved in the struct thread_core_local
	 * pointed to by sp.
	 * {r2-r11, ip} are untouched.
	 * r0 holds the first argument for abort_handler()
	 */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr	r1, r1, #THREAD_CLF_ABORT

	/*
	 * Select stack and update flags accordingly.
	 *
	 * Normal case:
	 * If the abort stack is unused select that.
	 *
	 * Fatal error handling:
	 * If we're already using the abort stack, as noted by bit
	 * (THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT) in the flags
	 * field, we're selecting the temporary stack instead to be able
	 * to make a stack trace of the abort in abort mode.
	 *
	 * r1 is initialized as a temporary stack pointer until we've
	 * switched to system mode.
	 */
	tst	r1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	orrne	r1, r1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	ldrne	r1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	ldreq	r1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]

	/*
	 * Store registers on the selected stack to fit struct
	 * thread_abort_regs, starting from the end of the struct:
	 * {r2-r11, ip} first.
	 * Then load the previously saved {r0-r1} and store them together
	 * with {pad, spsr, elr}.
	 * After this only {usr_sp, usr_lr} are missing in the struct.
	 */
	stmdb	r1!, {r2-r11, ip}	/* Push on the selected stack */
	ldrd	r2, r3, [sp, #THREAD_CORE_LOCAL_R0]
	/* Push the original {r0-r1} on the selected stack */
	stmdb	r1!, {r2-r3}
	mrs	r3, spsr
	/* Push {pad, spsr, elr} on the selected stack */
	stmdb	r1!, {r2, r3, lr}

	cps	#CPSR_MODE_SYS
	str	lr, [r1, #-4]!
	str	sp, [r1, #-4]!
	mov	sp, r1

	bl	abort_handler

	mov	ip, sp
	ldr	sp, [ip], #4
	ldr	lr, [ip], #4

	/*
	 * Even if we entered via CPSR_MODE_UND, we are returning via
	 * CPSR_MODE_ABT. It doesn't matter as lr and spsr are assigned
	 * here.
	 */
	cps	#CPSR_MODE_ABT
	ldm	ip!, {r0, r1, lr}	/* r0 is pad */
	msr	spsr_fsxc, r1

	/* Update core local flags */
	ldr	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	r0, r0, #THREAD_CLF_SAVED_SHIFT
	str	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	cmp_spsr_user_mode r1
	ldm	ip, {r0-r11, ip}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_abort_common */

__thread_svc_handler:
	cpsid	aif

	maybe_restore_mapping

	push	{r0-r7, lr}
	mrs	r0, spsr
	push	{r0}
	mov	r0, sp
	bl	thread_svc_handler
	cpsid	aif	/* In case something was unmasked */
	pop	{r0}
	msr	spsr_fsxc, r0
	cmp_spsr_user_mode r0
	pop	{r0-r7, lr}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_svc_handler */

__thread_fiq_handler:
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
	/* end thread_fiq_handler */

__thread_irq_handler:
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
	/* end thread_irq_handler */

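/*
 * Note on the handler selection above (added explanation): with
 * CFG_ARM_GICV3 the secure world typically takes its own (native)
 * interrupts as IRQ and normal world (foreign) interrupts as FIQ, which
 * is the reverse of the legacy GICv2 configuration. Hence the fiq/irq
 * vectors swap between native_intr_handler and foreign_intr_handler
 * depending on the configuration.
 */
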
/*
 * Returns to user mode.
 * Expects to be jumped to with lr pointing to the user space address to
 * jump to and spsr holding the desired cpsr. Async abort, irq and fiq
 * should be masked.
 */
eret_to_user_mode:
	write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	write_tpidruro r1
#endif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r0, =thread_user_kcode_offset
	ldr	r0, [r0]
	read_vbar r1
	sub	r1, r1, r0
	write_vbar r1
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r1, =1f
	sub	r1, r1, r0
	bx	r1
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r0, r1
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r0, r0, #CORE_MMU_L1_TBL_OFFSET
#endif
	/* switch to user ASID */
	orr	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
	write_ttbr0_64bit r0, r1
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbcr r0
	orr	r0, r0, #TTBCR_PD1
	write_ttbcr r0
	isb
#endif
	read_contextidr r0
	orr	r0, r0, #BIT(0)
	write_contextidr r0
	isb
#endif /*!CFG_WITH_LPAE*/

	read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	read_tpidruro r1
#endif

	movs	pc, lr

	/*
	 * void icache_inv_user_range(void *addr, size_t size);
	 *
	 * This function has to execute with the user space ASID active,
	 * which means executing with the reduced mapping; hence the code
	 * needs to be located here together with the vector.
	 */
	.global icache_inv_user_range
	.type icache_inv_user_range , %function
icache_inv_user_range:
	push	{r4-r7}

	/* Mask all exceptions */
	mrs	r4, cpsr	/* This register must be preserved */
	cpsid	aif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r2, =thread_user_kcode_offset
	ldr	r2, [r2]
	read_vbar r5		/* This register must be preserved */
	sub	r3, r5, r2
	write_vbar r3
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r3, =1f
	sub	r3, r3, r2
	bx	r3
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r6, r7	/* These registers must be preserved */
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r2, r6, #CORE_MMU_L1_TBL_OFFSET
#endif
	/* switch to user ASID */
	orr	r3, r7, #BIT(TTBR_ASID_SHIFT - 32)
	write_ttbr0_64bit r2, r3
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbcr r6		/* This register must be preserved */
	orr	r2, r6, #TTBCR_PD1
	write_ttbcr r2
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	read_contextidr r7	/* This register must be preserved */
	orr	r2, r7, #BIT(0)
	write_contextidr r2
	isb
#endif /*!CFG_WITH_LPAE*/

	/*
	 * Do the actual icache invalidation
	 */

	/* Calculate minimum icache line size, result in r2 */
	read_ctr r3
	and	r3, r3, #CTR_IMINLINE_MASK
	mov	r2, #CTR_WORD_SIZE
	lsl	r2, r2, r3

	add	r1, r0, r1
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	write_icimvau r0
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b

	/* Invalidate entire branch predictor array inner shareable */
	write_bpiallis

	dsb	ishst
	isb

#ifdef CFG_WITH_LPAE
	write_ttbr0_64bit r6, r7
	isb
#else /*!CFG_WITH_LPAE*/
	write_contextidr r7
	isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_ttbcr r6
	isb
#endif
#endif /*!CFG_WITH_LPAE*/

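	/*
	 * Added note: at this point the kernel translation tables are back
	 * in place. With CFG_CORE_UNMAP_CORE_AT_EL0 the block below also
	 * restores the original VBAR saved in r5 and moves the PC back via
	 * an absolute (full mapping) address, mirroring the switch to the
	 * reduced mapping done at the top of this function.
	 */
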
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_vbar r5
	isb
	/*
	 * The PC is adjusted unconditionally to guard against the
	 * case where there was an FIQ just before we did the "cpsid aif".
	 */
	ldr	r0, =1f
	bx	r0
1:
#endif

	msr	cpsr_fsxc, r4	/* Restore exceptions */
	pop	{r4-r7}
	bx	lr		/* End of icache_inv_user_range() */

	/*
	 * Make sure that literals are placed before the
	 * thread_excp_vect_end label.
	 */
	.pool
UNWIND(	.fnend)
	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect