/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/cache_helpers.h>
#include <kernel/thread_defs.h>
#include <kernel/unwind.h>
#include <mm/core_mmu.h>

#include "thread_private.h"

        .syntax unified
        .arch_extension sec

        .macro cmp_spsr_user_mode reg:req
        /*
         * We only need to test the lower four mode bits since the
         * 0x10 bit is always set in any valid mode.
         */
        tst     \reg, #0x0f
        .endm

FUNC thread_set_abt_sp , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        mrs     r1, cpsr
        cps     #CPSR_MODE_ABT
        mov     sp, r0
        msr     cpsr, r1
        bx      lr
UNWIND( .fnend)
END_FUNC thread_set_abt_sp

FUNC thread_set_und_sp , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        mrs     r1, cpsr
        cps     #CPSR_MODE_UND
        mov     sp, r0
        msr     cpsr, r1
        bx      lr
UNWIND( .fnend)
END_FUNC thread_set_und_sp

FUNC thread_set_irq_sp , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        mrs     r1, cpsr
        cps     #CPSR_MODE_IRQ
        mov     sp, r0
        msr     cpsr, r1
        bx      lr
UNWIND( .fnend)
END_FUNC thread_set_irq_sp

FUNC thread_set_fiq_sp , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        mrs     r1, cpsr
        cps     #CPSR_MODE_FIQ
        mov     sp, r0
        msr     cpsr, r1
        bx      lr
UNWIND( .fnend)
END_FUNC thread_set_fiq_sp

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        add     r12, r0, #(13 * 4)      /* Restore registers r0-r12 later */

        cps     #CPSR_MODE_SYS
        ldr     sp, [r12], #4
        ldr     lr, [r12], #4

        cps     #CPSR_MODE_SVC
        ldr     r1, [r12], #4
        ldr     sp, [r12], #4
        ldr     lr, [r12], #4
        msr     spsr_fsxc, r1

        ldm     r12, {r1, r2}

        /*
         * Switch to a mode other than SVC before setting SPSR: we need
         * SPSR to return into the old state properly, and the mode we
         * are returning to may itself be SVC.
         */
        cps     #CPSR_MODE_ABT
        cmp_spsr_user_mode r2
        mov     lr, r1
        msr     spsr_fsxc, r2
        ldm     r0, {r0-r12}
        movsne  pc, lr
        b       eret_to_user_mode
UNWIND( .fnend)
END_FUNC thread_resume

/*
 * Disables IRQ and FIQ and saves the state of the thread from FIQ mode,
 * which has banked r8-r12 registers. Returns the original CPSR.
 */
LOCAL_FUNC thread_save_state_fiq , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        mov     r9, lr

        /*
         * Uses the stack for temporary storage while saving the needed
         * context into the thread context struct.
         */

        mrs     r8, cpsr

        cpsid   aif             /* Disable Async abort, IRQ and FIQ */

        push    {r4-r7}
        push    {r0-r3}

        mrs     r6, cpsr        /* Save current CPSR */

        bl      thread_get_ctx_regs

        pop     {r1-r4}         /* r0-r3 pushed above */
        stm     r0!, {r1-r4}
        pop     {r1-r4}         /* r4-r7 pushed above */
        stm     r0!, {r1-r4}

        cps     #CPSR_MODE_SYS
        stm     r0!, {r8-r12}
        str     sp, [r0], #4
        str     lr, [r0], #4

        cps     #CPSR_MODE_SVC
        mrs     r1, spsr
        str     r1, [r0], #4
        str     sp, [r0], #4
        str     lr, [r0], #4

        /* Back to FIQ mode */
        orr     r6, r6, #ARM32_CPSR_FIA /* Disable Async abort, IRQ and FIQ */
        msr     cpsr, r6                /* Restore mode */

        mov     r0, r8                  /* Return original CPSR */
        bx      r9
UNWIND( .fnend)
END_FUNC thread_save_state_fiq
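/*
 * For reference, thread_resume() and thread_save_state*() above and below
 * walk a register area whose layout corresponds roughly to the following
 * sketch. This is illustrative only; the authoritative definition of
 * struct thread_ctx_regs lives in thread_private.h and the offsets used
 * in the assembly come from <generated/asm-defines.h>.
 *
 *     struct thread_ctx_regs {
 *             uint32_t r[13];    // r0-r12
 *             uint32_t usr_sp;
 *             uint32_t usr_lr;
 *             uint32_t svc_spsr;
 *             uint32_t svc_sp;
 *             uint32_t svc_lr;
 *             uint32_t pc;
 *             uint32_t cpsr;
 *     };
 */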
/*
 * Disables IRQ and FIQ and saves the state of the thread. Returns the
 * original CPSR.
 */
FUNC thread_save_state , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        push    {r12, lr}
        /*
         * Uses the stack for temporary storage while saving the needed
         * context into the thread context struct.
         */

        mrs     r12, cpsr

        cpsid   aif             /* Disable Async abort, IRQ and FIQ */

        push    {r4-r7}
        push    {r0-r3}

        mov     r5, r12         /* Save CPSR in a preserved register */
        mrs     r6, cpsr        /* Save current CPSR */

        bl      thread_get_ctx_regs

        pop     {r1-r4}         /* r0-r3 pushed above */
        stm     r0!, {r1-r4}
        pop     {r1-r4}         /* r4-r7 pushed above */
        stm     r0!, {r1-r4}
        stm     r0!, {r8-r11}

        pop     {r12, lr}
        stm     r0!, {r12}

        cps     #CPSR_MODE_SYS
        str     sp, [r0], #4
        str     lr, [r0], #4

        cps     #CPSR_MODE_SVC
        mrs     r1, spsr
        str     r1, [r0], #4
        str     sp, [r0], #4
        str     lr, [r0], #4

        orr     r6, r6, #ARM32_CPSR_FIA /* Disable Async abort, IRQ and FIQ */
        msr     cpsr, r6                /* Restore mode */

        mov     r0, r5                  /* Return original CPSR */
        bx      lr
UNWIND( .fnend)
END_FUNC thread_save_state

/*
 * unsigned long thread_smc(unsigned long func_id, unsigned long a1,
 *                          unsigned long a2, unsigned long a3)
 */
FUNC thread_smc , :
UNWIND( .fnstart)
        smc     #0
        bx      lr
UNWIND( .fnend)
END_FUNC thread_smc

FUNC thread_init_vbar , :
UNWIND( .fnstart)
        /* Set vector (VBAR) */
        write_vbar r0
        bx      lr
UNWIND( .fnend)
END_FUNC thread_init_vbar
KEEP_PAGER thread_init_vbar

/*
 * Below are the low level routines handling entry to and return from
 * user mode.
 *
 * thread_enter_user_mode() saves all registers that user mode can change
 * so kernel mode can restore the needed registers when resuming execution
 * after the call to thread_enter_user_mode() has returned.
 * thread_enter_user_mode() doesn't return directly since it enters user
 * mode instead; it's thread_unwind_user_mode() that does the returning by
 * restoring the registers saved by thread_enter_user_mode().
 *
 * There are three ways for thread_enter_user_mode() to return to its
 * caller: the user TA calls utee_return, the user TA calls utee_panic, or
 * an abort occurs.
 *
 * Calls to utee_return or utee_panic are handled as:
 * __thread_svc_handler() -> thread_svc_handler() -> tee_svc_do_call(),
 * which calls syscall_return() or syscall_panic().
 *
 * These function calls return normally, except thread_svc_handler()
 * which is an exception handling routine and therefore reads the return
 * address and SPSR to restore from the stack. syscall_return() and
 * syscall_panic() change the return address and SPSR used by
 * thread_svc_handler() so that, instead of returning into user mode as
 * with other syscalls, it returns into thread_unwind_user_mode() in
 * kernel mode. When thread_svc_handler() returns, the stack pointer is at
 * the point where thread_enter_user_mode() left it, so this is where
 * thread_unwind_user_mode() can operate.
 *
 * Aborts are handled in a similar way, but by thread_abort_handler()
 * instead: when the pager sees that it's an abort from user mode that
 * can't be handled, it updates the SPSR and return address used by
 * thread_abort_handler() to return into thread_unwind_user_mode()
 * instead.
 */
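/*
 * Illustrative sketch only (hypothetical kernel mode caller, not code
 * from this file): the pairing described above means that the functions
 * below are used roughly like this:
 *
 *     uint32_t exit_status0 = 0;
 *     uint32_t exit_status1 = 0;
 *     uint32_t ret;
 *
 *     // Enters user mode; the call "returns" only once user space exits
 *     // via utee_return/utee_panic or an unhandled abort redirects
 *     // execution into thread_unwind_user_mode().
 *     ret = __thread_enter_user_mode(regs, &exit_status0, &exit_status1);
 *
 * where regs is a struct thread_ctx_regs * describing the user context,
 * and thread_unwind_user_mode(ret, exit_status0, exit_status1) is what
 * makes this call return with ret as the return value and the exit
 * statuses written through the pointers.
 */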
/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *                                   uint32_t *exit_status0,
 *                                   uint32_t *exit_status1);
 *
 * This function depends on being called with exceptions masked.
 */
FUNC __thread_enter_user_mode , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        /*
         * Save all registers to allow syscall_return() to resume execution
         * as if this function had returned. This is also used in
         * syscall_panic().
         *
         * If the stack usage of this function is changed,
         * thread_unwind_user_mode() has to be updated.
         */
        push    {r4-r12,lr}

        /*
         * Save the old user sp and set the new user sp.
         */
        cps     #CPSR_MODE_SYS
        mov     r4, sp
        ldr     sp, [r0, #THREAD_CTX_REGS_USR_SP]
        cps     #CPSR_MODE_SVC

        push    {r1, r2, r4, r5}

        /* Prepare user mode entry via eret_to_user_mode */
        ldr     lr, [r0, #THREAD_CTX_REGS_PC]
        ldr     r4, [r0, #THREAD_CTX_REGS_CPSR]
        msr     spsr_fsxc, r4

        ldm     r0, {r0-r12}

        b       eret_to_user_mode
UNWIND( .fnend)
END_FUNC __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *                              uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        /* Match push {r1, r2, r4, r5} in __thread_enter_user_mode() */
        pop     {r4-r7}
        str     r1, [r4]
        str     r2, [r5]

        /* Restore old user sp */
        cps     #CPSR_MODE_SYS
        mov     sp, r6
        cps     #CPSR_MODE_SVC

        /* Match push {r4-r12,lr} in __thread_enter_user_mode() */
        pop     {r4-r12,pc}
UNWIND( .fnend)
END_FUNC thread_unwind_user_mode

        .macro maybe_restore_mapping
        /*
         * This macro is a bit hard to read due to all the ifdefs:
         * we're testing for two different configs, which makes four
         * different combinations.
         *
         * - With LPAE, and then some extra code if with
         *   CFG_CORE_UNMAP_CORE_AT_EL0
         * - Without LPAE, and then some extra code if with
         *   CFG_CORE_UNMAP_CORE_AT_EL0
         */

        /*
         * At this point we can't rely on any memory being writable
         * yet, so we're using TPIDRPRW to store r0 and, if with
         * LPAE, TPIDRURO to store r1 too.
         */
        write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
        write_tpidruro r1
#endif

#ifdef CFG_WITH_LPAE
        read_ttbr0_64bit r0, r1
        tst     r1, #BIT(TTBR_ASID_SHIFT - 32)
        beq     11f

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
        /*
         * Update the mapping to use the full kernel mode mapping.
         * Since the translation table could reside above 4GB we'll
         * have to use 64-bit arithmetic.
         */
        subs    r0, r0, #CORE_MMU_L1_TBL_OFFSET
        sbc     r1, r1, #0
#endif
        bic     r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
        write_ttbr0_64bit r0, r1
        isb

#else /*!CFG_WITH_LPAE*/
        read_contextidr r0
        tst     r0, #1
        beq     11f

        /* Update the mapping to use the full kernel mode mapping. */
        bic     r0, r0, #1
        write_contextidr r0
        isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
        read_ttbr1 r0
        sub     r0, r0, #CORE_MMU_L1_TBL_OFFSET
        write_ttbr1 r0
        isb
#endif

#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
        ldr     r0, =thread_user_kcode_offset
        ldr     r0, [r0]
        read_vbar r1
        add     r1, r1, r0
        write_vbar r1
        isb

    11: /*
         * The PC is adjusted unconditionally to guard against the
         * case where there was an FIQ just before we did the
         * "cpsid aif".
         */
        ldr     r0, =22f
        bx      r0
    22:
#else
    11:
#endif
        read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
        read_tpidruro r1
#endif
        .endm
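/*
 * Rough pseudo-C sketch of the LPAE path of maybe_restore_mapping above,
 * illustrative only (the constants are the real ones used by the macro,
 * the C accessors are hypothetical placeholders):
 *
 *     uint64_t ttbr0 = read_ttbr0_64();          // hypothetical accessor
 *
 *     if (ttbr0 & BIT64(TTBR_ASID_SHIFT)) {
 *             // We came in on the reduced user mapping: switch back to
 *             // the full kernel mapping before touching any memory.
 *             ttbr0 -= CORE_MMU_L1_TBL_OFFSET;   // CFG_CORE_UNMAP_CORE_AT_EL0 only
 *             ttbr0 &= ~BIT64(TTBR_ASID_SHIFT);  // select the kernel ASID
 *             write_ttbr0_64(ttbr0);             // hypothetical accessor
 *             isb();
 *     }
 *
 * eret_to_user_mode further down performs the inverse transformation when
 * returning to user space.
 */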
/* The handler of native interrupt. */
.macro native_intr_handler mode:req
        cpsid   aif
        maybe_restore_mapping

        /*
         * FIQ and IRQ have a +4 offset for lr compared to the preferred
         * return address.
         */
        sub     lr, lr, #4

        /*
         * We always save {r0-r3}. In IRQ mode we additionally save r12.
         * In FIQ mode we save the banked FIQ registers {r8-r12} because
         * the secure monitor doesn't save those. The treatment of the
         * banked FIQ registers is somewhat analogous to the lazy save
         * of VFP registers.
         */
        .ifc    \mode\(),fiq
        push    {r0-r3, r8-r12, lr}
        .else
        push    {r0-r3, r12, lr}
        .endif

        bl      thread_check_canaries
        bl      itr_core_handler

        mrs     r0, spsr
        cmp_spsr_user_mode r0

        .ifc    \mode\(),fiq
        pop     {r0-r3, r8-r12, lr}
        .else
        pop     {r0-r3, r12, lr}
        .endif

        movsne  pc, lr
        b       eret_to_user_mode
.endm

/* The handler of foreign interrupt. */
.macro foreign_intr_handler mode:req
        cpsid   aif
        maybe_restore_mapping

        sub     lr, lr, #4
        push    {r12}

        .ifc    \mode\(),fiq
        /*
         * If a foreign (non-secure) interrupt is received as an FIQ we
         * need to check whether we're in a state where it can be saved,
         * or whether we need to mask the interrupt and handle it later.
         *
         * The window when this is needed is quite narrow: it's between
         * entering the exception vector and the point where the "cpsid"
         * instruction of the handler has been executed.
         *
         * Currently we can save the state properly if the FIQ is
         * received while in user or svc (kernel) mode.
         *
         * If we're returning to abort, undef or irq mode we return with
         * the full mapping restored. This is OK since the handler we
         * return to will restore the reduced mapping before it
         * eventually returns to user mode.
         */
        mrs     r12, spsr
        and     r12, r12, #ARM32_CPSR_MODE_MASK
        cmp     r12, #ARM32_CPSR_MODE_USR
        cmpne   r12, #ARM32_CPSR_MODE_SVC
        beq     1f
        mrs     r12, spsr
        orr     r12, r12, #ARM32_CPSR_F
        msr     spsr_fsxc, r12
        pop     {r12}
        movs    pc, lr
1:
        .endif

        push    {lr}

        .ifc    \mode\(),fiq
        bl      thread_save_state_fiq
        .else
        bl      thread_save_state
        .endif

#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
        /*
         * Prevent leaking information about which cache entries have
         * been used. We're relying on the secure monitor/dispatcher to
         * take care of the BTB.
         */
        mov     r0, #DCACHE_OP_CLEAN_INV
        bl      dcache_op_louis
        write_iciallu
#endif

        mov     r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
        mrs     r1, spsr
        pop     {r2}
        pop     {r12}
        blx     thread_state_suspend

        /*
         * Switch to SVC mode and copy the current stack pointer as it
         * already is the tmp stack.
         */
        mov     r1, sp
        cps     #CPSR_MODE_SVC
        mov     sp, r1

        /* Passing thread index in r0 */
        b       thread_foreign_intr_exit
.endm

        .section .text.thread_excp_vect
        .align  5
FUNC thread_excp_vect , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
        b       .                       /* Reset */
        b       __thread_und_handler    /* Undefined instruction */
        b       __thread_svc_handler    /* System call */
        b       __thread_pabort_handler /* Prefetch abort */
        b       __thread_dabort_handler /* Data abort */
        b       .                       /* Reserved */
        b       __thread_irq_handler    /* IRQ */
        b       __thread_fiq_handler    /* FIQ */
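/*
 * Worked example for the Spectre workaround vectors defined below under
 * CFG_CORE_WORKAROUND_SPECTRE_BP_SEC (illustrative only, derived from the
 * code that follows): an IRQ enters the workaround vector at offset 0x18
 * and executes a single "add sp, sp, #1" before reaching "cpsid aif", so
 * the low bits of SP encode 1 (0b001). The common dispatch code then
 * extracts those bits into r0, clears them from SP and performs
 * "add pc, pc, r0, lsl #3", which with r0 == 1 lands on the second
 * 8-byte slot: "read_tpidrprw r0; b __thread_irq_handler".
 */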
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
        .macro vector_prologue_spectre
        /*
         * This depends on SP being 8 byte aligned, that is, the
         * lowest three bits in SP are zero.
         *
         * To avoid unexpected speculation we need to invalidate
         * the branch predictor before we do the first branch. It
         * doesn't matter whether it's a conditional or an
         * unconditional branch, speculation can still occur.
         *
         * The idea is to form a specific bit pattern in the lowest
         * three bits of SP depending on which entry in the vector
         * we enter via. This is done by adding 1 to SP in each
         * entry but the last.
         */
        add     sp, sp, #1      /* 7:111 Reset */
        add     sp, sp, #1      /* 6:110 Undefined instruction */
        add     sp, sp, #1      /* 5:101 Secure monitor call */
        add     sp, sp, #1      /* 4:100 Prefetch abort */
        add     sp, sp, #1      /* 3:011 Data abort */
        add     sp, sp, #1      /* 2:010 Reserved */
        add     sp, sp, #1      /* 1:001 IRQ */
        cpsid   aif             /* 0:000 FIQ */
        .endm

        .align  5
        .global thread_excp_vect_workaround_a15
thread_excp_vect_workaround_a15:
        vector_prologue_spectre
        write_tpidrprw r0
        mrs     r0, spsr
        cmp_spsr_user_mode r0
        bne     1f
        /*
         * Invalidate the branch predictor for the current processor.
         * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
         * effective.
         * Note that the BPIALL instruction is not effective in
         * invalidating the branch predictor on Cortex-A15. For that CPU,
         * set ACTLR[0] to 1 during early processor initialisation, and
         * invalidate the branch predictor by performing an ICIALLU
         * instruction. See also:
         * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
         */
        write_iciallu
        isb
        b       1f

        .align  5
        .global thread_excp_vect_workaround
thread_excp_vect_workaround:
        vector_prologue_spectre
        write_tpidrprw r0
        mrs     r0, spsr
        cmp_spsr_user_mode r0
        bne     1f
        /* Invalidate the branch predictor for the current processor. */
        write_bpiall
        isb

1:      and     r0, sp, #(BIT(0) | BIT(1) | BIT(2))
        bic     sp, sp, #(BIT(0) | BIT(1) | BIT(2))
        add     pc, pc, r0, LSL #3
        nop

        read_tpidrprw r0
        b       __thread_fiq_handler    /* FIQ */
        read_tpidrprw r0
        b       __thread_irq_handler    /* IRQ */
        read_tpidrprw r0
        b       .                       /* Reserved */
        read_tpidrprw r0
        b       __thread_dabort_handler /* Data abort */
        read_tpidrprw r0
        b       __thread_pabort_handler /* Prefetch abort */
        read_tpidrprw r0
        b       __thread_svc_handler    /* System call */
        read_tpidrprw r0
        b       __thread_und_handler    /* Undefined instruction */
        read_tpidrprw r0
        b       .                       /* Reset */
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

__thread_und_handler:
        cpsid   aif
        maybe_restore_mapping
        strd    r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
        mrs     r1, spsr
        tst     r1, #CPSR_T
        subne   lr, lr, #2
        subeq   lr, lr, #4
        mov     r0, #ABORT_TYPE_UNDEF
        b       __thread_abort_common

__thread_dabort_handler:
        cpsid   aif
        maybe_restore_mapping
        strd    r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
        sub     lr, lr, #8
        mov     r0, #ABORT_TYPE_DATA
        b       __thread_abort_common

__thread_pabort_handler:
        cpsid   aif
        maybe_restore_mapping
        strd    r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
        sub     lr, lr, #4
        mov     r0, #ABORT_TYPE_PREFETCH
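/*
 * Note: __thread_pabort_handler above falls through into
 * __thread_abort_common below.
 *
 * The stack frame that __thread_abort_common builds and hands to
 * abort_handler() corresponds, from low to high addresses, to roughly the
 * following layout (illustrative sketch only; the authoritative
 * definition of struct thread_abort_regs is in the kernel headers):
 *
 *     struct thread_abort_regs {
 *             uint32_t usr_sp;
 *             uint32_t usr_lr;
 *             uint32_t pad;
 *             uint32_t spsr;
 *             uint32_t elr;
 *             uint32_t r0, r1, r2, r3, r4, r5;
 *             uint32_t r6, r7, r8, r9, r10, r11;
 *             uint32_t ip;
 *     };
 */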
__thread_abort_common:
        /*
         * At this label:
         * cpsr is in mode undef or abort
         * sp is still pointing to struct thread_core_local belonging to
         * this core.
         * {r0, r1} are saved in struct thread_core_local pointed to by sp
         * {r2-r11, ip} are untouched.
         * r0 holds the first argument for abort_handler()
         */

        /*
         * Update core local flags.
         * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
         */
        ldr     r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
        lsl     r1, r1, #THREAD_CLF_SAVED_SHIFT
        orr     r1, r1, #THREAD_CLF_ABORT

        /*
         * Select a stack and update the flags accordingly.
         *
         * Normal case:
         * If the abort stack is unused, select that.
         *
         * Fatal error handling:
         * If we're already using the abort stack, as noted by bit
         * (THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT) in the flags
         * field, we select the temporary stack instead to be able to
         * make a stack trace of the abort in abort mode.
         *
         * r1 is used as a temporary stack pointer until we've switched
         * to system mode.
         */
        tst     r1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
        orrne   r1, r1, #THREAD_CLF_TMP /* flags |= THREAD_CLF_TMP; */
        str     r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
        ldrne   r1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
        ldreq   r1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]

        /*
         * Store the registers on the stack so that they fit struct
         * thread_abort_regs, starting from the end of the struct with
         * {r2-r11, ip}.
         * Then load the previously saved {r0-r1} and store the struct
         * contents down to the pad field.
         * After this only {usr_sp, usr_lr} are missing in the struct.
         */
        stmdb   r1!, {r2-r11, ip}       /* Push on the selected stack */
        ldrd    r2, r3, [sp, #THREAD_CORE_LOCAL_R0]
        /* Push the original {r0-r1} on the selected stack */
        stmdb   r1!, {r2-r3}
        mrs     r3, spsr
        /* Push {pad, spsr, elr} on the selected stack */
        stmdb   r1!, {r2, r3, lr}

        cps     #CPSR_MODE_SYS
        str     lr, [r1, #-4]!
        str     sp, [r1, #-4]!
        mov     sp, r1

        bl      abort_handler

        mov     ip, sp
        ldr     sp, [ip], #4
        ldr     lr, [ip], #4

        /*
         * Even if we entered via CPSR_MODE_UND, we are returning via
         * CPSR_MODE_ABT. It doesn't matter since lr and spsr are
         * assigned here.
         */
        cps     #CPSR_MODE_ABT
        ldm     ip!, {r0, r1, lr}       /* r0 is pad */
        msr     spsr_fsxc, r1

        /* Update core local flags */
        ldr     r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
        lsr     r0, r0, #THREAD_CLF_SAVED_SHIFT
        str     r0, [sp, #THREAD_CORE_LOCAL_FLAGS]

        cmp_spsr_user_mode r1
        ldm     ip, {r0-r11, ip}
        movsne  pc, lr
        b       eret_to_user_mode
        /* end thread_abort_common */
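/*
 * The pushes at the start of __thread_svc_handler below ({spsr} on top of
 * {r0-r7, lr}) build the argument passed to thread_svc_handler() in r0.
 * As an illustrative sketch only (the authoritative definition of
 * struct thread_svc_regs is in the kernel headers), the layout seen from
 * C is roughly:
 *
 *     struct thread_svc_regs {
 *             uint32_t spsr;
 *             uint32_t r0, r1, r2, r3, r4, r5, r6, r7;
 *             uint32_t lr;
 *     };
 *
 * syscall_return() and syscall_panic() change the saved lr and spsr in
 * this structure to redirect the return into thread_unwind_user_mode(),
 * as described in the comment before __thread_enter_user_mode().
 */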
__thread_svc_handler:
        cpsid   aif

        maybe_restore_mapping

        push    {r0-r7, lr}
        mrs     r0, spsr
        push    {r0}
        mov     r0, sp
        bl      thread_svc_handler
        cpsid   aif             /* In case something was unmasked */
        pop     {r0}
        msr     spsr_fsxc, r0
        cmp_spsr_user_mode r0
        pop     {r0-r7, lr}
        movsne  pc, lr
        b       eret_to_user_mode
        /* end thread_svc_handler */

__thread_fiq_handler:
#if defined(CFG_ARM_GICV3)
        foreign_intr_handler fiq
#else
        native_intr_handler fiq
#endif
        /* end thread_fiq_handler */

__thread_irq_handler:
#if defined(CFG_ARM_GICV3)
        native_intr_handler irq
#else
        foreign_intr_handler irq
#endif
        /* end thread_irq_handler */

        /*
         * Returns to user mode.
         * Expects to be jumped to with lr pointing to the user space
         * address to jump to and spsr holding the desired cpsr. Async
         * abort, IRQ and FIQ should be masked.
         */
eret_to_user_mode:
        write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
        write_tpidruro r1
#endif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
        ldr     r0, =thread_user_kcode_offset
        ldr     r0, [r0]
        read_vbar r1
        sub     r1, r1, r0
        write_vbar r1
        isb

        /* Jump into the reduced mapping before the full mapping is removed */
        ldr     r1, =1f
        sub     r1, r1, r0
        bx      r1
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
        read_ttbr0_64bit r0, r1
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
        add     r0, r0, #CORE_MMU_L1_TBL_OFFSET
#endif
        /* Switch to the user ASID */
        orr     r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
        write_ttbr0_64bit r0, r1
        isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
        read_ttbr1 r0
        add     r0, r0, #CORE_MMU_L1_TBL_OFFSET
        write_ttbr1 r0
        isb
#endif
        read_contextidr r0
        orr     r0, r0, #BIT(0)
        write_contextidr r0
        isb
#endif /*!CFG_WITH_LPAE*/

        read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
        read_tpidruro r1
#endif

        movs    pc, lr

        /*
         * void icache_inv_user_range(void *addr, size_t size);
         *
         * This function has to execute with the user space ASID active,
         * which means executing with the reduced mapping; hence the code
         * needs to be located here together with the vector.
         */
        .global icache_inv_user_range
        .type   icache_inv_user_range , %function
icache_inv_user_range:
        push    {r4-r7}

        /* Mask all exceptions */
        mrs     r4, cpsr        /* This register must be preserved */
        cpsid   aif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
        ldr     r2, =thread_user_kcode_offset
        ldr     r2, [r2]
        read_vbar r5            /* This register must be preserved */
        sub     r3, r5, r2
        write_vbar r3
        isb

        /* Jump into the reduced mapping before the full mapping is removed */
        ldr     r3, =1f
        sub     r3, r3, r2
        bx      r3
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
        read_ttbr0_64bit r6, r7 /* These registers must be preserved */
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
        add     r2, r6, #CORE_MMU_L1_TBL_OFFSET
#endif
        /* Switch to the user ASID */
        orr     r3, r7, #BIT(TTBR_ASID_SHIFT - 32)
        write_ttbr0_64bit r2, r3
        isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
        read_ttbr1 r6           /* This register must be preserved */
        add     r2, r6, #CORE_MMU_L1_TBL_OFFSET
        write_ttbr1 r2
        isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
        read_contextidr r7      /* This register must be preserved */
        orr     r2, r7, #BIT(0)
        write_contextidr r2
        isb
#endif /*!CFG_WITH_LPAE*/

        /*
         * Do the actual icache invalidation
         */

        /* Calculate minimum icache line size, result in r2 */
        read_ctr r3
        and     r3, r3, #CTR_IMINLINE_MASK
        mov     r2, #CTR_WORD_SIZE
        lsl     r2, r2, r3

        add     r1, r0, r1
        sub     r3, r2, #1
        bic     r0, r0, r3
1:
        write_icimvau r0
        add     r0, r0, r2
        cmp     r0, r1
        blo     1b

        /* Invalidate entire branch predictor array inner shareable */
        write_bpiallis

        dsb     ishst
        isb

#ifdef CFG_WITH_LPAE
        write_ttbr0_64bit r6, r7
        isb
#else /*!CFG_WITH_LPAE*/
        write_contextidr r7
        isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
        write_ttbr1 r6
        isb
#endif
#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
        write_vbar r5
        isb
        /*
         * The PC is adjusted unconditionally to guard against the
         * case where there was an FIQ just before we did the
         * "cpsid aif".
         */
        ldr     r0, =1f
        bx      r0
1:
#endif

        msr     cpsr_fsxc, r4   /* Restore exceptions */
        pop     {r4-r7}
        bx      lr              /* End of icache_inv_user_range() */
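/*
 * Illustrative usage sketch only (hypothetical caller, not code from this
 * file): icache_inv_user_range() is meant for the situation where kernel
 * mode has just written instructions into a user space mapping, for
 * example when loading TA code, and the I-cache may still hold stale
 * lines for those addresses:
 *
 *     // user_va and seg_size are hypothetical names
 *     memcpy(user_va, seg, seg_size);            // write the code (D-side)
 *     dcache_clean_range(user_va, seg_size);     // assumed D-cache clean helper
 *     icache_inv_user_range(user_va, seg_size);  // drop stale I-cache lines
 */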
the "cpsid aif". 923 */ 924 ldr r0, =1f 925 bx r0 9261: 927#endif 928 929 msr cpsr_fsxc, r4 /* Restore exceptions */ 930 pop {r4-r7} 931 bx lr /* End of icache_inv_user_range() */ 932 933 /* 934 * Make sure that literals are placed before the 935 * thread_excp_vect_end label. 936 */ 937 .pool 938UNWIND( .fnend) 939 .global thread_excp_vect_end 940thread_excp_vect_end: 941END_FUNC thread_excp_vect 942