/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/cache_helpers.h>
#include <kernel/thread_defs.h>
#include <kernel/unwind.h>
#include <mm/core_mmu.h>

#include "thread_private.h"

	.syntax unified
	.arch_extension sec

	.macro cmp_spsr_user_mode reg:req
		/*
		 * We're only testing the lower 4 bits as bit 4 (0x10)
		 * is always set.
		 */
		tst	\reg, #0x0f
	.endm

FUNC thread_set_abt_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_ABT
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_abt_sp

FUNC thread_set_und_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_UND
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_und_sp

FUNC thread_set_irq_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_irq_sp

FUNC thread_set_fiq_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_fiq_sp

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */

	cps	#CPSR_MODE_SYS
	ldr	sp, [r12], #4
	ldr	lr, [r12], #4

	cps	#CPSR_MODE_SVC
	ldr	r1, [r12], #4
	ldr	sp, [r12], #4
	ldr	lr, [r12], #4
	msr	spsr_fsxc, r1

	ldm	r12, {r1, r2}

	/*
	 * Switch to a mode other than SVC since we need to set SPSR in
	 * order to return into the saved state properly, and that state
	 * may itself be SVC mode.
	 */
	cps	#CPSR_MODE_ABT
	cmp_spsr_user_mode r2
	mov	lr, r1
	msr	spsr_fsxc, r2
	ldm	r0, {r0-r12}
	movsne	pc, lr
	b	eret_to_user_mode
UNWIND(	.fnend)
END_FUNC thread_resume

/*
 * Disables IRQ and FIQ and saves the state of a thread from FIQ mode,
 * which has banked r8-r12 registers. Returns the original CPSR.
 */
LOCAL_FUNC thread_save_state_fiq , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mov	r9, lr

	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r8, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}

	cps	#CPSR_MODE_SYS
	stm	r0!, {r8-r12}
	str	sp, [r0], #4
	str	lr, [r0], #4

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	str	r1, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4

	/* back to fiq mode */
	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r8			/* Return original CPSR */
	bx	r9
UNWIND(	.fnend)
END_FUNC thread_save_state_fiq
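
/*
 * Layout note (a sketch inferred from the offsets used in this file, not
 * a definition): thread_save_state_fiq(), thread_save_state() and
 * thread_resume() all assume that struct thread_ctx_regs starts with
 * r0-r12, followed by usr_sp, usr_lr, svc_spsr, svc_sp, svc_lr, pc and
 * cpsr. This is why thread_resume() starts restoring from offset 13 * 4,
 * and why the save routines store the SYS mode sp/lr and the SVC mode
 * spsr/sp/lr in that order.
 */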

/*
 * Disables IRQ and FIQ and saves the state of the thread. Returns the
 * original CPSR.
 */
FUNC thread_save_state , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r12, lr}
	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r12, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mov	r5, r12			/* Save CPSR in a preserved register */
	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}
	stm	r0!, {r8-r11}

	pop	{r12, lr}
	stm	r0!, {r12}

	cps	#CPSR_MODE_SYS
	str	sp, [r0], #4
	str	lr, [r0], #4

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	str	r1, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4

	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r5			/* Return original CPSR */
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_save_state

/*
 * unsigned long thread_smc(unsigned long func_id, unsigned long a1,
 *			    unsigned long a2, unsigned long a3)
 */
FUNC thread_smc , :
UNWIND(	.fnstart)
	smc	#0
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_smc
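
/*
 * thread_smc() relies on the SMC Calling Convention lining up with the
 * AAPCS here: the function ID arrives in r0 and the arguments in r1-r3,
 * which is where the SMC expects them, and the result comes back in r0
 * (r1-r3 for any additional results), so no register shuffling is needed
 * around the "smc #0" above.
 */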

FUNC thread_init_vbar , :
UNWIND(	.fnstart)
	/* Set vector (VBAR) */
	write_vbar r0
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_init_vbar
KEEP_PAGER thread_init_vbar

/*
 * Below are low level routines handling entry and return from user mode.
 *
 * thread_enter_user_mode() saves all registers that user mode can change
 * so that kernel mode can restore the needed registers when resuming
 * execution after the call to thread_enter_user_mode() has returned.
 * thread_enter_user_mode() doesn't return directly since it enters user
 * mode instead, it's thread_unwind_user_mode() that does the
 * returning by restoring the registers saved by thread_enter_user_mode().
 *
 * There are three ways for thread_enter_user_mode() to return to its
 * caller: the user TA calls utee_return, the user TA calls utee_panic,
 * or an abort occurs.
 *
 * Calls to utee_return or utee_panic are handled as:
 * thread_svc_handler() -> tee_svc_handler() -> tee_svc_do_call() which
 * calls syscall_return() or syscall_panic().
 *
 * These function calls return normally, except thread_svc_handler(),
 * which is an exception handling routine and therefore reads the return
 * address and SPSR to restore from the stack. syscall_return() and
 * syscall_panic() change the return address and SPSR used by
 * thread_svc_handler() so that, instead of returning into user mode as
 * with other syscalls, it returns into thread_unwind_user_mode() in
 * kernel mode. When thread_svc_handler() returns, the stack pointer is
 * at the point where thread_enter_user_mode() left it, so this is where
 * thread_unwind_user_mode() can operate.
 *
 * Aborts are handled in a similar way, but by thread_abort_handler()
 * instead: when the pager sees that it's an abort from user mode that
 * can't be handled, it updates the SPSR and return address used by
 * thread_abort_handler() to return into thread_unwind_user_mode()
 * instead.
 */

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *		unsigned long a2, unsigned long a3, unsigned long user_sp,
 *		unsigned long user_func, unsigned long spsr,
 *		uint32_t *exit_status0, uint32_t *exit_status1)
 *
 */
FUNC __thread_enter_user_mode , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/*
	 * Save all registers to allow syscall_return() to resume execution
	 * as if this function had returned. This is also used in
	 * syscall_panic().
	 *
	 * If the stack usage of this function is changed
	 * thread_unwind_user_mode() has to be updated.
	 */
	push	{r4-r12,lr}

	ldr	r4, [sp, #(10 * 0x4)]	/* user stack pointer */
	ldr	r5, [sp, #(11 * 0x4)]	/* user function */
	ldr	r6, [sp, #(12 * 0x4)]	/* spsr */

	/*
	 * Save old user sp and set new user sp.
	 */
	cps	#CPSR_MODE_SYS
	mov	r7, sp
	mov	sp, r4
	cps	#CPSR_MODE_SVC
	push	{r7,r8}

	/* Prepare user mode entry via eret_to_user_mode */
	cpsid	aif
	msr	spsr_fsxc, r6
	mov	lr, r5

	b	eret_to_user_mode
UNWIND(	.fnend)
END_FUNC __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	ip, [sp, #(15 * 0x4)]	/* &ctx->panicked */
	str	r1, [ip]
	ldr	ip, [sp, #(16 * 0x4)]	/* &ctx->panic_code */
	str	r2, [ip]

	/* Restore old user sp */
	pop	{r4,r7}
	cps	#CPSR_MODE_SYS
	mov	sp, r4
	cps	#CPSR_MODE_SVC

	pop	{r4-r12,pc}	/* Match the push in thread_enter_user_mode() */
UNWIND(	.fnend)
END_FUNC thread_unwind_user_mode
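
/*
 * A note on the stack offsets above (derived from the code): on entry to
 * __thread_enter_user_mode() the fifth and later arguments are on the
 * caller's stack. After the "push {r4-r12,lr}" (10 words), user_sp,
 * user_func and spsr are found at sp + 10*4, 11*4 and 12*4. The extra
 * "push {r7,r8}" (2 words) moves sp another 8 bytes, so when
 * thread_unwind_user_mode() later runs with the same stack pointer,
 * exit_status0 and exit_status1 are found at sp + 15*4 and sp + 16*4,
 * which is what its first two ldr instructions rely on.
 */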

	.macro maybe_restore_mapping
		/*
		 * This macro is a bit hard to read due to all the ifdefs.
		 * We're testing for two different configs, which gives
		 * four different combinations:
		 *
		 * - With LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 * - Without LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 */

		/*
		 * At this point we can't rely on any memory being writable
		 * yet, so we're using TPIDRPRW to store r0 and, when built
		 * with LPAE or CFG_CORE_UNMAP_CORE_AT_EL0, TPIDRURO to
		 * store r1 too.
		 */
		write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		write_tpidruro r1
#endif

#ifdef CFG_WITH_LPAE
		read_ttbr0_64bit r0, r1
		tst	r1, #BIT(TTBR_ASID_SHIFT - 32)
		beq	11f

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/*
		 * Update the mapping to use the full kernel mode mapping.
		 * Since the translation table could reside above 4GB we'll
		 * have to use 64-bit arithmetics.
		 */
		subs	r0, r0, #CORE_MMU_L1_TBL_OFFSET
		sbc	r1, r1, #0
#endif
		bic	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
		write_ttbr0_64bit r0, r1
		isb

#else /*!CFG_WITH_LPAE*/
		read_contextidr r0
		tst	r0, #1
		beq	11f

		/* Update the mapping to use the full kernel mode mapping. */
		bic	r0, r0, #1
		write_contextidr r0
		isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		read_ttbr1 r0
		sub	r0, r0, #CORE_MMU_L1_TBL_OFFSET
		write_ttbr1 r0
		isb
#endif

#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		ldr	r0, =thread_user_kcode_offset
		ldr	r0, [r0]
		read_vbar r1
		add	r1, r1, r0
		write_vbar r1
		isb

	11:	/*
		 * The PC is adjusted unconditionally to guard against the
		 * case where there was an FIQ just before we did the
		 * "cpsid aif".
		 */
		ldr	r0, =22f
		bx	r0
	22:
#else
	11:
#endif
		read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		read_tpidruro r1
#endif
	.endm

/* The handler of native interrupt. */
.macro native_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	/*
	 * FIQ and IRQ have a +4 offset for lr compared to the preferred
	 * return address.
	 */
	sub	lr, lr, #4

	/*
	 * We're always saving {r0-r3}. In IRQ mode we're saving r12 as
	 * well. In FIQ mode we're also saving the banked FIQ registers
	 * {r8-r12} because the secure monitor doesn't save those. The
	 * treatment of the banked FIQ registers is somewhat analogous to
	 * the lazy save of VFP registers.
	 */
	.ifc	\mode\(),fiq
	push	{r0-r3, r8-r12, lr}
	.else
	push	{r0-r3, r12, lr}
	.endif

	bl	thread_check_canaries
	bl	itr_core_handler

	mrs	r0, spsr
	cmp_spsr_user_mode r0

	.ifc	\mode\(),fiq
	pop	{r0-r3, r8-r12, lr}
	.else
	pop	{r0-r3, r12, lr}
	.endif

	movsne	pc, lr
	b	eret_to_user_mode
.endm
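
/*
 * Terminology used by the two interrupt handler macros: a "native"
 * interrupt is one handled by OP-TEE itself via itr_core_handler(),
 * while a "foreign" interrupt belongs to the normal world, so the thread
 * is suspended and execution leaves via thread_foreign_intr_exit() to
 * let the normal world handle it. Whether IRQ or FIQ is the foreign one
 * depends on CFG_ARM_GICV3, see thread_irq_handler and
 * thread_fiq_handler below.
 */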

/* The handler of foreign interrupt. */
.macro foreign_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	sub	lr, lr, #4
	push	{r12}

	.ifc	\mode\(),fiq
	/*
	 * If a foreign (non-secure) interrupt is received as an FIQ we
	 * need to check whether we're in a saveable state or whether we
	 * need to mask the interrupt to be handled later.
	 *
	 * The window when this is needed is quite narrow: it's between
	 * entering the exception vector and until the "cpsid" instruction
	 * of the handler has been executed.
	 *
	 * Currently we can save the state properly if the FIQ is received
	 * while in user or svc (kernel) mode.
	 *
	 * If we're returning to abort, undef or irq mode we're returning
	 * with the mapping restored. This is OK since, before the handler
	 * we're returning to eventually returns to user mode, the reduced
	 * mapping will be restored.
	 */
	mrs	r12, spsr
	and	r12, r12, #ARM32_CPSR_MODE_MASK
	cmp	r12, #ARM32_CPSR_MODE_USR
	cmpne	r12, #ARM32_CPSR_MODE_SVC
	beq	1f
	mrs	r12, spsr
	orr	r12, r12, #ARM32_CPSR_F
	msr	spsr_fsxc, r12
	pop	{r12}
	movs	pc, lr
1:
	.endif

	push	{lr}

	.ifc	\mode\(),fiq
	bl	thread_save_state_fiq
	.else
	bl	thread_save_state
	.endif

#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
	/*
	 * Prevent leaking information about which cache entries have
	 * been used. We're relying on the secure monitor/dispatcher to
	 * take care of the BTB.
	 */
	mov	r0, #DCACHE_OP_CLEAN_INV
	bl	dcache_op_louis
	write_iciallu
#endif

	mov	r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	r1, spsr
	pop	{r2}
	pop	{r12}
	blx	thread_state_suspend

	/*
	 * Switch to SVC mode and copy the current stack pointer as it
	 * already is the tmp stack.
	 */
	mov	r1, sp
	cps	#CPSR_MODE_SVC
	mov	sp, r1

	/* Passing thread index in r0 */
	b	thread_foreign_intr_exit
.endm

	.section .text.thread_excp_vect
	.align	5
FUNC thread_excp_vect , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	.			/* Reset */
	b	thread_und_handler	/* Undefined instruction */
	b	thread_svc_handler	/* System call */
	b	thread_pabort_handler	/* Prefetch abort */
	b	thread_dabort_handler	/* Data abort */
	b	.			/* Reserved */
	b	thread_irq_handler	/* IRQ */
	b	thread_fiq_handler	/* FIQ */
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	.macro vector_prologue_spectre
		/*
		 * This depends on SP being 8 byte aligned, that is, the
		 * lowest three bits in SP are zero.
		 *
		 * To avoid unexpected speculation we need to invalidate
		 * the branch predictor before we do the first branch. It
		 * doesn't matter whether it's a conditional or an
		 * unconditional branch, speculation can still occur.
		 *
		 * The idea is to form a specific bit pattern in the lowest
		 * three bits of SP depending on which entry in the vector
		 * we enter via. This is done by adding 1 to SP in each
		 * entry but the last.
		 */
		add	sp, sp, #1	/* 7:111 Reset */
		add	sp, sp, #1	/* 6:110 Undefined instruction */
		add	sp, sp, #1	/* 5:101 Secure monitor call */
		add	sp, sp, #1	/* 4:100 Prefetch abort */
		add	sp, sp, #1	/* 3:011 Data abort */
		add	sp, sp, #1	/* 2:010 Reserved */
		add	sp, sp, #1	/* 1:001 IRQ */
		write_tpidrprw r0	/* 0:000 FIQ */
	.endm

	.align	5
	.global thread_excp_vect_workaround_a15
thread_excp_vect_workaround_a15:
	vector_prologue_spectre
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f

	.align	5
	.global thread_excp_vect_workaround
thread_excp_vect_workaround:
	vector_prologue_spectre
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

1:	and	r0, sp, #(BIT(0) | BIT(1) | BIT(2))
	bic	sp, sp, #(BIT(0) | BIT(1) | BIT(2))
	add	pc, pc, r0, LSL #3
	nop

	read_tpidrprw r0
	b	thread_fiq_handler	/* FIQ */
	read_tpidrprw r0
	b	thread_irq_handler	/* IRQ */
	read_tpidrprw r0
	b	.			/* Reserved */
	read_tpidrprw r0
	b	thread_dabort_handler	/* Data abort */
	read_tpidrprw r0
	b	thread_pabort_handler	/* Prefetch abort */
	read_tpidrprw r0
	b	thread_svc_handler	/* System call */
	read_tpidrprw r0
	b	thread_und_handler	/* Undefined instruction */
	read_tpidrprw r0
	b	.			/* Reset */
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/
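
/*
 * How the dispatch at label 1 above works (a sketch based on the code):
 * every vector entry but the last adds 1 to SP, so the three lowest bits
 * of SP hold 7 for Reset down to 0 for FIQ. Those bits are copied into
 * r0 and cleared from SP. Reading PC in ARM state yields the address of
 * the current instruction plus 8, which here is the address of the first
 * "read_tpidrprw r0" stub, and each stub is two 4-byte instructions, so
 * "add pc, pc, r0, LSL #3" lands on the stub matching the vector entry
 * that was taken.
 */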

thread_und_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	mrs	r1, spsr
	tst	r1, #CPSR_T
	subne	lr, lr, #2
	subeq	lr, lr, #4
	mov	r0, #ABORT_TYPE_UNDEF
	b	thread_abort_common

thread_dabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #8
	mov	r0, #ABORT_TYPE_DATA
	b	thread_abort_common

thread_pabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #4
	mov	r0, #ABORT_TYPE_PREFETCH

thread_abort_common:
	/*
	 * At this label:
	 * the CPU is in undef or abort mode
	 * sp is still pointing to struct thread_core_local belonging to
	 * this core.
	 * {r0, r1} are saved in struct thread_core_local pointed to by sp
	 * {r2-r11, ip} are untouched.
	 * r0 holds the first argument for abort_handler()
	 */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr	r1, r1, #THREAD_CLF_ABORT

	/*
	 * Select stack and update flags accordingly.
	 *
	 * Normal case:
	 * If the abort stack is unused select that.
	 *
	 * Fatal error handling:
	 * If we're already using the abort stack, as noted by bit
	 * (THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT) in the flags
	 * field, we're selecting the temporary stack instead to be able
	 * to make a stack trace of the abort in abort mode.
	 *
	 * r1 is used as a temporary stack pointer until we've switched to
	 * system mode.
	 */
	tst	r1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	orrne	r1, r1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	ldrne	r1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	ldreq	r1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]

	/*
	 * Store registers on the stack, filling in struct
	 * thread_abort_regs starting from the end of the struct:
	 * {r2-r11, ip}.
	 * Then load the previously saved {r0-r1} and store them, followed
	 * by {pad, spsr, elr}.
	 * After this only {usr_sp, usr_lr} are missing in the struct.
	 */
	stmdb	r1!, {r2-r11, ip}	/* Push on the selected stack */
	ldrd	r2, r3, [sp, #THREAD_CORE_LOCAL_R0]
	/* Push the original {r0-r1} on the selected stack */
	stmdb	r1!, {r2-r3}
	mrs	r3, spsr
	/* Push {pad, spsr, elr} on the selected stack */
	stmdb	r1!, {r2, r3, lr}

	cps	#CPSR_MODE_SYS
	str	lr, [r1, #-4]!
	str	sp, [r1, #-4]!
	mov	sp, r1

	bl	abort_handler

	mov	ip, sp
	ldr	sp, [ip], #4
	ldr	lr, [ip], #4

	/*
	 * Even if we entered via CPSR_MODE_UND, we are returning via
	 * CPSR_MODE_ABT. It doesn't matter as lr and spsr are assigned
	 * here.
	 */
	cps	#CPSR_MODE_ABT
	ldm	ip!, {r0, r1, lr}	/* r0 is pad */
	msr	spsr_fsxc, r1

	/* Update core local flags */
	ldr	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	r0, r0, #THREAD_CLF_SAVED_SHIFT
	str	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	cmp_spsr_user_mode r1
	ldm	ip, {r0-r11, ip}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_abort_common */
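
/*
 * The stores in thread_abort_common above build a struct
 * thread_abort_regs on the selected stack. Assuming the ARM32 layout of
 * that struct (roughly: usr_sp, usr_lr, pad, spsr, elr, r0-r11, ip),
 * filling it backwards with stmdb yields, in ascending address order:
 * usr_sp, usr_lr, pad, spsr, elr, r0, r1, r2-r11, ip, which is what
 * abort_handler() receives through its second argument.
 */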

thread_svc_handler:
	cpsid	aif

	maybe_restore_mapping

	push	{r0-r7, lr}
	mrs	r0, spsr
	push	{r0}
	mov	r0, sp
	bl	tee_svc_handler
	cpsid	aif	/* In case something was unmasked */
	pop	{r0}
	msr	spsr_fsxc, r0
	cmp_spsr_user_mode r0
	pop	{r0-r7, lr}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_svc_handler */

thread_fiq_handler:
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
	/* end thread_fiq_handler */

thread_irq_handler:
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
	/* end thread_irq_handler */

	/*
	 * Returns to user mode.
	 * Expects to be jumped to with lr pointing to the user space
	 * address to jump to and spsr holding the desired cpsr. Async
	 * abort, irq and fiq should be masked.
	 */
eret_to_user_mode:
	write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	write_tpidruro r1
#endif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r0, =thread_user_kcode_offset
	ldr	r0, [r0]
	read_vbar r1
	sub	r1, r1, r0
	write_vbar r1
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r1, =1f
	sub	r1, r1, r0
	bx	r1
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r0, r1
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r0, r0, #CORE_MMU_L1_TBL_OFFSET
#endif
	/* switch to user ASID */
	orr	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
	write_ttbr0_64bit r0, r1
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbr1 r0
	add	r0, r0, #CORE_MMU_L1_TBL_OFFSET
	write_ttbr1 r0
	isb
#endif
	read_contextidr r0
	orr	r0, r0, #BIT(0)
	write_contextidr r0
	isb
#endif /*!CFG_WITH_LPAE*/

	read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	read_tpidruro r1
#endif

	movs	pc, lr
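
/*
 * A note on the mapping switch above (inferred from the code in this
 * file): user mode runs with ASID bit 0 set (CONTEXTIDR bit 0, or the
 * corresponding ASID bit in TTBR0 with LPAE) and, with
 * CFG_CORE_UNMAP_CORE_AT_EL0, with the translation table at
 * CORE_MMU_L1_TBL_OFFSET and VBAR moved up by thread_user_kcode_offset.
 * maybe_restore_mapping tests that same ASID bit on exception entry to
 * decide whether the full kernel mapping has to be restored.
 */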

	/*
	 * void icache_inv_user_range(void *addr, size_t size);
	 *
	 * This function has to execute with the user space ASID active,
	 * which means executing with the reduced mapping, so the code
	 * needs to be located here together with the vector.
	 */
	.global icache_inv_user_range
	.type icache_inv_user_range , %function
icache_inv_user_range:
	push	{r4-r7}

	/* Mask all exceptions */
	mrs	r4, cpsr	/* This register must be preserved */
	cpsid	aif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r2, =thread_user_kcode_offset
	ldr	r2, [r2]
	read_vbar r5		/* This register must be preserved */
	sub	r3, r5, r2
	write_vbar r3
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r3, =1f
	sub	r3, r3, r2
	bx	r3
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r6, r7	/* These registers must be preserved */
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r2, r6, #CORE_MMU_L1_TBL_OFFSET
#endif
	/* switch to user ASID */
	orr	r3, r7, #BIT(TTBR_ASID_SHIFT - 32)
	write_ttbr0_64bit r2, r3
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbr1 r6		/* This register must be preserved */
	add	r2, r6, #CORE_MMU_L1_TBL_OFFSET
	write_ttbr1 r2
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	read_contextidr r7	/* This register must be preserved */
	orr	r2, r7, #BIT(0)
	write_contextidr r2
	isb
#endif /*!CFG_WITH_LPAE*/

	/*
	 * Do the actual icache invalidation
	 */

	/* Calculate minimum icache line size, result in r2 */
	read_ctr r3
	and	r3, r3, #CTR_IMINLINE_MASK
	mov	r2, #CTR_WORD_SIZE
	lsl	r2, r2, r3

	add	r1, r0, r1
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	write_icimvau r0
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b

	/* Invalidate entire branch predictor array inner shareable */
	write_bpiallis

	dsb	ishst
	isb

#ifdef CFG_WITH_LPAE
	write_ttbr0_64bit r6, r7
	isb
#else /*!CFG_WITH_LPAE*/
	write_contextidr r7
	isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_ttbr1 r6
	isb
#endif
#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_vbar r5
	isb
	/*
	 * The PC is adjusted unconditionally to guard against the
	 * case where there was an FIQ just before we did the "cpsid aif".
	 */
	ldr	r0, =1f
	bx	r0
1:
#endif

	msr	cpsr_fsxc, r4	/* Restore exceptions */
	pop	{r4-r7}
	bx	lr		/* End of icache_inv_user_range() */

	/*
	 * Make sure that literals are placed before the
	 * thread_excp_vect_end label.
	 */
	.pool
UNWIND(	.fnend)
	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect
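
/*
 * A worked example of the line size calculation in
 * icache_inv_user_range() above: CTR.IminLine holds log2 of the number
 * of words in the smallest instruction cache line, so the line size in
 * bytes is CTR_WORD_SIZE (4) shifted left by that value. With
 * IminLine = 4 this gives 4 << 4 = 64-byte lines; the loop then steps
 * through [addr, addr + size) in 64-byte strides after aligning the
 * start address down to a line boundary.
 */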