/*
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm-defines.h>
#include <asm.S>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/thread_defs.h>
#include <kernel/unwind.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

#include "thread_private.h"

	.section .text.thread_asm

LOCAL_FUNC vector_std_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r0-r7}
	mov	r0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (an error was detected) it does a normal
	 * "C" return.
	 */
	pop	{r1-r8}
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_std_smc_entry

LOCAL_FUNC vector_fast_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r0-r7}
	mov	r0, sp
	bl	thread_handle_fast_smc
	pop	{r1-r8}
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	ldr	lr, =thread_nintr_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_fiq_entry

LOCAL_FUNC vector_cpu_on_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_on_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_on_entry
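/*
 * The std/fast SMC entries above hand thread_handle_std_smc() and
 * thread_handle_fast_smc() a pointer (in r0) to the eight registers
 * just pushed. A minimal sketch of the assumed C-side view of that
 * frame (member names are illustrative; thread.h holds the
 * authoritative definition of struct thread_smc_args):
 *
 *	struct thread_smc_args {
 *		uint32_t a0;	// SMC function ID on entry, result on exit
 *		uint32_t a1;	// parameter/result
 *		uint32_t a2;	// parameter/result
 *		uint32_t a3;	// parameter/result
 *		uint32_t a4;	// parameter/result
 *		uint32_t a5;	// parameter/result
 *		uint32_t a6;	// parameter/result
 *		uint32_t a7;	// parameter/result
 *	};
 *
 * The pop {r1-r8} that follows each call moves a0-a7 into r1-r8 for
 * the TEESMC_OPTEED_RETURN_CALL_DONE SMC.
 */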

LOCAL_FUNC vector_cpu_off_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_off_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_suspend_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_resume_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_system_off_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_system_reset_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_system_reset_entry
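
/*
 * All vector_*_entry routines above share one pattern: load a handler
 * pointer installed by C code, call it, and report the result to the
 * monitor with an SMC. A hedged sketch of the assumed C side (the
 * handler name and exact signature are illustrative assumptions):
 *
 *	static uint32_t my_cpu_on_handler(uint32_t a0, uint32_t a1)
 *	{
 *		// hypothetical handler: bring this CPU into a usable state
 *		return 0;
 *	}
 *
 *	thread_cpu_on_handler_ptr = my_cpu_on_handler;
 *
 * The handler's return value ends up in r1 of the *_DONE SMC.
 */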

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization. It is also used when compiled with the internal
 * monitor, but the cpu_*_entry and system_*_entry vectors are not used
 * then.
 *
 * Note that ARM-TF depends on the layout of this vector table; any
 * change in layout has to be synchronized with ARM-TF.
 */
FUNC thread_vector_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
UNWIND(	.fnend)
END_FUNC thread_vector_table

FUNC thread_set_abt_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_ABT
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_abt_sp

FUNC thread_set_und_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_UND
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_und_sp

FUNC thread_set_irq_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_irq_sp

FUNC thread_set_fiq_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_fiq_sp

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */

	cps	#CPSR_MODE_SYS
	ldm	r12!, {sp, lr}

	cps	#CPSR_MODE_SVC
	ldm	r12!, {r1, sp, lr}
	msr	spsr_fsxc, r1

	cps	#CPSR_MODE_SVC
	ldm	r12, {r1, r2}
	push	{r1, r2}

	ldm	r0, {r0-r12}

	/* Restore CPSR and jump to the instruction to resume at */
	rfefd	sp!
UNWIND(	.fnend)
END_FUNC thread_resume

/*
 * Disables IRQ and FIQ and saves the state of a thread from FIQ mode,
 * which has banked r8-r12 registers; returns the original CPSR.
 */
LOCAL_FUNC thread_save_state_fiq , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mov	r9, lr

	/*
	 * Uses the stack for temporary storage, while storing the needed
	 * context in the thread context struct.
	 */

	mrs	r8, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}

	cps	#CPSR_MODE_SYS
	stm	r0!, {r8-r12}
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	stm	r0!, {r1, sp, lr}

	/* Back to FIQ mode */
	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r8			/* Return original CPSR */
	bx	r9
UNWIND(	.fnend)
END_FUNC thread_save_state_fiq
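
/*
 * thread_resume() and thread_save_state_fiq() above, and
 * thread_save_state() below, all walk the same structure with
 * post-indexed ldm/stm. The offset arithmetic (e.g. r0 + 13*4 for the
 * word following r0-r12) matches this assumed layout of
 * struct thread_ctx_regs (a sketch; thread.h is authoritative):
 *
 *	struct thread_ctx_regs {
 *		uint32_t r0, r1, r2, r3, r4, r5, r6;
 *		uint32_t r7, r8, r9, r10, r11, r12;
 *		uint32_t usr_sp;
 *		uint32_t usr_lr;
 *		uint32_t svc_spsr;
 *		uint32_t svc_sp;
 *		uint32_t svc_lr;
 *		uint32_t pc;
 *		uint32_t cpsr;
 *	};
 *
 * rfefd sp! at the end of thread_resume() consumes the final
 * {pc, cpsr} pair pushed on the stack.
 */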

/*
 * Disables IRQ and FIQ and saves the state of a thread, returns the
 * original CPSR.
 */
LOCAL_FUNC thread_save_state , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r12, lr}
	/*
	 * Uses the stack for temporary storage, while storing the needed
	 * context in the thread context struct.
	 */

	mrs	r12, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mov	r5, r12			/* Save CPSR in a preserved register */
	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}
	stm	r0!, {r8-r11}

	pop	{r12, lr}
	stm	r0!, {r12}

	cps	#CPSR_MODE_SYS
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	stm	r0!, {r1, sp, lr}

	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r5			/* Return original CPSR */
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_save_state

FUNC thread_std_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* Pass r0-r7 in a struct thread_smc_args */
	push	{r0-r7}
	mov	r0, sp
	bl	__thread_std_smc_entry
	/*
	 * Load the returned r0-r3 into preserved registers and skip the
	 * "returned" r4-r7 since they will not be returned to normal
	 * world.
	 */
	pop	{r4-r7}
	add	sp, #(4 * 4)

	/* Disable interrupts before switching to temporary stack */
	cpsid	aif
	bl	thread_get_tmp_sp
	mov	sp, r0

	bl	thread_state_free

	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	r1, r4
	mov	r2, r5
	mov	r3, r6
	mov	r4, r7
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC thread_std_smc_entry


/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
/*
 * r0-r2 are used to pass parameters to normal world.
 * r0-r5 are used to pass the return value back from normal world.
 *
 * Note that r3 is used to pass "resume information", that is, which
 * thread it is that should resume.
 *
 * Since this function follows the AAPCS we need to preserve r4-r5,
 * which are otherwise modified when returning from normal world.
 */
UNWIND(	.fnstart)
	push	{r4-r5, lr}
UNWIND(	.save	{r4-r5, lr})
	push	{r0}
UNWIND(	.save	{r0})

	bl	thread_save_state
	mov	r4, r0			/* Save original CPSR */

	/*
	 * Switch to temporary stack and SVC mode. Save CPSR to resume into.
	 */
	bl	thread_get_tmp_sp
	ldr	r5, [sp]		/* Get pointer to rv[] */
	cps	#CPSR_MODE_SVC		/* Change to SVC mode */
	mov	sp, r0			/* Switch to tmp stack */

	mov	r0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	mov	r1, r4			/* CPSR to restore */
	ldr	r2, =.thread_rpc_return
	bl	thread_state_suspend
	mov	r4, r0			/* Supply thread index */
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldm	r5, {r1-r3}		/* Load rv[] into r1-r3 */
	smc	#0
	b	.	/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * it had when thread_save_state() was called above.
	 *
	 * Jumps here from thread_resume above when an RPC has returned.
	 * The IRQ and FIQ bits are restored to what they were when this
	 * function was originally entered.
	 */
	pop	{r12}			/* Get pointer to rv[] */
	stm	r12, {r0-r5}		/* Store r0-r5 into rv[] */
	pop	{r4-r5, pc}
UNWIND(	.fnend)
END_FUNC thread_rpc
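
/*
 * Hedged usage sketch (an illustration, not a verbatim caller): C code
 * issuing an RPC fills in rv[] with the request for normal world and
 * finds the answer there when thread_rpc() returns:
 *
 *	uint32_t rv[THREAD_RPC_NUM_ARGS] = { 0 };
 *
 *	rv[0] = OPTEE_SMC_RETURN_RPC_CMD;	// example request code
 *	thread_rpc(rv);
 *	// here rv[0..5] hold r0-r5 as delivered by normal world
 */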

/* The handler of native interrupts. */
.macro native_intr_handler mode:req
	/*
	 * FIQ and IRQ have a +4 offset for lr compared to the preferred
	 * return address.
	 */
	sub	lr, lr, #4

	/*
	 * We're always saving {r0-r3}. In IRQ mode we're also saving
	 * r12. In FIQ mode we're additionally saving the banked FIQ
	 * registers {r8-r12}, because the secure monitor doesn't save
	 * those. The treatment of the banked FIQ registers is somewhat
	 * analogous to the lazy save of VFP registers.
	 */
	.ifc	\mode\(),fiq
	push	{r0-r3, r8-r12, lr}
	.else
	push	{r0-r3, r12, lr}
	.endif
	bl	thread_check_canaries
	ldr	lr, =thread_nintr_handler_ptr
	ldr	lr, [lr]
	blx	lr
	.ifc	\mode\(),fiq
	pop	{r0-r3, r8-r12, lr}
	.else
	pop	{r0-r3, r12, lr}
	.endif
	movs	pc, lr
.endm

/* The handler of foreign interrupts. */
.macro foreign_intr_handler mode:req
	.ifc	\mode\(),irq
	/*
	 * Disable FIQ if the foreign interrupt is sent as IRQ.
	 * IRQ mode is set up to use the tmp stack, so FIQ has to be
	 * disabled before touching the stack. We can also assign
	 * SVC sp from IRQ sp to get SVC mode into the state we
	 * need when doing the SMC below.
	 * If it is sent as FIQ, the IRQ has already been masked by
	 * hardware.
	 */
	cpsid	f
	.endif
	sub	lr, lr, #4
	push	{lr}
	push	{r12}

	.ifc	\mode\(),fiq
	bl	thread_save_state_fiq
	.else
	bl	thread_save_state
	.endif

	mov	r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	r1, spsr
	pop	{r12}
	pop	{r2}
	blx	thread_state_suspend
	mov	r4, r0			/* Supply thread index */

	/*
	 * Switch to SVC mode and copy the current stack pointer, as it
	 * already is the tmp stack.
	 */
	mov	r0, sp
	cps	#CPSR_MODE_SVC
	mov	sp, r0

	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	r1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
	mov	r2, #0
	mov	r3, #0
	/* r4 is already filled in above */
	smc	#0
	b	.	/* SMC should not return */
.endm

LOCAL_FUNC thread_fiq_handler , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
UNWIND(	.fnend)
END_FUNC thread_fiq_handler

LOCAL_FUNC thread_irq_handler , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
UNWIND(	.fnend)
END_FUNC thread_irq_handler

FUNC thread_init_vbar , :
UNWIND(	.fnstart)
	/* Set vector (VBAR) */
	ldr	r0, =thread_vect_table
	write_vbar r0
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_init_vbar
KEEP_PAGER thread_init_vbar
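
/*
 * For reference: write_vbar is a macro from arm32_macros.S. On ARMv7-A
 * the Vector Base Address Register is written through a CP15 access,
 * so the macro is expected to expand to the equivalent of:
 *
 *	mcr	p15, 0, r0, c12, c0, 0	@ VBAR = r0
 *
 * (a sketch of the architectural encoding, not a copy of the macro).
 */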

/*
 * Below are the low level routines handling entry to and return from
 * user mode.
 *
 * thread_enter_user_mode() saves all registers that user mode can
 * change, so that kernel mode can restore needed registers when
 * resuming execution after the call to thread_enter_user_mode() has
 * returned. thread_enter_user_mode() doesn't return directly, since it
 * enters user mode instead; it's thread_unwind_user_mode() that does
 * the returning, by restoring the registers saved by
 * thread_enter_user_mode().
 *
 * There are three ways for thread_enter_user_mode() to return to its
 * caller: the user TA calls utee_return, the user TA calls utee_panic,
 * or an abort occurs.
 *
 * Calls to utee_return or utee_panic are handled as:
 * thread_svc_handler() -> tee_svc_handler() -> tee_svc_do_call(),
 * which calls syscall_return() or syscall_panic().
 *
 * These functions return normally, except thread_svc_handler(), which
 * is an exception handling routine and therefore reads the return
 * address and SPSR to restore from the stack. syscall_return() and
 * syscall_panic() change the return address and SPSR used by
 * thread_svc_handler() so that, instead of returning into user mode as
 * with other syscalls, it returns into thread_unwind_user_mode() in
 * kernel mode. When thread_svc_handler() returns, the stack pointer is
 * at the point where thread_enter_user_mode() left it, so this is
 * where thread_unwind_user_mode() can operate.
 *
 * Aborts are handled in a similar way, but by thread_abort_handler()
 * instead: when the pager sees an abort from user mode that can't be
 * handled, it updates the SPSR and return address used by
 * thread_abort_handler() to return into thread_unwind_user_mode()
 * instead.
 */
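
/*
 * Hedged sketch of how a kernel-mode caller is assumed to use the
 * function below (the SPSR construction and variable names are
 * illustrative assumptions, not copied from the C code):
 *
 *	uint32_t exit_status0 = 0;
 *	uint32_t exit_status1 = 0;
 *	uint32_t spsr = CPSR_MODE_USR;
 *
 *	if (entry_func & 1)
 *		spsr |= CPSR_T;		// Thumb entry point
 *	ret = __thread_enter_user_mode(a0, a1, a2, a3, user_sp,
 *				       entry_func, spsr,
 *				       &exit_status0, &exit_status1);
 */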

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *		unsigned long a2, unsigned long a3, unsigned long user_sp,
 *		unsigned long user_func, unsigned long spsr,
 *		uint32_t *exit_status0, uint32_t *exit_status1)
 */
FUNC __thread_enter_user_mode , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/*
	 * Save all registers to allow syscall_return() to resume execution
	 * as if this function had returned. This is also used in
	 * syscall_panic().
	 *
	 * If the stack usage of this function is changed,
	 * thread_unwind_user_mode() has to be updated.
	 */
	push	{r4-r12, lr}

	ldr	r4, [sp, #(10 * 0x4)]	/* user stack pointer */
	ldr	r5, [sp, #(11 * 0x4)]	/* user function */
	ldr	r6, [sp, #(12 * 0x4)]	/* spsr */

	/*
	 * Set the Saved Program Status Register (SPSR) to user mode to
	 * allow entry of user mode through the movs below.
	 */
	msr	spsr_cxsf, r6

	/*
	 * Save old user sp and set new user sp.
	 */
	cps	#CPSR_MODE_SYS
	mov	r6, sp
	mov	sp, r4
	cps	#CPSR_MODE_SVC
	push	{r6, r7}

	/*
	 * Don't allow return from this function; the return is done
	 * through thread_unwind_user_mode() below.
	 */
	mov	lr, #0
	/* Call the user function with its arguments */
	movs	pc, r5
UNWIND(	.fnend)
END_FUNC __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	ip, [sp, #(15 * 0x4)]	/* &ctx->panicked */
	str	r1, [ip]
	ldr	ip, [sp, #(16 * 0x4)]	/* &ctx->panic_code */
	str	r2, [ip]

	/* Restore old user sp */
	pop	{r4, r7}
	cps	#CPSR_MODE_SYS
	mov	sp, r4
	cps	#CPSR_MODE_SVC

	pop	{r4-r12, pc}	/* Match the push in __thread_enter_user_mode() */
UNWIND(	.fnend)
END_FUNC thread_unwind_user_mode
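
/*
 * The abort handler below builds a register frame on the selected
 * stack with stmdb, working from the end of the struct towards its
 * start. The assumed layout of struct thread_abort_regs (a sketch;
 * thread.h is authoritative) is, from low to high address:
 *
 *	struct thread_abort_regs {
 *		uint32_t usr_sp;
 *		uint32_t usr_lr;
 *		uint32_t pad;
 *		uint32_t spsr;
 *		uint32_t elr;
 *		uint32_t r0, r1, r2, r3, r4, r5;
 *		uint32_t r6, r7, r8, r9, r10, r11;
 *		uint32_t ip;
 *	};
 *
 * sp is left pointing at usr_sp, which is what abort_handler() receives
 * as its second argument.
 */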

LOCAL_FUNC thread_abort_handler , :
thread_und_handler:
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	mrs	r1, spsr
	tst	r1, #CPSR_T
	subne	lr, lr, #2
	subeq	lr, lr, #4
	mov	r0, #ABORT_TYPE_UNDEF
	b	.thread_abort_generic

thread_dabort_handler:
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #8
	mov	r0, #ABORT_TYPE_DATA
	b	.thread_abort_generic

thread_pabort_handler:
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #4
	mov	r0, #ABORT_TYPE_PREFETCH

.thread_abort_generic:
	/*
	 * At this label:
	 * cpsr is in mode undef or abort
	 * sp is still pointing to the struct thread_core_local belonging
	 * to this core.
	 * {r0, r1} are saved in the struct thread_core_local pointed to
	 * by sp.
	 * {r2-r11, ip} are untouched.
	 * r0 holds the first argument for abort_handler()
	 */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr	r1, r1, #THREAD_CLF_ABORT

	/*
	 * Select stack and update flags accordingly.
	 *
	 * Normal case:
	 * If the abort stack is unused, select that.
	 *
	 * Fatal error handling:
	 * If we're already using the abort stack, as noted by bit
	 * (THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT) in the flags
	 * field, we select the temporary stack instead, to be able to
	 * make a stack trace of the abort in abort mode.
	 *
	 * r1 is initialized as a temporary stack pointer until we've
	 * switched to system mode.
	 */
	tst	r1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	orrne	r1, r1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	ldrne	r1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	ldreq	r1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]

	/*
	 * Store registers on the selected stack to fit struct
	 * thread_abort_regs, starting from the end of the struct:
	 * first {r2-r11, ip}, then the previously saved {r0-r1}, then
	 * {pad, spsr, elr}. After this, only {usr_sp, usr_lr} are
	 * missing from the struct.
	 */
	stmdb	r1!, {r2-r11, ip}	/* Push on the selected stack */
	ldrd	r2, r3, [sp, #THREAD_CORE_LOCAL_R0]
	/* Push the original {r0-r1} on the selected stack */
	stmdb	r1!, {r2-r3}
	mrs	r3, spsr
	/* Push {pad, spsr, elr} on the selected stack */
	stmdb	r1!, {r2, r3, lr}

	cps	#CPSR_MODE_SYS
	str	lr, [r1, #-4]!
	str	sp, [r1, #-4]!
	mov	sp, r1

	bl	abort_handler

	mov	ip, sp
	ldr	sp, [ip], #4
	ldr	lr, [ip], #4

	/*
	 * Even if we entered via CPSR_MODE_UND, we are returning via
	 * CPSR_MODE_ABT. It doesn't matter, as lr and spsr are assigned
	 * here.
	 */
	cps	#CPSR_MODE_ABT
	ldm	ip!, {r0, r1, lr}	/* r0 is pad */
	msr	spsr_fsxc, r1

	/* Update core local flags */
	ldr	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	r0, r0, #THREAD_CLF_SAVED_SHIFT
	str	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	ldm	ip, {r0-r11, ip}

	movs	pc, lr
UNWIND(	.fnend)
END_FUNC thread_abort_handler

LOCAL_FUNC thread_svc_handler , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r0-r7, lr}
	mrs	r0, spsr
	push	{r0}
	mov	r0, sp
	bl	tee_svc_handler
	pop	{r0}
	msr	spsr_fsxc, r0
	pop	{r0-r7, lr}
	movs	pc, lr
UNWIND(	.fnend)
END_FUNC thread_svc_handler

	.align	5
LOCAL_FUNC thread_vect_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	.			/* Reset */
	b	thread_und_handler	/* Undefined instruction */
	b	thread_svc_handler	/* System call */
	b	thread_pabort_handler	/* Prefetch abort */
	b	thread_dabort_handler	/* Data abort */
	b	.			/* Reserved */
	b	thread_irq_handler	/* IRQ */
	b	thread_fiq_handler	/* FIQ */
UNWIND(	.fnend)
END_FUNC thread_vect_table