/*
 * Copyright (c) 2015, Linaro Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm.S>
#include <arm64_macros.S>
#include <arm64.h>
#include <sm/teesmc.h>
#include <sm/teesmc_opteed_macros.h>
#include <sm/teesmc_opteed.h>
#include <kernel/thread_defs.h>
#include <kernel/thread.h>
#include "thread_private.h"

	/*
	 * Computes the address of the current thread's struct thread_ctx:
	 * x<res> = threads + curr_thread * THREAD_CTX_SIZE
	 */
	.macro get_thread_ctx core_local, res, tmp0, tmp1
		ldr	w\tmp0, [\core_local, \
				#THREAD_CORE_LOCAL_CURR_THREAD_OFFSET]
		adr	x\res, threads
		mov	x\tmp1, #THREAD_CTX_SIZE
		madd	x\res, x\tmp0, x\tmp1, x\res
	.endm

	.section .text.thread_asm
LOCAL_FUNC vector_std_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X_OFFS(0), 0, 7
	mov	x0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (an error was detected) it will do a
	 * normal "C" return.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X_OFFS(0), 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_std_smc_entry

LOCAL_FUNC vector_fast_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X_OFFS(0), 0, 7
	mov	x0, sp
	bl	thread_handle_fast_smc
	load_xregs sp, THREAD_SMC_ARGS_X_OFFS(0), 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	adr	x16, thread_fiq_handler_ptr
	ldr	x16, [x16]
	blr	x16
	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fiq_entry

LOCAL_FUNC vector_cpu_on_entry , :
	adr	x16, thread_cpu_on_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_on_entry
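
/*
 * The CPU state entries below follow the same pattern as
 * vector_cpu_on_entry above: call the registered handler through its
 * function pointer, move the handler's return value to x1 and issue an
 * SMC with the matching TEESMC_OPTEED_RETURN_*_DONE code in x0 to hand
 * control back to the secure monitor.
 */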
LOCAL_FUNC vector_cpu_off_entry , :
	adr	x16, thread_cpu_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
	adr	x16, thread_cpu_suspend_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
	adr	x16, thread_cpu_resume_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
	adr	x16, thread_system_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
	adr	x16, thread_system_reset_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.
 *
 * Note that ARM-TF depends on the layout of this vector table; any
 * change in layout has to be synced with ARM-TF.
 */
FUNC thread_vector_table , :
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
END_FUNC thread_vector_table


/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
	load_xregs x0, THREAD_CTX_REGS_SP_OFFSET, 1, 3
	mov	sp, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3
	load_xregs x0, THREAD_CTX_REGS_X_OFFSET(1), 1, 30
	ldr	x0, [x0, THREAD_CTX_REGS_X_OFFSET(0)]
	eret
END_FUNC thread_resume

FUNC thread_std_smc_entry , :
	/* Pass x0-x7 in a struct thread_smc_args */
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X_OFFS(0), 0, 7
	mov	x0, sp

	/* Call the registered handler */
	bl	__thread_std_smc_entry

	/*
	 * Load the returned x0-x3 into preserved registers and skip the
	 * "returned" x4-x7 since they will not be returned to normal
	 * world.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X_OFFS(0), 20, 23
	add	sp, sp, #THREAD_SMC_ARGS_SIZE

	/* Disable interrupts before switching to temporary stack */
	msr	daifset, #(DAIFBIT_FIQ | DAIFBIT_IRQ)
	bl	thread_get_tmp_sp
	mov	sp, x0

	bl	thread_state_free

	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	x1, x20
	mov	x2, x21
	mov	x3, x22
	mov	x4, x23
	smc	#0
	b	.	/* SMC should not return */
END_FUNC thread_std_smc_entry
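
/*
 * thread_rpc() below suspends the calling thread in order to deliver a
 * request to normal world: it saves the callee-saved registers and the
 * stack pointer in the thread context, records .thread_rpc_return as
 * the resume address via thread_state_suspend() and exits through an
 * SMC. Once normal world has serviced the request the thread is
 * resumed through thread_resume() above and execution continues at
 * .thread_rpc_return.
 */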
/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
	/* Read daif and create an SPSR */
	mrs	x1, daif
	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)

	msr	daifset, #DAIFBIT_ALL
	push	x0, xzr
	push	x1, x30
	bl	thread_get_ctx_regs
	ldr	x30, [sp, #8]
	store_xregs x0, THREAD_CTX_REGS_X_OFFSET(19), 19, 30
	mov	x19, x0

	bl	thread_get_tmp_sp
	pop	x1, xzr		/* Match "push x1, x30" above */
	mov	x2, sp
	str	x2, [x19, #THREAD_CTX_REGS_SP_OFFSET]
	ldr	x20, [sp]	/* Get pointer to rv[] */
	mov	sp, x0		/* Switch to tmp stack */

	adr	x2, .thread_rpc_return
	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	bl	thread_state_suspend
	mov	x4, x0		/* Supply thread index */
	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	load_wregs x20, 0, 1, 3	/* Load rv[] into w1-w3 */
	smc	#0
	b	.		/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * stored in THREAD_CTX above.
	 *
	 * Execution continues here, via thread_resume() above, when the
	 * RPC has returned. The IRQ and FIQ bits are restored to what
	 * they were when this function was originally entered.
	 */
	pop	x4, xzr		/* Get pointer to rv[] */
	store_wregs x4, 0, 0, 2	/* Store w0-w2 into rv[] */
	ret
END_FUNC thread_rpc

FUNC thread_init_vbar , :
	adr	x0, thread_vect_table
	msr	vbar_el1, x0
	ret
END_FUNC thread_init_vbar

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *		unsigned long a2, unsigned long a3, unsigned long user_sp,
 *		unsigned long user_func, unsigned long spsr,
 *		uint32_t *exit_status0, uint32_t *exit_status1)
 */
FUNC __thread_enter_user_mode , :
	ldr	x8, [sp]
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR_OFFSET, 7, 8
	store_xregs sp, THREAD_USER_MODE_REC_X_OFFSET(19), 19, 30

	/*
	 * Switch to SP_EL1
	 * Disable exceptions
	 * Save kern sp in x19
	 */
	msr	daifset, #DAIFBIT_ALL
	mov	x19, sp
	msr	spsel, #1

	/*
	 * Save the kernel stack pointer in the thread context
	 */
	/* Get pointer to current thread context */
	get_thread_ctx sp, 21, 20, 22
	/*
	 * Save kernel stack pointer to ensure that el0_svc() uses the
	 * correct stack pointer
	 */
	str	x19, [x21, #THREAD_CTX_KERN_SP_OFFSET]

	/*
	 * Initialize SPSR, ELR_EL1 and SP_EL0 to enter user mode
	 */
	msr	spsr_el1, x6
	/* Set user sp */
	mov	x13, x4		/* Used when running TA in AArch32 */
	msr	sp_el0, x4	/* Used when running TA in AArch64 */
	/* Set user function */
	msr	elr_el1, x5

	/* Jump into user mode */
	eret
END_FUNC __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR_OFFSET]
	str	w1, [x3]
	str	w2, [x4]
	/* Restore x19..x30 */
	load_xregs sp, THREAD_USER_MODE_REC_X_OFFSET(19), 19, 30
	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode
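
/*
 * What follows is the exception vector table installed in VBAR_EL1 by
 * thread_init_vbar() above. The architecture gives each of the 16
 * vectors 32 instructions (0x80 bytes) and requires the table itself
 * to be 2 KiB aligned, hence the .align 7 on each entry, the .align 11
 * on the table and the check_vector_size macro below.
 */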
	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. It is meant to be
	 * placed immediately after the last instruction in the vector
	 * and takes the vector entry label as its parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm


	.align	11
LOCAL_FUNC thread_vect_table , :
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7
sync_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	b	el1_sync_abort
	check_vector_size sync_el1_sp0

	.align	7
irq_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	b	elx_irq
	check_vector_size irq_el1_sp0

	.align	7
fiq_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	b	elx_fiq
	check_vector_size fiq_el1_sp0

	.align	7
SErrorSP0:
	b	SErrorSP0
	check_vector_size SErrorSP0

	/* -----------------------------------------------------
	 * Current EL with SPx : 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionSPx:
	b	SynchronousExceptionSPx
	check_vector_size SynchronousExceptionSPx

	.align	7
IrqSPx:
	b	IrqSPx
	check_vector_size IrqSPx

	.align	7
FiqSPx:
	b	FiqSPx
	check_vector_size FiqSPx

	.align	7
SErrorSPx:
	b	SErrorSPx
	check_vector_size SErrorSPx

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7
el0_sync_a64:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH64_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a64

	.align	7
el0_irq_a64:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	b	elx_irq
	check_vector_size el0_irq_a64

	.align	7
el0_fiq_a64:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	b	elx_fiq
	check_vector_size el0_fiq_a64

	.align	7
SErrorA64:
	b	SErrorA64
	check_vector_size SErrorA64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7
el0_sync_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH32_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a32

	.align	7
el0_irq_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	b	elx_irq
	check_vector_size el0_irq_a32

	.align	7
el0_fiq_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	b	elx_fiq
	check_vector_size el0_fiq_a32

	.align	7
SErrorA32:
	b	SErrorA32
	check_vector_size SErrorA32

END_FUNC thread_vect_table
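
/*
 * el0_svc is entered from the lower EL synchronous exception vectors
 * above when ESR_EL1.EC indicates an SVC. On entry sp is SP_EL1, which
 * points at the core local record where the vector saved the caller's
 * x0..x3.
 */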
LOCAL_FUNC el0_svc , :
	/* Get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* Load saved kernel sp */
	ldr	x0, [x0, #THREAD_CTX_KERN_SP_OFFSET]
	/* Keep pointer to initial record in x1 */
	mov	x1, sp
	/* Switch to SP_EL0 and restore kernel sp */
	msr	spsel, #0
	mov	x2, sp	/* Save SP_EL0 */
	mov	sp, x0

	/* Make room for struct thread_svc_regs */
	sub	sp, sp, #THREAD_SVC_REG_SIZE
	stp	x30, x2, [sp, #THREAD_SVC_REG_X30_OFFS]

	/* Restore x0-x3 */
	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X_OFFSET(2)]
	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X_OFFSET(0)]

	/* Prepare the argument for the handler */
	store_xregs sp, THREAD_SVC_REG_X_OFFS(0), 0, 14
	mrs	x0, elr_el1
	mrs	x1, spsr_el1
	store_xregs sp, THREAD_SVC_REG_ELR_OFFS, 0, 1
	mov	x0, sp

	/*
	 * Unmask FIQ, SError, and debug exceptions since we have nothing
	 * left in sp_el1. Note that the SVC handler is expected to
	 * re-enable IRQs by itself.
	 */
	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)

	/* Call the registered handler */
	adr	x16, thread_svc_handler_ptr
	ldr	x16, [x16]
	blr	x16

	/* Mask all maskable exceptions since we're switching back to sp_el1 */
	msr	daifset, #DAIFBIT_ALL

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This matters when this TA has called another TA, because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	msr	spsel, #1
	get_thread_ctx sp, 0, 1, 2
	msr	spsel, #0
	add	x1, sp, #THREAD_SVC_REG_SIZE
	str	x1, [x0, #THREAD_CTX_KERN_SP_OFFSET]

	/* Restore registers to the required state and return */
	load_xregs sp, THREAD_SVC_REG_ELR_OFFS, 0, 1
	msr	elr_el1, x0
	msr	spsr_el1, x1
	load_xregs sp, THREAD_SVC_REG_X_OFFS(0), 0, 14
	mov	x30, sp
	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0_OFFS]
	mov	sp, x0
	ldr	x0, [x30, THREAD_SVC_REG_X_OFFS(0)]
	ldr	x30, [x30, #THREAD_SVC_REG_X30_OFFS]

	eret
END_FUNC el0_svc
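
/*
 * el1_sync_abort handles synchronous aborts taken while already in
 * EL1. The core local flags are shifted up on entry and shifted back
 * on exit so that a nested (recursive) abort can be detected; a
 * recursive abort keeps the abort stack it is already running on
 * instead of reinitializing it.
 */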
LOCAL_FUNC el1_sync_abort , :
	mov	x0, sp
	msr	spsel, #0

	/* Update core local flags */
	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS_OFFSET]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS_OFFSET]

	/*
	 * Check if we should initialize SP_EL0 or use it as is (recursive
	 * aborts).
	 */
	tst	w1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	mov	x3, sp		/* Save original sp unconditionally */
	b.ne	.keep_sp	/* Recursive abort, keep current sp */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END_OFFSET]
	mov	sp, x2
.keep_sp:

	/*
	 * Save state on stack
	 */
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR_OFFS]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X_OFFSET(0)]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X_OFFS(0)]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X_OFFSET(2)]
	store_xregs sp, THREAD_ABT_REG_X_OFFS(2), 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X_OFFS(30)]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	thread_handle_abort

	/*
	 * Restore state from stack
	 */
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X_OFFS(30)]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X_OFFS(0), 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR_OFFS]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS_OFFSET]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS_OFFSET]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3

	/* Return from exception */
	eret
END_FUNC el1_sync_abort

	/* sp_el0 in x3 */
LOCAL_FUNC el0_sync_abort , :
	/* Load abt_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END_OFFSET]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR_OFFS]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X_OFFSET(0)]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X_OFFS(0)]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X_OFFSET(2)]
	store_xregs sp, THREAD_ABT_REG_X_OFFS(2), 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X_OFFS(30)]
	/* Call handler */
	mov	x0, #0
	mov	x1, sp
	bl	thread_handle_abort
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X_OFFS(30)]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X_OFFS(0), 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in EL1_REC */
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR_OFFS]
	msr	spsr_el1, x0
	msr	sp_el0, x1
	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	/* Return from exception */
	eret
END_FUNC el0_sync_abort

/*
 * struct elx_itr_rec {
 *	uint64_t x[19 - 4];	x4..x18
 *	uint64_t init_rec;
 *	uint64_t pad;
 *	uint64_t lr;
 *	uint64_t sp_el0;
 * };
 */
#define ELX_ITR_REC_X_OFFSET(x)		(8 * ((x) - 4))
#define ELX_ITR_REC_INIT_REC_OFFSET	(8 + ELX_ITR_REC_X_OFFSET(19))
#define ELX_ITR_REC_PAD_OFFSET		(8 + ELX_ITR_REC_INIT_REC_OFFSET)
#define ELX_ITR_REC_LR_OFFSET		(8 + ELX_ITR_REC_PAD_OFFSET)
#define ELX_ITR_REC_SP_EL0_OFFSET	(8 + ELX_ITR_REC_LR_OFFSET)
#define ELX_ITR_REC_SIZE		(8 + ELX_ITR_REC_SP_EL0_OFFSET)
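
/*
 * elx_irq and elx_fiq below handle asynchronous interrupts taken in
 * any of the states covered by the vectors above. elx_irq hands the
 * interrupt over to normal world: it saves the full thread context,
 * suspends the thread and exits via an SMC with TEESMC_RETURN_RPC_IRQ.
 * elx_fiq instead services the secure interrupt in place through
 * thread_fiq_handler_ptr and returns with eret.
 */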
LOCAL_FUNC elx_irq , :
	/* Load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END_OFFSET]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Save registers on stack that can be corrupted by a call to
	 * thread_get_ctx_regs().
	 */
	/* Make room for struct elx_itr_rec */
	sub	sp, sp, #ELX_ITR_REC_SIZE
	/* Store x4..x18 */
	store_xregs sp, ELX_ITR_REC_X_OFFSET(4), 4, 18
	/* Store pointer to initial record */
	str	x0, [sp, #ELX_ITR_REC_INIT_REC_OFFSET]
	/* Store lr and original sp_el0 */
	stp	x30, x2, [sp, #ELX_ITR_REC_LR_OFFSET]

	/*
	 * Get pointer to struct thread_ctx_regs and store the context
	 */
	bl	thread_get_ctx_regs
	/* Restore lr and original sp_el0 */
	ldp	x30, x1, [sp, #ELX_ITR_REC_LR_OFFSET]
	/* Store original sp_el0 */
	str	x1, [x0, #THREAD_CTX_REGS_SP_OFFSET]
	/* Restore x4..x18 */
	load_xregs sp, ELX_ITR_REC_X_OFFSET(4), 4, 18
	/* Store x4..x30 */
	store_xregs x0, THREAD_CTX_REGS_X_OFFSET(4), 4, 30
	/* Get pointer to initial record */
	ldr	x4, [sp, #ELX_ITR_REC_INIT_REC_OFFSET]
	/* Load original x0..x3 into x10..x13 */
	load_xregs x4, THREAD_CORE_LOCAL_X_OFFSET(0), 10, 13
	/* Save original x0..x3 */
	store_xregs x0, THREAD_CTX_REGS_X_OFFSET(0), 10, 13

	/* Remove struct elx_itr_rec from stack */
	add	sp, sp, #ELX_ITR_REC_SIZE

	/*
	 * Mark current thread as suspended
	 */
	mov	w0, #THREAD_FLAGS_EXIT_ON_IRQ
	mrs	x1, spsr_el1
	mrs	x2, elr_el1
	bl	thread_state_suspend
	mov	w4, w0		/* Supply thread index */

	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	w1, =TEESMC_RETURN_RPC_IRQ
	mov	w2, #0
	mov	w3, #0
	/* w4 is already filled in above */
	smc	#0
	b	.		/* SMC should not return */
END_FUNC elx_irq

LOCAL_FUNC elx_fiq , :
	/* Load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END_OFFSET]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Save registers on stack that can be corrupted by a call to
	 * a C function
	 */
	/* Make room for struct elx_itr_rec */
	sub	sp, sp, #ELX_ITR_REC_SIZE
	/* Store x4..x18 */
	store_xregs sp, ELX_ITR_REC_X_OFFSET(4), 4, 18
	/* Store lr and original sp_el0 */
	stp	x30, x2, [sp, #ELX_ITR_REC_LR_OFFSET]

	bl	thread_check_canaries
	adr	x16, thread_fiq_handler_ptr
	ldr	x16, [x16]
	blr	x16

	/*
	 * Restore registers
	 */
	/* Restore x4..x18 */
	load_xregs sp, ELX_ITR_REC_X_OFFSET(4), 4, 18
	/* Load lr and original sp_el0 */
	ldp	x30, x2, [sp, #ELX_ITR_REC_LR_OFFSET]
	/* Restore sp_el0 */
	mov	sp, x2
	/* Switch back to SP_EL1 */
	msr	spsel, #1
	/* Restore x0..x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3

	/* Return from exception */
	eret
END_FUNC elx_fiq