/*
 * Copyright (c) 2015, Linaro Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm.S>
#include <arm64_macros.S>
#include <arm64.h>
#include <sm/teesmc.h>
#include <sm/teesmc_opteed_macros.h>
#include <sm/teesmc_opteed.h>
#include <kernel/thread_defs.h>
#include <kernel/thread.h>
#include "thread_private.h"

	.macro get_thread_ctx core_local, res, tmp0, tmp1
		ldr	\tmp0, [\core_local, \
				#THREAD_CORE_LOCAL_CURR_THREAD_OFFSET]
		adr	\res, threads
		mov	\tmp1, #THREAD_CTX_SIZE
		madd	\res, \tmp0, \tmp1, \res
	.endm

	.section .text.thread_asm
LOCAL_FUNC vector_std_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X_OFFS(0), 0, 7
	mov	x0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (error detected) it will do a normal "C"
	 * return.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X_OFFS(0), 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_std_smc_entry

LOCAL_FUNC vector_fast_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X_OFFS(0), 0, 7
	mov	x0, sp
	bl	thread_handle_fast_smc
	load_xregs sp, THREAD_SMC_ARGS_X_OFFS(0), 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	adr	x16, thread_fiq_handler_ptr
	ldr	x16, [x16]
	blr	x16
	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fiq_entry

LOCAL_FUNC vector_cpu_on_entry , :
	adr	x16, thread_cpu_on_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_on_entry
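
/*
 * The vector_cpu_off/suspend/resume and vector_system_off/reset entries
 * below all follow the same pattern as vector_cpu_on_entry above: call
 * the registered handler through its handler pointer, move the handler's
 * return value to x1 and report completion to the secure monitor with
 * the matching TEESMC_OPTEED_RETURN_*_DONE SMC.
 */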
LOCAL_FUNC vector_cpu_off_entry , :
	adr	x16, thread_cpu_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
	adr	x16, thread_cpu_suspend_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
	adr	x16, thread_cpu_resume_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
	adr	x16, thread_system_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
	adr	x16, thread_system_reset_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.
 *
 * Note that ARM-TF depends on the layout of this vector table; any change
 * in layout has to be synced with ARM-TF.
 */
FUNC thread_vector_table , :
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
END_FUNC thread_vector_table


/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
	load_xregs x0, THREAD_CTX_REGS_SP_OFFSET, 1, 3
	mov	sp, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3
	load_xregs x0, THREAD_CTX_REGS_X_OFFSET(1), 1, 30
	ldr	x0, [x0, THREAD_CTX_REGS_X_OFFSET(0)]
	eret
END_FUNC thread_resume

FUNC thread_std_smc_entry , :
	/* pass x0-x7 in a struct thread_smc_args */
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X_OFFS(0), 0, 7
	mov	x0, sp

	/* Call the registered handler */
	bl	__thread_std_smc_entry

	/*
	 * Load the returned x0-x3 into preserved registers and skip the
	 * "returned" x4-x7 since they will not be returned to normal
	 * world.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X_OFFS(0), 20, 23
	add	sp, sp, #THREAD_SMC_ARGS_SIZE

	/* Disable interrupts before switching to temporary stack */
	msr	daifset, #(DAIFBIT_FIQ | DAIFBIT_IRQ)
	bl	thread_get_tmp_sp
	mov	sp, x0

	bl	thread_state_free

	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	x1, x20
	mov	x2, x21
	mov	x3, x22
	mov	x4, x23
	smc	#0
	b	.	/* SMC should not return */
END_FUNC thread_std_smc_entry
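
/*
 * thread_rpc() below saves the callee-saved registers and the stack
 * pointer in the thread context, suspends the thread with
 * .thread_rpc_return as resume address and issues an SMC from the
 * temporary stack to deliver rv[] to normal world. When the thread is
 * resumed at .thread_rpc_return, x0-x2 are written back into rv[].
 */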
/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
	/* Read daif and create an SPSR */
	mrs	x1, daif
	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)

	msr	daifset, #DAIFBIT_ALL
	push	x0, xzr
	push	x1, x30
	bl	thread_get_ctx_regs
	ldr	x30, [sp, #8]
	store_xregs x0, THREAD_CTX_REGS_X_OFFSET(19), 19, 30
	mov	x19, x0

	bl	thread_get_tmp_sp
	pop	x1, xzr		/* Match "push x1, x30" above */
	mov	x2, sp
	str	x2, [x19, #THREAD_CTX_REGS_SP_OFFSET]
	ldr	x20, [sp]	/* Get pointer to rv[] */
	mov	sp, x0		/* Switch to tmp stack */

	adr	x2, .thread_rpc_return
	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	bl	thread_state_suspend
	mov	x4, x0		/* Supply thread index */
	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	load_wregs x20, 0, 1, 3	/* Load rv[] into w1-w3 */
	smc	#0
	b	.		/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * stored in THREAD_CTX above.
	 *
	 * Jumps here from thread_resume above when RPC has returned. The
	 * IRQ and FIQ bits are restored to what they were when this
	 * function was originally entered.
	 */
	pop	x4, xzr		/* Get pointer to rv[] */
	store_wregs x4, 0, 0, 2	/* Store x0-x2 into rv[] */
	ret
END_FUNC thread_rpc

FUNC thread_init_vbar , :
	adr	x0, thread_vect_table
	msr	vbar_el1, x0
	ret
END_FUNC thread_init_vbar

/*
 * uint32_t thread_enter_user_mode(uint32_t a0, uint32_t a1, uint32_t a2,
 *		uint32_t a3, vaddr_t user_sp, vaddr_t user_func,
 *		uint32_t *exit_status0, uint32_t *exit_status1);
 * See description in thread.h
 */
FUNC thread_enter_user_mode , :
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR_OFFSET, 6, 7
	store_xregs sp, THREAD_USER_MODE_REC_X_OFFSET(19), 19, 30

	/*
	 * Switch to SP_EL1
	 * Save interrupt bits in x23
	 * Disable exceptions
	 * Save kern sp in x19
	 */
	mrs	x23, daif
	msr	daifset, #DAIFBIT_ALL
	mov	x19, sp
	msr	spsel, #1

	/*
	 * Save the kernel stack pointer in the thread context
	 */
	/* get pointer to current thread context */
	get_thread_ctx sp, x21, x20, x22
	/* save kernel stack pointer */
	str	x19, [x21, #THREAD_CTX_KERN_SP_OFFSET]

	/*
	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
	 */
	/* Keep only the AIF bits */
	and	x23, x23, #(SPSR_32_AIF_MASK << SPSR_32_AIF_SHIFT)
	/* Set AArch32 */
	orr	x23, x23, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT)
	/* Set thumb mode if it's a thumb function */
	and	x24, x5, #SPSR_32_T_MASK
	orr	x23, x23, x24, lsl #SPSR_32_T_SHIFT
	msr	spsr_el1, x23
	/* Set user sp */
	mov	x13, x4
	msr	sp_el0, x4	/* TODO remove, only here to invalidate sp_el0 */
	/* Set user function */
	msr	elr_el1, x5

	/* Jump into user mode */
	eret
END_FUNC thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR_OFFSET]
	str	w1, [x3]
	str	w2, [x4]
	/* Restore x19..x30 */
	load_xregs sp, THREAD_USER_MODE_REC_X_OFFSET(19), 19, 30
	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode
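
/*
 * The exception vector table below is installed in VBAR_EL1 by
 * thread_init_vbar() above. The architecture requires the table to be
 * 2 KiB aligned (hence .align 11) and gives each entry 0x80 bytes
 * (hence .align 7), that is, at most 32 instructions.
 */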
	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. This is meant to be placed
	 * immediately after the last instruction in the vector. It takes the
	 * vector entry as the parameter.
	 */
	.macro check_vector_size since
	.if (. - \since) > (32 * 4)
	.error "Vector exceeds 32 instructions"
	.endif
	.endm


	.align	11
LOCAL_FUNC thread_vect_table , :
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7
sync_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	b	el1_sync_abort
	check_vector_size sync_el1_sp0

	.align	7
irq_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	b	elx_irq
	check_vector_size irq_el1_sp0

	.align	7
fiq_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	b	elx_fiq
	check_vector_size fiq_el1_sp0

	.align	7
SErrorSP0:
	b	SErrorSP0
	check_vector_size SErrorSP0

	/* -----------------------------------------------------
	 * Current EL with SPx: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionSPx:
	b	SynchronousExceptionSPx
	check_vector_size SynchronousExceptionSPx

	.align	7
IrqSPx:
	b	IrqSPx
	check_vector_size IrqSPx

	.align	7
FiqSPx:
	b	FiqSPx
	check_vector_size FiqSPx

	.align	7
SErrorSPx:
	b	SErrorSPx
	check_vector_size SErrorSPx

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7
el0_sync_a64:
	b	el0_sync_a64
	check_vector_size el0_sync_a64

	.align	7
IrqA64:
	b	IrqA64
	check_vector_size IrqA64

	.align	7
FiqA64:
	b	FiqA64
	check_vector_size FiqA64

	.align	7
SErrorA64:
	b	SErrorA64
	check_vector_size SErrorA64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7
el0_sync_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH32_SVC
	b.eq	el0_sync_a32_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a32

	.align	7
el0_irq_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	b	elx_irq
	check_vector_size el0_irq_a32

	.align	7
el0_fiq_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	b	elx_fiq
	check_vector_size el0_fiq_a32

	.align	7
SErrorA32:
	b	SErrorA32
	check_vector_size SErrorA32

END_FUNC thread_vect_table
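
/*
 * el0_sync_a32_svc below handles an SVC trapped from 32-bit user mode.
 * It restores the kernel stack saved in the thread context, saves
 * x0-x14 (holding the AArch32 caller's r0-r14) together with elr_el1
 * and spsr_el1 in a register frame on that stack and calls the
 * registered SVC handler with a pointer to the frame.
 */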
LOCAL_FUNC el0_sync_a32_svc , :
	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, x0, x1, x2
	/* load saved kernel sp */
	ldr	x0, [x0, #THREAD_CTX_KERN_SP_OFFSET]
	/* Keep pointer to initial record in x1 */
	mov	x1, sp
	/* Switch to SP_EL0 and restore kernel sp */
	msr	spsel, #0
	mov	sp, x0
	/* Restore x0-x3 */
	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X_OFFSET(2)]
	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X_OFFSET(0)]

	/* Prepare the argument for the handler */
	sub	sp, sp, #THREAD_SVC_REG_SIZE
	store_xregs sp, THREAD_SVC_REG_X_OFFS(0), 0, 14
	mrs	x0, elr_el1
	mrs	x1, spsr_el1
	store_xregs sp, THREAD_SVC_REG_ELR_OFFS, 0, 1
	mov	x0, sp

	/*
	 * Unmask FIQ, SError, and debug exceptions since we have nothing
	 * left in sp_el1.
	 */
	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)

	/* Call the registered handler */
	adr	x16, thread_svc_handler_ptr
	ldr	x16, [x16]
	blr	x16

	/* Mask all maskable exceptions since we're switching back to sp_el1 */
	msr	daifset, #DAIFBIT_ALL

	/* Save kernel sp we'll have after the add below */
	msr	spsel, #1
	get_thread_ctx sp, x0, x1, x2
	msr	spsel, #0
	add	x1, sp, #THREAD_SVC_REG_SIZE
	str	x1, [x0, #THREAD_CTX_KERN_SP_OFFSET]

	/* Restore registers to the required state and return */
	load_xregs sp, THREAD_SVC_REG_ELR_OFFS, 0, 1
	msr	elr_el1, x0
	msr	spsr_el1, x1
	load_xregs sp, THREAD_SVC_REG_X_OFFS(0), 0, 14
	add	sp, sp, #THREAD_SVC_REG_SIZE

	eret
END_FUNC el0_sync_a32_svc

LOCAL_FUNC el1_sync_abort , :
	mov	x0, sp
	msr	spsel, #0

	/* Update core local flags */
	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS_OFFSET]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS_OFFSET]

	/*
	 * Check if we should initialize SP_EL0 or use it as is (recursive
	 * aborts).
	 */
	tst	w1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	mov	x3, sp	/* Save original sp unconditionally */
	b.ne	.keep_sp
	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END_OFFSET]
	mov	sp, x2
.keep_sp:

	/*
	 * Save state on stack
	 */
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR_OFFS]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X_OFFSET(0)]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X_OFFS(0)]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X_OFFSET(2)]
	store_xregs sp, THREAD_ABT_REG_X_OFFS(2), 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X_OFFS(30)]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	thread_handle_abort

	/*
	 * Restore state from stack
	 */
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X_OFFS(30)]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X_OFFS(0), 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR_OFFS]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS_OFFSET]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS_OFFSET]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3

	/* Return from exception */
	eret
END_FUNC el1_sync_abort
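
/*
 * el0_sync_a32_abort below handles synchronous exceptions other than SVC
 * from 32-bit user mode. It mirrors el1_sync_abort above, but always
 * switches to the abort stack and does not track nesting in the core
 * local flags.
 */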
	/* sp_el0 in x3 */
LOCAL_FUNC el0_sync_abort , :
	/* load abt_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END_OFFSET]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR_OFFS]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X_OFFSET(0)]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X_OFFS(0)]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X_OFFSET(2)]
	store_xregs sp, THREAD_ABT_REG_X_OFFS(2), 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X_OFFS(30)]
	/* Call handler */
	mov	x0, #0
	mov	x1, sp
	bl	thread_handle_abort
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X_OFFS(30)]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X_OFFS(0), 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in EL1_REC */
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR_OFFS]
	msr	spsr_el1, x0
	msr	sp_el0, x1
	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	/* Return from exception */
	eret
END_FUNC el0_sync_abort

/*
 * struct elx_itr_rec {
 *	uint64_t x[19 - 4];	x4..x18
 *	uint64_t init_rec;
 *	uint64_t pad;
 *	uint64_t lr;
 *	uint64_t sp_el0;
 * };
 */
#define ELX_ITR_REC_X_OFFSET(x)		(8 * ((x) - 4))
#define ELX_ITR_REC_INIT_REC_OFFSET	(8 + ELX_ITR_REC_X_OFFSET(19))
#define ELX_ITR_REC_PAD_OFFSET		(8 + ELX_ITR_REC_INIT_REC_OFFSET)
#define ELX_ITR_REC_LR_OFFSET		(8 + ELX_ITR_REC_PAD_OFFSET)
#define ELX_ITR_REC_SP_EL0_OFFSET	(8 + ELX_ITR_REC_LR_OFFSET)
#define ELX_ITR_REC_SIZE		(8 + ELX_ITR_REC_SP_EL0_OFFSET)
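
/*
 * elx_irq below saves the interrupted context into the current thread's
 * struct thread_ctx_regs, marks the thread as suspended with
 * THREAD_FLAGS_EXIT_ON_IRQ and exits to normal world with a
 * TEESMC_RETURN_RPC_IRQ request so the IRQ can be serviced there.
 * elx_fiq further down instead handles the FIQ locally through
 * thread_fiq_handler_ptr and returns to the interrupted context with
 * eret.
 */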
LOCAL_FUNC elx_irq , :
	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END_OFFSET]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Save registers on stack that can be corrupted by a call to
	 * thread_get_ctx_regs().
	 */
	/* Make room for struct elx_itr_rec */
	sub	sp, sp, #ELX_ITR_REC_SIZE
	/* Store x4..x18 */
	store_xregs sp, ELX_ITR_REC_X_OFFSET(4), 4, 18
	/* Store pointer to initial record */
	str	x0, [sp, #ELX_ITR_REC_INIT_REC_OFFSET]
	/* Store lr and original sp_el0 */
	stp	x30, x2, [sp, #ELX_ITR_REC_LR_OFFSET]

	/*
	 * Get pointer to struct thread_ctx_regs and store context
	 */
	bl	thread_get_ctx_regs
	/* Restore lr and original sp_el0 */
	ldp	x30, x1, [sp, #ELX_ITR_REC_LR_OFFSET]
	/* Store original sp_el0 */
	str	x1, [x0, #THREAD_CTX_REGS_SP_OFFSET]
	/* Restore x4..x18 */
	load_xregs sp, ELX_ITR_REC_X_OFFSET(4), 4, 18
	/* store x4..x30 */
	store_xregs x0, THREAD_CTX_REGS_X_OFFSET(4), 4, 30
	/* get pointer to initial record */
	ldr	x4, [sp, #ELX_ITR_REC_INIT_REC_OFFSET]
	/* Load original x0..x3 into x10..x13 */
	load_xregs x4, THREAD_CORE_LOCAL_X_OFFSET(0), 10, 13
	/* Save original x0..x3 */
	store_xregs x0, THREAD_CTX_REGS_X_OFFSET(0), 10, 13

	/* Remove struct elx_itr_rec from stack */
	add	sp, sp, #ELX_ITR_REC_SIZE

	/*
	 * Mark current thread as suspended
	 */
	mov	w0, #THREAD_FLAGS_EXIT_ON_IRQ
	mrs	x1, spsr_el1
	mrs	x2, elr_el1
	bl	thread_state_suspend
	mov	w4, w0		/* Supply thread index */

	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	w1, =TEESMC_RETURN_RPC_IRQ
	mov	w2, #0
	mov	w3, #0
	/* w4 is already filled in above */
	smc	#0
	b	.		/* SMC should not return */
END_FUNC elx_irq

LOCAL_FUNC elx_fiq , :
	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END_OFFSET]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Save registers on stack that can be corrupted by a call to
	 * a C function
	 */
	/* Make room for struct elx_itr_rec */
	sub	sp, sp, #ELX_ITR_REC_SIZE
	/* Store x4..x18 */
	store_xregs sp, ELX_ITR_REC_X_OFFSET(4), 4, 18
	/* Store lr and original sp_el0 */
	stp	x30, x2, [sp, #ELX_ITR_REC_LR_OFFSET]

	bl	thread_check_canaries
	adr	x16, thread_fiq_handler_ptr
	ldr	x16, [x16]
	blr	x16

	/*
	 * Restore registers
	 */
	/* Restore x4..x18 */
	load_xregs sp, ELX_ITR_REC_X_OFFSET(4), 4, 18
	/* Load lr and original sp_el0 */
	ldp	x30, x2, [sp, #ELX_ITR_REC_LR_OFFSET]
	/* Restore sp_el0 */
	mov	sp, x2
	/* Switch back to sp_el1 */
	msr	spsel, #1
	/* Restore x0..x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3

	/* Return from exception */
	eret
END_FUNC elx_fiq