/*
 * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/ehf.h>
#include <bl31/interrupt_mgmt.h>
#include <bl31/sync_handle.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/cassert.h>
#include <services/sdei.h>

#include "sdei_private.h"

/* x0-x17 GPREGS context */
#define SDEI_SAVED_GPREGS	18U

/* Maximum preemption nesting levels: Critical priority and Normal priority */
#define MAX_EVENT_NESTING	2U

/* Per-CPU SDEI state access macro */
#define sdei_get_this_pe_state()	(&cpu_state[plat_my_core_pos()])

/* Structure to store information about an outstanding dispatch */
typedef struct sdei_dispatch_context {
	sdei_ev_map_t *map;
	uint64_t x[SDEI_SAVED_GPREGS];
	jmp_buf *dispatch_jmp;

	/* Exception state registers */
	uint64_t elr_el3;
	uint64_t spsr_el3;

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	/* CVE-2018-3639 mitigation state */
	uint64_t disable_cve_2018_3639;
#endif
} sdei_dispatch_context_t;

/* Per-CPU SDEI state data */
typedef struct sdei_cpu_state {
	sdei_dispatch_context_t dispatch_stack[MAX_EVENT_NESTING];
	unsigned short stack_top; /* Empty ascending */
	bool pe_masked;
	bool pending_enables;
} sdei_cpu_state_t;

/* SDEI states for all cores in the system */
static sdei_cpu_state_t cpu_state[PLATFORM_CORE_COUNT];
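
/*
 * sdei_pe_mask() and sdei_pe_unmask() below implement PE-level masking of SDEI
 * event dispatch. The interrupt handler consults pe_masked before delegating
 * an event to the client, and unmasking re-enables any interrupts whose
 * enabling was deferred (pending_enables) while this PE was masked.
 */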

int64_t sdei_pe_mask(void)
{
	int64_t ret = 0;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	/*
	 * Return value indicates whether this call had any effect on the mask
	 * status of this PE.
	 */
	if (!state->pe_masked) {
		state->pe_masked = true;
		ret = 1;
	}

	return ret;
}

void sdei_pe_unmask(void)
{
	unsigned int i;
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	uint64_t my_mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;

	/*
	 * If there are pending enables, iterate through the private mappings
	 * and enable those bound maps that are in enabled state. Also, iterate
	 * through shared mappings and enable interrupts of events that are
	 * targeted to this PE.
	 */
	if (state->pending_enables) {
		for_each_private_map(i, map) {
			se = get_event_entry(map);
			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED))
				plat_ic_enable_interrupt(map->intr);
		}

		for_each_shared_map(i, map) {
			se = get_event_entry(map);

			sdei_map_lock(map);
			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED) &&
					(se->reg_flags == SDEI_REGF_RM_PE) &&
					(se->affinity == my_mpidr)) {
				plat_ic_enable_interrupt(map->intr);
			}
			sdei_map_unlock(map);
		}
	}

	state->pending_enables = false;
	state->pe_masked = false;
}

/* Push a dispatch context to the dispatch stack */
static sdei_dispatch_context_t *push_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	sdei_dispatch_context_t *disp_ctx;

	/* Cannot have more than max events */
	assert(state->stack_top < MAX_EVENT_NESTING);

	disp_ctx = &state->dispatch_stack[state->stack_top];
	state->stack_top++;

	return disp_ctx;
}

/* Pop a dispatch context from the dispatch stack */
static sdei_dispatch_context_t *pop_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0U)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	state->stack_top--;

	return &state->dispatch_stack[state->stack_top];
}

/* Retrieve the context at the top of the dispatch stack */
static sdei_dispatch_context_t *get_outstanding_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0U)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	return &state->dispatch_stack[state->stack_top - 1U];
}

static sdei_dispatch_context_t *save_event_ctx(sdei_ev_map_t *map,
		void *tgt_ctx)
{
	sdei_dispatch_context_t *disp_ctx;
	const gp_regs_t *tgt_gpregs;
	const el3_state_t *tgt_el3;

	assert(tgt_ctx != NULL);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	disp_ctx = push_dispatch();
	assert(disp_ctx != NULL);
	disp_ctx->map = map;

	/* Save general purpose and exception registers */
	memcpy(disp_ctx->x, tgt_gpregs, sizeof(disp_ctx->x));
	disp_ctx->spsr_el3 = read_ctx_reg(tgt_el3, CTX_SPSR_EL3);
	disp_ctx->elr_el3 = read_ctx_reg(tgt_el3, CTX_ELR_EL3);

	return disp_ctx;
}

static void restore_event_ctx(const sdei_dispatch_context_t *disp_ctx, void *tgt_ctx)
{
	gp_regs_t *tgt_gpregs;
	el3_state_t *tgt_el3;

	assert(tgt_ctx != NULL);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	CASSERT(sizeof(disp_ctx->x) == (SDEI_SAVED_GPREGS * sizeof(uint64_t)),
			assert_sdei_saved_gpregs_size);

	/* Restore general purpose and exception registers */
	memcpy(tgt_gpregs, disp_ctx->x, sizeof(disp_ctx->x));
	write_ctx_reg(tgt_el3, CTX_SPSR_EL3, disp_ctx->spsr_el3);
	write_ctx_reg(tgt_el3, CTX_ELR_EL3, disp_ctx->elr_el3);

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;
	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(tgt_ctx);

	/* Restore CVE-2018-3639 mitigation state */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE,
			disp_ctx->disable_cve_2018_3639);
#endif
}
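
/*
 * Note on the save/restore policy above: only x0-x17, ELR_EL3 and SPSR_EL3 of
 * the interrupted context are captured in the dispatch context. The client
 * handler is expected to preserve the remaining general purpose registers, as
 * the SDEI specification requires of client handlers. When the dynamic
 * CVE-2018-3639 workaround is built in, the handler is entered with the
 * mitigation enabled (see setup_ns_dispatch()) and the interrupted context's
 * own mitigation choice is restored here on completion.
 */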

static void save_secure_context(void)
{
	cm_el1_sysregs_context_save(SECURE);
}

/* Restore Secure context and arrange to resume it at the next ERET */
static void restore_and_resume_secure_context(void)
{
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);
}

/*
 * Restore Non-secure context and arrange to resume it at the next ERET. Return
 * pointer to the Non-secure context.
 */
static cpu_context_t *restore_and_resume_ns_context(void)
{
	cpu_context_t *ns_ctx;

	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	ns_ctx = cm_get_context(NON_SECURE);
	assert(ns_ctx != NULL);

	return ns_ctx;
}
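
/*
 * Note: the SPSR used for handler entry is ultimately produced by
 * create_spsr(), which derives it from the PSTATE of the interrupted context
 * for the client EL. The SPSR_64() value computed at the top of
 * sdei_set_elr_spsr() below is only an initial default and is overwritten by
 * the create_spsr() result.
 */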

/*
 * Prepare for ERET:
 * - Set the ELR to the registered handler address
 * - Set the SPSR register by calling the common create_spsr() function
 */
static void sdei_set_elr_spsr(sdei_entry_t *se, sdei_dispatch_context_t *disp_ctx)
{
	unsigned int client_el = sdei_client_el();
	u_register_t sdei_spsr = SPSR_64(client_el, MODE_SP_ELX,
			DISABLE_ALL_EXCEPTIONS);

	u_register_t interrupted_pstate = disp_ctx->spsr_el3;

	sdei_spsr = create_spsr(interrupted_pstate, client_el);

	cm_set_elr_spsr_el3(NON_SECURE, (uintptr_t) se->ep, sdei_spsr);
}

/*
 * Populate the Non-secure context so that the next ERET will dispatch to the
 * SDEI client.
 */
static void setup_ns_dispatch(sdei_ev_map_t *map, sdei_entry_t *se,
		cpu_context_t *ctx, jmp_buf *dispatch_jmp)
{
	sdei_dispatch_context_t *disp_ctx;

	/* Push the event and context */
	disp_ctx = save_event_ctx(map, ctx);

	/*
	 * Setup handler arguments:
	 *
	 * - x0: Event number
	 * - x1: Handler argument supplied at the time of event registration
	 * - x2: Interrupted PC
	 * - x3: Interrupted SPSR
	 */
	SMC_SET_GP(ctx, CTX_GPREG_X0, (uint64_t) map->ev_num);
	SMC_SET_GP(ctx, CTX_GPREG_X1, se->arg);
	SMC_SET_GP(ctx, CTX_GPREG_X2, disp_ctx->elr_el3);
	SMC_SET_GP(ctx, CTX_GPREG_X3, disp_ctx->spsr_el3);

	/* Set up the ELR and SPSR registers to prepare for ERET */
	sdei_set_elr_spsr(se, disp_ctx);

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;
	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(ctx);

	/* Save CVE-2018-3639 mitigation state */
	disp_ctx->disable_cve_2018_3639 = read_ctx_reg(tgt_cve_2018_3639,
			CTX_CVE_2018_3639_DISABLE);

	/* Force SDEI handler to execute with mitigation enabled by default */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE, 0);
#endif

	disp_ctx->dispatch_jmp = dispatch_jmp;
}

/* Handle a triggered SDEI interrupt while events were masked on this PE */
static void handle_masked_trigger(sdei_ev_map_t *map, sdei_entry_t *se,
		sdei_cpu_state_t *state, unsigned int intr_raw)
{
	uint64_t my_mpidr __unused = (read_mpidr_el1() & MPIDR_AFFINITY_MASK);
	bool disable = false;

	/* Nothing to do for event 0 */
	if (map->ev_num == SDEI_EVENT_0)
		return;

	/*
	 * For a private event, or for a shared event specifically routed to
	 * this CPU, we disable the interrupt, leave it pending, and do EOI.
	 */
	if (is_event_private(map) || (se->reg_flags == SDEI_REGF_RM_PE))
		disable = true;

	if (se->reg_flags == SDEI_REGF_RM_PE)
		assert(se->affinity == my_mpidr);

	if (disable) {
		plat_ic_disable_interrupt(map->intr);
		plat_ic_set_interrupt_pending(map->intr);
		plat_ic_end_of_interrupt(intr_raw);
		state->pending_enables = true;

		return;
	}

	/*
	 * We just received a shared event with routing set to ANY PE. The
	 * interrupt can't be delegated on this PE as SDEI events are masked.
	 * However, because its routing mode is ANY, it is possible that the
	 * event can be delegated on any other PE that hasn't masked events.
	 * Therefore, we set the interrupt pending again so as to give other
	 * suitable PEs a chance of handling it.
	 */
	assert(plat_ic_is_spi(map->intr) != 0);
	plat_ic_set_interrupt_pending(map->intr);

	/*
	 * Leaving the same interrupt pending also means that the same interrupt
	 * can target this PE again as soon as this PE leaves EL3. Whether and
	 * how often that happens depends on the implementation of the GIC.
	 *
	 * We therefore call a platform handler to resolve this situation.
	 */
	plat_sdei_handle_masked_trigger(my_mpidr, map->intr);

	/* This PE is masked. We EOI the interrupt, as it can't be delegated */
	plat_ic_end_of_interrupt(intr_raw);
}
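
/*
 * sdei_intr_handler() below is the entry point invoked by the interrupt
 * management framework. It is expected to be registered, at dispatcher
 * initialisation, as the handler for both the Normal and Critical SDEI
 * priority levels, so it runs whenever an interrupt bound to an SDEI event
 * (or the event 0 SGI) is acknowledged at one of those priorities.
 */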

/* SDEI main interrupt handler */
int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
		void *cookie)
{
	sdei_entry_t *se;
	cpu_context_t *ctx;
	sdei_ev_map_t *map;
	const sdei_dispatch_context_t *disp_ctx;
	unsigned int sec_state;
	sdei_cpu_state_t *state;
	uint32_t intr;
	jmp_buf dispatch_jmp;
	const uint64_t mpidr = read_mpidr_el1();

	/*
	 * To handle an event, the following conditions must be true:
	 *
	 * 1. Event must be signalled
	 * 2. Event must be enabled
	 * 3. This PE must be a target PE for the event
	 * 4. PE must be unmasked for SDEI
	 * 5. If this is a normal event, no event must be running
	 * 6. If this is a critical event, no critical event must be running
	 *
	 * (1) and (2) are true when this function is running
	 * (3) is enforced in GIC by selecting the appropriate routing option
	 * (4) is satisfied by the client calling PE_UNMASK
	 * (5) and (6) are enforced using interrupt priority, the RPR, in GIC:
	 *   - Normal SDEI events belong to Normal SDE priority class
	 *   - Critical SDEI events belong to Critical CSDE priority class
	 *
	 * The interrupt has already been acknowledged, and therefore is active,
	 * so no other PE can handle this event while we are at it.
	 *
	 * Find if this is an SDEI interrupt. There must be an event mapped to
	 * this interrupt.
	 */
	intr = plat_ic_get_interrupt_id(intr_raw);
	map = find_event_map_by_intr(intr, (plat_ic_is_spi(intr) != 0));
	if (map == NULL) {
		ERROR("No SDEI map for interrupt %u\n", intr);
		panic();
	}

	/*
	 * The received interrupt number must either correspond to event 0, or
	 * must be a bound interrupt.
	 */
	assert((map->ev_num == SDEI_EVENT_0) || is_map_bound(map));

	se = get_event_entry(map);
	state = sdei_get_this_pe_state();

	if (state->pe_masked) {
		/*
		 * Interrupts received while this PE was masked can't be
		 * dispatched.
		 */
		SDEI_LOG("interrupt %u on %" PRIx64 " while PE masked\n",
				map->intr, mpidr);
		if (is_event_shared(map))
			sdei_map_lock(map);

		handle_masked_trigger(map, se, state, intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	/* Insert load barrier for signalled SDEI event */
	if (map->ev_num == SDEI_EVENT_0)
		dmbld();

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* Assert that a shared event routed to this PE was configured so */
	if (is_event_shared(map) && (se->reg_flags == SDEI_REGF_RM_PE)) {
		assert(se->affinity == (mpidr & MPIDR_AFFINITY_MASK));
	}

	if (!can_sdei_state_trans(se, DO_DISPATCH)) {
		SDEI_LOG("SDEI event 0x%x can't be dispatched; state=0x%x\n",
				map->ev_num, se->state);

		/*
		 * If the event is registered, leave the interrupt pending so
		 * that it's delivered when the event is enabled.
		 */
		if (GET_EV_STATE(se, REGISTERED))
			plat_ic_set_interrupt_pending(map->intr);

		/*
		 * The interrupt was disabled or unregistered after the handler
		 * started to execute, which means the interrupt is now already
		 * disabled and we only need to EOI it.
		 */
		plat_ic_end_of_interrupt(intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	disp_ctx = get_outstanding_dispatch();
	if (is_event_critical(map)) {
		/*
		 * If this event is Critical, and if there's an outstanding
		 * dispatch, assert the latter is a Normal dispatch. Critical
		 * events can preempt an outstanding Normal event dispatch.
		 */
		if (disp_ctx != NULL)
			assert(is_event_normal(disp_ctx->map));
	} else {
		/*
		 * If this event is Normal, assert that there are no outstanding
		 * dispatches. Normal events can't preempt any outstanding event
		 * dispatches.
		 */
		assert(disp_ctx == NULL);
	}

	sec_state = get_interrupt_src_ss(flags);

	if (is_event_shared(map))
		sdei_map_unlock(map);

	SDEI_LOG("ACK %" PRIx64 ", ev:0x%x ss:%d spsr:%lx ELR:%lx\n",
			mpidr, map->ev_num, sec_state, read_spsr_el3(),
			read_elr_el3());

	ctx = handle;

	/*
	 * Check if we interrupted the Secure state. If so, perform a context
	 * switch so that we can delegate to NS.
	 */
	if (sec_state == SECURE) {
		save_secure_context();
		ctx = restore_and_resume_ns_context();
	}

	/* Synchronously dispatch event */
	setup_ns_dispatch(map, se, ctx, &dispatch_jmp);
	begin_sdei_synchronous_dispatch(&dispatch_jmp);

	/*
	 * We reach here when the client completes the event.
	 *
	 * If the cause of dispatch originally interrupted the Secure world,
	 * resume Secure.
	 *
	 * No need to save the Non-secure context ahead of a world switch: the
	 * Non-secure context was fully saved before dispatch, and has been
	 * returned to its pre-dispatch state.
	 */
	if (sec_state == SECURE)
		restore_and_resume_secure_context();

	/*
	 * The event was dispatched after receiving the SDEI interrupt. With
	 * the event handling completed, EOI the corresponding interrupt.
	 */
	if ((map->ev_num != SDEI_EVENT_0) && !is_map_bound(map)) {
		ERROR("Invalid SDEI mapping: ev=0x%x\n", map->ev_num);
		panic();
	}
	plat_ic_end_of_interrupt(intr_raw);

	return 0;
}
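
/*
 * Note: sdei_dispatch_event() below is the interface intended for other EL3
 * components (for example, a firmware-first error handler) to inject an
 * explicit SDEI event. An illustrative call, assuming a hypothetical
 * platform-defined explicit event number PLAT_SDEI_RAS_EVENT:
 *
 *	if (sdei_dispatch_event(PLAT_SDEI_RAS_EVENT) != 0)
 *		ERROR("Explicit SDEI dispatch failed\n");
 */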

/*
 * Explicitly dispatch the given SDEI event.
 *
 * When calling this API, the caller must be prepared for the SDEI dispatcher
 * to restore the Non-secure context and make it the active one. This call
 * returns only after the client has completed the dispatch. Then, the
 * Non-secure context will be active, and the following ERET will return to
 * Non-secure.
 *
 * Should the caller require re-entry to Secure, it must restore the Secure
 * context and program registers for ERET.
 */
int sdei_dispatch_event(int ev_num)
{
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ns_ctx;
	sdei_dispatch_context_t *disp_ctx;
	sdei_cpu_state_t *state;
	jmp_buf dispatch_jmp;

	/* Can't dispatch if events are masked on this PE */
	state = sdei_get_this_pe_state();
	if (state->pe_masked)
		return -1;

	/* Event 0 can't be dispatched */
	if (ev_num == SDEI_EVENT_0)
		return -1;

	/* Locate mapping corresponding to this event */
	map = find_event_map(ev_num);
	if (map == NULL)
		return -1;

	/* Only explicit events can be dispatched */
	if (!is_map_explicit(map))
		return -1;

	/* Examine state of dispatch stack */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx != NULL) {
		/*
		 * There's an outstanding dispatch. If the outstanding dispatch
		 * is critical, no more dispatches are possible.
		 */
		if (is_event_critical(disp_ctx->map))
			return -1;

		/*
		 * If the outstanding dispatch is Normal, only critical events
		 * can be dispatched.
		 */
		if (is_event_normal(map))
			return -1;
	}

	se = get_event_entry(map);
	if (!can_sdei_state_trans(se, DO_DISPATCH))
		return -1;

	/*
	 * Prepare for NS dispatch by restoring the Non-secure context and
	 * marking that as active.
	 */
	ns_ctx = restore_and_resume_ns_context();

	/* Activate the priority corresponding to the event being dispatched */
	ehf_activate_priority(sdei_event_priority(map));

	/* Dispatch event synchronously */
	setup_ns_dispatch(map, se, ns_ctx, &dispatch_jmp);
	begin_sdei_synchronous_dispatch(&dispatch_jmp);

	/*
	 * We reach here when the client completes the event.
	 *
	 * Deactivate the priority level that was activated at the time of
	 * explicit dispatch.
	 */
	ehf_deactivate_priority(sdei_event_priority(map));

	return 0;
}
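
/*
 * Synchronous dispatch works as a setjmp()/longjmp() pair:
 * begin_sdei_synchronous_dispatch() (implemented elsewhere in the dispatcher)
 * is expected to save the jump buffer and exit EL3 to the client, and
 * end_sdei_synchronous_dispatch() jumps back to it once the client signals
 * completion, so the begin_...() call in sdei_intr_handler() or
 * sdei_dispatch_event() appears to return only after the event is handled.
 */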

static void end_sdei_synchronous_dispatch(jmp_buf *buffer)
{
	longjmp(*buffer, 1);
}

int sdei_event_complete(bool resume, uint64_t pc)
{
	sdei_dispatch_context_t *disp_ctx;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ctx;
	sdei_action_t act;
	unsigned int client_el = sdei_client_el();

	/* Return error if called without an active event */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx == NULL)
		return SDEI_EDENY;

	/* Validate resumption point */
	if (resume && (plat_sdei_validate_entry_point(pc, client_el) != 0))
		return SDEI_EDENY;

	map = disp_ctx->map;
	assert(map != NULL);
	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	act = resume ? DO_COMPLETE_RESUME : DO_COMPLETE;
	if (!can_sdei_state_trans(se, act)) {
		if (is_event_shared(map))
			sdei_map_unlock(map);
		return SDEI_EDENY;
	}

	if (is_event_shared(map))
		sdei_map_unlock(map);

	/* Having done sanity checks, pop dispatch */
	(void) pop_dispatch();

	SDEI_LOG("EOI:%lx, %d spsr:%lx elr:%lx\n", read_mpidr_el1(),
			map->ev_num, read_spsr_el3(), read_elr_el3());

	/*
	 * Restore Non-secure to how it was originally interrupted. Once done,
	 * it's up-to-date with the saved copy.
	 */
	ctx = cm_get_context(NON_SECURE);
	restore_event_ctx(disp_ctx, ctx);

	if (resume) {
		/*
		 * Complete-and-resume call. Prepare the Non-secure context
		 * (currently active) for complete and resume.
		 */
		cm_set_elr_spsr_el3(NON_SECURE, pc, SPSR_64(client_el,
				MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));

		/*
		 * Make it look as if a synchronous exception were taken at the
		 * supplied Non-secure resumption point. Populate SPSR and
		 * ELR_ELx so that an ERET from there works as expected.
		 *
		 * The assumption is that the client, if necessary, would have
		 * saved any live content in these registers before making this
		 * call.
		 */
		if (client_el == MODE_EL2) {
			write_elr_el2(disp_ctx->elr_el3);
			write_spsr_el2(disp_ctx->spsr_el3);
		} else {
			/* EL1 */
			write_elr_el1(disp_ctx->elr_el3);
			write_spsr_el1(disp_ctx->spsr_el3);
		}
	}

	/* End the outstanding dispatch */
	end_sdei_synchronous_dispatch(disp_ctx->dispatch_jmp);

	return 0;
}

int64_t sdei_event_context(void *handle, unsigned int param)
{
	sdei_dispatch_context_t *disp_ctx;

	if (param >= SDEI_SAVED_GPREGS)
		return SDEI_EINVAL;

	/* Get outstanding dispatch on this CPU */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx == NULL)
		return SDEI_EDENY;

	assert(disp_ctx->map != NULL);

	if (!can_sdei_state_trans(get_event_entry(disp_ctx->map), DO_CONTEXT))
		return SDEI_EDENY;

	/*
	 * No locking is required for the Running status as this is the only CPU
	 * which can complete the event.
	 */

	return (int64_t) disp_ctx->x[param];
}