/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <cassert.h>
#include <debug.h>
#include <ehf.h>
#include <interrupt_mgmt.h>
#include <runtime_svc.h>
#include <sdei.h>
#include <string.h>
#include "sdei_private.h"

/* x0-x17 GPREGS context */
#define SDEI_SAVED_GPREGS	18U

/* Maximum preemption nesting levels: Critical priority and Normal priority */
#define MAX_EVENT_NESTING	2U

/* Per-CPU SDEI state access macro */
#define sdei_get_this_pe_state()	(&cpu_state[plat_my_core_pos()])

/* Structure to store information about an outstanding dispatch */
typedef struct sdei_dispatch_context {
	sdei_ev_map_t *map;
	uint64_t x[SDEI_SAVED_GPREGS];
	struct jmpbuf *dispatch_jmp;

	/* Exception state registers */
	uint64_t elr_el3;
	uint64_t spsr_el3;

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	/* CVE-2018-3639 mitigation state */
	uint64_t disable_cve_2018_3639;
#endif
} sdei_dispatch_context_t;

/* Per-CPU SDEI state data */
typedef struct sdei_cpu_state {
	sdei_dispatch_context_t dispatch_stack[MAX_EVENT_NESTING];
	unsigned short stack_top;	/* Empty ascending */
	bool pe_masked;
	bool pending_enables;
} sdei_cpu_state_t;

/* SDEI states for all cores in the system */
static sdei_cpu_state_t cpu_state[PLATFORM_CORE_COUNT];

int64_t sdei_pe_mask(void)
{
	int64_t ret = 0;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	/*
	 * The return value indicates whether this call had any effect on the
	 * mask status of this PE.
	 */
	if (!state->pe_masked) {
		state->pe_masked = true;
		ret = 1;
	}

	return ret;
}

void sdei_pe_unmask(void)
{
	unsigned int i;
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	uint64_t my_mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;

	/*
	 * If there are pending enables, iterate through the private mappings
	 * and enable those bound maps that are in enabled state. Also, iterate
	 * through shared mappings and enable interrupts of events that are
	 * targeted to this PE.
	 */
	if (state->pending_enables) {
		for_each_private_map(i, map) {
			se = get_event_entry(map);
			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED))
				plat_ic_enable_interrupt(map->intr);
		}

		for_each_shared_map(i, map) {
			se = get_event_entry(map);

			sdei_map_lock(map);
			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED) &&
					(se->reg_flags == SDEI_REGF_RM_PE) &&
					(se->affinity == my_mpidr)) {
				plat_ic_enable_interrupt(map->intr);
			}
			sdei_map_unlock(map);
		}
	}

	state->pending_enables = false;
	state->pe_masked = false;
}

/* Push a dispatch context to the dispatch stack */
static sdei_dispatch_context_t *push_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	sdei_dispatch_context_t *disp_ctx;

	/* Cannot have more than max events */
	assert(state->stack_top < MAX_EVENT_NESTING);

	disp_ctx = &state->dispatch_stack[state->stack_top];
	state->stack_top++;

	return disp_ctx;
}

/* Pop a dispatch context from the dispatch stack */
static sdei_dispatch_context_t *pop_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0U)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	state->stack_top--;

	return &state->dispatch_stack[state->stack_top];
}

/* Retrieve the context at the top of the dispatch stack */
static sdei_dispatch_context_t *get_outstanding_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0U)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	return &state->dispatch_stack[state->stack_top - 1U];
}

static sdei_dispatch_context_t *save_event_ctx(sdei_ev_map_t *map,
		void *tgt_ctx)
{
	sdei_dispatch_context_t *disp_ctx;
	const gp_regs_t *tgt_gpregs;
	const el3_state_t *tgt_el3;

	assert(tgt_ctx != NULL);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	disp_ctx = push_dispatch();
	assert(disp_ctx != NULL);
	disp_ctx->map = map;

	/* Save general purpose and exception registers */
	memcpy(disp_ctx->x, tgt_gpregs, sizeof(disp_ctx->x));
	disp_ctx->spsr_el3 = read_ctx_reg(tgt_el3, CTX_SPSR_EL3);
	disp_ctx->elr_el3 = read_ctx_reg(tgt_el3, CTX_ELR_EL3);

	return disp_ctx;
}

static void restore_event_ctx(const sdei_dispatch_context_t *disp_ctx,
		void *tgt_ctx)
{
	gp_regs_t *tgt_gpregs;
	el3_state_t *tgt_el3;

	assert(tgt_ctx != NULL);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	CASSERT(sizeof(disp_ctx->x) == (SDEI_SAVED_GPREGS * sizeof(uint64_t)),
			foo);

	/* Restore general purpose and exception registers */
	memcpy(tgt_gpregs, disp_ctx->x, sizeof(disp_ctx->x));
	write_ctx_reg(tgt_el3, CTX_SPSR_EL3, disp_ctx->spsr_el3);
	write_ctx_reg(tgt_el3, CTX_ELR_EL3, disp_ctx->elr_el3);

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;
	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(tgt_ctx);

	/* Restore CVE-2018-3639 mitigation state */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE,
			disp_ctx->disable_cve_2018_3639);
#endif
}

static void save_secure_context(void)
{
	cm_el1_sysregs_context_save(SECURE);
}

/* Restore Secure context and arrange to resume it at the next ERET */
static void restore_and_resume_secure_context(void)
{
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);
}

/*
 * Restore Non-secure context and arrange to resume it at the next ERET. Return
 * a pointer to the Non-secure context.
 */
static cpu_context_t *restore_and_resume_ns_context(void)
{
	cpu_context_t *ns_ctx;

	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	ns_ctx = cm_get_context(NON_SECURE);
	assert(ns_ctx != NULL);

	return ns_ctx;
}

/*
 * Populate the Non-secure context so that the next ERET will dispatch to the
 * SDEI client.
 */
static void setup_ns_dispatch(sdei_ev_map_t *map, sdei_entry_t *se,
		cpu_context_t *ctx, struct jmpbuf *dispatch_jmp)
{
	sdei_dispatch_context_t *disp_ctx;

	/* Push the event and context */
	disp_ctx = save_event_ctx(map, ctx);

	/*
	 * Set up handler arguments:
	 *
	 * - x0: Event number
	 * - x1: Handler argument supplied at the time of event registration
	 * - x2: Interrupted PC
	 * - x3: Interrupted SPSR
	 */
	SMC_SET_GP(ctx, CTX_GPREG_X0, (uint64_t) map->ev_num);
	SMC_SET_GP(ctx, CTX_GPREG_X1, se->arg);
	SMC_SET_GP(ctx, CTX_GPREG_X2, disp_ctx->elr_el3);
	SMC_SET_GP(ctx, CTX_GPREG_X3, disp_ctx->spsr_el3);

	/*
	 * Prepare for ERET:
	 *
	 * - Set PC to the registered handler address
	 * - Set SPSR to jump to client EL with exceptions masked
	 */
	cm_set_elr_spsr_el3(NON_SECURE, (uintptr_t) se->ep,
			SPSR_64(sdei_client_el(), MODE_SP_ELX,
				DISABLE_ALL_EXCEPTIONS));

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;
	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(ctx);

	/* Save CVE-2018-3639 mitigation state */
	disp_ctx->disable_cve_2018_3639 = read_ctx_reg(tgt_cve_2018_3639,
			CTX_CVE_2018_3639_DISABLE);

	/* Force SDEI handler to execute with mitigation enabled by default */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE, 0);
#endif

	disp_ctx->dispatch_jmp = dispatch_jmp;
}

/* Handle a triggered SDEI interrupt while events were masked on this PE */
static void handle_masked_trigger(sdei_ev_map_t *map, sdei_entry_t *se,
		sdei_cpu_state_t *state, unsigned int intr_raw)
{
	uint64_t my_mpidr __unused = (read_mpidr_el1() & MPIDR_AFFINITY_MASK);
	bool disable = false;

	/* Nothing to do for event 0 */
	if (map->ev_num == SDEI_EVENT_0)
		return;

	/*
	 * For a private event, or for a shared event specifically routed to
	 * this CPU, we disable the interrupt, leave it pending, and do EOI.
	 */
	if (is_event_private(map) || (se->reg_flags == SDEI_REGF_RM_PE))
		disable = true;

	if (se->reg_flags == SDEI_REGF_RM_PE)
		assert(se->affinity == my_mpidr);

	if (disable) {
		plat_ic_disable_interrupt(map->intr);
		plat_ic_set_interrupt_pending(map->intr);
		plat_ic_end_of_interrupt(intr_raw);
		state->pending_enables = true;

		return;
	}

	/*
	 * We just received a shared event with routing set to ANY PE. The
	 * interrupt can't be delegated on this PE as SDEI events are masked.
	 * However, because its routing mode is ANY, it is possible that the
	 * event can be delegated on any other PE that hasn't masked events.
	 * Therefore, we set the interrupt back to pending so as to give other
	 * suitable PEs a chance of handling it.
	 */
	assert(plat_ic_is_spi(map->intr) != 0);
	plat_ic_set_interrupt_pending(map->intr);

	/*
	 * Leaving the same interrupt pending also means that the same interrupt
	 * can target this PE again as soon as this PE leaves EL3. Whether and
	 * how often that happens depends on the implementation of the GIC.
	 *
	 * We therefore call a platform handler to resolve this situation.
	 */
	plat_sdei_handle_masked_trigger(my_mpidr, map->intr);

	/* This PE is masked. We EOI the interrupt, as it can't be delegated */
	plat_ic_end_of_interrupt(intr_raw);
}

/* SDEI main interrupt handler */
int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
		void *cookie)
{
	sdei_entry_t *se;
	cpu_context_t *ctx;
	sdei_ev_map_t *map;
	const sdei_dispatch_context_t *disp_ctx;
	unsigned int sec_state;
	sdei_cpu_state_t *state;
	uint32_t intr;
	struct jmpbuf dispatch_jmp;
	const uint64_t mpidr = read_mpidr_el1();

	/*
	 * To handle an event, the following conditions must be true:
	 *
	 * 1. Event must be signalled
	 * 2. Event must be enabled
	 * 3. This PE must be a target PE for the event
	 * 4. PE must be unmasked for SDEI
	 * 5. If this is a normal event, no event must be running
	 * 6. If this is a critical event, no critical event must be running
	 *
	 * (1) and (2) are true when this function is running
	 * (3) is enforced in GIC by selecting the appropriate routing option
	 * (4) is satisfied by client calling PE_UNMASK
	 * (5) and (6) are enforced using interrupt priority, the RPR, in GIC:
	 *    - Normal SDEI events belong to Normal SDE priority class
	 *    - Critical SDEI events belong to Critical CSDE priority class
	 *
	 * The interrupt has already been acknowledged, and therefore is active,
	 * so no other PE can handle this event while we are at it.
	 *
	 * Find if this is an SDEI interrupt. There must be an event mapped to
	 * this interrupt.
	 */
	intr = plat_ic_get_interrupt_id(intr_raw);
	map = find_event_map_by_intr(intr, (plat_ic_is_spi(intr) != 0));
	if (map == NULL) {
		ERROR("No SDEI map for interrupt %u\n", intr);
		panic();
	}

	/*
	 * The received interrupt number must either correspond to event 0, or
	 * must be a bound interrupt.
	 */
	assert((map->ev_num == SDEI_EVENT_0) || is_map_bound(map));

	se = get_event_entry(map);
	state = sdei_get_this_pe_state();

	if (state->pe_masked) {
		/*
		 * Interrupts received while this PE was masked can't be
		 * dispatched.
		 */
		SDEI_LOG("interrupt %u on %llx while PE masked\n", map->intr,
				mpidr);
		if (is_event_shared(map))
			sdei_map_lock(map);

		handle_masked_trigger(map, se, state, intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	/* Insert load barrier for signalled SDEI event */
	if (map->ev_num == SDEI_EVENT_0)
		dmbld();

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* Assert that a shared event routed to this PE has been configured so */
	if (is_event_shared(map) && (se->reg_flags == SDEI_REGF_RM_PE)) {
		assert(se->affinity == (mpidr & MPIDR_AFFINITY_MASK));
	}

	if (!can_sdei_state_trans(se, DO_DISPATCH)) {
		SDEI_LOG("SDEI event 0x%x can't be dispatched; state=0x%x\n",
				map->ev_num, se->state);

		/*
		 * If the event is registered, leave the interrupt pending so
		 * that it's delivered when the event is enabled.
		 */
		if (GET_EV_STATE(se, REGISTERED))
			plat_ic_set_interrupt_pending(map->intr);

		/*
		 * The interrupt was disabled or unregistered after the handler
		 * started to execute, which means the interrupt is already
		 * disabled, and we only need to EOI it.
		 */
		plat_ic_end_of_interrupt(intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	disp_ctx = get_outstanding_dispatch();
	if (is_event_critical(map)) {
		/*
		 * If this event is Critical, and if there's an outstanding
		 * dispatch, assert that the latter is a Normal dispatch.
		 * Critical events can preempt an outstanding Normal event
		 * dispatch.
		 */
		if (disp_ctx != NULL)
			assert(is_event_normal(disp_ctx->map));
	} else {
		/*
		 * If this event is Normal, assert that there are no outstanding
		 * dispatches. Normal events can't preempt any outstanding event
		 * dispatches.
		 */
		assert(disp_ctx == NULL);
	}

	sec_state = get_interrupt_src_ss(flags);

	if (is_event_shared(map))
		sdei_map_unlock(map);

	SDEI_LOG("ACK %llx, ev:%d ss:%d spsr:%lx ELR:%lx\n", mpidr, map->ev_num,
			sec_state, read_spsr_el3(), read_elr_el3());

	ctx = handle;

	/*
	 * Check if we interrupted Secure state. Perform a context switch so
	 * that we can delegate to NS.
	 */
	if (sec_state == SECURE) {
		save_secure_context();
		ctx = restore_and_resume_ns_context();
	}

	/* Synchronously dispatch event */
	setup_ns_dispatch(map, se, ctx, &dispatch_jmp);
	begin_sdei_synchronous_dispatch(&dispatch_jmp);

	/*
	 * We reach here when the client completes the event.
	 *
	 * If the cause of dispatch originally interrupted the Secure world,
	 * resume Secure.
	 *
	 * No need to save the Non-secure context ahead of a world switch: the
	 * Non-secure context was fully saved before dispatch, and has been
	 * returned to its pre-dispatch state.
	 */
	if (sec_state == SECURE)
		restore_and_resume_secure_context();

	/*
	 * The event was dispatched after receiving an SDEI interrupt. With
	 * the event handling completed, EOI the corresponding interrupt.
	 */
	if ((map->ev_num != SDEI_EVENT_0) && !is_map_bound(map)) {
		ERROR("Invalid SDEI mapping: ev=%u\n", map->ev_num);
		panic();
	}
	plat_ic_end_of_interrupt(intr_raw);

	return 0;
}

/*
 * Explicitly dispatch the given SDEI event.
 *
 * When calling this API, the caller must be prepared for the SDEI dispatcher to
 * restore the Non-secure context and make it the active one. This call returns
 * only after the client has completed the dispatch. Then, the Non-secure
 * context will be active, and the following ERET will return to Non-secure.
 *
 * Should the caller require re-entry to Secure, it must restore the Secure
 * context and program registers for ERET.
 */
int sdei_dispatch_event(int ev_num)
{
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ns_ctx;
	sdei_dispatch_context_t *disp_ctx;
	sdei_cpu_state_t *state;
	struct jmpbuf dispatch_jmp;

	/* Can't dispatch if events are masked on this PE */
	state = sdei_get_this_pe_state();
	if (state->pe_masked)
		return -1;

	/* Event 0 can't be dispatched */
	if (ev_num == SDEI_EVENT_0)
		return -1;

	/* Locate mapping corresponding to this event */
	map = find_event_map(ev_num);
	if (map == NULL)
		return -1;

	/* Only explicit events can be dispatched */
	if (!is_map_explicit(map))
		return -1;

	/* Examine state of dispatch stack */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx != NULL) {
		/*
		 * There's an outstanding dispatch. If the outstanding dispatch
		 * is critical, no more dispatches are possible.
		 */
		if (is_event_critical(disp_ctx->map))
			return -1;

		/*
		 * If the outstanding dispatch is Normal, only critical events
		 * can be dispatched.
		 */
		if (is_event_normal(map))
			return -1;
	}

	se = get_event_entry(map);
	if (!can_sdei_state_trans(se, DO_DISPATCH))
		return -1;

	/* Activate the priority corresponding to the event being dispatched */
	ehf_activate_priority(sdei_event_priority(map));

	/*
	 * Prepare for NS dispatch by restoring the Non-secure context and
	 * marking that as active.
	 */
	ns_ctx = restore_and_resume_ns_context();

	/* Dispatch event synchronously */
	setup_ns_dispatch(map, se, ns_ctx, &dispatch_jmp);
	begin_sdei_synchronous_dispatch(&dispatch_jmp);

	/*
	 * We reach here when the client completes the event.
	 *
	 * Deactivate the priority level that was activated at the time of
	 * explicit dispatch.
	 */
	ehf_deactivate_priority(sdei_event_priority(map));

	return 0;
}

static void end_sdei_synchronous_dispatch(struct jmpbuf *buffer)
{
	longjmp(buffer);
}

int sdei_event_complete(bool resume, uint64_t pc)
{
	sdei_dispatch_context_t *disp_ctx;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ctx;
	sdei_action_t act;
	unsigned int client_el = sdei_client_el();

	/* Return error if called without an active event */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx == NULL)
		return SDEI_EDENY;

	/* Validate resumption point */
	if (resume && (plat_sdei_validate_entry_point(pc, client_el) != 0))
		return SDEI_EDENY;

	map = disp_ctx->map;
	assert(map != NULL);
	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	act = resume ? DO_COMPLETE_RESUME : DO_COMPLETE;
	if (!can_sdei_state_trans(se, act)) {
		if (is_event_shared(map))
			sdei_map_unlock(map);
		return SDEI_EDENY;
	}

	if (is_event_shared(map))
		sdei_map_unlock(map);

	/* Having done sanity checks, pop dispatch */
	(void) pop_dispatch();

	SDEI_LOG("EOI:%lx, %d spsr:%lx elr:%lx\n", read_mpidr_el1(),
			map->ev_num, read_spsr_el3(), read_elr_el3());

	/*
	 * Restore the Non-secure context to how it was originally interrupted.
	 * Once done, it's up-to-date with the saved copy.
	 */
	ctx = cm_get_context(NON_SECURE);
	restore_event_ctx(disp_ctx, ctx);

	if (resume) {
		/*
		 * Complete-and-resume call. Prepare the Non-secure context
		 * (currently active) for complete and resume.
		 */
		cm_set_elr_spsr_el3(NON_SECURE, pc, SPSR_64(client_el,
				MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));

		/*
		 * Make it look as if a synchronous exception were taken at the
		 * supplied Non-secure resumption point. Populate SPSR and
		 * ELR_ELx so that an ERET from there works as expected.
		 *
		 * The assumption is that the client, if necessary, would have
		 * saved any live content in these registers before making this
		 * call.
		 */
		if (client_el == MODE_EL2) {
			write_elr_el2(disp_ctx->elr_el3);
			write_spsr_el2(disp_ctx->spsr_el3);
		} else {
			/* EL1 */
			write_elr_el1(disp_ctx->elr_el3);
			write_spsr_el1(disp_ctx->spsr_el3);
		}
	}

	/* End the outstanding dispatch */
	end_sdei_synchronous_dispatch(disp_ctx->dispatch_jmp);

	return 0;
}

int64_t sdei_event_context(void *handle, unsigned int param)
{
	sdei_dispatch_context_t *disp_ctx;

	if (param >= SDEI_SAVED_GPREGS)
		return SDEI_EINVAL;

	/* Get outstanding dispatch on this CPU */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx == NULL)
		return SDEI_EDENY;

	assert(disp_ctx->map != NULL);

	if (!can_sdei_state_trans(get_event_entry(disp_ctx->map), DO_CONTEXT))
		return SDEI_EDENY;

	/*
	 * No locking is required for the Running status, as this is the only
	 * CPU which can complete the event.
	 */

	return (int64_t) disp_ctx->x[param];
}
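
/*
 * Illustrative sketch only, not part of the dispatcher: the comment above
 * sdei_dispatch_event() states that the dispatcher leaves the Non-secure
 * context active on return, and that a caller needing re-entry to Secure must
 * restore the Secure context and program registers for ERET itself. A
 * hypothetical EL3 component might therefore wrap an explicit dispatch as
 * below. PLAT_EXAMPLE_EV_NUM is an assumed, platform-defined explicit event
 * number; the surrounding calls are the same context-management APIs used
 * elsewhere in this file. Kept under '#if 0' so it is never compiled.
 */
#if 0
static void plat_example_explicit_dispatch(void)
{
	/* Save the currently-active Secure context before the world switch */
	cm_el1_sysregs_context_save(SECURE);

	/* Delegate the assumed explicit event to the Non-secure client */
	if (sdei_dispatch_event(PLAT_EXAMPLE_EV_NUM) != 0)
		ERROR("Example SDEI dispatch failed\n");

	/* Re-enter Secure: restore its context and program the next ERET */
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);
}
#endif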