/*
 * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <arch_helpers.h>
#include <bl31/ehf.h>
#include <bl31/interrupt_mgmt.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/cassert.h>
#include <services/sdei.h>

#include "sdei_private.h"

/* x0-x17 GPREGS context */
#define SDEI_SAVED_GPREGS	18U

/* Maximum preemption nesting levels: Critical priority and Normal priority */
#define MAX_EVENT_NESTING	2U

/* Per-CPU SDEI state access macro */
#define sdei_get_this_pe_state()	(&cpu_state[plat_my_core_pos()])

/* Structure to store information about an outstanding dispatch */
typedef struct sdei_dispatch_context {
	sdei_ev_map_t *map;
	uint64_t x[SDEI_SAVED_GPREGS];
	jmp_buf *dispatch_jmp;

	/* Exception state registers */
	uint64_t elr_el3;
	uint64_t spsr_el3;

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	/* CVE-2018-3639 mitigation state */
	uint64_t disable_cve_2018_3639;
#endif
} sdei_dispatch_context_t;

/* Per-CPU SDEI state data */
typedef struct sdei_cpu_state {
	sdei_dispatch_context_t dispatch_stack[MAX_EVENT_NESTING];
	unsigned short stack_top;	/* Empty ascending */
	bool pe_masked;
	bool pending_enables;
} sdei_cpu_state_t;

/* SDEI states for all cores in the system */
static sdei_cpu_state_t cpu_state[PLATFORM_CORE_COUNT];

int64_t sdei_pe_mask(void)
{
	int64_t ret = 0;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	/*
	 * Return value indicates whether this call had any effect on the mask
	 * status of this PE.
	 */
	if (!state->pe_masked) {
		state->pe_masked = true;
		ret = 1;
	}

	return ret;
}
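
/*
 * Unmask SDEI events on this PE. Interrupt enables that were deferred while
 * the PE was masked (see handle_masked_trigger()) are applied here.
 */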
void sdei_pe_unmask(void)
{
	unsigned int i;
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	uint64_t my_mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;

	/*
	 * If there are pending enables, iterate through the private mappings
	 * and enable those bound maps that are in enabled state. Also, iterate
	 * through shared mappings and enable interrupts of events that are
	 * targeted to this PE.
	 */
	if (state->pending_enables) {
		for_each_private_map(i, map) {
			se = get_event_entry(map);
			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED))
				plat_ic_enable_interrupt(map->intr);
		}

		for_each_shared_map(i, map) {
			se = get_event_entry(map);

			sdei_map_lock(map);
			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED) &&
					(se->reg_flags == SDEI_REGF_RM_PE) &&
					(se->affinity == my_mpidr)) {
				plat_ic_enable_interrupt(map->intr);
			}
			sdei_map_unlock(map);
		}
	}

	state->pending_enables = false;
	state->pe_masked = false;
}

/* Push a dispatch context to the dispatch stack */
static sdei_dispatch_context_t *push_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	sdei_dispatch_context_t *disp_ctx;

	/* Cannot have more than max events */
	assert(state->stack_top < MAX_EVENT_NESTING);

	disp_ctx = &state->dispatch_stack[state->stack_top];
	state->stack_top++;

	return disp_ctx;
}

/* Pop a dispatch context from the dispatch stack */
static sdei_dispatch_context_t *pop_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0U)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	state->stack_top--;

	return &state->dispatch_stack[state->stack_top];
}

/* Retrieve the context at the top of dispatch stack */
static sdei_dispatch_context_t *get_outstanding_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0U)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	return &state->dispatch_stack[state->stack_top - 1U];
}
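
/*
 * Save the x0-x17 and EL3 exception state of the interrupted context on the
 * dispatch stack, so that they can be restored when the client completes the
 * event.
 */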
static sdei_dispatch_context_t *save_event_ctx(sdei_ev_map_t *map,
		void *tgt_ctx)
{
	sdei_dispatch_context_t *disp_ctx;
	const gp_regs_t *tgt_gpregs;
	const el3_state_t *tgt_el3;

	assert(tgt_ctx != NULL);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	disp_ctx = push_dispatch();
	assert(disp_ctx != NULL);
	disp_ctx->map = map;

	/* Save general purpose and exception registers */
	memcpy(disp_ctx->x, tgt_gpregs, sizeof(disp_ctx->x));
	disp_ctx->spsr_el3 = read_ctx_reg(tgt_el3, CTX_SPSR_EL3);
	disp_ctx->elr_el3 = read_ctx_reg(tgt_el3, CTX_ELR_EL3);

	return disp_ctx;
}

static void restore_event_ctx(const sdei_dispatch_context_t *disp_ctx,
		void *tgt_ctx)
{
	gp_regs_t *tgt_gpregs;
	el3_state_t *tgt_el3;

	assert(tgt_ctx != NULL);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	CASSERT(sizeof(disp_ctx->x) == (SDEI_SAVED_GPREGS * sizeof(uint64_t)),
			foo);

	/* Restore general purpose and exception registers */
	memcpy(tgt_gpregs, disp_ctx->x, sizeof(disp_ctx->x));
	write_ctx_reg(tgt_el3, CTX_SPSR_EL3, disp_ctx->spsr_el3);
	write_ctx_reg(tgt_el3, CTX_ELR_EL3, disp_ctx->elr_el3);

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;
	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(tgt_ctx);

	/* Restore CVE-2018-3639 mitigation state */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE,
			disp_ctx->disable_cve_2018_3639);
#endif
}

static void save_secure_context(void)
{
	cm_el1_sysregs_context_save(SECURE);
}

/* Restore Secure context and arrange to resume it at the next ERET */
static void restore_and_resume_secure_context(void)
{
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);
}

/*
 * Restore Non-secure context and arrange to resume it at the next ERET. Return
 * pointer to the Non-secure context.
 */
static cpu_context_t *restore_and_resume_ns_context(void)
{
	cpu_context_t *ns_ctx;

	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	ns_ctx = cm_get_context(NON_SECURE);
	assert(ns_ctx != NULL);

	return ns_ctx;
}

/*
 * Populate the Non-secure context so that the next ERET will dispatch to the
 * SDEI client.
 */
static void setup_ns_dispatch(sdei_ev_map_t *map, sdei_entry_t *se,
		cpu_context_t *ctx, jmp_buf *dispatch_jmp)
{
	sdei_dispatch_context_t *disp_ctx;

	/* Push the event and context */
	disp_ctx = save_event_ctx(map, ctx);

	/*
	 * Set up handler arguments:
	 *
	 * - x0: Event number
	 * - x1: Handler argument supplied at the time of event registration
	 * - x2: Interrupted PC
	 * - x3: Interrupted SPSR
	 */
	SMC_SET_GP(ctx, CTX_GPREG_X0, (uint64_t) map->ev_num);
	SMC_SET_GP(ctx, CTX_GPREG_X1, se->arg);
	SMC_SET_GP(ctx, CTX_GPREG_X2, disp_ctx->elr_el3);
	SMC_SET_GP(ctx, CTX_GPREG_X3, disp_ctx->spsr_el3);

	/*
	 * Prepare for ERET:
	 *
	 * - Set PC to the registered handler address
	 * - Set SPSR to jump to client EL with exceptions masked
	 */
	cm_set_elr_spsr_el3(NON_SECURE, (uintptr_t) se->ep,
			SPSR_64(sdei_client_el(), MODE_SP_ELX,
				DISABLE_ALL_EXCEPTIONS));

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;
	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(ctx);

	/* Save CVE-2018-3639 mitigation state */
	disp_ctx->disable_cve_2018_3639 = read_ctx_reg(tgt_cve_2018_3639,
			CTX_CVE_2018_3639_DISABLE);

	/* Force SDEI handler to execute with mitigation enabled by default */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE, 0);
#endif

	disp_ctx->dispatch_jmp = dispatch_jmp;
}
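
/*
 * From the client's point of view, the registered handler is therefore entered
 * as if it had the following prototype (illustrative only; the name and types
 * are hypothetical, and the client is free to define its own):
 *
 *	void handler(uint64_t ev_num, uint64_t arg, uint64_t interrupted_pc,
 *			uint64_t interrupted_spsr);
 *
 * The handler is expected to finish the dispatch with SDEI_EVENT_COMPLETE or
 * SDEI_EVENT_COMPLETE_AND_RESUME (see sdei_event_complete() below).
 */
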
/* Handle a triggered SDEI interrupt while events were masked on this PE */
static void handle_masked_trigger(sdei_ev_map_t *map, sdei_entry_t *se,
		sdei_cpu_state_t *state, unsigned int intr_raw)
{
	uint64_t my_mpidr __unused = (read_mpidr_el1() & MPIDR_AFFINITY_MASK);
	bool disable = false;

	/* Nothing to do for event 0 */
	if (map->ev_num == SDEI_EVENT_0)
		return;

	/*
	 * For a private event, or for a shared event specifically routed to
	 * this CPU, we disable the interrupt, leave it pending, and do EOI.
	 */
	if (is_event_private(map) || (se->reg_flags == SDEI_REGF_RM_PE))
		disable = true;

	if (se->reg_flags == SDEI_REGF_RM_PE)
		assert(se->affinity == my_mpidr);

	if (disable) {
		plat_ic_disable_interrupt(map->intr);
		plat_ic_set_interrupt_pending(map->intr);
		plat_ic_end_of_interrupt(intr_raw);
		state->pending_enables = true;

		return;
	}

	/*
	 * We just received a shared event with routing set to ANY PE. The
	 * interrupt can't be delegated on this PE as SDEI events are masked.
	 * However, because its routing mode is ANY, it is possible that the
	 * event can be delegated on any other PE that hasn't masked events.
	 * Therefore, we set the interrupt back pending so as to give other
	 * suitable PEs a chance of handling it.
	 */
	assert(plat_ic_is_spi(map->intr) != 0);
	plat_ic_set_interrupt_pending(map->intr);

	/*
	 * Leaving the same interrupt pending also means that the same interrupt
	 * can target this PE again as soon as this PE leaves EL3. Whether and
	 * how often that happens depends on the implementation of GIC.
	 *
	 * We therefore call a platform handler to resolve this situation.
	 */
	plat_sdei_handle_masked_trigger(my_mpidr, map->intr);

	/* This PE is masked. We EOI the interrupt, as it can't be delegated */
	plat_ic_end_of_interrupt(intr_raw);
}

/* SDEI main interrupt handler */
int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
		void *cookie)
{
	sdei_entry_t *se;
	cpu_context_t *ctx;
	sdei_ev_map_t *map;
	const sdei_dispatch_context_t *disp_ctx;
	unsigned int sec_state;
	sdei_cpu_state_t *state;
	uint32_t intr;
	jmp_buf dispatch_jmp;
	const uint64_t mpidr = read_mpidr_el1();

	/*
	 * To handle an event, the following conditions must be true:
	 *
	 * 1. Event must be signalled
	 * 2. Event must be enabled
	 * 3. This PE must be a target PE for the event
	 * 4. PE must be unmasked for SDEI
	 * 5. If this is a normal event, no event must be running
	 * 6. If this is a critical event, no critical event must be running
	 *
	 * (1) and (2) are true when this function is running
	 * (3) is enforced in GIC by selecting the appropriate routing option
	 * (4) is satisfied by client calling PE_UNMASK
	 * (5) and (6) are enforced using interrupt priority, the RPR, in GIC:
	 *    - Normal SDEI events belong to Normal SDE priority class
	 *    - Critical SDEI events belong to Critical CSDE priority class
	 *
	 * The interrupt has already been acknowledged, and therefore is active,
	 * so no other PE can handle this event while we are at it.
	 *
	 * Find if this is an SDEI interrupt. There must be an event mapped to
	 * this interrupt.
	 */
	intr = plat_ic_get_interrupt_id(intr_raw);
	map = find_event_map_by_intr(intr, (plat_ic_is_spi(intr) != 0));
	if (map == NULL) {
		ERROR("No SDEI map for interrupt %u\n", intr);
		panic();
	}

	/*
	 * Received interrupt number must either correspond to event 0, or must
	 * be a bound interrupt.
	 */
	assert((map->ev_num == SDEI_EVENT_0) || is_map_bound(map));

	se = get_event_entry(map);
	state = sdei_get_this_pe_state();

	if (state->pe_masked) {
		/*
		 * Interrupts received while this PE was masked can't be
		 * dispatched.
		 */
		SDEI_LOG("interrupt %u on %llx while PE masked\n", map->intr,
				mpidr);
		if (is_event_shared(map))
			sdei_map_lock(map);

		handle_masked_trigger(map, se, state, intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	/* Insert load barrier for signalled SDEI event */
	if (map->ev_num == SDEI_EVENT_0)
		dmbld();

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* Assert that a shared event routed to this PE was configured so */
	if (is_event_shared(map) && (se->reg_flags == SDEI_REGF_RM_PE)) {
		assert(se->affinity == (mpidr & MPIDR_AFFINITY_MASK));
	}

	if (!can_sdei_state_trans(se, DO_DISPATCH)) {
		SDEI_LOG("SDEI event 0x%x can't be dispatched; state=0x%x\n",
				map->ev_num, se->state);

		/*
		 * If the event is registered, leave the interrupt pending so
		 * that it's delivered when the event is enabled.
		 */
		if (GET_EV_STATE(se, REGISTERED))
			plat_ic_set_interrupt_pending(map->intr);

		/*
		 * The interrupt was disabled or unregistered after the handler
		 * started to execute, which means now the interrupt is already
		 * disabled and we just need to EOI the interrupt.
		 */
		plat_ic_end_of_interrupt(intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	disp_ctx = get_outstanding_dispatch();
	if (is_event_critical(map)) {
		/*
		 * If this event is Critical, and if there's an outstanding
		 * dispatch, assert the latter is a Normal dispatch. Critical
		 * events can preempt an outstanding Normal event dispatch.
		 */
		if (disp_ctx != NULL)
			assert(is_event_normal(disp_ctx->map));
	} else {
		/*
		 * If this event is Normal, assert that there are no outstanding
		 * dispatches. Normal events can't preempt any outstanding event
		 * dispatches.
		 */
		assert(disp_ctx == NULL);
	}

	sec_state = get_interrupt_src_ss(flags);

	if (is_event_shared(map))
		sdei_map_unlock(map);

	SDEI_LOG("ACK %llx, ev:%d ss:%d spsr:%lx ELR:%lx\n", mpidr, map->ev_num,
			sec_state, read_spsr_el3(), read_elr_el3());

	ctx = handle;

	/*
	 * Check if we interrupted secure state. Perform a context switch so
	 * that we can delegate to NS.
	 */
	if (sec_state == SECURE) {
		save_secure_context();
		ctx = restore_and_resume_ns_context();
	}

	/* Synchronously dispatch event */
	setup_ns_dispatch(map, se, ctx, &dispatch_jmp);
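	/*
	 * begin_sdei_synchronous_dispatch() does not return until the client
	 * completes the event: sdei_event_complete() unwinds back to this
	 * point through the jump buffer (see end_sdei_synchronous_dispatch()
	 * below).
	 */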
	begin_sdei_synchronous_dispatch(&dispatch_jmp);

	/*
	 * We reach here when client completes the event.
	 *
	 * If the cause of dispatch originally interrupted the Secure world,
	 * resume Secure.
	 *
	 * No need to save the Non-secure context ahead of a world switch: the
	 * Non-secure context was fully saved before dispatch, and has been
	 * returned to its pre-dispatch state.
	 */
	if (sec_state == SECURE)
		restore_and_resume_secure_context();

	/*
	 * The event was dispatched after receiving the SDEI interrupt. With
	 * the event handling completed, EOI the corresponding interrupt.
	 */
	if ((map->ev_num != SDEI_EVENT_0) && !is_map_bound(map)) {
		ERROR("Invalid SDEI mapping: ev=%u\n", map->ev_num);
		panic();
	}
	plat_ic_end_of_interrupt(intr_raw);

	return 0;
}

/*
 * Explicitly dispatch the given SDEI event.
 *
 * When calling this API, the caller must be prepared for the SDEI dispatcher to
 * restore the Non-secure context and make it the active one. This call returns
 * only after the client has completed the dispatch. Then, the Non-secure
 * context will be active, and the following ERET will return to Non-secure.
 *
 * Should the caller require re-entry to Secure, it must restore the Secure
 * context and program registers for ERET.
 */
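/*
 * For example, another EL3 component (such as a platform error handler) might
 * dispatch an explicit event as follows. This is an illustrative sketch only:
 * PLAT_ERROR_EV_NUM stands for a platform-defined explicit event number and is
 * not defined by the SDEI dispatcher.
 *
 *	if (sdei_dispatch_event(PLAT_ERROR_EV_NUM) != 0)
 *		WARN("SDEI dispatch failed\n");
 */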
int sdei_dispatch_event(int ev_num)
{
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ns_ctx;
	sdei_dispatch_context_t *disp_ctx;
	sdei_cpu_state_t *state;
	jmp_buf dispatch_jmp;

	/* Can't dispatch if events are masked on this PE */
	state = sdei_get_this_pe_state();
	if (state->pe_masked)
		return -1;

	/* Event 0 can't be dispatched */
	if (ev_num == SDEI_EVENT_0)
		return -1;

	/* Locate mapping corresponding to this event */
	map = find_event_map(ev_num);
	if (map == NULL)
		return -1;

	/* Only explicit events can be dispatched */
	if (!is_map_explicit(map))
		return -1;

	/* Examine state of dispatch stack */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx != NULL) {
		/*
		 * There's an outstanding dispatch. If the outstanding dispatch
		 * is critical, no more dispatches are possible.
		 */
		if (is_event_critical(disp_ctx->map))
			return -1;

		/*
		 * If the outstanding dispatch is Normal, only critical events
		 * can be dispatched.
		 */
		if (is_event_normal(map))
			return -1;
	}

	se = get_event_entry(map);
	if (!can_sdei_state_trans(se, DO_DISPATCH))
		return -1;

	/* Activate the priority corresponding to the event being dispatched */
	ehf_activate_priority(sdei_event_priority(map));

	/*
	 * Prepare for NS dispatch by restoring the Non-secure context and
	 * marking that as active.
	 */
	ns_ctx = restore_and_resume_ns_context();

	/* Dispatch event synchronously */
	setup_ns_dispatch(map, se, ns_ctx, &dispatch_jmp);
	begin_sdei_synchronous_dispatch(&dispatch_jmp);

	/*
	 * We reach here when client completes the event.
	 *
	 * Deactivate the priority level that was activated at the time of
	 * explicit dispatch.
	 */
	ehf_deactivate_priority(sdei_event_priority(map));

	return 0;
}

static void end_sdei_synchronous_dispatch(jmp_buf *buffer)
{
	longjmp(*buffer, 1);
}

int sdei_event_complete(bool resume, uint64_t pc)
{
	sdei_dispatch_context_t *disp_ctx;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ctx;
	sdei_action_t act;
	unsigned int client_el = sdei_client_el();

	/* Return error if called without an active event */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx == NULL)
		return SDEI_EDENY;

	/* Validate resumption point */
	if (resume && (plat_sdei_validate_entry_point(pc, client_el) != 0))
		return SDEI_EDENY;

	map = disp_ctx->map;
	assert(map != NULL);
	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	act = resume ? DO_COMPLETE_RESUME : DO_COMPLETE;
	if (!can_sdei_state_trans(se, act)) {
		if (is_event_shared(map))
			sdei_map_unlock(map);
		return SDEI_EDENY;
	}

	if (is_event_shared(map))
		sdei_map_unlock(map);

	/* Having done sanity checks, pop dispatch */
	(void) pop_dispatch();

	SDEI_LOG("EOI:%lx, %d spsr:%lx elr:%lx\n", read_mpidr_el1(),
			map->ev_num, read_spsr_el3(), read_elr_el3());

	/*
	 * Restore Non-secure to how it was originally interrupted. Once done,
	 * it's up-to-date with the saved copy.
	 */
	ctx = cm_get_context(NON_SECURE);
	restore_event_ctx(disp_ctx, ctx);

	if (resume) {
		/*
		 * Complete-and-resume call. Prepare the Non-secure context
		 * (currently active) for complete and resume.
		 */
		cm_set_elr_spsr_el3(NON_SECURE, pc, SPSR_64(client_el,
					MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));

		/*
		 * Make it look as if a synchronous exception were taken at the
		 * supplied Non-secure resumption point. Populate SPSR and
		 * ELR_ELx so that an ERET from there works as expected.
		 *
		 * The assumption is that the client, if necessary, would have
		 * saved any live content in these registers before making this
		 * call.
		 */
		if (client_el == MODE_EL2) {
			write_elr_el2(disp_ctx->elr_el3);
			write_spsr_el2(disp_ctx->spsr_el3);
		} else {
			/* EL1 */
			write_elr_el1(disp_ctx->elr_el3);
			write_spsr_el1(disp_ctx->spsr_el3);
		}
	}

	/* End the outstanding dispatch */
	end_sdei_synchronous_dispatch(disp_ctx->dispatch_jmp);

	return 0;
}
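
/*
 * Handle the SDEI_EVENT_CONTEXT call: return the value of register xN
 * (N = param) of the interrupted context, as saved at the time of dispatch.
 * Valid only while a dispatch is outstanding on this CPU.
 */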
int64_t sdei_event_context(void *handle, unsigned int param)
{
	sdei_dispatch_context_t *disp_ctx;

	if (param >= SDEI_SAVED_GPREGS)
		return SDEI_EINVAL;

	/* Get outstanding dispatch on this CPU */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx == NULL)
		return SDEI_EDENY;

	assert(disp_ctx->map != NULL);

	if (!can_sdei_state_trans(get_event_entry(disp_ctx->map), DO_CONTEXT))
		return SDEI_EDENY;

	/*
	 * No locking is required for the Running status as this is the only
	 * CPU which can complete the event.
	 */

	return (int64_t) disp_ctx->x[param];
}