/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <cassert.h>
#include <debug.h>
#include <ehf.h>
#include <interrupt_mgmt.h>
#include <runtime_svc.h>
#include <sdei.h>
#include <string.h>
#include "sdei_private.h"

#define PE_MASKED	1
#define PE_NOT_MASKED	0

/* x0-x17 GPREGS context */
#define SDEI_SAVED_GPREGS	18

/* Maximum preemption nesting levels: Critical priority and Normal priority */
#define MAX_EVENT_NESTING	2

/* Per-CPU SDEI state access macro */
#define sdei_get_this_pe_state()	(&sdei_cpu_state[plat_my_core_pos()])

/* Structure to store information about an outstanding dispatch */
typedef struct sdei_dispatch_context {
	sdei_ev_map_t *map;
	uint64_t x[SDEI_SAVED_GPREGS];
	struct jmpbuf *dispatch_jmp;

	/* Exception state registers */
	uint64_t elr_el3;
	uint64_t spsr_el3;

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	/* CVE-2018-3639 mitigation state */
	uint64_t disable_cve_2018_3639;
#endif
} sdei_dispatch_context_t;

/* Per-CPU SDEI state data */
typedef struct sdei_cpu_state {
	sdei_dispatch_context_t dispatch_stack[MAX_EVENT_NESTING];
	unsigned short stack_top;	/* Empty ascending */
	unsigned int pe_masked:1;
	unsigned int pending_enables:1;
} sdei_cpu_state_t;

/* SDEI states for all cores in the system */
static sdei_cpu_state_t sdei_cpu_state[PLATFORM_CORE_COUNT];

unsigned int sdei_pe_mask(void)
{
	unsigned int ret;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	/*
	 * Return value indicates whether this call had any effect on the mask
	 * status of this PE.
	 */
	ret = (state->pe_masked ^ PE_MASKED);
	state->pe_masked = PE_MASKED;

	return ret;
}

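/*
 * Unmask SDEI events on this PE, and enable the interrupts of any events whose
 * enablement was deferred while the PE was masked.
 */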
void sdei_pe_unmask(void)
{
	int i;
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	uint64_t my_mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;

	/*
	 * If there are pending enables, iterate through the private mappings
	 * and enable the interrupts of bound maps that are in the enabled
	 * state. Also, iterate through the shared mappings and enable the
	 * interrupts of events that are targeted to this PE.
	 */
	if (state->pending_enables) {
		for_each_private_map(i, map) {
			se = get_event_entry(map);
			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED))
				plat_ic_enable_interrupt(map->intr);
		}

		for_each_shared_map(i, map) {
			se = get_event_entry(map);

			sdei_map_lock(map);
			if (is_map_bound(map) &&
					GET_EV_STATE(se, ENABLED) &&
					(se->reg_flags == SDEI_REGF_RM_PE) &&
					(se->affinity == my_mpidr)) {
				plat_ic_enable_interrupt(map->intr);
			}
			sdei_map_unlock(map);
		}
	}

	state->pending_enables = 0;
	state->pe_masked = PE_NOT_MASKED;
}

/* Push a dispatch context to the dispatch stack */
static sdei_dispatch_context_t *push_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	sdei_dispatch_context_t *disp_ctx;

	/* Cannot have more than max events */
	assert(state->stack_top < MAX_EVENT_NESTING);

	disp_ctx = &state->dispatch_stack[state->stack_top];
	state->stack_top++;

	return disp_ctx;
}

/* Pop a dispatch context from the dispatch stack */
static sdei_dispatch_context_t *pop_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	state->stack_top--;

	return &state->dispatch_stack[state->stack_top];
}

/* Retrieve the context at the top of dispatch stack */
static sdei_dispatch_context_t *get_outstanding_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	return &state->dispatch_stack[state->stack_top - 1];
}

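/*
 * Save the interrupted context and push a new dispatch context onto this PE's
 * dispatch stack for the event being dispatched.
 */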
static sdei_dispatch_context_t *save_event_ctx(sdei_ev_map_t *map,
		void *tgt_ctx)
{
	sdei_dispatch_context_t *disp_ctx;
	gp_regs_t *tgt_gpregs;
	el3_state_t *tgt_el3;

	assert(tgt_ctx);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	disp_ctx = push_dispatch();
	assert(disp_ctx);
	disp_ctx->map = map;

	/* Save general purpose and exception registers */
	memcpy(disp_ctx->x, tgt_gpregs, sizeof(disp_ctx->x));
	disp_ctx->spsr_el3 = read_ctx_reg(tgt_el3, CTX_SPSR_EL3);
	disp_ctx->elr_el3 = read_ctx_reg(tgt_el3, CTX_ELR_EL3);

	return disp_ctx;
}

static void restore_event_ctx(sdei_dispatch_context_t *disp_ctx, void *tgt_ctx)
{
	gp_regs_t *tgt_gpregs;
	el3_state_t *tgt_el3;

	assert(tgt_ctx);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	CASSERT(sizeof(disp_ctx->x) == (SDEI_SAVED_GPREGS * sizeof(uint64_t)),
			assert_sdei_gpregs_size);

	/* Restore general purpose and exception registers */
	memcpy(tgt_gpregs, disp_ctx->x, sizeof(disp_ctx->x));
	write_ctx_reg(tgt_el3, CTX_SPSR_EL3, disp_ctx->spsr_el3);
	write_ctx_reg(tgt_el3, CTX_ELR_EL3, disp_ctx->elr_el3);

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;
	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(tgt_ctx);

	/* Restore CVE-2018-3639 mitigation state */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE,
			disp_ctx->disable_cve_2018_3639);
#endif
}

static void save_secure_context(void)
{
	cm_el1_sysregs_context_save(SECURE);
}

/* Restore Secure context and arrange to resume it at the next ERET */
static void restore_and_resume_secure_context(void)
{
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);
}

/*
 * Restore Non-secure context and arrange to resume it at the next ERET. Return
 * a pointer to the Non-secure context.
 */
static cpu_context_t *restore_and_resume_ns_context(void)
{
	cpu_context_t *ns_ctx;

	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	ns_ctx = cm_get_context(NON_SECURE);
	assert(ns_ctx);

	return ns_ctx;
}

/*
 * Populate the Non-secure context so that the next ERET will dispatch to the
 * SDEI client.
 */
static void setup_ns_dispatch(sdei_ev_map_t *map, sdei_entry_t *se,
		cpu_context_t *ctx, struct jmpbuf *dispatch_jmp)
{
	sdei_dispatch_context_t *disp_ctx;

	/* Push the event and context */
	disp_ctx = save_event_ctx(map, ctx);

	/*
	 * Setup handler arguments:
	 *
	 * - x0: Event number
	 * - x1: Handler argument supplied at the time of event registration
	 * - x2: Interrupted PC
	 * - x3: Interrupted SPSR
	 */
	SMC_SET_GP(ctx, CTX_GPREG_X0, map->ev_num);
	SMC_SET_GP(ctx, CTX_GPREG_X1, se->arg);
	SMC_SET_GP(ctx, CTX_GPREG_X2, disp_ctx->elr_el3);
	SMC_SET_GP(ctx, CTX_GPREG_X3, disp_ctx->spsr_el3);

	/*
	 * Prepare for ERET:
	 *
	 * - Set PC to the registered handler address
	 * - Set SPSR to jump to client EL with exceptions masked
	 */
	cm_set_elr_spsr_el3(NON_SECURE, (uintptr_t) se->ep,
			SPSR_64(sdei_client_el(), MODE_SP_ELX,
				DISABLE_ALL_EXCEPTIONS));

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;
	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(ctx);

	/* Save CVE-2018-3639 mitigation state */
	disp_ctx->disable_cve_2018_3639 = read_ctx_reg(tgt_cve_2018_3639,
			CTX_CVE_2018_3639_DISABLE);

	/* Force SDEI handler to execute with mitigation enabled by default */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE, 0);
#endif

	disp_ctx->dispatch_jmp = dispatch_jmp;
}

/* Handle a triggered SDEI interrupt while events were masked on this PE */
static void handle_masked_trigger(sdei_ev_map_t *map, sdei_entry_t *se,
		sdei_cpu_state_t *state, unsigned int intr_raw)
{
	uint64_t my_mpidr __unused = (read_mpidr_el1() & MPIDR_AFFINITY_MASK);
	int disable = 0;

	/* Nothing to do for event 0 */
	if (map->ev_num == SDEI_EVENT_0)
		return;

	/*
	 * For a private event, or for a shared event specifically routed to
	 * this CPU, we disable the interrupt, leave the interrupt pending, and
	 * do EOI.
	 */
	if (is_event_private(map)) {
		disable = 1;
	} else if (se->reg_flags == SDEI_REGF_RM_PE) {
		assert(se->affinity == my_mpidr);
		disable = 1;
	}

	if (disable) {
		plat_ic_disable_interrupt(map->intr);
		plat_ic_set_interrupt_pending(map->intr);
		plat_ic_end_of_interrupt(intr_raw);
		state->pending_enables = 1;

		return;
	}

	/*
	 * We just received a shared event with routing set to ANY PE. The
	 * interrupt can't be delegated on this PE as SDEI events are masked.
	 * However, because its routing mode is ANY, it is possible that the
	 * event can be delegated on any other PE that hasn't masked events.
	 * Therefore, we set the interrupt back pending so as to give other
	 * suitable PEs a chance of handling it.
	 */
	assert(plat_ic_is_spi(map->intr));
	plat_ic_set_interrupt_pending(map->intr);

	/*
	 * Leaving the same interrupt pending also means that the same interrupt
	 * can target this PE again as soon as this PE leaves EL3. Whether and
	 * how often that happens depends on the implementation of the GIC.
	 *
	 * We therefore call a platform handler to resolve this situation.
	 */
	plat_sdei_handle_masked_trigger(my_mpidr, map->intr);

	/* This PE is masked. We EOI the interrupt, as it can't be delegated */
	plat_ic_end_of_interrupt(intr_raw);
}

/* SDEI main interrupt handler */
int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
		void *cookie)
{
	sdei_entry_t *se;
	cpu_context_t *ctx;
	sdei_ev_map_t *map;
	sdei_dispatch_context_t *disp_ctx;
	unsigned int sec_state;
	sdei_cpu_state_t *state;
	uint32_t intr;
	struct jmpbuf dispatch_jmp;

	/*
	 * To handle an event, the following conditions must be true:
	 *
	 * 1. Event must be signalled
	 * 2. Event must be enabled
	 * 3. This PE must be a target PE for the event
	 * 4. PE must be unmasked for SDEI
	 * 5. If this is a normal event, no event must be running
	 * 6. If this is a critical event, no critical event must be running
	 *
	 * (1) and (2) are true when this function is running
	 * (3) is enforced in the GIC by selecting the appropriate routing option
	 * (4) is satisfied by the client calling PE_UNMASK
	 * (5) and (6) are enforced using interrupt priority (the RPR) in the GIC:
	 *    - Normal SDEI events belong to Normal SDE priority class
	 *    - Critical SDEI events belong to Critical CSDE priority class
	 *
	 * The interrupt has already been acknowledged, and therefore is active,
	 * so no other PE can handle this event while we are at it.
	 *
	 * Find if this is an SDEI interrupt. There must be an event mapped to
	 * this interrupt.
	 */
	intr = plat_ic_get_interrupt_id(intr_raw);
	map = find_event_map_by_intr(intr, plat_ic_is_spi(intr));
	if (!map) {
		ERROR("No SDEI map for interrupt %u\n", intr);
		panic();
	}

	/*
	 * Received interrupt number must either correspond to event 0, or must
	 * be a bound interrupt.
	 */
	assert((map->ev_num == SDEI_EVENT_0) || is_map_bound(map));

	se = get_event_entry(map);
	state = sdei_get_this_pe_state();

	if (state->pe_masked == PE_MASKED) {
		/*
		 * Interrupts received while this PE was masked can't be
		 * dispatched.
		 */
		SDEI_LOG("interrupt %u on %lx while PE masked\n", map->intr,
				read_mpidr_el1());
		if (is_event_shared(map))
			sdei_map_lock(map);

		handle_masked_trigger(map, se, state, intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	/* Insert load barrier for signalled SDEI event */
	if (map->ev_num == SDEI_EVENT_0)
		dmbld();

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* A shared event routed to this PE must have been configured so */
	if (is_event_shared(map) && (se->reg_flags == SDEI_REGF_RM_PE)) {
		assert(se->affinity ==
				(read_mpidr_el1() & MPIDR_AFFINITY_MASK));
	}

	if (!can_sdei_state_trans(se, DO_DISPATCH)) {
		SDEI_LOG("SDEI event 0x%x can't be dispatched; state=0x%x\n",
				map->ev_num, se->state);

		/*
		 * If the event is registered, leave the interrupt pending so
		 * that it's delivered when the event is enabled.
		 */
		if (GET_EV_STATE(se, REGISTERED))
			plat_ic_set_interrupt_pending(map->intr);

		/*
		 * The interrupt was disabled or unregistered after the handler
		 * started to execute, which means the interrupt is now already
		 * disabled and we just need to EOI it.
		 */
		plat_ic_end_of_interrupt(intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	disp_ctx = get_outstanding_dispatch();
	if (is_event_critical(map)) {
		/*
		 * If this event is Critical, and if there's an outstanding
		 * dispatch, assert the latter is a Normal dispatch. Critical
		 * events can preempt an outstanding Normal event dispatch.
		 */
		if (disp_ctx)
			assert(is_event_normal(disp_ctx->map));
	} else {
		/*
		 * If this event is Normal, assert that there are no outstanding
		 * dispatches. Normal events can't preempt any outstanding event
		 * dispatches.
		 */
		assert(disp_ctx == NULL);
	}

	sec_state = get_interrupt_src_ss(flags);

	if (is_event_shared(map))
		sdei_map_unlock(map);

	SDEI_LOG("ACK %lx, ev:%d ss:%d spsr:%lx ELR:%lx\n", read_mpidr_el1(),
			map->ev_num, sec_state, read_spsr_el3(),
			read_elr_el3());

	ctx = handle;

	/*
	 * Check if we interrupted the Secure state. If so, perform a context
	 * switch so that we can delegate to NS.
	 */
	if (sec_state == SECURE) {
		save_secure_context();
		ctx = restore_and_resume_ns_context();
	}

	/* Synchronously dispatch event */
	setup_ns_dispatch(map, se, ctx, &dispatch_jmp);
	begin_sdei_synchronous_dispatch(&dispatch_jmp);

	/*
	 * We reach here when the client completes the event.
	 *
	 * If the cause of dispatch originally interrupted the Secure world,
	 * and if the Non-secure world wasn't allowed to preempt Secure
	 * execution, resume Secure.
	 *
	 * No need to save the Non-secure context ahead of a world switch: the
	 * Non-secure context was fully saved before dispatch, and has been
	 * returned to its pre-dispatch state.
	 */
	if ((sec_state == SECURE) && (ehf_is_ns_preemption_allowed() == 0))
		restore_and_resume_secure_context();

	/*
	 * The event was dispatched after receiving the SDEI interrupt. With
	 * the event handling completed, EOI the corresponding interrupt.
	 */
	if ((map->ev_num != SDEI_EVENT_0) && !is_map_bound(map)) {
		ERROR("Invalid SDEI mapping: ev=%u\n", map->ev_num);
		panic();
	}
	plat_ic_end_of_interrupt(intr_raw);

	if (is_event_shared(map))
		sdei_map_unlock(map);

	return 0;
}

/*
 * Explicitly dispatch the given SDEI event.
 *
 * When calling this API, the caller must be prepared for the SDEI dispatcher
 * to restore the Non-secure context and make it the active one. This call
 * returns only after the client has completed the dispatch. Then, the
 * Non-secure context will be active, and the following ERET will return to
 * Non-secure.
 *
 * Should the caller require re-entry to Secure, it must restore the Secure
 * context and program registers for ERET.
 */
int sdei_dispatch_event(int ev_num)
{
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ns_ctx;
	sdei_dispatch_context_t *disp_ctx;
	sdei_cpu_state_t *state;
	struct jmpbuf dispatch_jmp;

	/* Can't dispatch if events are masked on this PE */
	state = sdei_get_this_pe_state();
	if (state->pe_masked == PE_MASKED)
		return -1;

	/* Event 0 can't be dispatched */
	if (ev_num == SDEI_EVENT_0)
		return -1;

	/* Locate mapping corresponding to this event */
	map = find_event_map(ev_num);
	if (!map)
		return -1;

	/* Only explicit events can be dispatched */
	if (!is_map_explicit(map))
		return -1;

	/* Examine state of dispatch stack */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx) {
		/*
		 * There's an outstanding dispatch. If the outstanding dispatch
		 * is critical, no more dispatches are possible.
		 */
		if (is_event_critical(disp_ctx->map))
			return -1;

		/*
		 * If the outstanding dispatch is Normal, only critical events
		 * can be dispatched.
		 */
		if (is_event_normal(map))
			return -1;
	}

	se = get_event_entry(map);
	if (!can_sdei_state_trans(se, DO_DISPATCH))
		return -1;

	/* Activate the priority corresponding to the event being dispatched */
	ehf_activate_priority(sdei_event_priority(map));

	/*
	 * Prepare for NS dispatch by restoring the Non-secure context and
	 * marking that as active.
	 */
	ns_ctx = restore_and_resume_ns_context();

	/* Dispatch event synchronously */
	setup_ns_dispatch(map, se, ns_ctx, &dispatch_jmp);
	begin_sdei_synchronous_dispatch(&dispatch_jmp);

	/*
	 * We reach here when the client completes the event.
	 *
	 * Deactivate the priority level that was activated at the time of
	 * explicit dispatch.
	 */
	ehf_deactivate_priority(sdei_event_priority(map));

	return 0;
}

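/*
 * Illustrative sketch of how a platform might use the explicit dispatch API
 * above, e.g. from an EL3 firmware-first error handler. This snippet is not
 * part of this file's build; the event macro and function name are
 * hypothetical placeholders:
 *
 *	static void plat_handle_deferred_error(void)
 *	{
 *		int ret;
 *
 *		// Explicit event number agreed with the OS (hypothetical)
 *		ret = sdei_dispatch_event(PLAT_SDEI_DEFERRED_ERR_EVENT);
 *		if (ret < 0)
 *			WARN("SDEI dispatch failed: %d\n", ret);
 *	}
 */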
static void end_sdei_synchronous_dispatch(struct jmpbuf *buffer)
{
	longjmp(buffer);
}

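/*
 * Complete the outstanding dispatch on this PE. The client's COMPLETE and
 * COMPLETE_AND_RESUME calls land here.
 */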
int sdei_event_complete(int resume, uint64_t pc)
{
	sdei_dispatch_context_t *disp_ctx;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ctx;
	sdei_action_t act;
	unsigned int client_el = sdei_client_el();

	/* Return error if called without an active event */
	disp_ctx = get_outstanding_dispatch();
	if (!disp_ctx)
		return SDEI_EDENY;

	/* Validate resumption point */
	if (resume && (plat_sdei_validate_entry_point(pc, client_el) != 0))
		return SDEI_EDENY;

	map = disp_ctx->map;
	assert(map);
	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	act = resume ? DO_COMPLETE_RESUME : DO_COMPLETE;
	if (!can_sdei_state_trans(se, act)) {
		if (is_event_shared(map))
			sdei_map_unlock(map);
		return SDEI_EDENY;
	}

	/* Having done sanity checks, pop dispatch */
	pop_dispatch();

	SDEI_LOG("EOI:%lx, %d spsr:%lx elr:%lx\n", read_mpidr_el1(),
			map->ev_num, read_spsr_el3(), read_elr_el3());

	/*
	 * Restore Non-secure to how it was originally interrupted. Once done,
	 * it's up-to-date with the saved copy.
	 */
	ctx = cm_get_context(NON_SECURE);
	restore_event_ctx(disp_ctx, ctx);

	if (resume) {
		/*
		 * Complete-and-resume call. Prepare the Non-secure context
		 * (currently active) for complete and resume.
		 */
		cm_set_elr_spsr_el3(NON_SECURE, pc, SPSR_64(client_el,
					MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));

		/*
		 * Make it look as if a synchronous exception were taken at the
		 * supplied Non-secure resumption point. Populate SPSR and
		 * ELR_ELx so that an ERET from there works as expected.
		 *
		 * The assumption is that the client, if necessary, would have
		 * saved any live content in these registers before making this
		 * call.
		 */
		if (client_el == MODE_EL2) {
			write_elr_el2(disp_ctx->elr_el3);
			write_spsr_el2(disp_ctx->spsr_el3);
		} else {
			/* EL1 */
			write_elr_el1(disp_ctx->elr_el3);
			write_spsr_el1(disp_ctx->spsr_el3);
		}
	}

	/* End the outstanding dispatch */
	end_sdei_synchronous_dispatch(disp_ctx->dispatch_jmp);

	return 0;
}

int sdei_event_context(void *handle, unsigned int param)
{
	sdei_dispatch_context_t *disp_ctx;

	if (param >= SDEI_SAVED_GPREGS)
		return SDEI_EINVAL;

	/* Get outstanding dispatch on this CPU */
	disp_ctx = get_outstanding_dispatch();
	if (!disp_ctx)
		return SDEI_EDENY;

	assert(disp_ctx->map);

	if (!can_sdei_state_trans(get_event_entry(disp_ctx->map), DO_CONTEXT))
		return SDEI_EDENY;

	/*
	 * No locking is required for the Running status as this is the only
	 * CPU which can complete the event.
	 */

	return disp_ctx->x[param];
}
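/*
 * For reference, the client-side flow serviced by this file is roughly as
 * follows (a sketch based on the SDEI specification, not code in this
 * driver): the registered handler is entered with x0-x3 as set up in
 * setup_ns_dispatch(); it may read the saved x0-x17 via SDEI_EVENT_CONTEXT
 * calls (handled by sdei_event_context() above), and it finishes with
 * SDEI_EVENT_COMPLETE or SDEI_EVENT_COMPLETE_AND_RESUME, which lands in
 * sdei_event_complete() above.
 */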