/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/ehf.h>
#include <bl31/interrupt_mgmt.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/cassert.h>
#include <services/sdei.h>

#include "sdei_private.h"

/* x0-x17 GPREGS context */
#define SDEI_SAVED_GPREGS	18U

/* Maximum preemption nesting levels: Critical priority and Normal priority */
#define MAX_EVENT_NESTING	2U

/* Per-CPU SDEI state access macro */
#define sdei_get_this_pe_state()	(&cpu_state[plat_my_core_pos()])

/* Structure to store information about an outstanding dispatch */
typedef struct sdei_dispatch_context {
	sdei_ev_map_t *map;
	uint64_t x[SDEI_SAVED_GPREGS];
	jmp_buf *dispatch_jmp;

	/* Exception state registers */
	uint64_t elr_el3;
	uint64_t spsr_el3;

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	/* CVE-2018-3639 mitigation state */
	uint64_t disable_cve_2018_3639;
#endif
} sdei_dispatch_context_t;

/* Per-CPU SDEI state data */
typedef struct sdei_cpu_state {
	sdei_dispatch_context_t dispatch_stack[MAX_EVENT_NESTING];
	unsigned short stack_top; /* Empty ascending */
	bool pe_masked;
	bool pending_enables;
} sdei_cpu_state_t;

/* SDEI states for all cores in the system */
static sdei_cpu_state_t cpu_state[PLATFORM_CORE_COUNT];

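/*
 * Illustrative note on nesting: with MAX_EVENT_NESTING == 2, the deepest legal
 * dispatch stack on a PE is a Normal event dispatch preempted by a Critical
 * one:
 *
 *	dispatch_stack[1]	Critical event context	(stack_top == 2)
 *	dispatch_stack[0]	Normal event context
 *
 * stack_top indexes the first free slot ("empty ascending"), so it is 0 when
 * no dispatch is outstanding on this PE.
 */
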
int64_t sdei_pe_mask(void)
{
	int64_t ret = 0;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	/*
	 * Return value indicates whether this call had any effect on the mask
	 * status of this PE.
	 */
	if (!state->pe_masked) {
		state->pe_masked = true;
		ret = 1;
	}

	return ret;
}

void sdei_pe_unmask(void)
{
	unsigned int i;
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	uint64_t my_mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;

	/*
	 * If there are pending enables, iterate through the private mappings
	 * and enable those bound maps that are in enabled state. Also, iterate
	 * through shared mappings and enable interrupts of events that are
	 * targeted to this PE.
	 */
	if (state->pending_enables) {
		for_each_private_map(i, map) {
			se = get_event_entry(map);
			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED))
				plat_ic_enable_interrupt(map->intr);
		}

		for_each_shared_map(i, map) {
			se = get_event_entry(map);

			sdei_map_lock(map);
			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED) &&
					(se->reg_flags == SDEI_REGF_RM_PE) &&
					(se->affinity == my_mpidr)) {
				plat_ic_enable_interrupt(map->intr);
			}
			sdei_map_unlock(map);
		}
	}

	state->pending_enables = false;
	state->pe_masked = false;
}

/* Push a dispatch context to the dispatch stack */
static sdei_dispatch_context_t *push_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	sdei_dispatch_context_t *disp_ctx;

	/* Cannot have more than max events */
	assert(state->stack_top < MAX_EVENT_NESTING);

	disp_ctx = &state->dispatch_stack[state->stack_top];
	state->stack_top++;

	return disp_ctx;
}

/* Pop a dispatch context from the dispatch stack */
static sdei_dispatch_context_t *pop_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0U)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	state->stack_top--;

	return &state->dispatch_stack[state->stack_top];
}

/* Retrieve the context at the top of dispatch stack */
static sdei_dispatch_context_t *get_outstanding_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0U)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	return &state->dispatch_stack[state->stack_top - 1U];
}

static sdei_dispatch_context_t *save_event_ctx(sdei_ev_map_t *map,
		void *tgt_ctx)
{
	sdei_dispatch_context_t *disp_ctx;
	const gp_regs_t *tgt_gpregs;
	const el3_state_t *tgt_el3;

	assert(tgt_ctx != NULL);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	disp_ctx = push_dispatch();
	assert(disp_ctx != NULL);
	disp_ctx->map = map;

	/* Save general purpose and exception registers */
	memcpy(disp_ctx->x, tgt_gpregs, sizeof(disp_ctx->x));
	disp_ctx->spsr_el3 = read_ctx_reg(tgt_el3, CTX_SPSR_EL3);
	disp_ctx->elr_el3 = read_ctx_reg(tgt_el3, CTX_ELR_EL3);

	return disp_ctx;
}

static void restore_event_ctx(const sdei_dispatch_context_t *disp_ctx, void *tgt_ctx)
{
	gp_regs_t *tgt_gpregs;
	el3_state_t *tgt_el3;

	assert(tgt_ctx != NULL);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	CASSERT(sizeof(disp_ctx->x) == (SDEI_SAVED_GPREGS * sizeof(uint64_t)),
			foo);

	/* Restore general purpose and exception registers */
	memcpy(tgt_gpregs, disp_ctx->x, sizeof(disp_ctx->x));
	write_ctx_reg(tgt_el3, CTX_SPSR_EL3, disp_ctx->spsr_el3);
	write_ctx_reg(tgt_el3, CTX_ELR_EL3, disp_ctx->elr_el3);

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;
	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(tgt_ctx);

	/* Restore CVE-2018-3639 mitigation state */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE,
			disp_ctx->disable_cve_2018_3639);
#endif
}

static void save_secure_context(void)
{
	cm_el1_sysregs_context_save(SECURE);
}

/* Restore Secure context and arrange to resume it at the next ERET */
static void restore_and_resume_secure_context(void)
{
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);
}

/*
 * Restore Non-secure context and arrange to resume it at the next ERET. Return
 * a pointer to the Non-secure context.
 */
static cpu_context_t *restore_and_resume_ns_context(void)
{
	cpu_context_t *ns_ctx;

	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	ns_ctx = cm_get_context(NON_SECURE);
	assert(ns_ctx != NULL);

	return ns_ctx;
}

/*
 * Prepare for ERET:
 * - Set the ELR to the registered handler address
 * - Set the SPSR register as described in the SDEI documentation and
 *   the AArch64.TakeException() pseudocode function in
 *   ARM DDI 0487F.c page J1-7635
 */
static void sdei_set_elr_spsr(sdei_entry_t *se, sdei_dispatch_context_t *disp_ctx)
{
	unsigned int client_el = sdei_client_el();
	u_register_t sdei_spsr = SPSR_64(client_el, MODE_SP_ELX,
			DISABLE_ALL_EXCEPTIONS);

	u_register_t interrupted_pstate = disp_ctx->spsr_el3;

	/* Check the SPAN bit in the client EL SCTLR */
	u_register_t client_el_sctlr;

	if (client_el == MODE_EL2) {
		client_el_sctlr = read_sctlr_el2();
	} else {
		client_el_sctlr = read_sctlr_el1();
	}

	/*
	 * Check whether to force the PAN bit or to use the value in the
	 * interrupted EL, according to the check described in TakeException.
	 * Since the client can only be Non-secure EL2 or EL1, we know that
	 * some of the conditions in ElIsInHost() will always be true.
	 * When client_el is EL2, we know that SCTLR_EL2 has a SPAN bit, as we
	 * have already checked for the condition HCR_EL2.E2H = 1 and
	 * HCR_EL2.TGE = 1.
	 */
	u_register_t hcr_el2 = read_hcr();
	bool el_is_in_host = is_armv8_1_vhe_present() &&
			(hcr_el2 & HCR_TGE_BIT) &&
			(hcr_el2 & HCR_E2H_BIT);

	if (is_armv8_1_pan_present() &&
			((client_el == MODE_EL1) ||
			(client_el == MODE_EL2 && el_is_in_host)) &&
			((client_el_sctlr & SCTLR_SPAN_BIT) == 0U)) {
		sdei_spsr |= SPSR_PAN_BIT;
	} else {
		sdei_spsr |= (interrupted_pstate & SPSR_PAN_BIT);
	}

	/* If SSBS is implemented, take the value from the client EL SCTLR */
	u_register_t ssbs_enabled = (read_id_aa64pfr1_el1()
					>> ID_AA64PFR1_EL1_SSBS_SHIFT)
					& ID_AA64PFR1_EL1_SSBS_MASK;
	if (ssbs_enabled != SSBS_UNAVAILABLE) {
		u_register_t ssbs_bit = ((client_el_sctlr & SCTLR_DSSBS_BIT)
						>> SCTLR_DSSBS_SHIFT)
						<< SPSR_SSBS_SHIFT_AARCH64;
		sdei_spsr |= ssbs_bit;
	}

	/* If MTE is implemented in the client EL, set the TCO bit */
	if (get_armv8_5_mte_support() >= MTE_IMPLEMENTED_ELX) {
		sdei_spsr |= SPSR_TCO_BIT_AARCH64;
	}

	/* Take the DIT field from the pstate of the interrupted EL */
	sdei_spsr |= (interrupted_pstate & SPSR_DIT_BIT);

	cm_set_elr_spsr_el3(NON_SECURE, (uintptr_t) se->ep, sdei_spsr);
}

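/*
 * Illustrative example (not exhaustive): for a Non-secure EL1 client on a PE
 * that implements PAN and has SCTLR_EL1.SPAN == 0, the SPSR built above
 * selects EL1h with D, A, I and F masked and PAN set; DIT is inherited from
 * the interrupted context. The remaining fields depend on the features
 * implemented on the PE, as per the checks in sdei_set_elr_spsr().
 */
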
/*
 * Populate the Non-secure context so that the next ERET will dispatch to the
 * SDEI client.
 */
static void setup_ns_dispatch(sdei_ev_map_t *map, sdei_entry_t *se,
		cpu_context_t *ctx, jmp_buf *dispatch_jmp)
{
	sdei_dispatch_context_t *disp_ctx;

	/* Push the event and context */
	disp_ctx = save_event_ctx(map, ctx);

	/*
	 * Setup handler arguments:
	 *
	 * - x0: Event number
	 * - x1: Handler argument supplied at the time of event registration
	 * - x2: Interrupted PC
	 * - x3: Interrupted SPSR
	 */
	SMC_SET_GP(ctx, CTX_GPREG_X0, (uint64_t) map->ev_num);
	SMC_SET_GP(ctx, CTX_GPREG_X1, se->arg);
	SMC_SET_GP(ctx, CTX_GPREG_X2, disp_ctx->elr_el3);
	SMC_SET_GP(ctx, CTX_GPREG_X3, disp_ctx->spsr_el3);

	/* Setup the ELR and SPSR registers to prepare for ERET */
	sdei_set_elr_spsr(se, disp_ctx);

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;
	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(ctx);

	/* Save CVE-2018-3639 mitigation state */
	disp_ctx->disable_cve_2018_3639 = read_ctx_reg(tgt_cve_2018_3639,
			CTX_CVE_2018_3639_DISABLE);

	/* Force SDEI handler to execute with mitigation enabled by default */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE, 0);
#endif

	disp_ctx->dispatch_jmp = dispatch_jmp;
}

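/*
 * For reference (illustrative only; the binding is defined by the SDEI
 * specification, not by this file): the client handler registered with
 * SDEI_EVENT_REGISTER is entered with the arguments assembled above, roughly
 * matching a hypothetical prototype such as:
 *
 *	void client_handler(uint64_t ev_num, uint64_t registered_arg,
 *			uint64_t interrupted_pc, uint64_t interrupted_pstate);
 */
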
/* Handle a triggered SDEI interrupt while events were masked on this PE */
static void handle_masked_trigger(sdei_ev_map_t *map, sdei_entry_t *se,
		sdei_cpu_state_t *state, unsigned int intr_raw)
{
	uint64_t my_mpidr __unused = (read_mpidr_el1() & MPIDR_AFFINITY_MASK);
	bool disable = false;

	/* Nothing to do for event 0 */
	if (map->ev_num == SDEI_EVENT_0)
		return;

	/*
	 * For a private event, or for a shared event specifically routed to
	 * this CPU, we disable the interrupt, leave the interrupt pending, and
	 * do EOI.
	 */
	if (is_event_private(map) || (se->reg_flags == SDEI_REGF_RM_PE))
		disable = true;

	if (se->reg_flags == SDEI_REGF_RM_PE)
		assert(se->affinity == my_mpidr);

	if (disable) {
		plat_ic_disable_interrupt(map->intr);
		plat_ic_set_interrupt_pending(map->intr);
		plat_ic_end_of_interrupt(intr_raw);
		state->pending_enables = true;

		return;
	}

	/*
	 * We just received a shared event with routing set to ANY PE. The
	 * interrupt can't be delegated on this PE as SDEI events are masked.
	 * However, because its routing mode is ANY, it is possible that the
	 * event can be delegated on any other PE that hasn't masked events.
	 * Therefore, we set the interrupt back pending so as to give other
	 * suitable PEs a chance of handling it.
	 */
	assert(plat_ic_is_spi(map->intr) != 0);
	plat_ic_set_interrupt_pending(map->intr);

	/*
	 * Leaving the same interrupt pending also means that the same interrupt
	 * can target this PE again as soon as this PE leaves EL3. Whether and
	 * how often that happens depends on the implementation of GIC.
	 *
	 * We therefore call a platform handler to resolve this situation.
	 */
	plat_sdei_handle_masked_trigger(my_mpidr, map->intr);

	/* This PE is masked. We EOI the interrupt, as it can't be delegated */
	plat_ic_end_of_interrupt(intr_raw);
}

/* SDEI main interrupt handler */
int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
		void *cookie)
{
	sdei_entry_t *se;
	cpu_context_t *ctx;
	sdei_ev_map_t *map;
	const sdei_dispatch_context_t *disp_ctx;
	unsigned int sec_state;
	sdei_cpu_state_t *state;
	uint32_t intr;
	jmp_buf dispatch_jmp;
	const uint64_t mpidr = read_mpidr_el1();

	/*
	 * To handle an event, the following conditions must be true:
	 *
	 * 1. Event must be signalled
	 * 2. Event must be enabled
	 * 3. This PE must be a target PE for the event
	 * 4. PE must be unmasked for SDEI
	 * 5. If this is a normal event, no event must be running
	 * 6. If this is a critical event, no critical event must be running
	 *
	 * (1) and (2) are true when this function is running
	 * (3) is enforced in GIC by selecting the appropriate routing option
	 * (4) is satisfied by client calling PE_UNMASK
	 * (5) and (6) are enforced using interrupt priority, the RPR, in GIC:
	 *    - Normal SDEI events belong to Normal SDE priority class
	 *    - Critical SDEI events belong to Critical CSDE priority class
	 *
	 * The interrupt has already been acknowledged, and therefore is active,
	 * so no other PE can handle this event while we are at it.
	 *
	 * Find if this is an SDEI interrupt. There must be an event mapped to
	 * this interrupt.
	 */
	intr = plat_ic_get_interrupt_id(intr_raw);
	map = find_event_map_by_intr(intr, (plat_ic_is_spi(intr) != 0));
	if (map == NULL) {
		ERROR("No SDEI map for interrupt %u\n", intr);
		panic();
	}

	/*
	 * The received interrupt number must either correspond to event 0, or
	 * must be a bound interrupt.
	 */
	assert((map->ev_num == SDEI_EVENT_0) || is_map_bound(map));

	se = get_event_entry(map);
	state = sdei_get_this_pe_state();

	if (state->pe_masked) {
		/*
		 * Interrupts received while this PE was masked can't be
		 * dispatched.
		 */
		SDEI_LOG("interrupt %u on %llx while PE masked\n", map->intr,
				mpidr);
		if (is_event_shared(map))
			sdei_map_lock(map);

		handle_masked_trigger(map, se, state, intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	/* Insert load barrier for signalled SDEI event */
	if (map->ev_num == SDEI_EVENT_0)
		dmbld();

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* Assert that a shared event routed to this PE has been configured accordingly */
	if (is_event_shared(map) && (se->reg_flags == SDEI_REGF_RM_PE)) {
		assert(se->affinity == (mpidr & MPIDR_AFFINITY_MASK));
	}

	if (!can_sdei_state_trans(se, DO_DISPATCH)) {
		SDEI_LOG("SDEI event 0x%x can't be dispatched; state=0x%x\n",
				map->ev_num, se->state);

		/*
		 * If the event is registered, leave the interrupt pending so
		 * that it's delivered when the event is enabled.
		 */
		if (GET_EV_STATE(se, REGISTERED))
			plat_ic_set_interrupt_pending(map->intr);

		/*
		 * The interrupt was disabled or unregistered after the handler
		 * started to execute, which means now the interrupt is already
		 * disabled and we just need to EOI the interrupt.
		 */
		plat_ic_end_of_interrupt(intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	disp_ctx = get_outstanding_dispatch();
	if (is_event_critical(map)) {
		/*
		 * If this event is Critical, and if there's an outstanding
		 * dispatch, assert that the latter is a Normal dispatch.
		 * Critical events can preempt an outstanding Normal event
		 * dispatch.
		 */
		if (disp_ctx != NULL)
			assert(is_event_normal(disp_ctx->map));
	} else {
		/*
		 * If this event is Normal, assert that there are no outstanding
		 * dispatches. Normal events can't preempt any outstanding event
		 * dispatches.
		 */
		assert(disp_ctx == NULL);
	}

	sec_state = get_interrupt_src_ss(flags);

	if (is_event_shared(map))
		sdei_map_unlock(map);

	SDEI_LOG("ACK %llx, ev:%d ss:%d spsr:%lx ELR:%lx\n", mpidr, map->ev_num,
			sec_state, read_spsr_el3(), read_elr_el3());

	ctx = handle;

	/*
	 * Check if we interrupted the Secure state. Perform a context switch
	 * so that we can delegate to NS.
	 */
	if (sec_state == SECURE) {
		save_secure_context();
		ctx = restore_and_resume_ns_context();
	}

	/* Synchronously dispatch event */
	setup_ns_dispatch(map, se, ctx, &dispatch_jmp);
	begin_sdei_synchronous_dispatch(&dispatch_jmp);

	/*
	 * We reach here when the client completes the event.
	 *
	 * If the cause of dispatch originally interrupted the Secure world,
	 * resume Secure.
	 *
	 * No need to save the Non-secure context ahead of a world switch: the
	 * Non-secure context was fully saved before dispatch, and has been
	 * returned to its pre-dispatch state.
	 */
	if (sec_state == SECURE)
		restore_and_resume_secure_context();

	/*
	 * The event was dispatched after receiving an SDEI interrupt. With
	 * the event handling completed, EOI the corresponding interrupt.
	 */
	if ((map->ev_num != SDEI_EVENT_0) && !is_map_bound(map)) {
		ERROR("Invalid SDEI mapping: ev=%u\n", map->ev_num);
		panic();
	}
	plat_ic_end_of_interrupt(intr_raw);

	return 0;
}

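/*
 * Note: sdei_intr_handler() is not wired to the GIC driver directly; it is
 * expected to be registered with the Exception Handling Framework for the
 * Normal and Critical SDEI priority levels during SDEI initialisation (see
 * sdei_main.c).
 */
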
/*
 * Explicitly dispatch the given SDEI event.
 *
 * When calling this API, the caller must be prepared for the SDEI dispatcher
 * to restore the Non-secure context and make it the active one. This call
 * returns only after the client has completed the dispatch. Then, the
 * Non-secure context will be active, and the following ERET will return to
 * Non-secure.
 *
 * Should the caller require re-entry to Secure, it must restore the Secure
 * context and program registers for ERET.
 */
int sdei_dispatch_event(int ev_num)
{
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ns_ctx;
	sdei_dispatch_context_t *disp_ctx;
	sdei_cpu_state_t *state;
	jmp_buf dispatch_jmp;

	/* Can't dispatch if events are masked on this PE */
	state = sdei_get_this_pe_state();
	if (state->pe_masked)
		return -1;

	/* Event 0 can't be dispatched */
	if (ev_num == SDEI_EVENT_0)
		return -1;

	/* Locate mapping corresponding to this event */
	map = find_event_map(ev_num);
	if (map == NULL)
		return -1;

	/* Only explicit events can be dispatched */
	if (!is_map_explicit(map))
		return -1;

	/* Examine state of dispatch stack */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx != NULL) {
		/*
		 * There's an outstanding dispatch. If the outstanding dispatch
		 * is Critical, no more dispatches are possible.
		 */
		if (is_event_critical(disp_ctx->map))
			return -1;

		/*
		 * If the outstanding dispatch is Normal, only Critical events
		 * can be dispatched.
		 */
		if (is_event_normal(map))
			return -1;
	}

	se = get_event_entry(map);
	if (!can_sdei_state_trans(se, DO_DISPATCH))
		return -1;

	/*
	 * Prepare for NS dispatch by restoring the Non-secure context and
	 * marking that as active.
	 */
	ns_ctx = restore_and_resume_ns_context();

	/* Activate the priority corresponding to the event being dispatched */
	ehf_activate_priority(sdei_event_priority(map));

	/* Dispatch event synchronously */
	setup_ns_dispatch(map, se, ns_ctx, &dispatch_jmp);
	begin_sdei_synchronous_dispatch(&dispatch_jmp);

	/*
	 * We reach here when the client completes the event.
	 *
	 * Deactivate the priority level that was activated at the time of
	 * explicit dispatch.
	 */
	ehf_deactivate_priority(sdei_event_priority(map));

	return 0;
}

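/*
 * Illustrative usage (sketch only; the wrapper and event number below are
 * hypothetical and not part of this file): an EL3 component, such as a
 * platform's firmware-first error handler, may forward an error to a
 * client-registered explicit event as follows.
 *
 *	static int plat_forward_error_to_client(void)
 *	{
 *		return sdei_dispatch_event(804);
 *	}
 *
 * The call returns 0 only after the client has completed the event, or -1 if
 * the event cannot be dispatched; the caller must then restore any Secure
 * context it needs before the next ERET.
 */
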
static void end_sdei_synchronous_dispatch(jmp_buf *buffer)
{
	longjmp(*buffer, 1);
}

int sdei_event_complete(bool resume, uint64_t pc)
{
	sdei_dispatch_context_t *disp_ctx;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ctx;
	sdei_action_t act;
	unsigned int client_el = sdei_client_el();

	/* Return error if called without an active event */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx == NULL)
		return SDEI_EDENY;

	/* Validate resumption point */
	if (resume && (plat_sdei_validate_entry_point(pc, client_el) != 0))
		return SDEI_EDENY;

	map = disp_ctx->map;
	assert(map != NULL);
	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	act = resume ? DO_COMPLETE_RESUME : DO_COMPLETE;
	if (!can_sdei_state_trans(se, act)) {
		if (is_event_shared(map))
			sdei_map_unlock(map);
		return SDEI_EDENY;
	}

	if (is_event_shared(map))
		sdei_map_unlock(map);

	/* Having done sanity checks, pop dispatch */
	(void) pop_dispatch();

	SDEI_LOG("EOI:%lx, %d spsr:%lx elr:%lx\n", read_mpidr_el1(),
			map->ev_num, read_spsr_el3(), read_elr_el3());

	/*
	 * Restore Non-secure to how it was originally interrupted. Once done,
	 * it's up-to-date with the saved copy.
	 */
	ctx = cm_get_context(NON_SECURE);
	restore_event_ctx(disp_ctx, ctx);

	if (resume) {
		/*
		 * Complete-and-resume call. Prepare the Non-secure context
		 * (currently active) for complete and resume.
		 */
		cm_set_elr_spsr_el3(NON_SECURE, pc, SPSR_64(client_el,
				MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));

		/*
		 * Make it look as if a synchronous exception were taken at the
		 * supplied Non-secure resumption point. Populate SPSR and
		 * ELR_ELx so that an ERET from there works as expected.
		 *
		 * The assumption is that the client, if necessary, would have
		 * saved any live content in these registers before making this
		 * call.
		 */
		if (client_el == MODE_EL2) {
			write_elr_el2(disp_ctx->elr_el3);
			write_spsr_el2(disp_ctx->spsr_el3);
		} else {
			/* EL1 */
			write_elr_el1(disp_ctx->elr_el3);
			write_spsr_el1(disp_ctx->spsr_el3);
		}
	}

	/* End the outstanding dispatch */
	end_sdei_synchronous_dispatch(disp_ctx->dispatch_jmp);

	return 0;
}

int64_t sdei_event_context(void *handle, unsigned int param)
{
	sdei_dispatch_context_t *disp_ctx;

	if (param >= SDEI_SAVED_GPREGS)
		return SDEI_EINVAL;

	/* Get outstanding dispatch on this CPU */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx == NULL)
		return SDEI_EDENY;

	assert(disp_ctx->map != NULL);

	if (!can_sdei_state_trans(get_event_entry(disp_ctx->map), DO_CONTEXT))
		return SDEI_EDENY;

	/*
	 * No locking is required for the Running state, as this is the only
	 * CPU that can complete the event.
	 */

	return (int64_t) disp_ctx->x[param];
}