/*
 * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/ehf.h>
#include <bl31/interrupt_mgmt.h>
#include <bl31/sync_handle.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/cassert.h>
#include <services/sdei.h>

#include "sdei_private.h"

/* x0-x17 GPREGS context */
#define SDEI_SAVED_GPREGS	18U

/* Maximum preemption nesting levels: Critical priority and Normal priority */
#define MAX_EVENT_NESTING	2U

/* Per-CPU SDEI state access macros */
#define sdei_get_this_pe_state()	(&cpu_state[plat_my_core_pos()])
#define sdei_get_target_pe_state(_pe)	(&cpu_state[plat_core_pos_by_mpidr(_pe)])
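/*
 * Note: sdei_get_target_pe_state() assumes its argument is a valid MPIDR;
 * callers are expected to validate it first (as sdei_is_target_pe_masked()
 * does), since plat_core_pos_by_mpidr() returns a negative value otherwise.
 */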

/* Structure to store information about an outstanding dispatch */
typedef struct sdei_dispatch_context {
	sdei_ev_map_t *map;
	uint64_t x[SDEI_SAVED_GPREGS];

	/* Buffer to longjmp() back to once the client completes the event */
	jmp_buf *dispatch_jmp;

	/* Exception state registers */
	uint64_t elr_el3;
	uint64_t spsr_el3;

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	/* CVE-2018-3639 mitigation state */
	uint64_t disable_cve_2018_3639;
#endif
} sdei_dispatch_context_t;

/* Per-CPU SDEI state data */
typedef struct sdei_cpu_state {
	sdei_dispatch_context_t dispatch_stack[MAX_EVENT_NESTING];
	unsigned short stack_top; /* Empty ascending */
	bool pe_masked;
	bool pending_enables;
} sdei_cpu_state_t;

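/*
 * The dispatch stack is empty-ascending: stack_top indexes the next free
 * slot, so 0 means no outstanding dispatch and MAX_EVENT_NESTING means the
 * stack is full. Two levels suffice because only a Critical event can
 * preempt an outstanding Normal dispatch.
 */
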
/* SDEI states for all cores in the system */
static sdei_cpu_state_t cpu_state[PLATFORM_CORE_COUNT];

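/*
 * Report whether SDEI events are masked on the given PE. An invalid MPIDR is
 * reported as masked, so that callers conservatively treat such a PE as
 * unable to take events.
 */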
bool sdei_is_target_pe_masked(uint64_t target_pe)
{
	int errstat = plat_core_pos_by_mpidr(target_pe);
	if (errstat >= 0) {
		const sdei_cpu_state_t *state = &cpu_state[errstat];
		return state->pe_masked;
	}
	return true;
}

int64_t sdei_pe_mask(void)
{
	int64_t ret = 0;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	/*
	 * The return value indicates whether this call had any effect on the
	 * mask status of this PE.
	 */
	if (!state->pe_masked) {
		state->pe_masked = true;
		ret = 1;
	}

	return ret;
}

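/*
 * Unmask SDEI events on this PE. Interrupt enables that were deferred while
 * the PE was masked (see handle_masked_trigger()) are applied before the
 * mask is lifted.
 */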
void sdei_pe_unmask(void)
{
	unsigned int i;
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	uint64_t my_mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;

	/*
	 * If there are pending enables, iterate through the private mappings
	 * and enable those bound maps that are in enabled state. Also, iterate
	 * through shared mappings and enable interrupts of events that are
	 * targeted to this PE.
	 */
	if (state->pending_enables) {
		for_each_private_map(i, map) {
			se = get_event_entry(map);
			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED))
				plat_ic_enable_interrupt(map->intr);
		}

		for_each_shared_map(i, map) {
			se = get_event_entry(map);

			sdei_map_lock(map);
			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED) &&
					(se->reg_flags == SDEI_REGF_RM_PE) &&
					(se->affinity == my_mpidr)) {
				plat_ic_enable_interrupt(map->intr);
			}
			sdei_map_unlock(map);
		}
	}

	state->pending_enables = false;
	state->pe_masked = false;
}

/* Push a dispatch context to the dispatch stack */
static sdei_dispatch_context_t *push_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	sdei_dispatch_context_t *disp_ctx;

	/* Cannot have more than max events */
	assert(state->stack_top < MAX_EVENT_NESTING);

	disp_ctx = &state->dispatch_stack[state->stack_top];
	state->stack_top++;

	return disp_ctx;
}

/* Pop a dispatch context from the dispatch stack */
static sdei_dispatch_context_t *pop_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0U)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	state->stack_top--;

	return &state->dispatch_stack[state->stack_top];
}

/* Retrieve the context at the top of dispatch stack */
static sdei_dispatch_context_t *get_outstanding_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0U)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	return &state->dispatch_stack[state->stack_top - 1U];
}

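/*
 * Save the interrupted context (x0-x17, ELR_EL3 and SPSR_EL3) on the
 * dispatch stack, so it can be restored when the client completes the event.
 */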
static sdei_dispatch_context_t *save_event_ctx(sdei_ev_map_t *map,
		void *tgt_ctx)
{
	sdei_dispatch_context_t *disp_ctx;
	const gp_regs_t *tgt_gpregs;
	const el3_state_t *tgt_el3;

	assert(tgt_ctx != NULL);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	disp_ctx = push_dispatch();
	assert(disp_ctx != NULL);
	disp_ctx->map = map;

	/* Save general purpose and exception registers */
	memcpy(disp_ctx->x, tgt_gpregs, sizeof(disp_ctx->x));
	disp_ctx->spsr_el3 = read_ctx_reg(tgt_el3, CTX_SPSR_EL3);
	disp_ctx->elr_el3 = read_ctx_reg(tgt_el3, CTX_ELR_EL3);

	return disp_ctx;
}

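/*
 * Restore a saved dispatch context into the target cpu_context, so that the
 * interrupted execution resumes exactly where it left off.
 */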
static void restore_event_ctx(const sdei_dispatch_context_t *disp_ctx, void *tgt_ctx)
{
	gp_regs_t *tgt_gpregs;
	el3_state_t *tgt_el3;

	assert(tgt_ctx != NULL);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	CASSERT(sizeof(disp_ctx->x) == (SDEI_SAVED_GPREGS * sizeof(uint64_t)),
			foo);

	/* Restore general purpose and exception registers */
	memcpy(tgt_gpregs, disp_ctx->x, sizeof(disp_ctx->x));
	write_ctx_reg(tgt_el3, CTX_SPSR_EL3, disp_ctx->spsr_el3);
	write_ctx_reg(tgt_el3, CTX_ELR_EL3, disp_ctx->elr_el3);

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;
	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(tgt_ctx);

	/* Restore CVE-2018-3639 mitigation state */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE,
			disp_ctx->disable_cve_2018_3639);
#endif
}

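/* Save the Secure EL1 system register context before a world switch */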
static void save_secure_context(void)
{
	cm_el1_sysregs_context_save(SECURE);
}

/* Restore Secure context and arrange to resume it at the next ERET */
static void restore_and_resume_secure_context(void)
{
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);
}

/*
 * Restore Non-secure context and arrange to resume it at the next ERET.
 * Return a pointer to the Non-secure context.
 */
static cpu_context_t *restore_and_resume_ns_context(void)
{
	cpu_context_t *ns_ctx;

	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	ns_ctx = cm_get_context(NON_SECURE);
	assert(ns_ctx != NULL);

	return ns_ctx;
}

/*
 * Prepare for ERET:
 * - Set the ELR to the registered handler address
 * - Set the SPSR register by calling the common create_spsr() function
 */
static void sdei_set_elr_spsr(sdei_entry_t *se, sdei_dispatch_context_t *disp_ctx)
{
	unsigned int client_el = sdei_client_el();
	u_register_t interrupted_pstate = disp_ctx->spsr_el3;

	/* Derive the client SPSR from the PSTATE that was interrupted */
	u_register_t sdei_spsr = create_spsr(interrupted_pstate, client_el);

	cm_set_elr_spsr_el3(NON_SECURE, (uintptr_t) se->ep, sdei_spsr);
}

/*
 * Populate the Non-secure context so that the next ERET will dispatch to the
 * SDEI client.
 */
static void setup_ns_dispatch(sdei_ev_map_t *map, sdei_entry_t *se,
		cpu_context_t *ctx, jmp_buf *dispatch_jmp)
{
	sdei_dispatch_context_t *disp_ctx;

	/* Push the event and context */
	disp_ctx = save_event_ctx(map, ctx);

	/*
	 * Set up handler arguments:
	 *
	 * - x0: Event number
	 * - x1: Handler argument supplied at the time of event registration
	 * - x2: Interrupted PC
	 * - x3: Interrupted SPSR
	 */
	SMC_SET_GP(ctx, CTX_GPREG_X0, (uint64_t) map->ev_num);
	SMC_SET_GP(ctx, CTX_GPREG_X1, se->arg);
	SMC_SET_GP(ctx, CTX_GPREG_X2, disp_ctx->elr_el3);
	SMC_SET_GP(ctx, CTX_GPREG_X3, disp_ctx->spsr_el3);

	/* Set up the ELR and SPSR registers to prepare for ERET */
	sdei_set_elr_spsr(se, disp_ctx);

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;
	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(ctx);

	/* Save CVE-2018-3639 mitigation state */
	disp_ctx->disable_cve_2018_3639 = read_ctx_reg(tgt_cve_2018_3639,
			CTX_CVE_2018_3639_DISABLE);

	/* Force SDEI handler to execute with mitigation enabled by default */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE, 0);
#endif

	disp_ctx->dispatch_jmp = dispatch_jmp;
}

/* Handle a triggered SDEI interrupt while events were masked on this PE */
static void handle_masked_trigger(sdei_ev_map_t *map, sdei_entry_t *se,
		sdei_cpu_state_t *state, unsigned int intr_raw)
{
	uint64_t my_mpidr __unused = (read_mpidr_el1() & MPIDR_AFFINITY_MASK);
	bool disable = false;

	/* Nothing to do for event 0 */
	if (map->ev_num == SDEI_EVENT_0)
		return;

	/*
	 * For a private event, or for a shared event specifically routed to
	 * this CPU, we disable the interrupt, leave it pending, and do EOI.
	 */
	if (is_event_private(map) || (se->reg_flags == SDEI_REGF_RM_PE))
		disable = true;

	if (se->reg_flags == SDEI_REGF_RM_PE)
		assert(se->affinity == my_mpidr);

	if (disable) {
		plat_ic_disable_interrupt(map->intr);
		plat_ic_set_interrupt_pending(map->intr);
		plat_ic_end_of_interrupt(intr_raw);
		state->pending_enables = true;

		return;
	}

	/*
	 * We just received a shared event with routing set to ANY PE. The
	 * interrupt can't be delegated on this PE as SDEI events are masked.
	 * However, because its routing mode is ANY, it is possible that the
	 * event can be delegated on any other PE that hasn't masked events.
	 * Therefore, we set the interrupt back pending so as to give other
	 * suitable PEs a chance of handling it.
	 */
	assert(plat_ic_is_spi(map->intr) != 0);
	plat_ic_set_interrupt_pending(map->intr);

	/*
	 * Leaving the same interrupt pending also means that the same interrupt
	 * can target this PE again as soon as this PE leaves EL3. Whether and
	 * how often that happens depends on the implementation of the GIC.
	 *
	 * We therefore call a platform handler to resolve this situation.
	 */
	plat_sdei_handle_masked_trigger(my_mpidr, map->intr);

	/* This PE is masked. We EOI the interrupt, as it can't be delegated */
	plat_ic_end_of_interrupt(intr_raw);
}

/* SDEI main interrupt handler */
int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
		void *cookie)
{
	sdei_entry_t *se;
	cpu_context_t *ctx;
	sdei_ev_map_t *map;
	const sdei_dispatch_context_t *disp_ctx;
	unsigned int sec_state;
	sdei_cpu_state_t *state;
	uint32_t intr;
	jmp_buf dispatch_jmp;
	const uint64_t mpidr = read_mpidr_el1();

	/*
	 * To handle an event, the following conditions must be true:
	 *
	 * 1. Event must be signalled
	 * 2. Event must be enabled
	 * 3. This PE must be a target PE for the event
	 * 4. PE must be unmasked for SDEI
	 * 5. If this is a normal event, no event must be running
	 * 6. If this is a critical event, no critical event must be running
	 *
	 * (1) and (2) are true when this function is running
	 * (3) is enforced in the GIC by selecting the appropriate routing option
	 * (4) is satisfied by the client calling PE_UNMASK
	 * (5) and (6) are enforced using interrupt priority, the RPR, in the GIC:
	 *   - Normal SDEI events belong to the Normal SDE priority class
	 *   - Critical SDEI events belong to the Critical CSDE priority class
	 *
	 * The interrupt has already been acknowledged, and therefore is active,
	 * so no other PE can handle this event while we are at it.
	 *
	 * Find out whether this is an SDEI interrupt: there must be an event
	 * mapped to it.
	 */
	intr = plat_ic_get_interrupt_id(intr_raw);
	map = find_event_map_by_intr(intr, (plat_ic_is_spi(intr) != 0));
	if (map == NULL) {
		ERROR("No SDEI map for interrupt %u\n", intr);
		panic();
	}

	/*
	 * The received interrupt number must either correspond to event 0, or
	 * must be a bound interrupt.
	 */
	assert((map->ev_num == SDEI_EVENT_0) || is_map_bound(map));

	se = get_event_entry(map);
	state = sdei_get_this_pe_state();

	if (state->pe_masked) {
		/*
		 * Interrupts received while this PE was masked can't be
		 * dispatched.
		 */
		SDEI_LOG("interrupt %u on %" PRIx64 " while PE masked\n",
				map->intr, mpidr);
		if (is_event_shared(map))
			sdei_map_lock(map);

		handle_masked_trigger(map, se, state, intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	/*
	 * Event 0 is raised by software via SDEI_EVENT_SIGNAL. Insert a load
	 * barrier so that writes made by the signalling PE before raising the
	 * event are visible to this handler.
	 */
	if (map->ev_num == SDEI_EVENT_0)
		dmbld();

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* Assert that a shared event routed to this PE was configured so */
	if (is_event_shared(map) && (se->reg_flags == SDEI_REGF_RM_PE)) {
		assert(se->affinity == (mpidr & MPIDR_AFFINITY_MASK));
	}

	if (!can_sdei_state_trans(se, DO_DISPATCH)) {
		SDEI_LOG("SDEI event 0x%x can't be dispatched; state=0x%x\n",
				map->ev_num, se->state);

		/*
		 * If the event is registered, leave the interrupt pending so
		 * that it's delivered when the event is enabled.
		 */
		if (GET_EV_STATE(se, REGISTERED))
			plat_ic_set_interrupt_pending(map->intr);

		/*
		 * The interrupt was disabled or unregistered after the handler
		 * started to execute, which means the interrupt is already
		 * disabled and we just need to EOI it.
		 */
		plat_ic_end_of_interrupt(intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	disp_ctx = get_outstanding_dispatch();
	if (is_event_critical(map)) {
		/*
		 * If this event is Critical, and if there's an outstanding
		 * dispatch, assert that the latter is a Normal dispatch.
		 * Critical events can preempt an outstanding Normal event
		 * dispatch.
		 */
		if (disp_ctx != NULL)
			assert(is_event_normal(disp_ctx->map));
	} else {
		/*
		 * If this event is Normal, assert that there are no outstanding
		 * dispatches. Normal events can't preempt any outstanding event
		 * dispatches.
		 */
		assert(disp_ctx == NULL);
	}

	sec_state = get_interrupt_src_ss(flags);

	if (is_event_shared(map))
		sdei_map_unlock(map);

	SDEI_LOG("ACK %" PRIx64 ", ev:0x%x ss:%d spsr:%lx ELR:%lx\n",
			mpidr, map->ev_num, sec_state, read_spsr_el3(), read_elr_el3());

	ctx = handle;

	/*
	 * Check if we interrupted the Secure state. If so, perform a context
	 * switch so that we can delegate to NS.
	 */
	if (sec_state == SECURE) {
		save_secure_context();
		ctx = restore_and_resume_ns_context();
	}

	/* Synchronously dispatch event */
	setup_ns_dispatch(map, se, ctx, &dispatch_jmp);
	begin_sdei_synchronous_dispatch(&dispatch_jmp);

	/*
	 * We reach here when the client completes the event.
	 *
	 * If the cause of dispatch originally interrupted the Secure world,
	 * resume Secure.
	 *
	 * No need to save the Non-secure context ahead of a world switch: the
	 * Non-secure context was fully saved before dispatch, and has been
	 * returned to its pre-dispatch state.
	 */
	if (sec_state == SECURE)
		restore_and_resume_secure_context();

	/*
	 * The event was dispatched after receiving the SDEI interrupt. Now
	 * that the event handling is complete, EOI the corresponding
	 * interrupt.
	 */
	if ((map->ev_num != SDEI_EVENT_0) && !is_map_bound(map)) {
		ERROR("Invalid SDEI mapping: ev=0x%x\n", map->ev_num);
		panic();
	}
	plat_ic_end_of_interrupt(intr_raw);

	return 0;
}

/*
 * Explicitly dispatch the given SDEI event.
 *
 * When calling this API, the caller must be prepared for the SDEI dispatcher
 * to restore the Non-secure context and make it the active one. This call
 * returns only after the client has completed the dispatch. At that point,
 * the Non-secure context will be active, and the following ERET will return
 * to Non-secure.
 *
 * Should the caller require re-entry to Secure, it must restore the Secure
 * context and program registers for ERET.
 */
int sdei_dispatch_event(int ev_num)
{
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ns_ctx;
	sdei_dispatch_context_t *disp_ctx;
	sdei_cpu_state_t *state;
	jmp_buf dispatch_jmp;

	/* Can't dispatch if events are masked on this PE */
	state = sdei_get_this_pe_state();
	if (state->pe_masked)
		return -1;

	/* Event 0 can't be dispatched */
	if (ev_num == SDEI_EVENT_0)
		return -1;

	/* Locate mapping corresponding to this event */
	map = find_event_map(ev_num);
	if (map == NULL)
		return -1;

	/* Only explicit events can be dispatched */
	if (!is_map_explicit(map))
		return -1;

	/* Examine state of dispatch stack */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx != NULL) {
		/*
		 * There's an outstanding dispatch. If the outstanding dispatch
		 * is critical, no more dispatches are possible.
		 */
		if (is_event_critical(disp_ctx->map))
			return -1;

		/*
		 * If the outstanding dispatch is Normal, only critical events
		 * can be dispatched.
		 */
		if (is_event_normal(map))
			return -1;
	}

	se = get_event_entry(map);
	if (!can_sdei_state_trans(se, DO_DISPATCH))
		return -1;

	/*
	 * Prepare for NS dispatch by restoring the Non-secure context and
	 * marking that as active.
	 */
	ns_ctx = restore_and_resume_ns_context();

	/* Activate the priority corresponding to the event being dispatched */
	ehf_activate_priority(sdei_event_priority(map));

	/* Dispatch event synchronously */
	setup_ns_dispatch(map, se, ns_ctx, &dispatch_jmp);
	begin_sdei_synchronous_dispatch(&dispatch_jmp);

	/*
	 * We reach here when the client completes the event.
	 *
	 * Deactivate the priority level that was activated at the time of
	 * explicit dispatch.
	 */
	ehf_deactivate_priority(sdei_event_priority(map));

	return 0;
}

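/* Unwind back to where the current dispatch began, via longjmp() */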
static void end_sdei_synchronous_dispatch(jmp_buf *buffer)
{
	longjmp(*buffer, 1);
}

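/*
 * Complete the outstanding event dispatch (COMPLETE), optionally resuming
 * the client at a new PC (COMPLETE_AND_RESUME). The interrupted context is
 * restored before unwinding back to the point where the dispatch began.
 */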
int sdei_event_complete(bool resume, uint64_t pc)
{
	sdei_dispatch_context_t *disp_ctx;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ctx;
	sdei_action_t act;
	unsigned int client_el = sdei_client_el();

	/* Return error if called without an active event */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx == NULL)
		return SDEI_EDENY;

	/* Validate resumption point */
	if (resume && (plat_sdei_validate_entry_point(pc, client_el) != 0))
		return SDEI_EDENY;

	map = disp_ctx->map;
	assert(map != NULL);
	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	act = resume ? DO_COMPLETE_RESUME : DO_COMPLETE;
	if (!can_sdei_state_trans(se, act)) {
		if (is_event_shared(map))
			sdei_map_unlock(map);
		return SDEI_EDENY;
	}

	if (is_event_shared(map))
		sdei_map_unlock(map);

	/* Having done sanity checks, pop the dispatch */
	(void) pop_dispatch();

	SDEI_LOG("EOI:%lx, %d spsr:%lx elr:%lx\n", read_mpidr_el1(),
			map->ev_num, read_spsr_el3(), read_elr_el3());

	/*
	 * Restore the Non-secure context to how it was originally interrupted.
	 * Once done, it's up-to-date with the saved copy.
	 */
	ctx = cm_get_context(NON_SECURE);
	restore_event_ctx(disp_ctx, ctx);

	if (resume) {
		/*
		 * Complete-and-resume call. Prepare the Non-secure context
		 * (currently active) for complete and resume.
		 */
		cm_set_elr_spsr_el3(NON_SECURE, pc, SPSR_64(client_el,
					MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));

		/*
		 * Make it look as if a synchronous exception were taken at the
		 * supplied Non-secure resumption point. Populate SPSR and
		 * ELR_ELx so that an ERET from there works as expected.
		 *
		 * The assumption is that the client, if necessary, would have
		 * saved any live content in these registers before making this
		 * call.
		 */
		if (client_el == MODE_EL2) {
			write_elr_el2(disp_ctx->elr_el3);
			write_spsr_el2(disp_ctx->spsr_el3);
		} else {
			/* EL1 */
			write_elr_el1(disp_ctx->elr_el3);
			write_spsr_el1(disp_ctx->spsr_el3);
		}
	}

	/* End the outstanding dispatch */
	end_sdei_synchronous_dispatch(disp_ctx->dispatch_jmp);

	return 0;
}

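/*
 * Return the value of register xN ('param') as saved when the outstanding
 * event was dispatched, implementing the SDEI_EVENT_CONTEXT call.
 */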
int64_t sdei_event_context(void *handle, unsigned int param)
{
	sdei_dispatch_context_t *disp_ctx;

	if (param >= SDEI_SAVED_GPREGS)
		return SDEI_EINVAL;

	/* Get outstanding dispatch on this CPU */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx == NULL)
		return SDEI_EDENY;

	assert(disp_ctx->map != NULL);

	if (!can_sdei_state_trans(get_event_entry(disp_ctx->map), DO_CONTEXT))
		return SDEI_EDENY;

	/*
	 * No locking is required for the Running state, as this is the only
	 * CPU that can complete the event.
	 */

	return (int64_t) disp_ctx->x[param];
}