xref: /rk3399_ARM-atf/services/std_svc/sdei/sdei_intr_mgmt.c (revision 06f3c7058c42a9f1a9f7df75ea2de71a000855e8)
/*
 * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/ehf.h>
#include <bl31/interrupt_mgmt.h>
#include <bl31/sync_handle.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/cassert.h>
#include <services/sdei.h>

#include "sdei_private.h"

/* x0-x17 GPREGS context */
#define SDEI_SAVED_GPREGS	18U

/* Maximum preemption nesting levels: Critical priority and Normal priority */
#define MAX_EVENT_NESTING	2U

/* Per-CPU SDEI state access macros */
#define sdei_get_this_pe_state()		(&cpu_state[plat_my_core_pos()])
#define sdei_get_target_pe_state(_pe)	(&cpu_state[plat_core_pos_by_mpidr(_pe)])

/* Structure to store information about an outstanding dispatch */
typedef struct sdei_dispatch_context {
	sdei_ev_map_t *map;
	uint64_t x[SDEI_SAVED_GPREGS];
	jmp_buf *dispatch_jmp;

	/* Exception state registers */
	uint64_t elr_el3;
	uint64_t spsr_el3;

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	/* CVE-2018-3639 mitigation state */
	uint64_t disable_cve_2018_3639;
#endif
} sdei_dispatch_context_t;

/* Per-CPU SDEI state data */
typedef struct sdei_cpu_state {
	sdei_dispatch_context_t dispatch_stack[MAX_EVENT_NESTING];
	unsigned short stack_top; /* Empty ascending */
	bool pe_masked;
	bool pending_enables;
} sdei_cpu_state_t;

/* SDEI states for all cores in the system */
static sdei_cpu_state_t cpu_state[PLATFORM_CORE_COUNT];
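
/*
 * A note on nesting (descriptive only): stack_top always indexes the first
 * free slot of the empty-ascending dispatch_stack above, so on any one PE:
 *
 *   stack_top == 0: no outstanding dispatch;
 *   stack_top == 1: a Normal or Critical event is being handled;
 *   stack_top == 2: a Critical event has preempted a Normal event, the
 *                   deepest nesting the dispatcher allows.
 */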

bool sdei_is_target_pe_masked(uint64_t target_pe)
{
	const sdei_cpu_state_t *state = sdei_get_target_pe_state(target_pe);

	return state->pe_masked;
}

int64_t sdei_pe_mask(void)
{
	int64_t ret = 0;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	/*
	 * The return value indicates whether this call had any effect on the
	 * mask status of this PE.
	 */
	if (!state->pe_masked) {
		state->pe_masked = true;
		ret = 1;
	}

	return ret;
}

void sdei_pe_unmask(void)
{
	unsigned int i;
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	uint64_t my_mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;

	/*
	 * If there are pending enables, iterate through the private mappings
	 * and enable those bound maps that are in the enabled state. Also,
	 * iterate through shared mappings and enable interrupts of events that
	 * are targeted to this PE.
	 */
	if (state->pending_enables) {
		for_each_private_map(i, map) {
			se = get_event_entry(map);
			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED))
				plat_ic_enable_interrupt(map->intr);
		}

		for_each_shared_map(i, map) {
			se = get_event_entry(map);

			sdei_map_lock(map);
			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED) &&
					(se->reg_flags == SDEI_REGF_RM_PE) &&
					(se->affinity == my_mpidr)) {
				plat_ic_enable_interrupt(map->intr);
			}
			sdei_map_unlock(map);
		}
	}

	state->pending_enables = false;
	state->pe_masked = false;
}
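
/*
 * For reference, a hedged sketch of the client-side flow that drives the two
 * functions above. SDEI_PE_MASK and SDEI_PE_UNMASK are the SMC function IDs
 * defined by the SDEI specification; this is not dispatcher code:
 *
 *   smc(SDEI_PE_MASK);     // sdei_pe_mask(): returns 1 if the state changed
 *   ... no SDEI events are dispatched on this PE in between ...
 *   smc(SDEI_PE_UNMASK);   // sdei_pe_unmask(): applies deferred enables
 */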

/* Push a dispatch context to the dispatch stack */
static sdei_dispatch_context_t *push_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	sdei_dispatch_context_t *disp_ctx;

	/* Cannot have more than max events */
	assert(state->stack_top < MAX_EVENT_NESTING);

	disp_ctx = &state->dispatch_stack[state->stack_top];
	state->stack_top++;

	return disp_ctx;
}

/* Pop a dispatch context from the dispatch stack */
static sdei_dispatch_context_t *pop_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0U)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	state->stack_top--;

	return &state->dispatch_stack[state->stack_top];
}

/* Retrieve the context at the top of the dispatch stack */
static sdei_dispatch_context_t *get_outstanding_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0U)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	return &state->dispatch_stack[state->stack_top - 1U];
}

static sdei_dispatch_context_t *save_event_ctx(sdei_ev_map_t *map,
		void *tgt_ctx)
{
	sdei_dispatch_context_t *disp_ctx;
	const gp_regs_t *tgt_gpregs;
	const el3_state_t *tgt_el3;

	assert(tgt_ctx != NULL);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	disp_ctx = push_dispatch();
	assert(disp_ctx != NULL);
	disp_ctx->map = map;

	/* Save general purpose and exception registers */
	memcpy(disp_ctx->x, tgt_gpregs, sizeof(disp_ctx->x));
	disp_ctx->spsr_el3 = read_ctx_reg(tgt_el3, CTX_SPSR_EL3);
	disp_ctx->elr_el3 = read_ctx_reg(tgt_el3, CTX_ELR_EL3);

	return disp_ctx;
}

static void restore_event_ctx(const sdei_dispatch_context_t *disp_ctx, void *tgt_ctx)
{
	gp_regs_t *tgt_gpregs;
	el3_state_t *tgt_el3;

	assert(tgt_ctx != NULL);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	CASSERT(sizeof(disp_ctx->x) == (SDEI_SAVED_GPREGS * sizeof(uint64_t)),
			assert_sdei_saved_gpregs_size);

	/* Restore general purpose and exception registers */
	memcpy(tgt_gpregs, disp_ctx->x, sizeof(disp_ctx->x));
	write_ctx_reg(tgt_el3, CTX_SPSR_EL3, disp_ctx->spsr_el3);
	write_ctx_reg(tgt_el3, CTX_ELR_EL3, disp_ctx->elr_el3);

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;
	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(tgt_ctx);

	/* Restore CVE-2018-3639 mitigation state */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE,
		disp_ctx->disable_cve_2018_3639);
#endif
}

static void save_secure_context(void)
{
	cm_el1_sysregs_context_save(SECURE);
}

/* Restore Secure context and arrange to resume it at the next ERET */
static void restore_and_resume_secure_context(void)
{
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);
}

/*
 * Restore Non-secure context and arrange to resume it at the next ERET. Return
 * a pointer to the Non-secure context.
 */
static cpu_context_t *restore_and_resume_ns_context(void)
{
	cpu_context_t *ns_ctx;

	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	ns_ctx = cm_get_context(NON_SECURE);
	assert(ns_ctx != NULL);

	return ns_ctx;
}

/*
 * Prepare for ERET:
 * - Set the ELR to the registered handler address
 * - Set the SPSR by calling the common create_spsr() function
 */
static void sdei_set_elr_spsr(sdei_entry_t *se, sdei_dispatch_context_t *disp_ctx)
{
	unsigned int client_el = sdei_client_el();
	u_register_t interrupted_pstate = disp_ctx->spsr_el3;
	u_register_t sdei_spsr = create_spsr(interrupted_pstate, client_el);

	cm_set_elr_spsr_el3(NON_SECURE, (uintptr_t) se->ep, sdei_spsr);
}

/*
 * Populate the Non-secure context so that the next ERET will dispatch to the
 * SDEI client.
 */
static void setup_ns_dispatch(sdei_ev_map_t *map, sdei_entry_t *se,
		cpu_context_t *ctx, jmp_buf *dispatch_jmp)
{
	sdei_dispatch_context_t *disp_ctx;

	/* Push the event and context */
	disp_ctx = save_event_ctx(map, ctx);

	/*
	 * Set up handler arguments:
	 *
	 * - x0: Event number
	 * - x1: Handler argument supplied at the time of event registration
	 * - x2: Interrupted PC
	 * - x3: Interrupted SPSR
	 */
	SMC_SET_GP(ctx, CTX_GPREG_X0, (uint64_t) map->ev_num);
	SMC_SET_GP(ctx, CTX_GPREG_X1, se->arg);
	SMC_SET_GP(ctx, CTX_GPREG_X2, disp_ctx->elr_el3);
	SMC_SET_GP(ctx, CTX_GPREG_X3, disp_ctx->spsr_el3);

	/* Set up the ELR and SPSR registers to prepare for ERET */
	sdei_set_elr_spsr(se, disp_ctx);

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;
	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(ctx);

	/* Save CVE-2018-3639 mitigation state */
	disp_ctx->disable_cve_2018_3639 = read_ctx_reg(tgt_cve_2018_3639,
		CTX_CVE_2018_3639_DISABLE);

	/* Force SDEI handler to execute with mitigation enabled by default */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE, 0);
#endif

	disp_ctx->dispatch_jmp = dispatch_jmp;
}
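
/*
 * For reference, a minimal sketch of a client handler entered through the
 * context set up above (client-side code, name hypothetical; x0-x3 carry the
 * arguments listed in setup_ns_dispatch()):
 *
 *   void client_handler(uint64_t ev_num, uint64_t arg,
 *                       uint64_t interrupted_pc, uint64_t interrupted_spsr)
 *   {
 *       // ... handle the event ...
 *       // Finish with SDEI_EVENT_COMPLETE or SDEI_EVENT_COMPLETE_AND_RESUME;
 *       // see sdei_event_complete() below.
 *   }
 */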

/* Handle a triggered SDEI interrupt while events were masked on this PE */
static void handle_masked_trigger(sdei_ev_map_t *map, sdei_entry_t *se,
		sdei_cpu_state_t *state, unsigned int intr_raw)
{
	uint64_t my_mpidr __unused = (read_mpidr_el1() & MPIDR_AFFINITY_MASK);
	bool disable = false;

	/* Nothing to do for event 0 */
	if (map->ev_num == SDEI_EVENT_0)
		return;

	/*
	 * For a private event, or for a shared event specifically routed to
	 * this CPU, we disable the interrupt, leave the interrupt pending, and
	 * do EOI.
	 */
	if (is_event_private(map) || (se->reg_flags == SDEI_REGF_RM_PE))
		disable = true;

	if (se->reg_flags == SDEI_REGF_RM_PE)
		assert(se->affinity == my_mpidr);

	if (disable) {
		plat_ic_disable_interrupt(map->intr);
		plat_ic_set_interrupt_pending(map->intr);
		plat_ic_end_of_interrupt(intr_raw);
		state->pending_enables = true;

		return;
	}

	/*
	 * We just received a shared event with routing set to ANY PE. The
	 * interrupt can't be delegated on this PE as SDEI events are masked.
	 * However, because its routing mode is ANY, it is possible that the
	 * event can be delegated on any other PE that hasn't masked events.
	 * Therefore, we set the interrupt back to pending so as to give other
	 * suitable PEs a chance of handling it.
	 */
	assert(plat_ic_is_spi(map->intr) != 0);
	plat_ic_set_interrupt_pending(map->intr);

	/*
	 * Leaving the same interrupt pending also means that the same interrupt
	 * can target this PE again as soon as this PE leaves EL3. Whether and
	 * how often that happens depends on the implementation of the GIC.
	 *
	 * We therefore call a platform handler to resolve this situation.
	 */
	plat_sdei_handle_masked_trigger(my_mpidr, map->intr);

	/* This PE is masked. We EOI the interrupt, as it can't be delegated */
	plat_ic_end_of_interrupt(intr_raw);
}

/* SDEI main interrupt handler */
int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
		void *cookie)
{
	sdei_entry_t *se;
	cpu_context_t *ctx;
	sdei_ev_map_t *map;
	const sdei_dispatch_context_t *disp_ctx;
	unsigned int sec_state;
	sdei_cpu_state_t *state;
	uint32_t intr;
	jmp_buf dispatch_jmp;
	const uint64_t mpidr = read_mpidr_el1();

	/*
	 * To handle an event, the following conditions must be true:
	 *
	 * 1. Event must be signalled
	 * 2. Event must be enabled
	 * 3. This PE must be a target PE for the event
	 * 4. PE must be unmasked for SDEI
	 * 5. If this is a normal event, no event must be running
	 * 6. If this is a critical event, no critical event must be running
	 *
	 * (1) and (2) are true when this function is running
	 * (3) is enforced in GIC by selecting the appropriate routing option
	 * (4) is satisfied by client calling PE_UNMASK
	 * (5) and (6) are enforced using interrupt priority, the RPR, in GIC:
	 *   - Normal SDEI events belong to Normal SDE priority class
	 *   - Critical SDEI events belong to Critical SDE priority class
	 *
	 * The interrupt has already been acknowledged, and therefore is active,
	 * so no other PE can handle this event while we are at it.
	 *
	 * Find if this is an SDEI interrupt. There must be an event mapped to
	 * this interrupt.
	 */
	intr = plat_ic_get_interrupt_id(intr_raw);
	map = find_event_map_by_intr(intr, (plat_ic_is_spi(intr) != 0));
	if (map == NULL) {
		ERROR("No SDEI map for interrupt %u\n", intr);
		panic();
	}

	/*
	 * The received interrupt number must either correspond to event 0, or
	 * must be a bound interrupt.
	 */
	assert((map->ev_num == SDEI_EVENT_0) || is_map_bound(map));

	se = get_event_entry(map);
	state = sdei_get_this_pe_state();

	if (state->pe_masked) {
		/*
		 * Interrupts received while this PE was masked can't be
		 * dispatched.
		 */
		SDEI_LOG("interrupt %u on %" PRIx64 " while PE masked\n",
			 map->intr, mpidr);
		if (is_event_shared(map))
			sdei_map_lock(map);

		handle_masked_trigger(map, se, state, intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	/* Insert load barrier for signalled SDEI event */
	if (map->ev_num == SDEI_EVENT_0)
		dmbld();

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* Assert that a shared event routed to this PE was configured as such */
	if (is_event_shared(map) && (se->reg_flags == SDEI_REGF_RM_PE)) {
		assert(se->affinity == (mpidr & MPIDR_AFFINITY_MASK));
	}

	if (!can_sdei_state_trans(se, DO_DISPATCH)) {
		SDEI_LOG("SDEI event 0x%x can't be dispatched; state=0x%x\n",
				map->ev_num, se->state);

		/*
		 * If the event is registered, leave the interrupt pending so
		 * that it's delivered when the event is enabled.
		 */
		if (GET_EV_STATE(se, REGISTERED))
			plat_ic_set_interrupt_pending(map->intr);

		/*
		 * The interrupt was disabled or unregistered after the handler
		 * started executing, which means the interrupt is already
		 * disabled and we only need to EOI it.
		 */
		plat_ic_end_of_interrupt(intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	disp_ctx = get_outstanding_dispatch();
	if (is_event_critical(map)) {
		/*
		 * If this event is Critical, and if there's an outstanding
		 * dispatch, assert that the latter is a Normal dispatch.
		 * Critical events can preempt an outstanding Normal event
		 * dispatch.
		 */
		if (disp_ctx != NULL)
			assert(is_event_normal(disp_ctx->map));
	} else {
		/*
		 * If this event is Normal, assert that there are no outstanding
		 * dispatches. Normal events can't preempt any outstanding event
		 * dispatches.
		 */
		assert(disp_ctx == NULL);
	}

	sec_state = get_interrupt_src_ss(flags);

	if (is_event_shared(map))
		sdei_map_unlock(map);

	SDEI_LOG("ACK %" PRIx64 ", ev:0x%x ss:%d spsr:%lx ELR:%lx\n",
		 mpidr, map->ev_num, sec_state, read_spsr_el3(), read_elr_el3());

	ctx = handle;

	/*
	 * Check if we interrupted the Secure state. If so, perform a context
	 * switch so that we can delegate to NS.
	 */
	if (sec_state == SECURE) {
		save_secure_context();
		ctx = restore_and_resume_ns_context();
	}

	/* Synchronously dispatch event */
	setup_ns_dispatch(map, se, ctx, &dispatch_jmp);
	begin_sdei_synchronous_dispatch(&dispatch_jmp);

	/*
	 * We reach here when the client completes the event.
	 *
	 * If the cause of dispatch originally interrupted the Secure world,
	 * resume Secure.
	 *
	 * No need to save the Non-secure context ahead of a world switch: the
	 * Non-secure context was fully saved before dispatch, and has been
	 * returned to its pre-dispatch state.
	 */
	if (sec_state == SECURE)
		restore_and_resume_secure_context();

	/*
	 * The event was dispatched after receiving an SDEI interrupt. Now that
	 * event handling is complete, EOI the corresponding interrupt.
	 */
	if ((map->ev_num != SDEI_EVENT_0) && !is_map_bound(map)) {
		ERROR("Invalid SDEI mapping: ev=0x%x\n", map->ev_num);
		panic();
	}
	plat_ic_end_of_interrupt(intr_raw);

	return 0;
}
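
/*
 * A hedged note on how this handler is wired up: during SDEI initialisation
 * (in sdei_main.c, outside this file), the dispatcher registers
 * sdei_intr_handler() with the EL3 exception handling framework for both SDEI
 * priority levels, along the lines of:
 *
 *   ehf_register_priority_handler(PLAT_SDEI_CRITICAL_PRI, sdei_intr_handler);
 *   ehf_register_priority_handler(PLAT_SDEI_NORMAL_PRI, sdei_intr_handler);
 *
 * where the PLAT_SDEI_*_PRI priorities are platform-defined.
 */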

/*
 * Explicitly dispatch the given SDEI event.
 *
 * When calling this API, the caller must be prepared for the SDEI dispatcher
 * to restore the Non-secure context and make it the active one. This call
 * returns only after the client has completed the dispatch. Then, the
 * Non-secure context will be active, and the following ERET will return to
 * Non-secure.
 *
 * Should the caller require re-entry to Secure, it must restore the Secure
 * context and program registers for ERET.
 */
int sdei_dispatch_event(int ev_num)
{
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ns_ctx;
	sdei_dispatch_context_t *disp_ctx;
	sdei_cpu_state_t *state;
	jmp_buf dispatch_jmp;

	/* Can't dispatch if events are masked on this PE */
	state = sdei_get_this_pe_state();
	if (state->pe_masked)
		return -1;

	/* Event 0 can't be dispatched */
	if (ev_num == SDEI_EVENT_0)
		return -1;

	/* Locate the mapping corresponding to this event */
	map = find_event_map(ev_num);
	if (map == NULL)
		return -1;

	/* Only explicit events can be dispatched */
	if (!is_map_explicit(map))
		return -1;

	/* Examine the state of the dispatch stack */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx != NULL) {
		/*
		 * There's an outstanding dispatch. If the outstanding dispatch
		 * is Critical, no more dispatches are possible.
		 */
		if (is_event_critical(disp_ctx->map))
			return -1;

		/*
		 * If the outstanding dispatch is Normal, only Critical events
		 * can be dispatched.
		 */
		if (is_event_normal(map))
			return -1;
	}

	se = get_event_entry(map);
	if (!can_sdei_state_trans(se, DO_DISPATCH))
		return -1;

	/*
	 * Prepare for NS dispatch by restoring the Non-secure context and
	 * marking it as active.
	 */
	ns_ctx = restore_and_resume_ns_context();

	/* Activate the priority corresponding to the event being dispatched */
	ehf_activate_priority(sdei_event_priority(map));

	/* Dispatch event synchronously */
	setup_ns_dispatch(map, se, ns_ctx, &dispatch_jmp);
	begin_sdei_synchronous_dispatch(&dispatch_jmp);

	/*
	 * We reach here when the client completes the event.
	 *
	 * Deactivate the priority level that was activated at the time of
	 * explicit dispatch.
	 */
	ehf_deactivate_priority(sdei_event_priority(map));

	return 0;
}
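
/*
 * A hedged usage sketch for the API above: an EL3 agent, for example a RAS
 * error handler, can hand an error to a Normal world client by dispatching a
 * registered explicit event. PLAT_RAS_EV_NUM is hypothetical and
 * platform-chosen:
 *
 *   if (sdei_dispatch_event(PLAT_RAS_EV_NUM) != 0) {
 *       // Dispatch refused (PE masked, bad event state, or nesting limit);
 *       // handle the error locally.
 *   }
 */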

static void end_sdei_synchronous_dispatch(jmp_buf *buffer)
{
	longjmp(*buffer, 1);
}

int sdei_event_complete(bool resume, uint64_t pc)
{
	sdei_dispatch_context_t *disp_ctx;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ctx;
	sdei_action_t act;
	unsigned int client_el = sdei_client_el();

	/* Return an error if called without an active event */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx == NULL)
		return SDEI_EDENY;

	/* Validate the resumption point */
	if (resume && (plat_sdei_validate_entry_point(pc, client_el) != 0))
		return SDEI_EDENY;

	map = disp_ctx->map;
	assert(map != NULL);
	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	act = resume ? DO_COMPLETE_RESUME : DO_COMPLETE;
	if (!can_sdei_state_trans(se, act)) {
		if (is_event_shared(map))
			sdei_map_unlock(map);
		return SDEI_EDENY;
	}

	if (is_event_shared(map))
		sdei_map_unlock(map);

	/* Having done sanity checks, pop the dispatch */
	(void) pop_dispatch();

	SDEI_LOG("EOI:%lx, %d spsr:%lx elr:%lx\n", read_mpidr_el1(),
			map->ev_num, read_spsr_el3(), read_elr_el3());

	/*
	 * Restore the Non-secure context to how it was when originally
	 * interrupted. Once done, it is up to date with the saved copy.
	 */
	ctx = cm_get_context(NON_SECURE);
	restore_event_ctx(disp_ctx, ctx);

	if (resume) {
		/*
		 * Complete-and-resume call. Prepare the Non-secure context
		 * (currently active) for complete and resume.
		 */
		cm_set_elr_spsr_el3(NON_SECURE, pc, SPSR_64(client_el,
					MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));

		/*
		 * Make it look as if a synchronous exception were taken at the
		 * supplied Non-secure resumption point. Populate SPSR and
		 * ELR_ELx so that an ERET from there works as expected.
		 *
		 * The assumption is that the client, if necessary, would have
		 * saved any live content in these registers before making this
		 * call.
		 */
		if (client_el == MODE_EL2) {
			write_elr_el2(disp_ctx->elr_el3);
			write_spsr_el2(disp_ctx->spsr_el3);
		} else {
			/* EL1 */
			write_elr_el1(disp_ctx->elr_el3);
			write_spsr_el1(disp_ctx->spsr_el3);
		}
	}

	/* End the outstanding dispatch */
	end_sdei_synchronous_dispatch(disp_ctx->dispatch_jmp);

	return 0;
}
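
/*
 * For reference, a hedged client-side sketch: a handler finishes by issuing
 * one of the completion SMCs defined by the SDEI specification, which lands
 * in sdei_event_complete() above via the SDEI SMC handler:
 *
 *   smc(SDEI_EVENT_COMPLETE, status_code);          // return to interrupted context
 *   smc(SDEI_EVENT_COMPLETE_AND_RESUME, resume_pc); // resume at resume_pc instead
 */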

int64_t sdei_event_context(void *handle, unsigned int param)
{
	sdei_dispatch_context_t *disp_ctx;

	if (param >= SDEI_SAVED_GPREGS)
		return SDEI_EINVAL;

	/* Get the outstanding dispatch on this CPU */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx == NULL)
		return SDEI_EDENY;

	assert(disp_ctx->map != NULL);

	if (!can_sdei_state_trans(get_event_entry(disp_ctx->map), DO_CONTEXT))
		return SDEI_EDENY;

	/*
	 * No locking is required while the event is in the Running state, as
	 * this is the only CPU that can complete the event.
	 */
	return (int64_t) disp_ctx->x[param];
}
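
/*
 * For reference, a hedged client-side sketch: while an event handler is
 * running, it can read the interrupted x0-x17 saved in the dispatch context,
 * one register per call, via the SDEI_EVENT_CONTEXT SMC:
 *
 *   uint64_t interrupted_x1 = smc(SDEI_EVENT_CONTEXT, 1);   // param == 1
 */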