/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <cassert.h>
#include <debug.h>
#include <ehf.h>
#include <interrupt_mgmt.h>
#include <runtime_svc.h>
#include <sdei.h>
#include <string.h>
#include "sdei_private.h"

#define PE_MASKED	1
#define PE_NOT_MASKED	0

/* x0-x17 GPREGS context */
#define SDEI_SAVED_GPREGS	18

/* Maximum preemption nesting levels: Critical priority and Normal priority */
#define MAX_EVENT_NESTING	2

/* Per-CPU SDEI state access macro */
#define sdei_get_this_pe_state()	(&sdei_cpu_state[plat_my_core_pos()])

/* Structure to store information about an outstanding dispatch */
typedef struct sdei_dispatch_context {
	sdei_ev_map_t *map;
	unsigned int sec_state;
	unsigned int intr_raw;
	uint64_t x[SDEI_SAVED_GPREGS];

	/* Exception state registers */
	uint64_t elr_el3;
	uint64_t spsr_el3;

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	/* CVE-2018-3639 mitigation state */
	uint64_t disable_cve_2018_3639;
#endif
} sdei_dispatch_context_t;

/* Per-CPU SDEI state data */
typedef struct sdei_cpu_state {
	sdei_dispatch_context_t dispatch_stack[MAX_EVENT_NESTING];
	unsigned short stack_top; /* Empty ascending */
	unsigned int pe_masked:1;
	unsigned int pending_enables:1;
} sdei_cpu_state_t;
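
/*
 * Illustration of the "empty ascending" dispatch stack discipline (a sketch,
 * not functional code): stack_top always indexes the next free slot, so with
 * MAX_EVENT_NESTING == 2:
 *
 *   stack_top == 0: no outstanding dispatch on this PE
 *   stack_top == 1: one outstanding dispatch (Normal or Critical)
 *   stack_top == 2: a Critical dispatch has preempted a Normal one
 *
 * push_dispatch() hands out dispatch_stack[stack_top] and then increments;
 * pop_dispatch() decrements first and then returns that slot.
 */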

/* SDEI states for all cores in the system */
static sdei_cpu_state_t sdei_cpu_state[PLATFORM_CORE_COUNT];

unsigned int sdei_pe_mask(void)
{
	unsigned int ret;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	/*
	 * Return value indicates whether this call had any effect on the mask
	 * status of this PE: 1 if the PE was newly masked, 0 if it was
	 * already masked.
	 */
	ret = (state->pe_masked ^ PE_MASKED);
	state->pe_masked = PE_MASKED;

	return ret;
}

void sdei_pe_unmask(void)
{
	int i;
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	uint64_t my_mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;

	/*
	 * If there are pending enables, iterate through the private mappings
	 * and enable those bound maps that are in enabled state. Also, iterate
	 * through shared mappings and enable interrupts of events that are
	 * targeted to this PE.
	 */
	if (state->pending_enables) {
		for_each_private_map(i, map) {
			se = get_event_entry(map);
			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED))
				plat_ic_enable_interrupt(map->intr);
		}

		for_each_shared_map(i, map) {
			se = get_event_entry(map);

			sdei_map_lock(map);
			if (is_map_bound(map) &&
					GET_EV_STATE(se, ENABLED) &&
					(se->reg_flags == SDEI_REGF_RM_PE) &&
					(se->affinity == my_mpidr)) {
				plat_ic_enable_interrupt(map->intr);
			}
			sdei_map_unlock(map);
		}
	}

	state->pending_enables = 0;
	state->pe_masked = PE_NOT_MASKED;
}

/* Push a dispatch context to the dispatch stack */
static sdei_dispatch_context_t *push_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	sdei_dispatch_context_t *disp_ctx;

	/* Cannot have more than max events */
	assert(state->stack_top < MAX_EVENT_NESTING);

	disp_ctx = &state->dispatch_stack[state->stack_top];
	state->stack_top++;

	return disp_ctx;
}

/* Pop a dispatch context from the dispatch stack */
static sdei_dispatch_context_t *pop_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	state->stack_top--;

	return &state->dispatch_stack[state->stack_top];
}

/* Retrieve the context at the top of the dispatch stack */
static sdei_dispatch_context_t *get_outstanding_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	return &state->dispatch_stack[state->stack_top - 1];
}

static void save_event_ctx(sdei_ev_map_t *map, void *tgt_ctx, int sec_state,
		unsigned int intr_raw)
{
	sdei_dispatch_context_t *disp_ctx;
	gp_regs_t *tgt_gpregs;
	el3_state_t *tgt_el3;

	assert(tgt_ctx);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	disp_ctx = push_dispatch();
	assert(disp_ctx);
	disp_ctx->sec_state = sec_state;
	disp_ctx->map = map;
	disp_ctx->intr_raw = intr_raw;

	/* Save general purpose and exception registers */
	memcpy(disp_ctx->x, tgt_gpregs, sizeof(disp_ctx->x));
	disp_ctx->spsr_el3 = read_ctx_reg(tgt_el3, CTX_SPSR_EL3);
	disp_ctx->elr_el3 = read_ctx_reg(tgt_el3, CTX_ELR_EL3);

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;
	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(tgt_ctx);

	/* Save CVE-2018-3639 mitigation state */
	disp_ctx->disable_cve_2018_3639 = read_ctx_reg(tgt_cve_2018_3639,
		CTX_CVE_2018_3639_DISABLE);

	/* Force SDEI handler to execute with mitigation enabled by default */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE, 0);
#endif
}

static void restore_event_ctx(sdei_dispatch_context_t *disp_ctx, void *tgt_ctx)
{
	gp_regs_t *tgt_gpregs;
	el3_state_t *tgt_el3;

	assert(tgt_ctx);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	/*
	 * Compile-time check that the saved register array is exactly the
	 * size the memcpy below expects.
	 */
	CASSERT(sizeof(disp_ctx->x) == (SDEI_SAVED_GPREGS * sizeof(uint64_t)),
			assert_sdei_saved_gpregs_size);

	/* Restore general purpose and exception registers */
	memcpy(tgt_gpregs, disp_ctx->x, sizeof(disp_ctx->x));
	write_ctx_reg(tgt_el3, CTX_SPSR_EL3, disp_ctx->spsr_el3);
	write_ctx_reg(tgt_el3, CTX_ELR_EL3, disp_ctx->elr_el3);

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;
	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(tgt_ctx);

	/* Restore CVE-2018-3639 mitigation state */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE,
		disp_ctx->disable_cve_2018_3639);
#endif
}

static void save_secure_context(void)
{
	cm_el1_sysregs_context_save(SECURE);
}

/* Restore Secure context and arrange to resume it at the next ERET */
static void restore_and_resume_secure_context(void)
{
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);
}

/*
 * Restore Non-secure context and arrange to resume it at the next ERET. Return
 * pointer to the Non-secure context.
 */
static cpu_context_t *restore_and_resume_ns_context(void)
{
	cpu_context_t *ns_ctx;

	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	ns_ctx = cm_get_context(NON_SECURE);
	assert(ns_ctx);

	return ns_ctx;
}

/*
 * Populate the Non-secure context so that the next ERET will dispatch to the
 * SDEI client.
 */
static void setup_ns_dispatch(sdei_ev_map_t *map, sdei_entry_t *se,
		cpu_context_t *ctx, int sec_state_to_resume,
		unsigned int intr_raw)
{
	el3_state_t *el3_ctx = get_el3state_ctx(ctx);

	/* Push the event and context */
	save_event_ctx(map, ctx, sec_state_to_resume, intr_raw);

	/*
	 * Setup handler arguments:
	 *
	 * - x0: Event number
	 * - x1: Handler argument supplied at the time of event registration
	 * - x2: Interrupted PC
	 * - x3: Interrupted SPSR
	 */
	SMC_SET_GP(ctx, CTX_GPREG_X0, map->ev_num);
	SMC_SET_GP(ctx, CTX_GPREG_X1, se->arg);
	SMC_SET_GP(ctx, CTX_GPREG_X2, read_ctx_reg(el3_ctx, CTX_ELR_EL3));
	SMC_SET_GP(ctx, CTX_GPREG_X3, read_ctx_reg(el3_ctx, CTX_SPSR_EL3));

	/*
	 * Prepare for ERET:
	 *
	 * - Set PC to the registered handler address
	 * - Set SPSR to jump to client EL with exceptions masked
	 */
	cm_set_elr_spsr_el3(NON_SECURE, (uintptr_t) se->ep,
			SPSR_64(sdei_client_el(), MODE_SP_ELX,
				DISABLE_ALL_EXCEPTIONS));
}
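
/*
 * For illustration only: the client-side view of the dispatch set up above.
 * The registered entry point (se->ep) is entered at the client EL with the
 * arguments populated in x0-x3, roughly equivalent to a handler with this
 * hypothetical C prototype:
 *
 *	void sdei_client_handler(uint64_t ev_num, uint64_t arg,
 *			uint64_t interrupted_pc, uint64_t interrupted_spsr);
 *
 * The handler is expected to finish by issuing an SDEI_EVENT_COMPLETE or
 * SDEI_EVENT_COMPLETE_AND_RESUME call, which is serviced by
 * sdei_event_complete() below.
 */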

/* Handle a triggered SDEI interrupt while events were masked on this PE */
static void handle_masked_trigger(sdei_ev_map_t *map, sdei_entry_t *se,
		sdei_cpu_state_t *state, unsigned int intr_raw)
{
	uint64_t my_mpidr __unused = (read_mpidr_el1() & MPIDR_AFFINITY_MASK);
	int disable = 0;

	/* Nothing to do for event 0 */
	if (map->ev_num == SDEI_EVENT_0)
		return;

	/*
	 * For a private event, or for a shared event specifically routed to
	 * this CPU, we disable the interrupt, leave it pending, and EOI it.
	 */
	if (is_event_private(map)) {
		disable = 1;
	} else if (se->reg_flags == SDEI_REGF_RM_PE) {
		assert(se->affinity == my_mpidr);
		disable = 1;
	}

	if (disable) {
		plat_ic_disable_interrupt(map->intr);
		plat_ic_set_interrupt_pending(map->intr);
		plat_ic_end_of_interrupt(intr_raw);
		state->pending_enables = 1;

		return;
	}

	/*
	 * We just received a shared event with routing set to ANY PE. The
	 * interrupt can't be delegated on this PE as SDEI events are masked.
	 * However, because its routing mode is ANY, it is possible that the
	 * event can be delegated on any other PE that hasn't masked events.
	 * Therefore, we set the interrupt back pending so as to give other
	 * suitable PEs a chance of handling it.
	 */
	assert(plat_ic_is_spi(map->intr));
	plat_ic_set_interrupt_pending(map->intr);

	/*
	 * Leaving the same interrupt pending also means that the same interrupt
	 * can target this PE again as soon as this PE leaves EL3. Whether and
	 * how often that happens depends on the implementation of the GIC.
	 *
	 * We therefore call a platform handler to resolve this situation.
	 */
	plat_sdei_handle_masked_trigger(my_mpidr, map->intr);

	/* This PE is masked. We EOI the interrupt, as it can't be delegated */
	plat_ic_end_of_interrupt(intr_raw);
}
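
/*
 * Note: the interrupts disabled in handle_masked_trigger() are re-enabled by
 * sdei_pe_unmask() above, which walks the private and shared mappings when
 * the client unmasks this PE (see pending_enables).
 */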

/* SDEI main interrupt handler */
int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
		void *cookie)
{
	sdei_entry_t *se;
	cpu_context_t *ctx;
	sdei_ev_map_t *map;
	sdei_dispatch_context_t *disp_ctx;
	unsigned int sec_state;
	sdei_cpu_state_t *state;
	uint32_t intr;

	/*
	 * To handle an event, the following conditions must be true:
	 *
	 * 1. Event must be signalled
	 * 2. Event must be enabled
	 * 3. This PE must be a target PE for the event
	 * 4. PE must be unmasked for SDEI
	 * 5. If this is a normal event, no event must be running
	 * 6. If this is a critical event, no critical event must be running
	 *
	 * (1) and (2) are true when this function is running
	 * (3) is enforced in GIC by selecting the appropriate routing option
	 * (4) is satisfied by client calling PE_UNMASK
	 * (5) and (6) are enforced using interrupt priority, the RPR, in GIC:
	 *   - Normal SDEI events belong to Normal SDE priority class
	 *   - Critical SDEI events belong to Critical SDE priority class
	 *
	 * The interrupt has already been acknowledged, and therefore is active,
	 * so no other PE can handle this event while we are at it.
	 *
	 * Find if this is an SDEI interrupt. There must be an event mapped to
	 * this interrupt.
	 */
	intr = plat_ic_get_interrupt_id(intr_raw);
	map = find_event_map_by_intr(intr, plat_ic_is_spi(intr));
	if (!map) {
		ERROR("No SDEI map for interrupt %u\n", intr);
		panic();
	}

	/*
	 * Received interrupt number must either correspond to event 0, or must
	 * be a bound interrupt.
	 */
	assert((map->ev_num == SDEI_EVENT_0) || is_map_bound(map));

	se = get_event_entry(map);
	state = sdei_get_this_pe_state();

	if (state->pe_masked == PE_MASKED) {
		/*
		 * Interrupts received while this PE was masked can't be
		 * dispatched.
		 */
		SDEI_LOG("interrupt %u on %lx while PE masked\n", map->intr,
				read_mpidr_el1());
		if (is_event_shared(map))
			sdei_map_lock(map);

		handle_masked_trigger(map, se, state, intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	/* Insert load barrier for signalled SDEI event */
	if (map->ev_num == SDEI_EVENT_0)
		dmbld();

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* Assert that a shared event routed to this PE was configured so */
	if (is_event_shared(map) && (se->reg_flags == SDEI_REGF_RM_PE)) {
		assert(se->affinity ==
				(read_mpidr_el1() & MPIDR_AFFINITY_MASK));
	}

	if (!can_sdei_state_trans(se, DO_DISPATCH)) {
		SDEI_LOG("SDEI event 0x%x can't be dispatched; state=0x%x\n",
				map->ev_num, se->state);

		/*
		 * If the event is registered, leave the interrupt pending so
		 * that it's delivered when the event is enabled.
		 */
		if (GET_EV_STATE(se, REGISTERED))
			plat_ic_set_interrupt_pending(map->intr);

		/*
		 * The interrupt was disabled or unregistered after the handler
		 * started to execute, which means now the interrupt is already
		 * disabled and we just need to EOI the interrupt.
		 */
		plat_ic_end_of_interrupt(intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	disp_ctx = get_outstanding_dispatch();
	if (is_event_critical(map)) {
		/*
		 * If this event is Critical, and if there's an outstanding
		 * dispatch, assert the latter is a Normal dispatch. Critical
		 * events can preempt an outstanding Normal event dispatch.
		 */
		if (disp_ctx)
			assert(is_event_normal(disp_ctx->map));
	} else {
		/*
		 * If this event is Normal, assert that there are no outstanding
		 * dispatches. Normal events can't preempt any outstanding event
		 * dispatches.
		 */
		assert(disp_ctx == NULL);
	}

	sec_state = get_interrupt_src_ss(flags);

	if (is_event_shared(map))
		sdei_map_unlock(map);

	SDEI_LOG("ACK %lx, ev:%d ss:%d spsr:%lx ELR:%lx\n", read_mpidr_el1(),
			map->ev_num, sec_state, read_spsr_el3(),
			read_elr_el3());

	ctx = handle;

	/*
	 * Check if we interrupted secure state. Perform a context switch so
	 * that we can delegate to NS.
	 */
	if (sec_state == SECURE) {
		save_secure_context();
		ctx = restore_and_resume_ns_context();
	}

	setup_ns_dispatch(map, se, ctx, sec_state, intr_raw);

	/*
	 * End of interrupt is done in sdei_event_complete, when the client
	 * signals completion.
	 */
	return 0;
}

/* Explicitly dispatch the given SDEI event */
int sdei_dispatch_event(int ev_num, unsigned int preempted_sec_state)
{
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ctx;
	sdei_dispatch_context_t *disp_ctx;
	sdei_cpu_state_t *state;

	/* Validate preempted security state */
	if ((preempted_sec_state != SECURE) &&
			(preempted_sec_state != NON_SECURE)) {
		return -1;
	}

	/* Can't dispatch if events are masked on this PE */
	state = sdei_get_this_pe_state();
	if (state->pe_masked == PE_MASKED)
		return -1;

	/* Event 0 can't be dispatched */
	if (ev_num == SDEI_EVENT_0)
		return -1;

	/* Locate mapping corresponding to this event */
	map = find_event_map(ev_num);
	if (!map)
		return -1;

	/* Only explicit events can be dispatched */
	if (!is_map_explicit(map))
		return -1;

	/* Examine state of dispatch stack */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx) {
		/*
		 * There's an outstanding dispatch. If the outstanding dispatch
		 * is critical, no more dispatches are possible.
		 */
		if (is_event_critical(disp_ctx->map))
			return -1;

		/*
		 * If the outstanding dispatch is Normal, only critical events
		 * can be dispatched.
		 */
		if (is_event_normal(map))
			return -1;
	}

	se = get_event_entry(map);
	if (!can_sdei_state_trans(se, DO_DISPATCH))
		return -1;

	/* Activate the priority corresponding to the event being dispatched */
	ehf_activate_priority(sdei_event_priority(map));

	/*
	 * We assume the current context is SECURE, and that it's already been
	 * saved.
	 */
	ctx = restore_and_resume_ns_context();

	/*
	 * The caller has effectively terminated execution. Record the
	 * preempted context so that it can be resumed later, when the event
	 * completes or complete-and-resumes.
	 */
	setup_ns_dispatch(map, se, ctx, preempted_sec_state, 0);

	return 0;
}
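
/*
 * Example usage (hypothetical caller; e.g. an SPD or EL3 error handler that
 * has already saved the Secure context, per the assumption above):
 *
 *	if (sdei_dispatch_event(PLAT_SDEI_EV_NUM, SECURE) < 0)
 *		ERROR("SDEI dispatch failed\n");
 *
 * PLAT_SDEI_EV_NUM is a made-up name for an event number with an explicit
 * mapping (is_map_explicit()); bound events can't be dispatched this way.
 */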

int sdei_event_complete(int resume, uint64_t pc)
{
	sdei_dispatch_context_t *disp_ctx;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ctx;
	sdei_action_t act;
	unsigned int client_el = sdei_client_el();

	/* Return error if called without an active event */
	disp_ctx = get_outstanding_dispatch();
	if (!disp_ctx)
		return SDEI_EDENY;

	/* Validate resumption point */
	if (resume && (plat_sdei_validate_entry_point(pc, client_el) != 0))
		return SDEI_EDENY;

	map = disp_ctx->map;
	assert(map);
	se = get_event_entry(map);

	/*
	 * For shared events, the state transition must be checked and
	 * effected under the map lock; the error path below unlocks it.
	 */
	if (is_event_shared(map))
		sdei_map_lock(map);

	act = resume ? DO_COMPLETE_RESUME : DO_COMPLETE;
	if (!can_sdei_state_trans(se, act)) {
		if (is_event_shared(map))
			sdei_map_unlock(map);
		return SDEI_EDENY;
	}

	/* Having done sanity checks, pop dispatch */
	pop_dispatch();

	SDEI_LOG("EOI:%lx, %d spsr:%lx elr:%lx\n", read_mpidr_el1(),
			map->ev_num, read_spsr_el3(), read_elr_el3());

	/*
	 * Restore the Non-secure context to how it was when originally
	 * interrupted. Once done, it's up-to-date with the saved copy.
	 */
	ctx = cm_get_context(NON_SECURE);
	restore_event_ctx(disp_ctx, ctx);

	if (resume) {
		/*
		 * Complete-and-resume call. Prepare the Non-secure context
		 * (currently active) for complete and resume.
		 */
		cm_set_elr_spsr_el3(NON_SECURE, pc, SPSR_64(client_el,
					MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));

		/*
		 * Make it look as if a synchronous exception were taken at the
		 * supplied Non-secure resumption point. Populate SPSR and
		 * ELR_ELx so that an ERET from there works as expected.
		 *
		 * The assumption is that the client, if necessary, would have
		 * saved any live content in these registers before making this
		 * call.
		 */
		if (client_el == MODE_EL2) {
			write_elr_el2(disp_ctx->elr_el3);
			write_spsr_el2(disp_ctx->spsr_el3);
		} else {
			/* EL1 */
			write_elr_el1(disp_ctx->elr_el3);
			write_spsr_el1(disp_ctx->spsr_el3);
		}
	}

	/*
	 * If the cause of dispatch originally interrupted the Secure world, and
	 * if Non-secure world wasn't allowed to preempt Secure execution,
	 * resume Secure.
	 *
	 * No need to save the Non-secure context ahead of a world switch: the
	 * Non-secure context was fully saved before dispatch, and has been
	 * returned to its pre-dispatch state.
	 */
	if ((disp_ctx->sec_state == SECURE) &&
			(ehf_is_ns_preemption_allowed() == 0)) {
		restore_and_resume_secure_context();
	}

	if ((map->ev_num == SDEI_EVENT_0) || is_map_bound(map)) {
		/*
		 * The event was dispatched after receiving SDEI interrupt. With
		 * the event handling completed, EOI the corresponding
		 * interrupt.
		 */
		plat_ic_end_of_interrupt(disp_ctx->intr_raw);
	} else {
		/*
		 * An unbound event must have been dispatched explicitly.
		 * Deactivate the priority level that was activated at the time
		 * of explicit dispatch.
		 */
		ehf_deactivate_priority(sdei_event_priority(map));
	}

	if (is_event_shared(map))
		sdei_map_unlock(map);

	return 0;
}
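
/*
 * For illustration: a complete-and-resume handler on the client side would
 * typically end with something like the following (hypothetical pseudo-call,
 * using the SMC Calling Convention):
 *
 *	SMC(SDEI_EVENT_COMPLETE_AND_RESUME, resume_pc);
 *
 * which reaches this function with resume == 1 and pc == resume_pc, while a
 * plain SDEI_EVENT_COMPLETE arrives with resume == 0.
 */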

int sdei_event_context(void *handle, unsigned int param)
{
	sdei_dispatch_context_t *disp_ctx;

	if (param >= SDEI_SAVED_GPREGS)
		return SDEI_EINVAL;

	/* Get outstanding dispatch on this CPU */
	disp_ctx = get_outstanding_dispatch();
	if (!disp_ctx)
		return SDEI_EDENY;

	assert(disp_ctx->map);

	if (!can_sdei_state_trans(get_event_entry(disp_ctx->map), DO_CONTEXT))
		return SDEI_EDENY;

	/*
	 * No locking is required for the Running state, as this is the only
	 * CPU that can complete the event.
	 */

	return disp_ctx->x[param];
}
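
/*
 * For illustration: from within its handler, a client retrieves the
 * interrupted context's GP registers via the SDEI_EVENT_CONTEXT call, where
 * param selects one of the saved registers x0-x17 (hypothetical pseudo-call):
 *
 *	xN = SMC(SDEI_EVENT_CONTEXT, N);	// N in [0, 17]
 */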
699