xref: /rk3399_ARM-atf/services/std_svc/sdei/sdei_intr_mgmt.c (revision ff2743e544f0f82381ebb9dff8f14eacb837d2e0)
/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <cassert.h>
#include <context_mgmt.h>
#include <debug.h>
#include <ehf.h>
#include <interrupt_mgmt.h>
#include <runtime_svc.h>
#include <sdei.h>
#include <string.h>
#include "sdei_private.h"

#define PE_MASKED	1
#define PE_NOT_MASKED	0

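/*
 * Per the SDEI specification, EL3 need only preserve x0-x17 (together with
 * the PC and PSTATE) of the interrupted context; the client handler is
 * required to preserve all other general purpose registers itself.
 */
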
/* x0-x17 GPREGS context */
#define SDEI_SAVED_GPREGS	18

/* Maximum preemption nesting levels: Critical priority and Normal priority */
#define MAX_EVENT_NESTING	2
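
/*
 * For example, a Normal event handler running on a PE may be preempted by a
 * Critical event, but a Critical handler is never preempted by another SDEI
 * event, so a per-PE dispatch stack of depth two suffices.
 */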

/* Per-CPU SDEI state access macro */
#define sdei_get_this_pe_state()	(&sdei_cpu_state[plat_my_core_pos()])

/* Structure to store information about an outstanding dispatch */
typedef struct sdei_dispatch_context {
	sdei_ev_map_t *map;
	unsigned int sec_state;
	unsigned int intr_raw;
	uint64_t x[SDEI_SAVED_GPREGS];

	/* Exception state registers */
	uint64_t elr_el3;
	uint64_t spsr_el3;

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	/* CVE-2018-3639 mitigation state */
	uint64_t disable_cve_2018_3639;
#endif
} sdei_dispatch_context_t;

/* Per-CPU SDEI state data */
typedef struct sdei_cpu_state {
	sdei_dispatch_context_t dispatch_stack[MAX_EVENT_NESTING];
	unsigned short stack_top; /* Empty ascending */
	unsigned int pe_masked:1;
	unsigned int pending_enables:1;
} sdei_cpu_state_t;
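
/*
 * The dispatch stack is empty-ascending: stack_top indexes the first free
 * slot, so 0 means no outstanding dispatch on this PE and MAX_EVENT_NESTING
 * means the stack is full.
 */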

/* SDEI states for all cores in the system */
static sdei_cpu_state_t sdei_cpu_state[PLATFORM_CORE_COUNT];

unsigned int sdei_pe_mask(void)
{
	unsigned int ret;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	/*
	 * The return value indicates whether this call had any effect on the
	 * mask status of this PE.
	 */
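	/*
	 * For example: if the PE was unmasked (pe_masked == 0), the XOR below
	 * yields 1 and the PE becomes masked; a second call then returns 0,
	 * making repeated calls idempotent.
	 */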
	ret = (state->pe_masked ^ PE_MASKED);
	state->pe_masked = PE_MASKED;

	return ret;
}

void sdei_pe_unmask(void)
{
	int i;
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	uint64_t my_mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;

	/*
	 * If there are pending enables, iterate through the private mappings
	 * and enable the interrupts of bound maps whose events are enabled.
	 * Also iterate through the shared mappings and enable the interrupts
	 * of events that are targeted to this PE.
	 */
	if (state->pending_enables) {
		for_each_private_map(i, map) {
			se = get_event_entry(map);
			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED))
				plat_ic_enable_interrupt(map->intr);
		}

		for_each_shared_map(i, map) {
			se = get_event_entry(map);

			sdei_map_lock(map);
			if (is_map_bound(map) &&
					GET_EV_STATE(se, ENABLED) &&
					(se->reg_flags == SDEI_REGF_RM_PE) &&
					(se->affinity == my_mpidr)) {
				plat_ic_enable_interrupt(map->intr);
			}
			sdei_map_unlock(map);
		}
	}

	state->pending_enables = 0;
	state->pe_masked = PE_NOT_MASKED;
}

/* Push a dispatch context to the dispatch stack */
static sdei_dispatch_context_t *push_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	sdei_dispatch_context_t *disp_ctx;

	/* Cannot have more than max events */
	assert(state->stack_top < MAX_EVENT_NESTING);

	disp_ctx = &state->dispatch_stack[state->stack_top];
	state->stack_top++;

	return disp_ctx;
}

/* Pop a dispatch context from the dispatch stack */
static sdei_dispatch_context_t *pop_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	state->stack_top--;

	return &state->dispatch_stack[state->stack_top];
}
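
/*
 * Note that popping only moves the stack index: the popped context remains
 * valid until the next push. sdei_event_complete() relies on this when it
 * pops the dispatch and then continues to read the context.
 */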

/* Retrieve the context at the top of the dispatch stack */
static sdei_dispatch_context_t *get_outstanding_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	return &state->dispatch_stack[state->stack_top - 1];
}

static void save_event_ctx(sdei_ev_map_t *map, void *tgt_ctx, int sec_state,
		unsigned int intr_raw)
{
	sdei_dispatch_context_t *disp_ctx;
	gp_regs_t *tgt_gpregs;
	el3_state_t *tgt_el3;

	assert(tgt_ctx);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	disp_ctx = push_dispatch();
	assert(disp_ctx);
	disp_ctx->sec_state = sec_state;
	disp_ctx->map = map;
	disp_ctx->intr_raw = intr_raw;

	/* Save general purpose and exception registers */
	memcpy(disp_ctx->x, tgt_gpregs, sizeof(disp_ctx->x));
	disp_ctx->spsr_el3 = read_ctx_reg(tgt_el3, CTX_SPSR_EL3);
	disp_ctx->elr_el3 = read_ctx_reg(tgt_el3, CTX_ELR_EL3);

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;
	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(tgt_ctx);

	/* Save CVE-2018-3639 mitigation state */
	disp_ctx->disable_cve_2018_3639 = read_ctx_reg(tgt_cve_2018_3639,
		CTX_CVE_2018_3639_DISABLE);

	/* Force SDEI handler to execute with mitigation enabled by default */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE, 0);
#endif
}

static void restore_event_ctx(sdei_dispatch_context_t *disp_ctx, void *tgt_ctx)
{
	gp_regs_t *tgt_gpregs;
	el3_state_t *tgt_el3;

	assert(tgt_ctx);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	CASSERT(sizeof(disp_ctx->x) == (SDEI_SAVED_GPREGS * sizeof(uint64_t)),
			sdei_gpreg_save_size_mismatch);

	/* Restore general purpose and exception registers */
	memcpy(tgt_gpregs, disp_ctx->x, sizeof(disp_ctx->x));
	write_ctx_reg(tgt_el3, CTX_SPSR_EL3, disp_ctx->spsr_el3);
	write_ctx_reg(tgt_el3, CTX_ELR_EL3, disp_ctx->elr_el3);

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;
	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(tgt_ctx);

	/* Restore CVE-2018-3639 mitigation state */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE,
		disp_ctx->disable_cve_2018_3639);
#endif
}

static void save_secure_context(void)
{
	cm_el1_sysregs_context_save(SECURE);
}

/* Restore Secure context and arrange to resume it at the next ERET */
static void restore_and_resume_secure_context(void)
{
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);
}

/*
 * Restore Non-secure context and arrange to resume it at the next ERET. Return
 * a pointer to the Non-secure context.
 */
static cpu_context_t *restore_and_resume_ns_context(void)
{
	cpu_context_t *ns_ctx;

	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	ns_ctx = cm_get_context(NON_SECURE);
	assert(ns_ctx);

	return ns_ctx;
}

/*
 * Populate the Non-secure context so that the next ERET will dispatch to the
 * SDEI client.
 */
static void setup_ns_dispatch(sdei_ev_map_t *map, sdei_entry_t *se,
		cpu_context_t *ctx, int sec_state_to_resume,
		unsigned int intr_raw)
{
	el3_state_t *el3_ctx = get_el3state_ctx(ctx);

	/* Push the event and context */
	save_event_ctx(map, ctx, sec_state_to_resume, intr_raw);

	/*
	 * Setup handler arguments:
	 *
	 * - x0: Event number
	 * - x1: Handler argument supplied at the time of event registration
	 * - x2: Interrupted PC
	 * - x3: Interrupted SPSR
	 */
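	/*
	 * Viewed from the client, the handler is thus entered as if it had
	 * the (illustrative) C prototype:
	 *
	 *   void handler(int ev_num, uint64_t arg, uint64_t pc, uint64_t spsr);
	 */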
	SMC_SET_GP(ctx, CTX_GPREG_X0, map->ev_num);
	SMC_SET_GP(ctx, CTX_GPREG_X1, se->arg);
	SMC_SET_GP(ctx, CTX_GPREG_X2, read_ctx_reg(el3_ctx, CTX_ELR_EL3));
	SMC_SET_GP(ctx, CTX_GPREG_X3, read_ctx_reg(el3_ctx, CTX_SPSR_EL3));

	/*
	 * Prepare for ERET:
	 *
	 * - Set PC to the registered handler address
	 * - Set SPSR to jump to client EL with exceptions masked
	 */
	cm_set_elr_spsr_el3(NON_SECURE, (uintptr_t) se->ep,
			SPSR_64(sdei_client_el(), MODE_SP_ELX,
				DISABLE_ALL_EXCEPTIONS));
}

/* Handle a triggered SDEI interrupt while events were masked on this PE */
static void handle_masked_trigger(sdei_ev_map_t *map, sdei_entry_t *se,
		sdei_cpu_state_t *state, unsigned int intr_raw)
{
	uint64_t my_mpidr __unused = (read_mpidr_el1() & MPIDR_AFFINITY_MASK);
	int disable = 0;

	/* Nothing to do for event 0 */
	if (map->ev_num == SDEI_EVENT_0)
		return;

	/*
	 * For a private event, or for a shared event specifically routed to
	 * this CPU, we disable the interrupt, leave it pending, and perform
	 * EOI.
	 */
	if (is_event_private(map)) {
		disable = 1;
	} else if (se->reg_flags == SDEI_REGF_RM_PE) {
		assert(se->affinity == my_mpidr);
		disable = 1;
	}

	if (disable) {
		plat_ic_disable_interrupt(map->intr);
		plat_ic_set_interrupt_pending(map->intr);
		plat_ic_end_of_interrupt(intr_raw);
		state->pending_enables = 1;

		return;
	}

	/*
	 * We just received a shared event with routing set to ANY PE. The
	 * interrupt can't be delegated on this PE as SDEI events are masked.
	 * However, because its routing mode is ANY, it is possible that the
	 * event can be delegated on any other PE that hasn't masked events.
	 * Therefore, we set the interrupt back to pending so as to give other
	 * suitable PEs a chance of handling it.
	 */
	assert(plat_ic_is_spi(map->intr));
	plat_ic_set_interrupt_pending(map->intr);

	/*
	 * Leaving the same interrupt pending also means that the same interrupt
	 * can target this PE again as soon as this PE leaves EL3. Whether and
	 * how often that happens depends on the implementation of the GIC.
	 *
	 * We therefore call a platform handler to resolve this situation.
	 */
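	/*
	 * (A platform might, for example, resolve this by rerouting the
	 * interrupt to a PE that has not masked SDEI events.)
	 */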
	plat_sdei_handle_masked_trigger(my_mpidr, map->intr);

	/* This PE is masked. We EOI the interrupt, as it can't be delegated */
	plat_ic_end_of_interrupt(intr_raw);
}

/* SDEI main interrupt handler */
int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
		void *cookie)
{
	sdei_entry_t *se;
	cpu_context_t *ctx;
	sdei_ev_map_t *map;
	sdei_dispatch_context_t *disp_ctx;
	unsigned int sec_state;
	sdei_cpu_state_t *state;
	uint32_t intr;

	/*
	 * To handle an event, the following conditions must be true:
	 *
	 * 1. Event must be signalled
	 * 2. Event must be enabled
	 * 3. This PE must be a target PE for the event
	 * 4. PE must be unmasked for SDEI
	 * 5. If this is a normal event, no event must be running
	 * 6. If this is a critical event, no critical event must be running
	 *
	 * (1) and (2) are true when this function is running
	 * (3) is enforced in the GIC by selecting the appropriate routing
	 * option
	 * (4) is satisfied by the client calling PE_UNMASK
	 * (5) and (6) are enforced using interrupt priority, the RPR, in the
	 * GIC:
	 *   - Normal SDEI events belong to the Normal SDE priority class
	 *   - Critical SDEI events belong to the Critical SDE priority class
	 *
	 * The interrupt has already been acknowledged, and therefore is active,
	 * so no other PE can handle this event while we are at it.
	 *
	 * Find out if this is an SDEI interrupt. There must be an event mapped
	 * to this interrupt.
	 */
	intr = plat_ic_get_interrupt_id(intr_raw);
	map = find_event_map_by_intr(intr, plat_ic_is_spi(intr));
	if (!map) {
		ERROR("No SDEI map for interrupt %u\n", intr);
		panic();
	}

	/*
	 * The received interrupt number must either correspond to event 0, or
	 * must be a bound interrupt.
	 */
	assert((map->ev_num == SDEI_EVENT_0) || is_map_bound(map));

	se = get_event_entry(map);
	state = sdei_get_this_pe_state();

	if (state->pe_masked == PE_MASKED) {
		/*
		 * Interrupts received while this PE was masked can't be
		 * dispatched.
		 */
		SDEI_LOG("interrupt %u on %lx while PE masked\n", map->intr,
				read_mpidr_el1());
		if (is_event_shared(map))
			sdei_map_lock(map);

		handle_masked_trigger(map, se, state, intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	/* Insert a load barrier for a signalled SDEI event */
	if (map->ev_num == SDEI_EVENT_0)
		dmbld();

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* Assert that a shared event routed to this PE was configured so */
	if (is_event_shared(map) && (se->reg_flags == SDEI_REGF_RM_PE)) {
		assert(se->affinity ==
				(read_mpidr_el1() & MPIDR_AFFINITY_MASK));
	}

	if (!can_sdei_state_trans(se, DO_DISPATCH)) {
		SDEI_LOG("SDEI event 0x%x can't be dispatched; state=0x%x\n",
				map->ev_num, se->state);

		/*
		 * If the event is registered, leave the interrupt pending so
		 * that it's delivered when the event is enabled.
		 */
		if (GET_EV_STATE(se, REGISTERED))
			plat_ic_set_interrupt_pending(map->intr);

		/*
		 * The event was disabled or unregistered after the interrupt
		 * was triggered, so its interrupt is already disabled at the
		 * GIC; all that remains is to EOI the interrupt.
		 */
		plat_ic_end_of_interrupt(intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	disp_ctx = get_outstanding_dispatch();
	if (is_event_critical(map)) {
		/*
		 * If this event is Critical, and if there's an outstanding
		 * dispatch, assert that the latter is a Normal dispatch.
		 * Critical events can preempt an outstanding Normal event
		 * dispatch.
		 */
		if (disp_ctx)
			assert(is_event_normal(disp_ctx->map));
	} else {
		/*
		 * If this event is Normal, assert that there are no outstanding
		 * dispatches. Normal events can't preempt any outstanding event
		 * dispatches.
		 */
		assert(disp_ctx == NULL);
	}

	sec_state = get_interrupt_src_ss(flags);

	if (is_event_shared(map))
		sdei_map_unlock(map);

	SDEI_LOG("ACK %lx, ev:%d ss:%d spsr:%lx ELR:%lx\n", read_mpidr_el1(),
			map->ev_num, sec_state, read_spsr_el3(),
			read_elr_el3());

	ctx = handle;

	/*
	 * If we interrupted the Secure world, save its context and switch to
	 * Non-secure so that the event can be delegated there.
	 */
	if (sec_state == SECURE) {
		save_secure_context();
		ctx = restore_and_resume_ns_context();
	}

	setup_ns_dispatch(map, se, ctx, sec_state, intr_raw);

	/*
	 * End of interrupt is done in sdei_event_complete, when the client
	 * signals completion.
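	 * Until then the interrupt remains Active at the GIC, so it can
	 * neither re-trigger on this PE nor be taken by another PE.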
	 */
	return 0;
}

/* Explicitly dispatch the given SDEI event */
int sdei_dispatch_event(int ev_num, unsigned int preempted_sec_state)
{
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ctx;
	sdei_dispatch_context_t *disp_ctx;
	sdei_cpu_state_t *state;

	/* Validate the preempted security state */
	if ((preempted_sec_state != SECURE) &&
			(preempted_sec_state != NON_SECURE)) {
		return -1;
	}

	/* Can't dispatch if events are masked on this PE */
	state = sdei_get_this_pe_state();
	if (state->pe_masked == PE_MASKED)
		return -1;

	/* Event 0 can't be dispatched */
	if (ev_num == SDEI_EVENT_0)
		return -1;

	/* Locate the mapping corresponding to this event */
	map = find_event_map(ev_num);
	if (!map)
		return -1;

	/*
	 * Statically-bound or dynamic maps are dispatched only as a result of
	 * an interrupt, and not upon explicit request.
	 */
	if (is_map_dynamic(map) || is_map_bound(map))
		return -1;

	/* The event must be private */
	if (is_event_shared(map))
		return -1;

	/* Examine the state of the dispatch stack */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx) {
		/*
		 * There's an outstanding dispatch. If the outstanding dispatch
		 * is Critical, no more dispatches are possible.
		 */
		if (is_event_critical(disp_ctx->map))
			return -1;

		/*
		 * If the outstanding dispatch is Normal, only Critical events
		 * can be dispatched.
		 */
		if (is_event_normal(map))
			return -1;
	}

	se = get_event_entry(map);
	if (!can_sdei_state_trans(se, DO_DISPATCH))
		return -1;

	/* Activate the priority corresponding to the event being dispatched */
	ehf_activate_priority(sdei_event_priority(map));

	/*
	 * We assume the current context is SECURE, and that it has already
	 * been saved.
	 */
	ctx = restore_and_resume_ns_context();

	/*
	 * The caller has effectively terminated execution. Record the
	 * preempted security state so that the preempted context can be
	 * resumed when the event completes or completes-and-resumes.
	 */
	setup_ns_dispatch(map, se, ctx, preempted_sec_state, 0);

	return 0;
}
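
/*
 * Illustrative (hypothetical) caller: a Secure-world agent such as a
 * firmware-first RAS handler, having saved the Secure context, could delegate
 * handling to a registered client event with:
 *
 *   if (sdei_dispatch_event(ras_ev_num, NON_SECURE) < 0)
 *       ERROR("SDEI dispatch failed\n");
 */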

int sdei_event_complete(int resume, uint64_t pc)
{
	sdei_dispatch_context_t *disp_ctx;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ctx;
	sdei_action_t act;
	unsigned int client_el = sdei_client_el();

	/* Return an error if called without an active event */
	disp_ctx = get_outstanding_dispatch();
	if (!disp_ctx)
		return SDEI_EDENY;

	/* Validate the resumption point */
	if (resume && (plat_sdei_validate_entry_point(pc, client_el) != 0))
		return SDEI_EDENY;

	map = disp_ctx->map;
	assert(map);
	se = get_event_entry(map);

	/*
	 * For shared events, take the map lock before checking and updating
	 * the event state.
	 */
	if (is_event_shared(map))
		sdei_map_lock(map);

	act = resume ? DO_COMPLETE_RESUME : DO_COMPLETE;
	if (!can_sdei_state_trans(se, act)) {
		if (is_event_shared(map))
			sdei_map_unlock(map);
		return SDEI_EDENY;
	}

	/* Having done sanity checks, pop dispatch */
	pop_dispatch();

	SDEI_LOG("EOI:%lx, %d spsr:%lx elr:%lx\n", read_mpidr_el1(),
			map->ev_num, read_spsr_el3(), read_elr_el3());

	/*
	 * Restore the Non-secure context to how it was when originally
	 * interrupted. Once done, it's up-to-date with the saved copy.
	 */
	ctx = cm_get_context(NON_SECURE);
	restore_event_ctx(disp_ctx, ctx);

	if (resume) {
		/*
		 * Complete-and-resume call. Prepare the Non-secure context
		 * (currently active) for complete and resume.
		 */
		cm_set_elr_spsr_el3(NON_SECURE, pc, SPSR_64(client_el,
					MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));

		/*
		 * Make it look as if a synchronous exception were taken at the
		 * supplied Non-secure resumption point. Populate SPSR and
		 * ELR_ELx so that an ERET from there works as expected.
		 *
		 * The assumption is that the client, if necessary, would have
		 * saved any live content in these registers before making this
		 * call.
		 */
		if (client_el == MODE_EL2) {
			write_elr_el2(disp_ctx->elr_el3);
			write_spsr_el2(disp_ctx->spsr_el3);
		} else {
			/* EL1 */
			write_elr_el1(disp_ctx->elr_el3);
			write_spsr_el1(disp_ctx->spsr_el3);
		}
	}

	/*
	 * If the cause of dispatch originally interrupted the Secure world, and
	 * if the Non-secure world wasn't allowed to preempt Secure execution,
	 * resume Secure.
	 *
	 * No need to save the Non-secure context ahead of a world switch: the
	 * Non-secure context was fully saved before dispatch, and has been
	 * returned to its pre-dispatch state.
	 */
	if ((disp_ctx->sec_state == SECURE) &&
			(ehf_is_ns_preemption_allowed() == 0)) {
		restore_and_resume_secure_context();
	}

	if ((map->ev_num == SDEI_EVENT_0) || is_map_bound(map)) {
		/*
		 * The event was dispatched after receiving an SDEI interrupt.
		 * With the event handling completed, EOI the corresponding
		 * interrupt.
		 */
		plat_ic_end_of_interrupt(disp_ctx->intr_raw);
	} else {
		/*
		 * An unbound event must have been dispatched explicitly.
		 * Deactivate the priority level that was activated at the time
		 * of explicit dispatch.
		 */
		ehf_deactivate_priority(sdei_event_priority(map));
	}

	if (is_event_shared(map))
		sdei_map_unlock(map);

	return 0;
}
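
/*
 * Client-side view (illustrative): a handler finishes either with
 * SDEI_EVENT_COMPLETE, which resumes the interrupted context exactly as it was
 * saved, or with SDEI_EVENT_COMPLETE_AND_RESUME(pc), which continues at pc in
 * the client EL with the interrupted PC and PSTATE made available in ELR_ELx
 * and SPSR_ELx.
 */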

int sdei_event_context(void *handle, unsigned int param)
{
	sdei_dispatch_context_t *disp_ctx;

	if (param >= SDEI_SAVED_GPREGS)
		return SDEI_EINVAL;

	/* Get the outstanding dispatch on this CPU */
	disp_ctx = get_outstanding_dispatch();
	if (!disp_ctx)
		return SDEI_EDENY;

	assert(disp_ctx->map);

	if (!can_sdei_state_trans(get_event_entry(disp_ctx->map), DO_CONTEXT))
		return SDEI_EDENY;

	/*
	 * No locking is required for the Running state, as this is the only
	 * CPU that can complete the event.
	 */

	return disp_ctx->x[param];
}