xref: /rk3399_ARM-atf/services/std_svc/sdei/sdei_intr_mgmt.c (revision 091f39675a98ee9e22ed78f52e239880bedf8911)
/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <cassert.h>
#include <debug.h>
#include <ehf.h>
#include <interrupt_mgmt.h>
#include <runtime_svc.h>
#include <sdei.h>
#include <string.h>
#include "sdei_private.h"

/* x0-x17 GPREGS context */
#define SDEI_SAVED_GPREGS	18U
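
/*
 * Note that only x0-x17 are saved and restored around a dispatch: the SDEI
 * specification expects the client handler itself to preserve the remaining
 * general purpose registers.
 */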

/* Maximum preemption nesting levels: Critical priority and Normal priority */
#define MAX_EVENT_NESTING	2U

/* Per-CPU SDEI state access macro */
#define sdei_get_this_pe_state()	(&cpu_state[plat_my_core_pos()])

/* Structure to store information about an outstanding dispatch */
typedef struct sdei_dispatch_context {
	sdei_ev_map_t *map;
	uint64_t x[SDEI_SAVED_GPREGS];
	struct jmpbuf *dispatch_jmp;

	/* Exception state registers */
	uint64_t elr_el3;
	uint64_t spsr_el3;

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	/* CVE-2018-3639 mitigation state */
	uint64_t disable_cve_2018_3639;
#endif
} sdei_dispatch_context_t;

/* Per-CPU SDEI state data */
typedef struct sdei_cpu_state {
	sdei_dispatch_context_t dispatch_stack[MAX_EVENT_NESTING];
	unsigned short stack_top; /* Empty ascending */
	bool pe_masked;
	bool pending_enables;
} sdei_cpu_state_t;

/* SDEI states for all cores in the system */
static sdei_cpu_state_t cpu_state[PLATFORM_CORE_COUNT];

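/*
 * The dispatch stack is "empty ascending": stack_top indexes the next free
 * slot. For example, dispatching a Normal event takes stack_top from 0 to 1;
 * a Critical event arriving while the Normal handler runs preempts it and
 * takes stack_top to 2, the maximum nesting depth.
 */
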
int64_t sdei_pe_mask(void)
{
	int64_t ret = 0;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	/*
	 * The return value indicates whether this call had any effect on the
	 * mask status of this PE.
	 */
	if (!state->pe_masked) {
		state->pe_masked = true;
		ret = 1;
	}

	return ret;
}

void sdei_pe_unmask(void)
{
	unsigned int i;
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	uint64_t my_mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;

	/*
	 * If there are pending enables, iterate through the private mappings
	 * and enable the interrupts of bound maps that are in the enabled
	 * state. Also, iterate through shared mappings and enable interrupts
	 * of events that are targeted to this PE.
	 */
	if (state->pending_enables) {
		for_each_private_map(i, map) {
			se = get_event_entry(map);
			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED))
				plat_ic_enable_interrupt(map->intr);
		}

		for_each_shared_map(i, map) {
			se = get_event_entry(map);

			sdei_map_lock(map);
			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED) &&
					(se->reg_flags == SDEI_REGF_RM_PE) &&
					(se->affinity == my_mpidr)) {
				plat_ic_enable_interrupt(map->intr);
			}
			sdei_map_unlock(map);
		}
	}

	state->pending_enables = false;
	state->pe_masked = false;
}

/* Push a dispatch context to the dispatch stack */
static sdei_dispatch_context_t *push_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	sdei_dispatch_context_t *disp_ctx;

	/* Cannot have more than max events */
	assert(state->stack_top < MAX_EVENT_NESTING);

	disp_ctx = &state->dispatch_stack[state->stack_top];
	state->stack_top++;

	return disp_ctx;
}

/* Pop a dispatch context from the dispatch stack */
static sdei_dispatch_context_t *pop_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0U)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	state->stack_top--;

	return &state->dispatch_stack[state->stack_top];
}

/* Retrieve the context at the top of the dispatch stack */
static sdei_dispatch_context_t *get_outstanding_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0U)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	return &state->dispatch_stack[state->stack_top - 1U];
}

static sdei_dispatch_context_t *save_event_ctx(sdei_ev_map_t *map,
		void *tgt_ctx)
{
	sdei_dispatch_context_t *disp_ctx;
	const gp_regs_t *tgt_gpregs;
	const el3_state_t *tgt_el3;

	assert(tgt_ctx != NULL);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	disp_ctx = push_dispatch();
	assert(disp_ctx != NULL);
	disp_ctx->map = map;

	/* Save general purpose and exception registers */
	memcpy(disp_ctx->x, tgt_gpregs, sizeof(disp_ctx->x));
	disp_ctx->spsr_el3 = read_ctx_reg(tgt_el3, CTX_SPSR_EL3);
	disp_ctx->elr_el3 = read_ctx_reg(tgt_el3, CTX_ELR_EL3);

	return disp_ctx;
}

static void restore_event_ctx(const sdei_dispatch_context_t *disp_ctx,
		void *tgt_ctx)
{
	gp_regs_t *tgt_gpregs;
	el3_state_t *tgt_el3;

	assert(tgt_ctx != NULL);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	CASSERT(sizeof(disp_ctx->x) == (SDEI_SAVED_GPREGS * sizeof(uint64_t)),
			sdei_saved_gpregs_size_mismatch);

	/* Restore general purpose and exception registers */
	memcpy(tgt_gpregs, disp_ctx->x, sizeof(disp_ctx->x));
	write_ctx_reg(tgt_el3, CTX_SPSR_EL3, disp_ctx->spsr_el3);
	write_ctx_reg(tgt_el3, CTX_ELR_EL3, disp_ctx->elr_el3);

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;
	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(tgt_ctx);

	/* Restore CVE-2018-3639 mitigation state */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE,
		disp_ctx->disable_cve_2018_3639);
#endif
}

static void save_secure_context(void)
{
	cm_el1_sysregs_context_save(SECURE);
}

/* Restore Secure context and arrange to resume it at the next ERET */
static void restore_and_resume_secure_context(void)
{
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);
}

/*
 * Restore Non-secure context and arrange to resume it at the next ERET.
 * Return pointer to the Non-secure context.
 */
static cpu_context_t *restore_and_resume_ns_context(void)
{
	cpu_context_t *ns_ctx;

	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	ns_ctx = cm_get_context(NON_SECURE);
	assert(ns_ctx != NULL);

	return ns_ctx;
}

/*
 * Populate the Non-secure context so that the next ERET will dispatch to the
 * SDEI client.
 */
static void setup_ns_dispatch(sdei_ev_map_t *map, sdei_entry_t *se,
		cpu_context_t *ctx, struct jmpbuf *dispatch_jmp)
{
	sdei_dispatch_context_t *disp_ctx;

	/* Push the event and context */
	disp_ctx = save_event_ctx(map, ctx);

	/*
	 * Setup handler arguments:
	 *
	 * - x0: Event number
	 * - x1: Handler argument supplied at the time of event registration
	 * - x2: Interrupted PC
	 * - x3: Interrupted SPSR
	 */
	SMC_SET_GP(ctx, CTX_GPREG_X0, (uint64_t) map->ev_num);
	SMC_SET_GP(ctx, CTX_GPREG_X1, se->arg);
	SMC_SET_GP(ctx, CTX_GPREG_X2, disp_ctx->elr_el3);
	SMC_SET_GP(ctx, CTX_GPREG_X3, disp_ctx->spsr_el3);

	/*
	 * Prepare for ERET:
	 *
	 * - Set PC to the registered handler address
	 * - Set SPSR to jump to client EL with exceptions masked
	 */
	cm_set_elr_spsr_el3(NON_SECURE, (uintptr_t) se->ep,
			SPSR_64(sdei_client_el(), MODE_SP_ELX,
				DISABLE_ALL_EXCEPTIONS));

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cve_2018_3639_t *tgt_cve_2018_3639;
	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(ctx);

	/* Save CVE-2018-3639 mitigation state */
	disp_ctx->disable_cve_2018_3639 = read_ctx_reg(tgt_cve_2018_3639,
		CTX_CVE_2018_3639_DISABLE);

	/* Force SDEI handler to execute with mitigation enabled by default */
	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE, 0);
#endif

	disp_ctx->dispatch_jmp = dispatch_jmp;
}

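/*
 * Illustrative only: given the argument setup above, a hypothetical
 * Non-secure client handler entered by the next ERET would observe:
 *
 *	x0 - event number
 *	x1 - argument supplied at registration
 *	x2 - PC of the interrupted context
 *	x3 - SPSR of the interrupted context
 *
 * and is expected to finish by calling SDEI_EVENT_COMPLETE or
 * SDEI_EVENT_COMPLETE_AND_RESUME.
 */
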
/* Handle a triggered SDEI interrupt while events were masked on this PE */
static void handle_masked_trigger(sdei_ev_map_t *map, sdei_entry_t *se,
		sdei_cpu_state_t *state, unsigned int intr_raw)
{
	uint64_t my_mpidr __unused = (read_mpidr_el1() & MPIDR_AFFINITY_MASK);
	bool disable = false;

	/* Nothing to do for event 0 */
	if (map->ev_num == SDEI_EVENT_0)
		return;

	/*
	 * For a private event, or for a shared event specifically routed to
	 * this CPU, we disable the interrupt, leave it pending, and EOI it.
	 */
	if (is_event_private(map) || (se->reg_flags == SDEI_REGF_RM_PE))
		disable = true;

	if (se->reg_flags == SDEI_REGF_RM_PE)
		assert(se->affinity == my_mpidr);

	if (disable) {
		plat_ic_disable_interrupt(map->intr);
		plat_ic_set_interrupt_pending(map->intr);
		plat_ic_end_of_interrupt(intr_raw);
		state->pending_enables = true;

		return;
	}

	/*
	 * We just received a shared event with routing set to ANY PE. The
	 * interrupt can't be delegated on this PE as SDEI events are masked.
	 * However, because its routing mode is ANY, it is possible that the
	 * event can be delegated on any other PE that hasn't masked events.
	 * Therefore, we set the interrupt back to pending so as to give other
	 * suitable PEs a chance of handling it.
	 */
	assert(plat_ic_is_spi(map->intr) != 0);
	plat_ic_set_interrupt_pending(map->intr);

	/*
	 * Leaving the same interrupt pending also means that the same
	 * interrupt can target this PE again as soon as this PE leaves EL3.
	 * Whether and how often that happens depends on the implementation of
	 * the GIC.
	 *
	 * We therefore call a platform handler to resolve this situation.
	 */
	plat_sdei_handle_masked_trigger(my_mpidr, map->intr);

	/* This PE is masked. We EOI the interrupt, as it can't be delegated */
	plat_ic_end_of_interrupt(intr_raw);
}

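/*
 * To summarise the masked-trigger policy above, for events other than event
 * 0: private events and shared events routed specifically to this PE are
 * disabled and left pending, to be re-enabled on PE_UNMASK; shared events
 * routed to ANY PE are set pending again so that another unmasked PE may
 * take them. In both cases the interrupt is EOI'd on this PE.
 */
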
/* SDEI main interrupt handler */
int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
		void *cookie)
{
	sdei_entry_t *se;
	cpu_context_t *ctx;
	sdei_ev_map_t *map;
	const sdei_dispatch_context_t *disp_ctx;
	unsigned int sec_state;
	sdei_cpu_state_t *state;
	uint32_t intr;
	struct jmpbuf dispatch_jmp;
	const uint64_t mpidr = read_mpidr_el1();

	/*
	 * To handle an event, the following conditions must be true:
	 *
	 * 1. Event must be signalled
	 * 2. Event must be enabled
	 * 3. This PE must be a target PE for the event
	 * 4. PE must be unmasked for SDEI
	 * 5. If this is a normal event, no event must be running
	 * 6. If this is a critical event, no critical event must be running
	 *
	 * (1) and (2) are true when this function is running
	 * (3) is enforced in the GIC by selecting the appropriate routing
	 *     option
	 * (4) is satisfied by the client calling PE_UNMASK
	 * (5) and (6) are enforced using interrupt priority, the RPR, in the
	 *     GIC:
	 *   - Normal SDEI events belong to the Normal SDEI priority class
	 *   - Critical SDEI events belong to the Critical SDEI priority class
	 *
	 * The interrupt has already been acknowledged, and therefore is
	 * active, so no other PE can handle this event while we are at it.
	 *
	 * Find out whether this is an SDEI interrupt: there must be an event
	 * mapped to this interrupt.
	 */
	intr = plat_ic_get_interrupt_id(intr_raw);
	map = find_event_map_by_intr(intr, (plat_ic_is_spi(intr) != 0));
	if (map == NULL) {
		ERROR("No SDEI map for interrupt %u\n", intr);
		panic();
	}

	/*
	 * The received interrupt number must either correspond to event 0, or
	 * must be a bound interrupt.
	 */
	assert((map->ev_num == SDEI_EVENT_0) || is_map_bound(map));

	se = get_event_entry(map);
	state = sdei_get_this_pe_state();

	if (state->pe_masked) {
		/*
		 * Interrupts received while this PE was masked can't be
		 * dispatched.
		 */
		SDEI_LOG("interrupt %u on %llx while PE masked\n", map->intr,
				mpidr);
		if (is_event_shared(map))
			sdei_map_lock(map);

		handle_masked_trigger(map, se, state, intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	/* Insert load barrier for signalled SDEI event */
	if (map->ev_num == SDEI_EVENT_0)
		dmbld();

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* A shared event routed to this PE must have been configured so */
	if (is_event_shared(map) && (se->reg_flags == SDEI_REGF_RM_PE)) {
		assert(se->affinity == (mpidr & MPIDR_AFFINITY_MASK));
	}

	if (!can_sdei_state_trans(se, DO_DISPATCH)) {
		SDEI_LOG("SDEI event 0x%x can't be dispatched; state=0x%x\n",
				map->ev_num, se->state);

		/*
		 * If the event is registered, leave the interrupt pending so
		 * that it's delivered when the event is enabled.
		 */
		if (GET_EV_STATE(se, REGISTERED))
			plat_ic_set_interrupt_pending(map->intr);

		/*
		 * The interrupt was disabled or unregistered after the
		 * handler started to execute, which means the interrupt is
		 * now disabled and we only need to EOI it.
		 */
		plat_ic_end_of_interrupt(intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	disp_ctx = get_outstanding_dispatch();
	if (is_event_critical(map)) {
		/*
		 * If this event is Critical, and if there's an outstanding
		 * dispatch, assert that the latter is a Normal dispatch.
		 * Critical events can preempt an outstanding Normal event
		 * dispatch.
		 */
		if (disp_ctx != NULL)
			assert(is_event_normal(disp_ctx->map));
	} else {
		/*
		 * If this event is Normal, assert that there are no
		 * outstanding dispatches. Normal events can't preempt any
		 * outstanding event dispatches.
		 */
		assert(disp_ctx == NULL);
	}

	sec_state = get_interrupt_src_ss(flags);

	if (is_event_shared(map))
		sdei_map_unlock(map);

	SDEI_LOG("ACK %llx, ev:%d ss:%d spsr:%lx ELR:%lx\n", mpidr,
			map->ev_num, sec_state, read_spsr_el3(),
			read_elr_el3());

	ctx = handle;

	/*
	 * Check if we interrupted the Secure state. If so, perform a context
	 * switch so that we can delegate to NS.
	 */
	if (sec_state == SECURE) {
		save_secure_context();
		ctx = restore_and_resume_ns_context();
	}

	/* Synchronously dispatch event */
	setup_ns_dispatch(map, se, ctx, &dispatch_jmp);
	begin_sdei_synchronous_dispatch(&dispatch_jmp);

	/*
	 * We reach here when the client completes the event.
	 *
	 * If the cause of dispatch originally interrupted the Secure world,
	 * and if the Non-secure world wasn't allowed to preempt Secure
	 * execution, resume Secure.
	 *
	 * No need to save the Non-secure context ahead of a world switch: the
	 * Non-secure context was fully saved before dispatch, and has been
	 * returned to its pre-dispatch state.
	 */
	if ((sec_state == SECURE) && (ehf_is_ns_preemption_allowed() == 0U))
		restore_and_resume_secure_context();

	/*
	 * The event was dispatched after receiving an SDEI interrupt. With
	 * the event handling completed, EOI the corresponding interrupt.
	 */
	if ((map->ev_num != SDEI_EVENT_0) && !is_map_bound(map)) {
		ERROR("Invalid SDEI mapping: ev=%u\n", map->ev_num);
		panic();
	}
	plat_ic_end_of_interrupt(intr_raw);

	return 0;
}

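/*
 * Note the overall shape of the synchronous dispatch above: EL3 acknowledges
 * the interrupt, saves the interrupted context, and ERETs to the client
 * handler; begin_sdei_synchronous_dispatch() returns only once the client
 * calls SDEI_EVENT_COMPLETE or SDEI_EVENT_COMPLETE_AND_RESUME, after which
 * the interrupt is EOI'd.
 */
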
/*
 * Explicitly dispatch the given SDEI event.
 *
 * When calling this API, the caller must be prepared for the SDEI dispatcher
 * to restore the Non-secure context and make it the active one. This call
 * returns only after the client has completed the dispatch. Then, the
 * Non-secure context will be active, and the following ERET will return to
 * Non-secure.
 *
 * Should the caller require re-entry to Secure, it must restore the Secure
 * context and program registers for ERET.
 */
int sdei_dispatch_event(int ev_num)
{
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ns_ctx;
	sdei_dispatch_context_t *disp_ctx;
	sdei_cpu_state_t *state;
	struct jmpbuf dispatch_jmp;

	/* Can't dispatch if events are masked on this PE */
	state = sdei_get_this_pe_state();
	if (state->pe_masked)
		return -1;

	/* Event 0 can't be dispatched */
	if (ev_num == SDEI_EVENT_0)
		return -1;

	/* Locate the mapping corresponding to this event */
	map = find_event_map(ev_num);
	if (map == NULL)
		return -1;

	/* Only explicit events can be dispatched */
	if (!is_map_explicit(map))
		return -1;

	/* Examine the state of the dispatch stack */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx != NULL) {
		/*
		 * There's an outstanding dispatch. If the outstanding
		 * dispatch is Critical, no more dispatches are possible.
		 */
		if (is_event_critical(disp_ctx->map))
			return -1;

		/*
		 * If the outstanding dispatch is Normal, only Critical events
		 * can be dispatched.
		 */
		if (is_event_normal(map))
			return -1;
	}

	se = get_event_entry(map);
	if (!can_sdei_state_trans(se, DO_DISPATCH))
		return -1;

	/* Activate the priority corresponding to the event being dispatched */
	ehf_activate_priority(sdei_event_priority(map));

	/*
	 * Prepare for NS dispatch by restoring the Non-secure context and
	 * marking that as active.
	 */
	ns_ctx = restore_and_resume_ns_context();

	/* Dispatch event synchronously */
	setup_ns_dispatch(map, se, ns_ctx, &dispatch_jmp);
	begin_sdei_synchronous_dispatch(&dispatch_jmp);

	/*
	 * We reach here when the client completes the event.
	 *
	 * Deactivate the priority level that was activated at the time of
	 * explicit dispatch.
	 */
	ehf_deactivate_priority(sdei_event_priority(map));

	return 0;
}

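/*
 * Illustrative only: an EL3 component, such as a platform error handler,
 * might delegate a firmware-detected error to the Non-secure client through
 * an explicit event. The event number and function name below are
 * hypothetical, and assume a matching explicit event in the platform's SDEI
 * mappings. The call blocks until the client completes the event:
 *
 *	static int plat_report_error_to_client(void)
 *	{
 *		return sdei_dispatch_event(804);
 *	}
 */
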
static void end_sdei_synchronous_dispatch(struct jmpbuf *buffer)
{
	longjmp(buffer);
}

int sdei_event_complete(bool resume, uint64_t pc)
{
	sdei_dispatch_context_t *disp_ctx;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ctx;
	sdei_action_t act;
	unsigned int client_el = sdei_client_el();

	/* Return an error if called without an active event */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx == NULL)
		return SDEI_EDENY;

	/* Validate the resumption point */
	if (resume && (plat_sdei_validate_entry_point(pc, client_el) != 0))
		return SDEI_EDENY;

	map = disp_ctx->map;
	assert(map != NULL);
	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	act = resume ? DO_COMPLETE_RESUME : DO_COMPLETE;
	if (!can_sdei_state_trans(se, act)) {
		if (is_event_shared(map))
			sdei_map_unlock(map);
		return SDEI_EDENY;
	}

	if (is_event_shared(map))
		sdei_map_unlock(map);

	/* Having done the sanity checks, pop the dispatch */
	(void) pop_dispatch();

	SDEI_LOG("EOI:%lx, %d spsr:%lx elr:%lx\n", read_mpidr_el1(),
			map->ev_num, read_spsr_el3(), read_elr_el3());

	/*
	 * Restore the Non-secure context to how it was originally
	 * interrupted. Once done, it's up to date with the saved copy.
	 */
	ctx = cm_get_context(NON_SECURE);
	restore_event_ctx(disp_ctx, ctx);

	if (resume) {
		/*
		 * Complete-and-resume call. Prepare the Non-secure context
		 * (currently active) for complete and resume.
		 */
		cm_set_elr_spsr_el3(NON_SECURE, pc, SPSR_64(client_el,
					MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));

		/*
		 * Make it look as if a synchronous exception were taken at
		 * the supplied Non-secure resumption point. Populate SPSR and
		 * ELR_ELx so that an ERET from there works as expected.
		 *
		 * The assumption is that the client, if necessary, would have
		 * saved any live content in these registers before making
		 * this call.
		 */
		if (client_el == MODE_EL2) {
			write_elr_el2(disp_ctx->elr_el3);
			write_spsr_el2(disp_ctx->spsr_el3);
		} else {
			/* EL1 */
			write_elr_el1(disp_ctx->elr_el3);
			write_spsr_el1(disp_ctx->spsr_el3);
		}
	}

	/* End the outstanding dispatch */
	end_sdei_synchronous_dispatch(disp_ctx->dispatch_jmp);

	return 0;
}

int64_t sdei_event_context(void *handle, unsigned int param)
{
	sdei_dispatch_context_t *disp_ctx;

	if (param >= SDEI_SAVED_GPREGS)
		return SDEI_EINVAL;

	/* Get the outstanding dispatch on this CPU */
	disp_ctx = get_outstanding_dispatch();
	if (disp_ctx == NULL)
		return SDEI_EDENY;

	assert(disp_ctx->map != NULL);

	if (!can_sdei_state_trans(get_event_entry(disp_ctx->map), DO_CONTEXT))
		return SDEI_EDENY;

	/*
	 * No locking is required for the Running state, as this is the only
	 * CPU that can complete the event.
	 */

	return (int64_t) disp_ctx->x[param];
}
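
/*
 * Illustrative only: while handling an event, a client retrieves the
 * interrupted values of x0-x17 with the SDEI_EVENT_CONTEXT call, passing the
 * register index as the parameter; that call lands in sdei_event_context()
 * above. A hypothetical client-side wrapper, assuming an sdei_smc() helper
 * that issues the corresponding SMC, might read:
 *
 *	uint64_t read_interrupted_reg(unsigned int n)
 *	{
 *		return sdei_smc(SDEI_EVENT_CONTEXT, n);
 *	}
 */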
707