xref: /rk3399_ARM-atf/services/std_svc/sdei/sdei_main.c (revision af2c9ecdf1b68c6243f6d23e26d0281d182d3b45)
1 /*
2  * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <arch_helpers.h>
8 #include <assert.h>
9 #include <bl31.h>
10 #include <bl_common.h>
11 #include <cassert.h>
12 #include <context.h>
13 #include <context_mgmt.h>
14 #include <debug.h>
15 #include <ehf.h>
16 #include <interrupt_mgmt.h>
17 #include <platform.h>
18 #include <pubsub.h>
19 #include <runtime_svc.h>
20 #include <sdei.h>
21 #include <stddef.h>
22 #include <string.h>
23 #include <utils.h>
24 #include "sdei_private.h"
25 
#define MAJOR_VERSION	1
#define MINOR_VERSION	0
#define VENDOR_VERSION	0

/*
 * Compose the 64-bit value returned by SDEI_VERSION: major version in
 * bits [63:48], minor version in bits [47:32], vendor-defined version
 * in bits [31:0].
 */
#define MAKE_SDEI_VERSION(_major, _minor, _vendor) \
	((((unsigned long long)(_major)) << 48) | \
	 (((unsigned long long)(_minor)) << 32) | \
	 (_vendor))

/* Priority programmed for an interrupt when it is handed back to Non-secure */
#define LOWEST_INTR_PRIORITY		0xff

/* An MPIDR is valid iff the platform can map it to a core position */
#define is_valid_affinity(_mpidr)	(plat_core_pos_by_mpidr(_mpidr) >= 0)

/* Lower GIC priority value means higher priority */
CASSERT(PLAT_SDEI_CRITICAL_PRI < PLAT_SDEI_NORMAL_PRI,
		sdei_critical_must_have_higher_priority);

/* Counts of dynamic (bindable) slots, populated during sdei_class_init() */
static unsigned int num_dyn_priv_slots, num_dyn_shrd_slots;
43 
44 /* Initialise SDEI map entries */
45 static void init_map(sdei_ev_map_t *map)
46 {
47 	map->reg_count = 0;
48 }
49 
50 /* Convert mapping to SDEI class */
51 sdei_class_t map_to_class(sdei_ev_map_t *map)
52 {
53 	return is_event_critical(map) ? SDEI_CRITICAL : SDEI_NORMAL;
54 }
55 
56 /* Clear SDEI event entries except state */
57 static void clear_event_entries(sdei_entry_t *se)
58 {
59 	se->ep = 0;
60 	se->arg = 0;
61 	se->affinity = 0;
62 	se->reg_flags = 0;
63 }
64 
65 /* Perform CPU-specific state initialisation */
66 static void *sdei_cpu_on_init(const void *arg)
67 {
68 	int i;
69 	sdei_ev_map_t *map;
70 	sdei_entry_t *se;
71 
72 	/* Initialize private mappings on this CPU */
73 	for_each_private_map(i, map) {
74 		se = get_event_entry(map);
75 		clear_event_entries(se);
76 		se->state = 0;
77 	}
78 
79 	SDEI_LOG("Private events initialized on %lx\n", read_mpidr_el1());
80 
81 	/* All PEs start with SDEI events masked */
82 	sdei_pe_mask();
83 
84 	return 0;
85 }
86 
/*
 * Initialise all mappings of one SDEI class (SDEI_CRITICAL or SDEI_NORMAL).
 *
 * Walks the shared mapping table and then the private mapping table. With
 * assertions enabled, each table is sanity-checked: mappings must be sorted
 * by event number, shared events must not be event 0 nor signalable nor
 * explicit, and private event 0 must be a signalable Secure SGI. For each
 * mapping of the requested class, platform-defined (non-dynamic) events are
 * marked bound, dynamic bind slots are counted, and registration state is
 * reset. Finishes by running per-CPU init for the calling PE.
 *
 * Called once per class from sdei_init() before SMC handling begins.
 */
void sdei_class_init(sdei_class_t class)
{
	unsigned int i, zero_found __unused = 0;
	int ev_num_so_far __unused;
	sdei_ev_map_t *map;

	/* Sanity check and configuration of shared events */
	ev_num_so_far = -1;
	for_each_shared_map(i, map) {
#if ENABLE_ASSERTIONS
		/* Ensure mappings are sorted */
		assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));

		ev_num_so_far = map->ev_num;

		/* Event 0 must not be shared */
		assert(map->ev_num != SDEI_EVENT_0);

		/* Check for valid event */
		assert(map->ev_num >= 0);

		/* Make sure it's a shared event */
		assert(is_event_shared(map));

		/* No shared mapping should have signalable property */
		assert(!is_event_signalable(map));

		/* Shared mappings can't be explicit */
		assert(!is_map_explicit(map));
#endif

		/* Skip initializing the wrong priority */
		if (map_to_class(map) != class)
			continue;

		/* Platform events are always bound, so set the bound flag */
		if (is_map_dynamic(map)) {
			/* Dynamic slot: free until a client binds an SPI to it */
			assert(map->intr == SDEI_DYN_IRQ);
			assert(is_event_normal(map));
			num_dyn_shrd_slots++;
		} else {
			/* Shared mappings must be bound to shared interrupt */
			assert(plat_ic_is_spi(map->intr));
			set_map_bound(map);
		}

		init_map(map);
	}

	/* Sanity check and configuration of private events for this CPU */
	ev_num_so_far = -1;
	for_each_private_map(i, map) {
#if ENABLE_ASSERTIONS
		/* Ensure mappings are sorted */
		assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));

		ev_num_so_far = map->ev_num;

		if (map->ev_num == SDEI_EVENT_0) {
			zero_found = 1;

			/* Event 0 must be a Secure SGI */
			assert(is_secure_sgi(map->intr));

			/*
			 * Event 0 can have only have signalable flag (apart
			 * from being private
			 */
			assert(map->map_flags == (SDEI_MAPF_SIGNALABLE |
						SDEI_MAPF_PRIVATE));
		} else {
			/* No other mapping should have signalable property */
			assert(!is_event_signalable(map));
		}

		/* Check for valid event */
		assert(map->ev_num >= 0);

		/* Make sure it's a private event */
		assert(is_event_private(map));

		/*
		 * Other than priority, explicit events can only have explicit
		 * and private flags set.
		 */
		if (is_map_explicit(map)) {
			assert((map->map_flags | SDEI_MAPF_CRITICAL) ==
					(SDEI_MAPF_EXPLICIT | SDEI_MAPF_PRIVATE
					| SDEI_MAPF_CRITICAL));
		}
#endif

		/* Skip initializing the wrong priority */
		if (map_to_class(map) != class)
			continue;

		/* Platform events are always bound, so set the bound flag */
		if (map->ev_num != SDEI_EVENT_0) {
			if (is_map_dynamic(map)) {
				/* Dynamic slot: free until a client binds a PPI to it */
				assert(map->intr == SDEI_DYN_IRQ);
				assert(is_event_normal(map));
				num_dyn_priv_slots++;
			} else if (is_map_explicit(map)) {
				/*
				 * Explicit mappings don't have a backing
				 * SDEI interrupt, but verify that anyway.
				 */
				assert(map->intr == SDEI_DYN_IRQ);
			} else {
				/*
				 * Private mappings must be bound to private
				 * interrupt.
				 */
				assert(plat_ic_is_ppi(map->intr));
				set_map_bound(map);
			}
		}

		init_map(map);
	}

	/* Ensure event 0 is in the mapping */
	assert(zero_found);

	/* Bring this CPU's private state to its initial (masked) condition */
	sdei_cpu_on_init(NULL);
}
214 
215 /* SDEI dispatcher initialisation */
216 void sdei_init(void)
217 {
218 	sdei_class_init(SDEI_CRITICAL);
219 	sdei_class_init(SDEI_NORMAL);
220 
221 	/* Register priority level handlers */
222 	ehf_register_priority_handler(PLAT_SDEI_CRITICAL_PRI,
223 			sdei_intr_handler);
224 	ehf_register_priority_handler(PLAT_SDEI_NORMAL_PRI,
225 			sdei_intr_handler);
226 }
227 
228 /* Populate SDEI event entry */
229 static void set_sdei_entry(sdei_entry_t *se, uint64_t ep, uint64_t arg,
230 		unsigned int flags, uint64_t affinity)
231 {
232 	assert(se != NULL);
233 
234 	se->ep = ep;
235 	se->arg = arg;
236 	se->affinity = (affinity & MPIDR_AFFINITY_MASK);
237 	se->reg_flags = flags;
238 }
239 
240 static unsigned long long sdei_version(void)
241 {
242 	return MAKE_SDEI_VERSION(MAJOR_VERSION, MINOR_VERSION, VENDOR_VERSION);
243 }
244 
245 /* Validate flags and MPIDR values for REGISTER and ROUTING_SET calls */
246 static int validate_flags(uint64_t flags, uint64_t mpidr)
247 {
248 	/* Validate flags */
249 	switch (flags) {
250 	case SDEI_REGF_RM_PE:
251 		if (!is_valid_affinity(mpidr))
252 			return SDEI_EINVAL;
253 		break;
254 	case SDEI_REGF_RM_ANY:
255 		break;
256 	default:
257 		/* Unknown flags */
258 		return SDEI_EINVAL;
259 	}
260 
261 	return 0;
262 }
263 
/*
 * Set the interrupt routing of a shared SDEI event.
 *
 * Validates the flags/MPIDR pair, then — under the map lock — checks the
 * event is bound, shared, and in a state that permits DO_ROUTING, before
 * updating the registration flags and reprogramming the SPI routing.
 *
 * Returns 0 on success, SDEI_EINVAL for bad arguments or unsuitable
 * events, SDEI_EDENY if the event state forbids the transition.
 */
static int sdei_event_routing_set(int ev_num, uint64_t flags, uint64_t mpidr)
{
	int ret, routing;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	ret = validate_flags(flags, mpidr);
	if (ret)
		return ret;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
		return SDEI_EINVAL;

	/* The event must not be private */
	if (is_event_private(map))
		return SDEI_EINVAL;

	se = get_event_entry(map);

	sdei_map_lock(map);

	/* Re-check under the lock: only bound, shared events can be routed */
	if (!is_map_bound(map) || is_event_private(map)) {
		ret = SDEI_EINVAL;
		goto finish;
	}

	if (!can_sdei_state_trans(se, DO_ROUTING)) {
		ret = SDEI_EDENY;
		goto finish;
	}

	/* Choose appropriate routing */
	routing = (flags == SDEI_REGF_RM_ANY) ? INTR_ROUTING_MODE_ANY :
		INTR_ROUTING_MODE_PE;

	/* Update event registration flag */
	se->reg_flags = flags;

	/*
	 * ROUTING_SET is permissible only when event composite state is
	 * 'registered, disabled, and not running'. This means that the
	 * interrupt is currently disabled, and not active.
	 */
	plat_ic_set_spi_routing(map->intr, routing, (u_register_t) mpidr);

finish:
	sdei_map_unlock(map);

	return ret;
}
317 
/*
 * Register a handler and argument for an SDEI event.
 *
 * Validates the entry point (against the client EL), the routing flags,
 * and the event number. For bound events, transfers ownership of the
 * backing interrupt from Non-secure to EL3: the interrupt is disabled,
 * pending state cleared (except for SGIs), retyped to EL3, and its
 * priority and (for shared events) routing programmed. The event entry
 * is then populated and the registration count incremented.
 *
 * The event state is snapshotted before the DO_REGISTER transition so
 * that any later failure can restore it (see the 'fallback' path).
 *
 * Returns 0 on success, SDEI_EINVAL for bad arguments, SDEI_EDENY when
 * the state machine or interrupt conditions forbid registration.
 */
static int sdei_event_register(int ev_num, uint64_t ep, uint64_t arg,
		uint64_t flags, uint64_t mpidr)
{
	int ret;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	sdei_state_t backup_state;

	if (!ep || (plat_sdei_validate_entry_point(ep, sdei_client_el()) != 0))
		return SDEI_EINVAL;

	ret = validate_flags(flags, mpidr);
	if (ret)
		return ret;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
		return SDEI_EINVAL;

	/* Private events always target the PE */
	if (is_event_private(map))
		flags = SDEI_REGF_RM_PE;

	se = get_event_entry(map);

	/*
	 * Even though register operation is per-event (additionally for private
	 * events, registration is required individually), it has to be
	 * serialised with respect to bind/release, which are global operations.
	 * So we hold the lock throughout, unconditionally.
	 */
	sdei_map_lock(map);

	/* Snapshot state so it can be restored if we bail out below */
	backup_state = se->state;
	if (!can_sdei_state_trans(se, DO_REGISTER))
		goto fallback;

	/*
	 * When registering for dynamic events, make sure it's been bound
	 * already. This has to be the case as, without binding, the client
	 * can't know about the event number to register for.
	 */
	if (is_map_dynamic(map) && !is_map_bound(map))
		goto fallback;

	if (is_event_private(map)) {
		/* Multiple calls to register are possible for private events */
		assert(map->reg_count >= 0);
	} else {
		/* Only single call to register is possible for shared events */
		assert(map->reg_count == 0);
	}

	if (is_map_bound(map)) {
		/* Meanwhile, did any PE ACK the interrupt? */
		if (plat_ic_get_interrupt_active(map->intr))
			goto fallback;

		/* The interrupt must currently owned by Non-secure */
		if (plat_ic_get_interrupt_type(map->intr) != INTR_TYPE_NS)
			goto fallback;

		/*
		 * Disable forwarding of new interrupt triggers to CPU
		 * interface.
		 */
		plat_ic_disable_interrupt(map->intr);

		/*
		 * Any events that are triggered after register and before
		 * enable should remain pending. Clear any previous interrupt
		 * triggers which are pending (except for SGIs). This has no
		 * affect on level-triggered interrupts.
		 */
		if (ev_num != SDEI_EVENT_0)
			plat_ic_clear_interrupt_pending(map->intr);

		/* Map interrupt to EL3 and program the correct priority */
		plat_ic_set_interrupt_type(map->intr, INTR_TYPE_EL3);

		/* Program the appropriate interrupt priority */
		plat_ic_set_interrupt_priority(map->intr, sdei_event_priority(map));

		/*
		 * Set the routing mode for shared event as requested. We
		 * already ensure that shared events get bound to SPIs.
		 */
		if (is_event_shared(map)) {
			plat_ic_set_spi_routing(map->intr,
					((flags == SDEI_REGF_RM_ANY) ?
					 INTR_ROUTING_MODE_ANY :
					 INTR_ROUTING_MODE_PE),
					(u_register_t) mpidr);
		}
	}

	/* Populate event entries */
	set_sdei_entry(se, ep, arg, flags, mpidr);

	/* Increment register count */
	map->reg_count++;

	sdei_map_unlock(map);

	return 0;

fallback:
	/* Reinstate previous state */
	se->state = backup_state;

	sdei_map_unlock(map);

	return SDEI_EDENY;
}
434 
435 /* Enable SDEI event */
436 static int sdei_event_enable(int ev_num)
437 {
438 	sdei_ev_map_t *map;
439 	sdei_entry_t *se;
440 	int ret, before, after;
441 
442 	/* Check if valid event number */
443 	map = find_event_map(ev_num);
444 	if (!map)
445 		return SDEI_EINVAL;
446 
447 	se = get_event_entry(map);
448 	ret = SDEI_EDENY;
449 
450 	if (is_event_shared(map))
451 		sdei_map_lock(map);
452 
453 	before = GET_EV_STATE(se, ENABLED);
454 	if (!can_sdei_state_trans(se, DO_ENABLE))
455 		goto finish;
456 	after = GET_EV_STATE(se, ENABLED);
457 
458 	/*
459 	 * Enable interrupt for bound events only if there's a change in enabled
460 	 * state.
461 	 */
462 	if (is_map_bound(map) && (!before && after))
463 		plat_ic_enable_interrupt(map->intr);
464 
465 	ret = 0;
466 
467 finish:
468 	if (is_event_shared(map))
469 		sdei_map_unlock(map);
470 
471 	return ret;
472 }
473 
474 /* Disable SDEI event */
475 static int sdei_event_disable(int ev_num)
476 {
477 	sdei_ev_map_t *map;
478 	sdei_entry_t *se;
479 	int ret, before, after;
480 
481 	/* Check if valid event number */
482 	map = find_event_map(ev_num);
483 	if (!map)
484 		return SDEI_EINVAL;
485 
486 	se = get_event_entry(map);
487 	ret = SDEI_EDENY;
488 
489 	if (is_event_shared(map))
490 		sdei_map_lock(map);
491 
492 	before = GET_EV_STATE(se, ENABLED);
493 	if (!can_sdei_state_trans(se, DO_DISABLE))
494 		goto finish;
495 	after = GET_EV_STATE(se, ENABLED);
496 
497 	/*
498 	 * Disable interrupt for bound events only if there's a change in
499 	 * enabled state.
500 	 */
501 	if (is_map_bound(map) && (before && !after))
502 		plat_ic_disable_interrupt(map->intr);
503 
504 	ret = 0;
505 
506 finish:
507 	if (is_event_shared(map))
508 		sdei_map_unlock(map);
509 
510 	return ret;
511 }
512 
/*
 * Query SDEI event information (SDEI_EVENT_GET_INFO).
 *
 * The mutable entry fields (registered flag, routing flags, affinity)
 * are sampled under the map lock for shared events; the immutable map
 * properties (shared/signalable/critical) are read without the lock.
 *
 * Returns the requested datum, or SDEI_EINVAL / SDEI_EDENY per the
 * SDEI specification for invalid or impermissible queries.
 */
static uint64_t sdei_event_get_info(int ev_num, int info)
{
	sdei_entry_t *se;
	sdei_ev_map_t *map;

	unsigned int flags, registered;
	uint64_t affinity;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* Sample state under lock */
	registered = GET_EV_STATE(se, REGISTERED);
	flags = se->reg_flags;
	affinity = se->affinity;

	if (is_event_shared(map))
		sdei_map_unlock(map);

	switch (info) {
	case SDEI_INFO_EV_TYPE:
		return is_event_shared(map);

	case SDEI_INFO_EV_NOT_SIGNALED:
		return !is_event_signalable(map);

	case SDEI_INFO_EV_PRIORITY:
		return is_event_critical(map);

	case SDEI_INFO_EV_ROUTING_MODE:
		/* Routing mode is only meaningful for registered shared events */
		if (!is_event_shared(map))
			return SDEI_EINVAL;
		if (!registered)
			return SDEI_EDENY;
		return (flags == SDEI_REGF_RM_PE);

	case SDEI_INFO_EV_ROUTING_AFF:
		/* Affinity is only meaningful when routed to a specific PE */
		if (!is_event_shared(map))
			return SDEI_EINVAL;
		if (!registered)
			return SDEI_EDENY;
		if (flags != SDEI_REGF_RM_PE)
			return SDEI_EINVAL;
		return affinity;

	default:
		return SDEI_EINVAL;
	}
}
570 
/*
 * Unregister an SDEI event.
 *
 * Under the map lock, performs the DO_UNREGISTER state transition,
 * decrements the registration count, and — for bound events — hands the
 * backing interrupt back to Non-secure: the interrupt is disabled,
 * pending state cleared (except for SGIs), retyped to NS, and its
 * priority reset. The entry's registration data is then wiped.
 *
 * Returns 0 on success, SDEI_EINVAL for an unknown event, SDEI_EPEND if
 * the handler is currently running (unregister completes later), or
 * SDEI_EDENY if the state machine forbids the transition.
 */
static int sdei_event_unregister(int ev_num)
{
	int ret = 0;
	sdei_entry_t *se;
	sdei_ev_map_t *map;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	/*
	 * Even though unregister operation is per-event (additionally for
	 * private events, unregistration is required individually), it has to
	 * be serialised with respect to bind/release, which are global
	 * operations.  So we hold the lock throughout, unconditionally.
	 */
	sdei_map_lock(map);

	if (!can_sdei_state_trans(se, DO_UNREGISTER)) {
		/*
		 * Even if the call is invalid, and the handler is running (for
		 * example, having unregistered from a running handler earlier),
		 * return pending error code; otherwise, return deny.
		 */
		ret = GET_EV_STATE(se, RUNNING) ? SDEI_EPEND : SDEI_EDENY;

		goto finish;
	}

	map->reg_count--;
	if (is_event_private(map)) {
		/* Multiple calls to register are possible for private events */
		assert(map->reg_count >= 0);
	} else {
		/* Only single call to register is possible for shared events */
		assert(map->reg_count == 0);
	}

	/* Hand the backing interrupt back to the Non-secure world */
	if (is_map_bound(map)) {
		plat_ic_disable_interrupt(map->intr);

		/*
		 * Clear pending interrupt. Skip for SGIs as they may not be
		 * cleared on interrupt controllers.
		 */
		if (ev_num != SDEI_EVENT_0)
			plat_ic_clear_interrupt_pending(map->intr);

		assert(plat_ic_get_interrupt_type(map->intr) == INTR_TYPE_EL3);
		plat_ic_set_interrupt_type(map->intr, INTR_TYPE_NS);
		plat_ic_set_interrupt_priority(map->intr, LOWEST_INTR_PRIORITY);
	}

	clear_event_entries(se);

	/*
	 * If the handler is running at the time of unregister, return the
	 * pending error code.
	 */
	if (GET_EV_STATE(se, RUNNING))
		ret = SDEI_EPEND;

finish:
	sdei_map_unlock(map);

	return ret;
}
642 
643 /* Query status of an SDEI event */
644 static int sdei_event_status(int ev_num)
645 {
646 	sdei_ev_map_t *map;
647 	sdei_entry_t *se;
648 	sdei_state_t state;
649 
650 	/* Check if valid event number */
651 	map = find_event_map(ev_num);
652 	if (!map)
653 		return SDEI_EINVAL;
654 
655 	se = get_event_entry(map);
656 
657 	if (is_event_shared(map))
658 		sdei_map_lock(map);
659 
660 	/* State value directly maps to the expected return format */
661 	state = se->state;
662 
663 	if (is_event_shared(map))
664 		sdei_map_unlock(map);
665 
666 	return state;
667 }
668 
/*
 * Bind an SDEI dynamic event to an interrupt (SDEI_INTERRUPT_BIND).
 *
 * SGIs cannot be bound. SPIs bind to shared dynamic slots, PPIs to
 * private ones. If the interrupt is already bound to a dynamic event,
 * that event number is returned; binding over a platform (non-dynamic)
 * mapping is refused. Otherwise a free dynamic slot (intr ==
 * SDEI_DYN_IRQ) is claimed under the map lock; the loop retries because
 * another PE may race us to the same slot.
 *
 * Returns the bound event number, or SDEI_EINVAL / SDEI_EDENY /
 * SDEI_ENOMEM on failure.
 */
static int sdei_interrupt_bind(int intr_num)
{
	sdei_ev_map_t *map;
	int retry = 1, shared_mapping;

	/* SGIs are not allowed to be bound */
	if (plat_ic_is_sgi(intr_num))
		return SDEI_EINVAL;

	/* SPI => shared slot table; PPI => private slot table */
	shared_mapping = plat_ic_is_spi(intr_num);
	do {
		/*
		 * Bail out if there is already an event for this interrupt,
		 * either platform-defined or dynamic.
		 */
		map = find_event_map_by_intr(intr_num, shared_mapping);
		if (map) {
			if (is_map_dynamic(map)) {
				if (is_map_bound(map)) {
					/*
					 * Dynamic event, already bound. Return
					 * event number.
					 */
					return map->ev_num;
				}
			} else {
				/* Binding non-dynamic event */
				return SDEI_EINVAL;
			}
		}

		/*
		 * The interrupt is not bound yet. Try to find a free slot to
		 * bind it. Free dynamic mappings have their interrupt set as
		 * SDEI_DYN_IRQ.
		 */
		map = find_event_map_by_intr(SDEI_DYN_IRQ, shared_mapping);
		if (!map)
			return SDEI_ENOMEM;

		/* The returned mapping must be dynamic */
		assert(is_map_dynamic(map));

		/*
		 * We cannot assert for bound maps here, as we might be racing
		 * with another bind.
		 */

		/* The requested interrupt must already belong to NS */
		if (plat_ic_get_interrupt_type(intr_num) != INTR_TYPE_NS)
			return SDEI_EDENY;

		/*
		 * Interrupt programming and ownership transfer are deferred
		 * until register.
		 */

		/* Claim the slot under the lock; retry if another PE beat us */
		sdei_map_lock(map);
		if (!is_map_bound(map)) {
			map->intr = intr_num;
			set_map_bound(map);
			retry = 0;
		}
		sdei_map_unlock(map);
	} while (retry);

	return map->ev_num;
}
738 
/*
 * Release a dynamic SDEI event's interrupt binding
 * (SDEI_INTERRUPT_RELEASE).
 *
 * Only dynamic mappings can be released, and only after every PE has
 * unregistered (reg_count == 0), the state machine permits DO_RELEASE,
 * and the interrupt is not active. On success the slot's interrupt is
 * reset to SDEI_DYN_IRQ and the bound flag is cleared.
 *
 * Returns 0 on success, SDEI_EINVAL for an unknown/non-dynamic/unbound
 * event, SDEI_EDENY when still registered, running, or active.
 */
static int sdei_interrupt_release(int ev_num)
{
	int ret = 0;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
		return SDEI_EINVAL;

	if (!is_map_dynamic(map))
		return SDEI_EINVAL;

	se = get_event_entry(map);

	sdei_map_lock(map);

	/* Event must have been unregistered before release */
	if (map->reg_count != 0) {
		ret = SDEI_EDENY;
		goto finish;
	}

	/*
	 * Interrupt release never causes the state to change. We only check
	 * whether it's permissible or not.
	 */
	if (!can_sdei_state_trans(se, DO_RELEASE)) {
		ret = SDEI_EDENY;
		goto finish;
	}

	if (is_map_bound(map)) {
		/*
		 * Deny release if the interrupt is active, which means it's
		 * probably being acknowledged and handled elsewhere.
		 */
		if (plat_ic_get_interrupt_active(map->intr)) {
			ret = SDEI_EDENY;
			goto finish;
		}

		/*
		 * Interrupt programming and ownership transfer are already done
		 * during unregister.
		 */

		/* Return the slot to the free pool */
		map->intr = SDEI_DYN_IRQ;
		clr_map_bound(map);
	} else {
		SDEI_LOG("Error release bound:%d cnt:%d\n", is_map_bound(map),
				map->reg_count);
		ret = SDEI_EINVAL;
	}

finish:
	sdei_map_unlock(map);

	return ret;
}
801 
802 /* Perform reset of private SDEI events */
803 static int sdei_private_reset(void)
804 {
805 	sdei_ev_map_t *map;
806 	int ret = 0, final_ret = 0, i;
807 
808 	/* Unregister all private events */
809 	for_each_private_map(i, map) {
810 		/*
811 		 * The unregister can fail if the event is not registered, which
812 		 * is allowed, and a deny will be returned. But if the event is
813 		 * running or unregister pending, the call fails.
814 		 */
815 		ret = sdei_event_unregister(map->ev_num);
816 		if ((ret == SDEI_EPEND) && (final_ret == 0))
817 			final_ret = SDEI_EDENY;
818 	}
819 
820 	return final_ret;
821 }
822 
/*
 * Reset all shared SDEI events (SDEI_SHARED_RESET).
 *
 * Phase 1: unregister every shared event; an event whose handler is
 * running (unregister pending) makes the whole call fail with
 * SDEI_EDENY. Phase 2 (only if phase 1 fully succeeded): walk both the
 * private and shared mapping tables and release every dynamic binding.
 *
 * Returns 0 on success, otherwise the first error encountered.
 */
static int sdei_shared_reset(void)
{
	const sdei_mapping_t *mapping;
	sdei_ev_map_t *map;
	int ret = 0, final_ret = 0, i, j;

	/* Unregister all shared events */
	for_each_shared_map(i, map) {
		/*
		 * The unregister can fail if the event is not registered, which
		 * is allowed, and a deny will be returned. But if the event is
		 * running or unregister pending, the call fails.
		 */
		ret = sdei_event_unregister(map->ev_num);
		if ((ret == SDEI_EPEND) && (final_ret == 0))
			final_ret = SDEI_EDENY;
	}

	/* Don't release bindings unless every unregister succeeded */
	if (final_ret != 0)
		return final_ret;

	/*
	 * Loop through both private and shared mappings, and release all
	 * bindings.
	 */
	for_each_mapping_type(i, mapping) {
		iterate_mapping(mapping, j, map) {
			/*
			 * Release bindings for mappings that are dynamic and
			 * bound.
			 */
			if (is_map_dynamic(map) && is_map_bound(map)) {
				/*
				 * Any failure to release would mean there is at
				 * least a PE registered for the event.
				 */
				ret = sdei_interrupt_release(map->ev_num);
				if ((ret != 0) && (final_ret == 0))
					final_ret = ret;
			}
		}
	}

	return final_ret;
}
869 
870 /* Send a signal to another SDEI client PE */
871 int sdei_signal(int event, uint64_t target_pe)
872 {
873 	sdei_ev_map_t *map;
874 
875 	/* Only event 0 can be signalled */
876 	if (event != SDEI_EVENT_0)
877 		return SDEI_EINVAL;
878 
879 	/* Find mapping for event 0 */
880 	map = find_event_map(SDEI_EVENT_0);
881 	if (!map)
882 		return SDEI_EINVAL;
883 
884 	/* The event must be signalable */
885 	if (!is_event_signalable(map))
886 		return SDEI_EINVAL;
887 
888 	/* Validate target */
889 	if (plat_core_pos_by_mpidr(target_pe) < 0)
890 		return SDEI_EINVAL;
891 
892 	/* Raise SGI. Platform will validate target_pe */
893 	plat_ic_raise_el3_sgi(map->intr, (u_register_t) target_pe);
894 
895 	return 0;
896 }
897 
898 /* Query SDEI dispatcher features */
899 uint64_t sdei_features(unsigned int feature)
900 {
901 	if (feature == SDEI_FEATURE_BIND_SLOTS) {
902 		return FEATURE_BIND_SLOTS(num_dyn_priv_slots,
903 				num_dyn_shrd_slots);
904 	}
905 
906 	return SDEI_EINVAL;
907 }
908 
/*
 * Top-level SDEI SMC handler.
 *
 * Rejects calls originating from the Secure world or from an EL other
 * than the SDEI client EL, then dispatches on the function ID to the
 * corresponding SDEI operation. Each case logs entry/exit and returns
 * the operation's result via SMC_RET1 (COMPLETE additionally returns
 * with no modification of the restored context on success).
 *
 * Unrecognised function IDs fall through to SMC_UNK.
 */
uint64_t sdei_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{

	uint64_t x5;
	int ss = get_interrupt_src_ss(flags);
	int64_t ret;
	unsigned int resume = 0;

	/* SDEI calls are only accepted from the Non-secure world */
	if (ss != NON_SECURE)
		SMC_RET1(handle, SMC_UNK);

	/* Verify the caller EL */
	if (GET_EL(read_spsr_el3()) != sdei_client_el())
		SMC_RET1(handle, SMC_UNK);

	switch (smc_fid) {
	case SDEI_VERSION:
		SDEI_LOG("> VER\n");
		ret = sdei_version();
		SDEI_LOG("< VER:%lx\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_EVENT_REGISTER:
		/* Fifth argument arrives in the saved GP register context */
		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
		SDEI_LOG("> REG(n:%d e:%lx a:%lx f:%x m:%lx)\n", (int) x1,
				x2, x3, (int) x4, x5);
		ret = sdei_event_register(x1, x2, x3, x4, x5);
		SDEI_LOG("< REG:%ld\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_EVENT_ENABLE:
		SDEI_LOG("> ENABLE(n:%d)\n", (int) x1);
		ret = sdei_event_enable(x1);
		SDEI_LOG("< ENABLE:%ld\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_EVENT_DISABLE:
		SDEI_LOG("> DISABLE(n:%d)\n", (int) x1);
		ret = sdei_event_disable(x1);
		SDEI_LOG("< DISABLE:%ld\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_EVENT_CONTEXT:
		SDEI_LOG("> CTX(p:%d):%lx\n", (int) x1, read_mpidr_el1());
		ret = sdei_event_context(handle, x1);
		SDEI_LOG("< CTX:%ld\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_EVENT_COMPLETE_AND_RESUME:
		resume = 1;
		/* Fallthrough: shares the COMPLETE path with resume set */

	case SDEI_EVENT_COMPLETE:
		SDEI_LOG("> COMPLETE(r:%d sta/ep:%lx):%lx\n", resume, x1,
				read_mpidr_el1());
		ret = sdei_event_complete(resume, x1);
		SDEI_LOG("< COMPLETE:%lx\n", ret);

		/*
		 * Set error code only if the call failed. If the call
		 * succeeded, we discard the dispatched context, and restore the
		 * interrupted context to a pristine condition, and therefore
		 * shouldn't be modified. We don't return to the caller in this
		 * case anyway.
		 */
		if (ret)
			SMC_RET1(handle, ret);

		SMC_RET0(handle);

	case SDEI_EVENT_STATUS:
		SDEI_LOG("> STAT(n:%d)\n", (int) x1);
		ret = sdei_event_status(x1);
		SDEI_LOG("< STAT:%ld\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_EVENT_GET_INFO:
		SDEI_LOG("> INFO(n:%d, %d)\n", (int) x1, (int) x2);
		ret = sdei_event_get_info(x1, x2);
		SDEI_LOG("< INFO:%ld\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_EVENT_UNREGISTER:
		SDEI_LOG("> UNREG(n:%d)\n", (int) x1);
		ret = sdei_event_unregister(x1);
		SDEI_LOG("< UNREG:%ld\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_PE_UNMASK:
		SDEI_LOG("> UNMASK:%lx\n", read_mpidr_el1());
		sdei_pe_unmask();
		SDEI_LOG("< UNMASK:%d\n", 0);
		SMC_RET1(handle, 0);

	case SDEI_PE_MASK:
		SDEI_LOG("> MASK:%lx\n", read_mpidr_el1());
		ret = sdei_pe_mask();
		SDEI_LOG("< MASK:%ld\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_INTERRUPT_BIND:
		SDEI_LOG("> BIND(%d)\n", (int) x1);
		ret = sdei_interrupt_bind(x1);
		SDEI_LOG("< BIND:%ld\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_INTERRUPT_RELEASE:
		SDEI_LOG("> REL(%d)\n", (int) x1);
		ret = sdei_interrupt_release(x1);
		SDEI_LOG("< REL:%ld\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_SHARED_RESET:
		SDEI_LOG("> S_RESET():%lx\n", read_mpidr_el1());
		ret = sdei_shared_reset();
		SDEI_LOG("< S_RESET:%ld\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_PRIVATE_RESET:
		SDEI_LOG("> P_RESET():%lx\n", read_mpidr_el1());
		ret = sdei_private_reset();
		SDEI_LOG("< P_RESET:%ld\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_EVENT_ROUTING_SET:
		SDEI_LOG("> ROUTE_SET(n:%d f:%lx aff:%lx)\n", (int) x1, x2, x3);
		ret = sdei_event_routing_set(x1, x2, x3);
		SDEI_LOG("< ROUTE_SET:%ld\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_FEATURES:
		SDEI_LOG("> FTRS(f:%lx)\n", x1);
		ret = sdei_features(x1);
		SDEI_LOG("< FTRS:%lx\n", ret);
		SMC_RET1(handle, ret);

	case SDEI_EVENT_SIGNAL:
		SDEI_LOG("> SIGNAL(e:%lx t:%lx)\n", x1, x2);
		ret = sdei_signal(x1, x2);
		SDEI_LOG("< SIGNAL:%ld\n", ret);
		SMC_RET1(handle, ret);

	default:
		/* Do nothing in default case */
		break;
	}

	WARN("Unimplemented SDEI Call: 0x%x\n", smc_fid);
	SMC_RET1(handle, SMC_UNK);
}
1066 
/*
 * Subscribe to the PSCI cpu-on-finish pubsub event so that each CPU
 * re-initialises its per-CPU SDEI configuration (private events cleared,
 * events masked) whenever it is powered on.
 */
SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, sdei_cpu_on_init);
1069