/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bl31.h>
#include <bl_common.h>
#include <cassert.h>
#include <context.h>
#include <context_mgmt.h>
#include <debug.h>
#include <ehf.h>
#include <interrupt_mgmt.h>
#include <platform.h>
#include <pubsub.h>
#include <runtime_svc.h>
#include <sdei.h>
#include <stddef.h>
#include <string.h>
#include <utils.h>
#include "sdei_private.h"

#define MAJOR_VERSION	1
#define MINOR_VERSION	0
#define VENDOR_VERSION	0

#define MAKE_SDEI_VERSION(_major, _minor, _vendor) \
	((((unsigned long long)(_major)) << 48) | \
	 (((unsigned long long)(_minor)) << 32) | \
	 (_vendor))
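
/*
 * Worked example: with the values above, MAKE_SDEI_VERSION(1, 0, 0)
 * evaluates to 0x0001000000000000ULL, i.e. the major version in bits
 * [63:48], the minor version in bits [47:32], and the vendor-defined
 * version in bits [31:0].
 */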

#define LOWEST_INTR_PRIORITY		0xff

#define is_valid_affinity(_mpidr)	(plat_core_pos_by_mpidr(_mpidr) >= 0)

CASSERT(PLAT_SDEI_CRITICAL_PRI < PLAT_SDEI_NORMAL_PRI,
		sdei_critical_must_have_higher_priority);

static unsigned int num_dyn_priv_slots, num_dyn_shrd_slots;

/* Initialise SDEI map entries */
static void init_map(sdei_ev_map_t *map)
{
	map->reg_count = 0;
}

/* Convert mapping to SDEI class */
sdei_class_t map_to_class(sdei_ev_map_t *map)
{
	return is_event_critical(map) ? SDEI_CRITICAL : SDEI_NORMAL;
}

/* Clear SDEI event entries, except the state */
static void clear_event_entries(sdei_entry_t *se)
{
	se->ep = 0;
	se->arg = 0;
	se->affinity = 0;
	se->reg_flags = 0;
}

/* Perform CPU-specific state initialisation */
static void *sdei_cpu_on_init(const void *arg)
{
	int i;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	/* Initialise private mappings on this CPU */
	for_each_private_map(i, map) {
		se = get_event_entry(map);
		clear_event_entries(se);
		se->state = 0;
	}

	SDEI_LOG("Private events initialized on %lx\n", read_mpidr_el1());

	/* All PEs start with SDEI events masked */
	sdei_pe_mask();

	return 0;
}

/* Initialise an SDEI class */
void sdei_class_init(sdei_class_t class)
{
	unsigned int i, zero_found __unused = 0;
	int ev_num_so_far __unused;
	sdei_ev_map_t *map;

	/* Sanity check and configuration of shared events */
	ev_num_so_far = -1;
	for_each_shared_map(i, map) {
#if ENABLE_ASSERTIONS
		/* Ensure mappings are sorted */
		assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));

		ev_num_so_far = map->ev_num;

		/* Event 0 must not be shared */
		assert(map->ev_num != SDEI_EVENT_0);

		/* Check for valid event */
		assert(map->ev_num >= 0);

		/* Make sure it's a shared event */
		assert(is_event_shared(map));

		/* No shared mapping should have the signalable property */
		assert(!is_event_signalable(map));
#endif

		/* Skip mappings that belong to a different class */
		if (map_to_class(map) != class)
			continue;

		/* Platform events are always bound, so set the bound flag */
		if (is_map_dynamic(map)) {
			assert(map->intr == SDEI_DYN_IRQ);
			num_dyn_shrd_slots++;
		} else {
			/* Shared mappings must be bound to a shared interrupt */
			assert(plat_ic_is_spi(map->intr));
			set_map_bound(map);
		}

		init_map(map);
	}

	/* Sanity check and configuration of private events for this CPU */
	ev_num_so_far = -1;
	for_each_private_map(i, map) {
#if ENABLE_ASSERTIONS
		/* Ensure mappings are sorted */
		assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));

		ev_num_so_far = map->ev_num;

		if (map->ev_num == SDEI_EVENT_0) {
			zero_found = 1;

			/* Event 0 must be a Secure SGI */
			assert(is_secure_sgi(map->intr));

			/*
			 * Event 0 can only have the signalable flag (apart
			 * from being private).
			 */
			assert(map->map_flags == (SDEI_MAPF_SIGNALABLE |
						SDEI_MAPF_PRIVATE));
		} else {
			/* No other mapping should have the signalable property */
			assert(!is_event_signalable(map));
		}

		/* Check for valid event */
		assert(map->ev_num >= 0);

		/* Make sure it's a private event */
		assert(is_event_private(map));
#endif

		/* Skip mappings that belong to a different class */
		if (map_to_class(map) != class)
			continue;

		/* Platform events are always bound, so set the bound flag */
		if (map->ev_num != SDEI_EVENT_0) {
			if (is_map_dynamic(map)) {
				assert(map->intr == SDEI_DYN_IRQ);
				num_dyn_priv_slots++;
			} else {
				/*
				 * Private mappings must be bound to a private
				 * interrupt.
				 */
				assert(plat_ic_is_ppi(map->intr));
				set_map_bound(map);
			}
		}

		init_map(map);
	}

	/* Ensure event 0 is in the mapping */
	assert(zero_found);

	sdei_cpu_on_init(NULL);
}

/* SDEI dispatcher initialisation */
void sdei_init(void)
{
	sdei_class_init(SDEI_CRITICAL);
	sdei_class_init(SDEI_NORMAL);

	/* Register priority level handlers */
	ehf_register_priority_handler(PLAT_SDEI_CRITICAL_PRI,
			sdei_intr_handler);
	ehf_register_priority_handler(PLAT_SDEI_NORMAL_PRI,
			sdei_intr_handler);
}
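
/*
 * Illustrative platform configuration (hypothetical values): the EHF
 * priorities registered above must satisfy the CASSERT at the top of
 * this file. On the GIC, a numerically lower value is a higher priority,
 * so a platform might define, for example:
 *
 *	#define PLAT_SDEI_CRITICAL_PRI		0x60
 *	#define PLAT_SDEI_NORMAL_PRI		0x70
 */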

/* Populate SDEI event entry */
static void set_sdei_entry(sdei_entry_t *se, uint64_t ep, uint64_t arg,
		unsigned int flags, uint64_t affinity)
{
	assert(se != NULL);

	se->ep = ep;
	se->arg = arg;
	se->affinity = (affinity & MPIDR_AFFINITY_MASK);
	se->reg_flags = flags;
}

static unsigned long long sdei_version(void)
{
	return MAKE_SDEI_VERSION(MAJOR_VERSION, MINOR_VERSION, VENDOR_VERSION);
}

/* Validate flags and MPIDR values for REGISTER and ROUTING_SET calls */
static int validate_flags(uint64_t flags, uint64_t mpidr)
{
	/* Validate flags */
	switch (flags) {
	case SDEI_REGF_RM_PE:
		if (!is_valid_affinity(mpidr))
			return SDEI_EINVAL;
		break;
	case SDEI_REGF_RM_ANY:
		break;
	default:
		/* Unknown flags */
		return SDEI_EINVAL;
	}

	return 0;
}
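
/*
 * Illustrative behaviour of the check above (0x80000000 is a
 * hypothetical MPIDR):
 *
 *	validate_flags(SDEI_REGF_RM_PE, 0x80000000);	// 0 if MPIDR is valid
 *	validate_flags(SDEI_REGF_RM_ANY, 0);		// 0; MPIDR is ignored
 *	validate_flags(0xbad, 0);			// SDEI_EINVAL
 */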

/* Set routing of an SDEI event */
static int sdei_event_routing_set(int ev_num, uint64_t flags, uint64_t mpidr)
{
	int ret, routing;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	ret = validate_flags(flags, mpidr);
	if (ret)
		return ret;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
		return SDEI_EINVAL;

	/* The event must not be private */
	if (is_event_private(map))
		return SDEI_EINVAL;

	se = get_event_entry(map);

	sdei_map_lock(map);

	if (!is_map_bound(map) || is_event_private(map)) {
		ret = SDEI_EINVAL;
		goto finish;
	}

	if (!can_sdei_state_trans(se, DO_ROUTING)) {
		ret = SDEI_EDENY;
		goto finish;
	}

	/* Choose appropriate routing */
	routing = (flags == SDEI_REGF_RM_ANY) ? INTR_ROUTING_MODE_ANY :
		INTR_ROUTING_MODE_PE;

	/* Update event registration flag */
	se->reg_flags = flags;

	/*
	 * ROUTING_SET is permissible only when the event's composite state is
	 * 'registered, disabled, and not running'. This means that the
	 * interrupt is currently disabled, and not active.
	 */
	plat_ic_set_spi_routing(map->intr, routing, (u_register_t) mpidr);

finish:
	sdei_map_unlock(map);

	return ret;
}
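
/*
 * Illustrative call order (804 is a hypothetical shared event number):
 * since routing can only be changed while the event is registered but
 * disabled and not running, a client would typically issue:
 *
 *	SDEI_EVENT_REGISTER(804, ep, arg, SDEI_REGF_RM_ANY, 0);
 *	SDEI_EVENT_ROUTING_SET(804, SDEI_REGF_RM_PE, target_mpidr);
 *	SDEI_EVENT_ENABLE(804);
 */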

/* Register handler and argument for an SDEI event */
static int sdei_event_register(int ev_num, uint64_t ep, uint64_t arg,
		uint64_t flags, uint64_t mpidr)
{
	int ret;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	sdei_state_t backup_state;

	if (!ep || (plat_sdei_validate_entry_point(ep, sdei_client_el()) != 0))
		return SDEI_EINVAL;

	ret = validate_flags(flags, mpidr);
	if (ret)
		return ret;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
		return SDEI_EINVAL;

	/* Private events always target the PE */
	if (is_event_private(map))
		flags = SDEI_REGF_RM_PE;

	se = get_event_entry(map);

	/*
	 * Even though the register operation is per-event (and, for private
	 * events, additionally per-PE), it has to be serialised with respect
	 * to bind/release, which are global operations. So we hold the lock
	 * throughout, unconditionally.
	 */
	sdei_map_lock(map);

	backup_state = se->state;
	if (!can_sdei_state_trans(se, DO_REGISTER))
		goto fallback;

	/*
	 * When registering for dynamic events, make sure it's been bound
	 * already. This has to be the case as, without binding, the client
	 * can't know about the event number to register for.
	 */
	if (is_map_dynamic(map) && !is_map_bound(map))
		goto fallback;

	if (is_event_private(map)) {
		/* Multiple calls to register are possible for private events */
		assert(map->reg_count >= 0);
	} else {
		/* Only a single call to register is possible for shared events */
		assert(map->reg_count == 0);
	}

	if (is_map_bound(map)) {
		/* Meanwhile, did any PE ACK the interrupt? */
		if (plat_ic_get_interrupt_active(map->intr))
			goto fallback;

		/* The interrupt must currently be owned by Non-secure */
		if (plat_ic_get_interrupt_type(map->intr) != INTR_TYPE_NS)
			goto fallback;

		/*
		 * Disable forwarding of new interrupt triggers to the CPU
		 * interface.
		 */
		plat_ic_disable_interrupt(map->intr);

		/*
		 * Any events that are triggered after register and before
		 * enable should remain pending. Clear any previous interrupt
		 * triggers which are pending (except for SGIs). This has no
		 * effect on level-triggered interrupts.
		 */
		if (ev_num != SDEI_EVENT_0)
			plat_ic_clear_interrupt_pending(map->intr);

		/* Map the interrupt to EL3 */
		plat_ic_set_interrupt_type(map->intr, INTR_TYPE_EL3);

		/* Program the appropriate interrupt priority */
		plat_ic_set_interrupt_priority(map->intr, sdei_event_priority(map));

		/*
		 * Set the routing mode for the shared event as requested. We
		 * already ensure that shared events get bound to SPIs.
		 */
		if (is_event_shared(map)) {
			plat_ic_set_spi_routing(map->intr,
					((flags == SDEI_REGF_RM_ANY) ?
					 INTR_ROUTING_MODE_ANY :
					 INTR_ROUTING_MODE_PE),
					(u_register_t) mpidr);
		}
	}

	/* Populate event entries */
	set_sdei_entry(se, ep, arg, flags, mpidr);

	/* Increment register count */
	map->reg_count++;

	sdei_map_unlock(map);

	return 0;

fallback:
	/* Reinstate previous state */
	se->state = backup_state;

	sdei_map_unlock(map);

	return SDEI_EDENY;
}
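
/*
 * Illustrative client-side registration (a sketch; 'ep' and 'arg' are
 * hypothetical non-secure values, passed via whatever SMC conduit the
 * client OS uses):
 *
 *	x0 = SDEI_EVENT_REGISTER;
 *	x1 = ev_num;		// e.g. returned by SDEI_INTERRUPT_BIND
 *	x2 = ep;		// handler entry point, validated above
 *	x3 = arg;		// cookie passed back to the handler
 *	x4 = SDEI_REGF_RM_ANY;	// or SDEI_REGF_RM_PE
 *	x5 = 0;			// target MPIDR, used only with RM_PE
 */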

/* Enable SDEI event */
static int sdei_event_enable(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	int ret, before, after;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
		return SDEI_EINVAL;

	se = get_event_entry(map);
	ret = SDEI_EDENY;

	if (is_event_shared(map))
		sdei_map_lock(map);

	before = GET_EV_STATE(se, ENABLED);
	if (!can_sdei_state_trans(se, DO_ENABLE))
		goto finish;
	after = GET_EV_STATE(se, ENABLED);

	/*
	 * Enable interrupt for bound events only if there's a change in
	 * enabled state.
	 */
	if (is_map_bound(map) && (!before && after))
		plat_ic_enable_interrupt(map->intr);

	ret = 0;

finish:
	if (is_event_shared(map))
		sdei_map_unlock(map);

	return ret;
}

/* Disable SDEI event */
static int sdei_event_disable(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	int ret, before, after;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
		return SDEI_EINVAL;

	se = get_event_entry(map);
	ret = SDEI_EDENY;

	if (is_event_shared(map))
		sdei_map_lock(map);

	before = GET_EV_STATE(se, ENABLED);
	if (!can_sdei_state_trans(se, DO_DISABLE))
		goto finish;
	after = GET_EV_STATE(se, ENABLED);

	/*
	 * Disable interrupt for bound events only if there's a change in
	 * enabled state.
	 */
	if (is_map_bound(map) && (before && !after))
		plat_ic_disable_interrupt(map->intr);

	ret = 0;

finish:
	if (is_event_shared(map))
		sdei_map_unlock(map);

	return ret;
}

/* Query SDEI event information */
static uint64_t sdei_event_get_info(int ev_num, int info)
{
	sdei_entry_t *se;
	sdei_ev_map_t *map;

	unsigned int flags, registered;
	uint64_t affinity;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* Sample state under lock */
	registered = GET_EV_STATE(se, REGISTERED);
	flags = se->reg_flags;
	affinity = se->affinity;

	if (is_event_shared(map))
		sdei_map_unlock(map);

	switch (info) {
	case SDEI_INFO_EV_TYPE:
		return is_event_shared(map);

	case SDEI_INFO_EV_NOT_SIGNALED:
		return !is_event_signalable(map);

	case SDEI_INFO_EV_PRIORITY:
		return is_event_critical(map);

	case SDEI_INFO_EV_ROUTING_MODE:
		if (!is_event_shared(map))
			return SDEI_EINVAL;
		if (!registered)
			return SDEI_EDENY;
		return (flags == SDEI_REGF_RM_PE);

	case SDEI_INFO_EV_ROUTING_AFF:
		if (!is_event_shared(map))
			return SDEI_EINVAL;
		if (!registered)
			return SDEI_EDENY;
		if (flags != SDEI_REGF_RM_PE)
			return SDEI_EINVAL;
		return affinity;

	default:
		return SDEI_EINVAL;
	}
}

/* Unregister an SDEI event */
static int sdei_event_unregister(int ev_num)
{
	int ret = 0;
	sdei_entry_t *se;
	sdei_ev_map_t *map;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	/*
	 * Even though the unregister operation is per-event (and, for private
	 * events, additionally per-PE), it has to be serialised with respect
	 * to bind/release, which are global operations. So we hold the lock
	 * throughout, unconditionally.
	 */
	sdei_map_lock(map);

	if (!can_sdei_state_trans(se, DO_UNREGISTER)) {
		/*
		 * If the call is invalid while the handler is running (for
		 * example, the event was already unregistered from within its
		 * running handler), return the pending error code; otherwise,
		 * return deny.
		 */
		ret = GET_EV_STATE(se, RUNNING) ? SDEI_EPEND : SDEI_EDENY;

		goto finish;
	}

	map->reg_count--;
	if (is_event_private(map)) {
		/* Private events are registered per-PE, so a count may remain */
		assert(map->reg_count >= 0);
	} else {
		/* Shared events have only a single registration to remove */
		assert(map->reg_count == 0);
	}

	if (is_map_bound(map)) {
		plat_ic_disable_interrupt(map->intr);

		/*
		 * Clear pending interrupt. Skip for SGIs as they may not be
		 * cleared on interrupt controllers.
		 */
		if (ev_num != SDEI_EVENT_0)
			plat_ic_clear_interrupt_pending(map->intr);

		assert(plat_ic_get_interrupt_type(map->intr) == INTR_TYPE_EL3);
		plat_ic_set_interrupt_type(map->intr, INTR_TYPE_NS);
		plat_ic_set_interrupt_priority(map->intr, LOWEST_INTR_PRIORITY);
	}

	clear_event_entries(se);

	/*
	 * If the handler is running at the time of unregister, return the
	 * pending error code.
	 */
	if (GET_EV_STATE(se, RUNNING))
		ret = SDEI_EPEND;

finish:
	sdei_map_unlock(map);

	return ret;
}

/* Query status of an SDEI event */
static int sdei_event_status(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	sdei_state_t state;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* State value directly maps to the expected return format */
	state = se->state;

	if (is_event_shared(map))
		sdei_map_unlock(map);

	return state;
}
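
/*
 * The composite state returned above is assumed to follow the SDEI
 * specification's EVENT_STATUS layout: bit 0 is 'registered', bit 1 is
 * 'enabled' and bit 2 is 'running'. For example, a return value of 3
 * means the event is registered and enabled, but its handler is not
 * currently running.
 */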

/* Bind an SDEI event to an interrupt */
static int sdei_interrupt_bind(int intr_num)
{
	sdei_ev_map_t *map;
	int retry = 1, shared_mapping;

	/* SGIs are not allowed to be bound */
	if (plat_ic_is_sgi(intr_num))
		return SDEI_EINVAL;

	shared_mapping = plat_ic_is_spi(intr_num);
	do {
		/*
		 * Bail out if there is already an event for this interrupt,
		 * either platform-defined or dynamic.
		 */
		map = find_event_map_by_intr(intr_num, shared_mapping);
		if (map) {
			if (is_map_dynamic(map)) {
				if (is_map_bound(map)) {
					/*
					 * Dynamic event, already bound. Return
					 * the event number.
					 */
					return map->ev_num;
				}
			} else {
				/* Binding a non-dynamic event is not allowed */
				return SDEI_EINVAL;
			}
		}

		/*
		 * The interrupt is not bound yet. Try to find a free slot to
		 * bind it. Free dynamic mappings have their interrupt set as
		 * SDEI_DYN_IRQ.
		 */
		map = find_event_map_by_intr(SDEI_DYN_IRQ, shared_mapping);
		if (!map)
			return SDEI_ENOMEM;

		/* The returned mapping must be dynamic */
		assert(is_map_dynamic(map));

		/*
		 * We cannot assert for bound maps here, as we might be racing
		 * with another bind.
		 */

		/* The requested interrupt must already belong to Non-secure */
		if (plat_ic_get_interrupt_type(intr_num) != INTR_TYPE_NS)
			return SDEI_EDENY;

		/*
		 * Interrupt programming and ownership transfer are deferred
		 * until register.
		 */

		sdei_map_lock(map);
		if (!is_map_bound(map)) {
			map->intr = intr_num;
			set_map_bound(map);
			retry = 0;
		}
		sdei_map_unlock(map);
	} while (retry);

	return map->ev_num;
}
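
/*
 * Illustrative flow (100 is a hypothetical interrupt number): a client
 * that owns non-secure SPI 100 can hand it to the dispatcher and obtain
 * a dynamic event number for subsequent calls:
 *
 *	ev = SDEI_INTERRUPT_BIND(100);	// returns a dynamic event number
 *	if (ev >= 0) {
 *		SDEI_EVENT_REGISTER(ev, ep, arg, SDEI_REGF_RM_ANY, 0);
 *		SDEI_EVENT_ENABLE(ev);
 *	}
 */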

/* Release an SDEI event previously bound to an interrupt */
static int sdei_interrupt_release(int ev_num)
{
	int ret = 0;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (!map)
		return SDEI_EINVAL;

	if (!is_map_dynamic(map))
		return SDEI_EINVAL;

	se = get_event_entry(map);

	sdei_map_lock(map);

	/* Event must have been unregistered before release */
	if (map->reg_count != 0) {
		ret = SDEI_EDENY;
		goto finish;
	}

	/*
	 * Interrupt release never causes the state to change. We only check
	 * whether it's permissible or not.
	 */
	if (!can_sdei_state_trans(se, DO_RELEASE)) {
		ret = SDEI_EDENY;
		goto finish;
	}

	if (is_map_bound(map)) {
		/*
		 * Deny release if the interrupt is active, which means it's
		 * probably being acknowledged and handled elsewhere.
		 */
		if (plat_ic_get_interrupt_active(map->intr)) {
			ret = SDEI_EDENY;
			goto finish;
		}

		/*
		 * Interrupt programming and ownership transfer are already done
		 * during unregister.
		 */

		map->intr = SDEI_DYN_IRQ;
		clr_map_bound(map);
	} else {
		SDEI_LOG("Error release bound:%d cnt:%d\n", is_map_bound(map),
				map->reg_count);
		ret = SDEI_EINVAL;
	}

finish:
	sdei_map_unlock(map);

	return ret;
}

/* Perform reset of private SDEI events */
static int sdei_private_reset(void)
{
	sdei_ev_map_t *map;
	int ret = 0, final_ret = 0, i;

	/* Unregister all private events */
	for_each_private_map(i, map) {
		/*
		 * Unregistering an event that isn't registered is allowed, and
		 * results in a deny, which is ignored here. However, if the
		 * event is running, or has its unregister pending, the reset
		 * call fails with the pending error code.
		 */
		ret = sdei_event_unregister(map->ev_num);
		if ((ret == SDEI_EPEND) && (final_ret == 0))
			final_ret = ret;
	}

	return final_ret;
}

/* Perform reset of shared SDEI events */
static int sdei_shared_reset(void)
{
	const sdei_mapping_t *mapping;
	sdei_ev_map_t *map;
	int ret = 0, final_ret = 0, i, j;

	/* Unregister all shared events */
	for_each_shared_map(i, map) {
		/*
		 * Unregistering an event that isn't registered is allowed, and
		 * results in a deny, which is ignored here. However, if the
		 * event is running, or has its unregister pending, the reset
		 * call fails with the pending error code.
		 */
		ret = sdei_event_unregister(map->ev_num);
		if ((ret == SDEI_EPEND) && (final_ret == 0))
			final_ret = ret;
	}

	if (final_ret != 0)
		return final_ret;

	/*
	 * Loop through both private and shared mappings, and release all
	 * bindings.
	 */
	for_each_mapping_type(i, mapping) {
		iterate_mapping(mapping, j, map) {
			/*
			 * Release bindings for mappings that are dynamic and
			 * bound.
			 */
			if (is_map_dynamic(map) && is_map_bound(map)) {
				/*
				 * Any failure to release would mean there is
				 * at least one PE registered for the event.
				 */
				ret = sdei_interrupt_release(map->ev_num);
				if ((ret != 0) && (final_ret == 0))
					final_ret = ret;
			}
		}
	}

	return final_ret;
}

/* Send a signal to another SDEI client PE */
int sdei_signal(int event, uint64_t target_pe)
{
	sdei_ev_map_t *map;

	/* Only event 0 can be signalled */
	if (event != SDEI_EVENT_0)
		return SDEI_EINVAL;

	/* Find mapping for event 0 */
	map = find_event_map(SDEI_EVENT_0);
	if (!map)
		return SDEI_EINVAL;

	/* The event must be signalable */
	if (!is_event_signalable(map))
		return SDEI_EINVAL;

	/* Validate target */
	if (plat_core_pos_by_mpidr(target_pe) < 0)
		return SDEI_EINVAL;

	/* Raise SGI. The platform will further validate target_pe */
	plat_ic_raise_el3_sgi(map->intr, (u_register_t) target_pe);

	return 0;
}
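
/*
 * Illustrative use (0x80000001 is a hypothetical target MPIDR): another
 * PE signals event 0 to that PE with:
 *
 *	SDEI_EVENT_SIGNAL(0, 0x80000001);
 */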

/* Query SDEI dispatcher features */
uint64_t sdei_features(unsigned int feature)
{
	if (feature == SDEI_FEATURE_BIND_SLOTS) {
		return FEATURE_BIND_SLOTS(num_dyn_priv_slots,
				num_dyn_shrd_slots);
	}

	return SDEI_EINVAL;
}
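
/*
 * Per the SDEI specification, the BIND_SLOTS result packs the number of
 * private bind slots into bits [31:16] and shared bind slots into bits
 * [15:0]; FEATURE_BIND_SLOTS (from sdei_private.h) is assumed to follow
 * that layout. For example, 2 private and 3 shared dynamic slots would
 * yield 0x20003.
 */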

/* SDEI top level handler for servicing SMCs */
uint64_t sdei_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	uint64_t x5;
	int ss = get_interrupt_src_ss(flags);
	int64_t ret;
	unsigned int resume = 0;

	if (ss != NON_SECURE)
		SMC_RET1(handle, SMC_UNK);

	/* Verify the caller EL */
	if (GET_EL(read_spsr_el3()) != sdei_client_el())
		SMC_RET1(handle, SMC_UNK);

	switch (smc_fid) {
	case SDEI_VERSION:
		SDEI_LOG("> VER\n");
		ret = sdei_version();
		SDEI_LOG("< VER:%lx\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_EVENT_REGISTER:
		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
		SDEI_LOG("> REG(n:%d e:%lx a:%lx f:%x m:%lx)\n", (int) x1,
				x2, x3, (int) x4, x5);
		ret = sdei_event_register(x1, x2, x3, x4, x5);
		SDEI_LOG("< REG:%ld\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_EVENT_ENABLE:
		SDEI_LOG("> ENABLE(n:%d)\n", (int) x1);
		ret = sdei_event_enable(x1);
		SDEI_LOG("< ENABLE:%ld\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_EVENT_DISABLE:
		SDEI_LOG("> DISABLE(n:%d)\n", (int) x1);
		ret = sdei_event_disable(x1);
		SDEI_LOG("< DISABLE:%ld\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_EVENT_CONTEXT:
		SDEI_LOG("> CTX(p:%d):%lx\n", (int) x1, read_mpidr_el1());
		ret = sdei_event_context(handle, x1);
		SDEI_LOG("< CTX:%ld\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_EVENT_COMPLETE_AND_RESUME:
		resume = 1;
		/* Fall through */

	case SDEI_EVENT_COMPLETE:
		SDEI_LOG("> COMPLETE(r:%d sta/ep:%lx):%lx\n", resume, x1,
				read_mpidr_el1());
		ret = sdei_event_complete(resume, x1);
		SDEI_LOG("< COMPLETE:%lx\n", ret);

		/*
		 * Set the error code only if the call failed. If the call
		 * succeeded, the dispatched context is discarded, and the
		 * interrupted context is restored to a pristine condition,
		 * which therefore mustn't be modified. We don't return to
		 * the caller in that case anyway.
		 */
		if (ret)
			SMC_RET1(handle, ret);

		SMC_RET0(handle);
		break;

	case SDEI_EVENT_STATUS:
		SDEI_LOG("> STAT(n:%d)\n", (int) x1);
		ret = sdei_event_status(x1);
		SDEI_LOG("< STAT:%ld\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_EVENT_GET_INFO:
		SDEI_LOG("> INFO(n:%d, %d)\n", (int) x1, (int) x2);
		ret = sdei_event_get_info(x1, x2);
		SDEI_LOG("< INFO:%ld\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_EVENT_UNREGISTER:
		SDEI_LOG("> UNREG(n:%d)\n", (int) x1);
		ret = sdei_event_unregister(x1);
		SDEI_LOG("< UNREG:%ld\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_PE_UNMASK:
		SDEI_LOG("> UNMASK:%lx\n", read_mpidr_el1());
		sdei_pe_unmask();
		SDEI_LOG("< UNMASK:%d\n", 0);
		SMC_RET1(handle, 0);
		break;

	case SDEI_PE_MASK:
		SDEI_LOG("> MASK:%lx\n", read_mpidr_el1());
		ret = sdei_pe_mask();
		SDEI_LOG("< MASK:%ld\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_INTERRUPT_BIND:
		SDEI_LOG("> BIND(%d)\n", (int) x1);
		ret = sdei_interrupt_bind(x1);
		SDEI_LOG("< BIND:%ld\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_INTERRUPT_RELEASE:
		SDEI_LOG("> REL(%d)\n", (int) x1);
		ret = sdei_interrupt_release(x1);
		SDEI_LOG("< REL:%ld\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_SHARED_RESET:
		SDEI_LOG("> S_RESET():%lx\n", read_mpidr_el1());
		ret = sdei_shared_reset();
		SDEI_LOG("< S_RESET:%ld\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_PRIVATE_RESET:
		SDEI_LOG("> P_RESET():%lx\n", read_mpidr_el1());
		ret = sdei_private_reset();
		SDEI_LOG("< P_RESET:%ld\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_EVENT_ROUTING_SET:
		SDEI_LOG("> ROUTE_SET(n:%d f:%lx aff:%lx)\n", (int) x1, x2, x3);
		ret = sdei_event_routing_set(x1, x2, x3);
		SDEI_LOG("< ROUTE_SET:%ld\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_FEATURES:
		SDEI_LOG("> FTRS(f:%lx)\n", x1);
		ret = sdei_features(x1);
		SDEI_LOG("< FTRS:%lx\n", ret);
		SMC_RET1(handle, ret);
		break;

	case SDEI_EVENT_SIGNAL:
		SDEI_LOG("> SIGNAL(e:%lx t:%lx)\n", x1, x2);
		ret = sdei_signal(x1, x2);
		SDEI_LOG("< SIGNAL:%ld\n", ret);
		SMC_RET1(handle, ret);
		break;

	default:
		break;
	}

	WARN("Unimplemented SDEI Call: 0x%x\n", smc_fid);
	SMC_RET1(handle, SMC_UNK);
}
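
/*
 * Illustrative end-to-end client sequence (a sketch; event and interrupt
 * numbers are hypothetical), as dispatched through the handler above:
 *
 *	SDEI_VERSION();			// expect 0x0001000000000000
 *	ev = SDEI_INTERRUPT_BIND(100);	// bind NS SPI 100, get event number
 *	SDEI_EVENT_REGISTER(ev, ep, arg, SDEI_REGF_RM_ANY, 0);
 *	SDEI_EVENT_ENABLE(ev);
 *	SDEI_PE_UNMASK();		// allow SDEI dispatch on this PE
 *	...				// handler runs, and ends with
 *	SDEI_EVENT_COMPLETE(0);		// or COMPLETE_AND_RESUME
 */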

/* Subscribe to the PSCI CPU_ON event to initialise per-CPU SDEI configuration */
SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, sdei_cpu_on_init);