xref: /rk3399_ARM-atf/services/std_svc/spm/el3_spmc/spmc_main.c (revision cf9346cb83804feb083b56a668eb0a462983e038)
1 /*
2  * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <errno.h>
9 
10 #include <arch_helpers.h>
11 #include <bl31/bl31.h>
12 #include <bl31/ehf.h>
13 #include <bl31/interrupt_mgmt.h>
14 #include <common/debug.h>
15 #include <common/fdt_wrappers.h>
16 #include <common/runtime_svc.h>
17 #include <common/uuid.h>
18 #include <lib/el3_runtime/context_mgmt.h>
19 #include <lib/smccc.h>
20 #include <lib/utils.h>
21 #include <lib/xlat_tables/xlat_tables_v2.h>
22 #include <libfdt.h>
23 #include <plat/common/platform.h>
24 #include <services/el3_spmc_logical_sp.h>
25 #include <services/ffa_svc.h>
26 #include <services/spmc_svc.h>
27 #include <services/spmd_svc.h>
28 #include "spmc.h"
29 #include "spmc_shared_mem.h"
30 
31 #include <platform_def.h>
32 
33 /* Declare the maximum number of SPs and EL3 LPs. */
34 #define MAX_SP_LP_PARTITIONS (SECURE_PARTITION_COUNT + MAX_EL3_LP_DESCS_COUNT)
35 
36 /*
37  * Allocate a secure partition descriptor to describe each SP in the system that
38  * does not reside at EL3.
39  */
40 static struct secure_partition_desc sp_desc[SECURE_PARTITION_COUNT];
41 
42 /*
43  * Allocate an NS endpoint descriptor to describe each VM and the Hypervisor in
44  * the system that interacts with a SP. It is used to track the Hypervisor
45  * buffer pair, version and ID for now. It could be extended to track VM
46  * properties when the SPMC supports indirect messaging.
47  */
48 static struct ns_endpoint_desc ns_ep_desc[NS_PARTITION_COUNT];
49 
50 static uint64_t spmc_sp_interrupt_handler(uint32_t id,
51 					  uint32_t flags,
52 					  void *handle,
53 					  void *cookie);
54 
55 /*
56  * Helper function to obtain the array storing the EL3
57  * Logical Partition descriptors.
58  */
59 struct el3_lp_desc *get_el3_lp_array(void)
60 {
61 	return (struct el3_lp_desc *) EL3_LP_DESCS_START;
62 }
63 
64 /*
65  * Helper function to obtain the descriptor of the last SP to which control
66  * was handed on this physical cpu. Currently, we assume there is only one SP.
67  * TODO: Expand to track multiple partitions when required.
68  */
69 struct secure_partition_desc *spmc_get_current_sp_ctx(void)
70 {
71 	return &(sp_desc[ACTIVE_SP_DESC_INDEX]);
72 }
73 
74 /*
75  * Helper function to obtain the execution context of an SP on the
76  * current physical cpu.
77  */
78 struct sp_exec_ctx *spmc_get_sp_ec(struct secure_partition_desc *sp)
79 {
80 	return &(sp->ec[get_ec_index(sp)]);
81 }
82 
83 /* Helper function to get pointer to SP context from its ID. */
84 struct secure_partition_desc *spmc_get_sp_ctx(uint16_t id)
85 {
86 	/* Check for Secure World Partitions. */
87 	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
88 		if (sp_desc[i].sp_id == id) {
89 			return &(sp_desc[i]);
90 		}
91 	}
92 	return NULL;
93 }
94 
95 /*
96  * Helper function to obtain the descriptor of the Hypervisor or OS kernel.
97  * We assume that the first descriptor is reserved for this entity.
98  */
99 struct ns_endpoint_desc *spmc_get_hyp_ctx(void)
100 {
101 	return &(ns_ep_desc[0]);
102 }
103 
104 /*
105  * Helper function to obtain the RX/TX buffer pair descriptor of the Hypervisor
106  * or OS kernel in the normal world or the last SP that was run.
107  */
108 struct mailbox *spmc_get_mbox_desc(bool secure_origin)
109 {
110 	/* Obtain the RX/TX buffer pair descriptor. */
111 	if (secure_origin) {
112 		return &(spmc_get_current_sp_ctx()->mailbox);
113 	} else {
114 		return &(spmc_get_hyp_ctx()->mailbox);
115 	}
116 }
117 
118 /******************************************************************************
119  * This function returns to the place where spmc_sp_synchronous_entry() was
120  * called originally.
121  ******************************************************************************/
122 __dead2 void spmc_sp_synchronous_exit(struct sp_exec_ctx *ec, uint64_t rc)
123 {
124 	/*
125 	 * The SPM must have initiated the original request through a
126 	 * synchronous entry into the secure partition. Jump back to the
127 	 * original C runtime context with the value of rc in x0.
128 	 */
129 	spm_secure_partition_exit(ec->c_rt_ctx, rc);
130 
131 	panic();
132 }
133 
134 /*******************************************************************************
135  * Return FFA_ERROR with specified error code.
136  ******************************************************************************/
137 uint64_t spmc_ffa_error_return(void *handle, int error_code)
138 {
139 	SMC_RET8(handle, FFA_ERROR,
140 		 FFA_TARGET_INFO_MBZ, error_code,
141 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
142 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
143 }
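/*
 * Illustrative usage (a sketch, not a prescribed pattern): ABI handlers in
 * this file reject malformed or unsupported requests through this helper,
 * for example:
 *
 *	if (!secure_origin) {
 *		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
 *	}
 *
 * FFA_ERROR is placed in the first return register, the error code in the
 * third, and the remaining parameter registers are zeroed (MBZ).
 */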
144 
145 /******************************************************************************
146  * Helper function to validate a secure partition ID to ensure it does not
147  * conflict with any other FF-A component and that it follows the convention to
148  * indicate it resides within the secure world.
149  ******************************************************************************/
150 bool is_ffa_secure_id_valid(uint16_t partition_id)
151 {
152 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
153 
154 	/* Ensure the ID is not the invalid partition ID. */
155 	if (partition_id == INV_SP_ID) {
156 		return false;
157 	}
158 
159 	/* Ensure the ID is not the SPMD ID. */
160 	if (partition_id == SPMD_DIRECT_MSG_ENDPOINT_ID) {
161 		return false;
162 	}
163 
164 	/*
165 	 * Ensure the ID follows the convention to indicate it resides
166 	 * in the secure world.
167 	 */
168 	if (!ffa_is_secure_world_id(partition_id)) {
169 		return false;
170 	}
171 
172 	/* Ensure we don't conflict with the SPMC partition ID. */
173 	if (partition_id == FFA_SPMC_ID) {
174 		return false;
175 	}
176 
177 	/* Ensure we do not already have an SP context with this ID. */
178 	if (spmc_get_sp_ctx(partition_id)) {
179 		return false;
180 	}
181 
182 	/* Ensure we don't clash with any Logical SPs. */
183 	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
184 		if (el3_lp_descs[i].sp_id == partition_id) {
185 			return false;
186 		}
187 	}
188 
189 	return true;
190 }
191 
192 /*******************************************************************************
193  * This function either forwards the request to the other world or returns
194  * with an ERET depending on the source of the call.
195  * We can assume that the destination is for an entity at a lower exception
196  * level as any messages destined for a logical SP resident in EL3 will have
197  * already been taken care of by the SPMC before entering this function.
198  ******************************************************************************/
199 static uint64_t spmc_smc_return(uint32_t smc_fid,
200 				bool secure_origin,
201 				uint64_t x1,
202 				uint64_t x2,
203 				uint64_t x3,
204 				uint64_t x4,
205 				void *handle,
206 				void *cookie,
207 				uint64_t flags,
208 				uint16_t dst_id)
209 {
210 	/* If the destination is in the normal world always go via the SPMD. */
211 	if (ffa_is_normal_world_id(dst_id)) {
212 		return spmd_smc_handler(smc_fid, x1, x2, x3, x4,
213 					cookie, handle, flags);
214 	}
215 	/*
216 	 * If the caller is secure and we want to return to the secure world,
217 	 * ERET directly.
218 	 */
219 	else if (secure_origin && ffa_is_secure_world_id(dst_id)) {
220 		SMC_RET5(handle, smc_fid, x1, x2, x3, x4);
221 	}
222 	/* If we originated in the normal world then switch contexts. */
223 	else if (!secure_origin && ffa_is_secure_world_id(dst_id)) {
224 		return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2,
225 					     x3, x4, handle);
226 	} else {
227 		/* Unknown State. */
228 		panic();
229 	}
230 
231 	/* Shouldn't be Reached. */
232 	return 0;
233 }
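/*
 * Summary of the routing above (descriptive aid only; by the time this
 * function runs, requests for EL3 Logical Partitions have already been
 * handled):
 *
 *	destination ID     caller origin   action
 *	--------------     -------------   ------
 *	Normal world ID    either world    forward via spmd_smc_handler()
 *	Secure world ID    secure          ERET straight back to the SP
 *	Secure world ID    non-secure      world switch via spmd_smc_switch_state()
 */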
234 
235 /*******************************************************************************
236  * FF-A ABI Handlers.
237  ******************************************************************************/
238 
239 /*******************************************************************************
240  * Helper function to validate arg2 as part of a direct message.
241  ******************************************************************************/
242 static inline bool direct_msg_validate_arg2(uint64_t x2)
243 {
244 	/* Check message type. */
245 	if (x2 & FFA_FWK_MSG_BIT) {
246 		/* We have a framework message, ensure it is a known message. */
247 		if (x2 & ~(FFA_FWK_MSG_MASK | FFA_FWK_MSG_BIT)) {
248 			VERBOSE("Invalid message format 0x%lx.\n", x2);
249 			return false;
250 		}
251 	} else {
252 		/* We have a partition message, ensure x2 is not set. */
253 		if (x2 != (uint64_t) 0) {
254 			VERBOSE("Arg2 MBZ for partition messages. (0x%lx).\n",
255 				x2);
256 			return false;
257 		}
258 	}
259 	return true;
260 }
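/*
 * Examples of the arg2 validation above (illustrative values, assuming the
 * bit layout defined by FFA_FWK_MSG_BIT and FFA_FWK_MSG_MASK):
 *
 *	x2 == 0                              -> valid partition message
 *	x2 == 0x1234 (no framework bit)      -> rejected, arg2 MBZ
 *	x2 == FFA_FWK_MSG_BIT | message_id   -> valid framework message
 *	x2 with bits outside the mask set    -> rejected as malformed
 */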
261 
262 /*******************************************************************************
263  * Handle direct request messages and route to the appropriate destination.
264  ******************************************************************************/
265 static uint64_t direct_req_smc_handler(uint32_t smc_fid,
266 				       bool secure_origin,
267 				       uint64_t x1,
268 				       uint64_t x2,
269 				       uint64_t x3,
270 				       uint64_t x4,
271 				       void *cookie,
272 				       void *handle,
273 				       uint64_t flags)
274 {
275 	uint16_t dst_id = ffa_endpoint_destination(x1);
276 	struct el3_lp_desc *el3_lp_descs;
277 	struct secure_partition_desc *sp;
278 	unsigned int idx;
279 
280 	/* Check if arg2 has been populated correctly based on message type. */
281 	if (!direct_msg_validate_arg2(x2)) {
282 		return spmc_ffa_error_return(handle,
283 					     FFA_ERROR_INVALID_PARAMETER);
284 	}
285 
286 	el3_lp_descs = get_el3_lp_array();
287 
288 	/* Check if the request is destined for a Logical Partition. */
289 	for (unsigned int i = 0U; i < MAX_EL3_LP_DESCS_COUNT; i++) {
290 		if (el3_lp_descs[i].sp_id == dst_id) {
291 			return el3_lp_descs[i].direct_req(
292 					smc_fid, secure_origin, x1, x2, x3, x4,
293 					cookie, handle, flags);
294 		}
295 	}
296 
297 	/*
298 	 * If the request was not targeted to an LSP and originated from the
299 	 * secure world then it is invalid, since an SP cannot call into the
300 	 * Normal world and there is no other SP to call into. If other SPs are
301 	 * added in future, the partition runtime model would also need to be validated.
302 	 */
303 	if (secure_origin) {
304 		VERBOSE("Direct request not supported to the Normal World.\n");
305 		return spmc_ffa_error_return(handle,
306 					     FFA_ERROR_INVALID_PARAMETER);
307 	}
308 
309 	/* Check if the SP ID is valid. */
310 	sp = spmc_get_sp_ctx(dst_id);
311 	if (sp == NULL) {
312 		VERBOSE("Direct request to unknown partition ID (0x%x).\n",
313 			dst_id);
314 		return spmc_ffa_error_return(handle,
315 					     FFA_ERROR_INVALID_PARAMETER);
316 	}
317 
318 	/*
319 	 * Check that the target execution context is in a waiting state before
320 	 * forwarding the direct request to it.
321 	 */
322 	idx = get_ec_index(sp);
323 	if (sp->ec[idx].rt_state != RT_STATE_WAITING) {
324 		VERBOSE("SP context on core%u is not waiting (%u).\n",
325 			idx, sp->ec[idx].rt_state);
326 		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
327 	}
328 
329 	/*
330 	 * Everything checks out so forward the request to the SP after updating
331 	 * its state and runtime model.
332 	 */
333 	sp->ec[idx].rt_state = RT_STATE_RUNNING;
334 	sp->ec[idx].rt_model = RT_MODEL_DIR_REQ;
335 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
336 			       handle, cookie, flags, dst_id);
337 }
338 
339 /*******************************************************************************
340  * Handle direct response messages and route to the appropriate destination.
341  ******************************************************************************/
342 static uint64_t direct_resp_smc_handler(uint32_t smc_fid,
343 					bool secure_origin,
344 					uint64_t x1,
345 					uint64_t x2,
346 					uint64_t x3,
347 					uint64_t x4,
348 					void *cookie,
349 					void *handle,
350 					uint64_t flags)
351 {
352 	uint16_t dst_id = ffa_endpoint_destination(x1);
353 	struct secure_partition_desc *sp;
354 	unsigned int idx;
355 
356 	/* Check if arg2 has been populated correctly based on message type. */
357 	if (!direct_msg_validate_arg2(x2)) {
358 		return spmc_ffa_error_return(handle,
359 					     FFA_ERROR_INVALID_PARAMETER);
360 	}
361 
362 	/* Check that the response did not originate from the Normal world. */
363 	if (!secure_origin) {
364 		VERBOSE("Direct Response not supported from Normal World.\n");
365 		return spmc_ffa_error_return(handle,
366 					     FFA_ERROR_INVALID_PARAMETER);
367 	}
368 
369 	/*
370 	 * Check that the response is either targeted to the Normal world or the
371 	 * SPMC, e.g. a PM response.
372 	 */
373 	if ((dst_id != FFA_SPMC_ID) && ffa_is_secure_world_id(dst_id)) {
374 		VERBOSE("Direct response to invalid partition ID (0x%x).\n",
375 			dst_id);
376 		return spmc_ffa_error_return(handle,
377 					     FFA_ERROR_INVALID_PARAMETER);
378 	}
379 
380 	/* Obtain the SP descriptor and update its runtime state. */
381 	sp = spmc_get_sp_ctx(ffa_endpoint_source(x1));
382 	if (sp == NULL) {
383 		VERBOSE("Direct response to unknown partition ID (0x%x).\n",
384 			dst_id);
385 		return spmc_ffa_error_return(handle,
386 					     FFA_ERROR_INVALID_PARAMETER);
387 	}
388 
389 	/* Sanity check state is being tracked correctly in the SPMC. */
390 	idx = get_ec_index(sp);
391 	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
392 
393 	/* Ensure SP execution context was in the right runtime model. */
394 	if (sp->ec[idx].rt_model != RT_MODEL_DIR_REQ) {
395 		VERBOSE("SP context on core%u not handling direct req (%u).\n",
396 			idx, sp->ec[idx].rt_model);
397 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
398 	}
399 
400 	/* Update the state of the SP execution context. */
401 	sp->ec[idx].rt_state = RT_STATE_WAITING;
402 
403 	/*
404 	 * If the receiver is the SPMC, perform a synchronous exit; otherwise
405 	 * forward the response to the Normal world.
406 	 */
407 	if (dst_id == FFA_SPMC_ID) {
408 		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
409 		/* Should not get here. */
410 		panic();
411 	}
412 
413 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
414 			       handle, cookie, flags, dst_id);
415 }
416 
417 /*******************************************************************************
418  * This function handles the FFA_MSG_WAIT SMC to allow an SP to relinquish its
419  * cycles.
420  ******************************************************************************/
421 static uint64_t msg_wait_handler(uint32_t smc_fid,
422 				 bool secure_origin,
423 				 uint64_t x1,
424 				 uint64_t x2,
425 				 uint64_t x3,
426 				 uint64_t x4,
427 				 void *cookie,
428 				 void *handle,
429 				 uint64_t flags)
430 {
431 	struct secure_partition_desc *sp;
432 	unsigned int idx;
433 
434 	/*
435 	 * Check that the request did not originate from the Normal world as
436 	 * only the secure world can call this ABI.
437 	 */
438 	if (!secure_origin) {
439 		VERBOSE("Normal world cannot call FFA_MSG_WAIT.\n");
440 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
441 	}
442 
443 	/* Get the descriptor of the SP that invoked FFA_MSG_WAIT. */
444 	sp = spmc_get_current_sp_ctx();
445 	if (sp == NULL) {
446 		return spmc_ffa_error_return(handle,
447 					     FFA_ERROR_INVALID_PARAMETER);
448 	}
449 
450 	/*
451 	 * Get the execution context of the SP that invoked FFA_MSG_WAIT.
452 	 */
453 	idx = get_ec_index(sp);
454 
455 	/* Ensure SP execution context was in the right runtime model. */
456 	if (sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) {
457 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
458 	}
459 
460 	/* Sanity check the state is being tracked correctly in the SPMC. */
461 	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
462 
463 	/*
464 	 * Perform a synchronous exit if the partition was initialising. The
465 	 * state is updated after the exit.
466 	 */
467 	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
468 		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
469 		/* Should not get here */
470 		panic();
471 	}
472 
473 	/* Update the state of the SP execution context. */
474 	sp->ec[idx].rt_state = RT_STATE_WAITING;
475 
476 	/* Resume normal world if a secure interrupt was handled. */
477 	if (sp->ec[idx].rt_model == RT_MODEL_INTR) {
478 		/* FFA_MSG_WAIT can only be called from the secure world. */
479 		unsigned int secure_state_in = SECURE;
480 		unsigned int secure_state_out = NON_SECURE;
481 
482 		cm_el1_sysregs_context_save(secure_state_in);
483 		cm_el1_sysregs_context_restore(secure_state_out);
484 		cm_set_next_eret_context(secure_state_out);
485 		SMC_RET0(cm_get_context(secure_state_out));
486 	}
487 
488 	/* Forward the response to the Normal world. */
489 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
490 			       handle, cookie, flags, FFA_NWD_ID);
491 }
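/*
 * Rough sketch of the state handling above (descriptive only, derived from
 * the checks in this handler):
 *
 *	RT_MODEL_DIR_REQ + FFA_MSG_WAIT -> rejected with FFA_ERROR_DENIED
 *	RT_MODEL_INIT    + FFA_MSG_WAIT -> synchronous exit back to sp_init()
 *	RT_MODEL_INTR    + FFA_MSG_WAIT -> RT_STATE_WAITING, resume Normal world
 *	RT_MODEL_RUN     + FFA_MSG_WAIT -> RT_STATE_WAITING, forward to FFA_NWD_ID
 */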
492 
493 static uint64_t ffa_error_handler(uint32_t smc_fid,
494 				 bool secure_origin,
495 				 uint64_t x1,
496 				 uint64_t x2,
497 				 uint64_t x3,
498 				 uint64_t x4,
499 				 void *cookie,
500 				 void *handle,
501 				 uint64_t flags)
502 {
503 	struct secure_partition_desc *sp;
504 	unsigned int idx;
505 
506 	/* Check that the call did not originate from the Normal world. */
507 	if (!secure_origin) {
508 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
509 	}
510 
511 	/* Get the descriptor of the SP that invoked FFA_ERROR. */
512 	sp = spmc_get_current_sp_ctx();
513 	if (sp == NULL) {
514 		return spmc_ffa_error_return(handle,
515 					     FFA_ERROR_INVALID_PARAMETER);
516 	}
517 
518 	/* Get the execution context of the SP that invoked FFA_ERROR. */
519 	idx = get_ec_index(sp);
520 
521 	/*
522 	 * We only expect FFA_ERROR to be received during SP initialisation;
523 	 * otherwise this is an invalid call.
524 	 */
525 	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
526 		ERROR("SP 0x%x failed to initialize.\n", sp->sp_id);
527 		spmc_sp_synchronous_exit(&sp->ec[idx], x2);
528 		/* Should not get here. */
529 		panic();
530 	}
531 
532 	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
533 }
534 
535 static uint64_t ffa_version_handler(uint32_t smc_fid,
536 				    bool secure_origin,
537 				    uint64_t x1,
538 				    uint64_t x2,
539 				    uint64_t x3,
540 				    uint64_t x4,
541 				    void *cookie,
542 				    void *handle,
543 				    uint64_t flags)
544 {
545 	uint32_t requested_version = x1 & FFA_VERSION_MASK;
546 
547 	if (requested_version & FFA_VERSION_BIT31_MASK) {
548 		/* Invalid encoding, return an error. */
549 		SMC_RET1(handle, FFA_ERROR_NOT_SUPPORTED);
550 		/* Execution stops here. */
551 	}
552 
553 	/* Determine the caller to store the requested version. */
554 	if (secure_origin) {
555 		/*
556 		 * Ensure that the SP is reporting the same version as
557 		 * specified in its manifest. If these do not match there is
558 		 * something wrong with the SP.
559 		 * TODO: Should we abort the SP? For now assert this is not
560 		 *       the case.
561 		 */
562 		assert(requested_version ==
563 		       spmc_get_current_sp_ctx()->ffa_version);
564 	} else {
565 		/*
566 		 * If this is called by the normal world, record this
567 		 * information in its descriptor.
568 		 */
569 		spmc_get_hyp_ctx()->ffa_version = requested_version;
570 	}
571 
572 	SMC_RET1(handle, MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
573 					  FFA_VERSION_MINOR));
574 }
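/*
 * Example exchange (a sketch, not prescriptive): a v1.0 Normal world caller
 * issues FFA_VERSION with x1 = 0x10000 (major 1, minor 0). The requested
 * version is recorded in the Hypervisor descriptor above and the SPMC
 * replies with MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR); the
 * caller is then expected to operate at the lower of the two versions.
 */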
575 
576 /*******************************************************************************
577  * Helper function to obtain the FF-A version of the calling partition.
578  ******************************************************************************/
579 uint32_t get_partition_ffa_version(bool secure_origin)
580 {
581 	if (secure_origin) {
582 		return spmc_get_current_sp_ctx()->ffa_version;
583 	} else {
584 		return spmc_get_hyp_ctx()->ffa_version;
585 	}
586 }
587 
588 static uint64_t rxtx_map_handler(uint32_t smc_fid,
589 				 bool secure_origin,
590 				 uint64_t x1,
591 				 uint64_t x2,
592 				 uint64_t x3,
593 				 uint64_t x4,
594 				 void *cookie,
595 				 void *handle,
596 				 uint64_t flags)
597 {
598 	int ret;
599 	uint32_t error_code;
600 	uint32_t mem_atts = secure_origin ? MT_SECURE : MT_NS;
601 	struct mailbox *mbox;
602 	uintptr_t tx_address = x1;
603 	uintptr_t rx_address = x2;
604 	uint32_t page_count = x3 & FFA_RXTX_PAGE_COUNT_MASK; /* Bits [5:0] */
605 	uint32_t buf_size = page_count * FFA_PAGE_SIZE;
606 
607 	/*
608 	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
609 	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
610 	 * ABI on behalf of a VM and reject it if this is the case.
611 	 */
612 	if (tx_address == 0 || rx_address == 0) {
613 		WARN("Mapping RX/TX Buffers on behalf of VM not supported.\n");
614 		return spmc_ffa_error_return(handle,
615 					     FFA_ERROR_INVALID_PARAMETER);
616 	}
617 
618 	/* Ensure the specified buffers are not the same. */
619 	if (tx_address == rx_address) {
620 		WARN("TX Buffer must not be the same as RX Buffer.\n");
621 		return spmc_ffa_error_return(handle,
622 					     FFA_ERROR_INVALID_PARAMETER);
623 	}
624 
625 	/* Ensure the buffer size is not 0. */
626 	if (buf_size == 0U) {
627 		WARN("Buffer size must not be 0\n");
628 		return spmc_ffa_error_return(handle,
629 					     FFA_ERROR_INVALID_PARAMETER);
630 	}
631 
632 	/*
633 	 * Ensure the buffer size is a multiple of the translation granule size
634 	 * in TF-A.
635 	 */
636 	if (buf_size % PAGE_SIZE != 0U) {
637 		WARN("Buffer size must be aligned to translation granule.\n");
638 		return spmc_ffa_error_return(handle,
639 					     FFA_ERROR_INVALID_PARAMETER);
640 	}
641 
642 	/* Obtain the RX/TX buffer pair descriptor. */
643 	mbox = spmc_get_mbox_desc(secure_origin);
644 
645 	spin_lock(&mbox->lock);
646 
647 	/* Check if buffers have already been mapped. */
648 	if (mbox->rx_buffer != 0 || mbox->tx_buffer != 0) {
649 		WARN("RX/TX Buffers already mapped (%p/%p)\n",
650 		     (void *) mbox->rx_buffer, (void *)mbox->tx_buffer);
651 		error_code = FFA_ERROR_DENIED;
652 		goto err;
653 	}
654 
655 	/* Map the TX buffer as read-only. */
656 	ret = mmap_add_dynamic_region(tx_address, /* PA */
657 			tx_address, /* VA */
658 			buf_size, /* size */
659 			mem_atts | MT_RO_DATA); /* attrs */
660 	if (ret != 0) {
661 		/* Return the correct error code. */
662 		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
663 						FFA_ERROR_INVALID_PARAMETER;
664 		WARN("Unable to map TX buffer: %d\n", error_code);
665 		goto err;
666 	}
667 
668 	/* Map the RX buffer as read-write. */
669 	ret = mmap_add_dynamic_region(rx_address, /* PA */
670 			rx_address, /* VA */
671 			buf_size, /* size */
672 			mem_atts | MT_RW_DATA); /* attrs */
673 
674 	if (ret != 0) {
675 		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
676 						FFA_ERROR_INVALID_PARAMETER;
677 		WARN("Unable to map RX buffer: %d\n", error_code);
678 		/* Unmap the TX buffer again. */
679 		mmap_remove_dynamic_region(tx_address, buf_size);
680 		goto err;
681 	}
682 
683 	mbox->tx_buffer = (void *) tx_address;
684 	mbox->rx_buffer = (void *) rx_address;
685 	mbox->rxtx_page_count = page_count;
686 	spin_unlock(&mbox->lock);
687 
688 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
689 	/* Execution stops here. */
690 err:
691 	spin_unlock(&mbox->lock);
692 	return spmc_ffa_error_return(handle, error_code);
693 }
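/*
 * Illustrative caller view of FFA_RXTX_MAP (a sketch, assuming a single
 * FFA_PAGE_SIZE page per buffer):
 *
 *	w0 = FFA_RXTX_MAP_SMC64
 *	x1 = tx_buffer_pa   (page aligned, mapped read-only at EL3)
 *	x2 = rx_buffer_pa   (page aligned, distinct from x1, mapped read-write)
 *	x3 = 1              (page count, bits [5:0])
 *
 * On success the buffers are recorded in the caller's mailbox descriptor and
 * become usable by ABIs such as FFA_PARTITION_INFO_GET.
 */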
694 
695 static uint64_t rxtx_unmap_handler(uint32_t smc_fid,
696 				   bool secure_origin,
697 				   uint64_t x1,
698 				   uint64_t x2,
699 				   uint64_t x3,
700 				   uint64_t x4,
701 				   void *cookie,
702 				   void *handle,
703 				   uint64_t flags)
704 {
705 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
706 	uint32_t buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
707 
708 	/*
709 	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
710 	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
711 	 * ABI on behalf of a VM and reject it if this is the case.
712 	 */
713 	if (x1 != 0UL) {
714 		return spmc_ffa_error_return(handle,
715 					     FFA_ERROR_INVALID_PARAMETER);
716 	}
717 
718 	spin_lock(&mbox->lock);
719 
720 	/* Check if buffers are currently mapped. */
721 	if (mbox->rx_buffer == 0 || mbox->tx_buffer == 0) {
722 		spin_unlock(&mbox->lock);
723 		return spmc_ffa_error_return(handle,
724 					     FFA_ERROR_INVALID_PARAMETER);
725 	}
726 
727 	/* Unmap RX Buffer */
728 	if (mmap_remove_dynamic_region((uintptr_t) mbox->rx_buffer,
729 				       buf_size) != 0) {
730 		WARN("Unable to unmap RX buffer!\n");
731 	}
732 
733 	mbox->rx_buffer = 0;
734 
735 	/* Unmap TX Buffer */
736 	if (mmap_remove_dynamic_region((uintptr_t) mbox->tx_buffer,
737 				       buf_size) != 0) {
738 		WARN("Unable to unmap TX buffer!\n");
739 	}
740 
741 	mbox->tx_buffer = 0;
742 	mbox->rxtx_page_count = 0;
743 
744 	spin_unlock(&mbox->lock);
745 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
746 }
747 
748 /*
749  * Helper function to populate the properties field of a Partition Info Get
750  * descriptor.
751  */
752 static uint32_t
753 partition_info_get_populate_properties(uint32_t sp_properties,
754 				       enum sp_execution_state sp_ec_state)
755 {
756 	uint32_t properties = sp_properties;
757 	uint32_t ec_state;
758 
759 	/* Determine the execution state of the SP. */
760 	ec_state = sp_ec_state == SP_STATE_AARCH64 ?
761 		   FFA_PARTITION_INFO_GET_AARCH64_STATE :
762 		   FFA_PARTITION_INFO_GET_AARCH32_STATE;
763 
764 	properties |= ec_state << FFA_PARTITION_INFO_GET_EXEC_STATE_SHIFT;
765 
766 	return properties;
767 }
768 
769 /*
770  * Collate the partition information in a v1.1 partition information
771  * descriptor format; this will be converted later if required.
772  */
773 static int partition_info_get_handler_v1_1(uint32_t *uuid,
774 					   struct ffa_partition_info_v1_1
775 						  *partitions,
776 					   uint32_t max_partitions,
777 					   uint32_t *partition_count)
778 {
779 	uint32_t index;
780 	struct ffa_partition_info_v1_1 *desc;
781 	bool null_uuid = is_null_uuid(uuid);
782 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
783 
784 	/* Deal with Logical Partitions. */
785 	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
786 		if (null_uuid || uuid_match(uuid, el3_lp_descs[index].uuid)) {
787 			/* Found a matching UUID, populate appropriately. */
788 			if (*partition_count >= max_partitions) {
789 				return FFA_ERROR_NO_MEMORY;
790 			}
791 
792 			desc = &partitions[*partition_count];
793 			desc->ep_id = el3_lp_descs[index].sp_id;
794 			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
795 			/* LSPs must be AArch64. */
796 			desc->properties =
797 				partition_info_get_populate_properties(
798 					el3_lp_descs[index].properties,
799 					SP_STATE_AARCH64);
800 
801 			if (null_uuid) {
802 				copy_uuid(desc->uuid, el3_lp_descs[index].uuid);
803 			}
804 			(*partition_count)++;
805 		}
806 	}
807 
808 	/* Deal with physical SPs. */
809 	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
810 		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
811 			/* Found a matching UUID, populate appropriately. */
812 			if (*partition_count >= max_partitions) {
813 				return FFA_ERROR_NO_MEMORY;
814 			}
815 
816 			desc = &partitions[*partition_count];
817 			desc->ep_id = sp_desc[index].sp_id;
818 			/*
819 			 * Execution context count must match the number of cores for
820 			 * S-EL1 SPs.
821 			 */
822 			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
823 			desc->properties =
824 				partition_info_get_populate_properties(
825 					sp_desc[index].properties,
826 					sp_desc[index].execution_state);
827 
828 			if (null_uuid) {
829 				copy_uuid(desc->uuid, sp_desc[index].uuid);
830 			}
831 			(*partition_count)++;
832 		}
833 	}
834 	return 0;
835 }
836 
837 /*
838  * Handle the case where the caller only wants the count of partitions
839  * matching a given UUID and does not want the corresponding descriptors
840  * populated.
841  */
842 static uint32_t partition_info_get_handler_count_only(uint32_t *uuid)
843 {
844 	uint32_t index = 0;
845 	uint32_t partition_count = 0;
846 	bool null_uuid = is_null_uuid(uuid);
847 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
848 
849 	/* Deal with Logical Partitions. */
850 	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
851 		if (null_uuid ||
852 		    uuid_match(uuid, el3_lp_descs[index].uuid)) {
853 			(partition_count)++;
854 		}
855 	}
856 
857 	/* Deal with physical SPs. */
858 	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
859 		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
860 			(partition_count)++;
861 		}
862 	}
863 	return partition_count;
864 }
865 
866 /*
867  * If the caller of the PARTITION_INFO_GET ABI was a v1.0 caller, populate
868  * the corresponding descriptor format from the v1.1 descriptor array.
869  */
870 static uint64_t partition_info_populate_v1_0(struct ffa_partition_info_v1_1
871 					     *partitions,
872 					     struct mailbox *mbox,
873 					     int partition_count)
874 {
875 	uint32_t index;
876 	uint32_t buf_size;
877 	uint32_t descriptor_size;
878 	struct ffa_partition_info_v1_0 *v1_0_partitions =
879 		(struct ffa_partition_info_v1_0 *) mbox->rx_buffer;
880 
881 	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
882 	descriptor_size = partition_count *
883 			  sizeof(struct ffa_partition_info_v1_0);
884 
885 	if (descriptor_size > buf_size) {
886 		return FFA_ERROR_NO_MEMORY;
887 	}
888 
889 	for (index = 0U; index < partition_count; index++) {
890 		v1_0_partitions[index].ep_id = partitions[index].ep_id;
891 		v1_0_partitions[index].execution_ctx_count =
892 			partitions[index].execution_ctx_count;
893 		/* Only report v1.0 properties. */
894 		v1_0_partitions[index].properties =
895 			(partitions[index].properties &
896 			FFA_PARTITION_INFO_GET_PROPERTIES_V1_0_MASK);
897 	}
898 	return 0;
899 }
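/*
 * Why the repacking above is needed (descriptive note): the v1.0 partition
 * info descriptor only carries the endpoint ID, execution context count and
 * properties, whereas the v1.1 descriptor additionally carries the partition
 * UUID. The descriptors therefore differ in size, so the internally built
 * v1.1 array cannot simply be memcpy'd into a v1.0 caller's RX buffer; each
 * entry is copied field by field and the UUID dropped.
 */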
900 
901 /*
902  * Main handler for FFA_PARTITION_INFO_GET which supports both FF-A v1.1 and
903  * v1.0 implementations.
904  */
905 static uint64_t partition_info_get_handler(uint32_t smc_fid,
906 					   bool secure_origin,
907 					   uint64_t x1,
908 					   uint64_t x2,
909 					   uint64_t x3,
910 					   uint64_t x4,
911 					   void *cookie,
912 					   void *handle,
913 					   uint64_t flags)
914 {
915 	int ret;
916 	uint32_t partition_count = 0;
917 	uint32_t size = 0;
918 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
919 	struct mailbox *mbox;
920 	uint64_t info_get_flags;
921 	bool count_only;
922 	uint32_t uuid[4];
923 
924 	uuid[0] = x1;
925 	uuid[1] = x2;
926 	uuid[2] = x3;
927 	uuid[3] = x4;
928 
929 	/* Determine if the Partition descriptors should be populated. */
930 	info_get_flags = SMC_GET_GP(handle, CTX_GPREG_X5);
931 	count_only = (info_get_flags & FFA_PARTITION_INFO_GET_COUNT_FLAG_MASK);
932 
933 	/* Handle the case where we don't need to populate the descriptors. */
934 	if (count_only) {
935 		partition_count = partition_info_get_handler_count_only(uuid);
936 		if (partition_count == 0) {
937 			return spmc_ffa_error_return(handle,
938 						FFA_ERROR_INVALID_PARAMETER);
939 		}
940 	} else {
941 		struct ffa_partition_info_v1_1 partitions[MAX_SP_LP_PARTITIONS];
942 
943 		/*
944 		 * Handle the case where the partition descriptors are required:
945 		 * check we have the buffers available and populate the
946 		 * appropriate structure version.
947 		 */
948 
949 		/* Obtain the v1.1 format of the descriptors. */
950 		ret = partition_info_get_handler_v1_1(uuid, partitions,
951 						      MAX_SP_LP_PARTITIONS,
952 						      &partition_count);
953 
954 		/* Check if an error occurred during discovery. */
955 		if (ret != 0) {
956 			goto err;
957 		}
958 
959 		/* If we didn't find any matches the UUID is unknown. */
960 		if (partition_count == 0) {
961 			ret = FFA_ERROR_INVALID_PARAMETER;
962 			goto err;
963 		}
964 
965 		/* Obtain the partition mailbox RX/TX buffer pair descriptor. */
966 		mbox = spmc_get_mbox_desc(secure_origin);
967 
968 		/*
969 		 * If the caller has not bothered registering its RX/TX pair
970 		 * then return an error code.
971 		 */
972 		spin_lock(&mbox->lock);
973 		if (mbox->rx_buffer == NULL) {
974 			ret = FFA_ERROR_BUSY;
975 			goto err_unlock;
976 		}
977 
978 		/* Ensure the RX buffer is currently free. */
979 		if (mbox->state != MAILBOX_STATE_EMPTY) {
980 			ret = FFA_ERROR_BUSY;
981 			goto err_unlock;
982 		}
983 
984 		/* Zero the RX buffer before populating. */
985 		(void)memset(mbox->rx_buffer, 0,
986 			     mbox->rxtx_page_count * FFA_PAGE_SIZE);
987 
988 		/*
989 		 * Depending on the FF-A version of the requesting partition
990 		 * we may need to convert to a v1.0 format otherwise we can copy
991 		 * directly.
992 		 */
993 		if (ffa_version == MAKE_FFA_VERSION(U(1), U(0))) {
994 			ret = partition_info_populate_v1_0(partitions,
995 							   mbox,
996 							   partition_count);
997 			if (ret != 0) {
998 				goto err_unlock;
999 			}
1000 		} else {
1001 			uint32_t buf_size = mbox->rxtx_page_count *
1002 					    FFA_PAGE_SIZE;
1003 
1004 			/* Ensure the descriptor will fit in the buffer. */
1005 			size = sizeof(struct ffa_partition_info_v1_1);
1006 			if (partition_count * size > buf_size) {
1007 				ret = FFA_ERROR_NO_MEMORY;
1008 				goto err_unlock;
1009 			}
1010 			memcpy(mbox->rx_buffer, partitions,
1011 			       partition_count * size);
1012 		}
1013 
1014 		mbox->state = MAILBOX_STATE_FULL;
1015 		spin_unlock(&mbox->lock);
1016 	}
1017 	SMC_RET4(handle, FFA_SUCCESS_SMC32, 0, partition_count, size);
1018 
1019 err_unlock:
1020 	spin_unlock(&mbox->lock);
1021 err:
1022 	return spmc_ffa_error_return(handle, ret);
1023 }
1024 
1025 static uint64_t ffa_feature_success(void *handle, uint32_t arg2)
1026 {
1027 	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0, arg2);
1028 }
1029 
1030 static uint64_t ffa_features_retrieve_request(bool secure_origin,
1031 					      uint32_t input_properties,
1032 					      void *handle)
1033 {
1034 	/*
1035 	 * If we're called by the normal world we don't support any
1036 	 * additional features.
1037 	 */
1038 	if (!secure_origin) {
1039 		if ((input_properties & FFA_FEATURES_RET_REQ_NS_BIT) != 0U) {
1040 			return spmc_ffa_error_return(handle,
1041 						     FFA_ERROR_NOT_SUPPORTED);
1042 		}
1043 
1044 	} else {
1045 		struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
1046 		/*
1047 		 * If v1.1 the NS bit must be set otherwise it is an invalid
1048 		 * call. If v1.0 check and store whether the SP has requested
1049 		 * the use of the NS bit.
1050 		 */
1051 		if (sp->ffa_version == MAKE_FFA_VERSION(1, 1)) {
1052 			if ((input_properties &
1053 			     FFA_FEATURES_RET_REQ_NS_BIT) == 0U) {
1054 				return spmc_ffa_error_return(handle,
1055 						       FFA_ERROR_NOT_SUPPORTED);
1056 			}
1057 			return ffa_feature_success(handle,
1058 						   FFA_FEATURES_RET_REQ_NS_BIT);
1059 		} else {
1060 			sp->ns_bit_requested = (input_properties &
1061 					       FFA_FEATURES_RET_REQ_NS_BIT) !=
1062 					       0U;
1063 		}
1064 		if (sp->ns_bit_requested) {
1065 			return ffa_feature_success(handle,
1066 						   FFA_FEATURES_RET_REQ_NS_BIT);
1067 		}
1068 	}
1069 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1070 }
1071 
1072 static uint64_t ffa_features_handler(uint32_t smc_fid,
1073 				     bool secure_origin,
1074 				     uint64_t x1,
1075 				     uint64_t x2,
1076 				     uint64_t x3,
1077 				     uint64_t x4,
1078 				     void *cookie,
1079 				     void *handle,
1080 				     uint64_t flags)
1081 {
1082 	uint32_t function_id = (uint32_t) x1;
1083 	uint32_t input_properties = (uint32_t) x2;
1084 
1085 	/* Check if a Feature ID was requested. */
1086 	if ((function_id & FFA_FEATURES_BIT31_MASK) == 0U) {
1087 		/* We currently don't support any additional features. */
1088 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1089 	}
1090 
1091 	/*
1092 	 * Handle the cases where we have separate handlers due to additional
1093 	 * properties.
1094 	 */
1095 	switch (function_id) {
1096 	case FFA_MEM_RETRIEVE_REQ_SMC32:
1097 	case FFA_MEM_RETRIEVE_REQ_SMC64:
1098 		return ffa_features_retrieve_request(secure_origin,
1099 						     input_properties,
1100 						     handle);
1101 	}
1102 
1103 	/*
1104 	 * We don't currently support additional input properties for these
1105 	 * other ABIs, so ensure this value is set to 0.
1106 	 */
1107 	if (input_properties != 0U) {
1108 		return spmc_ffa_error_return(handle,
1109 					     FFA_ERROR_NOT_SUPPORTED);
1110 	}
1111 
1112 	/* Report if any other FF-A ABI is supported. */
1113 	switch (function_id) {
1114 	/* Supported features from both worlds. */
1115 	case FFA_ERROR:
1116 	case FFA_SUCCESS_SMC32:
1117 	case FFA_INTERRUPT:
1118 	case FFA_SPM_ID_GET:
1119 	case FFA_ID_GET:
1120 	case FFA_FEATURES:
1121 	case FFA_VERSION:
1122 	case FFA_RX_RELEASE:
1123 	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
1124 	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
1125 	case FFA_PARTITION_INFO_GET:
1126 	case FFA_RXTX_MAP_SMC32:
1127 	case FFA_RXTX_MAP_SMC64:
1128 	case FFA_RXTX_UNMAP:
1129 	case FFA_MEM_FRAG_TX:
1130 	case FFA_MSG_RUN:
1131 
1132 		/*
1133 		 * We are relying on the fact that the other registers
1134 		 * will be set to 0 as these values align with the
1135 		 * currently implemented features of the SPMC. If this
1136 		 * changes this function must be extended to handle
1137 		 * reporting the additional functionality.
1138 		 */
1139 
1140 		SMC_RET1(handle, FFA_SUCCESS_SMC32);
1141 		/* Execution stops here. */
1142 
1143 	/* Supported ABIs only from the secure world. */
1144 	case FFA_SECONDARY_EP_REGISTER_SMC64:
1145 	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
1146 	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
1147 	case FFA_MEM_RELINQUISH:
1148 	case FFA_MSG_WAIT:
1149 
1150 		if (!secure_origin) {
1151 			return spmc_ffa_error_return(handle,
1152 				FFA_ERROR_NOT_SUPPORTED);
1153 		}
1154 		SMC_RET1(handle, FFA_SUCCESS_SMC32);
1155 		/* Execution stops here. */
1156 
1157 	/* Supported features only from the normal world. */
1158 	case FFA_MEM_SHARE_SMC32:
1159 	case FFA_MEM_SHARE_SMC64:
1160 	case FFA_MEM_LEND_SMC32:
1161 	case FFA_MEM_LEND_SMC64:
1162 	case FFA_MEM_RECLAIM:
1163 	case FFA_MEM_FRAG_RX:
1164 
1165 		if (secure_origin) {
1166 			return spmc_ffa_error_return(handle,
1167 					FFA_ERROR_NOT_SUPPORTED);
1168 		}
1169 		SMC_RET1(handle, FFA_SUCCESS_SMC32);
1170 		/* Execution stops here. */
1171 
1172 	default:
1173 		return spmc_ffa_error_return(handle,
1174 					FFA_ERROR_NOT_SUPPORTED);
1175 	}
1176 }
1177 
1178 static uint64_t ffa_id_get_handler(uint32_t smc_fid,
1179 				   bool secure_origin,
1180 				   uint64_t x1,
1181 				   uint64_t x2,
1182 				   uint64_t x3,
1183 				   uint64_t x4,
1184 				   void *cookie,
1185 				   void *handle,
1186 				   uint64_t flags)
1187 {
1188 	if (secure_origin) {
1189 		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1190 			 spmc_get_current_sp_ctx()->sp_id);
1191 	} else {
1192 		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1193 			 spmc_get_hyp_ctx()->ns_ep_id);
1194 	}
1195 }
1196 
1197 /*
1198  * Enable an SP to query the ID assigned to the SPMC.
1199  */
1200 static uint64_t ffa_spm_id_get_handler(uint32_t smc_fid,
1201 				       bool secure_origin,
1202 				       uint64_t x1,
1203 				       uint64_t x2,
1204 				       uint64_t x3,
1205 				       uint64_t x4,
1206 				       void *cookie,
1207 				       void *handle,
1208 				       uint64_t flags)
1209 {
1210 	assert(x1 == 0UL);
1211 	assert(x2 == 0UL);
1212 	assert(x3 == 0UL);
1213 	assert(x4 == 0UL);
1214 	assert(SMC_GET_GP(handle, CTX_GPREG_X5) == 0UL);
1215 	assert(SMC_GET_GP(handle, CTX_GPREG_X6) == 0UL);
1216 	assert(SMC_GET_GP(handle, CTX_GPREG_X7) == 0UL);
1217 
1218 	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0, FFA_SPMC_ID);
1219 }
1220 
1221 static uint64_t ffa_run_handler(uint32_t smc_fid,
1222 				bool secure_origin,
1223 				uint64_t x1,
1224 				uint64_t x2,
1225 				uint64_t x3,
1226 				uint64_t x4,
1227 				void *cookie,
1228 				void *handle,
1229 				uint64_t flags)
1230 {
1231 	struct secure_partition_desc *sp;
1232 	uint16_t target_id = FFA_RUN_EP_ID(x1);
1233 	uint16_t vcpu_id = FFA_RUN_VCPU_ID(x1);
1234 	unsigned int idx;
1235 	unsigned int *rt_state;
1236 	unsigned int *rt_model;
1237 
1238 	/* Can only be called from the normal world. */
1239 	if (secure_origin) {
1240 		ERROR("FFA_RUN can only be called from NWd.\n");
1241 		return spmc_ffa_error_return(handle,
1242 					     FFA_ERROR_INVALID_PARAMETER);
1243 	}
1244 
1245 	/* Cannot run a Normal world partition. */
1246 	if (ffa_is_normal_world_id(target_id)) {
1247 		ERROR("Cannot run a NWd partition (0x%x).\n", target_id);
1248 		return spmc_ffa_error_return(handle,
1249 					     FFA_ERROR_INVALID_PARAMETER);
1250 	}
1251 
1252 	/* Check that the target SP exists. */
1253 	sp = spmc_get_sp_ctx(target_id);
1254 	if (sp == NULL) {
1255 		ERROR("Unknown partition ID (0x%x).\n", target_id);
1256 		return spmc_ffa_error_return(handle,
1257 					     FFA_ERROR_INVALID_PARAMETER);
1258 	}
1259 
1260 	idx = get_ec_index(sp);
1261 	if (idx != vcpu_id) {
1262 		ERROR("Cannot run vcpu %d != %d.\n", idx, vcpu_id);
1263 		return spmc_ffa_error_return(handle,
1264 					     FFA_ERROR_INVALID_PARAMETER);
1265 	}
1266 	rt_state = &((sp->ec[idx]).rt_state);
1267 	rt_model = &((sp->ec[idx]).rt_model);
1268 	if (*rt_state == RT_STATE_RUNNING) {
1269 		ERROR("Partition (0x%x) is already running.\n", target_id);
1270 		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
1271 	}
1272 
1273 	/*
1274 	 * Sanity check that if the execution context was not waiting then it
1275 	 * was either in the direct request or the run partition runtime model.
1276 	 */
1277 	if (*rt_state == RT_STATE_PREEMPTED || *rt_state == RT_STATE_BLOCKED) {
1278 		assert(*rt_model == RT_MODEL_RUN ||
1279 		       *rt_model == RT_MODEL_DIR_REQ);
1280 	}
1281 
1282 	/*
1283 	 * If the context was waiting then update the partition runtime model.
1284 	 */
1285 	if (*rt_state == RT_STATE_WAITING) {
1286 		*rt_model = RT_MODEL_RUN;
1287 	}
1288 
1289 	/*
1290 	 * Forward the request to the correct SP vCPU after updating
1291 	 * its state.
1292 	 */
1293 	*rt_state = RT_STATE_RUNNING;
1294 
1295 	return spmc_smc_return(smc_fid, secure_origin, x1, 0, 0, 0,
1296 			       handle, cookie, flags, target_id);
1297 }
1298 
1299 static uint64_t rx_release_handler(uint32_t smc_fid,
1300 				   bool secure_origin,
1301 				   uint64_t x1,
1302 				   uint64_t x2,
1303 				   uint64_t x3,
1304 				   uint64_t x4,
1305 				   void *cookie,
1306 				   void *handle,
1307 				   uint64_t flags)
1308 {
1309 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1310 
1311 	spin_lock(&mbox->lock);
1312 
1313 	if (mbox->state != MAILBOX_STATE_FULL) {
1314 		spin_unlock(&mbox->lock);
1315 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1316 	}
1317 
1318 	mbox->state = MAILBOX_STATE_EMPTY;
1319 	spin_unlock(&mbox->lock);
1320 
1321 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1322 }
1323 
1324 /*
1325  * Perform initial validation on the provided secondary entry point.
1326  * For now ensure it does not lie within the BL31 Image or the SP's
1327  * RX/TX buffers as these are mapped within EL3.
1328  * TODO: perform validation for additional invalid memory regions.
1329  */
1330 static int validate_secondary_ep(uintptr_t ep, struct secure_partition_desc *sp)
1331 {
1332 	struct mailbox *mb;
1333 	uintptr_t buffer_size;
1334 	uintptr_t sp_rx_buffer;
1335 	uintptr_t sp_tx_buffer;
1336 	uintptr_t sp_rx_buffer_limit;
1337 	uintptr_t sp_tx_buffer_limit;
1338 
1339 	mb = &sp->mailbox;
1340 	buffer_size = (uintptr_t) (mb->rxtx_page_count * FFA_PAGE_SIZE);
1341 	sp_rx_buffer = (uintptr_t) mb->rx_buffer;
1342 	sp_tx_buffer = (uintptr_t) mb->tx_buffer;
1343 	sp_rx_buffer_limit = sp_rx_buffer + buffer_size;
1344 	sp_tx_buffer_limit = sp_tx_buffer + buffer_size;
1345 
1346 	/*
1347 	 * Check if the entry point lies within BL31, or the
1348 	 * SP's RX or TX buffer.
1349 	 */
1350 	if ((ep >= BL31_BASE && ep < BL31_LIMIT) ||
1351 	    (ep >= sp_rx_buffer && ep < sp_rx_buffer_limit) ||
1352 	    (ep >= sp_tx_buffer && ep < sp_tx_buffer_limit)) {
1353 		return -EINVAL;
1354 	}
1355 	return 0;
1356 }
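/*
 * Minimal illustration of the check above, using hypothetical addresses:
 * with BL31 spanning 0x04000000-0x04040000 and a one-page RX buffer at
 * 0x08000000, entry points of 0x04001000 or 0x08000100 would be rejected
 * with -EINVAL, while an address inside the SP's own image would pass this
 * initial check (further validation is left as a TODO above).
 */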
1357 
1358 /*******************************************************************************
1359  * This function handles the FFA_SECONDARY_EP_REGISTER SMC to allow an SP to
1360  * register an entry point for initialization during a secondary cold boot.
1361  ******************************************************************************/
1362 static uint64_t ffa_sec_ep_register_handler(uint32_t smc_fid,
1363 					    bool secure_origin,
1364 					    uint64_t x1,
1365 					    uint64_t x2,
1366 					    uint64_t x3,
1367 					    uint64_t x4,
1368 					    void *cookie,
1369 					    void *handle,
1370 					    uint64_t flags)
1371 {
1372 	struct secure_partition_desc *sp;
1373 	struct sp_exec_ctx *sp_ctx;
1374 
1375 	/* This request cannot originate from the Normal world. */
1376 	if (!secure_origin) {
1377 		WARN("%s: Can only be called from SWd.\n", __func__);
1378 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1379 	}
1380 
1381 	/* Get the context of the current SP. */
1382 	sp = spmc_get_current_sp_ctx();
1383 	if (sp == NULL) {
1384 		WARN("%s: Cannot find SP context.\n", __func__);
1385 		return spmc_ffa_error_return(handle,
1386 					     FFA_ERROR_INVALID_PARAMETER);
1387 	}
1388 
1389 	/* Only an S-EL1 SP should be invoking this ABI. */
1390 	if (sp->runtime_el != S_EL1) {
1391 		WARN("%s: Can only be called for a S-EL1 SP.\n", __func__);
1392 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1393 	}
1394 
1395 	/* Ensure the SP is in its initialization state. */
1396 	sp_ctx = spmc_get_sp_ec(sp);
1397 	if (sp_ctx->rt_model != RT_MODEL_INIT) {
1398 		WARN("%s: Can only be called during SP initialization.\n",
1399 		     __func__);
1400 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1401 	}
1402 
1403 	/* Perform initial validation of the secondary entry point. */
1404 	if (validate_secondary_ep(x1, sp)) {
1405 		WARN("%s: Invalid entry point provided (0x%lx).\n",
1406 		     __func__, x1);
1407 		return spmc_ffa_error_return(handle,
1408 					     FFA_ERROR_INVALID_PARAMETER);
1409 	}
1410 
1411 	/*
1412 	 * Update the secondary entrypoint in SP context.
1413 	 * We don't need a lock here as during partition initialization there
1414 	 * will only be a single core online.
1415 	 */
1416 	sp->secondary_ep = x1;
1417 	VERBOSE("%s: 0x%lx\n", __func__, sp->secondary_ep);
1418 
1419 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1420 }
1421 
1422 /*******************************************************************************
1423  * This function parses the Secure Partition manifest. From the manifest, it
1424  * fetches the details needed to prepare the Secure Partition image context
1425  * and any Secure Partition image boot arguments.
1426  ******************************************************************************/
1427 static int sp_manifest_parse(void *sp_manifest, int offset,
1428 			     struct secure_partition_desc *sp,
1429 			     entry_point_info_t *ep_info,
1430 			     int32_t *boot_info_reg)
1431 {
1432 	int32_t ret, node;
1433 	uint32_t config_32;
1434 
1435 	/*
1436 	 * Look for the mandatory fields that are expected to be present in
1437 	 * the SP manifests.
1438 	 */
1439 	node = fdt_path_offset(sp_manifest, "/");
1440 	if (node < 0) {
1441 		ERROR("Did not find root node.\n");
1442 		return node;
1443 	}
1444 
1445 	ret = fdt_read_uint32_array(sp_manifest, node, "uuid",
1446 				    ARRAY_SIZE(sp->uuid), sp->uuid);
1447 	if (ret != 0) {
1448 		ERROR("Missing Secure Partition UUID.\n");
1449 		return ret;
1450 	}
1451 
1452 	ret = fdt_read_uint32(sp_manifest, node, "exception-level", &config_32);
1453 	if (ret != 0) {
1454 		ERROR("Missing SP Exception Level information.\n");
1455 		return ret;
1456 	}
1457 
1458 	sp->runtime_el = config_32;
1459 
1460 	ret = fdt_read_uint32(sp_manifest, node, "ffa-version", &config_32);
1461 	if (ret != 0) {
1462 		ERROR("Missing Secure Partition FF-A Version.\n");
1463 		return ret;
1464 	}
1465 
1466 	sp->ffa_version = config_32;
1467 
1468 	ret = fdt_read_uint32(sp_manifest, node, "execution-state", &config_32);
1469 	if (ret != 0) {
1470 		ERROR("Missing Secure Partition Execution State.\n");
1471 		return ret;
1472 	}
1473 
1474 	sp->execution_state = config_32;
1475 
1476 	ret = fdt_read_uint32(sp_manifest, node,
1477 			      "messaging-method", &config_32);
1478 	if (ret != 0) {
1479 		ERROR("Missing Secure Partition messaging method.\n");
1480 		return ret;
1481 	}
1482 
1483 	/* Validate this entry, we currently only support direct messaging. */
1484 	if ((config_32 & ~(FFA_PARTITION_DIRECT_REQ_RECV |
1485 			  FFA_PARTITION_DIRECT_REQ_SEND)) != 0U) {
1486 		WARN("Invalid Secure Partition messaging method (0x%x)\n",
1487 		     config_32);
1488 		return -EINVAL;
1489 	}
1490 
1491 	sp->properties = config_32;
1492 
1493 	ret = fdt_read_uint32(sp_manifest, node,
1494 			      "execution-ctx-count", &config_32);
1495 
1496 	if (ret != 0) {
1497 		ERROR("Missing SP Execution Context Count.\n");
1498 		return ret;
1499 	}
1500 
1501 	/*
1502 	 * Ensure this field is set correctly in the manifest. However, since
1503 	 * this is currently a hardcoded value for S-EL1 partitions, we don't
1504 	 * need to save it here, just validate it.
1505 	 */
1506 	if (config_32 != PLATFORM_CORE_COUNT) {
1507 		ERROR("SP Execution Context Count (%u) must be %u.\n",
1508 			config_32, PLATFORM_CORE_COUNT);
1509 		return -EINVAL;
1510 	}
1511 
1512 	/*
1513 	 * Look for the optional fields that are expected to be present in
1514 	 * an SP manifest.
1515 	 */
1516 	ret = fdt_read_uint32(sp_manifest, node, "id", &config_32);
1517 	if (ret != 0) {
1518 		WARN("Missing Secure Partition ID.\n");
1519 	} else {
1520 		if (!is_ffa_secure_id_valid(config_32)) {
1521 			ERROR("Invalid Secure Partition ID (0x%x).\n",
1522 			      config_32);
1523 			return -EINVAL;
1524 		}
1525 		sp->sp_id = config_32;
1526 	}
1527 
1528 	ret = fdt_read_uint32(sp_manifest, node,
1529 			      "power-management-messages", &config_32);
1530 	if (ret != 0) {
1531 		WARN("Missing Power Management Messages entry.\n");
1532 	} else {
1533 		/*
1534 		 * Ensure only the currently supported power messages have
1535 		 * been requested.
1536 		 */
1537 		if (config_32 & ~(FFA_PM_MSG_SUB_CPU_OFF |
1538 				  FFA_PM_MSG_SUB_CPU_SUSPEND |
1539 				  FFA_PM_MSG_SUB_CPU_SUSPEND_RESUME)) {
1540 			ERROR("Requested unsupported PM messages (%x)\n",
1541 			      config_32);
1542 			return -EINVAL;
1543 		}
1544 		sp->pwr_mgmt_msgs = config_32;
1545 	}
1546 
1547 	ret = fdt_read_uint32(sp_manifest, node,
1548 			      "gp-register-num", &config_32);
1549 	if (ret != 0) {
1550 		WARN("Missing boot information register.\n");
1551 	} else {
1552 		/* Check if a register number between 0-3 is specified. */
1553 		if (config_32 < 4) {
1554 			*boot_info_reg = config_32;
1555 		} else {
1556 			WARN("Incorrect boot information register (%u).\n",
1557 			     config_32);
1558 		}
1559 	}
1560 
1561 	return 0;
1562 }
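/*
 * For reference, a hedged sketch of the manifest properties consumed above.
 * The property names come from the parser; the values are placeholders and
 * are not taken from any real platform manifest:
 *
 *	compatible                = "arm,ffa-manifest-1.0";
 *	ffa-version               = <0x00010001>;  // e.g. FF-A v1.1
 *	uuid                      = <0x1 0x2 0x3 0x4>;
 *	id                        = <0x8001>;      // secure world ID convention
 *	exception-level           = <...>;         // must correspond to S_EL1
 *	execution-state           = <...>;         // AArch64 or AArch32
 *	execution-ctx-count       = <...>;         // must equal PLATFORM_CORE_COUNT
 *	messaging-method          = <...>;         // direct req send and/or receive
 *	power-management-messages = <...>;         // subset of supported PM messages
 *	gp-register-num           = <0>;           // boot info register, 0-3
 */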
1563 
1564 /*******************************************************************************
1565  * This function gets the Secure Partition Manifest base and maps the manifest
1566  * region.
1567  * Currently only one Secure Partition manifest is considered which is used to
1568  * prepare the context for the single Secure Partition.
1569  ******************************************************************************/
1570 static int find_and_prepare_sp_context(void)
1571 {
1572 	void *sp_manifest;
1573 	uintptr_t manifest_base;
1574 	uintptr_t manifest_base_align;
1575 	entry_point_info_t *next_image_ep_info;
1576 	int32_t ret, boot_info_reg = -1;
1577 	struct secure_partition_desc *sp;
1578 
1579 	next_image_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
1580 	if (next_image_ep_info == NULL) {
1581 		WARN("No Secure Partition image provided by BL2.\n");
1582 		return -ENOENT;
1583 	}
1584 
1585 	sp_manifest = (void *)next_image_ep_info->args.arg0;
1586 	if (sp_manifest == NULL) {
1587 		WARN("Secure Partition manifest absent.\n");
1588 		return -ENOENT;
1589 	}
1590 
1591 	manifest_base = (uintptr_t)sp_manifest;
1592 	manifest_base_align = page_align(manifest_base, DOWN);
1593 
1594 	/*
1595 	 * Map the secure partition manifest region in the EL3 translation
1596 	 * regime.
1597 	 * Map an area of (2 * PAGE_SIZE) for now: after the manifest base is
1598 	 * aligned down to a page boundary, a single PAGE_SIZE region may not
1599 	 * completely accommodate the secure partition manifest.
1600 	 */
1601 	ret = mmap_add_dynamic_region((unsigned long long)manifest_base_align,
1602 				      manifest_base_align,
1603 				      PAGE_SIZE * 2,
1604 				      MT_RO_DATA);
1605 	if (ret != 0) {
1606 		ERROR("Error while mapping SP manifest (%d).\n", ret);
1607 		return ret;
1608 	}
1609 
1610 	ret = fdt_node_offset_by_compatible(sp_manifest, -1,
1611 					    "arm,ffa-manifest-1.0");
1612 	if (ret < 0) {
1613 		ERROR("Error while reading the SP manifest.\n");
1614 		return -EINVAL;
1615 	}
1616 
1617 	/*
1618 	 * Store the size of the manifest so that it can be used later when
1619 	 * passing the manifest as boot information.
1620 	 */
1621 	next_image_ep_info->args.arg1 = fdt_totalsize(sp_manifest);
1622 	INFO("Manifest size = %lu bytes.\n", next_image_ep_info->args.arg1);
1623 
1624 	/*
1625 	 * Select an SP descriptor for initialising the partition's execution
1626 	 * context on the primary CPU.
1627 	 */
1628 	sp = spmc_get_current_sp_ctx();
1629 
1630 	/* Initialize entry point information for the SP */
1631 	SET_PARAM_HEAD(next_image_ep_info, PARAM_EP, VERSION_1,
1632 		       SECURE | EP_ST_ENABLE);
1633 
1634 	/* Parse the SP manifest. */
1635 	ret = sp_manifest_parse(sp_manifest, ret, sp, next_image_ep_info,
1636 				&boot_info_reg);
1637 	if (ret != 0) {
1638 		ERROR("Error in Secure Partition manifest parsing.\n");
1639 		return ret;
1640 	}
1641 
1642 	/* Check that the runtime EL in the manifest was correct. */
1643 	if (sp->runtime_el != S_EL1) {
1644 		ERROR("Unexpected runtime EL: %d\n", sp->runtime_el);
1645 		return -EINVAL;
1646 	}
1647 
1648 	/* Perform any common initialisation. */
1649 	spmc_sp_common_setup(sp, next_image_ep_info, boot_info_reg);
1650 
1651 	/* Perform any initialisation specific to S-EL1 SPs. */
1652 	spmc_el1_sp_setup(sp, next_image_ep_info);
1653 
1654 	/* Initialize the SP context with the required ep info. */
1655 	spmc_sp_common_ep_commit(sp, next_image_ep_info);
1656 
1657 	return 0;
1658 }
1659 
1660 /*******************************************************************************
1661  * This function validates the EL3 Logical Partition descriptors and then
1662  * runs each Logical Partition's initialisation handler.
1663  ******************************************************************************/
1664 static int32_t logical_sp_init(void)
1665 {
1666 	int32_t rc = 0;
1667 	struct el3_lp_desc *el3_lp_descs;
1668 
1669 	/* Perform initial validation of the Logical Partitions. */
1670 	rc = el3_sp_desc_validate();
1671 	if (rc != 0) {
1672 		ERROR("Logical Partition validation failed!\n");
1673 		return rc;
1674 	}
1675 
1676 	el3_lp_descs = get_el3_lp_array();
1677 
1678 	INFO("Logical Secure Partition init start.\n");
1679 	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
1680 		rc = el3_lp_descs[i].init();
1681 		if (rc != 0) {
1682 			ERROR("Logical SP (0x%x) Failed to Initialize\n",
1683 			      el3_lp_descs[i].sp_id);
1684 			return rc;
1685 		}
1686 		VERBOSE("Logical SP (0x%x) Initialized\n",
1687 			      el3_lp_descs[i].sp_id);
1688 	}
1689 
1690 	INFO("Logical Secure Partition init completed.\n");
1691 
1692 	return rc;
1693 }
1694 
1695 uint64_t spmc_sp_synchronous_entry(struct sp_exec_ctx *ec)
1696 {
1697 	uint64_t rc;
1698 
1699 	assert(ec != NULL);
1700 
1701 	/* Assign the context of the SP to this CPU */
1702 	cm_set_context(&(ec->cpu_ctx), SECURE);
1703 
1704 	/* Restore the context assigned above */
1705 	cm_el1_sysregs_context_restore(SECURE);
1706 	cm_set_next_eret_context(SECURE);
1707 
1708 	/* Invalidate TLBs at EL1. */
1709 	tlbivmalle1();
1710 	dsbish();
1711 
1712 	/* Enter Secure Partition */
1713 	rc = spm_secure_partition_enter(&ec->c_rt_ctx);
1714 
1715 	/* Save secure state */
1716 	cm_el1_sysregs_context_save(SECURE);
1717 
1718 	return rc;
1719 }
1720 
1721 /*******************************************************************************
1722  * SPMC Helper Functions.
1723  ******************************************************************************/
1724 static int32_t sp_init(void)
1725 {
1726 	uint64_t rc;
1727 	struct secure_partition_desc *sp;
1728 	struct sp_exec_ctx *ec;
1729 
1730 	sp = spmc_get_current_sp_ctx();
1731 	ec = spmc_get_sp_ec(sp);
1732 	ec->rt_model = RT_MODEL_INIT;
1733 	ec->rt_state = RT_STATE_RUNNING;
1734 
1735 	INFO("Secure Partition (0x%x) init start.\n", sp->sp_id);
1736 
1737 	rc = spmc_sp_synchronous_entry(ec);
1738 	if (rc != 0) {
1739 		/* Indicate SP init was not successful. */
1740 		ERROR("SP (0x%x) failed to initialize (%lu).\n",
1741 		      sp->sp_id, rc);
1742 		return 0;
1743 	}
1744 
1745 	ec->rt_state = RT_STATE_WAITING;
1746 	INFO("Secure Partition initialized.\n");
1747 
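	/* Indicate SP init was successful. */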
1748 	return 1;
1749 }
1750 
1751 static void initialize_sp_descs(void)
1752 {
1753 	struct secure_partition_desc *sp;
1754 
1755 	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
1756 		sp = &sp_desc[i];
1757 		sp->sp_id = INV_SP_ID;
1758 		sp->mailbox.rx_buffer = NULL;
1759 		sp->mailbox.tx_buffer = NULL;
1760 		sp->mailbox.state = MAILBOX_STATE_EMPTY;
1761 		sp->secondary_ep = 0;
1762 	}
1763 }
1764 
1765 static void initialize_ns_ep_descs(void)
1766 {
1767 	struct ns_endpoint_desc *ns_ep;
1768 
1769 	for (unsigned int i = 0U; i < NS_PARTITION_COUNT; i++) {
1770 		ns_ep = &ns_ep_desc[i];
1771 		/*
1772 		 * Clashes with the Hypervisor ID but will not be a
1773 		 * problem in practice.
1774 		 */
1775 		ns_ep->ns_ep_id = 0;
1776 		ns_ep->ffa_version = 0;
1777 		ns_ep->mailbox.rx_buffer = NULL;
1778 		ns_ep->mailbox.tx_buffer = NULL;
1779 		ns_ep->mailbox.state = MAILBOX_STATE_EMPTY;
1780 	}
1781 }
1782 
1783 /*******************************************************************************
1784  * Initialize SPMC attributes for the SPMD.
1785  ******************************************************************************/
1786 void spmc_populate_attrs(spmc_manifest_attribute_t *spmc_attrs)
1787 {
1788 	spmc_attrs->major_version = FFA_VERSION_MAJOR;
1789 	spmc_attrs->minor_version = FFA_VERSION_MINOR;
1790 	spmc_attrs->exec_state = MODE_RW_64;
1791 	spmc_attrs->spmc_id = FFA_SPMC_ID;
1792 }
1793 
1794 /*******************************************************************************
1795  * Initialize contexts of all Secure Partitions.
1796  ******************************************************************************/
1797 int32_t spmc_setup(void)
1798 {
1799 	int32_t ret;
1800 	uint32_t flags;
1801 
1802 	/* Initialize endpoint descriptors */
1803 	initialize_sp_descs();
1804 	initialize_ns_ep_descs();
1805 
1806 	/*
1807 	 * Retrieve the datastore allocated by platform code for tracking
1808 	 * shared memory requests and zero the region.
1809 	 */
1810 	ret = plat_spmc_shmem_datastore_get(&spmc_shmem_obj_state.data,
1811 					    &spmc_shmem_obj_state.data_size);
1812 	if (ret != 0) {
1813 		ERROR("Failed to obtain memory descriptor backing store!\n");
1814 		return ret;
1815 	}
1816 	memset(spmc_shmem_obj_state.data, 0, spmc_shmem_obj_state.data_size);
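	/*
	 * Illustrative sketch (not part of this file): a platform would
	 * typically back this hook with a statically allocated buffer.
	 * Assuming a hypothetical platform-defined size macro
	 * PLAT_SPMC_SHMEM_DATASTORE_SIZE, an implementation could look like:
	 *
	 *   static uint8_t shmem_datastore[PLAT_SPMC_SHMEM_DATASTORE_SIZE];
	 *
	 *   int plat_spmc_shmem_datastore_get(uint8_t **datastore, size_t *size)
	 *   {
	 *           *datastore = shmem_datastore;
	 *           *size = sizeof(shmem_datastore);
	 *           return 0;
	 *   }
	 */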
1817 
1818 	/* Setup logical SPs. */
1819 	ret = logical_sp_init();
1820 	if (ret != 0) {
1821 		ERROR("Failed to initialize Logical Partitions.\n");
1822 		return ret;
1823 	}
1824 
1825 	/* Perform physical SP setup. */
1826 
1827 	/* Disable MMU at EL1 (initialized by BL2) */
1828 	disable_mmu_icache_el1();
1829 
1830 	/* Initialize context of the SP */
1831 	INFO("Secure Partition context setup start.\n");
1832 
1833 	ret = find_and_prepare_sp_context();
1834 	if (ret != 0) {
1835 		ERROR("Failed to find and prepare the SP context.\n");
1836 		return ret;
1837 	}
1838 
1839 	/* Register power management hooks with PSCI */
1840 	psci_register_spd_pm_hook(&spmc_pm);
1841 
1842 	/*
1843 	 * Register an interrupt handler for S-EL1 interrupts
1844 	 * that are generated while execution is in the
1845 	 * non-secure state.
1846 	 */
1847 	flags = 0;
1848 	set_interrupt_rm_flag(flags, NON_SECURE);
1849 	ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
1850 					      spmc_sp_interrupt_handler,
1851 					      flags);
1852 	if (ret != 0) {
1853 		ERROR("Failed to register interrupt handler! (%d)\n", ret);
1854 		panic();
1855 	}
1856 
1857 	/* Register init function for deferred init. */
1858 	bl31_register_bl32_init(&sp_init);
1859 
1860 	INFO("Secure Partition setup done.\n");
1861 
1862 	return 0;
1863 }
1864 
1865 /*******************************************************************************
1866  * Secure Partition Manager SMC handler.
1867  ******************************************************************************/
1868 uint64_t spmc_smc_handler(uint32_t smc_fid,
1869 			  bool secure_origin,
1870 			  uint64_t x1,
1871 			  uint64_t x2,
1872 			  uint64_t x3,
1873 			  uint64_t x4,
1874 			  void *cookie,
1875 			  void *handle,
1876 			  uint64_t flags)
1877 {
1878 	switch (smc_fid) {
1879 
1880 	case FFA_VERSION:
1881 		return ffa_version_handler(smc_fid, secure_origin, x1, x2, x3,
1882 					   x4, cookie, handle, flags);
1883 
1884 	case FFA_SPM_ID_GET:
1885 		return ffa_spm_id_get_handler(smc_fid, secure_origin, x1, x2,
1886 					     x3, x4, cookie, handle, flags);
1887 
1888 	case FFA_ID_GET:
1889 		return ffa_id_get_handler(smc_fid, secure_origin, x1, x2, x3,
1890 					  x4, cookie, handle, flags);
1891 
1892 	case FFA_FEATURES:
1893 		return ffa_features_handler(smc_fid, secure_origin, x1, x2, x3,
1894 					    x4, cookie, handle, flags);
1895 
1896 	case FFA_SECONDARY_EP_REGISTER_SMC64:
1897 		return ffa_sec_ep_register_handler(smc_fid, secure_origin, x1,
1898 						   x2, x3, x4, cookie, handle,
1899 						   flags);
1900 
1901 	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
1902 	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
1903 		return direct_req_smc_handler(smc_fid, secure_origin, x1, x2,
1904 					      x3, x4, cookie, handle, flags);
1905 
1906 	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
1907 	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
1908 		return direct_resp_smc_handler(smc_fid, secure_origin, x1, x2,
1909 					       x3, x4, cookie, handle, flags);
1910 
1911 	case FFA_RXTX_MAP_SMC32:
1912 	case FFA_RXTX_MAP_SMC64:
1913 		return rxtx_map_handler(smc_fid, secure_origin, x1, x2, x3, x4,
1914 					cookie, handle, flags);
1915 
1916 	case FFA_RXTX_UNMAP:
1917 		return rxtx_unmap_handler(smc_fid, secure_origin, x1, x2, x3,
1918 					  x4, cookie, handle, flags);
1919 
1920 	case FFA_PARTITION_INFO_GET:
1921 		return partition_info_get_handler(smc_fid, secure_origin, x1,
1922 						  x2, x3, x4, cookie, handle,
1923 						  flags);
1924 
1925 	case FFA_RX_RELEASE:
1926 		return rx_release_handler(smc_fid, secure_origin, x1, x2, x3,
1927 					  x4, cookie, handle, flags);
1928 
1929 	case FFA_MSG_WAIT:
1930 		return msg_wait_handler(smc_fid, secure_origin, x1, x2, x3, x4,
1931 					cookie, handle, flags);
1932 
1933 	case FFA_ERROR:
1934 		return ffa_error_handler(smc_fid, secure_origin, x1, x2, x3, x4,
1935 					cookie, handle, flags);
1936 
1937 	case FFA_MSG_RUN:
1938 		return ffa_run_handler(smc_fid, secure_origin, x1, x2, x3, x4,
1939 				       cookie, handle, flags);
1940 
1941 	case FFA_MEM_SHARE_SMC32:
1942 	case FFA_MEM_SHARE_SMC64:
1943 	case FFA_MEM_LEND_SMC32:
1944 	case FFA_MEM_LEND_SMC64:
1945 		return spmc_ffa_mem_send(smc_fid, secure_origin, x1, x2, x3, x4,
1946 					 cookie, handle, flags);
1947 
1948 	case FFA_MEM_FRAG_TX:
1949 		return spmc_ffa_mem_frag_tx(smc_fid, secure_origin, x1, x2, x3,
1950 					    x4, cookie, handle, flags);
1951 
1952 	case FFA_MEM_FRAG_RX:
1953 		return spmc_ffa_mem_frag_rx(smc_fid, secure_origin, x1, x2, x3,
1954 					    x4, cookie, handle, flags);
1955 
1956 	case FFA_MEM_RETRIEVE_REQ_SMC32:
1957 	case FFA_MEM_RETRIEVE_REQ_SMC64:
1958 		return spmc_ffa_mem_retrieve_req(smc_fid, secure_origin, x1, x2,
1959 						 x3, x4, cookie, handle, flags);
1960 
1961 	case FFA_MEM_RELINQUISH:
1962 		return spmc_ffa_mem_relinquish(smc_fid, secure_origin, x1, x2,
1963 					       x3, x4, cookie, handle, flags);
1964 
1965 	case FFA_MEM_RECLAIM:
1966 		return spmc_ffa_mem_reclaim(smc_fid, secure_origin, x1, x2, x3,
1967 					    x4, cookie, handle, flags);
1968 
1969 	default:
1970 		WARN("Unsupported FF-A call 0x%08x.\n", smc_fid);
1971 		break;
1972 	}
1973 	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1974 }
1975 
1976 /*******************************************************************************
1977  * This function is the handler registered for S-EL1 interrupts by the SPMC. It
1978  * validates the interrupt and upon success arranges entry into the SP for
1979  * handling the interrupt.
1980  ******************************************************************************/
1981 static uint64_t spmc_sp_interrupt_handler(uint32_t id,
1982 					  uint32_t flags,
1983 					  void *handle,
1984 					  void *cookie)
1985 {
1986 	struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
1987 	struct sp_exec_ctx *ec;
1988 	uint32_t linear_id = plat_my_core_pos();
1989 
1990 	/* Sanity check for a NULL pointer dereference. */
1991 	/* Sanity check to avoid a NULL pointer dereference. */
1992 
1993 	/* Check the security state when the exception was generated. */
1994 	assert(get_interrupt_src_ss(flags) == NON_SECURE);
1995 
1996 	/* Panic if not an S-EL1 Partition. */
1997 	if (sp->runtime_el != S_EL1) {
1998 		ERROR("Interrupt received for a non S-EL1 SP on core%u.\n",
1999 		      linear_id);
2000 		panic();
2001 	}
2002 
2003 	/* Obtain a reference to the SP execution context. */
2004 	ec = spmc_get_sp_ec(sp);
2005 
2006 	/* Ensure the execution context is in the waiting state, else panic. */
2007 	if (ec->rt_state != RT_STATE_WAITING) {
2008 		ERROR("SP EC on core%u is not waiting (%u), it is (%u).\n",
2009 		      linear_id, RT_STATE_WAITING, ec->rt_state);
2010 		panic();
2011 	}
2012 
2013 	/* Update the runtime model and state of the partition. */
2014 	ec->rt_model = RT_MODEL_INTR;
2015 	ec->rt_state = RT_STATE_RUNNING;
2016 
2017 	VERBOSE("SP (0x%x) interrupt start on core%u.\n", sp->sp_id, linear_id);
2018 
2019 	/*
2020 	 * Forward the interrupt to the S-EL1 SP. The interrupt ID is not
2021 	 * populated as the SP can determine this by itself.
2022 	 */
2023 	return spmd_smc_switch_state(FFA_INTERRUPT, false,
2024 				     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
2025 				     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
2026 				     handle);
2027 }
2028