xref: /rk3399_ARM-atf/services/std_svc/spm/el3_spmc/spmc_main.c (revision 2e21921502b1317031cf2a2f69c5d47ac88a505d)
1 /*
2  * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <errno.h>
9 
10 #include <arch_helpers.h>
11 #include <bl31/bl31.h>
12 #include <bl31/ehf.h>
13 #include <bl31/interrupt_mgmt.h>
14 #include <common/debug.h>
15 #include <common/fdt_wrappers.h>
16 #include <common/runtime_svc.h>
17 #include <common/uuid.h>
18 #include <lib/el3_runtime/context_mgmt.h>
19 #include <lib/smccc.h>
20 #include <lib/utils.h>
21 #include <lib/xlat_tables/xlat_tables_v2.h>
22 #include <libfdt.h>
23 #include <plat/common/platform.h>
24 #include <services/el3_spmc_logical_sp.h>
25 #include <services/ffa_svc.h>
26 #include <services/spmc_svc.h>
27 #include <services/spmd_svc.h>
28 #include "spmc.h"
29 #include "spmc_shared_mem.h"
30 
31 #include <platform_def.h>
32 
33 /* Declare the maximum number of SPs and EL3 LPs. */
34 #define MAX_SP_LP_PARTITIONS (SECURE_PARTITION_COUNT + MAX_EL3_LP_DESCS_COUNT)
35 
36 /*
37  * Allocate a secure partition descriptor to describe each SP in the system that
38  * does not reside at EL3.
39  */
40 static struct secure_partition_desc sp_desc[SECURE_PARTITION_COUNT];
41 
42 /*
43  * Allocate an NS endpoint descriptor to describe each VM and the Hypervisor in
44  * the system that interacts with a SP. It is used to track the Hypervisor
45  * buffer pair, version and ID for now. It could be extended to track VM
46  * properties when the SPMC supports indirect messaging.
47  */
48 static struct ns_endpoint_desc ns_ep_desc[NS_PARTITION_COUNT];
49 
50 static uint64_t spmc_sp_interrupt_handler(uint32_t id,
51 					  uint32_t flags,
52 					  void *handle,
53 					  void *cookie);
54 
55 /*
56  * Helper function to obtain the array storing the EL3
57  * Logical Partition descriptors.
58  */
59 struct el3_lp_desc *get_el3_lp_array(void)
60 {
61 	return (struct el3_lp_desc *) EL3_LP_DESCS_START;
62 }
63 
64 /*
65  * Helper function to obtain the descriptor of the last SP to which control
66  * was handed on this physical cpu. Currently, we assume there is only one SP.
67  * TODO: Expand to track multiple partitions when required.
68  */
69 struct secure_partition_desc *spmc_get_current_sp_ctx(void)
70 {
71 	return &(sp_desc[ACTIVE_SP_DESC_INDEX]);
72 }
73 
74 /*
75  * Helper function to obtain the execution context of an SP on the
76  * current physical cpu.
77  */
78 struct sp_exec_ctx *spmc_get_sp_ec(struct secure_partition_desc *sp)
79 {
80 	return &(sp->ec[get_ec_index(sp)]);
81 }
82 
83 /* Helper function to get pointer to SP context from its ID. */
84 struct secure_partition_desc *spmc_get_sp_ctx(uint16_t id)
85 {
86 	/* Check for Secure World Partitions. */
87 	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
88 		if (sp_desc[i].sp_id == id) {
89 			return &(sp_desc[i]);
90 		}
91 	}
92 	return NULL;
93 }
94 
95 /*
96  * Helper function to obtain the descriptor of the Hypervisor or OS kernel.
97  * We assume that the first descriptor is reserved for this entity.
98  */
99 struct ns_endpoint_desc *spmc_get_hyp_ctx(void)
100 {
101 	return &(ns_ep_desc[0]);
102 }
103 
104 /*
105  * Helper function to obtain the RX/TX buffer pair descriptor of the Hypervisor
106  * or OS kernel in the normal world or the last SP that was run.
107  */
108 struct mailbox *spmc_get_mbox_desc(bool secure_origin)
109 {
110 	/* Obtain the RX/TX buffer pair descriptor. */
111 	if (secure_origin) {
112 		return &(spmc_get_current_sp_ctx()->mailbox);
113 	} else {
114 		return &(spmc_get_hyp_ctx()->mailbox);
115 	}
116 }
117 
118 /******************************************************************************
119  * This function returns to the place where spmc_sp_synchronous_entry() was
120  * called originally.
121  ******************************************************************************/
122 __dead2 void spmc_sp_synchronous_exit(struct sp_exec_ctx *ec, uint64_t rc)
123 {
124 	/*
125 	 * The SPM must have initiated the original request through a
126 	 * synchronous entry into the secure partition. Jump back to the
127 	 * original C runtime context with the value of rc in x0.
128 	 */
129 	spm_secure_partition_exit(ec->c_rt_ctx, rc);
130 
131 	panic();
132 }
133 
134 /*******************************************************************************
135  * Return FFA_ERROR with specified error code.
136  ******************************************************************************/
137 uint64_t spmc_ffa_error_return(void *handle, int error_code)
138 {
139 	SMC_RET8(handle, FFA_ERROR,
140 		 FFA_TARGET_INFO_MBZ, error_code,
141 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
142 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
143 }
144 
145 /******************************************************************************
146  * Helper function to validate a secure partition ID to ensure it does not
147  * conflict with any other FF-A component and follows the convention to
148  * indicate it resides within the secure world.
149  ******************************************************************************/
150 bool is_ffa_secure_id_valid(uint16_t partition_id)
151 {
152 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
153 
154 	/* Ensure the ID is not the invalid partition ID. */
155 	if (partition_id == INV_SP_ID) {
156 		return false;
157 	}
158 
159 	/* Ensure the ID is not the SPMD ID. */
160 	if (partition_id == SPMD_DIRECT_MSG_ENDPOINT_ID) {
161 		return false;
162 	}
163 
164 	/*
165 	 * Ensure the ID follows the convention to indicate it resides
166 	 * in the secure world.
167 	 */
168 	if (!ffa_is_secure_world_id(partition_id)) {
169 		return false;
170 	}
171 
172 	/* Ensure we don't conflict with the SPMC partition ID. */
173 	if (partition_id == FFA_SPMC_ID) {
174 		return false;
175 	}
176 
177 	/* Ensure we do not already have an SP context with this ID. */
178 	if (spmc_get_sp_ctx(partition_id)) {
179 		return false;
180 	}
181 
182 	/* Ensure we don't clash with any Logical SPs. */
183 	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
184 		if (el3_lp_descs[i].sp_id == partition_id) {
185 			return false;
186 		}
187 	}
188 
189 	return true;
190 }
191 
192 /*******************************************************************************
193  * This function either forwards the request to the other world or returns
194  * with an ERET depending on the source of the call.
195  * We can assume that the destination is for an entity at a lower exception
196  * level as any messages destined for a logical SP resident in EL3 will have
197  * already been taken care of by the SPMC before entering this function.
198  ******************************************************************************/
199 static uint64_t spmc_smc_return(uint32_t smc_fid,
200 				bool secure_origin,
201 				uint64_t x1,
202 				uint64_t x2,
203 				uint64_t x3,
204 				uint64_t x4,
205 				void *handle,
206 				void *cookie,
207 				uint64_t flags,
208 				uint16_t dst_id)
209 {
210 	/* If the destination is in the normal world always go via the SPMD. */
211 	if (ffa_is_normal_world_id(dst_id)) {
212 		return spmd_smc_handler(smc_fid, x1, x2, x3, x4,
213 					cookie, handle, flags);
214 	}
215 	/*
216 	 * If the caller is secure and we want to return to the secure world,
217 	 * ERET directly.
218 	 */
219 	else if (secure_origin && ffa_is_secure_world_id(dst_id)) {
220 		SMC_RET5(handle, smc_fid, x1, x2, x3, x4);
221 	}
222 	/* If we originated in the normal world then switch contexts. */
223 	else if (!secure_origin && ffa_is_secure_world_id(dst_id)) {
224 		return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2,
225 					     x3, x4, handle);
226 	} else {
227 		/* Unknown State. */
228 		panic();
229 	}
230 
231 	/* Shouldn't be Reached. */
232 	return 0;
233 }
234 
235 /*******************************************************************************
236  * FF-A ABI Handlers.
237  ******************************************************************************/
238 
239 /*******************************************************************************
240  * Helper function to validate arg2 as part of a direct message.
241  ******************************************************************************/
242 static inline bool direct_msg_validate_arg2(uint64_t x2)
243 {
244 	/* Check message type. */
245 	if (x2 & FFA_FWK_MSG_BIT) {
246 		/* We have a framework message, ensure it is a known message. */
247 		if (x2 & ~(FFA_FWK_MSG_MASK | FFA_FWK_MSG_BIT)) {
248 			VERBOSE("Invalid message format 0x%lx.\n", x2);
249 			return false;
250 		}
251 	} else {
252 		/* We have a partition message, ensure x2 is not set. */
253 		if (x2 != (uint64_t) 0) {
254 			VERBOSE("Arg2 MBZ for partition messages. (0x%lx).\n",
255 				x2);
256 			return false;
257 		}
258 	}
259 	return true;
260 }
261 
262 /*******************************************************************************
263  * Handle direct request messages and route to the appropriate destination.
264  ******************************************************************************/
265 static uint64_t direct_req_smc_handler(uint32_t smc_fid,
266 				       bool secure_origin,
267 				       uint64_t x1,
268 				       uint64_t x2,
269 				       uint64_t x3,
270 				       uint64_t x4,
271 				       void *cookie,
272 				       void *handle,
273 				       uint64_t flags)
274 {
275 	uint16_t dst_id = ffa_endpoint_destination(x1);
276 	struct el3_lp_desc *el3_lp_descs;
277 	struct secure_partition_desc *sp;
278 	unsigned int idx;
279 
280 	/* Check if arg2 has been populated correctly based on message type. */
281 	if (!direct_msg_validate_arg2(x2)) {
282 		return spmc_ffa_error_return(handle,
283 					     FFA_ERROR_INVALID_PARAMETER);
284 	}
285 
286 	el3_lp_descs = get_el3_lp_array();
287 
288 	/* Check if the request is destined for a Logical Partition. */
289 	for (unsigned int i = 0U; i < MAX_EL3_LP_DESCS_COUNT; i++) {
290 		if (el3_lp_descs[i].sp_id == dst_id) {
291 			return el3_lp_descs[i].direct_req(
292 					smc_fid, secure_origin, x1, x2, x3, x4,
293 					cookie, handle, flags);
294 		}
295 	}
296 
297 	/*
298 	 * If the request was not targeted at an LSP and originated from the
299 	 * secure world then it is invalid, since an SP cannot call into the
300 	 * Normal world and there is no other SP to call into. If more SPs are
301 	 * added in future, the partition runtime model must also be validated.
302 	 */
303 	if (secure_origin) {
304 		VERBOSE("Direct request not supported to the Normal World.\n");
305 		return spmc_ffa_error_return(handle,
306 					     FFA_ERROR_INVALID_PARAMETER);
307 	}
308 
309 	/* Check if the SP ID is valid. */
310 	sp = spmc_get_sp_ctx(dst_id);
311 	if (sp == NULL) {
312 		VERBOSE("Direct request to unknown partition ID (0x%x).\n",
313 			dst_id);
314 		return spmc_ffa_error_return(handle,
315 					     FFA_ERROR_INVALID_PARAMETER);
316 	}
317 
318 	/*
319 	 * Check that the target execution context is in a waiting state before
320 	 * forwarding the direct request to it.
321 	 */
322 	idx = get_ec_index(sp);
323 	if (sp->ec[idx].rt_state != RT_STATE_WAITING) {
324 		VERBOSE("SP context on core%u is not waiting (%u).\n",
325 			idx, sp->ec[idx].rt_state);
326 		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
327 	}
328 
329 	/*
330 	 * Everything checks out so forward the request to the SP after updating
331 	 * its state and runtime model.
332 	 */
333 	sp->ec[idx].rt_state = RT_STATE_RUNNING;
334 	sp->ec[idx].rt_model = RT_MODEL_DIR_REQ;
335 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
336 			       handle, cookie, flags, dst_id);
337 }
338 
339 /*******************************************************************************
340  * Handle direct response messages and route to the appropriate destination.
341  ******************************************************************************/
342 static uint64_t direct_resp_smc_handler(uint32_t smc_fid,
343 					bool secure_origin,
344 					uint64_t x1,
345 					uint64_t x2,
346 					uint64_t x3,
347 					uint64_t x4,
348 					void *cookie,
349 					void *handle,
350 					uint64_t flags)
351 {
352 	uint16_t dst_id = ffa_endpoint_destination(x1);
353 	struct secure_partition_desc *sp;
354 	unsigned int idx;
355 
356 	/* Check if arg2 has been populated correctly based on message type. */
357 	if (!direct_msg_validate_arg2(x2)) {
358 		return spmc_ffa_error_return(handle,
359 					     FFA_ERROR_INVALID_PARAMETER);
360 	}
361 
362 	/* Check that the response did not originate from the Normal world. */
363 	if (!secure_origin) {
364 		VERBOSE("Direct Response not supported from Normal World.\n");
365 		return spmc_ffa_error_return(handle,
366 					     FFA_ERROR_INVALID_PARAMETER);
367 	}
368 
369 	/*
370 	 * Check that the response is targeted at either the Normal world or
371 	 * the SPMC, e.g. a PM response.
372 	 */
373 	if ((dst_id != FFA_SPMC_ID) && ffa_is_secure_world_id(dst_id)) {
374 		VERBOSE("Direct response to invalid partition ID (0x%x).\n",
375 			dst_id);
376 		return spmc_ffa_error_return(handle,
377 					     FFA_ERROR_INVALID_PARAMETER);
378 	}
379 
380 	/* Obtain the SP descriptor and update its runtime state. */
381 	sp = spmc_get_sp_ctx(ffa_endpoint_source(x1));
382 	if (sp == NULL) {
383 		VERBOSE("Direct response to unknown partition ID (0x%x).\n",
384 			dst_id);
385 		return spmc_ffa_error_return(handle,
386 					     FFA_ERROR_INVALID_PARAMETER);
387 	}
388 
389 	/* Sanity check state is being tracked correctly in the SPMC. */
390 	idx = get_ec_index(sp);
391 	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
392 
393 	/* Ensure SP execution context was in the right runtime model. */
394 	if (sp->ec[idx].rt_model != RT_MODEL_DIR_REQ) {
395 		VERBOSE("SP context on core%u not handling direct req (%u).\n",
396 			idx, sp->ec[idx].rt_model);
397 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
398 	}
399 
400 	/* Update the state of the SP execution context. */
401 	sp->ec[idx].rt_state = RT_STATE_WAITING;
402 
403 	/*
404 	 * If the receiver is the SPMC then perform a synchronous exit,
405 	 * otherwise forward the response to the Normal world.
406 	 */
407 	if (dst_id == FFA_SPMC_ID) {
408 		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
409 		/* Should not get here. */
410 		panic();
411 	}
412 
413 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
414 			       handle, cookie, flags, dst_id);
415 }
416 
417 /*******************************************************************************
418  * This function handles the FFA_MSG_WAIT SMC to allow an SP to relinquish its
419  * cycles.
420  ******************************************************************************/
421 static uint64_t msg_wait_handler(uint32_t smc_fid,
422 				 bool secure_origin,
423 				 uint64_t x1,
424 				 uint64_t x2,
425 				 uint64_t x3,
426 				 uint64_t x4,
427 				 void *cookie,
428 				 void *handle,
429 				 uint64_t flags)
430 {
431 	struct secure_partition_desc *sp;
432 	unsigned int idx;
433 
434 	/*
435 	 * Check that the request did not originate from the Normal world as
436 	 * only the secure world can call this ABI.
437 	 */
438 	if (!secure_origin) {
439 		VERBOSE("Normal world cannot call FFA_MSG_WAIT.\n");
440 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
441 	}
442 
443 	/* Get the descriptor of the SP that invoked FFA_MSG_WAIT. */
444 	sp = spmc_get_current_sp_ctx();
445 	if (sp == NULL) {
446 		return spmc_ffa_error_return(handle,
447 					     FFA_ERROR_INVALID_PARAMETER);
448 	}
449 
450 	/*
451 	 * Get the execution context of the SP that invoked FFA_MSG_WAIT.
452 	 */
453 	idx = get_ec_index(sp);
454 
455 	/* Ensure SP execution context was in the right runtime model. */
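	/*
	 * A direct request must be completed with a direct response rather
	 * than FFA_MSG_WAIT, so deny the call in that runtime model.
	 */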
456 	if (sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) {
457 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
458 	}
459 
460 	/* Sanity check the state is being tracked correctly in the SPMC. */
461 	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
462 
463 	/*
464 	 * Perform a synchronous exit if the partition was initialising. The
465 	 * state is updated by sp_init() after the synchronous entry returns.
466 	 */
467 	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
468 		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
469 		/* Should not get here */
470 		panic();
471 	}
472 
473 	/* Update the state of the SP execution context. */
474 	sp->ec[idx].rt_state = RT_STATE_WAITING;
475 
476 	/* Resume normal world if a secure interrupt was handled. */
477 	if (sp->ec[idx].rt_model == RT_MODEL_INTR) {
478 		/* FFA_MSG_WAIT can only be called from the secure world. */
479 		unsigned int secure_state_in = SECURE;
480 		unsigned int secure_state_out = NON_SECURE;
481 
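		/*
		 * Save the SP's EL1 system register context and restore the
		 * Normal world context that was preempted by the secure
		 * interrupt before returning to it.
		 */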
482 		cm_el1_sysregs_context_save(secure_state_in);
483 		cm_el1_sysregs_context_restore(secure_state_out);
484 		cm_set_next_eret_context(secure_state_out);
485 		SMC_RET0(cm_get_context(secure_state_out));
486 	}
487 
488 	/* Forward the response to the Normal world. */
489 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
490 			       handle, cookie, flags, FFA_NWD_ID);
491 }
492 
493 static uint64_t ffa_error_handler(uint32_t smc_fid,
494 				 bool secure_origin,
495 				 uint64_t x1,
496 				 uint64_t x2,
497 				 uint64_t x3,
498 				 uint64_t x4,
499 				 void *cookie,
500 				 void *handle,
501 				 uint64_t flags)
502 {
503 	struct secure_partition_desc *sp;
504 	unsigned int idx;
505 
506 	/* Check that the call did not originate from the Normal world. */
507 	if (!secure_origin) {
508 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
509 	}
510 
511 	/* Get the descriptor of the SP that invoked FFA_ERROR. */
512 	sp = spmc_get_current_sp_ctx();
513 	if (sp == NULL) {
514 		return spmc_ffa_error_return(handle,
515 					     FFA_ERROR_INVALID_PARAMETER);
516 	}
517 
518 	/* Get the execution context of the SP that invoked FFA_ERROR. */
519 	idx = get_ec_index(sp);
520 
521 	/*
522 	 * We only expect FFA_ERROR to be received during SP initialisation
523 	 * otherwise this is an invalid call.
524 	 */
525 	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
526 		ERROR("SP 0x%x failed to initialize.\n", sp->sp_id);
527 		spmc_sp_synchronous_exit(&sp->ec[idx], x2);
528 		/* Should not get here. */
529 		panic();
530 	}
531 
532 	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
533 }
534 
535 static uint64_t ffa_version_handler(uint32_t smc_fid,
536 				    bool secure_origin,
537 				    uint64_t x1,
538 				    uint64_t x2,
539 				    uint64_t x3,
540 				    uint64_t x4,
541 				    void *cookie,
542 				    void *handle,
543 				    uint64_t flags)
544 {
545 	uint32_t requested_version = x1 & FFA_VERSION_MASK;
546 
547 	if (requested_version & FFA_VERSION_BIT31_MASK) {
548 		/* Invalid encoding, return an error. */
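		/*
		 * Note that FFA_VERSION reports errors directly in w0 rather
		 * than through the FFA_ERROR ABI.
		 */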
549 		SMC_RET1(handle, FFA_ERROR_NOT_SUPPORTED);
550 		/* Execution stops here. */
551 	}
552 
553 	/* Determine the caller to store the requested version. */
554 	if (secure_origin) {
555 		/*
556 		 * Ensure that the SP is reporting the same version as
557 		 * specified in its manifest. If these do not match there is
558 		 * something wrong with the SP.
559 		 * TODO: Should we abort the SP? For now assert this is not
560 		 *       the case.
561 		 */
562 		assert(requested_version ==
563 		       spmc_get_current_sp_ctx()->ffa_version);
564 	} else {
565 		/*
566 		 * If this is called by the normal world, record this
567 		 * information in its descriptor.
568 		 */
569 		spmc_get_hyp_ctx()->ffa_version = requested_version;
570 	}
571 
572 	SMC_RET1(handle, MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
573 					  FFA_VERSION_MINOR));
574 }
575 
576 /*******************************************************************************
577  * Helper function to obtain the FF-A version of the calling partition.
578  ******************************************************************************/
579 uint32_t get_partition_ffa_version(bool secure_origin)
580 {
581 	if (secure_origin) {
582 		return spmc_get_current_sp_ctx()->ffa_version;
583 	} else {
584 		return spmc_get_hyp_ctx()->ffa_version;
585 	}
586 }
587 
588 static uint64_t rxtx_map_handler(uint32_t smc_fid,
589 				 bool secure_origin,
590 				 uint64_t x1,
591 				 uint64_t x2,
592 				 uint64_t x3,
593 				 uint64_t x4,
594 				 void *cookie,
595 				 void *handle,
596 				 uint64_t flags)
597 {
598 	int ret;
599 	uint32_t error_code;
600 	uint32_t mem_atts = secure_origin ? MT_SECURE : MT_NS;
601 	struct mailbox *mbox;
602 	uintptr_t tx_address = x1;
603 	uintptr_t rx_address = x2;
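	/*
	 * The page count is expressed in FF-A 4K pages (FFA_PAGE_SIZE), which
	 * may differ from the EL3 translation granule checked further below.
	 */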
604 	uint32_t page_count = x3 & FFA_RXTX_PAGE_COUNT_MASK; /* Bits [5:0] */
605 	uint32_t buf_size = page_count * FFA_PAGE_SIZE;
606 
607 	/*
608 	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
609 	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
610 	 * ABI on behalf of a VM and reject it if this is the case.
611 	 */
612 	if (tx_address == 0 || rx_address == 0) {
613 		WARN("Mapping RX/TX Buffers on behalf of VM not supported.\n");
614 		return spmc_ffa_error_return(handle,
615 					     FFA_ERROR_INVALID_PARAMETER);
616 	}
617 
618 	/* Ensure the specified buffers are not the same. */
619 	if (tx_address == rx_address) {
620 		WARN("TX Buffer must not be the same as RX Buffer.\n");
621 		return spmc_ffa_error_return(handle,
622 					     FFA_ERROR_INVALID_PARAMETER);
623 	}
624 
625 	/* Ensure the buffer size is not 0. */
626 	if (buf_size == 0U) {
627 		WARN("Buffer size must not be 0\n");
628 		return spmc_ffa_error_return(handle,
629 					     FFA_ERROR_INVALID_PARAMETER);
630 	}
631 
632 	/*
633 	 * Ensure the buffer size is a multiple of the translation granule size
634 	 * in TF-A.
635 	 */
636 	if (buf_size % PAGE_SIZE != 0U) {
637 		WARN("Buffer size must be aligned to translation granule.\n");
638 		return spmc_ffa_error_return(handle,
639 					     FFA_ERROR_INVALID_PARAMETER);
640 	}
641 
642 	/* Obtain the RX/TX buffer pair descriptor. */
643 	mbox = spmc_get_mbox_desc(secure_origin);
644 
645 	spin_lock(&mbox->lock);
646 
647 	/* Check if buffers have already been mapped. */
648 	if (mbox->rx_buffer != 0 || mbox->tx_buffer != 0) {
649 		WARN("RX/TX Buffers already mapped (%p/%p)\n",
650 		     (void *) mbox->rx_buffer, (void *)mbox->tx_buffer);
651 		error_code = FFA_ERROR_DENIED;
652 		goto err;
653 	}
654 
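	/*
	 * From EL3 the caller's TX buffer is only ever read while its RX
	 * buffer is written, hence the RO/RW mapping attributes below.
	 */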
655 	/* memmap the TX buffer as read only. */
656 	ret = mmap_add_dynamic_region(tx_address, /* PA */
657 			tx_address, /* VA */
658 			buf_size, /* size */
659 			mem_atts | MT_RO_DATA); /* attrs */
660 	if (ret != 0) {
661 		/* Return the correct error code. */
662 		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
663 						FFA_ERROR_INVALID_PARAMETER;
664 		WARN("Unable to map TX buffer: %d\n", error_code);
665 		goto err;
666 	}
667 
668 	/* memmap the RX buffer as read write. */
669 	ret = mmap_add_dynamic_region(rx_address, /* PA */
670 			rx_address, /* VA */
671 			buf_size, /* size */
672 			mem_atts | MT_RW_DATA); /* attrs */
673 
674 	if (ret != 0) {
675 		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
676 						FFA_ERROR_INVALID_PARAMETER;
677 		WARN("Unable to map RX buffer: %d\n", error_code);
678 		/* Unmap the TX buffer again. */
679 		mmap_remove_dynamic_region(tx_address, buf_size);
680 		goto err;
681 	}
682 
683 	mbox->tx_buffer = (void *) tx_address;
684 	mbox->rx_buffer = (void *) rx_address;
685 	mbox->rxtx_page_count = page_count;
686 	spin_unlock(&mbox->lock);
687 
688 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
689 	/* Execution stops here. */
690 err:
691 	spin_unlock(&mbox->lock);
692 	return spmc_ffa_error_return(handle, error_code);
693 }
694 
695 static uint64_t rxtx_unmap_handler(uint32_t smc_fid,
696 				   bool secure_origin,
697 				   uint64_t x1,
698 				   uint64_t x2,
699 				   uint64_t x3,
700 				   uint64_t x4,
701 				   void *cookie,
702 				   void *handle,
703 				   uint64_t flags)
704 {
705 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
706 	uint32_t buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
707 
708 	/*
709 	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
710 	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
711 	 * ABI on behalf of a VM and reject it if this is the case.
712 	 */
713 	if (x1 != 0UL) {
714 		return spmc_ffa_error_return(handle,
715 					     FFA_ERROR_INVALID_PARAMETER);
716 	}
717 
718 	spin_lock(&mbox->lock);
719 
720 	/* Check if buffers are currently mapped. */
721 	if (mbox->rx_buffer == 0 || mbox->tx_buffer == 0) {
722 		spin_unlock(&mbox->lock);
723 		return spmc_ffa_error_return(handle,
724 					     FFA_ERROR_INVALID_PARAMETER);
725 	}
726 
727 	/* Unmap RX Buffer */
728 	if (mmap_remove_dynamic_region((uintptr_t) mbox->rx_buffer,
729 				       buf_size) != 0) {
730 		WARN("Unable to unmap RX buffer!\n");
731 	}
732 
733 	mbox->rx_buffer = 0;
734 
735 	/* Unmap TX Buffer */
736 	if (mmap_remove_dynamic_region((uintptr_t) mbox->tx_buffer,
737 				       buf_size) != 0) {
738 		WARN("Unable to unmap TX buffer!\n");
739 	}
740 
741 	mbox->tx_buffer = 0;
742 	mbox->rxtx_page_count = 0;
743 
744 	spin_unlock(&mbox->lock);
745 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
746 }
747 
748 /*
749  * Collate the partition information in a v1.1 partition information
750  * descriptor format; this will be converted later if required.
751  */
752 static int partition_info_get_handler_v1_1(uint32_t *uuid,
753 					   struct ffa_partition_info_v1_1
754 						  *partitions,
755 					   uint32_t max_partitions,
756 					   uint32_t *partition_count)
757 {
758 	uint32_t index;
759 	struct ffa_partition_info_v1_1 *desc;
760 	bool null_uuid = is_null_uuid(uuid);
761 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
762 
763 	/* Deal with Logical Partitions. */
764 	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
765 		if (null_uuid || uuid_match(uuid, el3_lp_descs[index].uuid)) {
766 			/* Found a matching UUID, populate appropriately. */
767 			if (*partition_count >= max_partitions) {
768 				return FFA_ERROR_NO_MEMORY;
769 			}
770 
771 			desc = &partitions[*partition_count];
772 			desc->ep_id = el3_lp_descs[index].sp_id;
773 			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
774 			desc->properties = el3_lp_descs[index].properties;
775 			if (null_uuid) {
776 				copy_uuid(desc->uuid, el3_lp_descs[index].uuid);
777 			}
778 			(*partition_count)++;
779 		}
780 	}
781 
782 	/* Deal with physical SPs. */
783 	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
784 		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
785 			/* Found a matching UUID, populate appropriately. */
786 			if (*partition_count >= max_partitions) {
787 				return FFA_ERROR_NO_MEMORY;
788 			}
789 
790 			desc = &partitions[*partition_count];
791 			desc->ep_id = sp_desc[index].sp_id;
792 			/*
793 			 * Execution context count must match the number of
794 			 * cores for S-EL1 SPs.
795 			 */
796 			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
797 			desc->properties = sp_desc[index].properties;
798 			if (null_uuid) {
799 				copy_uuid(desc->uuid, sp_desc[index].uuid);
800 			}
801 			(*partition_count)++;
802 		}
803 	}
804 	return 0;
805 }
806 
807 /*
808  * Handle the case where the caller only wants the count of partitions
809  * matching a given UUID and does not want the corresponding descriptors
810  * populated.
811  */
812 static uint32_t partition_info_get_handler_count_only(uint32_t *uuid)
813 {
814 	uint32_t index = 0;
815 	uint32_t partition_count = 0;
816 	bool null_uuid = is_null_uuid(uuid);
817 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
818 
819 	/* Deal with Logical Partitions. */
820 	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
821 		if (null_uuid ||
822 		    uuid_match(uuid, el3_lp_descs[index].uuid)) {
823 			(partition_count)++;
824 		}
825 	}
826 
827 	/* Deal with physical SPs. */
828 	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
829 		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
830 			(partition_count)++;
831 		}
832 	}
833 	return partition_count;
834 }
835 
836 /*
837  * If the caller of the PARTITION_INFO_GET ABI was a v1.0 caller, populate
838  * the corresponding descriptor format from the v1.1 descriptor array.
839  */
840 static uint64_t partition_info_populate_v1_0(struct ffa_partition_info_v1_1
841 					     *partitions,
842 					     struct mailbox *mbox,
843 					     int partition_count)
844 {
845 	uint32_t index;
846 	uint32_t buf_size;
847 	uint32_t descriptor_size;
848 	struct ffa_partition_info_v1_0 *v1_0_partitions =
849 		(struct ffa_partition_info_v1_0 *) mbox->rx_buffer;
850 
851 	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
852 	descriptor_size = partition_count *
853 			  sizeof(struct ffa_partition_info_v1_0);
854 
855 	if (descriptor_size > buf_size) {
856 		return FFA_ERROR_NO_MEMORY;
857 	}
858 
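	/*
	 * The v1.0 descriptor does not include a UUID field, so only the
	 * endpoint ID, execution context count and properties are copied.
	 */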
859 	for (index = 0U; index < partition_count; index++) {
860 		v1_0_partitions[index].ep_id = partitions[index].ep_id;
861 		v1_0_partitions[index].execution_ctx_count =
862 			partitions[index].execution_ctx_count;
863 		v1_0_partitions[index].properties =
864 			partitions[index].properties;
865 	}
866 	return 0;
867 }
868 
869 /*
870  * Main handler for FFA_PARTITION_INFO_GET which supports both FF-A v1.1 and
871  * v1.0 implementations.
872  */
873 static uint64_t partition_info_get_handler(uint32_t smc_fid,
874 					   bool secure_origin,
875 					   uint64_t x1,
876 					   uint64_t x2,
877 					   uint64_t x3,
878 					   uint64_t x4,
879 					   void *cookie,
880 					   void *handle,
881 					   uint64_t flags)
882 {
883 	int ret;
884 	uint32_t partition_count = 0;
885 	uint32_t size = 0;
886 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
887 	struct mailbox *mbox;
888 	uint64_t info_get_flags;
889 	bool count_only;
890 	uint32_t uuid[4];
891 
892 	uuid[0] = x1;
893 	uuid[1] = x2;
894 	uuid[2] = x3;
895 	uuid[3] = x4;
896 
897 	/* Determine if the Partition descriptors should be populated. */
898 	info_get_flags = SMC_GET_GP(handle, CTX_GPREG_X5);
899 	count_only = (info_get_flags & FFA_PARTITION_INFO_GET_COUNT_FLAG_MASK);
900 
901 	/* Handle the case where we don't need to populate the descriptors. */
902 	if (count_only) {
903 		partition_count = partition_info_get_handler_count_only(uuid);
904 		if (partition_count == 0) {
905 			return spmc_ffa_error_return(handle,
906 						FFA_ERROR_INVALID_PARAMETER);
907 		}
908 	} else {
909 		struct ffa_partition_info_v1_1 partitions[MAX_SP_LP_PARTITIONS];
910 
911 		/*
912 		 * Handle the case where the partition descriptors are required,
913 		 * check we have the buffers available and populate the
914 		 * appropriate structure version.
915 		 */
916 
917 		/* Obtain the v1.1 format of the descriptors. */
918 		ret = partition_info_get_handler_v1_1(uuid, partitions,
919 						      MAX_SP_LP_PARTITIONS,
920 						      &partition_count);
921 
922 		/* Check if an error occurred during discovery. */
923 		if (ret != 0) {
924 			goto err;
925 		}
926 
927 		/* If we didn't find any matches the UUID is unknown. */
928 		if (partition_count == 0) {
929 			ret = FFA_ERROR_INVALID_PARAMETER;
930 			goto err;
931 		}
932 
933 		/* Obtain the partition mailbox RX/TX buffer pair descriptor. */
934 		mbox = spmc_get_mbox_desc(secure_origin);
935 
936 		/*
937 		 * If the caller has not registered its RX/TX buffer pair
938 		 * then return an error code.
939 		 */
940 		spin_lock(&mbox->lock);
941 		if (mbox->rx_buffer == NULL) {
942 			ret = FFA_ERROR_BUSY;
943 			goto err_unlock;
944 		}
945 
946 		/* Ensure the RX buffer is currently free. */
947 		if (mbox->state != MAILBOX_STATE_EMPTY) {
948 			ret = FFA_ERROR_BUSY;
949 			goto err_unlock;
950 		}
951 
952 		/* Zero the RX buffer before populating. */
953 		(void)memset(mbox->rx_buffer, 0,
954 			     mbox->rxtx_page_count * FFA_PAGE_SIZE);
955 
956 		/*
957 		 * Depending on the FF-A version of the requesting partition
958 		 * we may need to convert to a v1.0 format otherwise we can copy
959 		 * directly.
960 		 */
961 		if (ffa_version == MAKE_FFA_VERSION(U(1), U(0))) {
962 			ret = partition_info_populate_v1_0(partitions,
963 							   mbox,
964 							   partition_count);
965 			if (ret != 0) {
966 				goto err_unlock;
967 			}
968 		} else {
969 			uint32_t buf_size = mbox->rxtx_page_count *
970 					    FFA_PAGE_SIZE;
971 
972 			/* Ensure the descriptor will fit in the buffer. */
973 			size = sizeof(struct ffa_partition_info_v1_1);
974 			if (partition_count * size > buf_size) {
975 				ret = FFA_ERROR_NO_MEMORY;
976 				goto err_unlock;
977 			}
978 			memcpy(mbox->rx_buffer, partitions,
979 			       partition_count * size);
980 		}
981 
982 		mbox->state = MAILBOX_STATE_FULL;
983 		spin_unlock(&mbox->lock);
984 	}
985 	SMC_RET4(handle, FFA_SUCCESS_SMC32, 0, partition_count, size);
986 
987 err_unlock:
988 	spin_unlock(&mbox->lock);
989 err:
990 	return spmc_ffa_error_return(handle, ret);
991 }
992 
993 static uint64_t ffa_features_handler(uint32_t smc_fid,
994 				     bool secure_origin,
995 				     uint64_t x1,
996 				     uint64_t x2,
997 				     uint64_t x3,
998 				     uint64_t x4,
999 				     void *cookie,
1000 				     void *handle,
1001 				     uint64_t flags)
1002 {
1003 	uint32_t function_id = (uint32_t) x1;
1004 	uint32_t input_properties = (uint32_t) x2;
1005 
1006 	/*
1007 	 * We don't currently support any additional input properties
1008 	 * for any ABI, therefore ensure this value is always set to 0.
1009 	 */
1010 	if (input_properties != 0) {
1011 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1012 	}
1013 
1014 	/* Check if a Feature ID was requested. */
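	/*
	 * FF-A function IDs follow the SMC fast-call convention and have
	 * bit 31 set; a clear bit 31 therefore denotes a Feature ID.
	 */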
1015 	if ((function_id & FFA_FEATURES_BIT31_MASK) == 0U) {
1016 		/* We currently don't support any additional features. */
1017 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1018 	}
1019 
1020 	/* Report if an FF-A ABI is supported. */
1021 	switch (function_id) {
1022 	/* Supported features from both worlds. */
1023 	case FFA_ERROR:
1024 	case FFA_SUCCESS_SMC32:
1025 	case FFA_INTERRUPT:
1026 	case FFA_SPM_ID_GET:
1027 	case FFA_ID_GET:
1028 	case FFA_FEATURES:
1029 	case FFA_VERSION:
1030 	case FFA_RX_RELEASE:
1031 	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
1032 	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
1033 	case FFA_PARTITION_INFO_GET:
1034 	case FFA_RXTX_MAP_SMC32:
1035 	case FFA_RXTX_MAP_SMC64:
1036 	case FFA_RXTX_UNMAP:
1037 	case FFA_MEM_FRAG_TX:
1038 	case FFA_MSG_RUN:
1039 
1040 		/*
1041 		 * We are relying on the fact that the other registers
1042 		 * will be set to 0 as these values align with the
1043 		 * currently implemented features of the SPMC. If this
1044 		 * changes this function must be extended to handle
1045 		 * reporting the additional functionality.
1046 		 */
1047 
1048 		SMC_RET1(handle, FFA_SUCCESS_SMC32);
1049 		/* Execution stops here. */
1050 
1051 	/* Supported ABIs only from the secure world. */
1052 	case FFA_SECONDARY_EP_REGISTER_SMC64:
1053 	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
1054 	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
1055 	case FFA_MEM_RETRIEVE_REQ_SMC32:
1056 	case FFA_MEM_RETRIEVE_REQ_SMC64:
1057 	case FFA_MEM_RELINQUISH:
1058 	case FFA_MSG_WAIT:
1059 
1060 		if (!secure_origin) {
1061 			return spmc_ffa_error_return(handle,
1062 				FFA_ERROR_NOT_SUPPORTED);
1063 		}
1064 		SMC_RET1(handle, FFA_SUCCESS_SMC32);
1065 		/* Execution stops here. */
1066 
1067 	/* Supported features only from the normal world. */
1068 	case FFA_MEM_SHARE_SMC32:
1069 	case FFA_MEM_SHARE_SMC64:
1070 	case FFA_MEM_LEND_SMC32:
1071 	case FFA_MEM_LEND_SMC64:
1072 	case FFA_MEM_RECLAIM:
1073 	case FFA_MEM_FRAG_RX:
1074 
1075 		if (secure_origin) {
1076 			return spmc_ffa_error_return(handle,
1077 					FFA_ERROR_NOT_SUPPORTED);
1078 		}
1079 		SMC_RET1(handle, FFA_SUCCESS_SMC32);
1080 		/* Execution stops here. */
1081 
1082 	default:
1083 		return spmc_ffa_error_return(handle,
1084 					FFA_ERROR_NOT_SUPPORTED);
1085 	}
1086 }
1087 
1088 static uint64_t ffa_id_get_handler(uint32_t smc_fid,
1089 				   bool secure_origin,
1090 				   uint64_t x1,
1091 				   uint64_t x2,
1092 				   uint64_t x3,
1093 				   uint64_t x4,
1094 				   void *cookie,
1095 				   void *handle,
1096 				   uint64_t flags)
1097 {
1098 	if (secure_origin) {
1099 		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1100 			 spmc_get_current_sp_ctx()->sp_id);
1101 	} else {
1102 		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1103 			 spmc_get_hyp_ctx()->ns_ep_id);
1104 	}
1105 }
1106 
1107 /*
1108  * Enable an SP to query the ID assigned to the SPMC.
1109  */
1110 static uint64_t ffa_spm_id_get_handler(uint32_t smc_fid,
1111 				       bool secure_origin,
1112 				       uint64_t x1,
1113 				       uint64_t x2,
1114 				       uint64_t x3,
1115 				       uint64_t x4,
1116 				       void *cookie,
1117 				       void *handle,
1118 				       uint64_t flags)
1119 {
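	/*
	 * The parameter registers are MBZ for FFA_SPM_ID_GET; this is only
	 * enforced in debug builds via the asserts below.
	 */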
1120 	assert(x1 == 0UL);
1121 	assert(x2 == 0UL);
1122 	assert(x3 == 0UL);
1123 	assert(x4 == 0UL);
1124 	assert(SMC_GET_GP(handle, CTX_GPREG_X5) == 0UL);
1125 	assert(SMC_GET_GP(handle, CTX_GPREG_X6) == 0UL);
1126 	assert(SMC_GET_GP(handle, CTX_GPREG_X7) == 0UL);
1127 
1128 	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0, FFA_SPMC_ID);
1129 }
1130 
1131 static uint64_t ffa_run_handler(uint32_t smc_fid,
1132 				bool secure_origin,
1133 				uint64_t x1,
1134 				uint64_t x2,
1135 				uint64_t x3,
1136 				uint64_t x4,
1137 				void *cookie,
1138 				void *handle,
1139 				uint64_t flags)
1140 {
1141 	struct secure_partition_desc *sp;
1142 	uint16_t target_id = FFA_RUN_EP_ID(x1);
1143 	uint16_t vcpu_id = FFA_RUN_VCPU_ID(x1);
1144 	unsigned int idx;
1145 	unsigned int *rt_state;
1146 	unsigned int *rt_model;
1147 
1148 	/* Can only be called from the normal world. */
1149 	if (secure_origin) {
1150 		ERROR("FFA_RUN can only be called from NWd.\n");
1151 		return spmc_ffa_error_return(handle,
1152 					     FFA_ERROR_INVALID_PARAMETER);
1153 	}
1154 
1155 	/* Cannot run a Normal world partition. */
1156 	if (ffa_is_normal_world_id(target_id)) {
1157 		ERROR("Cannot run a NWd partition (0x%x).\n", target_id);
1158 		return spmc_ffa_error_return(handle,
1159 					     FFA_ERROR_INVALID_PARAMETER);
1160 	}
1161 
1162 	/* Check that the target SP exists. */
1163 	sp = spmc_get_sp_ctx(target_id);
1164 	if (sp == NULL) {
1165 		ERROR("Unknown partition ID (0x%x).\n", target_id);
1166 		return spmc_ffa_error_return(handle,
1167 					     FFA_ERROR_INVALID_PARAMETER);
1168 	}
1169 
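	/*
	 * S-EL1 SPs are MP partitions with one execution context per physical
	 * core, so the requested vCPU ID must match the calling core's EC
	 * index.
	 */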
1170 	idx = get_ec_index(sp);
1171 	if (idx != vcpu_id) {
1172 		ERROR("Cannot run vcpu %d != %d.\n", idx, vcpu_id);
1173 		return spmc_ffa_error_return(handle,
1174 					     FFA_ERROR_INVALID_PARAMETER);
1175 	}
1176 	rt_state = &((sp->ec[idx]).rt_state);
1177 	rt_model = &((sp->ec[idx]).rt_model);
1178 	if (*rt_state == RT_STATE_RUNNING) {
1179 		ERROR("Partition (0x%x) is already running.\n", target_id);
1180 		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
1181 	}
1182 
1183 	/*
1184 	 * Sanity check that if the execution context was not waiting then it
1185 	 * was either in the direct request or the run partition runtime model.
1186 	 */
1187 	if (*rt_state == RT_STATE_PREEMPTED || *rt_state == RT_STATE_BLOCKED) {
1188 		assert(*rt_model == RT_MODEL_RUN ||
1189 		       *rt_model == RT_MODEL_DIR_REQ);
1190 	}
1191 
1192 	/*
1193 	 * If the context was waiting then update the partition runtime model.
1194 	 */
1195 	if (*rt_state == RT_STATE_WAITING) {
1196 		*rt_model = RT_MODEL_RUN;
1197 	}
1198 
1199 	/*
1200 	 * Forward the request to the correct SP vCPU after updating
1201 	 * its state.
1202 	 */
1203 	*rt_state = RT_STATE_RUNNING;
1204 
1205 	return spmc_smc_return(smc_fid, secure_origin, x1, 0, 0, 0,
1206 			       handle, cookie, flags, target_id);
1207 }
1208 
1209 static uint64_t rx_release_handler(uint32_t smc_fid,
1210 				   bool secure_origin,
1211 				   uint64_t x1,
1212 				   uint64_t x2,
1213 				   uint64_t x3,
1214 				   uint64_t x4,
1215 				   void *cookie,
1216 				   void *handle,
1217 				   uint64_t flags)
1218 {
1219 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1220 
1221 	spin_lock(&mbox->lock);
1222 
1223 	if (mbox->state != MAILBOX_STATE_FULL) {
1224 		spin_unlock(&mbox->lock);
1225 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1226 	}
1227 
1228 	mbox->state = MAILBOX_STATE_EMPTY;
1229 	spin_unlock(&mbox->lock);
1230 
1231 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1232 }
1233 
1234 /*
1235  * Perform initial validation on the provided secondary entry point.
1236  * For now ensure it does not lie within the BL31 Image or the SP's
1237  * RX/TX buffers as these are mapped within EL3.
1238  * TODO: perform validation for additional invalid memory regions.
1239  */
1240 static int validate_secondary_ep(uintptr_t ep, struct secure_partition_desc *sp)
1241 {
1242 	struct mailbox *mb;
1243 	uintptr_t buffer_size;
1244 	uintptr_t sp_rx_buffer;
1245 	uintptr_t sp_tx_buffer;
1246 	uintptr_t sp_rx_buffer_limit;
1247 	uintptr_t sp_tx_buffer_limit;
1248 
1249 	mb = &sp->mailbox;
1250 	buffer_size = (uintptr_t) (mb->rxtx_page_count * FFA_PAGE_SIZE);
1251 	sp_rx_buffer = (uintptr_t) mb->rx_buffer;
1252 	sp_tx_buffer = (uintptr_t) mb->tx_buffer;
1253 	sp_rx_buffer_limit = sp_rx_buffer + buffer_size;
1254 	sp_tx_buffer_limit = sp_tx_buffer + buffer_size;
1255 
1256 	/*
1257 	 * Check if the entry point lies within BL31, or the
1258 	 * SP's RX or TX buffer.
1259 	 */
1260 	if ((ep >= BL31_BASE && ep < BL31_LIMIT) ||
1261 	    (ep >= sp_rx_buffer && ep < sp_rx_buffer_limit) ||
1262 	    (ep >= sp_tx_buffer && ep < sp_tx_buffer_limit)) {
1263 		return -EINVAL;
1264 	}
1265 	return 0;
1266 }
1267 
1268 /*******************************************************************************
1269  * This function handles the FFA_SECONDARY_EP_REGISTER SMC to allow an SP to
1270  * register an entry point for initialization during a secondary cold boot.
1271  ******************************************************************************/
1272 static uint64_t ffa_sec_ep_register_handler(uint32_t smc_fid,
1273 					    bool secure_origin,
1274 					    uint64_t x1,
1275 					    uint64_t x2,
1276 					    uint64_t x3,
1277 					    uint64_t x4,
1278 					    void *cookie,
1279 					    void *handle,
1280 					    uint64_t flags)
1281 {
1282 	struct secure_partition_desc *sp;
1283 	struct sp_exec_ctx *sp_ctx;
1284 
1285 	/* This request cannot originate from the Normal world. */
1286 	if (!secure_origin) {
1287 		WARN("%s: Can only be called from SWd.\n", __func__);
1288 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1289 	}
1290 
1291 	/* Get the context of the current SP. */
1292 	sp = spmc_get_current_sp_ctx();
1293 	if (sp == NULL) {
1294 		WARN("%s: Cannot find SP context.\n", __func__);
1295 		return spmc_ffa_error_return(handle,
1296 					     FFA_ERROR_INVALID_PARAMETER);
1297 	}
1298 
1299 	/* Only an S-EL1 SP should be invoking this ABI. */
1300 	if (sp->runtime_el != S_EL1) {
1301 		WARN("%s: Can only be called for a S-EL1 SP.\n", __func__);
1302 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1303 	}
1304 
1305 	/* Ensure the SP is in its initialization state. */
1306 	sp_ctx = spmc_get_sp_ec(sp);
1307 	if (sp_ctx->rt_model != RT_MODEL_INIT) {
1308 		WARN("%s: Can only be called during SP initialization.\n",
1309 		     __func__);
1310 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1311 	}
1312 
1313 	/* Perform initial validation of the secondary entry point. */
1314 	if (validate_secondary_ep(x1, sp)) {
1315 		WARN("%s: Invalid entry point provided (0x%lx).\n",
1316 		     __func__, x1);
1317 		return spmc_ffa_error_return(handle,
1318 					     FFA_ERROR_INVALID_PARAMETER);
1319 	}
1320 
1321 	/*
1322 	 * Update the secondary entrypoint in SP context.
1323 	 * We don't need a lock here as during partition initialization there
1324 	 * will only be a single core online.
1325 	 */
1326 	sp->secondary_ep = x1;
1327 	VERBOSE("%s: 0x%lx\n", __func__, sp->secondary_ep);
1328 
1329 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1330 }
1331 
1332 /*******************************************************************************
1333  * This function will parse the Secure Partition Manifest. From the manifest,
1334  * it will fetch the details needed to prepare the Secure Partition image
1335  * context and the Secure Partition image boot arguments, if any.
1336  ******************************************************************************/
1337 static int sp_manifest_parse(void *sp_manifest, int offset,
1338 			     struct secure_partition_desc *sp,
1339 			     entry_point_info_t *ep_info,
1340 			     int32_t *boot_info_reg)
1341 {
1342 	int32_t ret, node;
1343 	uint32_t config_32;
1344 
1345 	/*
1346 	 * Look for the mandatory fields that are expected to be present in
1347 	 * the SP manifests.
1348 	 */
1349 	node = fdt_path_offset(sp_manifest, "/");
1350 	if (node < 0) {
1351 		ERROR("Did not find root node.\n");
1352 		return node;
1353 	}
1354 
1355 	ret = fdt_read_uint32_array(sp_manifest, node, "uuid",
1356 				    ARRAY_SIZE(sp->uuid), sp->uuid);
1357 	if (ret != 0) {
1358 		ERROR("Missing Secure Partition UUID.\n");
1359 		return ret;
1360 	}
1361 
1362 	ret = fdt_read_uint32(sp_manifest, node, "exception-level", &config_32);
1363 	if (ret != 0) {
1364 		ERROR("Missing SP Exception Level information.\n");
1365 		return ret;
1366 	}
1367 
1368 	sp->runtime_el = config_32;
1369 
1370 	ret = fdt_read_uint32(sp_manifest, node, "ffa-version", &config_32);
1371 	if (ret != 0) {
1372 		ERROR("Missing Secure Partition FF-A Version.\n");
1373 		return ret;
1374 	}
1375 
1376 	sp->ffa_version = config_32;
1377 
1378 	ret = fdt_read_uint32(sp_manifest, node, "execution-state", &config_32);
1379 	if (ret != 0) {
1380 		ERROR("Missing Secure Partition Execution State.\n");
1381 		return ret;
1382 	}
1383 
1384 	sp->execution_state = config_32;
1385 
1386 	ret = fdt_read_uint32(sp_manifest, node,
1387 			      "messaging-method", &config_32);
1388 	if (ret != 0) {
1389 		ERROR("Missing Secure Partition messaging method.\n");
1390 		return ret;
1391 	}
1392 
1393 	/* Validate this entry, we currently only support direct messaging. */
1394 	if ((config_32 & ~(FFA_PARTITION_DIRECT_REQ_RECV |
1395 			  FFA_PARTITION_DIRECT_REQ_SEND)) != 0U) {
1396 		WARN("Invalid Secure Partition messaging method (0x%x)\n",
1397 		     config_32);
1398 		return -EINVAL;
1399 	}
1400 
1401 	sp->properties = config_32;
1402 
1403 	ret = fdt_read_uint32(sp_manifest, node,
1404 			      "execution-ctx-count", &config_32);
1405 
1406 	if (ret != 0) {
1407 		ERROR("Missing SP Execution Context Count.\n");
1408 		return ret;
1409 	}
1410 
1411 	/*
1412 	 * Ensure this field is set correctly in the manifest. However,
1413 	 * since this is currently a hardcoded value for S-EL1 partitions,
1414 	 * we don't need to save it here, just validate it.
1415 	 */
1416 	if (config_32 != PLATFORM_CORE_COUNT) {
1417 		ERROR("SP Execution Context Count (%u) must be %u.\n",
1418 			config_32, PLATFORM_CORE_COUNT);
1419 		return -EINVAL;
1420 	}
1421 
1422 	/*
1423 	 * Look for the optional fields that are expected to be present in
1424 	 * an SP manifest.
1425 	 */
1426 	ret = fdt_read_uint32(sp_manifest, node, "id", &config_32);
1427 	if (ret != 0) {
1428 		WARN("Missing Secure Partition ID.\n");
1429 	} else {
1430 		if (!is_ffa_secure_id_valid(config_32)) {
1431 			ERROR("Invalid Secure Partition ID (0x%x).\n",
1432 			      config_32);
1433 			return -EINVAL;
1434 		}
1435 		sp->sp_id = config_32;
1436 	}
1437 
1438 	ret = fdt_read_uint32(sp_manifest, node,
1439 			      "power-management-messages", &config_32);
1440 	if (ret != 0) {
1441 		WARN("Missing Power Management Messages entry.\n");
1442 	} else {
1443 		/*
1444 		 * Ensure only the currently supported power messages have
1445 		 * been requested.
1446 		 */
1447 		if (config_32 & ~(FFA_PM_MSG_SUB_CPU_OFF |
1448 				  FFA_PM_MSG_SUB_CPU_SUSPEND |
1449 				  FFA_PM_MSG_SUB_CPU_SUSPEND_RESUME)) {
1450 			ERROR("Requested unsupported PM messages (%x)\n",
1451 			      config_32);
1452 			return -EINVAL;
1453 		}
1454 		sp->pwr_mgmt_msgs = config_32;
1455 	}
1456 
1457 	ret = fdt_read_uint32(sp_manifest, node,
1458 			      "gp-register-num", &config_32);
1459 	if (ret != 0) {
1460 		WARN("Missing boot information register.\n");
1461 	} else {
1462 		/* Check if a register number between 0-3 is specified. */
1463 		if (config_32 < 4) {
1464 			*boot_info_reg = config_32;
1465 		} else {
1466 			WARN("Incorrect boot information register (%u).\n",
1467 			     config_32);
1468 		}
1469 	}
1470 
1471 	return 0;
1472 }
1473 
1474 /*******************************************************************************
1475  * This function gets the Secure Partition Manifest base and maps the manifest
1476  * region.
1477  * Currently only one Secure Partition manifest is considered which is used to
1478  * prepare the context for the single Secure Partition.
1479  ******************************************************************************/
1480 static int find_and_prepare_sp_context(void)
1481 {
1482 	void *sp_manifest;
1483 	uintptr_t manifest_base;
1484 	uintptr_t manifest_base_align;
1485 	entry_point_info_t *next_image_ep_info;
1486 	int32_t ret, boot_info_reg = -1;
1487 	struct secure_partition_desc *sp;
1488 
1489 	next_image_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
1490 	if (next_image_ep_info == NULL) {
1491 		WARN("No Secure Partition image provided by BL2.\n");
1492 		return -ENOENT;
1493 	}
1494 
1495 	sp_manifest = (void *)next_image_ep_info->args.arg0;
1496 	if (sp_manifest == NULL) {
1497 		WARN("Secure Partition manifest absent.\n");
1498 		return -ENOENT;
1499 	}
1500 
1501 	manifest_base = (uintptr_t)sp_manifest;
1502 	manifest_base_align = page_align(manifest_base, DOWN);
1503 
1504 	/*
1505 	 * Map the secure partition manifest region in the EL3 translation
1506 	 * regime.
1507 	 * Map an area equal to (2 * PAGE_SIZE) for now. Since the manifest
1508 	 * base is aligned down to a page boundary, a single PAGE_SIZE region
1509 	 * from the aligned base may not fully cover the manifest.
1510 	 */
1511 	ret = mmap_add_dynamic_region((unsigned long long)manifest_base_align,
1512 				      manifest_base_align,
1513 				      PAGE_SIZE * 2,
1514 				      MT_RO_DATA);
1515 	if (ret != 0) {
1516 		ERROR("Error while mapping SP manifest (%d).\n", ret);
1517 		return ret;
1518 	}
1519 
1520 	ret = fdt_node_offset_by_compatible(sp_manifest, -1,
1521 					    "arm,ffa-manifest-1.0");
1522 	if (ret < 0) {
1523 		ERROR("Error happened in SP manifest reading.\n");
1524 		return -EINVAL;
1525 	}
1526 
1527 	/*
1528 	 * Store the size of the manifest so that it can be used later when
1529 	 * passing the manifest as boot information.
1530 	 */
1531 	next_image_ep_info->args.arg1 = fdt_totalsize(sp_manifest);
1532 	INFO("Manifest size = %lu bytes.\n", next_image_ep_info->args.arg1);
1533 
1534 	/*
1535 	 * Select an SP descriptor for initialising the partition's execution
1536 	 * context on the primary CPU.
1537 	 */
1538 	sp = spmc_get_current_sp_ctx();
1539 
1540 	/* Initialize entry point information for the SP */
1541 	SET_PARAM_HEAD(next_image_ep_info, PARAM_EP, VERSION_1,
1542 		       SECURE | EP_ST_ENABLE);
1543 
1544 	/* Parse the SP manifest. */
1545 	ret = sp_manifest_parse(sp_manifest, ret, sp, next_image_ep_info,
1546 				&boot_info_reg);
1547 	if (ret != 0) {
1548 		ERROR("Error in Secure Partition manifest parsing.\n");
1549 		return ret;
1550 	}
1551 
1552 	/* Check that the runtime EL in the manifest was correct. */
1553 	if (sp->runtime_el != S_EL1) {
1554 		ERROR("Unexpected runtime EL: %d\n", sp->runtime_el);
1555 		return -EINVAL;
1556 	}
1557 
1558 	/* Perform any common initialisation. */
1559 	spmc_sp_common_setup(sp, next_image_ep_info, boot_info_reg);
1560 
1561 	/* Perform any initialisation specific to S-EL1 SPs. */
1562 	spmc_el1_sp_setup(sp, next_image_ep_info);
1563 
1564 	/* Initialize the SP context with the required ep info. */
1565 	spmc_sp_common_ep_commit(sp, next_image_ep_info);
1566 
1567 	return 0;
1568 }
1569 
1570 /*******************************************************************************
1571  * This function validates the EL3 Logical Partition descriptors and runs
1572  * the initialisation function of each Logical Partition.
1573  ******************************************************************************/
1574 static int32_t logical_sp_init(void)
1575 {
1576 	int32_t rc = 0;
1577 	struct el3_lp_desc *el3_lp_descs;
1578 
1579 	/* Perform initial validation of the Logical Partitions. */
1580 	rc = el3_sp_desc_validate();
1581 	if (rc != 0) {
1582 		ERROR("Logical Partition validation failed!\n");
1583 		return rc;
1584 	}
1585 
1586 	el3_lp_descs = get_el3_lp_array();
1587 
1588 	INFO("Logical Secure Partition init start.\n");
1589 	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
1590 		rc = el3_lp_descs[i].init();
1591 		if (rc != 0) {
1592 			ERROR("Logical SP (0x%x) Failed to Initialize\n",
1593 			      el3_lp_descs[i].sp_id);
1594 			return rc;
1595 		}
1596 		VERBOSE("Logical SP (0x%x) Initialized\n",
1597 			      el3_lp_descs[i].sp_id);
1598 	}
1599 
1600 	INFO("Logical Secure Partition init completed.\n");
1601 
1602 	return rc;
1603 }
1604 
1605 uint64_t spmc_sp_synchronous_entry(struct sp_exec_ctx *ec)
1606 {
1607 	uint64_t rc;
1608 
1609 	assert(ec != NULL);
1610 
1611 	/* Assign the context of the SP to this CPU */
1612 	cm_set_context(&(ec->cpu_ctx), SECURE);
1613 
1614 	/* Restore the context assigned above */
1615 	cm_el1_sysregs_context_restore(SECURE);
1616 	cm_set_next_eret_context(SECURE);
1617 
1618 	/* Invalidate TLBs at EL1. */
1619 	tlbivmalle1();
1620 	dsbish();
1621 
1622 	/* Enter Secure Partition */
1623 	rc = spm_secure_partition_enter(&ec->c_rt_ctx);
1624 
1625 	/* Save secure state */
1626 	cm_el1_sysregs_context_save(SECURE);
1627 
1628 	return rc;
1629 }
1630 
1631 /*******************************************************************************
1632  * SPMC Helper Functions.
1633  ******************************************************************************/
1634 static int32_t sp_init(void)
1635 {
1636 	uint64_t rc;
1637 	struct secure_partition_desc *sp;
1638 	struct sp_exec_ctx *ec;
1639 
1640 	sp = spmc_get_current_sp_ctx();
1641 	ec = spmc_get_sp_ec(sp);
1642 	ec->rt_model = RT_MODEL_INIT;
1643 	ec->rt_state = RT_STATE_RUNNING;
1644 
1645 	INFO("Secure Partition (0x%x) init start.\n", sp->sp_id);
1646 
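	/*
	 * Enter the SP synchronously. The SP signals the end of its
	 * initialisation with FFA_MSG_WAIT or FFA_ERROR, both of which
	 * return control here via spmc_sp_synchronous_exit().
	 */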
1647 	rc = spmc_sp_synchronous_entry(ec);
1648 	if (rc != 0) {
1649 		/* Indicate SP init was not successful. */
1650 		ERROR("SP (0x%x) failed to initialize (%lu).\n",
1651 		      sp->sp_id, rc);
1652 		return 0;
1653 	}
1654 
1655 	ec->rt_state = RT_STATE_WAITING;
1656 	INFO("Secure Partition initialized.\n");
1657 
1658 	return 1;
1659 }
1660 
1661 static void initialize_sp_descs(void)
1662 {
1663 	struct secure_partition_desc *sp;
1664 
1665 	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
1666 		sp = &sp_desc[i];
1667 		sp->sp_id = INV_SP_ID;
1668 		sp->mailbox.rx_buffer = NULL;
1669 		sp->mailbox.tx_buffer = NULL;
1670 		sp->mailbox.state = MAILBOX_STATE_EMPTY;
1671 		sp->secondary_ep = 0;
1672 	}
1673 }
1674 
1675 static void initialize_ns_ep_descs(void)
1676 {
1677 	struct ns_endpoint_desc *ns_ep;
1678 
1679 	for (unsigned int i = 0U; i < NS_PARTITION_COUNT; i++) {
1680 		ns_ep = &ns_ep_desc[i];
1681 		/*
1682 		 * Clashes with the Hypervisor ID but will not be a
1683 		 * problem in practice.
1684 		 */
1685 		ns_ep->ns_ep_id = 0;
1686 		ns_ep->ffa_version = 0;
1687 		ns_ep->mailbox.rx_buffer = NULL;
1688 		ns_ep->mailbox.tx_buffer = NULL;
1689 		ns_ep->mailbox.state = MAILBOX_STATE_EMPTY;
1690 	}
1691 }
1692 
1693 /*******************************************************************************
1694  * Initialize SPMC attributes for the SPMD.
1695  ******************************************************************************/
1696 void spmc_populate_attrs(spmc_manifest_attribute_t *spmc_attrs)
1697 {
1698 	spmc_attrs->major_version = FFA_VERSION_MAJOR;
1699 	spmc_attrs->minor_version = FFA_VERSION_MINOR;
1700 	spmc_attrs->exec_state = MODE_RW_64;
1701 	spmc_attrs->spmc_id = FFA_SPMC_ID;
1702 }
1703 
1704 /*******************************************************************************
1705  * Initialize contexts of all Secure Partitions.
1706  ******************************************************************************/
1707 int32_t spmc_setup(void)
1708 {
1709 	int32_t ret;
1710 	uint32_t flags;
1711 
1712 	/* Initialize endpoint descriptors */
1713 	initialize_sp_descs();
1714 	initialize_ns_ep_descs();
1715 
1716 	/*
1717 	 * Retrieve the datastore allocated by platform code for tracking
1718 	 * shared memory requests and zero the region if one is available.
1719 	 */
1720 	ret = plat_spmc_shmem_datastore_get(&spmc_shmem_obj_state.data,
1721 					    &spmc_shmem_obj_state.data_size);
1722 	if (ret != 0) {
1723 		ERROR("Failed to obtain memory descriptor backing store!\n");
1724 		return ret;
1725 	}
1726 	memset(spmc_shmem_obj_state.data, 0, spmc_shmem_obj_state.data_size);
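	/*
	 * For reference, a platform typically backs this hook with a static
	 * buffer. A minimal sketch, assuming a platform-defined size macro
	 * and the prototype declared for this hook in platform.h:
	 *
	 *	static uint8_t spmc_shmem_datastore[PLAT_SPMC_SHMEM_DATASTORE_SIZE];
	 *
	 *	int plat_spmc_shmem_datastore_get(uint8_t **datastore, size_t *size)
	 *	{
	 *		*datastore = spmc_shmem_datastore;
	 *		*size = sizeof(spmc_shmem_datastore);
	 *		return 0;
	 *	}
	 */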
1727 
1728 	/* Setup logical SPs. */
1729 	ret = logical_sp_init();
1730 	if (ret != 0) {
1731 		ERROR("Failed to initialize Logical Partitions.\n");
1732 		return ret;
1733 	}
1734 
1735 	/* Perform physical SP setup. */
1736 
1737 	/* Disable MMU at EL1 (initialized by BL2) */
1738 	disable_mmu_icache_el1();
1739 
1740 	/* Initialize context of the SP */
1741 	INFO("Secure Partition context setup start.\n");
1742 
1743 	ret = find_and_prepare_sp_context();
1744 	if (ret != 0) {
1745 		ERROR("Failed to find and prepare the SP context.\n");
1746 		return ret;
1747 	}
1748 
1749 	/* Register power management hooks with PSCI */
1750 	psci_register_spd_pm_hook(&spmc_pm);
1751 
1752 	/*
1753 	 * Register an interrupt handler for S-EL1 interrupts
1754 	 * when generated during code executing in the
1755 	 * non-secure state.
1756 	 */
1757 	flags = 0;
1758 	set_interrupt_rm_flag(flags, NON_SECURE);
1759 	ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
1760 					      spmc_sp_interrupt_handler,
1761 					      flags);
1762 	if (ret != 0) {
1763 		ERROR("Failed to register interrupt handler! (%d)\n", ret);
1764 		panic();
1765 	}
1766 
1767 	/* Register the SP init function for deferred initialization. */
1768 	bl31_register_bl32_init(&sp_init);
1769 
1770 	INFO("Secure Partition setup done.\n");
1771 
1772 	return 0;
1773 }
1774 
1775 /*******************************************************************************
1776  * Secure Partition Manager SMC handler.
1777  ******************************************************************************/
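/*
 * Each FFA_* function ID is dispatched to its dedicated handler together with
 * the originating security state, the SMC arguments and the saved register
 * context. Function IDs that the SPMC does not implement are rejected with
 * FFA_ERROR(NOT_SUPPORTED).
 */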
1778 uint64_t spmc_smc_handler(uint32_t smc_fid,
1779 			  bool secure_origin,
1780 			  uint64_t x1,
1781 			  uint64_t x2,
1782 			  uint64_t x3,
1783 			  uint64_t x4,
1784 			  void *cookie,
1785 			  void *handle,
1786 			  uint64_t flags)
1787 {
1788 	switch (smc_fid) {
1789 
1790 	case FFA_VERSION:
1791 		return ffa_version_handler(smc_fid, secure_origin, x1, x2, x3,
1792 					   x4, cookie, handle, flags);
1793 
1794 	case FFA_SPM_ID_GET:
1795 		return ffa_spm_id_get_handler(smc_fid, secure_origin, x1, x2,
1796 					     x3, x4, cookie, handle, flags);
1797 
1798 	case FFA_ID_GET:
1799 		return ffa_id_get_handler(smc_fid, secure_origin, x1, x2, x3,
1800 					  x4, cookie, handle, flags);
1801 
1802 	case FFA_FEATURES:
1803 		return ffa_features_handler(smc_fid, secure_origin, x1, x2, x3,
1804 					    x4, cookie, handle, flags);
1805 
1806 	case FFA_SECONDARY_EP_REGISTER_SMC64:
1807 		return ffa_sec_ep_register_handler(smc_fid, secure_origin, x1,
1808 						   x2, x3, x4, cookie, handle,
1809 						   flags);
1810 
1811 	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
1812 	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
1813 		return direct_req_smc_handler(smc_fid, secure_origin, x1, x2,
1814 					      x3, x4, cookie, handle, flags);
1815 
1816 	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
1817 	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
1818 		return direct_resp_smc_handler(smc_fid, secure_origin, x1, x2,
1819 					       x3, x4, cookie, handle, flags);
1820 
1821 	case FFA_RXTX_MAP_SMC32:
1822 	case FFA_RXTX_MAP_SMC64:
1823 		return rxtx_map_handler(smc_fid, secure_origin, x1, x2, x3, x4,
1824 					cookie, handle, flags);
1825 
1826 	case FFA_RXTX_UNMAP:
1827 		return rxtx_unmap_handler(smc_fid, secure_origin, x1, x2, x3,
1828 					  x4, cookie, handle, flags);
1829 
1830 	case FFA_PARTITION_INFO_GET:
1831 		return partition_info_get_handler(smc_fid, secure_origin, x1,
1832 						  x2, x3, x4, cookie, handle,
1833 						  flags);
1834 
1835 	case FFA_RX_RELEASE:
1836 		return rx_release_handler(smc_fid, secure_origin, x1, x2, x3,
1837 					  x4, cookie, handle, flags);
1838 
1839 	case FFA_MSG_WAIT:
1840 		return msg_wait_handler(smc_fid, secure_origin, x1, x2, x3, x4,
1841 					cookie, handle, flags);
1842 
1843 	case FFA_ERROR:
1844 		return ffa_error_handler(smc_fid, secure_origin, x1, x2, x3, x4,
1845 					cookie, handle, flags);
1846 
1847 	case FFA_MSG_RUN:
1848 		return ffa_run_handler(smc_fid, secure_origin, x1, x2, x3, x4,
1849 				       cookie, handle, flags);
1850 
1851 	case FFA_MEM_SHARE_SMC32:
1852 	case FFA_MEM_SHARE_SMC64:
1853 	case FFA_MEM_LEND_SMC32:
1854 	case FFA_MEM_LEND_SMC64:
1855 		return spmc_ffa_mem_send(smc_fid, secure_origin, x1, x2, x3, x4,
1856 					 cookie, handle, flags);
1857 
1858 	case FFA_MEM_FRAG_TX:
1859 		return spmc_ffa_mem_frag_tx(smc_fid, secure_origin, x1, x2, x3,
1860 					    x4, cookie, handle, flags);
1861 
1862 	case FFA_MEM_FRAG_RX:
1863 		return spmc_ffa_mem_frag_rx(smc_fid, secure_origin, x1, x2, x3,
1864 					    x4, cookie, handle, flags);
1865 
1866 	case FFA_MEM_RETRIEVE_REQ_SMC32:
1867 	case FFA_MEM_RETRIEVE_REQ_SMC64:
1868 		return spmc_ffa_mem_retrieve_req(smc_fid, secure_origin, x1, x2,
1869 						 x3, x4, cookie, handle, flags);
1870 
1871 	case FFA_MEM_RELINQUISH:
1872 		return spmc_ffa_mem_relinquish(smc_fid, secure_origin, x1, x2,
1873 					       x3, x4, cookie, handle, flags);
1874 
1875 	case FFA_MEM_RECLAIM:
1876 		return spmc_ffa_mem_reclaim(smc_fid, secure_origin, x1, x2, x3,
1877 					    x4, cookie, handle, flags);
1878 
1879 	default:
1880 		WARN("Unsupported FF-A call 0x%08x.\n", smc_fid);
1881 		break;
1882 	}
1883 	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1884 }
1885 
1886 /*******************************************************************************
1887  * This function is the handler registered for S-EL1 interrupts by the SPMC. It
1888  * validates the interrupt and upon success arranges entry into the SP for
1889  * handling the interrupt.
1890  ******************************************************************************/
1891 static uint64_t spmc_sp_interrupt_handler(uint32_t id,
1892 					  uint32_t flags,
1893 					  void *handle,
1894 					  void *cookie)
1895 {
1896 	struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
1897 	struct sp_exec_ctx *ec;
1898 	uint32_t linear_id = plat_my_core_pos();
1899 
1900 	/* Sanity check to avoid a NULL pointer dereference. */
1901 	assert(sp != NULL);
1902 
1903 	/* Check the security state when the exception was generated. */
1904 	assert(get_interrupt_src_ss(flags) == NON_SECURE);
1905 
1906 	/* Panic if not an S-EL1 Partition. */
1907 	if (sp->runtime_el != S_EL1) {
1908 		ERROR("Interrupt received for a non-S-EL1 SP on core%u.\n",
1909 		      linear_id);
1910 		panic();
1911 	}
1912 
1913 	/* Obtain a reference to the SP execution context. */
1914 	ec = spmc_get_sp_ec(sp);
1915 
1916 	/* Ensure that the execution context is in waiting state else panic. */
1917 	if (ec->rt_state != RT_STATE_WAITING) {
1918 		ERROR("SP EC on core%u is not waiting: expected %u, got %u.\n",
1919 		      linear_id, RT_STATE_WAITING, ec->rt_state);
1920 		panic();
1921 	}
1922 
1923 	/* Update the runtime model and state of the partition. */
1924 	ec->rt_model = RT_MODEL_INTR;
1925 	ec->rt_state = RT_STATE_RUNNING;
1926 
1927 	VERBOSE("SP (0x%x) interrupt start on core%u.\n", sp->sp_id, linear_id);
1928 
1929 	/*
1930 	 * Forward the interrupt to the S-EL1 SP. The interrupt ID is not
1931 	 * populated as the SP can determine this by itself.
1932 	 */
1933 	return spmd_smc_switch_state(FFA_INTERRUPT, false,
1934 				     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
1935 				     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
1936 				     handle);
1937 }
1938