xref: /rk3399_ARM-atf/services/std_svc/spm/el3_spmc/spmc_main.c (revision a0a7f158d2932117d57ec0d74113890a565e0cbc)
1 /*
2  * Copyright (c) 2022-2024, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <errno.h>
9 #include <stdio.h>
10 
11 #include <arch_helpers.h>
12 #include <bl31/bl31.h>
13 #include <bl31/ehf.h>
14 #include <bl31/interrupt_mgmt.h>
15 #include <common/debug.h>
16 #include <common/fdt_wrappers.h>
17 #include <common/runtime_svc.h>
18 #include <common/uuid.h>
19 #include <lib/el3_runtime/context_mgmt.h>
20 #include <lib/smccc.h>
21 #include <lib/utils.h>
22 #include <lib/xlat_tables/xlat_tables_v2.h>
23 #include <libfdt.h>
24 #include <plat/common/platform.h>
25 #include <services/el3_spmc_logical_sp.h>
26 #include <services/ffa_svc.h>
27 #include <services/spmc_svc.h>
28 #include <services/spmd_svc.h>
29 #include "spmc.h"
30 #include "spmc_shared_mem.h"
31 
32 #include <platform_def.h>
33 
/*
 * FFA_MEM_PERM_* helpers.
 * Data access permission is encoded in bits [1:0], instruction access
 * permission in bit 2 (see the shifts below).
 */
#define FFA_MEM_PERM_MASK		U(7)
#define FFA_MEM_PERM_DATA_MASK		U(3)
#define FFA_MEM_PERM_DATA_SHIFT		U(0)
#define FFA_MEM_PERM_DATA_NA		U(0)
#define FFA_MEM_PERM_DATA_RW		U(1)
#define FFA_MEM_PERM_DATA_RES		U(2)
#define FFA_MEM_PERM_DATA_RO		U(3)
#define FFA_MEM_PERM_INST_EXEC          (U(0) << 2)
#define FFA_MEM_PERM_INST_NON_EXEC      (U(1) << 2)
44 
/*
 * Declare the maximum number of SPs and El3 LPs.
 * Parenthesized so the macro expands safely inside larger expressions
 * (e.g. multiplication or comparison) without precedence surprises.
 */
#define MAX_SP_LP_PARTITIONS	(SECURE_PARTITION_COUNT + MAX_EL3_LP_DESCS_COUNT)
47 
/*
 * Allocate a secure partition descriptor to describe each SP in the system that
 * does not reside at EL3. Indexed by descriptor slot; looked up by ID via
 * spmc_get_sp_ctx().
 */
static struct secure_partition_desc sp_desc[SECURE_PARTITION_COUNT];
53 
/*
 * Allocate an NS endpoint descriptor to describe each VM and the Hypervisor in
 * the system that interacts with a SP. It is used to track the Hypervisor
 * buffer pair, version and ID for now. It could be extended to track VM
 * properties when the SPMC supports indirect messaging.
 * Slot 0 is reserved for the Hypervisor/OS kernel (see spmc_get_hyp_ctx()).
 */
static struct ns_endpoint_desc ns_ep_desc[NS_PARTITION_COUNT];
61 
/* Forward declaration: secure interrupt handler registered by the SPMC. */
static uint64_t spmc_sp_interrupt_handler(uint32_t id,
					  uint32_t flags,
					  void *handle,
					  void *cookie);
66 
67 /*
68  * Helper function to obtain the array storing the EL3
69  * Logical Partition descriptors.
70  */
71 struct el3_lp_desc *get_el3_lp_array(void)
72 {
73 	return (struct el3_lp_desc *) EL3_LP_DESCS_START;
74 }
75 
76 /*
77  * Helper function to obtain the descriptor of the last SP to whom control was
78  * handed to on this physical cpu. Currently, we assume there is only one SP.
79  * TODO: Expand to track multiple partitions when required.
80  */
81 struct secure_partition_desc *spmc_get_current_sp_ctx(void)
82 {
83 	return &(sp_desc[ACTIVE_SP_DESC_INDEX]);
84 }
85 
86 /*
87  * Helper function to obtain the execution context of an SP on the
88  * current physical cpu.
89  */
90 struct sp_exec_ctx *spmc_get_sp_ec(struct secure_partition_desc *sp)
91 {
92 	return &(sp->ec[get_ec_index(sp)]);
93 }
94 
95 /* Helper function to get pointer to SP context from its ID. */
96 struct secure_partition_desc *spmc_get_sp_ctx(uint16_t id)
97 {
98 	/* Check for Secure World Partitions. */
99 	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
100 		if (sp_desc[i].sp_id == id) {
101 			return &(sp_desc[i]);
102 		}
103 	}
104 	return NULL;
105 }
106 
107 /*
108  * Helper function to obtain the descriptor of the Hypervisor or OS kernel.
109  * We assume that the first descriptor is reserved for this entity.
110  */
111 struct ns_endpoint_desc *spmc_get_hyp_ctx(void)
112 {
113 	return &(ns_ep_desc[0]);
114 }
115 
116 /*
117  * Helper function to obtain the RX/TX buffer pair descriptor of the Hypervisor
118  * or OS kernel in the normal world or the last SP that was run.
119  */
120 struct mailbox *spmc_get_mbox_desc(bool secure_origin)
121 {
122 	/* Obtain the RX/TX buffer pair descriptor. */
123 	if (secure_origin) {
124 		return &(spmc_get_current_sp_ctx()->mailbox);
125 	} else {
126 		return &(spmc_get_hyp_ctx()->mailbox);
127 	}
128 }
129 
/******************************************************************************
 * This function returns to the place where spmc_sp_synchronous_entry() was
 * called originally. It never returns to its own caller.
 ******************************************************************************/
__dead2 void spmc_sp_synchronous_exit(struct sp_exec_ctx *ec, uint64_t rc)
{
	/*
	 * The SPM must have initiated the original request through a
	 * synchronous entry into the secure partition. Jump back to the
	 * original C runtime context with the value of rc in x0;
	 */
	spm_secure_partition_exit(ec->c_rt_ctx, rc);

	/* spm_secure_partition_exit() must not return; guard against it. */
	panic();
}
145 
/*******************************************************************************
 * Return FFA_ERROR with specified error code.
 * w1 carries FFA_TARGET_INFO_MBZ, w2 the error code, and the remaining
 * return registers are filled with FFA_PARAM_MBZ.
 ******************************************************************************/
uint64_t spmc_ffa_error_return(void *handle, int error_code)
{
	SMC_RET8(handle, FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}
156 
157 /******************************************************************************
158  * Helper function to validate a secure partition ID to ensure it does not
159  * conflict with any other FF-A component and follows the convention to
160  * indicate it resides within the secure world.
161  ******************************************************************************/
162 bool is_ffa_secure_id_valid(uint16_t partition_id)
163 {
164 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
165 
166 	/* Ensure the ID is not the invalid partition ID. */
167 	if (partition_id == INV_SP_ID) {
168 		return false;
169 	}
170 
171 	/* Ensure the ID is not the SPMD ID. */
172 	if (partition_id == SPMD_DIRECT_MSG_ENDPOINT_ID) {
173 		return false;
174 	}
175 
176 	/*
177 	 * Ensure the ID follows the convention to indicate it resides
178 	 * in the secure world.
179 	 */
180 	if (!ffa_is_secure_world_id(partition_id)) {
181 		return false;
182 	}
183 
184 	/* Ensure we don't conflict with the SPMC partition ID. */
185 	if (partition_id == FFA_SPMC_ID) {
186 		return false;
187 	}
188 
189 	/* Ensure we do not already have an SP context with this ID. */
190 	if (spmc_get_sp_ctx(partition_id)) {
191 		return false;
192 	}
193 
194 	/* Ensure we don't clash with any Logical SP's. */
195 	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
196 		if (el3_lp_descs[i].sp_id == partition_id) {
197 			return false;
198 		}
199 	}
200 
201 	return true;
202 }
203 
/*******************************************************************************
 * This function either forwards the request to the other world or returns
 * with an ERET depending on the source of the call.
 * We can assume that the destination is for an entity at a lower exception
 * level as any messages destined for a logical SP resident in EL3 will have
 * already been taken care of by the SPMC before entering this function.
 * dst_id selects the routing; secure_origin records which world invoked the
 * SMC currently being handled.
 ******************************************************************************/
static uint64_t spmc_smc_return(uint32_t smc_fid,
				bool secure_origin,
				uint64_t x1,
				uint64_t x2,
				uint64_t x3,
				uint64_t x4,
				void *handle,
				void *cookie,
				uint64_t flags,
				uint16_t dst_id)
{
	/* If the destination is in the normal world always go via the SPMD. */
	if (ffa_is_normal_world_id(dst_id)) {
		return spmd_smc_handler(smc_fid, x1, x2, x3, x4,
					cookie, handle, flags);
	}
	/*
	 * If the caller is secure and we want to return to the secure world,
	 * ERET directly.
	 */
	else if (secure_origin && ffa_is_secure_world_id(dst_id)) {
		SMC_RET5(handle, smc_fid, x1, x2, x3, x4);
	}
	/* If we originated in the normal world then switch contexts. */
	else if (!secure_origin && ffa_is_secure_world_id(dst_id)) {
		return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2,
					     x3, x4, handle, flags);
	} else {
		/* Unknown State. */
		panic();
	}

	/* Shouldn't be Reached. */
	return 0;
}
246 
247 /*******************************************************************************
248  * FF-A ABI Handlers.
249  ******************************************************************************/
250 
251 /*******************************************************************************
252  * Helper function to validate arg2 as part of a direct message.
253  ******************************************************************************/
254 static inline bool direct_msg_validate_arg2(uint64_t x2)
255 {
256 	/* Check message type. */
257 	if (x2 & FFA_FWK_MSG_BIT) {
258 		/* We have a framework message, ensure it is a known message. */
259 		if (x2 & ~(FFA_FWK_MSG_MASK | FFA_FWK_MSG_BIT)) {
260 			VERBOSE("Invalid message format 0x%lx.\n", x2);
261 			return false;
262 		}
263 	} else {
264 		/* We have a partition messages, ensure x2 is not set. */
265 		if (x2 != (uint64_t) 0) {
266 			VERBOSE("Arg2 MBZ for partition messages. (0x%lx).\n",
267 				x2);
268 			return false;
269 		}
270 	}
271 	return true;
272 }
273 
274 /*******************************************************************************
275  * Helper function to validate the destination ID of a direct response.
276  ******************************************************************************/
277 static bool direct_msg_validate_dst_id(uint16_t dst_id)
278 {
279 	struct secure_partition_desc *sp;
280 
281 	/* Check if we're targeting a normal world partition. */
282 	if (ffa_is_normal_world_id(dst_id)) {
283 		return true;
284 	}
285 
286 	/* Or directed to the SPMC itself.*/
287 	if (dst_id == FFA_SPMC_ID) {
288 		return true;
289 	}
290 
291 	/* Otherwise ensure the SP exists. */
292 	sp = spmc_get_sp_ctx(dst_id);
293 	if (sp != NULL) {
294 		return true;
295 	}
296 
297 	return false;
298 }
299 
/*******************************************************************************
 * Validate the direct response populated by an EL3 Logical Partition:
 * the LP must report itself as the source, target the original requester
 * with a valid destination ID, and encode arg2 correctly.
 ******************************************************************************/
static bool direct_msg_validate_lp_resp(uint16_t origin_id, uint16_t lp_id,
					void *handle)
{
	/* Retrieve populated Direct Response Arguments. */
	uint64_t x1 = SMC_GET_GP(handle, CTX_GPREG_X1);
	uint64_t x2 = SMC_GET_GP(handle, CTX_GPREG_X2);
	uint16_t src_id = ffa_endpoint_source(x1);
	uint16_t dst_id = ffa_endpoint_destination(x1);

	/* The LP must answer in its own name. */
	if (src_id != lp_id) {
		ERROR("Invalid EL3 LP source ID (0x%x).\n", src_id);
		return false;
	}

	/*
	 * Check the destination ID is valid and ensure the LP is responding to
	 * the original request.
	 */
	if ((!direct_msg_validate_dst_id(dst_id)) || (dst_id != origin_id)) {
		ERROR("Invalid EL3 LP destination ID (0x%x).\n", dst_id);
		return false;
	}

	/* Arg2 must follow the direct message encoding rules. */
	if (!direct_msg_validate_arg2(x2)) {
		ERROR("Invalid EL3 LP message encoding.\n");
		return false;
	}
	return true;
}
332 
333 /*******************************************************************************
334  * Handle direct request messages and route to the appropriate destination.
335  ******************************************************************************/
336 static uint64_t direct_req_smc_handler(uint32_t smc_fid,
337 				       bool secure_origin,
338 				       uint64_t x1,
339 				       uint64_t x2,
340 				       uint64_t x3,
341 				       uint64_t x4,
342 				       void *cookie,
343 				       void *handle,
344 				       uint64_t flags)
345 {
346 	uint16_t src_id = ffa_endpoint_source(x1);
347 	uint16_t dst_id = ffa_endpoint_destination(x1);
348 	struct el3_lp_desc *el3_lp_descs;
349 	struct secure_partition_desc *sp;
350 	unsigned int idx;
351 
352 	/* Check if arg2 has been populated correctly based on message type. */
353 	if (!direct_msg_validate_arg2(x2)) {
354 		return spmc_ffa_error_return(handle,
355 					     FFA_ERROR_INVALID_PARAMETER);
356 	}
357 
358 	/* Validate Sender is either the current SP or from the normal world. */
359 	if ((secure_origin && src_id != spmc_get_current_sp_ctx()->sp_id) ||
360 		(!secure_origin && !ffa_is_normal_world_id(src_id))) {
361 		ERROR("Invalid direct request source ID (0x%x).\n", src_id);
362 		return spmc_ffa_error_return(handle,
363 					FFA_ERROR_INVALID_PARAMETER);
364 	}
365 
366 	el3_lp_descs = get_el3_lp_array();
367 
368 	/* Check if the request is destined for a Logical Partition. */
369 	for (unsigned int i = 0U; i < MAX_EL3_LP_DESCS_COUNT; i++) {
370 		if (el3_lp_descs[i].sp_id == dst_id) {
371 			uint64_t ret = el3_lp_descs[i].direct_req(
372 						smc_fid, secure_origin, x1, x2,
373 						x3, x4, cookie, handle, flags);
374 			if (!direct_msg_validate_lp_resp(src_id, dst_id,
375 							 handle)) {
376 				panic();
377 			}
378 
379 			/* Message checks out. */
380 			return ret;
381 		}
382 	}
383 
384 	/*
385 	 * If the request was not targeted to a LSP and from the secure world
386 	 * then it is invalid since a SP cannot call into the Normal world and
387 	 * there is no other SP to call into. If there are other SPs in future
388 	 * then the partition runtime model would need to be validated as well.
389 	 */
390 	if (secure_origin) {
391 		VERBOSE("Direct request not supported to the Normal World.\n");
392 		return spmc_ffa_error_return(handle,
393 					     FFA_ERROR_INVALID_PARAMETER);
394 	}
395 
396 	/* Check if the SP ID is valid. */
397 	sp = spmc_get_sp_ctx(dst_id);
398 	if (sp == NULL) {
399 		VERBOSE("Direct request to unknown partition ID (0x%x).\n",
400 			dst_id);
401 		return spmc_ffa_error_return(handle,
402 					     FFA_ERROR_INVALID_PARAMETER);
403 	}
404 
405 	/* Protect the runtime state of a UP S-EL0 SP with a lock. */
406 	if (sp->runtime_el == S_EL0) {
407 		spin_lock(&sp->rt_state_lock);
408 	}
409 
410 	/*
411 	 * Check that the target execution context is in a waiting state before
412 	 * forwarding the direct request to it.
413 	 */
414 	idx = get_ec_index(sp);
415 	if (sp->ec[idx].rt_state != RT_STATE_WAITING) {
416 		VERBOSE("SP context on core%u is not waiting (%u).\n",
417 			idx, sp->ec[idx].rt_model);
418 
419 		if (sp->runtime_el == S_EL0) {
420 			spin_unlock(&sp->rt_state_lock);
421 		}
422 
423 		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
424 	}
425 
426 	/*
427 	 * Everything checks out so forward the request to the SP after updating
428 	 * its state and runtime model.
429 	 */
430 	sp->ec[idx].rt_state = RT_STATE_RUNNING;
431 	sp->ec[idx].rt_model = RT_MODEL_DIR_REQ;
432 	sp->ec[idx].dir_req_origin_id = src_id;
433 
434 	if (sp->runtime_el == S_EL0) {
435 		spin_unlock(&sp->rt_state_lock);
436 	}
437 
438 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
439 			       handle, cookie, flags, dst_id);
440 }
441 
442 /*******************************************************************************
443  * Handle direct response messages and route to the appropriate destination.
444  ******************************************************************************/
445 static uint64_t direct_resp_smc_handler(uint32_t smc_fid,
446 					bool secure_origin,
447 					uint64_t x1,
448 					uint64_t x2,
449 					uint64_t x3,
450 					uint64_t x4,
451 					void *cookie,
452 					void *handle,
453 					uint64_t flags)
454 {
455 	uint16_t dst_id = ffa_endpoint_destination(x1);
456 	struct secure_partition_desc *sp;
457 	unsigned int idx;
458 
459 	/* Check if arg2 has been populated correctly based on message type. */
460 	if (!direct_msg_validate_arg2(x2)) {
461 		return spmc_ffa_error_return(handle,
462 					     FFA_ERROR_INVALID_PARAMETER);
463 	}
464 
465 	/* Check that the response did not originate from the Normal world. */
466 	if (!secure_origin) {
467 		VERBOSE("Direct Response not supported from Normal World.\n");
468 		return spmc_ffa_error_return(handle,
469 					     FFA_ERROR_INVALID_PARAMETER);
470 	}
471 
472 	/*
473 	 * Check that the response is either targeted to the Normal world or the
474 	 * SPMC e.g. a PM response.
475 	 */
476 	if (!direct_msg_validate_dst_id(dst_id)) {
477 		VERBOSE("Direct response to invalid partition ID (0x%x).\n",
478 			dst_id);
479 		return spmc_ffa_error_return(handle,
480 					     FFA_ERROR_INVALID_PARAMETER);
481 	}
482 
483 	/* Obtain the SP descriptor and update its runtime state. */
484 	sp = spmc_get_sp_ctx(ffa_endpoint_source(x1));
485 	if (sp == NULL) {
486 		VERBOSE("Direct response to unknown partition ID (0x%x).\n",
487 			dst_id);
488 		return spmc_ffa_error_return(handle,
489 					     FFA_ERROR_INVALID_PARAMETER);
490 	}
491 
492 	if (sp->runtime_el == S_EL0) {
493 		spin_lock(&sp->rt_state_lock);
494 	}
495 
496 	/* Sanity check state is being tracked correctly in the SPMC. */
497 	idx = get_ec_index(sp);
498 	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
499 
500 	/* Ensure SP execution context was in the right runtime model. */
501 	if (sp->ec[idx].rt_model != RT_MODEL_DIR_REQ) {
502 		VERBOSE("SP context on core%u not handling direct req (%u).\n",
503 			idx, sp->ec[idx].rt_model);
504 		if (sp->runtime_el == S_EL0) {
505 			spin_unlock(&sp->rt_state_lock);
506 		}
507 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
508 	}
509 
510 	if (sp->ec[idx].dir_req_origin_id != dst_id) {
511 		WARN("Invalid direct resp partition ID 0x%x != 0x%x on core%u.\n",
512 		     dst_id, sp->ec[idx].dir_req_origin_id, idx);
513 		if (sp->runtime_el == S_EL0) {
514 			spin_unlock(&sp->rt_state_lock);
515 		}
516 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
517 	}
518 
519 	/* Update the state of the SP execution context. */
520 	sp->ec[idx].rt_state = RT_STATE_WAITING;
521 
522 	/* Clear the ongoing direct request ID. */
523 	sp->ec[idx].dir_req_origin_id = INV_SP_ID;
524 
525 	if (sp->runtime_el == S_EL0) {
526 		spin_unlock(&sp->rt_state_lock);
527 	}
528 
529 	/*
530 	 * If the receiver is not the SPMC then forward the response to the
531 	 * Normal world.
532 	 */
533 	if (dst_id == FFA_SPMC_ID) {
534 		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
535 		/* Should not get here. */
536 		panic();
537 	}
538 
539 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
540 			       handle, cookie, flags, dst_id);
541 }
542 
/*******************************************************************************
 * This function handles the FFA_MSG_WAIT SMC to allow an SP to relinquish its
 * cycles. Depending on the SP's runtime model this either completes a
 * synchronous entry (init), resumes the Normal world (interrupt handling),
 * or forwards the call to the Normal world via the SPMD.
 ******************************************************************************/
static uint64_t msg_wait_handler(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags)
{
	struct secure_partition_desc *sp;
	unsigned int idx;

	/*
	 * Check that the response did not originate from the Normal world as
	 * only the secure world can call this ABI.
	 */
	if (!secure_origin) {
		VERBOSE("Normal world cannot call FFA_MSG_WAIT.\n");
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/* Get the descriptor of the SP that invoked FFA_MSG_WAIT. */
	sp = spmc_get_current_sp_ctx();
	if (sp == NULL) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/*
	 * Get the execution context of the SP that invoked FFA_MSG_WAIT and
	 * protect the runtime state of a S-EL0 SP with a lock.
	 */
	idx = get_ec_index(sp);
	if (sp->runtime_el == S_EL0) {
		spin_lock(&sp->rt_state_lock);
	}

	/*
	 * Ensure SP execution context was in the right runtime model:
	 * FFA_MSG_WAIT may not terminate an ongoing direct request.
	 */
	if (sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) {
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* Sanity check the state is being tracked correctly in the SPMC. */
	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);

	/*
	 * Perform a synchronous exit if the partition was initialising. The
	 * state is updated after the exit.
	 */
	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}
		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
		/* Should not get here */
		panic();
	}

	/* Update the state of the SP execution context. */
	sp->ec[idx].rt_state = RT_STATE_WAITING;

	/* Resume normal world if a secure interrupt was handled. */
	if (sp->ec[idx].rt_model == RT_MODEL_INTR) {
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}

		return spmd_smc_switch_state(FFA_NORMAL_WORLD_RESUME, secure_origin,
					     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					     handle, flags);
	}

	/* Release the runtime state lock taken above for a S-EL0 SP. */
	if (sp->runtime_el == S_EL0) {
		spin_unlock(&sp->rt_state_lock);
	}

	/* Forward the response to the Normal world. */
	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
			       handle, cookie, flags, FFA_NWD_ID);
}
632 
/*******************************************************************************
 * Handle FFA_ERROR invocations. Only expected from a secure partition while
 * it is initialising; any other use is rejected as unsupported.
 ******************************************************************************/
static uint64_t ffa_error_handler(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags)
{
	struct secure_partition_desc *sp;
	unsigned int idx;

	/* Check that the response did not originate from the Normal world. */
	if (!secure_origin) {
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/* Get the descriptor of the SP that invoked FFA_ERROR. */
	sp = spmc_get_current_sp_ctx();
	if (sp == NULL) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Get the execution context of the SP that invoked FFA_ERROR. */
	idx = get_ec_index(sp);

	/*
	 * We only expect FFA_ERROR to be received during SP initialisation
	 * otherwise this is an invalid call.
	 * NOTE(review): rt_state_lock is not taken here unlike the other
	 * handlers — presumably safe while the SP is still initialising;
	 * confirm.
	 */
	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
		ERROR("SP 0x%x failed to initialize.\n", sp->sp_id);
		spmc_sp_synchronous_exit(&sp->ec[idx], x2);
		/* Should not get here. */
		panic();
	}

	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
}
674 
/*******************************************************************************
 * Handle FFA_VERSION: record the caller's requested version where needed and
 * report the version implemented by the SPMC.
 ******************************************************************************/
static uint64_t ffa_version_handler(uint32_t smc_fid,
				    bool secure_origin,
				    uint64_t x1,
				    uint64_t x2,
				    uint64_t x3,
				    uint64_t x4,
				    void *cookie,
				    void *handle,
				    uint64_t flags)
{
	uint32_t requested_version = x1 & FFA_VERSION_MASK;

	/* Bit 31 of the input version must be zero. */
	if (requested_version & FFA_VERSION_BIT31_MASK) {
		/* Invalid encoding, return an error. */
		SMC_RET1(handle, FFA_ERROR_NOT_SUPPORTED);
		/* Execution stops here. */
	}

	/* Determine the caller to store the requested version. */
	if (secure_origin) {
		/*
		 * Ensure that the SP is reporting the same version as
		 * specified in its manifest. If these do not match there is
		 * something wrong with the SP.
		 * TODO: Should we abort the SP? For now assert this is not
		 *       case.
		 */
		assert(requested_version ==
		       spmc_get_current_sp_ctx()->ffa_version);
	} else {
		/*
		 * If this is called by the normal world, record this
		 * information in its descriptor.
		 */
		spmc_get_hyp_ctx()->ffa_version = requested_version;
	}

	/* Report the FF-A version implemented by the SPMC. */
	SMC_RET1(handle, MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
					  FFA_VERSION_MINOR));
}
715 
716 /*******************************************************************************
717  * Helper function to obtain the FF-A version of the calling partition.
718  ******************************************************************************/
719 uint32_t get_partition_ffa_version(bool secure_origin)
720 {
721 	if (secure_origin) {
722 		return spmc_get_current_sp_ctx()->ffa_version;
723 	} else {
724 		return spmc_get_hyp_ctx()->ffa_version;
725 	}
726 }
727 
/*******************************************************************************
 * Handle FFA_RXTX_MAP: validate and map the caller's RX/TX buffer pair into
 * the SPMC translation regime, then record the pair in the caller's mailbox.
 * The TX buffer is mapped read-only (the SPMC only reads it) and the RX
 * buffer read-write.
 ******************************************************************************/
static uint64_t rxtx_map_handler(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags)
{
	int ret;
	uint32_t error_code;
	uint32_t mem_atts = secure_origin ? MT_SECURE : MT_NS;
	struct mailbox *mbox;
	uintptr_t tx_address = x1;
	uintptr_t rx_address = x2;
	uint32_t page_count = x3 & FFA_RXTX_PAGE_COUNT_MASK; /* Bits [5:0] */
	uint32_t buf_size = page_count * FFA_PAGE_SIZE;

	/*
	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
	 * ABI on behalf of a VM and reject it if this is the case.
	 */
	if (tx_address == 0 || rx_address == 0) {
		WARN("Mapping RX/TX Buffers on behalf of VM not supported.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Ensure the specified buffers are not the same. */
	if (tx_address == rx_address) {
		WARN("TX Buffer must not be the same as RX Buffer.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Ensure the buffer size is not 0. */
	if (buf_size == 0U) {
		WARN("Buffer size must not be 0\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/*
	 * Ensure the buffer size is a multiple of the translation granule size
	 * in TF-A.
	 */
	if (buf_size % PAGE_SIZE != 0U) {
		WARN("Buffer size must be aligned to translation granule.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Obtain the RX/TX buffer pair descriptor. */
	mbox = spmc_get_mbox_desc(secure_origin);

	spin_lock(&mbox->lock);

	/* Check if buffers have already been mapped. */
	if (mbox->rx_buffer != 0 || mbox->tx_buffer != 0) {
		WARN("RX/TX Buffers already mapped (%p/%p)\n",
		     (void *) mbox->rx_buffer, (void *)mbox->tx_buffer);
		error_code = FFA_ERROR_DENIED;
		goto err;
	}

	/* memmap the TX buffer as read only. */
	ret = mmap_add_dynamic_region(tx_address, /* PA */
			tx_address, /* VA */
			buf_size, /* size */
			mem_atts | MT_RO_DATA); /* attrs */
	if (ret != 0) {
		/* Return the correct error code. */
		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
						FFA_ERROR_INVALID_PARAMETER;
		WARN("Unable to map TX buffer: %d\n", error_code);
		goto err;
	}

	/* memmap the RX buffer as read write. */
	ret = mmap_add_dynamic_region(rx_address, /* PA */
			rx_address, /* VA */
			buf_size, /* size */
			mem_atts | MT_RW_DATA); /* attrs */

	if (ret != 0) {
		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
						FFA_ERROR_INVALID_PARAMETER;
		WARN("Unable to map RX buffer: %d\n", error_code);
		/* Unmap the TX buffer again. */
		mmap_remove_dynamic_region(tx_address, buf_size);
		goto err;
	}

	/* Both mappings succeeded; record the pair in the mailbox. */
	mbox->tx_buffer = (void *) tx_address;
	mbox->rx_buffer = (void *) rx_address;
	mbox->rxtx_page_count = page_count;
	spin_unlock(&mbox->lock);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);
	/* Execution stops here. */
err:
	spin_unlock(&mbox->lock);
	return spmc_ffa_error_return(handle, error_code);
}
834 
/*******************************************************************************
 * Handle FFA_RXTX_UNMAP: unmap the caller's RX/TX buffer pair and clear the
 * mailbox bookkeeping. Unmap failures are logged but the bookkeeping is
 * cleared regardless.
 ******************************************************************************/
static uint64_t rxtx_unmap_handler(uint32_t smc_fid,
				   bool secure_origin,
				   uint64_t x1,
				   uint64_t x2,
				   uint64_t x3,
				   uint64_t x4,
				   void *cookie,
				   void *handle,
				   uint64_t flags)
{
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	uint32_t buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;

	/*
	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
	 * ABI on behalf of a VM and reject it if this is the case.
	 */
	if (x1 != 0UL) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&mbox->lock);

	/* Check if buffers are currently mapped. */
	if (mbox->rx_buffer == 0 || mbox->tx_buffer == 0) {
		spin_unlock(&mbox->lock);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Unmap RX Buffer */
	if (mmap_remove_dynamic_region((uintptr_t) mbox->rx_buffer,
				       buf_size) != 0) {
		WARN("Unable to unmap RX buffer!\n");
	}

	mbox->rx_buffer = 0;

	/* Unmap TX Buffer */
	if (mmap_remove_dynamic_region((uintptr_t) mbox->tx_buffer,
				       buf_size) != 0) {
		WARN("Unable to unmap TX buffer!\n");
	}

	mbox->tx_buffer = 0;
	mbox->rxtx_page_count = 0;

	spin_unlock(&mbox->lock);
	SMC_RET1(handle, FFA_SUCCESS_SMC32);
}
887 
888 /*
889  * Helper function to populate the properties field of a Partition Info Get
890  * descriptor.
891  */
892 static uint32_t
893 partition_info_get_populate_properties(uint32_t sp_properties,
894 				       enum sp_execution_state sp_ec_state)
895 {
896 	uint32_t properties = sp_properties;
897 	uint32_t ec_state;
898 
899 	/* Determine the execution state of the SP. */
900 	ec_state = sp_ec_state == SP_STATE_AARCH64 ?
901 		   FFA_PARTITION_INFO_GET_AARCH64_STATE :
902 		   FFA_PARTITION_INFO_GET_AARCH32_STATE;
903 
904 	properties |= ec_state << FFA_PARTITION_INFO_GET_EXEC_STATE_SHIFT;
905 
906 	return properties;
907 }
908 
/*
 * Collate the partition information in a v1.1 partition information
 * descriptor format, this will be converter later if required.
 * Returns 0 on success or FFA_ERROR_NO_MEMORY if more partitions match than
 * max_partitions allows; *partition_count is incremented per match.
 */
static int partition_info_get_handler_v1_1(uint32_t *uuid,
					   struct ffa_partition_info_v1_1
						  *partitions,
					   uint32_t max_partitions,
					   uint32_t *partition_count)
{
	uint32_t index;
	struct ffa_partition_info_v1_1 *desc;
	/* A null UUID matches every partition. */
	bool null_uuid = is_null_uuid(uuid);
	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();

	/* Deal with Logical Partitions. */
	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
		if (null_uuid || uuid_match(uuid, el3_lp_descs[index].uuid)) {
			/* Found a matching UUID, populate appropriately. */
			if (*partition_count >= max_partitions) {
				return FFA_ERROR_NO_MEMORY;
			}

			desc = &partitions[*partition_count];
			desc->ep_id = el3_lp_descs[index].sp_id;
			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
			/* LSPs must be AArch64. */
			desc->properties =
				partition_info_get_populate_properties(
					el3_lp_descs[index].properties,
					SP_STATE_AARCH64);

			/* UUIDs are only reported for a wildcard query. */
			if (null_uuid) {
				copy_uuid(desc->uuid, el3_lp_descs[index].uuid);
			}
			(*partition_count)++;
		}
	}

	/* Deal with physical SP's. */
	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
			/* Found a matching UUID, populate appropriately. */
			if (*partition_count >= max_partitions) {
				return FFA_ERROR_NO_MEMORY;
			}

			desc = &partitions[*partition_count];
			desc->ep_id = sp_desc[index].sp_id;
			/*
			 * Execution context count must match No. cores for
			 * S-EL1 SPs.
			 */
			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
			desc->properties =
				partition_info_get_populate_properties(
					sp_desc[index].properties,
					sp_desc[index].execution_state);

			/* UUIDs are only reported for a wildcard query. */
			if (null_uuid) {
				copy_uuid(desc->uuid, sp_desc[index].uuid);
			}
			(*partition_count)++;
		}
	}
	return 0;
}
976 
977 /*
 * Handle the case where the caller only wants the count of partitions
979  * matching a given UUID and does not want the corresponding descriptors
980  * populated.
981  */
982 static uint32_t partition_info_get_handler_count_only(uint32_t *uuid)
983 {
984 	uint32_t index = 0;
985 	uint32_t partition_count = 0;
986 	bool null_uuid = is_null_uuid(uuid);
987 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
988 
989 	/* Deal with Logical Partitions. */
990 	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
991 		if (null_uuid ||
992 		    uuid_match(uuid, el3_lp_descs[index].uuid)) {
993 			(partition_count)++;
994 		}
995 	}
996 
997 	/* Deal with physical SP's. */
998 	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
999 		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
1000 			(partition_count)++;
1001 		}
1002 	}
1003 	return partition_count;
1004 }
1005 
1006 /*
1007  * If the caller of the PARTITION_INFO_GET ABI was a v1.0 caller, populate
1008  * the corresponding descriptor format from the v1.1 descriptor array.
1009  */
1010 static uint64_t partition_info_populate_v1_0(struct ffa_partition_info_v1_1
1011 					     *partitions,
1012 					     struct mailbox *mbox,
1013 					     int partition_count)
1014 {
1015 	uint32_t index;
1016 	uint32_t buf_size;
1017 	uint32_t descriptor_size;
1018 	struct ffa_partition_info_v1_0 *v1_0_partitions =
1019 		(struct ffa_partition_info_v1_0 *) mbox->rx_buffer;
1020 
1021 	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1022 	descriptor_size = partition_count *
1023 			  sizeof(struct ffa_partition_info_v1_0);
1024 
1025 	if (descriptor_size > buf_size) {
1026 		return FFA_ERROR_NO_MEMORY;
1027 	}
1028 
1029 	for (index = 0U; index < partition_count; index++) {
1030 		v1_0_partitions[index].ep_id = partitions[index].ep_id;
1031 		v1_0_partitions[index].execution_ctx_count =
1032 			partitions[index].execution_ctx_count;
1033 		/* Only report v1.0 properties. */
1034 		v1_0_partitions[index].properties =
1035 			(partitions[index].properties &
1036 			FFA_PARTITION_INFO_GET_PROPERTIES_V1_0_MASK);
1037 	}
1038 	return 0;
1039 }
1040 
1041 /*
1042  * Main handler for FFA_PARTITION_INFO_GET which supports both FF-A v1.1 and
1043  * v1.0 implementations.
1044  */
static uint64_t partition_info_get_handler(uint32_t smc_fid,
					   bool secure_origin,
					   uint64_t x1,
					   uint64_t x2,
					   uint64_t x3,
					   uint64_t x4,
					   void *cookie,
					   void *handle,
					   uint64_t flags)
{
	int ret;
	uint32_t partition_count = 0;
	uint32_t size = 0;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
	struct mailbox *mbox;
	uint64_t info_get_flags;
	bool count_only;
	uint32_t uuid[4];

	/*
	 * Reassemble the target UUID from x1-x4. An all-zero UUID acts as a
	 * wildcard matching every partition (see is_null_uuid usage in the
	 * discovery helpers).
	 */
	uuid[0] = x1;
	uuid[1] = x2;
	uuid[2] = x3;
	uuid[3] = x4;

	/* Determine if the Partition descriptors should be populated. */
	info_get_flags = SMC_GET_GP(handle, CTX_GPREG_X5);
	count_only = (info_get_flags & FFA_PARTITION_INFO_GET_COUNT_FLAG_MASK);

	/* Handle the case where we don't need to populate the descriptors. */
	if (count_only) {
		partition_count = partition_info_get_handler_count_only(uuid);
		if (partition_count == 0) {
			return spmc_ffa_error_return(handle,
						FFA_ERROR_INVALID_PARAMETER);
		}
	} else {
		struct ffa_partition_info_v1_1 partitions[MAX_SP_LP_PARTITIONS];

		/*
		 * Handle the case where the partition descriptors are required,
		 * check we have the buffers available and populate the
		 * appropriate structure version.
		 */

		/* Obtain the v1.1 format of the descriptors. */
		ret = partition_info_get_handler_v1_1(uuid, partitions,
						      MAX_SP_LP_PARTITIONS,
						      &partition_count);

		/* Check if an error occurred during discovery. */
		if (ret != 0) {
			goto err;
		}

		/* If we didn't find any matches the UUID is unknown. */
		if (partition_count == 0) {
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err;
		}

		/* Obtain the partition mailbox RX/TX buffer pair descriptor. */
		mbox = spmc_get_mbox_desc(secure_origin);

		/*
		 * If the caller has not bothered registering its RX/TX pair
		 * then return an error code.
		 */
		spin_lock(&mbox->lock);
		if (mbox->rx_buffer == NULL) {
			ret = FFA_ERROR_BUSY;
			goto err_unlock;
		}

		/* Ensure the RX buffer is currently free. */
		if (mbox->state != MAILBOX_STATE_EMPTY) {
			ret = FFA_ERROR_BUSY;
			goto err_unlock;
		}

		/* Zero the RX buffer before populating. */
		(void)memset(mbox->rx_buffer, 0,
			     mbox->rxtx_page_count * FFA_PAGE_SIZE);

		/*
		 * Depending on the FF-A version of the requesting partition
		 * we may need to convert to a v1.0 format otherwise we can copy
		 * directly.
		 */
		if (ffa_version == MAKE_FFA_VERSION(U(1), U(0))) {
			ret = partition_info_populate_v1_0(partitions,
							   mbox,
							   partition_count);
			if (ret != 0) {
				goto err_unlock;
			}
		} else {
			uint32_t buf_size = mbox->rxtx_page_count *
					    FFA_PAGE_SIZE;

			/* Ensure the descriptor will fit in the buffer. */
			size = sizeof(struct ffa_partition_info_v1_1);
			if (partition_count * size  > buf_size) {
				ret = FFA_ERROR_NO_MEMORY;
				goto err_unlock;
			}
			memcpy(mbox->rx_buffer, partitions,
			       partition_count * size);
		}

		/* The caller owns the RX buffer until it calls RX_RELEASE. */
		mbox->state = MAILBOX_STATE_FULL;
		spin_unlock(&mbox->lock);
	}
	/*
	 * 'size' is 0 for count-only and v1.0 callers; for v1.1 callers it is
	 * the per-descriptor size the caller uses to index the RX buffer.
	 */
	SMC_RET4(handle, FFA_SUCCESS_SMC32, 0, partition_count, size);

err_unlock:
	spin_unlock(&mbox->lock);
err:
	return spmc_ffa_error_return(handle, ret);
}
1164 
/* Return FFA_SUCCESS with a single feature-properties value in w2. */
static uint64_t ffa_feature_success(void *handle, uint32_t arg2)
{
	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0, arg2);
}
1169 
/*
 * FFA_FEATURES handling specific to FFA_MEM_RETRIEVE_REQ: the only optional
 * input property currently understood is the NS-bit request, whose semantics
 * differ between FF-A v1.0 and v1.1 callers.
 */
static uint64_t ffa_features_retrieve_request(bool secure_origin,
					      uint32_t input_properties,
					      void *handle)
{
	/*
	 * If we're called by the normal world we don't support any
	 * additional features.
	 */
	if (!secure_origin) {
		if ((input_properties & FFA_FEATURES_RET_REQ_NS_BIT) != 0U) {
			return spmc_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

	} else {
		struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
		/*
		 * If v1.1 the NS bit must be set otherwise it is an invalid
		 * call. If v1.0 check and store whether the SP has requested
		 * the use of the NS bit.
		 */
		if (sp->ffa_version == MAKE_FFA_VERSION(1, 1)) {
			if ((input_properties &
			     FFA_FEATURES_RET_REQ_NS_BIT) == 0U) {
				return spmc_ffa_error_return(handle,
						       FFA_ERROR_NOT_SUPPORTED);
			}
			return ffa_feature_success(handle,
						   FFA_FEATURES_RET_REQ_NS_BIT);
		} else {
			/* v1.0 caller: remember its choice for later use. */
			sp->ns_bit_requested = (input_properties &
					       FFA_FEATURES_RET_REQ_NS_BIT) !=
					       0U;
		}
		/* Echo the NS bit back to a v1.0 SP that opted in. */
		if (sp->ns_bit_requested) {
			return ffa_feature_success(handle,
						   FFA_FEATURES_RET_REQ_NS_BIT);
		}
	}
	/* NS bit not requested: plain success with no extra properties. */
	SMC_RET1(handle, FFA_SUCCESS_SMC32);
}
1211 
/*
 * FFA_FEATURES handler: report whether a queried FF-A function ID (passed in
 * x1) is implemented by this SPMC, taking the caller's world into account.
 * Feature IDs (bit 31 of x1 clear) are not supported.
 */
static uint64_t ffa_features_handler(uint32_t smc_fid,
				     bool secure_origin,
				     uint64_t x1,
				     uint64_t x2,
				     uint64_t x3,
				     uint64_t x4,
				     void *cookie,
				     void *handle,
				     uint64_t flags)
{
	uint32_t function_id = (uint32_t) x1;
	uint32_t input_properties = (uint32_t) x2;

	/* Check if a Feature ID was requested. */
	if ((function_id & FFA_FEATURES_BIT31_MASK) == 0U) {
		/* We currently don't support any additional features. */
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/*
	 * Handle the cases where we have separate handlers due to additional
	 * properties. Any other function ID falls through to the generic
	 * handling below.
	 */
	switch (function_id) {
	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
		return ffa_features_retrieve_request(secure_origin,
						     input_properties,
						     handle);
	}

	/*
	 * We don't currently support additional input properties for these
	 * other ABIs therefore ensure this value is set to 0.
	 */
	if (input_properties != 0U) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_NOT_SUPPORTED);
	}

	/* Report if any other FF-A ABI is supported. */
	switch (function_id) {
	/* Supported features from both worlds. */
	case FFA_ERROR:
	case FFA_SUCCESS_SMC32:
	case FFA_INTERRUPT:
	case FFA_SPM_ID_GET:
	case FFA_ID_GET:
	case FFA_FEATURES:
	case FFA_VERSION:
	case FFA_RX_RELEASE:
	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
	case FFA_PARTITION_INFO_GET:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_MEM_FRAG_TX:
	case FFA_MSG_RUN:

		/*
		 * We are relying on the fact that the other registers
		 * will be set to 0 as these values align with the
		 * currently implemented features of the SPMC. If this
		 * changes this function must be extended to handle
		 * reporting the additional functionality.
		 */

		SMC_RET1(handle, FFA_SUCCESS_SMC32);
		/* Execution stops here. */

	/* Supported ABIs only from the secure world. */
	case FFA_SECONDARY_EP_REGISTER_SMC64:
	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
	case FFA_MEM_RELINQUISH:
	case FFA_MSG_WAIT:
	case FFA_CONSOLE_LOG_SMC32:
	case FFA_CONSOLE_LOG_SMC64:

		if (!secure_origin) {
			return spmc_ffa_error_return(handle,
				FFA_ERROR_NOT_SUPPORTED);
		}
		SMC_RET1(handle, FFA_SUCCESS_SMC32);
		/* Execution stops here. */

	/* Supported features only from the normal world. */
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
	case FFA_MEM_RECLAIM:
	case FFA_MEM_FRAG_RX:

		if (secure_origin) {
			return spmc_ffa_error_return(handle,
					FFA_ERROR_NOT_SUPPORTED);
		}
		SMC_RET1(handle, FFA_SUCCESS_SMC32);
		/* Execution stops here. */

	default:
		return spmc_ffa_error_return(handle,
					FFA_ERROR_NOT_SUPPORTED);
	}
}
1319 
1320 static uint64_t ffa_id_get_handler(uint32_t smc_fid,
1321 				   bool secure_origin,
1322 				   uint64_t x1,
1323 				   uint64_t x2,
1324 				   uint64_t x3,
1325 				   uint64_t x4,
1326 				   void *cookie,
1327 				   void *handle,
1328 				   uint64_t flags)
1329 {
1330 	if (secure_origin) {
1331 		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1332 			 spmc_get_current_sp_ctx()->sp_id);
1333 	} else {
1334 		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1335 			 spmc_get_hyp_ctx()->ns_ep_id);
1336 	}
1337 }
1338 
1339 /*
1340  * Enable an SP to query the ID assigned to the SPMC.
1341  */
static uint64_t ffa_spm_id_get_handler(uint32_t smc_fid,
				       bool secure_origin,
				       uint64_t x1,
				       uint64_t x2,
				       uint64_t x3,
				       uint64_t x4,
				       void *cookie,
				       void *handle,
				       uint64_t flags)
{
	/* Per the FF-A spec, all argument registers x1-x7 must be zero. */
	assert(x1 == 0UL);
	assert(x2 == 0UL);
	assert(x3 == 0UL);
	assert(x4 == 0UL);
	assert(SMC_GET_GP(handle, CTX_GPREG_X5) == 0UL);
	assert(SMC_GET_GP(handle, CTX_GPREG_X6) == 0UL);
	assert(SMC_GET_GP(handle, CTX_GPREG_X7) == 0UL);

	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0, FFA_SPMC_ID);
}
1362 
1363 static uint64_t ffa_run_handler(uint32_t smc_fid,
1364 				bool secure_origin,
1365 				uint64_t x1,
1366 				uint64_t x2,
1367 				uint64_t x3,
1368 				uint64_t x4,
1369 				void *cookie,
1370 				void *handle,
1371 				uint64_t flags)
1372 {
1373 	struct secure_partition_desc *sp;
1374 	uint16_t target_id = FFA_RUN_EP_ID(x1);
1375 	uint16_t vcpu_id = FFA_RUN_VCPU_ID(x1);
1376 	unsigned int idx;
1377 	unsigned int *rt_state;
1378 	unsigned int *rt_model;
1379 
1380 	/* Can only be called from the normal world. */
1381 	if (secure_origin) {
1382 		ERROR("FFA_RUN can only be called from NWd.\n");
1383 		return spmc_ffa_error_return(handle,
1384 					     FFA_ERROR_INVALID_PARAMETER);
1385 	}
1386 
1387 	/* Cannot run a Normal world partition. */
1388 	if (ffa_is_normal_world_id(target_id)) {
1389 		ERROR("Cannot run a NWd partition (0x%x).\n", target_id);
1390 		return spmc_ffa_error_return(handle,
1391 					     FFA_ERROR_INVALID_PARAMETER);
1392 	}
1393 
1394 	/* Check that the target SP exists. */
1395 	sp = spmc_get_sp_ctx(target_id);
1396 		ERROR("Unknown partition ID (0x%x).\n", target_id);
1397 	if (sp == NULL) {
1398 		return spmc_ffa_error_return(handle,
1399 					     FFA_ERROR_INVALID_PARAMETER);
1400 	}
1401 
1402 	idx = get_ec_index(sp);
1403 
1404 	if (idx != vcpu_id) {
1405 		ERROR("Cannot run vcpu %d != %d.\n", idx, vcpu_id);
1406 		return spmc_ffa_error_return(handle,
1407 					     FFA_ERROR_INVALID_PARAMETER);
1408 	}
1409 	if (sp->runtime_el == S_EL0) {
1410 		spin_lock(&sp->rt_state_lock);
1411 	}
1412 	rt_state = &((sp->ec[idx]).rt_state);
1413 	rt_model = &((sp->ec[idx]).rt_model);
1414 	if (*rt_state == RT_STATE_RUNNING) {
1415 		if (sp->runtime_el == S_EL0) {
1416 			spin_unlock(&sp->rt_state_lock);
1417 		}
1418 		ERROR("Partition (0x%x) is already running.\n", target_id);
1419 		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
1420 	}
1421 
1422 	/*
1423 	 * Sanity check that if the execution context was not waiting then it
1424 	 * was either in the direct request or the run partition runtime model.
1425 	 */
1426 	if (*rt_state == RT_STATE_PREEMPTED || *rt_state == RT_STATE_BLOCKED) {
1427 		assert(*rt_model == RT_MODEL_RUN ||
1428 		       *rt_model == RT_MODEL_DIR_REQ);
1429 	}
1430 
1431 	/*
1432 	 * If the context was waiting then update the partition runtime model.
1433 	 */
1434 	if (*rt_state == RT_STATE_WAITING) {
1435 		*rt_model = RT_MODEL_RUN;
1436 	}
1437 
1438 	/*
1439 	 * Forward the request to the correct SP vCPU after updating
1440 	 * its state.
1441 	 */
1442 	*rt_state = RT_STATE_RUNNING;
1443 
1444 	if (sp->runtime_el == S_EL0) {
1445 		spin_unlock(&sp->rt_state_lock);
1446 	}
1447 
1448 	return spmc_smc_return(smc_fid, secure_origin, x1, 0, 0, 0,
1449 			       handle, cookie, flags, target_id);
1450 }
1451 
1452 static uint64_t rx_release_handler(uint32_t smc_fid,
1453 				   bool secure_origin,
1454 				   uint64_t x1,
1455 				   uint64_t x2,
1456 				   uint64_t x3,
1457 				   uint64_t x4,
1458 				   void *cookie,
1459 				   void *handle,
1460 				   uint64_t flags)
1461 {
1462 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1463 
1464 	spin_lock(&mbox->lock);
1465 
1466 	if (mbox->state != MAILBOX_STATE_FULL) {
1467 		spin_unlock(&mbox->lock);
1468 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1469 	}
1470 
1471 	mbox->state = MAILBOX_STATE_EMPTY;
1472 	spin_unlock(&mbox->lock);
1473 
1474 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1475 }
1476 
/*
 * FFA_CONSOLE_LOG handler: emit up to 48 characters carried in registers
 * x2-x7 to the EL3 console. x1 holds the character count.
 */
static uint64_t spmc_ffa_console_log(uint32_t smc_fid,
				     bool secure_origin,
				     uint64_t x1,
				     uint64_t x2,
				     uint64_t x3,
				     uint64_t x4,
				     void *cookie,
				     void *handle,
				     uint64_t flags)
{
	/* Maximum number of characters is 48: 6 registers of 8 bytes each. */
	char chars[48] = {0};
	size_t chars_max;
	size_t chars_count = x1;

	/* Does not support request from Nwd. */
	if (!secure_origin) {
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	assert(smc_fid == FFA_CONSOLE_LOG_SMC32 || smc_fid == FFA_CONSOLE_LOG_SMC64);
	/*
	 * Pack the register payloads into the byte buffer. NOTE(review):
	 * viewing 'chars' through uint32_t/uint64_t pointers relies on the
	 * little-endian byte order of AArch64 TF-A to present the characters
	 * in transmission order.
	 */
	if (smc_fid == FFA_CONSOLE_LOG_SMC32) {
		uint32_t *registers = (uint32_t *)chars;
		registers[0] = (uint32_t)x2;
		registers[1] = (uint32_t)x3;
		registers[2] = (uint32_t)x4;
		registers[3] = (uint32_t)SMC_GET_GP(handle, CTX_GPREG_X5);
		registers[4] = (uint32_t)SMC_GET_GP(handle, CTX_GPREG_X6);
		registers[5] = (uint32_t)SMC_GET_GP(handle, CTX_GPREG_X7);
		chars_max = 6 * sizeof(uint32_t);
	} else {
		uint64_t *registers = (uint64_t *)chars;
		registers[0] = x2;
		registers[1] = x3;
		registers[2] = x4;
		registers[3] = SMC_GET_GP(handle, CTX_GPREG_X5);
		registers[4] = SMC_GET_GP(handle, CTX_GPREG_X6);
		registers[5] = SMC_GET_GP(handle, CTX_GPREG_X7);
		chars_max = 6 * sizeof(uint64_t);
	}

	/* The count must be non-zero and fit in the register payload. */
	if ((chars_count == 0) || (chars_count > chars_max)) {
		return spmc_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
	}

	/* Print the requested characters, stopping early at a NUL byte. */
	for (size_t i = 0; (i < chars_count) && (chars[i] != '\0'); i++) {
		putchar(chars[i]);
	}

	SMC_RET1(handle, FFA_SUCCESS_SMC32);
}
1528 
1529 /*
1530  * Perform initial validation on the provided secondary entry point.
1531  * For now ensure it does not lie within the BL31 Image or the SP's
1532  * RX/TX buffers as these are mapped within EL3.
1533  * TODO: perform validation for additional invalid memory regions.
1534  */
1535 static int validate_secondary_ep(uintptr_t ep, struct secure_partition_desc *sp)
1536 {
1537 	struct mailbox *mb;
1538 	uintptr_t buffer_size;
1539 	uintptr_t sp_rx_buffer;
1540 	uintptr_t sp_tx_buffer;
1541 	uintptr_t sp_rx_buffer_limit;
1542 	uintptr_t sp_tx_buffer_limit;
1543 
1544 	mb = &sp->mailbox;
1545 	buffer_size = (uintptr_t) (mb->rxtx_page_count * FFA_PAGE_SIZE);
1546 	sp_rx_buffer = (uintptr_t) mb->rx_buffer;
1547 	sp_tx_buffer = (uintptr_t) mb->tx_buffer;
1548 	sp_rx_buffer_limit = sp_rx_buffer + buffer_size;
1549 	sp_tx_buffer_limit = sp_tx_buffer + buffer_size;
1550 
1551 	/*
1552 	 * Check if the entry point lies within BL31, or the
1553 	 * SP's RX or TX buffer.
1554 	 */
1555 	if ((ep >= BL31_BASE && ep < BL31_LIMIT) ||
1556 	    (ep >= sp_rx_buffer && ep < sp_rx_buffer_limit) ||
1557 	    (ep >= sp_tx_buffer && ep < sp_tx_buffer_limit)) {
1558 		return -EINVAL;
1559 	}
1560 	return 0;
1561 }
1562 
1563 /*******************************************************************************
1564  * This function handles the FFA_SECONDARY_EP_REGISTER SMC to allow an SP to
1565  *  register an entry point for initialization during a secondary cold boot.
1566  ******************************************************************************/
static uint64_t ffa_sec_ep_register_handler(uint32_t smc_fid,
					    bool secure_origin,
					    uint64_t x1,
					    uint64_t x2,
					    uint64_t x3,
					    uint64_t x4,
					    void *cookie,
					    void *handle,
					    uint64_t flags)
{
	struct secure_partition_desc *sp;
	struct sp_exec_ctx *sp_ctx;

	/* This request cannot originate from the Normal world. */
	if (!secure_origin) {
		WARN("%s: Can only be called from SWd.\n", __func__);
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/* Get the context of the current SP. */
	sp = spmc_get_current_sp_ctx();
	if (sp == NULL) {
		WARN("%s: Cannot find SP context.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Only an S-EL1 SP should be invoking this ABI. */
	if (sp->runtime_el != S_EL1) {
		WARN("%s: Can only be called for a S-EL1 SP.\n", __func__);
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* Ensure the SP is in its initialization state. */
	sp_ctx = spmc_get_sp_ec(sp);
	if (sp_ctx->rt_model != RT_MODEL_INIT) {
		WARN("%s: Can only be called during SP initialization.\n",
		     __func__);
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* Perform initial validation of the secondary entry point (x1). */
	if (validate_secondary_ep(x1, sp)) {
		WARN("%s: Invalid entry point provided (0x%lx).\n",
		     __func__, x1);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/*
	 * Update the secondary entrypoint in SP context.
	 * We don't need a lock here as during partition initialization there
	 * will only be a single core online.
	 */
	sp->secondary_ep = x1;
	VERBOSE("%s: 0x%lx\n", __func__, sp->secondary_ep);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);
}
1626 
1627 /*******************************************************************************
1628  * Permissions are encoded using a different format in the FFA_MEM_PERM_* ABIs
1629  * than in the Trusted Firmware, where the mmap_attr_t enum type is used. This
1630  * function converts a permission value from the FF-A format to the mmap_attr_t
1631  * format by setting MT_RW/MT_RO, MT_USER/MT_PRIVILEGED and
1632  * MT_EXECUTE/MT_EXECUTE_NEVER. The other fields are left as 0 because they are
1633  * ignored by the function xlat_change_mem_attributes_ctx().
1634  ******************************************************************************/
1635 static unsigned int ffa_perm_to_mmap_perm(unsigned int perms)
1636 {
1637 	unsigned int tf_attr = 0U;
1638 	unsigned int access;
1639 
1640 	/* Deal with data access permissions first. */
1641 	access = (perms & FFA_MEM_PERM_DATA_MASK) >> FFA_MEM_PERM_DATA_SHIFT;
1642 
1643 	switch (access) {
1644 	case FFA_MEM_PERM_DATA_RW:
1645 		/* Return 0 if the execute is set with RW. */
1646 		if ((perms & FFA_MEM_PERM_INST_NON_EXEC) != 0) {
1647 			tf_attr |= MT_RW | MT_USER | MT_EXECUTE_NEVER;
1648 		}
1649 		break;
1650 
1651 	case FFA_MEM_PERM_DATA_RO:
1652 		tf_attr |= MT_RO | MT_USER;
1653 		/* Deal with the instruction access permissions next. */
1654 		if ((perms & FFA_MEM_PERM_INST_NON_EXEC) == 0) {
1655 			tf_attr |= MT_EXECUTE;
1656 		} else {
1657 			tf_attr |= MT_EXECUTE_NEVER;
1658 		}
1659 		break;
1660 
1661 	case FFA_MEM_PERM_DATA_NA:
1662 	default:
1663 		return tf_attr;
1664 	}
1665 
1666 	return tf_attr;
1667 }
1668 
1669 /*******************************************************************************
1670  * Handler to set the permissions of a set of contiguous pages of a S-EL0 SP
1671  ******************************************************************************/
static uint64_t ffa_mem_perm_set_handler(uint32_t smc_fid,
					 bool secure_origin,
					 uint64_t x1,
					 uint64_t x2,
					 uint64_t x3,
					 uint64_t x4,
					 void *cookie,
					 void *handle,
					 uint64_t flags)
{
	struct secure_partition_desc *sp;
	unsigned int idx;
	uintptr_t base_va = (uintptr_t) x1;
	/*
	 * x2 is the page count. NOTE(review): the multiplication is done in
	 * 64-bit then cast; a huge x2 could wrap size_t on a 32-bit build —
	 * relying on xlat_change_mem_attributes_ctx() range checks; confirm.
	 */
	size_t size = (size_t)(x2 * PAGE_SIZE);
	uint32_t tf_attr;
	int ret;

	/* This request cannot originate from the Normal world. */
	if (!secure_origin) {
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	if (size == 0) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Get the context of the current SP. */
	sp = spmc_get_current_sp_ctx();
	if (sp == NULL) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* A S-EL1 SP has no business invoking this ABI. */
	if (sp->runtime_el == S_EL1) {
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* x3 carries the FF-A permission bits; reject reserved bits. */
	if ((x3 & ~((uint64_t)FFA_MEM_PERM_MASK)) != 0) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Get the execution context of the calling SP. */
	idx = get_ec_index(sp);

	/*
	 * Ensure that the S-EL0 SP is initialising itself. We do not need to
	 * synchronise this operation through a spinlock since a S-EL0 SP is UP
	 * and can only be initialising on this cpu.
	 */
	if (sp->ec[idx].rt_model != RT_MODEL_INIT) {
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	VERBOSE("Setting memory permissions:\n");
	VERBOSE("  Start address  : 0x%lx\n", base_va);
	VERBOSE("  Number of pages: %lu (%zu bytes)\n", x2, size);
	VERBOSE("  Attributes     : 0x%x\n", (uint32_t)x3);

	/* Convert inbound permissions to TF-A permission attributes */
	tf_attr = ffa_perm_to_mmap_perm((unsigned int)x3);
	/* A zero result signals an invalid permission combination. */
	if (tf_attr == 0U) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Request the change in permissions */
	ret = xlat_change_mem_attributes_ctx(sp->xlat_ctx_handle,
					     base_va, size, tf_attr);
	if (ret != 0) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	SMC_RET1(handle, FFA_SUCCESS_SMC32);
}
1750 
1751 /*******************************************************************************
1752  * Permissions are encoded using a different format in the FFA_MEM_PERM_* ABIs
1753  * than in the Trusted Firmware, where the mmap_attr_t enum type is used. This
1754  * function converts a permission value from the mmap_attr_t format to the FF-A
1755  * format.
1756  ******************************************************************************/
1757 static unsigned int mmap_perm_to_ffa_perm(unsigned int attr)
1758 {
1759 	unsigned int perms = 0U;
1760 	unsigned int data_access;
1761 
1762 	if ((attr & MT_USER) == 0) {
1763 		/* No access from EL0. */
1764 		data_access = FFA_MEM_PERM_DATA_NA;
1765 	} else {
1766 		if ((attr & MT_RW) != 0) {
1767 			data_access = FFA_MEM_PERM_DATA_RW;
1768 		} else {
1769 			data_access = FFA_MEM_PERM_DATA_RO;
1770 		}
1771 	}
1772 
1773 	perms |= (data_access & FFA_MEM_PERM_DATA_MASK)
1774 		<< FFA_MEM_PERM_DATA_SHIFT;
1775 
1776 	if ((attr & MT_EXECUTE_NEVER) != 0U) {
1777 		perms |= FFA_MEM_PERM_INST_NON_EXEC;
1778 	}
1779 
1780 	return perms;
1781 }
1782 
1783 /*******************************************************************************
1784  * Handler to get the permissions of a set of contiguous pages of a S-EL0 SP
1785  ******************************************************************************/
1786 static uint64_t ffa_mem_perm_get_handler(uint32_t smc_fid,
1787 					 bool secure_origin,
1788 					 uint64_t x1,
1789 					 uint64_t x2,
1790 					 uint64_t x3,
1791 					 uint64_t x4,
1792 					 void *cookie,
1793 					 void *handle,
1794 					 uint64_t flags)
1795 {
1796 	struct secure_partition_desc *sp;
1797 	unsigned int idx;
1798 	uintptr_t base_va = (uintptr_t)x1;
1799 	uint32_t tf_attr = 0;
1800 	int ret;
1801 
1802 	/* This request cannot originate from the Normal world. */
1803 	if (!secure_origin) {
1804 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1805 	}
1806 
1807 	/* Get the context of the current SP. */
1808 	sp = spmc_get_current_sp_ctx();
1809 	if (sp == NULL) {
1810 		return spmc_ffa_error_return(handle,
1811 					     FFA_ERROR_INVALID_PARAMETER);
1812 	}
1813 
1814 	/* A S-EL1 SP has no business invoking this ABI. */
1815 	if (sp->runtime_el == S_EL1) {
1816 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1817 	}
1818 
1819 	/* Get the execution context of the calling SP. */
1820 	idx = get_ec_index(sp);
1821 
1822 	/*
1823 	 * Ensure that the S-EL0 SP is initialising itself. We do not need to
1824 	 * synchronise this operation through a spinlock since a S-EL0 SP is UP
1825 	 * and can only be initialising on this cpu.
1826 	 */
1827 	if (sp->ec[idx].rt_model != RT_MODEL_INIT) {
1828 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1829 	}
1830 
1831 	/* Request the permissions */
1832 	ret = xlat_get_mem_attributes_ctx(sp->xlat_ctx_handle, base_va, &tf_attr);
1833 	if (ret != 0) {
1834 		return spmc_ffa_error_return(handle,
1835 					     FFA_ERROR_INVALID_PARAMETER);
1836 	}
1837 
1838 	/* Convert TF-A permission to FF-A permissions attributes. */
1839 	x2 = mmap_perm_to_ffa_perm(tf_attr);
1840 
1841 	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0, x2);
1842 }
1843 
1844 /*******************************************************************************
1845  * This function will parse the Secure Partition Manifest. From manifest, it
1846  * will fetch details for preparing Secure partition image context and secure
1847  * partition image boot arguments if any.
1848  ******************************************************************************/
1849 static int sp_manifest_parse(void *sp_manifest, int offset,
1850 			     struct secure_partition_desc *sp,
1851 			     entry_point_info_t *ep_info,
1852 			     int32_t *boot_info_reg)
1853 {
1854 	int32_t ret, node;
1855 	uint32_t config_32;
1856 
1857 	/*
1858 	 * Look for the mandatory fields that are expected to be present in
1859 	 * the SP manifests.
1860 	 */
1861 	node = fdt_path_offset(sp_manifest, "/");
1862 	if (node < 0) {
1863 		ERROR("Did not find root node.\n");
1864 		return node;
1865 	}
1866 
1867 	ret = fdt_read_uint32_array(sp_manifest, node, "uuid",
1868 				    ARRAY_SIZE(sp->uuid), sp->uuid);
1869 	if (ret != 0) {
1870 		ERROR("Missing Secure Partition UUID.\n");
1871 		return ret;
1872 	}
1873 
1874 	ret = fdt_read_uint32(sp_manifest, node, "exception-level", &config_32);
1875 	if (ret != 0) {
1876 		ERROR("Missing SP Exception Level information.\n");
1877 		return ret;
1878 	}
1879 
1880 	sp->runtime_el = config_32;
1881 
1882 	ret = fdt_read_uint32(sp_manifest, node, "ffa-version", &config_32);
1883 	if (ret != 0) {
1884 		ERROR("Missing Secure Partition FF-A Version.\n");
1885 		return ret;
1886 	}
1887 
1888 	sp->ffa_version = config_32;
1889 
1890 	ret = fdt_read_uint32(sp_manifest, node, "execution-state", &config_32);
1891 	if (ret != 0) {
1892 		ERROR("Missing Secure Partition Execution State.\n");
1893 		return ret;
1894 	}
1895 
1896 	sp->execution_state = config_32;
1897 
1898 	ret = fdt_read_uint32(sp_manifest, node,
1899 			      "messaging-method", &config_32);
1900 	if (ret != 0) {
1901 		ERROR("Missing Secure Partition messaging method.\n");
1902 		return ret;
1903 	}
1904 
1905 	/* Validate this entry, we currently only support direct messaging. */
1906 	if ((config_32 & ~(FFA_PARTITION_DIRECT_REQ_RECV |
1907 			  FFA_PARTITION_DIRECT_REQ_SEND)) != 0U) {
1908 		WARN("Invalid Secure Partition messaging method (0x%x)\n",
1909 		     config_32);
1910 		return -EINVAL;
1911 	}
1912 
1913 	sp->properties = config_32;
1914 
1915 	ret = fdt_read_uint32(sp_manifest, node,
1916 			      "execution-ctx-count", &config_32);
1917 
1918 	if (ret != 0) {
1919 		ERROR("Missing SP Execution Context Count.\n");
1920 		return ret;
1921 	}
1922 
1923 	/*
1924 	 * Ensure this field is set correctly in the manifest however
1925 	 * since this is currently a hardcoded value for S-EL1 partitions
1926 	 * we don't need to save it here, just validate.
1927 	 */
1928 	if ((sp->runtime_el == S_EL1) && (config_32 != PLATFORM_CORE_COUNT)) {
1929 		ERROR("SP Execution Context Count (%u) must be %u.\n",
1930 			config_32, PLATFORM_CORE_COUNT);
1931 		return -EINVAL;
1932 	}
1933 
1934 	/*
1935 	 * Look for the optional fields that are expected to be present in
1936 	 * an SP manifest.
1937 	 */
1938 	ret = fdt_read_uint32(sp_manifest, node, "id", &config_32);
1939 	if (ret != 0) {
1940 		WARN("Missing Secure Partition ID.\n");
1941 	} else {
1942 		if (!is_ffa_secure_id_valid(config_32)) {
1943 			ERROR("Invalid Secure Partition ID (0x%x).\n",
1944 			      config_32);
1945 			return -EINVAL;
1946 		}
1947 		sp->sp_id = config_32;
1948 	}
1949 
1950 	ret = fdt_read_uint32(sp_manifest, node,
1951 			      "power-management-messages", &config_32);
1952 	if (ret != 0) {
1953 		WARN("Missing Power Management Messages entry.\n");
1954 	} else {
1955 		if ((sp->runtime_el == S_EL0) && (config_32 != 0)) {
1956 			ERROR("Power messages not supported for S-EL0 SP\n");
1957 			return -EINVAL;
1958 		}
1959 
1960 		/*
1961 		 * Ensure only the currently supported power messages have
1962 		 * been requested.
1963 		 */
1964 		if (config_32 & ~(FFA_PM_MSG_SUB_CPU_OFF |
1965 				  FFA_PM_MSG_SUB_CPU_SUSPEND |
1966 				  FFA_PM_MSG_SUB_CPU_SUSPEND_RESUME)) {
1967 			ERROR("Requested unsupported PM messages (%x)\n",
1968 			      config_32);
1969 			return -EINVAL;
1970 		}
1971 		sp->pwr_mgmt_msgs = config_32;
1972 	}
1973 
1974 	ret = fdt_read_uint32(sp_manifest, node,
1975 			      "gp-register-num", &config_32);
1976 	if (ret != 0) {
1977 		WARN("Missing boot information register.\n");
1978 	} else {
1979 		/* Check if a register number between 0-3 is specified. */
1980 		if (config_32 < 4) {
1981 			*boot_info_reg = config_32;
1982 		} else {
1983 			WARN("Incorrect boot information register (%u).\n",
1984 			     config_32);
1985 		}
1986 	}
1987 
1988 	return 0;
1989 }
1990 
1991 /*******************************************************************************
1992  * This function gets the Secure Partition Manifest base and maps the manifest
1993  * region.
1994  * Currently only one Secure Partition manifest is considered which is used to
1995  * prepare the context for the single Secure Partition.
1996  ******************************************************************************/
1997 static int find_and_prepare_sp_context(void)
1998 {
1999 	void *sp_manifest;
2000 	uintptr_t manifest_base;
2001 	uintptr_t manifest_base_align;
2002 	entry_point_info_t *next_image_ep_info;
2003 	int32_t ret, boot_info_reg = -1;
2004 	struct secure_partition_desc *sp;
2005 
2006 	next_image_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
2007 	if (next_image_ep_info == NULL) {
2008 		WARN("No Secure Partition image provided by BL2.\n");
2009 		return -ENOENT;
2010 	}
2011 
2012 	sp_manifest = (void *)next_image_ep_info->args.arg0;
2013 	if (sp_manifest == NULL) {
2014 		WARN("Secure Partition manifest absent.\n");
2015 		return -ENOENT;
2016 	}
2017 
2018 	manifest_base = (uintptr_t)sp_manifest;
2019 	manifest_base_align = page_align(manifest_base, DOWN);
2020 
2021 	/*
2022 	 * Map the secure partition manifest region in the EL3 translation
2023 	 * regime.
2024 	 * Map an area equal to (2 * PAGE_SIZE) for now. During manifest base
2025 	 * alignment the region of 1 PAGE_SIZE from manifest align base may
2026 	 * not completely accommodate the secure partition manifest region.
2027 	 */
2028 	ret = mmap_add_dynamic_region((unsigned long long)manifest_base_align,
2029 				      manifest_base_align,
2030 				      PAGE_SIZE * 2,
2031 				      MT_RO_DATA);
2032 	if (ret != 0) {
2033 		ERROR("Error while mapping SP manifest (%d).\n", ret);
2034 		return ret;
2035 	}
2036 
2037 	ret = fdt_node_offset_by_compatible(sp_manifest, -1,
2038 					    "arm,ffa-manifest-1.0");
2039 	if (ret < 0) {
2040 		ERROR("Error happened in SP manifest reading.\n");
2041 		return -EINVAL;
2042 	}
2043 
2044 	/*
2045 	 * Store the size of the manifest so that it can be used later to pass
2046 	 * the manifest as boot information later.
2047 	 */
2048 	next_image_ep_info->args.arg1 = fdt_totalsize(sp_manifest);
2049 	INFO("Manifest adr = %lx , size = %lu bytes\n", manifest_base,
2050 	     next_image_ep_info->args.arg1);
2051 
2052 	/*
2053 	 * Select an SP descriptor for initialising the partition's execution
2054 	 * context on the primary CPU.
2055 	 */
2056 	sp = spmc_get_current_sp_ctx();
2057 
2058 #if SPMC_AT_EL3_SEL0_SP
2059 	/* Assign translation tables context. */
2060 	sp_desc->xlat_ctx_handle = spm_get_sp_xlat_context();
2061 
2062 #endif /* SPMC_AT_EL3_SEL0_SP */
2063 	/* Initialize entry point information for the SP */
2064 	SET_PARAM_HEAD(next_image_ep_info, PARAM_EP, VERSION_1,
2065 		       SECURE | EP_ST_ENABLE);
2066 
2067 	/* Parse the SP manifest. */
2068 	ret = sp_manifest_parse(sp_manifest, ret, sp, next_image_ep_info,
2069 				&boot_info_reg);
2070 	if (ret != 0) {
2071 		ERROR("Error in Secure Partition manifest parsing.\n");
2072 		return ret;
2073 	}
2074 
2075 	/* Check that the runtime EL in the manifest was correct. */
2076 	if (sp->runtime_el != S_EL0 && sp->runtime_el != S_EL1) {
2077 		ERROR("Unexpected runtime EL: %d\n", sp->runtime_el);
2078 		return -EINVAL;
2079 	}
2080 
2081 	/* Perform any common initialisation. */
2082 	spmc_sp_common_setup(sp, next_image_ep_info, boot_info_reg);
2083 
2084 	/* Perform any initialisation specific to S-EL1 SPs. */
2085 	if (sp->runtime_el == S_EL1) {
2086 		spmc_el1_sp_setup(sp, next_image_ep_info);
2087 	}
2088 
2089 #if SPMC_AT_EL3_SEL0_SP
2090 	/* Setup spsr in endpoint info for common context management routine. */
2091 	if (sp->runtime_el == S_EL0) {
2092 		spmc_el0_sp_spsr_setup(next_image_ep_info);
2093 	}
2094 #endif /* SPMC_AT_EL3_SEL0_SP */
2095 
2096 	/* Initialize the SP context with the required ep info. */
2097 	spmc_sp_common_ep_commit(sp, next_image_ep_info);
2098 
2099 #if SPMC_AT_EL3_SEL0_SP
2100 	/*
2101 	 * Perform any initialisation specific to S-EL0 not set by common
2102 	 * context management routine.
2103 	 */
2104 	if (sp->runtime_el == S_EL0) {
2105 		spmc_el0_sp_setup(sp, boot_info_reg, sp_manifest);
2106 	}
2107 #endif /* SPMC_AT_EL3_SEL0_SP */
2108 	return 0;
2109 }
2110 
2111 /*******************************************************************************
2112  * This function takes an SP context pointer and performs a synchronous entry
2113  * into it.
2114  ******************************************************************************/
2115 static int32_t logical_sp_init(void)
2116 {
2117 	int32_t rc = 0;
2118 	struct el3_lp_desc *el3_lp_descs;
2119 
2120 	/* Perform initial validation of the Logical Partitions. */
2121 	rc = el3_sp_desc_validate();
2122 	if (rc != 0) {
2123 		ERROR("Logical Partition validation failed!\n");
2124 		return rc;
2125 	}
2126 
2127 	el3_lp_descs = get_el3_lp_array();
2128 
2129 	INFO("Logical Secure Partition init start.\n");
2130 	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
2131 		rc = el3_lp_descs[i].init();
2132 		if (rc != 0) {
2133 			ERROR("Logical SP (0x%x) Failed to Initialize\n",
2134 			      el3_lp_descs[i].sp_id);
2135 			return rc;
2136 		}
2137 		VERBOSE("Logical SP (0x%x) Initialized\n",
2138 			      el3_lp_descs[i].sp_id);
2139 	}
2140 
2141 	INFO("Logical Secure Partition init completed.\n");
2142 
2143 	return rc;
2144 }
2145 
/*******************************************************************************
 * This function takes an SP execution context pointer and performs a
 * synchronous entry into the Secure Partition it belongs to, returning the
 * value the SP exits with.
 ******************************************************************************/
uint64_t spmc_sp_synchronous_entry(struct sp_exec_ctx *ec)
{
	uint64_t rc;

	/* Sanity check for a NULL pointer dereference. */
	assert(ec != NULL);

	/* Assign the context of the SP to this CPU */
	cm_set_context(&(ec->cpu_ctx), SECURE);

	/* Restore the context assigned above */
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);

	/* Invalidate TLBs at EL1. */
	tlbivmalle1();
	dsbish();

	/* Enter Secure Partition */
	rc = spm_secure_partition_enter(&ec->c_rt_ctx);

	/* Save secure state */
	cm_el1_sysregs_context_save(SECURE);

	return rc;
}
2171 
2172 /*******************************************************************************
2173  * SPMC Helper Functions.
2174  ******************************************************************************/
2175 static int32_t sp_init(void)
2176 {
2177 	uint64_t rc;
2178 	struct secure_partition_desc *sp;
2179 	struct sp_exec_ctx *ec;
2180 
2181 	sp = spmc_get_current_sp_ctx();
2182 	ec = spmc_get_sp_ec(sp);
2183 	ec->rt_model = RT_MODEL_INIT;
2184 	ec->rt_state = RT_STATE_RUNNING;
2185 
2186 	INFO("Secure Partition (0x%x) init start.\n", sp->sp_id);
2187 
2188 	rc = spmc_sp_synchronous_entry(ec);
2189 	if (rc != 0) {
2190 		/* Indicate SP init was not successful. */
2191 		ERROR("SP (0x%x) failed to initialize (%lu).\n",
2192 		      sp->sp_id, rc);
2193 		return 0;
2194 	}
2195 
2196 	ec->rt_state = RT_STATE_WAITING;
2197 	INFO("Secure Partition initialized.\n");
2198 
2199 	return 1;
2200 }
2201 
2202 static void initalize_sp_descs(void)
2203 {
2204 	struct secure_partition_desc *sp;
2205 
2206 	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
2207 		sp = &sp_desc[i];
2208 		sp->sp_id = INV_SP_ID;
2209 		sp->mailbox.rx_buffer = NULL;
2210 		sp->mailbox.tx_buffer = NULL;
2211 		sp->mailbox.state = MAILBOX_STATE_EMPTY;
2212 		sp->secondary_ep = 0;
2213 	}
2214 }
2215 
2216 static void initalize_ns_ep_descs(void)
2217 {
2218 	struct ns_endpoint_desc *ns_ep;
2219 
2220 	for (unsigned int i = 0U; i < NS_PARTITION_COUNT; i++) {
2221 		ns_ep = &ns_ep_desc[i];
2222 		/*
2223 		 * Clashes with the Hypervisor ID but will not be a
2224 		 * problem in practice.
2225 		 */
2226 		ns_ep->ns_ep_id = 0;
2227 		ns_ep->ffa_version = 0;
2228 		ns_ep->mailbox.rx_buffer = NULL;
2229 		ns_ep->mailbox.tx_buffer = NULL;
2230 		ns_ep->mailbox.state = MAILBOX_STATE_EMPTY;
2231 	}
2232 }
2233 
2234 /*******************************************************************************
2235  * Initialize SPMC attributes for the SPMD.
2236  ******************************************************************************/
void spmc_populate_attrs(spmc_manifest_attribute_t *spmc_attrs)
{
	/* Advertise the FF-A version implemented by this SPMC. */
	spmc_attrs->major_version = FFA_VERSION_MAJOR;
	spmc_attrs->minor_version = FFA_VERSION_MINOR;
	/* The EL3 SPMC executes in AArch64 state. */
	spmc_attrs->exec_state = MODE_RW_64;
	spmc_attrs->spmc_id = FFA_SPMC_ID;
}
2244 
2245 /*******************************************************************************
2246  * Initialize contexts of all Secure Partitions.
2247  ******************************************************************************/
int32_t spmc_setup(void)
{
	int32_t ret;
	uint32_t flags;

	/* Initialize endpoint descriptors */
	initalize_sp_descs();
	initalize_ns_ep_descs();

	/*
	 * Retrieve the information of the datastore for tracking shared memory
	 * requests allocated by platform code and zero the region if available.
	 */
	ret = plat_spmc_shmem_datastore_get(&spmc_shmem_obj_state.data,
					    &spmc_shmem_obj_state.data_size);
	if (ret != 0) {
		ERROR("Failed to obtain memory descriptor backing store!\n");
		return ret;
	}
	memset(spmc_shmem_obj_state.data, 0, spmc_shmem_obj_state.data_size);

	/* Setup logical SPs. */
	ret = logical_sp_init();
	if (ret != 0) {
		ERROR("Failed to initialize Logical Partitions.\n");
		return ret;
	}

	/* Perform physical SP setup. */

	/* Disable MMU at EL1 (initialized by BL2) */
	disable_mmu_icache_el1();

	/* Initialize context of the SP */
	INFO("Secure Partition context setup start.\n");

	ret = find_and_prepare_sp_context();
	if (ret != 0) {
		ERROR("Error in SP finding and context preparation.\n");
		return ret;
	}

	/* Register power management hooks with PSCI */
	psci_register_spd_pm_hook(&spmc_pm);

	/*
	 * Register an interrupt handler for S-EL1 interrupts
	 * when generated during code executing in the
	 * non-secure state.
	 */
	flags = 0;
	set_interrupt_rm_flag(flags, NON_SECURE);
	ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
					      spmc_sp_interrupt_handler,
					      flags);
	if (ret != 0) {
		/*
		 * Without interrupt routing the SPMC cannot function;
		 * failure here is unrecoverable.
		 */
		ERROR("Failed to register interrupt handler! (%d)\n", ret);
		panic();
	}

	/*
	 * Register init function for deferred init: sp_init() is invoked by
	 * BL31 after all EL3 services are set up, performing the first
	 * synchronous entry into the SP.
	 */
	bl31_register_bl32_init(&sp_init);

	INFO("Secure Partition setup done.\n");

	return 0;
}
2315 
2316 /*******************************************************************************
2317  * Secure Partition Manager SMC handler.
2318  ******************************************************************************/
uint64_t spmc_smc_handler(uint32_t smc_fid,
			  bool secure_origin,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	/* Dispatch on the FF-A function ID; each handler receives the full
	 * SMC argument set plus the originating security state. */
	switch (smc_fid) {

	/* Setup and discovery interfaces. */
	case FFA_VERSION:
		return ffa_version_handler(smc_fid, secure_origin, x1, x2, x3,
					   x4, cookie, handle, flags);

	case FFA_SPM_ID_GET:
		return ffa_spm_id_get_handler(smc_fid, secure_origin, x1, x2,
					     x3, x4, cookie, handle, flags);

	case FFA_ID_GET:
		return ffa_id_get_handler(smc_fid, secure_origin, x1, x2, x3,
					  x4, cookie, handle, flags);

	case FFA_FEATURES:
		return ffa_features_handler(smc_fid, secure_origin, x1, x2, x3,
					    x4, cookie, handle, flags);

	case FFA_SECONDARY_EP_REGISTER_SMC64:
		return ffa_sec_ep_register_handler(smc_fid, secure_origin, x1,
						   x2, x3, x4, cookie, handle,
						   flags);

	/* Direct messaging interfaces (32- and 64-bit variants). */
	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
		return direct_req_smc_handler(smc_fid, secure_origin, x1, x2,
					      x3, x4, cookie, handle, flags);

	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
		return direct_resp_smc_handler(smc_fid, secure_origin, x1, x2,
					       x3, x4, cookie, handle, flags);

	/* RX/TX buffer management interfaces. */
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
		return rxtx_map_handler(smc_fid, secure_origin, x1, x2, x3, x4,
					cookie, handle, flags);

	case FFA_RXTX_UNMAP:
		return rxtx_unmap_handler(smc_fid, secure_origin, x1, x2, x3,
					  x4, cookie, handle, flags);

	case FFA_PARTITION_INFO_GET:
		return partition_info_get_handler(smc_fid, secure_origin, x1,
						  x2, x3, x4, cookie, handle,
						  flags);

	case FFA_RX_RELEASE:
		return rx_release_handler(smc_fid, secure_origin, x1, x2, x3,
					  x4, cookie, handle, flags);

	/* CPU cycle management interfaces. */
	case FFA_MSG_WAIT:
		return msg_wait_handler(smc_fid, secure_origin, x1, x2, x3, x4,
					cookie, handle, flags);

	case FFA_ERROR:
		return ffa_error_handler(smc_fid, secure_origin, x1, x2, x3, x4,
					cookie, handle, flags);

	case FFA_MSG_RUN:
		return ffa_run_handler(smc_fid, secure_origin, x1, x2, x3, x4,
				       cookie, handle, flags);

	/* Memory management interfaces. */
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
		return spmc_ffa_mem_send(smc_fid, secure_origin, x1, x2, x3, x4,
					 cookie, handle, flags);

	case FFA_MEM_FRAG_TX:
		return spmc_ffa_mem_frag_tx(smc_fid, secure_origin, x1, x2, x3,
					    x4, cookie, handle, flags);

	case FFA_MEM_FRAG_RX:
		return spmc_ffa_mem_frag_rx(smc_fid, secure_origin, x1, x2, x3,
					    x4, cookie, handle, flags);

	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
		return spmc_ffa_mem_retrieve_req(smc_fid, secure_origin, x1, x2,
						 x3, x4, cookie, handle, flags);

	case FFA_MEM_RELINQUISH:
		return spmc_ffa_mem_relinquish(smc_fid, secure_origin, x1, x2,
					       x3, x4, cookie, handle, flags);

	case FFA_MEM_RECLAIM:
		return spmc_ffa_mem_reclaim(smc_fid, secure_origin, x1, x2, x3,
						x4, cookie, handle, flags);
	/* Miscellaneous interfaces. */
	case FFA_CONSOLE_LOG_SMC32:
	case FFA_CONSOLE_LOG_SMC64:
		return spmc_ffa_console_log(smc_fid, secure_origin, x1, x2, x3,
						x4, cookie, handle, flags);

	case FFA_MEM_PERM_GET:
		return ffa_mem_perm_get_handler(smc_fid, secure_origin, x1, x2,
						x3, x4, cookie, handle, flags);

	case FFA_MEM_PERM_SET:
		return ffa_mem_perm_set_handler(smc_fid, secure_origin, x1, x2,
						x3, x4, cookie, handle, flags);

	default:
		WARN("Unsupported FF-A call 0x%08x.\n", smc_fid);
		break;
	}
	/* Unknown/unimplemented FIDs report NOT_SUPPORTED per the FF-A spec. */
	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
}
2438 
2439 /*******************************************************************************
2440  * This function is the handler registered for S-EL1 interrupts by the SPMC. It
2441  * validates the interrupt and upon success arranges entry into the SP for
2442  * handling the interrupt.
2443  ******************************************************************************/
2444 static uint64_t spmc_sp_interrupt_handler(uint32_t id,
2445 					  uint32_t flags,
2446 					  void *handle,
2447 					  void *cookie)
2448 {
2449 	struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
2450 	struct sp_exec_ctx *ec;
2451 	uint32_t linear_id = plat_my_core_pos();
2452 
2453 	/* Sanity check for a NULL pointer dereference. */
2454 	assert(sp != NULL);
2455 
2456 	/* Check the security state when the exception was generated. */
2457 	assert(get_interrupt_src_ss(flags) == NON_SECURE);
2458 
2459 	/* Panic if not an S-EL1 Partition. */
2460 	if (sp->runtime_el != S_EL1) {
2461 		ERROR("Interrupt received for a non S-EL1 SP on core%u.\n",
2462 		      linear_id);
2463 		panic();
2464 	}
2465 
2466 	/* Obtain a reference to the SP execution context. */
2467 	ec = spmc_get_sp_ec(sp);
2468 
2469 	/* Ensure that the execution context is in waiting state else panic. */
2470 	if (ec->rt_state != RT_STATE_WAITING) {
2471 		ERROR("SP EC on core%u is not waiting (%u), it is (%u).\n",
2472 		      linear_id, RT_STATE_WAITING, ec->rt_state);
2473 		panic();
2474 	}
2475 
2476 	/* Update the runtime model and state of the partition. */
2477 	ec->rt_model = RT_MODEL_INTR;
2478 	ec->rt_state = RT_STATE_RUNNING;
2479 
2480 	VERBOSE("SP (0x%x) interrupt start on core%u.\n", sp->sp_id, linear_id);
2481 
2482 	/*
2483 	 * Forward the interrupt to the S-EL1 SP. The interrupt ID is not
2484 	 * populated as the SP can determine this by itself.
2485 	 * The flags field is forced to 0 mainly to pass the SVE hint bit
2486 	 * cleared for consumption by the lower EL.
2487 	 */
2488 	return spmd_smc_switch_state(FFA_INTERRUPT, false,
2489 				     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
2490 				     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
2491 				     handle, 0ULL);
2492 }
2493