xref: /rk3399_ARM-atf/services/std_svc/spm/el3_spmc/spmc_main.c (revision 68bb3e836e93b271f9f1c05787025dd3f04dd788)
1 /*
2  * Copyright (c) 2022-2023, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <errno.h>
9 
10 #include <arch_helpers.h>
11 #include <bl31/bl31.h>
12 #include <bl31/ehf.h>
13 #include <bl31/interrupt_mgmt.h>
14 #include <common/debug.h>
15 #include <common/fdt_wrappers.h>
16 #include <common/runtime_svc.h>
17 #include <common/uuid.h>
18 #include <lib/el3_runtime/context_mgmt.h>
19 #include <lib/smccc.h>
20 #include <lib/utils.h>
21 #include <lib/xlat_tables/xlat_tables_v2.h>
22 #include <libfdt.h>
23 #include <plat/common/platform.h>
24 #include <services/el3_spmc_logical_sp.h>
25 #include <services/ffa_svc.h>
26 #include <services/spmc_svc.h>
27 #include <services/spmd_svc.h>
28 #include "spmc.h"
29 #include "spmc_shared_mem.h"
30 
31 #include <platform_def.h>
32 
33 /* FFA_MEM_PERM_* helpers */
34 #define FFA_MEM_PERM_MASK		U(7)
35 #define FFA_MEM_PERM_DATA_MASK		U(3)
36 #define FFA_MEM_PERM_DATA_SHIFT		U(0)
37 #define FFA_MEM_PERM_DATA_NA		U(0)
38 #define FFA_MEM_PERM_DATA_RW		U(1)
39 #define FFA_MEM_PERM_DATA_RES		U(2)
40 #define FFA_MEM_PERM_DATA_RO		U(3)
41 #define FFA_MEM_PERM_INST_EXEC          (U(0) << 2)
42 #define FFA_MEM_PERM_INST_NON_EXEC      (U(1) << 2)
43 
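/*
 * Illustrative note (not part of the upstream file): with the encodings
 * above, a read-only executable region is requested as
 *
 *   uint32_t perms = (FFA_MEM_PERM_DATA_RO << FFA_MEM_PERM_DATA_SHIFT) |
 *                    FFA_MEM_PERM_INST_EXEC;       (== 0x3)
 *
 * and a read-write non-executable region as
 *
 *   uint32_t perms = (FFA_MEM_PERM_DATA_RW << FFA_MEM_PERM_DATA_SHIFT) |
 *                    FFA_MEM_PERM_INST_NON_EXEC;   (== 0x5)
 *
 * ffa_perm_to_mmap_perm() further down rejects a request that combines RW
 * data access with execute permission.
 */
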
44 /* Declare the maximum number of SPs and EL3 LPs. */
45 #define MAX_SP_LP_PARTITIONS (SECURE_PARTITION_COUNT + MAX_EL3_LP_DESCS_COUNT)
46 
47 /*
48  * Allocate a secure partition descriptor to describe each SP in the system that
49  * does not reside at EL3.
50  */
51 static struct secure_partition_desc sp_desc[SECURE_PARTITION_COUNT];
52 
53 /*
54  * Allocate an NS endpoint descriptor to describe each VM and the Hypervisor in
55  * the system that interacts with an SP. It is used to track the Hypervisor
56  * buffer pair, version and ID for now. It could be extended to track VM
57  * properties when the SPMC supports indirect messaging.
58  */
59 static struct ns_endpoint_desc ns_ep_desc[NS_PARTITION_COUNT];
60 
61 static uint64_t spmc_sp_interrupt_handler(uint32_t id,
62 					  uint32_t flags,
63 					  void *handle,
64 					  void *cookie);
65 
66 /*
67  * Helper function to obtain the array storing the EL3
68  * Logical Partition descriptors.
69  */
70 struct el3_lp_desc *get_el3_lp_array(void)
71 {
72 	return (struct el3_lp_desc *) EL3_LP_DESCS_START;
73 }
74 
75 /*
76  * Helper function to obtain the descriptor of the last SP to which control
77  * was handed on this physical cpu. Currently, we assume there is only one SP.
78  * TODO: Expand to track multiple partitions when required.
79  */
80 struct secure_partition_desc *spmc_get_current_sp_ctx(void)
81 {
82 	return &(sp_desc[ACTIVE_SP_DESC_INDEX]);
83 }
84 
85 /*
86  * Helper function to obtain the execution context of an SP on the
87  * current physical cpu.
88  */
89 struct sp_exec_ctx *spmc_get_sp_ec(struct secure_partition_desc *sp)
90 {
91 	return &(sp->ec[get_ec_index(sp)]);
92 }
93 
94 /* Helper function to get pointer to SP context from its ID. */
95 struct secure_partition_desc *spmc_get_sp_ctx(uint16_t id)
96 {
97 	/* Check for Secure World Partitions. */
98 	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
99 		if (sp_desc[i].sp_id == id) {
100 			return &(sp_desc[i]);
101 		}
102 	}
103 	return NULL;
104 }
105 
106 /*
107  * Helper function to obtain the descriptor of the Hypervisor or OS kernel.
108  * We assume that the first descriptor is reserved for this entity.
109  */
110 struct ns_endpoint_desc *spmc_get_hyp_ctx(void)
111 {
112 	return &(ns_ep_desc[0]);
113 }
114 
115 /*
116  * Helper function to obtain the RX/TX buffer pair descriptor of the Hypervisor
117  * or OS kernel in the normal world or the last SP that was run.
118  */
119 struct mailbox *spmc_get_mbox_desc(bool secure_origin)
120 {
121 	/* Obtain the RX/TX buffer pair descriptor. */
122 	if (secure_origin) {
123 		return &(spmc_get_current_sp_ctx()->mailbox);
124 	} else {
125 		return &(spmc_get_hyp_ctx()->mailbox);
126 	}
127 }
128 
129 /******************************************************************************
130  * This function returns to the place where spmc_sp_synchronous_entry() was
131  * called originally.
132  ******************************************************************************/
133 __dead2 void spmc_sp_synchronous_exit(struct sp_exec_ctx *ec, uint64_t rc)
134 {
135 	/*
136 	 * The SPM must have initiated the original request through a
137 	 * synchronous entry into the secure partition. Jump back to the
138 	 * original C runtime context with the value of rc in x0.
139 	 */
140 	spm_secure_partition_exit(ec->c_rt_ctx, rc);
141 
142 	panic();
143 }
144 
145 /*******************************************************************************
146  * Return FFA_ERROR with specified error code.
147  ******************************************************************************/
148 uint64_t spmc_ffa_error_return(void *handle, int error_code)
149 {
150 	SMC_RET8(handle, FFA_ERROR,
151 		 FFA_TARGET_INFO_MBZ, error_code,
152 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
153 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
154 }
155 
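/*
 * Usage sketch (illustrative only): the ABI handlers below reject malformed
 * requests with, for example,
 *
 *   return spmc_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
 *
 * which places FFA_ERROR in x0 and the error code in x2 of the caller's
 * saved context before returning.
 */
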
156 /******************************************************************************
157  * Helper function to validate a secure partition ID to ensure it does not
158  * conflict with any other FF-A component and follows the convention to
159  * indicate it resides within the secure world.
160  ******************************************************************************/
161 bool is_ffa_secure_id_valid(uint16_t partition_id)
162 {
163 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
164 
165 	/* Ensure the ID is not the invalid partition ID. */
166 	if (partition_id == INV_SP_ID) {
167 		return false;
168 	}
169 
170 	/* Ensure the ID is not the SPMD ID. */
171 	if (partition_id == SPMD_DIRECT_MSG_ENDPOINT_ID) {
172 		return false;
173 	}
174 
175 	/*
176 	 * Ensure the ID follows the convention to indicate it resides
177 	 * in the secure world.
178 	 */
179 	if (!ffa_is_secure_world_id(partition_id)) {
180 		return false;
181 	}
182 
183 	/* Ensure we don't conflict with the SPMC partition ID. */
184 	if (partition_id == FFA_SPMC_ID) {
185 		return false;
186 	}
187 
188 	/* Ensure we do not already have an SP context with this ID. */
189 	if (spmc_get_sp_ctx(partition_id)) {
190 		return false;
191 	}
192 
193 	/* Ensure we don't clash with any Logical SPs. */
194 	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
195 		if (el3_lp_descs[i].sp_id == partition_id) {
196 			return false;
197 		}
198 	}
199 
200 	return true;
201 }
202 
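/*
 * Illustrative note: under the usual FF-A ID convention the secure-world bit
 * is the top bit of the 16-bit partition ID, so an ID such as 0x8001 would
 * pass the checks above provided it does not collide with the SPMC, the
 * SPMD, an existing SP or an EL3 Logical Partition. The exact layout is
 * defined by the FF-A headers, not by this file.
 */
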
203 /*******************************************************************************
204  * This function either forwards the request to the other world or returns
205  * with an ERET depending on the source of the call.
206  * We can assume that the destination is for an entity at a lower exception
207  * level as any messages destined for a logical SP resident in EL3 will have
208  * already been taken care of by the SPMC before entering this function.
209  ******************************************************************************/
210 static uint64_t spmc_smc_return(uint32_t smc_fid,
211 				bool secure_origin,
212 				uint64_t x1,
213 				uint64_t x2,
214 				uint64_t x3,
215 				uint64_t x4,
216 				void *handle,
217 				void *cookie,
218 				uint64_t flags,
219 				uint16_t dst_id)
220 {
221 	/* If the destination is in the normal world always go via the SPMD. */
222 	if (ffa_is_normal_world_id(dst_id)) {
223 		return spmd_smc_handler(smc_fid, x1, x2, x3, x4,
224 					cookie, handle, flags);
225 	}
226 	/*
227 	 * If the caller is secure and we want to return to the secure world,
228 	 * ERET directly.
229 	 */
230 	else if (secure_origin && ffa_is_secure_world_id(dst_id)) {
231 		SMC_RET5(handle, smc_fid, x1, x2, x3, x4);
232 	}
233 	/* If we originated in the normal world then switch contexts. */
234 	else if (!secure_origin && ffa_is_secure_world_id(dst_id)) {
235 		return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2,
236 					     x3, x4, handle);
237 	} else {
238 		/* Unknown State. */
239 		panic();
240 	}
241 
242 	/* Should not be reached. */
243 	return 0;
244 }
245 
246 /*******************************************************************************
247  * FF-A ABI Handlers.
248  ******************************************************************************/
249 
250 /*******************************************************************************
251  * Helper function to validate arg2 as part of a direct message.
252  ******************************************************************************/
253 static inline bool direct_msg_validate_arg2(uint64_t x2)
254 {
255 	/* Check message type. */
256 	if (x2 & FFA_FWK_MSG_BIT) {
257 		/* We have a framework message, ensure it is a known message. */
258 		if (x2 & ~(FFA_FWK_MSG_MASK | FFA_FWK_MSG_BIT)) {
259 			VERBOSE("Invalid message format 0x%lx.\n", x2);
260 			return false;
261 		}
262 	} else {
263 		/* We have a partition message, ensure x2 is not set. */
264 		if (x2 != (uint64_t) 0) {
265 			VERBOSE("Arg2 MBZ for partition messages. (0x%lx).\n",
266 				x2);
267 			return false;
268 		}
269 	}
270 	return true;
271 }
272 
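/*
 * Illustrative sketch: a plain partition message must pass x2 == 0, whereas
 * a framework message (for example a power management message generated by
 * the SPMC) sets the framework bit plus a message ID, roughly
 *
 *   uint64_t x2 = FFA_FWK_MSG_BIT | (msg_id & FFA_FWK_MSG_MASK);
 *
 * where msg_id stands for one of the framework message identifiers defined
 * in the FF-A headers (the name is a placeholder here).
 */
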
273 /*******************************************************************************
274  * Helper function to validate the destination ID of a direct response.
275  ******************************************************************************/
276 static bool direct_msg_validate_dst_id(uint16_t dst_id)
277 {
278 	struct secure_partition_desc *sp;
279 
280 	/* Check if we're targeting a normal world partition. */
281 	if (ffa_is_normal_world_id(dst_id)) {
282 		return true;
283 	}
284 
285 	/* Or directed to the SPMC itself. */
286 	if (dst_id == FFA_SPMC_ID) {
287 		return true;
288 	}
289 
290 	/* Otherwise ensure the SP exists. */
291 	sp = spmc_get_sp_ctx(dst_id);
292 	if (sp != NULL) {
293 		return true;
294 	}
295 
296 	return false;
297 }
298 
299 /*******************************************************************************
300  * Helper function to validate the response from a Logical Partition.
301  ******************************************************************************/
302 static bool direct_msg_validate_lp_resp(uint16_t origin_id, uint16_t lp_id,
303 					void *handle)
304 {
305 	/* Retrieve populated Direct Response Arguments. */
306 	uint64_t x1 = SMC_GET_GP(handle, CTX_GPREG_X1);
307 	uint64_t x2 = SMC_GET_GP(handle, CTX_GPREG_X2);
308 	uint16_t src_id = ffa_endpoint_source(x1);
309 	uint16_t dst_id = ffa_endpoint_destination(x1);
310 
311 	if (src_id != lp_id) {
312 		ERROR("Invalid EL3 LP source ID (0x%x).\n", src_id);
313 		return false;
314 	}
315 
316 	/*
317 	 * Check the destination ID is valid and ensure the LP is responding to
318 	 * the original request.
319 	 */
320 	if ((!direct_msg_validate_dst_id(dst_id)) || (dst_id != origin_id)) {
321 		ERROR("Invalid EL3 LP destination ID (0x%x).\n", dst_id);
322 		return false;
323 	}
324 
325 	if (!direct_msg_validate_arg2(x2)) {
326 		ERROR("Invalid EL3 LP message encoding.\n");
327 		return false;
328 	}
329 	return true;
330 }
331 
332 /*******************************************************************************
333  * Handle direct request messages and route to the appropriate destination.
334  ******************************************************************************/
335 static uint64_t direct_req_smc_handler(uint32_t smc_fid,
336 				       bool secure_origin,
337 				       uint64_t x1,
338 				       uint64_t x2,
339 				       uint64_t x3,
340 				       uint64_t x4,
341 				       void *cookie,
342 				       void *handle,
343 				       uint64_t flags)
344 {
345 	uint16_t src_id = ffa_endpoint_source(x1);
346 	uint16_t dst_id = ffa_endpoint_destination(x1);
347 	struct el3_lp_desc *el3_lp_descs;
348 	struct secure_partition_desc *sp;
349 	unsigned int idx;
350 
351 	/* Check if arg2 has been populated correctly based on message type. */
352 	if (!direct_msg_validate_arg2(x2)) {
353 		return spmc_ffa_error_return(handle,
354 					     FFA_ERROR_INVALID_PARAMETER);
355 	}
356 
357 	/* Validate Sender is either the current SP or from the normal world. */
358 	if ((secure_origin && src_id != spmc_get_current_sp_ctx()->sp_id) ||
359 		(!secure_origin && !ffa_is_normal_world_id(src_id))) {
360 		ERROR("Invalid direct request source ID (0x%x).\n", src_id);
361 		return spmc_ffa_error_return(handle,
362 					FFA_ERROR_INVALID_PARAMETER);
363 	}
364 
365 	el3_lp_descs = get_el3_lp_array();
366 
367 	/* Check if the request is destined for a Logical Partition. */
368 	for (unsigned int i = 0U; i < MAX_EL3_LP_DESCS_COUNT; i++) {
369 		if (el3_lp_descs[i].sp_id == dst_id) {
370 			uint64_t ret = el3_lp_descs[i].direct_req(
371 						smc_fid, secure_origin, x1, x2,
372 						x3, x4, cookie, handle, flags);
373 			if (!direct_msg_validate_lp_resp(src_id, dst_id,
374 							 handle)) {
375 				panic();
376 			}
377 
378 			/* Message checks out. */
379 			return ret;
380 		}
381 	}
382 
383 	/*
384 	 * If the request was not targeted to an LSP and came from the secure
385 	 * world then it is invalid, since an SP cannot call into the Normal
386 	 * world and there is no other SP to call into. If other SPs are added
387 	 * in future, the partition runtime model would need to be validated too.
388 	 */
389 	if (secure_origin) {
390 		VERBOSE("Direct request not supported to the Normal World.\n");
391 		return spmc_ffa_error_return(handle,
392 					     FFA_ERROR_INVALID_PARAMETER);
393 	}
394 
395 	/* Check if the SP ID is valid. */
396 	sp = spmc_get_sp_ctx(dst_id);
397 	if (sp == NULL) {
398 		VERBOSE("Direct request to unknown partition ID (0x%x).\n",
399 			dst_id);
400 		return spmc_ffa_error_return(handle,
401 					     FFA_ERROR_INVALID_PARAMETER);
402 	}
403 
404 	/* Protect the runtime state of a UP S-EL0 SP with a lock. */
405 	if (sp->runtime_el == S_EL0) {
406 		spin_lock(&sp->rt_state_lock);
407 	}
408 
409 	/*
410 	 * Check that the target execution context is in a waiting state before
411 	 * forwarding the direct request to it.
412 	 */
413 	idx = get_ec_index(sp);
414 	if (sp->ec[idx].rt_state != RT_STATE_WAITING) {
415 		VERBOSE("SP context on core%u is not waiting (%u).\n",
416 			idx, sp->ec[idx].rt_model);
417 
418 		if (sp->runtime_el == S_EL0) {
419 			spin_unlock(&sp->rt_state_lock);
420 		}
421 
422 		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
423 	}
424 
425 	/*
426 	 * Everything checks out so forward the request to the SP after updating
427 	 * its state and runtime model.
428 	 */
429 	sp->ec[idx].rt_state = RT_STATE_RUNNING;
430 	sp->ec[idx].rt_model = RT_MODEL_DIR_REQ;
431 	sp->ec[idx].dir_req_origin_id = src_id;
432 
433 	if (sp->runtime_el == S_EL0) {
434 		spin_unlock(&sp->rt_state_lock);
435 	}
436 
437 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
438 			       handle, cookie, flags, dst_id);
439 }
440 
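/*
 * Illustrative note: for FFA_MSG_SEND_DIRECT_REQ the endpoint IDs are packed
 * into w1 as (sender << 16) | receiver, which is what ffa_endpoint_source()
 * and ffa_endpoint_destination() unpack above. A normal world caller
 * targeting an SP with ID 0x8001 would therefore pass
 *
 *   x1 = (nwd_id << 16) | 0x8001;
 *
 * where nwd_id is its own endpoint ID (both IDs are illustrative values).
 */
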
441 /*******************************************************************************
442  * Handle direct response messages and route to the appropriate destination.
443  ******************************************************************************/
444 static uint64_t direct_resp_smc_handler(uint32_t smc_fid,
445 					bool secure_origin,
446 					uint64_t x1,
447 					uint64_t x2,
448 					uint64_t x3,
449 					uint64_t x4,
450 					void *cookie,
451 					void *handle,
452 					uint64_t flags)
453 {
454 	uint16_t dst_id = ffa_endpoint_destination(x1);
455 	struct secure_partition_desc *sp;
456 	unsigned int idx;
457 
458 	/* Check if arg2 has been populated correctly based on message type. */
459 	if (!direct_msg_validate_arg2(x2)) {
460 		return spmc_ffa_error_return(handle,
461 					     FFA_ERROR_INVALID_PARAMETER);
462 	}
463 
464 	/* Check that the response did not originate from the Normal world. */
465 	if (!secure_origin) {
466 		VERBOSE("Direct Response not supported from Normal World.\n");
467 		return spmc_ffa_error_return(handle,
468 					     FFA_ERROR_INVALID_PARAMETER);
469 	}
470 
471 	/*
472 	 * Check that the response is either targeted to the Normal world or the
473 	 * SPMC e.g. a PM response.
474 	 */
475 	if (!direct_msg_validate_dst_id(dst_id)) {
476 		VERBOSE("Direct response to invalid partition ID (0x%x).\n",
477 			dst_id);
478 		return spmc_ffa_error_return(handle,
479 					     FFA_ERROR_INVALID_PARAMETER);
480 	}
481 
482 	/* Obtain the SP descriptor and update its runtime state. */
483 	sp = spmc_get_sp_ctx(ffa_endpoint_source(x1));
484 	if (sp == NULL) {
485 		VERBOSE("Direct response to unknown partition ID (0x%x).\n",
486 			dst_id);
487 		return spmc_ffa_error_return(handle,
488 					     FFA_ERROR_INVALID_PARAMETER);
489 	}
490 
491 	if (sp->runtime_el == S_EL0) {
492 		spin_lock(&sp->rt_state_lock);
493 	}
494 
495 	/* Sanity check state is being tracked correctly in the SPMC. */
496 	idx = get_ec_index(sp);
497 	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
498 
499 	/* Ensure SP execution context was in the right runtime model. */
500 	if (sp->ec[idx].rt_model != RT_MODEL_DIR_REQ) {
501 		VERBOSE("SP context on core%u not handling direct req (%u).\n",
502 			idx, sp->ec[idx].rt_model);
503 		if (sp->runtime_el == S_EL0) {
504 			spin_unlock(&sp->rt_state_lock);
505 		}
506 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
507 	}
508 
509 	if (sp->ec[idx].dir_req_origin_id != dst_id) {
510 		WARN("Invalid direct resp partition ID 0x%x != 0x%x on core%u.\n",
511 		     dst_id, sp->ec[idx].dir_req_origin_id, idx);
512 		if (sp->runtime_el == S_EL0) {
513 			spin_unlock(&sp->rt_state_lock);
514 		}
515 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
516 	}
517 
518 	/* Update the state of the SP execution context. */
519 	sp->ec[idx].rt_state = RT_STATE_WAITING;
520 
521 	/* Clear the ongoing direct request ID. */
522 	sp->ec[idx].dir_req_origin_id = INV_SP_ID;
523 
524 	if (sp->runtime_el == S_EL0) {
525 		spin_unlock(&sp->rt_state_lock);
526 	}
527 
528 	/*
529 	 * If the receiver is not the SPMC then forward the response to the
530 	 * Normal world.
531 	 */
532 	if (dst_id == FFA_SPMC_ID) {
533 		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
534 		/* Should not get here. */
535 		panic();
536 	}
537 
538 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
539 			       handle, cookie, flags, dst_id);
540 }
541 
542 /*******************************************************************************
543  * This function handles the FFA_MSG_WAIT SMC to allow an SP to relinquish its
544  * cycles.
545  ******************************************************************************/
546 static uint64_t msg_wait_handler(uint32_t smc_fid,
547 				 bool secure_origin,
548 				 uint64_t x1,
549 				 uint64_t x2,
550 				 uint64_t x3,
551 				 uint64_t x4,
552 				 void *cookie,
553 				 void *handle,
554 				 uint64_t flags)
555 {
556 	struct secure_partition_desc *sp;
557 	unsigned int idx;
558 
559 	/*
560 	 * Check that the request did not originate from the Normal world as
561 	 * only the secure world can call this ABI.
562 	 */
563 	if (!secure_origin) {
564 		VERBOSE("Normal world cannot call FFA_MSG_WAIT.\n");
565 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
566 	}
567 
568 	/* Get the descriptor of the SP that invoked FFA_MSG_WAIT. */
569 	sp = spmc_get_current_sp_ctx();
570 	if (sp == NULL) {
571 		return spmc_ffa_error_return(handle,
572 					     FFA_ERROR_INVALID_PARAMETER);
573 	}
574 
575 	/*
576 	 * Get the execution context of the SP that invoked FFA_MSG_WAIT.
577 	 */
578 	idx = get_ec_index(sp);
579 	if (sp->runtime_el == S_EL0) {
580 		spin_lock(&sp->rt_state_lock);
581 	}
582 
583 	/* Ensure SP execution context was in the right runtime model. */
584 	if (sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) {
585 		if (sp->runtime_el == S_EL0) {
586 			spin_unlock(&sp->rt_state_lock);
587 		}
588 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
589 	}
590 
591 	/* Sanity check the state is being tracked correctly in the SPMC. */
592 	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
593 
594 	/*
595 	 * Perform a synchronous exit if the partition was initialising. The
596 	 * state is updated after the exit.
597 	 */
598 	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
599 		if (sp->runtime_el == S_EL0) {
600 			spin_unlock(&sp->rt_state_lock);
601 		}
602 		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
603 		/* Should not get here */
604 		panic();
605 	}
606 
607 	/* Update the state of the SP execution context. */
608 	sp->ec[idx].rt_state = RT_STATE_WAITING;
609 
610 	/* Resume normal world if a secure interrupt was handled. */
611 	if (sp->ec[idx].rt_model == RT_MODEL_INTR) {
612 		/* FFA_MSG_WAIT can only be called from the secure world. */
613 		unsigned int secure_state_in = SECURE;
614 		unsigned int secure_state_out = NON_SECURE;
615 
616 		cm_el1_sysregs_context_save(secure_state_in);
617 		cm_el1_sysregs_context_restore(secure_state_out);
618 		cm_set_next_eret_context(secure_state_out);
619 
620 		if (sp->runtime_el == S_EL0) {
621 			spin_unlock(&sp->rt_state_lock);
622 		}
623 
624 		SMC_RET0(cm_get_context(secure_state_out));
625 	}
626 
627 	/* Release the runtime state lock of an S-EL0 SP. */
628 	if (sp->runtime_el == S_EL0) {
629 		spin_unlock(&sp->rt_state_lock);
630 	}
631 
632 	/* Forward the response to the Normal world. */
633 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
634 			       handle, cookie, flags, FFA_NWD_ID);
635 }
636 
637 static uint64_t ffa_error_handler(uint32_t smc_fid,
638 				 bool secure_origin,
639 				 uint64_t x1,
640 				 uint64_t x2,
641 				 uint64_t x3,
642 				 uint64_t x4,
643 				 void *cookie,
644 				 void *handle,
645 				 uint64_t flags)
646 {
647 	struct secure_partition_desc *sp;
648 	unsigned int idx;
649 
650 	/* Check that the response did not originate from the Normal world. */
651 	if (!secure_origin) {
652 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
653 	}
654 
655 	/* Get the descriptor of the SP that invoked FFA_ERROR. */
656 	sp = spmc_get_current_sp_ctx();
657 	if (sp == NULL) {
658 		return spmc_ffa_error_return(handle,
659 					     FFA_ERROR_INVALID_PARAMETER);
660 	}
661 
662 	/* Get the execution context of the SP that invoked FFA_ERROR. */
663 	idx = get_ec_index(sp);
664 
665 	/*
666 	 * We only expect FFA_ERROR to be received during SP initialisation
667 	 * otherwise this is an invalid call.
668 	 */
669 	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
670 		ERROR("SP 0x%x failed to initialize.\n", sp->sp_id);
671 		spmc_sp_synchronous_exit(&sp->ec[idx], x2);
672 		/* Should not get here. */
673 		panic();
674 	}
675 
676 	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
677 }
678 
679 static uint64_t ffa_version_handler(uint32_t smc_fid,
680 				    bool secure_origin,
681 				    uint64_t x1,
682 				    uint64_t x2,
683 				    uint64_t x3,
684 				    uint64_t x4,
685 				    void *cookie,
686 				    void *handle,
687 				    uint64_t flags)
688 {
689 	uint32_t requested_version = x1 & FFA_VERSION_MASK;
690 
691 	if (requested_version & FFA_VERSION_BIT31_MASK) {
692 		/* Invalid encoding, return an error. */
693 		SMC_RET1(handle, FFA_ERROR_NOT_SUPPORTED);
694 		/* Execution stops here. */
695 	}
696 
697 	/* Determine the caller to store the requested version. */
698 	if (secure_origin) {
699 		/*
700 		 * Ensure that the SP is reporting the same version as
701 		 * specified in its manifest. If these do not match there is
702 		 * something wrong with the SP.
703 		 * TODO: Should we abort the SP? For now assert this is not
704 		 *       the case.
705 		 */
706 		assert(requested_version ==
707 		       spmc_get_current_sp_ctx()->ffa_version);
708 	} else {
709 		/*
710 		 * If this is called by the normal world, record this
711 		 * information in its descriptor.
712 		 */
713 		spmc_get_hyp_ctx()->ffa_version = requested_version;
714 	}
715 
716 	SMC_RET1(handle, MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
717 					  FFA_VERSION_MINOR));
718 }
719 
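/*
 * Illustrative note: FF-A versions are packed with the major revision in
 * bits [30:16] and the minor revision in bits [15:0], with bit 31 MBZ. A
 * v1.1 caller therefore passes x1 = MAKE_FFA_VERSION(1, 1), i.e. 0x10001,
 * and the SPMC replies with its own version in the same format.
 */
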
720 /*******************************************************************************
721  * Helper function to obtain the FF-A version of the calling partition.
722  ******************************************************************************/
723 uint32_t get_partition_ffa_version(bool secure_origin)
724 {
725 	if (secure_origin) {
726 		return spmc_get_current_sp_ctx()->ffa_version;
727 	} else {
728 		return spmc_get_hyp_ctx()->ffa_version;
729 	}
730 }
731 
732 static uint64_t rxtx_map_handler(uint32_t smc_fid,
733 				 bool secure_origin,
734 				 uint64_t x1,
735 				 uint64_t x2,
736 				 uint64_t x3,
737 				 uint64_t x4,
738 				 void *cookie,
739 				 void *handle,
740 				 uint64_t flags)
741 {
742 	int ret;
743 	uint32_t error_code;
744 	uint32_t mem_atts = secure_origin ? MT_SECURE : MT_NS;
745 	struct mailbox *mbox;
746 	uintptr_t tx_address = x1;
747 	uintptr_t rx_address = x2;
748 	uint32_t page_count = x3 & FFA_RXTX_PAGE_COUNT_MASK; /* Bits [5:0] */
749 	uint32_t buf_size = page_count * FFA_PAGE_SIZE;
750 
751 	/*
752 	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
753 	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
754 	 * ABI on behalf of a VM and reject it if this is the case.
755 	 */
756 	if (tx_address == 0 || rx_address == 0) {
757 		WARN("Mapping RX/TX Buffers on behalf of VM not supported.\n");
758 		return spmc_ffa_error_return(handle,
759 					     FFA_ERROR_INVALID_PARAMETER);
760 	}
761 
762 	/* Ensure the specified buffers are not the same. */
763 	if (tx_address == rx_address) {
764 		WARN("TX Buffer must not be the same as RX Buffer.\n");
765 		return spmc_ffa_error_return(handle,
766 					     FFA_ERROR_INVALID_PARAMETER);
767 	}
768 
769 	/* Ensure the buffer size is not 0. */
770 	if (buf_size == 0U) {
771 		WARN("Buffer size must not be 0\n");
772 		return spmc_ffa_error_return(handle,
773 					     FFA_ERROR_INVALID_PARAMETER);
774 	}
775 
776 	/*
777 	 * Ensure the buffer size is a multiple of the translation granule size
778 	 * in TF-A.
779 	 */
780 	if (buf_size % PAGE_SIZE != 0U) {
781 		WARN("Buffer size must be aligned to translation granule.\n");
782 		return spmc_ffa_error_return(handle,
783 					     FFA_ERROR_INVALID_PARAMETER);
784 	}
785 
786 	/* Obtain the RX/TX buffer pair descriptor. */
787 	mbox = spmc_get_mbox_desc(secure_origin);
788 
789 	spin_lock(&mbox->lock);
790 
791 	/* Check if buffers have already been mapped. */
792 	if (mbox->rx_buffer != 0 || mbox->tx_buffer != 0) {
793 		WARN("RX/TX Buffers already mapped (%p/%p)\n",
794 		     (void *) mbox->rx_buffer, (void *)mbox->tx_buffer);
795 		error_code = FFA_ERROR_DENIED;
796 		goto err;
797 	}
798 
799 	/* memmap the TX buffer as read only. */
800 	ret = mmap_add_dynamic_region(tx_address, /* PA */
801 			tx_address, /* VA */
802 			buf_size, /* size */
803 			mem_atts | MT_RO_DATA); /* attrs */
804 	if (ret != 0) {
805 		/* Return the correct error code. */
806 		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
807 						FFA_ERROR_INVALID_PARAMETER;
808 		WARN("Unable to map TX buffer: %d\n", error_code);
809 		goto err;
810 	}
811 
812 	/* memmap the RX buffer as read write. */
813 	ret = mmap_add_dynamic_region(rx_address, /* PA */
814 			rx_address, /* VA */
815 			buf_size, /* size */
816 			mem_atts | MT_RW_DATA); /* attrs */
817 
818 	if (ret != 0) {
819 		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
820 						FFA_ERROR_INVALID_PARAMETER;
821 		WARN("Unable to map RX buffer: %d\n", error_code);
822 		/* Unmap the TX buffer again. */
823 		mmap_remove_dynamic_region(tx_address, buf_size);
824 		goto err;
825 	}
826 
827 	mbox->tx_buffer = (void *) tx_address;
828 	mbox->rx_buffer = (void *) rx_address;
829 	mbox->rxtx_page_count = page_count;
830 	spin_unlock(&mbox->lock);
831 
832 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
833 	/* Execution stops here. */
834 err:
835 	spin_unlock(&mbox->lock);
836 	return spmc_ffa_error_return(handle, error_code);
837 }
838 
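/*
 * Usage sketch (illustrative): a caller maps a single-page buffer pair with
 *
 *   FFA_RXTX_MAP_SMC64: x1 = tx_pa, x2 = rx_pa, x3 = 1 (page count)
 *
 * where tx_pa and rx_pa stand for page-aligned addresses owned by the
 * caller. The handler above maps the TX buffer read-only and the RX buffer
 * read-write at EL3, so the SPMC can read requests from TX and write
 * responses into RX.
 */
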
839 static uint64_t rxtx_unmap_handler(uint32_t smc_fid,
840 				   bool secure_origin,
841 				   uint64_t x1,
842 				   uint64_t x2,
843 				   uint64_t x3,
844 				   uint64_t x4,
845 				   void *cookie,
846 				   void *handle,
847 				   uint64_t flags)
848 {
849 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
850 	uint32_t buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
851 
852 	/*
853 	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
854 	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
855 	 * ABI on behalf of a VM and reject it if this is the case.
856 	 */
857 	if (x1 != 0UL) {
858 		return spmc_ffa_error_return(handle,
859 					     FFA_ERROR_INVALID_PARAMETER);
860 	}
861 
862 	spin_lock(&mbox->lock);
863 
864 	/* Check if buffers are currently mapped. */
865 	if (mbox->rx_buffer == 0 || mbox->tx_buffer == 0) {
866 		spin_unlock(&mbox->lock);
867 		return spmc_ffa_error_return(handle,
868 					     FFA_ERROR_INVALID_PARAMETER);
869 	}
870 
871 	/* Unmap RX Buffer */
872 	if (mmap_remove_dynamic_region((uintptr_t) mbox->rx_buffer,
873 				       buf_size) != 0) {
874 		WARN("Unable to unmap RX buffer!\n");
875 	}
876 
877 	mbox->rx_buffer = 0;
878 
879 	/* Unmap TX Buffer */
880 	if (mmap_remove_dynamic_region((uintptr_t) mbox->tx_buffer,
881 				       buf_size) != 0) {
882 		WARN("Unable to unmap TX buffer!\n");
883 	}
884 
885 	mbox->tx_buffer = 0;
886 	mbox->rxtx_page_count = 0;
887 
888 	spin_unlock(&mbox->lock);
889 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
890 }
891 
892 /*
893  * Helper function to populate the properties field of a Partition Info Get
894  * descriptor.
895  */
896 static uint32_t
897 partition_info_get_populate_properties(uint32_t sp_properties,
898 				       enum sp_execution_state sp_ec_state)
899 {
900 	uint32_t properties = sp_properties;
901 	uint32_t ec_state;
902 
903 	/* Determine the execution state of the SP. */
904 	ec_state = sp_ec_state == SP_STATE_AARCH64 ?
905 		   FFA_PARTITION_INFO_GET_AARCH64_STATE :
906 		   FFA_PARTITION_INFO_GET_AARCH32_STATE;
907 
908 	properties |= ec_state << FFA_PARTITION_INFO_GET_EXEC_STATE_SHIFT;
909 
910 	return properties;
911 }
912 
913 /*
914  * Collate the partition information in a v1.1 partition information
915  * descriptor format; this will be converted later if required.
916  */
917 static int partition_info_get_handler_v1_1(uint32_t *uuid,
918 					   struct ffa_partition_info_v1_1
919 						  *partitions,
920 					   uint32_t max_partitions,
921 					   uint32_t *partition_count)
922 {
923 	uint32_t index;
924 	struct ffa_partition_info_v1_1 *desc;
925 	bool null_uuid = is_null_uuid(uuid);
926 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
927 
928 	/* Deal with Logical Partitions. */
929 	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
930 		if (null_uuid || uuid_match(uuid, el3_lp_descs[index].uuid)) {
931 			/* Found a matching UUID, populate appropriately. */
932 			if (*partition_count >= max_partitions) {
933 				return FFA_ERROR_NO_MEMORY;
934 			}
935 
936 			desc = &partitions[*partition_count];
937 			desc->ep_id = el3_lp_descs[index].sp_id;
938 			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
939 			/* LSPs must be AArch64. */
940 			desc->properties =
941 				partition_info_get_populate_properties(
942 					el3_lp_descs[index].properties,
943 					SP_STATE_AARCH64);
944 
945 			if (null_uuid) {
946 				copy_uuid(desc->uuid, el3_lp_descs[index].uuid);
947 			}
948 			(*partition_count)++;
949 		}
950 	}
951 
952 	/* Deal with physical SPs. */
953 	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
954 		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
955 			/* Found a matching UUID, populate appropriately. */
956 			if (*partition_count >= max_partitions) {
957 				return FFA_ERROR_NO_MEMORY;
958 			}
959 
960 			desc = &partitions[*partition_count];
961 			desc->ep_id = sp_desc[index].sp_id;
962 			/*
963 			 * Execution context count must match the number of
964 			 * cores for S-EL1 SPs.
965 			 */
966 			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
967 			desc->properties =
968 				partition_info_get_populate_properties(
969 					sp_desc[index].properties,
970 					sp_desc[index].execution_state);
971 
972 			if (null_uuid) {
973 				copy_uuid(desc->uuid, sp_desc[index].uuid);
974 			}
975 			(*partition_count)++;
976 		}
977 	}
978 	return 0;
979 }
980 
981 /*
982  * Handle the case where the caller only wants the count of partitions
983  * matching a given UUID and does not want the corresponding descriptors
984  * populated.
985  */
986 static uint32_t partition_info_get_handler_count_only(uint32_t *uuid)
987 {
988 	uint32_t index = 0;
989 	uint32_t partition_count = 0;
990 	bool null_uuid = is_null_uuid(uuid);
991 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
992 
993 	/* Deal with Logical Partitions. */
994 	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
995 		if (null_uuid ||
996 		    uuid_match(uuid, el3_lp_descs[index].uuid)) {
997 			(partition_count)++;
998 		}
999 	}
1000 
1001 	/* Deal with physical SP's. */
1002 	/* Deal with physical SPs. */
1003 		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
1004 			(partition_count)++;
1005 		}
1006 	}
1007 	return partition_count;
1008 }
1009 
1010 /*
1011  * If the caller of the PARTITION_INFO_GET ABI was a v1.0 caller, populate
1012  * the corresponding descriptor format from the v1.1 descriptor array.
1013  */
1014 static uint64_t partition_info_populate_v1_0(struct ffa_partition_info_v1_1
1015 					     *partitions,
1016 					     struct mailbox *mbox,
1017 					     int partition_count)
1018 {
1019 	uint32_t index;
1020 	uint32_t buf_size;
1021 	uint32_t descriptor_size;
1022 	struct ffa_partition_info_v1_0 *v1_0_partitions =
1023 		(struct ffa_partition_info_v1_0 *) mbox->rx_buffer;
1024 
1025 	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1026 	descriptor_size = partition_count *
1027 			  sizeof(struct ffa_partition_info_v1_0);
1028 
1029 	if (descriptor_size > buf_size) {
1030 		return FFA_ERROR_NO_MEMORY;
1031 	}
1032 
1033 	for (index = 0U; index < partition_count; index++) {
1034 		v1_0_partitions[index].ep_id = partitions[index].ep_id;
1035 		v1_0_partitions[index].execution_ctx_count =
1036 			partitions[index].execution_ctx_count;
1037 		/* Only report v1.0 properties. */
1038 		v1_0_partitions[index].properties =
1039 			(partitions[index].properties &
1040 			FFA_PARTITION_INFO_GET_PROPERTIES_V1_0_MASK);
1041 	}
1042 	return 0;
1043 }
1044 
1045 /*
1046  * Main handler for FFA_PARTITION_INFO_GET which supports both FF-A v1.1 and
1047  * v1.0 implementations.
1048  */
1049 static uint64_t partition_info_get_handler(uint32_t smc_fid,
1050 					   bool secure_origin,
1051 					   uint64_t x1,
1052 					   uint64_t x2,
1053 					   uint64_t x3,
1054 					   uint64_t x4,
1055 					   void *cookie,
1056 					   void *handle,
1057 					   uint64_t flags)
1058 {
1059 	int ret;
1060 	uint32_t partition_count = 0;
1061 	uint32_t size = 0;
1062 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1063 	struct mailbox *mbox;
1064 	uint64_t info_get_flags;
1065 	bool count_only;
1066 	uint32_t uuid[4];
1067 
1068 	uuid[0] = x1;
1069 	uuid[1] = x2;
1070 	uuid[2] = x3;
1071 	uuid[3] = x4;
1072 
1073 	/* Determine if the Partition descriptors should be populated. */
1074 	info_get_flags = SMC_GET_GP(handle, CTX_GPREG_X5);
1075 	count_only = (info_get_flags & FFA_PARTITION_INFO_GET_COUNT_FLAG_MASK);
1076 
1077 	/* Handle the case where we don't need to populate the descriptors. */
1078 	if (count_only) {
1079 		partition_count = partition_info_get_handler_count_only(uuid);
1080 		if (partition_count == 0) {
1081 			return spmc_ffa_error_return(handle,
1082 						FFA_ERROR_INVALID_PARAMETER);
1083 		}
1084 	} else {
1085 		struct ffa_partition_info_v1_1 partitions[MAX_SP_LP_PARTITIONS];
1086 
1087 		/*
1088 		 * Handle the case where the partition descriptors are required,
1089 		 * check we have the buffers available and populate the
1090 		 * appropriate structure version.
1091 		 */
1092 
1093 		/* Obtain the v1.1 format of the descriptors. */
1094 		ret = partition_info_get_handler_v1_1(uuid, partitions,
1095 						      MAX_SP_LP_PARTITIONS,
1096 						      &partition_count);
1097 
1098 		/* Check if an error occurred during discovery. */
1099 		if (ret != 0) {
1100 			goto err;
1101 		}
1102 
1103 		/* If we didn't find any matches the UUID is unknown. */
1104 		if (partition_count == 0) {
1105 			ret = FFA_ERROR_INVALID_PARAMETER;
1106 			goto err;
1107 		}
1108 
1109 		/* Obtain the partition mailbox RX/TX buffer pair descriptor. */
1110 		mbox = spmc_get_mbox_desc(secure_origin);
1111 
1112 		/*
1113 		 * If the caller has not bothered registering its RX/TX pair
1114 		 * then return an error code.
1115 		 */
1116 		spin_lock(&mbox->lock);
1117 		if (mbox->rx_buffer == NULL) {
1118 			ret = FFA_ERROR_BUSY;
1119 			goto err_unlock;
1120 		}
1121 
1122 		/* Ensure the RX buffer is currently free. */
1123 		if (mbox->state != MAILBOX_STATE_EMPTY) {
1124 			ret = FFA_ERROR_BUSY;
1125 			goto err_unlock;
1126 		}
1127 
1128 		/* Zero the RX buffer before populating. */
1129 		(void)memset(mbox->rx_buffer, 0,
1130 			     mbox->rxtx_page_count * FFA_PAGE_SIZE);
1131 
1132 		/*
1133 		 * Depending on the FF-A version of the requesting partition
1134 		 * we may need to convert to a v1.0 format otherwise we can copy
1135 		 * directly.
1136 		 */
1137 		if (ffa_version == MAKE_FFA_VERSION(U(1), U(0))) {
1138 			ret = partition_info_populate_v1_0(partitions,
1139 							   mbox,
1140 							   partition_count);
1141 			if (ret != 0) {
1142 				goto err_unlock;
1143 			}
1144 		} else {
1145 			uint32_t buf_size = mbox->rxtx_page_count *
1146 					    FFA_PAGE_SIZE;
1147 
1148 			/* Ensure the descriptor will fit in the buffer. */
1149 			size = sizeof(struct ffa_partition_info_v1_1);
1150 			if (partition_count * size  > buf_size) {
1151 			if (partition_count * size > buf_size) {
1152 				goto err_unlock;
1153 			}
1154 			memcpy(mbox->rx_buffer, partitions,
1155 			       partition_count * size);
1156 		}
1157 
1158 		mbox->state = MAILBOX_STATE_FULL;
1159 		spin_unlock(&mbox->lock);
1160 	}
1161 	SMC_RET4(handle, FFA_SUCCESS_SMC32, 0, partition_count, size);
1162 
1163 err_unlock:
1164 	spin_unlock(&mbox->lock);
1165 err:
1166 	return spmc_ffa_error_return(handle, ret);
1167 }
1168 
1169 static uint64_t ffa_feature_success(void *handle, uint32_t arg2)
1170 {
1171 	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0, arg2);
1172 }
1173 
1174 static uint64_t ffa_features_retrieve_request(bool secure_origin,
1175 					      uint32_t input_properties,
1176 					      void *handle)
1177 {
1178 	/*
1179 	 * If we're called by the normal world we don't support any
1180 	 * additional features.
1181 	 */
1182 	if (!secure_origin) {
1183 		if ((input_properties & FFA_FEATURES_RET_REQ_NS_BIT) != 0U) {
1184 			return spmc_ffa_error_return(handle,
1185 						     FFA_ERROR_NOT_SUPPORTED);
1186 		}
1187 
1188 	} else {
1189 		struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
1190 		/*
1191 		 * If v1.1 the NS bit must be set otherwise it is an invalid
1192 		 * call. If v1.0 check and store whether the SP has requested
1193 		 * the use of the NS bit.
1194 		 */
1195 		if (sp->ffa_version == MAKE_FFA_VERSION(1, 1)) {
1196 			if ((input_properties &
1197 			     FFA_FEATURES_RET_REQ_NS_BIT) == 0U) {
1198 				return spmc_ffa_error_return(handle,
1199 						       FFA_ERROR_NOT_SUPPORTED);
1200 			}
1201 			return ffa_feature_success(handle,
1202 						   FFA_FEATURES_RET_REQ_NS_BIT);
1203 		} else {
1204 			sp->ns_bit_requested = (input_properties &
1205 					       FFA_FEATURES_RET_REQ_NS_BIT) !=
1206 					       0U;
1207 		}
1208 		if (sp->ns_bit_requested) {
1209 			return ffa_feature_success(handle,
1210 						   FFA_FEATURES_RET_REQ_NS_BIT);
1211 		}
1212 	}
1213 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1214 }
1215 
1216 static uint64_t ffa_features_handler(uint32_t smc_fid,
1217 				     bool secure_origin,
1218 				     uint64_t x1,
1219 				     uint64_t x2,
1220 				     uint64_t x3,
1221 				     uint64_t x4,
1222 				     void *cookie,
1223 				     void *handle,
1224 				     uint64_t flags)
1225 {
1226 	uint32_t function_id = (uint32_t) x1;
1227 	uint32_t input_properties = (uint32_t) x2;
1228 
1229 	/* Check if a Feature ID was requested. */
1230 	if ((function_id & FFA_FEATURES_BIT31_MASK) == 0U) {
1231 		/* We currently don't support any additional features. */
1232 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1233 	}
1234 
1235 	/*
1236 	 * Handle the cases where we have separate handlers due to additional
1237 	 * properties.
1238 	 */
1239 	switch (function_id) {
1240 	case FFA_MEM_RETRIEVE_REQ_SMC32:
1241 	case FFA_MEM_RETRIEVE_REQ_SMC64:
1242 		return ffa_features_retrieve_request(secure_origin,
1243 						     input_properties,
1244 						     handle);
1245 	}
1246 
1247 	/*
1248 	 * We don't currently support additional input properties for these
1249 	 * other ABIs therefore ensure this value is set to 0.
1250 	 */
1251 	if (input_properties != 0U) {
1252 		return spmc_ffa_error_return(handle,
1253 					     FFA_ERROR_NOT_SUPPORTED);
1254 	}
1255 
1256 	/* Report if any other FF-A ABI is supported. */
1257 	switch (function_id) {
1258 	/* Supported features from both worlds. */
1259 	case FFA_ERROR:
1260 	case FFA_SUCCESS_SMC32:
1261 	case FFA_INTERRUPT:
1262 	case FFA_SPM_ID_GET:
1263 	case FFA_ID_GET:
1264 	case FFA_FEATURES:
1265 	case FFA_VERSION:
1266 	case FFA_RX_RELEASE:
1267 	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
1268 	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
1269 	case FFA_PARTITION_INFO_GET:
1270 	case FFA_RXTX_MAP_SMC32:
1271 	case FFA_RXTX_MAP_SMC64:
1272 	case FFA_RXTX_UNMAP:
1273 	case FFA_MEM_FRAG_TX:
1274 	case FFA_MSG_RUN:
1275 
1276 		/*
1277 		 * We are relying on the fact that the other registers
1278 		 * will be set to 0 as these values align with the
1279 		 * currently implemented features of the SPMC. If this
1280 		 * changes this function must be extended to handle
1281 		 * reporting the additional functionality.
1282 		 */
1283 
1284 		SMC_RET1(handle, FFA_SUCCESS_SMC32);
1285 		/* Execution stops here. */
1286 
1287 	/* Supported ABIs only from the secure world. */
1288 	case FFA_SECONDARY_EP_REGISTER_SMC64:
1289 	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
1290 	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
1291 	case FFA_MEM_RELINQUISH:
1292 	case FFA_MSG_WAIT:
1293 
1294 		if (!secure_origin) {
1295 			return spmc_ffa_error_return(handle,
1296 				FFA_ERROR_NOT_SUPPORTED);
1297 		}
1298 		SMC_RET1(handle, FFA_SUCCESS_SMC32);
1299 		/* Execution stops here. */
1300 
1301 	/* Supported features only from the normal world. */
1302 	case FFA_MEM_SHARE_SMC32:
1303 	case FFA_MEM_SHARE_SMC64:
1304 	case FFA_MEM_LEND_SMC32:
1305 	case FFA_MEM_LEND_SMC64:
1306 	case FFA_MEM_RECLAIM:
1307 	case FFA_MEM_FRAG_RX:
1308 
1309 		if (secure_origin) {
1310 			return spmc_ffa_error_return(handle,
1311 					FFA_ERROR_NOT_SUPPORTED);
1312 		}
1313 		SMC_RET1(handle, FFA_SUCCESS_SMC32);
1314 		/* Execution stops here. */
1315 
1316 	default:
1317 		return spmc_ffa_error_return(handle,
1318 					FFA_ERROR_NOT_SUPPORTED);
1319 	}
1320 }
1321 
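/*
 * Usage sketch (illustrative): querying support for an ABI passes its
 * function ID in x1 with x2 == 0, e.g.
 *
 *   FFA_FEATURES: x1 = FFA_MSG_SEND_DIRECT_REQ_SMC64, x2 = 0
 *
 * succeeds from either world, whereas FFA_MSG_WAIT is only reported as
 * supported to secure callers, per the switch statement above.
 */
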
1322 static uint64_t ffa_id_get_handler(uint32_t smc_fid,
1323 				   bool secure_origin,
1324 				   uint64_t x1,
1325 				   uint64_t x2,
1326 				   uint64_t x3,
1327 				   uint64_t x4,
1328 				   void *cookie,
1329 				   void *handle,
1330 				   uint64_t flags)
1331 {
1332 	if (secure_origin) {
1333 		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1334 			 spmc_get_current_sp_ctx()->sp_id);
1335 	} else {
1336 		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1337 			 spmc_get_hyp_ctx()->ns_ep_id);
1338 	}
1339 }
1340 
1341 /*
1342  * Enable an SP to query the ID assigned to the SPMC.
1343  */
1344 static uint64_t ffa_spm_id_get_handler(uint32_t smc_fid,
1345 				       bool secure_origin,
1346 				       uint64_t x1,
1347 				       uint64_t x2,
1348 				       uint64_t x3,
1349 				       uint64_t x4,
1350 				       void *cookie,
1351 				       void *handle,
1352 				       uint64_t flags)
1353 {
1354 	assert(x1 == 0UL);
1355 	assert(x2 == 0UL);
1356 	assert(x3 == 0UL);
1357 	assert(x4 == 0UL);
1358 	assert(SMC_GET_GP(handle, CTX_GPREG_X5) == 0UL);
1359 	assert(SMC_GET_GP(handle, CTX_GPREG_X6) == 0UL);
1360 	assert(SMC_GET_GP(handle, CTX_GPREG_X7) == 0UL);
1361 
1362 	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0, FFA_SPMC_ID);
1363 }
1364 
1365 static uint64_t ffa_run_handler(uint32_t smc_fid,
1366 				bool secure_origin,
1367 				uint64_t x1,
1368 				uint64_t x2,
1369 				uint64_t x3,
1370 				uint64_t x4,
1371 				void *cookie,
1372 				void *handle,
1373 				uint64_t flags)
1374 {
1375 	struct secure_partition_desc *sp;
1376 	uint16_t target_id = FFA_RUN_EP_ID(x1);
1377 	uint16_t vcpu_id = FFA_RUN_VCPU_ID(x1);
1378 	unsigned int idx;
1379 	unsigned int *rt_state;
1380 	unsigned int *rt_model;
1381 
1382 	/* Can only be called from the normal world. */
1383 	if (secure_origin) {
1384 		ERROR("FFA_RUN can only be called from NWd.\n");
1385 		return spmc_ffa_error_return(handle,
1386 					     FFA_ERROR_INVALID_PARAMETER);
1387 	}
1388 
1389 	/* Cannot run a Normal world partition. */
1390 	if (ffa_is_normal_world_id(target_id)) {
1391 		ERROR("Cannot run a NWd partition (0x%x).\n", target_id);
1392 		return spmc_ffa_error_return(handle,
1393 					     FFA_ERROR_INVALID_PARAMETER);
1394 	}
1395 
1396 	/* Check that the target SP exists. */
1397 	sp = spmc_get_sp_ctx(target_id);
1398 	if (sp == NULL) {
1399 		ERROR("Unknown partition ID (0x%x).\n", target_id);
1400 		return spmc_ffa_error_return(handle,
1401 					     FFA_ERROR_INVALID_PARAMETER);
1402 	}
1403 
1404 	idx = get_ec_index(sp);
1405 
1406 	if (idx != vcpu_id) {
1407 		ERROR("Cannot run vcpu %d != %d.\n", idx, vcpu_id);
1408 		return spmc_ffa_error_return(handle,
1409 					     FFA_ERROR_INVALID_PARAMETER);
1410 	}
1411 	if (sp->runtime_el == S_EL0) {
1412 		spin_lock(&sp->rt_state_lock);
1413 	}
1414 	rt_state = &((sp->ec[idx]).rt_state);
1415 	rt_model = &((sp->ec[idx]).rt_model);
1416 	if (*rt_state == RT_STATE_RUNNING) {
1417 		if (sp->runtime_el == S_EL0) {
1418 			spin_unlock(&sp->rt_state_lock);
1419 		}
1420 		ERROR("Partition (0x%x) is already running.\n", target_id);
1421 		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
1422 	}
1423 
1424 	/*
1425 	 * Sanity check that if the execution context was not waiting then it
1426 	 * was either in the direct request or the run partition runtime model.
1427 	 */
1428 	if (*rt_state == RT_STATE_PREEMPTED || *rt_state == RT_STATE_BLOCKED) {
1429 		assert(*rt_model == RT_MODEL_RUN ||
1430 		       *rt_model == RT_MODEL_DIR_REQ);
1431 	}
1432 
1433 	/*
1434 	 * If the context was waiting then update the partition runtime model.
1435 	 */
1436 	if (*rt_state == RT_STATE_WAITING) {
1437 		*rt_model = RT_MODEL_RUN;
1438 	}
1439 
1440 	/*
1441 	 * Forward the request to the correct SP vCPU after updating
1442 	 * its state.
1443 	 */
1444 	*rt_state = RT_STATE_RUNNING;
1445 
1446 	if (sp->runtime_el == S_EL0) {
1447 		spin_unlock(&sp->rt_state_lock);
1448 	}
1449 
1450 	return spmc_smc_return(smc_fid, secure_origin, x1, 0, 0, 0,
1451 			       handle, cookie, flags, target_id);
1452 }
1453 
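/*
 * Illustrative note: FFA_RUN packs the target endpoint ID in bits [31:16] of
 * w1 and the target vCPU ID in bits [15:0], which FFA_RUN_EP_ID() and
 * FFA_RUN_VCPU_ID() unpack above. Running vCPU 0 of an SP with ID 0x8001
 * would therefore pass x1 = (0x8001 << 16) | 0 (IDs are illustrative).
 */
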
1454 static uint64_t rx_release_handler(uint32_t smc_fid,
1455 				   bool secure_origin,
1456 				   uint64_t x1,
1457 				   uint64_t x2,
1458 				   uint64_t x3,
1459 				   uint64_t x4,
1460 				   void *cookie,
1461 				   void *handle,
1462 				   uint64_t flags)
1463 {
1464 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1465 
1466 	spin_lock(&mbox->lock);
1467 
1468 	if (mbox->state != MAILBOX_STATE_FULL) {
1469 		spin_unlock(&mbox->lock);
1470 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1471 	}
1472 
1473 	mbox->state = MAILBOX_STATE_EMPTY;
1474 	spin_unlock(&mbox->lock);
1475 
1476 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1477 }
1478 
1479 /*
1480  * Perform initial validation on the provided secondary entry point.
1481  * For now ensure it does not lie within the BL31 Image or the SP's
1482  * RX/TX buffers as these are mapped within EL3.
1483  * TODO: perform validation for additional invalid memory regions.
1484  */
1485 static int validate_secondary_ep(uintptr_t ep, struct secure_partition_desc *sp)
1486 {
1487 	struct mailbox *mb;
1488 	uintptr_t buffer_size;
1489 	uintptr_t sp_rx_buffer;
1490 	uintptr_t sp_tx_buffer;
1491 	uintptr_t sp_rx_buffer_limit;
1492 	uintptr_t sp_tx_buffer_limit;
1493 
1494 	mb = &sp->mailbox;
1495 	buffer_size = (uintptr_t) (mb->rxtx_page_count * FFA_PAGE_SIZE);
1496 	sp_rx_buffer = (uintptr_t) mb->rx_buffer;
1497 	sp_tx_buffer = (uintptr_t) mb->tx_buffer;
1498 	sp_rx_buffer_limit = sp_rx_buffer + buffer_size;
1499 	sp_tx_buffer_limit = sp_tx_buffer + buffer_size;
1500 
1501 	/*
1502 	 * Check if the entry point lies within BL31, or the
1503 	 * SP's RX or TX buffer.
1504 	 */
1505 	if ((ep >= BL31_BASE && ep < BL31_LIMIT) ||
1506 	    (ep >= sp_rx_buffer && ep < sp_rx_buffer_limit) ||
1507 	    (ep >= sp_tx_buffer && ep < sp_tx_buffer_limit)) {
1508 		return -EINVAL;
1509 	}
1510 	return 0;
1511 }
1512 
1513 /*******************************************************************************
1514  * This function handles the FFA_SECONDARY_EP_REGISTER SMC to allow an SP to
1515  *  register an entry point for initialization during a secondary cold boot.
1516  ******************************************************************************/
1517 static uint64_t ffa_sec_ep_register_handler(uint32_t smc_fid,
1518 					    bool secure_origin,
1519 					    uint64_t x1,
1520 					    uint64_t x2,
1521 					    uint64_t x3,
1522 					    uint64_t x4,
1523 					    void *cookie,
1524 					    void *handle,
1525 					    uint64_t flags)
1526 {
1527 	struct secure_partition_desc *sp;
1528 	struct sp_exec_ctx *sp_ctx;
1529 
1530 	/* This request cannot originate from the Normal world. */
1531 	if (!secure_origin) {
1532 		WARN("%s: Can only be called from SWd.\n", __func__);
1533 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1534 	}
1535 
1536 	/* Get the context of the current SP. */
1537 	sp = spmc_get_current_sp_ctx();
1538 	if (sp == NULL) {
1539 		WARN("%s: Cannot find SP context.\n", __func__);
1540 		return spmc_ffa_error_return(handle,
1541 					     FFA_ERROR_INVALID_PARAMETER);
1542 	}
1543 
1544 	/* Only an S-EL1 SP should be invoking this ABI. */
1545 	if (sp->runtime_el != S_EL1) {
1546 		WARN("%s: Can only be called for a S-EL1 SP.\n", __func__);
1547 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1548 	}
1549 
1550 	/* Ensure the SP is in its initialization state. */
1551 	sp_ctx = spmc_get_sp_ec(sp);
1552 	if (sp_ctx->rt_model != RT_MODEL_INIT) {
1553 		WARN("%s: Can only be called during SP initialization.\n",
1554 		     __func__);
1555 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1556 	}
1557 
1558 	/* Perform initial validation of the secondary entry point. */
1559 	if (validate_secondary_ep(x1, sp)) {
1560 		WARN("%s: Invalid entry point provided (0x%lx).\n",
1561 		     __func__, x1);
1562 		return spmc_ffa_error_return(handle,
1563 					     FFA_ERROR_INVALID_PARAMETER);
1564 	}
1565 
1566 	/*
1567 	 * Update the secondary entrypoint in SP context.
1568 	 * We don't need a lock here as during partition initialization there
1569 	 * will only be a single core online.
1570 	 */
1571 	sp->secondary_ep = x1;
1572 	VERBOSE("%s: 0x%lx\n", __func__, sp->secondary_ep);
1573 
1574 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1575 }
1576 
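/*
 * Usage sketch (illustrative): an S-EL1 SP registers its secondary entry
 * point while still in its initialisation runtime model, e.g.
 *
 *   FFA_SECONDARY_EP_REGISTER_SMC64: x1 = secondary_entry_address
 *
 * where secondary_entry_address is a placeholder for an address inside the
 * SP image. Secondary cores subsequently enter the SP at this address during
 * a cold boot.
 */
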
1577 /*******************************************************************************
1578  * Permissions are encoded using a different format in the FFA_MEM_PERM_* ABIs
1579  * than in the Trusted Firmware, where the mmap_attr_t enum type is used. This
1580  * function converts a permission value from the FF-A format to the mmap_attr_t
1581  * format by setting MT_RW/MT_RO, MT_USER/MT_PRIVILEGED and
1582  * MT_EXECUTE/MT_EXECUTE_NEVER. The other fields are left as 0 because they are
1583  * ignored by the function xlat_change_mem_attributes_ctx().
1584  ******************************************************************************/
1585 static unsigned int ffa_perm_to_mmap_perm(unsigned int perms)
1586 {
1587 	unsigned int tf_attr = 0U;
1588 	unsigned int access;
1589 
1590 	/* Deal with data access permissions first. */
1591 	access = (perms & FFA_MEM_PERM_DATA_MASK) >> FFA_MEM_PERM_DATA_SHIFT;
1592 
1593 	switch (access) {
1594 	case FFA_MEM_PERM_DATA_RW:
1595 		/* RW together with execute is not allowed; leave tf_attr as 0. */
1596 		if ((perms & FFA_MEM_PERM_INST_NON_EXEC) != 0) {
1597 			tf_attr |= MT_RW | MT_USER | MT_EXECUTE_NEVER;
1598 		}
1599 		break;
1600 
1601 	case FFA_MEM_PERM_DATA_RO:
1602 		tf_attr |= MT_RO | MT_USER;
1603 		/* Deal with the instruction access permissions next. */
1604 		if ((perms & FFA_MEM_PERM_INST_NON_EXEC) == 0) {
1605 			tf_attr |= MT_EXECUTE;
1606 		} else {
1607 			tf_attr |= MT_EXECUTE_NEVER;
1608 		}
1609 		break;
1610 
1611 	case FFA_MEM_PERM_DATA_NA:
1612 	default:
1613 		return tf_attr;
1614 	}
1615 
1616 	return tf_attr;
1617 }
1618 
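/*
 * Illustrative examples of the conversion above: a request of
 * FFA_MEM_PERM_DATA_RO | FFA_MEM_PERM_INST_EXEC maps to
 * MT_RO | MT_USER | MT_EXECUTE, while FFA_MEM_PERM_DATA_RW |
 * FFA_MEM_PERM_INST_EXEC yields 0, which the caller treats as an invalid
 * (write-and-execute) request.
 */
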
1619 /*******************************************************************************
1620  * Handler to set the permissions of a set of contiguous pages of a S-EL0 SP
1621  ******************************************************************************/
1622 static uint64_t ffa_mem_perm_set_handler(uint32_t smc_fid,
1623 					 bool secure_origin,
1624 					 uint64_t x1,
1625 					 uint64_t x2,
1626 					 uint64_t x3,
1627 					 uint64_t x4,
1628 					 void *cookie,
1629 					 void *handle,
1630 					 uint64_t flags)
1631 {
1632 	struct secure_partition_desc *sp;
1633 	unsigned int idx;
1634 	uintptr_t base_va = (uintptr_t) x1;
1635 	size_t size = (size_t)(x2 * PAGE_SIZE);
1636 	uint32_t tf_attr;
1637 	int ret;
1638 
1639 	/* This request cannot originate from the Normal world. */
1640 	if (!secure_origin) {
1641 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1642 	}
1643 
1644 	if (size == 0) {
1645 		return spmc_ffa_error_return(handle,
1646 					     FFA_ERROR_INVALID_PARAMETER);
1647 	}
1648 
1649 	/* Get the context of the current SP. */
1650 	sp = spmc_get_current_sp_ctx();
1651 	if (sp == NULL) {
1652 		return spmc_ffa_error_return(handle,
1653 					     FFA_ERROR_INVALID_PARAMETER);
1654 	}
1655 
1656 	/* An S-EL1 SP has no business invoking this ABI. */
1657 	if (sp->runtime_el == S_EL1) {
1658 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1659 	}
1660 
1661 	if ((x3 & ~((uint64_t)FFA_MEM_PERM_MASK)) != 0) {
1662 		return spmc_ffa_error_return(handle,
1663 					     FFA_ERROR_INVALID_PARAMETER);
1664 	}
1665 
1666 	/* Get the execution context of the calling SP. */
1667 	idx = get_ec_index(sp);
1668 
1669 	/*
1670 	 * Ensure that the S-EL0 SP is initialising itself. We do not need to
1671 	 * synchronise this operation through a spinlock since a S-EL0 SP is UP
1672 	 * and can only be initialising on this cpu.
1673 	 */
1674 	if (sp->ec[idx].rt_model != RT_MODEL_INIT) {
1675 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1676 	}
1677 
1678 	VERBOSE("Setting memory permissions:\n");
1679 	VERBOSE("  Start address  : 0x%lx\n", base_va);
1680 	VERBOSE("  Number of pages: %lu (%zu bytes)\n", x2, size);
1681 	VERBOSE("  Attributes     : 0x%x\n", (uint32_t)x3);
1682 
1683 	/* Convert inbound permissions to TF-A permission attributes */
1684 	tf_attr = ffa_perm_to_mmap_perm((unsigned int)x3);
1685 	if (tf_attr == 0U) {
1686 		return spmc_ffa_error_return(handle,
1687 					     FFA_ERROR_INVALID_PARAMETER);
1688 	}
1689 
1690 	/* Request the change in permissions */
1691 	ret = xlat_change_mem_attributes_ctx(sp->xlat_ctx_handle,
1692 					     base_va, size, tf_attr);
1693 	if (ret != 0) {
1694 		return spmc_ffa_error_return(handle,
1695 					     FFA_ERROR_INVALID_PARAMETER);
1696 	}
1697 
1698 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1699 }
1700 
1701 /*******************************************************************************
1702  * Permissions are encoded using a different format in the FFA_MEM_PERM_* ABIs
1703  * than in the Trusted Firmware, where the mmap_attr_t enum type is used. This
1704  * function converts a permission value from the mmap_attr_t format to the FF-A
1705  * format.
1706  ******************************************************************************/
1707 static unsigned int mmap_perm_to_ffa_perm(unsigned int attr)
1708 {
1709 	unsigned int perms = 0U;
1710 	unsigned int data_access;
1711 
1712 	if ((attr & MT_USER) == 0) {
1713 		/* No access from EL0. */
1714 		data_access = FFA_MEM_PERM_DATA_NA;
1715 	} else {
1716 		if ((attr & MT_RW) != 0) {
1717 			data_access = FFA_MEM_PERM_DATA_RW;
1718 		} else {
1719 			data_access = FFA_MEM_PERM_DATA_RO;
1720 		}
1721 	}
1722 
1723 	perms |= (data_access & FFA_MEM_PERM_DATA_MASK)
1724 		<< FFA_MEM_PERM_DATA_SHIFT;
1725 
1726 	if ((attr & MT_EXECUTE_NEVER) != 0U) {
1727 		perms |= FFA_MEM_PERM_INST_NON_EXEC;
1728 	}
1729 
1730 	return perms;
1731 }
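
/*
 * For example (illustrative only): a mapping attribute of MT_USER | MT_RW
 * with MT_EXECUTE_NEVER set is reported as FFA_MEM_PERM_DATA_RW |
 * FFA_MEM_PERM_INST_NON_EXEC, while a mapping without MT_USER is reported as
 * FFA_MEM_PERM_DATA_NA (no EL0 data access).
 */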
1732 
1733 /*******************************************************************************
1734  * Handler to get the permissions of a page of an S-EL0 SP
1735  ******************************************************************************/
1736 static uint64_t ffa_mem_perm_get_handler(uint32_t smc_fid,
1737 					 bool secure_origin,
1738 					 uint64_t x1,
1739 					 uint64_t x2,
1740 					 uint64_t x3,
1741 					 uint64_t x4,
1742 					 void *cookie,
1743 					 void *handle,
1744 					 uint64_t flags)
1745 {
1746 	struct secure_partition_desc *sp;
1747 	unsigned int idx;
1748 	uintptr_t base_va = (uintptr_t)x1;
1749 	uint32_t tf_attr = 0;
1750 	int ret;
1751 
1752 	/* This request cannot originate from the Normal world. */
1753 	if (!secure_origin) {
1754 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1755 	}
1756 
1757 	/* Get the context of the current SP. */
1758 	sp = spmc_get_current_sp_ctx();
1759 	if (sp == NULL) {
1760 		return spmc_ffa_error_return(handle,
1761 					     FFA_ERROR_INVALID_PARAMETER);
1762 	}
1763 
1764 	/* An S-EL1 SP has no business invoking this ABI. */
1765 	if (sp->runtime_el == S_EL1) {
1766 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1767 	}
1768 
1769 	/* Get the execution context of the calling SP. */
1770 	idx = get_ec_index(sp);
1771 
1772 	/*
1773 	 * Ensure that the S-EL0 SP is initialising itself. We do not need to
1774 	 * synchronise this operation through a spinlock since an S-EL0 SP is UP
1775 	 * and can only be initialising on this CPU.
1776 	 */
1777 	if (sp->ec[idx].rt_model != RT_MODEL_INIT) {
1778 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1779 	}
1780 
1781 	/* Request the permissions */
1782 	ret = xlat_get_mem_attributes_ctx(sp->xlat_ctx_handle, base_va, &tf_attr);
1783 	if (ret != 0) {
1784 		return spmc_ffa_error_return(handle,
1785 					     FFA_ERROR_INVALID_PARAMETER);
1786 	}
1787 
1788 	/* Convert TF-A permission to FF-A permissions attributes. */
1789 	x2 = mmap_perm_to_ffa_perm(tf_attr);
1790 
1791 	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0, x2);
1792 }
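
/*
 * Illustrative usage of the handler above (a sketch, not part of this file):
 * an S-EL0 SP querying the permissions of one of its pages during
 * initialisation issues
 *
 *   FFA_MEM_PERM_GET(x1 = base_va);
 *
 * and, on success, receives the FF-A permission encoding in w2 of the
 * FFA_SUCCESS response.
 */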
1793 
1794 /*******************************************************************************
1795  * This function parses the Secure Partition manifest. From the manifest it
1796  * fetches the details required to prepare the Secure Partition image context
1797  * and the Secure Partition boot arguments, if any.
1798  ******************************************************************************/
1799 static int sp_manifest_parse(void *sp_manifest, int offset,
1800 			     struct secure_partition_desc *sp,
1801 			     entry_point_info_t *ep_info,
1802 			     int32_t *boot_info_reg)
1803 {
1804 	int32_t ret, node;
1805 	uint32_t config_32;
1806 
1807 	/*
1808 	 * Look for the mandatory fields that are expected to be present in
1809 	 * the SP manifests.
1810 	 */
1811 	node = fdt_path_offset(sp_manifest, "/");
1812 	if (node < 0) {
1813 		ERROR("Did not find root node.\n");
1814 		return node;
1815 	}
1816 
1817 	ret = fdt_read_uint32_array(sp_manifest, node, "uuid",
1818 				    ARRAY_SIZE(sp->uuid), sp->uuid);
1819 	if (ret != 0) {
1820 		ERROR("Missing Secure Partition UUID.\n");
1821 		return ret;
1822 	}
1823 
1824 	ret = fdt_read_uint32(sp_manifest, node, "exception-level", &config_32);
1825 	if (ret != 0) {
1826 		ERROR("Missing SP Exception Level information.\n");
1827 		return ret;
1828 	}
1829 
1830 	sp->runtime_el = config_32;
1831 
1832 	ret = fdt_read_uint32(sp_manifest, node, "ffa-version", &config_32);
1833 	if (ret != 0) {
1834 		ERROR("Missing Secure Partition FF-A Version.\n");
1835 		return ret;
1836 	}
1837 
1838 	sp->ffa_version = config_32;
1839 
1840 	ret = fdt_read_uint32(sp_manifest, node, "execution-state", &config_32);
1841 	if (ret != 0) {
1842 		ERROR("Missing Secure Partition Execution State.\n");
1843 		return ret;
1844 	}
1845 
1846 	sp->execution_state = config_32;
1847 
1848 	ret = fdt_read_uint32(sp_manifest, node,
1849 			      "messaging-method", &config_32);
1850 	if (ret != 0) {
1851 		ERROR("Missing Secure Partition messaging method.\n");
1852 		return ret;
1853 	}
1854 
1855 	/* Validate this entry; we currently only support direct messaging. */
1856 	if ((config_32 & ~(FFA_PARTITION_DIRECT_REQ_RECV |
1857 			  FFA_PARTITION_DIRECT_REQ_SEND)) != 0U) {
1858 		WARN("Invalid Secure Partition messaging method (0x%x)\n",
1859 		     config_32);
1860 		return -EINVAL;
1861 	}
1862 
1863 	sp->properties = config_32;
1864 
1865 	ret = fdt_read_uint32(sp_manifest, node,
1866 			      "execution-ctx-count", &config_32);
1867 
1868 	if (ret != 0) {
1869 		ERROR("Missing SP Execution Context Count.\n");
1870 		return ret;
1871 	}
1872 
1873 	/*
1874 	 * Ensure that this field is set correctly in the manifest. However,
1875 	 * since it is currently hardcoded to PLATFORM_CORE_COUNT for S-EL1
1876 	 * partitions, it only needs to be validated here, not saved.
1877 	 */
1878 	if ((sp->runtime_el == S_EL1) && (config_32 != PLATFORM_CORE_COUNT)) {
1879 		ERROR("SP Execution Context Count (%u) must be %u.\n",
1880 			config_32, PLATFORM_CORE_COUNT);
1881 		return -EINVAL;
1882 	}
1883 
1884 	/*
1885 	 * Look for the optional fields that may be present in an SP
1886 	 * manifest.
1887 	 */
1888 	ret = fdt_read_uint32(sp_manifest, node, "id", &config_32);
1889 	if (ret != 0) {
1890 		WARN("Missing Secure Partition ID.\n");
1891 	} else {
1892 		if (!is_ffa_secure_id_valid(config_32)) {
1893 			ERROR("Invalid Secure Partition ID (0x%x).\n",
1894 			      config_32);
1895 			return -EINVAL;
1896 		}
1897 		sp->sp_id = config_32;
1898 	}
1899 
1900 	ret = fdt_read_uint32(sp_manifest, node,
1901 			      "power-management-messages", &config_32);
1902 	if (ret != 0) {
1903 		WARN("Missing Power Management Messages entry.\n");
1904 	} else {
1905 		if ((sp->runtime_el == S_EL0) && (config_32 != 0)) {
1906 			ERROR("Power messages not supported for S-EL0 SP\n");
1907 			return -EINVAL;
1908 		}
1909 
1910 		/*
1911 		 * Ensure only the currently supported power messages have
1912 		 * been requested.
1913 		 */
1914 		if (config_32 & ~(FFA_PM_MSG_SUB_CPU_OFF |
1915 				  FFA_PM_MSG_SUB_CPU_SUSPEND |
1916 				  FFA_PM_MSG_SUB_CPU_SUSPEND_RESUME)) {
1917 			ERROR("Requested unsupported PM messages (%x)\n",
1918 			      config_32);
1919 			return -EINVAL;
1920 		}
1921 		sp->pwr_mgmt_msgs = config_32;
1922 	}
1923 
1924 	ret = fdt_read_uint32(sp_manifest, node,
1925 			      "gp-register-num", &config_32);
1926 	if (ret != 0) {
1927 		WARN("Missing boot information register.\n");
1928 	} else {
1929 		/* Check whether a register number in the range 0-3 is specified. */
1930 		if (config_32 < 4) {
1931 			*boot_info_reg = config_32;
1932 		} else {
1933 			WARN("Incorrect boot information register (%u).\n",
1934 			     config_32);
1935 		}
1936 	}
1937 
1938 	return 0;
1939 }
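
/*
 * Summary of the manifest properties consumed by sp_manifest_parse() above:
 *   Mandatory: "uuid", "exception-level", "ffa-version", "execution-state",
 *              "messaging-method" and "execution-ctx-count".
 *   Optional : "id", "power-management-messages" and "gp-register-num";
 *              these only produce a warning when absent.
 */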
1940 
1941 /*******************************************************************************
1942  * This function gets the Secure Partition Manifest base and maps the manifest
1943  * region.
1944  * Currently only one Secure Partition manifest is considered; it is used to
1945  * prepare the context for the single Secure Partition.
1946  ******************************************************************************/
1947 static int find_and_prepare_sp_context(void)
1948 {
1949 	void *sp_manifest;
1950 	uintptr_t manifest_base;
1951 	uintptr_t manifest_base_align;
1952 	entry_point_info_t *next_image_ep_info;
1953 	int32_t ret, boot_info_reg = -1;
1954 	struct secure_partition_desc *sp;
1955 
1956 	next_image_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
1957 	if (next_image_ep_info == NULL) {
1958 		WARN("No Secure Partition image provided by BL2.\n");
1959 		return -ENOENT;
1960 	}
1961 
1962 	sp_manifest = (void *)next_image_ep_info->args.arg0;
1963 	if (sp_manifest == NULL) {
1964 		WARN("Secure Partition manifest absent.\n");
1965 		return -ENOENT;
1966 	}
1967 
1968 	manifest_base = (uintptr_t)sp_manifest;
1969 	manifest_base_align = page_align(manifest_base, DOWN);
1970 
1971 	/*
1972 	 * Map the secure partition manifest region in the EL3 translation
1973 	 * regime.
1974 	 * Map an area equal to (2 * PAGE_SIZE) for now. During manifest base
1975 	 * alignment the region of 1 PAGE_SIZE from manifest align base may
1976 	 * not completely accommodate the secure partition manifest region.
1977 	 */
1978 	ret = mmap_add_dynamic_region((unsigned long long)manifest_base_align,
1979 				      manifest_base_align,
1980 				      PAGE_SIZE * 2,
1981 				      MT_RO_DATA);
1982 	if (ret != 0) {
1983 		ERROR("Error while mapping SP manifest (%d).\n", ret);
1984 		return ret;
1985 	}
1986 
1987 	ret = fdt_node_offset_by_compatible(sp_manifest, -1,
1988 					    "arm,ffa-manifest-1.0");
1989 	if (ret < 0) {
1990 		ERROR("Error happened in SP manifest reading.\n");
1991 		return -EINVAL;
1992 	}
1993 
1994 	/*
1995 	 * Store the size of the manifest so that it can later be passed to the
1996 	 * SP as boot information.
1997 	 */
1998 	next_image_ep_info->args.arg1 = fdt_totalsize(sp_manifest);
1999 	INFO("Manifest adr = %lx , size = %lu bytes\n", manifest_base,
2000 	     next_image_ep_info->args.arg1);
2001 
2002 	/*
2003 	 * Select an SP descriptor for initialising the partition's execution
2004 	 * context on the primary CPU.
2005 	 */
2006 	sp = spmc_get_current_sp_ctx();
2007 
2008 #if SPMC_AT_EL3_SEL0_SP
2009 	/* Assign translation tables context. */
2010 	sp_desc->xlat_ctx_handle = spm_get_sp_xlat_context();
2011 
2012 #endif /* SPMC_AT_EL3_SEL0_SP */
2013 	/* Initialize entry point information for the SP */
2014 	SET_PARAM_HEAD(next_image_ep_info, PARAM_EP, VERSION_1,
2015 		       SECURE | EP_ST_ENABLE);
2016 
2017 	/* Parse the SP manifest. */
2018 	ret = sp_manifest_parse(sp_manifest, ret, sp, next_image_ep_info,
2019 				&boot_info_reg);
2020 	if (ret != 0) {
2021 		ERROR("Error in Secure Partition manifest parsing.\n");
2022 		return ret;
2023 	}
2024 
2025 	/* Check that the runtime EL in the manifest was correct. */
2026 	if (sp->runtime_el != S_EL0 && sp->runtime_el != S_EL1) {
2027 		ERROR("Unexpected runtime EL: %d\n", sp->runtime_el);
2028 		return -EINVAL;
2029 	}
2030 
2031 	/* Perform any common initialisation. */
2032 	spmc_sp_common_setup(sp, next_image_ep_info, boot_info_reg);
2033 
2034 	/* Perform any initialisation specific to S-EL1 SPs. */
2035 	if (sp->runtime_el == S_EL1) {
2036 		spmc_el1_sp_setup(sp, next_image_ep_info);
2037 	}
2038 
2039 #if SPMC_AT_EL3_SEL0_SP
2040 	/* Set up spsr in the endpoint info for the common context management routine. */
2041 	if (sp->runtime_el == S_EL0) {
2042 		spmc_el0_sp_spsr_setup(next_image_ep_info);
2043 	}
2044 #endif /* SPMC_AT_EL3_SEL0_SP */
2045 
2046 	/* Initialize the SP context with the required ep info. */
2047 	spmc_sp_common_ep_commit(sp, next_image_ep_info);
2048 
2049 #if SPMC_AT_EL3_SEL0_SP
2050 	/*
2051 	 * Perform any initialisation specific to S-EL0 SPs that is not handled
2052 	 * by the common context management routine.
2053 	 */
2054 	if (sp->runtime_el == S_EL0) {
2055 		spmc_el0_sp_setup(sp, boot_info_reg, sp_manifest);
2056 	}
2057 #endif /* SPMC_AT_EL3_SEL0_SP */
2058 	return 0;
2059 }
2060 
2061 /*******************************************************************************
2062  * This function validates the EL3 Logical Partition descriptors and
2063  * initialises each EL3 Logical Partition by invoking its init() hook.
2064  ******************************************************************************/
2065 static int32_t logical_sp_init(void)
2066 {
2067 	int32_t rc = 0;
2068 	struct el3_lp_desc *el3_lp_descs;
2069 
2070 	/* Perform initial validation of the Logical Partitions. */
2071 	rc = el3_sp_desc_validate();
2072 	if (rc != 0) {
2073 		ERROR("Logical Partition validation failed!\n");
2074 		return rc;
2075 	}
2076 
2077 	el3_lp_descs = get_el3_lp_array();
2078 
2079 	INFO("Logical Secure Partition init start.\n");
2080 	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
2081 		rc = el3_lp_descs[i].init();
2082 		if (rc != 0) {
2083 			ERROR("Logical SP (0x%x) Failed to Initialize\n",
2084 			      el3_lp_descs[i].sp_id);
2085 			return rc;
2086 		}
2087 		VERBOSE("Logical SP (0x%x) Initialized\n",
2088 			      el3_lp_descs[i].sp_id);
2089 	}
2090 
2091 	INFO("Logical Secure Partition init completed.\n");
2092 
2093 	return rc;
2094 }
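
/*
 * Note: EL3 Logical Partitions are expected to be declared at build time
 * (via the DECLARE_LOGICAL_PARTITION() helper in el3_spmc_logical_sp.h) so
 * that their descriptors, including the init() hook invoked above, are
 * placed in the EL3 LP descriptor array iterated by logical_sp_init().
 */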
2095 
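/*******************************************************************************
 * This function takes an SP execution context and performs a synchronous entry
 * into the SP on the current physical CPU. The value returned is the one
 * passed to the corresponding synchronous exit for this execution context;
 * sp_init() below treats a non-zero value as an initialisation failure.
 ******************************************************************************/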
2096 uint64_t spmc_sp_synchronous_entry(struct sp_exec_ctx *ec)
2097 {
2098 	uint64_t rc;
2099 
2100 	assert(ec != NULL);
2101 
2102 	/* Assign the context of the SP to this CPU */
2103 	cm_set_context(&(ec->cpu_ctx), SECURE);
2104 
2105 	/* Restore the context assigned above */
2106 	cm_el1_sysregs_context_restore(SECURE);
2107 	cm_set_next_eret_context(SECURE);
2108 
2109 	/* Invalidate TLBs at EL1. */
2110 	tlbivmalle1();
2111 	dsbish();
2112 
2113 	/* Enter Secure Partition */
2114 	rc = spm_secure_partition_enter(&ec->c_rt_ctx);
2115 
2116 	/* Save secure state */
2117 	cm_el1_sysregs_context_save(SECURE);
2118 
2119 	return rc;
2120 }
2121 
2122 /*******************************************************************************
2123  * SPMC Helper Functions.
2124  ******************************************************************************/
2125 static int32_t sp_init(void)
2126 {
2127 	uint64_t rc;
2128 	struct secure_partition_desc *sp;
2129 	struct sp_exec_ctx *ec;
2130 
2131 	sp = spmc_get_current_sp_ctx();
2132 	ec = spmc_get_sp_ec(sp);
2133 	ec->rt_model = RT_MODEL_INIT;
2134 	ec->rt_state = RT_STATE_RUNNING;
2135 
2136 	INFO("Secure Partition (0x%x) init start.\n", sp->sp_id);
2137 
2138 	rc = spmc_sp_synchronous_entry(ec);
2139 	if (rc != 0) {
2140 		/* Indicate SP init was not successful. */
2141 		ERROR("SP (0x%x) failed to initialize (%lu).\n",
2142 		      sp->sp_id, rc);
2143 		return 0;
2144 	}
2145 
2146 	ec->rt_state = RT_STATE_WAITING;
2147 	INFO("Secure Partition initialized.\n");
2148 
2149 	return 1;
2150 }
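
/*
 * Note: sp_init() is registered below via bl31_register_bl32_init(), so it
 * follows the BL32 init return convention: a non-zero value reports
 * successful initialisation of the SP to BL31, while 0 reports a failure.
 */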
2151 
2152 static void initialize_sp_descs(void)
2153 {
2154 	struct secure_partition_desc *sp;
2155 
2156 	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
2157 		sp = &sp_desc[i];
2158 		sp->sp_id = INV_SP_ID;
2159 		sp->mailbox.rx_buffer = NULL;
2160 		sp->mailbox.tx_buffer = NULL;
2161 		sp->mailbox.state = MAILBOX_STATE_EMPTY;
2162 		sp->secondary_ep = 0;
2163 	}
2164 }
2165 
2166 static void initialize_ns_ep_descs(void)
2167 {
2168 	struct ns_endpoint_desc *ns_ep;
2169 
2170 	for (unsigned int i = 0U; i < NS_PARTITION_COUNT; i++) {
2171 		ns_ep = &ns_ep_desc[i];
2172 		/*
2173 		 * The ID of 0 clashes with the Hypervisor ID but this is
2174 		 * not a problem in practice.
2175 		 */
2176 		ns_ep->ns_ep_id = 0;
2177 		ns_ep->ffa_version = 0;
2178 		ns_ep->mailbox.rx_buffer = NULL;
2179 		ns_ep->mailbox.tx_buffer = NULL;
2180 		ns_ep->mailbox.state = MAILBOX_STATE_EMPTY;
2181 	}
2182 }
2183 
2184 /*******************************************************************************
2185  * Initialize SPMC attributes for the SPMD.
2186  ******************************************************************************/
2187 void spmc_populate_attrs(spmc_manifest_attribute_t *spmc_attrs)
2188 {
2189 	spmc_attrs->major_version = FFA_VERSION_MAJOR;
2190 	spmc_attrs->minor_version = FFA_VERSION_MINOR;
2191 	spmc_attrs->exec_state = MODE_RW_64;
2192 	spmc_attrs->spmc_id = FFA_SPMC_ID;
2193 }
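
/*
 * These attributes are consumed by the SPMD, which uses them to discover the
 * version, execution state and ID of the SPMC.
 */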
2194 
2195 /*******************************************************************************
2196  * Initialize contexts of all Secure Partitions.
2197  ******************************************************************************/
2198 int32_t spmc_setup(void)
2199 {
2200 	int32_t ret;
2201 	uint32_t flags;
2202 
2203 	/* Initialize endpoint descriptors */
2204 	initialize_sp_descs();
2205 	initialize_ns_ep_descs();
2206 
2207 	/*
2208 	 * Retrieve the datastore allocated by platform code for tracking shared
2209 	 * memory requests and zero the region.
2210 	 */
2211 	ret = plat_spmc_shmem_datastore_get(&spmc_shmem_obj_state.data,
2212 					    &spmc_shmem_obj_state.data_size);
2213 	if (ret != 0) {
2214 		ERROR("Failed to obtain memory descriptor backing store!\n");
2215 		return ret;
2216 	}
2217 	memset(spmc_shmem_obj_state.data, 0, spmc_shmem_obj_state.data_size);
2218 
2219 	/* Setup logical SPs. */
2220 	ret = logical_sp_init();
2221 	if (ret != 0) {
2222 		ERROR("Failed to initialize Logical Partitions.\n");
2223 		return ret;
2224 	}
2225 
2226 	/* Perform physical SP setup. */
2227 
2228 	/* Disable MMU at EL1 (initialized by BL2) */
2229 	disable_mmu_icache_el1();
2230 
2231 	/* Initialize context of the SP */
2232 	INFO("Secure Partition context setup start.\n");
2233 
2234 	ret = find_and_prepare_sp_context();
2235 	if (ret != 0) {
2236 		ERROR("Error in SP finding and context preparation.\n");
2237 		return ret;
2238 	}
2239 
2240 	/* Register power management hooks with PSCI */
2241 	psci_register_spd_pm_hook(&spmc_pm);
2242 
2243 	/*
2244 	 * Register an interrupt handler for S-EL1 interrupts
2245 	 * when generated during code executing in the
2246 	 * non-secure state.
2247 	 */
2248 	flags = 0;
2249 	set_interrupt_rm_flag(flags, NON_SECURE);
2250 	ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
2251 					      spmc_sp_interrupt_handler,
2252 					      flags);
2253 	if (ret != 0) {
2254 		ERROR("Failed to register interrupt handler! (%d)\n", ret);
2255 		panic();
2256 	}
2257 
2258 	/* Register the SP init function for deferred initialisation by BL31. */
2259 	bl31_register_bl32_init(&sp_init);
2260 
2261 	INFO("Secure Partition setup done.\n");
2262 
2263 	return 0;
2264 }
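
/*
 * Note: spmc_setup() is expected to be invoked by the SPMD during boot when
 * the SPMC resides at EL3; initialisation of the SP itself is deferred until
 * BL31 invokes the sp_init() hook registered above.
 */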
2265 
2266 /*******************************************************************************
2267  * Secure Partition Manager SMC handler.
2268  ******************************************************************************/
2269 uint64_t spmc_smc_handler(uint32_t smc_fid,
2270 			  bool secure_origin,
2271 			  uint64_t x1,
2272 			  uint64_t x2,
2273 			  uint64_t x3,
2274 			  uint64_t x4,
2275 			  void *cookie,
2276 			  void *handle,
2277 			  uint64_t flags)
2278 {
2279 	switch (smc_fid) {
2280 
2281 	case FFA_VERSION:
2282 		return ffa_version_handler(smc_fid, secure_origin, x1, x2, x3,
2283 					   x4, cookie, handle, flags);
2284 
2285 	case FFA_SPM_ID_GET:
2286 		return ffa_spm_id_get_handler(smc_fid, secure_origin, x1, x2,
2287 					     x3, x4, cookie, handle, flags);
2288 
2289 	case FFA_ID_GET:
2290 		return ffa_id_get_handler(smc_fid, secure_origin, x1, x2, x3,
2291 					  x4, cookie, handle, flags);
2292 
2293 	case FFA_FEATURES:
2294 		return ffa_features_handler(smc_fid, secure_origin, x1, x2, x3,
2295 					    x4, cookie, handle, flags);
2296 
2297 	case FFA_SECONDARY_EP_REGISTER_SMC64:
2298 		return ffa_sec_ep_register_handler(smc_fid, secure_origin, x1,
2299 						   x2, x3, x4, cookie, handle,
2300 						   flags);
2301 
2302 	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
2303 	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
2304 		return direct_req_smc_handler(smc_fid, secure_origin, x1, x2,
2305 					      x3, x4, cookie, handle, flags);
2306 
2307 	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
2308 	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
2309 		return direct_resp_smc_handler(smc_fid, secure_origin, x1, x2,
2310 					       x3, x4, cookie, handle, flags);
2311 
2312 	case FFA_RXTX_MAP_SMC32:
2313 	case FFA_RXTX_MAP_SMC64:
2314 		return rxtx_map_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2315 					cookie, handle, flags);
2316 
2317 	case FFA_RXTX_UNMAP:
2318 		return rxtx_unmap_handler(smc_fid, secure_origin, x1, x2, x3,
2319 					  x4, cookie, handle, flags);
2320 
2321 	case FFA_PARTITION_INFO_GET:
2322 		return partition_info_get_handler(smc_fid, secure_origin, x1,
2323 						  x2, x3, x4, cookie, handle,
2324 						  flags);
2325 
2326 	case FFA_RX_RELEASE:
2327 		return rx_release_handler(smc_fid, secure_origin, x1, x2, x3,
2328 					  x4, cookie, handle, flags);
2329 
2330 	case FFA_MSG_WAIT:
2331 		return msg_wait_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2332 					cookie, handle, flags);
2333 
2334 	case FFA_ERROR:
2335 		return ffa_error_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2336 					cookie, handle, flags);
2337 
2338 	case FFA_MSG_RUN:
2339 		return ffa_run_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2340 				       cookie, handle, flags);
2341 
2342 	case FFA_MEM_SHARE_SMC32:
2343 	case FFA_MEM_SHARE_SMC64:
2344 	case FFA_MEM_LEND_SMC32:
2345 	case FFA_MEM_LEND_SMC64:
2346 		return spmc_ffa_mem_send(smc_fid, secure_origin, x1, x2, x3, x4,
2347 					 cookie, handle, flags);
2348 
2349 	case FFA_MEM_FRAG_TX:
2350 		return spmc_ffa_mem_frag_tx(smc_fid, secure_origin, x1, x2, x3,
2351 					    x4, cookie, handle, flags);
2352 
2353 	case FFA_MEM_FRAG_RX:
2354 		return spmc_ffa_mem_frag_rx(smc_fid, secure_origin, x1, x2, x3,
2355 					    x4, cookie, handle, flags);
2356 
2357 	case FFA_MEM_RETRIEVE_REQ_SMC32:
2358 	case FFA_MEM_RETRIEVE_REQ_SMC64:
2359 		return spmc_ffa_mem_retrieve_req(smc_fid, secure_origin, x1, x2,
2360 						 x3, x4, cookie, handle, flags);
2361 
2362 	case FFA_MEM_RELINQUISH:
2363 		return spmc_ffa_mem_relinquish(smc_fid, secure_origin, x1, x2,
2364 					       x3, x4, cookie, handle, flags);
2365 
2366 	case FFA_MEM_RECLAIM:
2367 		return spmc_ffa_mem_reclaim(smc_fid, secure_origin, x1, x2, x3,
2368 					    x4, cookie, handle, flags);
2369 
2370 	case FFA_MEM_PERM_GET:
2371 		return ffa_mem_perm_get_handler(smc_fid, secure_origin, x1, x2,
2372 						x3, x4, cookie, handle, flags);
2373 
2374 	case FFA_MEM_PERM_SET:
2375 		return ffa_mem_perm_set_handler(smc_fid, secure_origin, x1, x2,
2376 						x3, x4, cookie, handle, flags);
2377 
2378 	default:
2379 		WARN("Unsupported FF-A call 0x%08x.\n", smc_fid);
2380 		break;
2381 	}
2382 	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
2383 }
2384 
2385 /*******************************************************************************
2386  * This function is the handler registered for S-EL1 interrupts by the SPMC. It
2387  * validates the interrupt and upon success arranges entry into the SP for
2388  * handling the interrupt.
2389  ******************************************************************************/
2390 static uint64_t spmc_sp_interrupt_handler(uint32_t id,
2391 					  uint32_t flags,
2392 					  void *handle,
2393 					  void *cookie)
2394 {
2395 	struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
2396 	struct sp_exec_ctx *ec;
2397 	uint32_t linear_id = plat_my_core_pos();
2398 
2399 	/* Sanity check to avoid a NULL pointer dereference. */
2400 	assert(sp != NULL);
2401 
2402 	/* Check the security state when the exception was generated. */
2403 	assert(get_interrupt_src_ss(flags) == NON_SECURE);
2404 
2405 	/* Panic if not an S-EL1 Partition. */
2406 	if (sp->runtime_el != S_EL1) {
2407 		ERROR("Interrupt received for a non S-EL1 SP on core%u.\n",
2408 		      linear_id);
2409 		panic();
2410 	}
2411 
2412 	/* Obtain a reference to the SP execution context. */
2413 	ec = spmc_get_sp_ec(sp);
2414 
2415 	/* Ensure that the execution context is in the waiting state, else panic. */
2416 	if (ec->rt_state != RT_STATE_WAITING) {
2417 		ERROR("SP EC on core%u is not waiting: expected state %u, got %u.\n",
2418 		      linear_id, RT_STATE_WAITING, ec->rt_state);
2419 		panic();
2420 	}
2421 
2422 	/* Update the runtime model and state of the partition. */
2423 	ec->rt_model = RT_MODEL_INTR;
2424 	ec->rt_state = RT_STATE_RUNNING;
2425 
2426 	VERBOSE("SP (0x%x) interrupt start on core%u.\n", sp->sp_id, linear_id);
2427 
2428 	/*
2429 	 * Forward the interrupt to the S-EL1 SP. The interrupt ID is not
2430 	 * populated as the SP can determine this by itself.
2431 	 */
2432 	return spmd_smc_switch_state(FFA_INTERRUPT, false,
2433 				     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
2434 				     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
2435 				     handle);
2436 }
2437