xref: /rk3399_ARM-atf/services/std_svc/spm/el3_spmc/spmc_main.c (revision 70d37dec8366aa8f88df91e568d8250e4089b214)
1 /*
2  * Copyright (c) 2022-2025, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <errno.h>
9 #include <stdio.h>
10 
11 #include <arch_helpers.h>
12 #include <bl31/bl31.h>
13 #include <bl31/ehf.h>
14 #include <bl31/interrupt_mgmt.h>
15 #include <common/debug.h>
16 #include <common/fdt_wrappers.h>
17 #include <common/runtime_svc.h>
18 #include <common/uuid.h>
19 #include <lib/el3_runtime/context_mgmt.h>
20 #include <lib/smccc.h>
21 #include <lib/utils.h>
22 #include <lib/xlat_tables/xlat_tables_v2.h>
23 #include <libfdt.h>
24 #include <plat/common/platform.h>
25 #include <services/el3_spmc_logical_sp.h>
26 #include <services/ffa_svc.h>
27 #include <services/spmc_svc.h>
28 #include <services/spmd_svc.h>
29 #include "spmc.h"
30 #include "spmc_shared_mem.h"
31 #if TRANSFER_LIST
32 #include <transfer_list.h>
33 #endif
34 
35 #include <platform_def.h>
36 
37 /* FFA_MEM_PERM_* helpers */
38 #define FFA_MEM_PERM_MASK		U(7)
39 #define FFA_MEM_PERM_DATA_MASK		U(3)
40 #define FFA_MEM_PERM_DATA_SHIFT		U(0)
41 #define FFA_MEM_PERM_DATA_NA		U(0)
42 #define FFA_MEM_PERM_DATA_RW		U(1)
43 #define FFA_MEM_PERM_DATA_RES		U(2)
44 #define FFA_MEM_PERM_DATA_RO		U(3)
45 #define FFA_MEM_PERM_INST_EXEC          (U(0) << 2)
46 #define FFA_MEM_PERM_INST_NON_EXEC      (U(1) << 2)
47 
/*
 * Declare the maximum number of SPs and EL3 LPs.
 * Parenthesized so the macro expands safely inside larger expressions
 * (e.g. multiplication or comparison with the sum).
 */
#define MAX_SP_LP_PARTITIONS	(SECURE_PARTITION_COUNT + MAX_EL3_LP_DESCS_COUNT)
50 
51 /*
52  * Allocate a secure partition descriptor to describe each SP in the system that
53  * does not reside at EL3.
54  */
55 static struct secure_partition_desc sp_desc[SECURE_PARTITION_COUNT];
56 
57 /*
58  * Allocate an NS endpoint descriptor to describe each VM and the Hypervisor in
59  * the system that interacts with a SP. It is used to track the Hypervisor
60  * buffer pair, version and ID for now. It could be extended to track VM
61  * properties when the SPMC supports indirect messaging.
62  */
63 static struct ns_endpoint_desc ns_ep_desc[NS_PARTITION_COUNT];
64 
65 static uint64_t spmc_sp_interrupt_handler(uint32_t id,
66 					  uint32_t flags,
67 					  void *handle,
68 					  void *cookie);
69 
70 /*
71  * Helper function to obtain the array storing the EL3
72  * Logical Partition descriptors.
73  */
74 struct el3_lp_desc *get_el3_lp_array(void)
75 {
76 	return (struct el3_lp_desc *) EL3_LP_DESCS_START;
77 }
78 
79 /*
80  * Helper function to obtain the descriptor of the last SP to whom control was
81  * handed to on this physical cpu. Currently, we assume there is only one SP.
82  * TODO: Expand to track multiple partitions when required.
83  */
84 struct secure_partition_desc *spmc_get_current_sp_ctx(void)
85 {
86 	return &(sp_desc[ACTIVE_SP_DESC_INDEX]);
87 }
88 
89 /*
90  * Helper function to obtain the execution context of an SP on the
91  * current physical cpu.
92  */
93 struct sp_exec_ctx *spmc_get_sp_ec(struct secure_partition_desc *sp)
94 {
95 	return &(sp->ec[get_ec_index(sp)]);
96 }
97 
98 /* Helper function to get pointer to SP context from its ID. */
99 struct secure_partition_desc *spmc_get_sp_ctx(uint16_t id)
100 {
101 	/* Check for Secure World Partitions. */
102 	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
103 		if (sp_desc[i].sp_id == id) {
104 			return &(sp_desc[i]);
105 		}
106 	}
107 	return NULL;
108 }
109 
110 /*
111  * Helper function to obtain the descriptor of the Hypervisor or OS kernel.
112  * We assume that the first descriptor is reserved for this entity.
113  */
114 struct ns_endpoint_desc *spmc_get_hyp_ctx(void)
115 {
116 	return &(ns_ep_desc[0]);
117 }
118 
119 /*
120  * Helper function to obtain the RX/TX buffer pair descriptor of the Hypervisor
121  * or OS kernel in the normal world or the last SP that was run.
122  */
123 struct mailbox *spmc_get_mbox_desc(bool secure_origin)
124 {
125 	/* Obtain the RX/TX buffer pair descriptor. */
126 	if (secure_origin) {
127 		return &(spmc_get_current_sp_ctx()->mailbox);
128 	} else {
129 		return &(spmc_get_hyp_ctx()->mailbox);
130 	}
131 }
132 
/******************************************************************************
 * This function returns to the place where spmc_sp_synchronous_entry() was
 * called originally.
 * ec: execution context holding the saved C runtime context (c_rt_ctx) to
 *     restore.
 * rc: value delivered in x0 to the original caller.
 ******************************************************************************/
__dead2 void spmc_sp_synchronous_exit(struct sp_exec_ctx *ec, uint64_t rc)
{
	/*
	 * The SPM must have initiated the original request through a
	 * synchronous entry into the secure partition. Jump back to the
	 * original C runtime context with the value of rc in x0;
	 */
	spm_secure_partition_exit(ec->c_rt_ctx, rc);

	/* spm_secure_partition_exit() must not return; guard against it. */
	panic();
}
148 
/*******************************************************************************
 * Return FFA_ERROR with specified error code.
 * handle:     saved GP register context populated with the SMC return values.
 * error_code: FF-A status code carried in the third return register.
 * The SMC_RET8 macro performs the exception return; remaining registers
 * are zeroed as Must-Be-Zero per the FF-A ABI.
 ******************************************************************************/
uint64_t spmc_ffa_error_return(void *handle, int error_code)
{
	SMC_RET8(handle, FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}
159 
160 /******************************************************************************
161  * Helper function to validate a secure partition ID to ensure it does not
162  * conflict with any other FF-A component and follows the convention to
163  * indicate it resides within the secure world.
164  ******************************************************************************/
165 bool is_ffa_secure_id_valid(uint16_t partition_id)
166 {
167 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
168 
169 	/* Ensure the ID is not the invalid partition ID. */
170 	if (partition_id == INV_SP_ID) {
171 		return false;
172 	}
173 
174 	/* Ensure the ID is not the SPMD ID. */
175 	if (partition_id == SPMD_DIRECT_MSG_ENDPOINT_ID) {
176 		return false;
177 	}
178 
179 	/*
180 	 * Ensure the ID follows the convention to indicate it resides
181 	 * in the secure world.
182 	 */
183 	if (!ffa_is_secure_world_id(partition_id)) {
184 		return false;
185 	}
186 
187 	/* Ensure we don't conflict with the SPMC partition ID. */
188 	if (partition_id == FFA_SPMC_ID) {
189 		return false;
190 	}
191 
192 	/* Ensure we do not already have an SP context with this ID. */
193 	if (spmc_get_sp_ctx(partition_id)) {
194 		return false;
195 	}
196 
197 	/* Ensure we don't clash with any Logical SP's. */
198 	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
199 		if (el3_lp_descs[i].sp_id == partition_id) {
200 			return false;
201 		}
202 	}
203 
204 	return true;
205 }
206 
/*******************************************************************************
 * Either forward the request to the other world or return with an ERET,
 * depending on the origin of the call and the destination world.
 * The destination is always an entity at a lower exception level: messages
 * for EL3 logical SPs were already handled before reaching this function.
 ******************************************************************************/
static uint64_t spmc_smc_return(uint32_t smc_fid,
				bool secure_origin,
				uint64_t x1,
				uint64_t x2,
				uint64_t x3,
				uint64_t x4,
				void *handle,
				void *cookie,
				uint64_t flags,
				uint16_t dst_id,
				uint32_t sp_ffa_version)
{
	/* Normal world destinations are always reached via the SPMD. */
	if (ffa_is_normal_world_id(dst_id)) {
		return spmd_smc_handler(smc_fid, x1, x2, x3, x4,
					cookie, handle, flags, sp_ffa_version);
	}

	if (ffa_is_secure_world_id(dst_id)) {
		/* Secure -> secure: ERET straight back. */
		if (secure_origin) {
			SMC_RET5(handle, smc_fid, x1, x2, x3, x4);
		}

		/* Normal -> secure: perform a world switch. */
		return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2,
					     x3, x4, handle, flags,
					     sp_ffa_version);
	}

	/* Unknown State. */
	panic();

	/* Shouldn't be Reached. */
	return 0;
}
250 
251 /*******************************************************************************
252  * FF-A ABI Handlers.
253  ******************************************************************************/
254 
255 /*******************************************************************************
256  * Helper function to validate arg2 as part of a direct message.
257  ******************************************************************************/
258 static inline bool direct_msg_validate_arg2(uint64_t x2)
259 {
260 	/* Check message type. */
261 	if (x2 & FFA_FWK_MSG_BIT) {
262 		/* We have a framework message, ensure it is a known message. */
263 		if (x2 & ~(FFA_FWK_MSG_MASK | FFA_FWK_MSG_BIT)) {
264 			VERBOSE("Invalid message format 0x%lx.\n", x2);
265 			return false;
266 		}
267 	} else {
268 		/* We have a partition messages, ensure x2 is not set. */
269 		if (x2 != (uint64_t) 0) {
270 			VERBOSE("Arg2 MBZ for partition messages. (0x%lx).\n",
271 				x2);
272 			return false;
273 		}
274 	}
275 	return true;
276 }
277 
278 /*******************************************************************************
279  * Helper function to validate the destination ID of a direct response.
280  ******************************************************************************/
281 static bool direct_msg_validate_dst_id(uint16_t dst_id)
282 {
283 	struct secure_partition_desc *sp;
284 
285 	/* Check if we're targeting a normal world partition. */
286 	if (ffa_is_normal_world_id(dst_id)) {
287 		return true;
288 	}
289 
290 	/* Or directed to the SPMC itself.*/
291 	if (dst_id == FFA_SPMC_ID) {
292 		return true;
293 	}
294 
295 	/* Otherwise ensure the SP exists. */
296 	sp = spmc_get_sp_ctx(dst_id);
297 	if (sp != NULL) {
298 		return true;
299 	}
300 
301 	return false;
302 }
303 
304 /*******************************************************************************
305  * Helper function to validate the response from a Logical Partition.
306  ******************************************************************************/
307 static bool direct_msg_validate_lp_resp(uint16_t origin_id, uint16_t lp_id,
308 					void *handle)
309 {
310 	/* Retrieve populated Direct Response Arguments. */
311 	uint64_t smc_fid = SMC_GET_GP(handle, CTX_GPREG_X0);
312 	uint64_t x1 = SMC_GET_GP(handle, CTX_GPREG_X1);
313 	uint64_t x2 = SMC_GET_GP(handle, CTX_GPREG_X2);
314 	uint16_t src_id = ffa_endpoint_source(x1);
315 	uint16_t dst_id = ffa_endpoint_destination(x1);
316 
317 	if (src_id != lp_id) {
318 		ERROR("Invalid EL3 LP source ID (0x%x).\n", src_id);
319 		return false;
320 	}
321 
322 	/*
323 	 * Check the destination ID is valid and ensure the LP is responding to
324 	 * the original request.
325 	 */
326 	if ((!direct_msg_validate_dst_id(dst_id)) || (dst_id != origin_id)) {
327 		ERROR("Invalid EL3 LP destination ID (0x%x).\n", dst_id);
328 		return false;
329 	}
330 
331 	if ((smc_fid != FFA_MSG_SEND_DIRECT_RESP2_SMC64) &&
332 			!direct_msg_validate_arg2(x2)) {
333 		ERROR("Invalid EL3 LP message encoding.\n");
334 		return false;
335 	}
336 	return true;
337 }
338 
339 /*******************************************************************************
340  * Helper function to check that partition can receive direct msg or not.
341  ******************************************************************************/
342 static bool direct_msg_receivable(uint32_t properties, uint16_t dir_req_fnum)
343 {
344 	if ((dir_req_fnum == FFA_FNUM_MSG_SEND_DIRECT_REQ &&
345 			((properties & FFA_PARTITION_DIRECT_REQ_RECV) == 0U)) ||
346 			(dir_req_fnum == FFA_FNUM_MSG_SEND_DIRECT_REQ2 &&
347 			((properties & FFA_PARTITION_DIRECT_REQ2_RECV) == 0U))) {
348 		return false;
349 	}
350 
351 	return true;
352 }
353 
354 /*******************************************************************************
355  * Helper function to obtain the FF-A version of the calling partition.
356  ******************************************************************************/
357 uint32_t get_partition_ffa_version(bool secure_origin)
358 {
359 	if (secure_origin) {
360 		return spmc_get_current_sp_ctx()->ffa_version;
361 	} else {
362 		return spmc_get_hyp_ctx()->ffa_version;
363 	}
364 }
365 
/*******************************************************************************
 * Handle direct request messages and route to the appropriate destination.
 *
 * Routing order: EL3 Logical Partitions are checked first (handled
 * synchronously within EL3); otherwise the request is forwarded to the
 * target SP. Requests originating in the secure world that do not target an
 * LSP are rejected, since an SP cannot issue a direct request to the normal
 * world and no other SP exists to call into.
 *
 * Errors returned: NOT_SUPPORTED (DIRECT_REQ2 from pre-v1.2 caller),
 * INVALID_PARAMETER (bad arg2/sender/destination), DENIED (target cannot
 * receive this flavour), BUSY (target execution context not waiting).
 ******************************************************************************/
static uint64_t direct_req_smc_handler(uint32_t smc_fid,
				       bool secure_origin,
				       uint64_t x1,
				       uint64_t x2,
				       uint64_t x3,
				       uint64_t x4,
				       void *cookie,
				       void *handle,
				       uint64_t flags)
{
	uint16_t src_id = ffa_endpoint_source(x1);
	uint16_t dst_id = ffa_endpoint_destination(x1);
	uint16_t dir_req_funcid;
	struct el3_lp_desc *el3_lp_descs;
	struct secure_partition_desc *sp;
	unsigned int idx;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);

	/* Distinguish DIRECT_REQ from DIRECT_REQ2 by the incoming SMC FID. */
	dir_req_funcid = (smc_fid != FFA_MSG_SEND_DIRECT_REQ2_SMC64) ?
		FFA_FNUM_MSG_SEND_DIRECT_REQ : FFA_FNUM_MSG_SEND_DIRECT_REQ2;

	/* DIRECT_REQ2 is only available to callers at FF-A v1.2 or later. */
	if ((dir_req_funcid == FFA_FNUM_MSG_SEND_DIRECT_REQ2) &&
			ffa_version < MAKE_FFA_VERSION(U(1), U(2))) {
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/*
	 * Sanity check for DIRECT_REQ:
	 * Check if arg2 has been populated correctly based on message type
	 */
	if ((dir_req_funcid == FFA_FNUM_MSG_SEND_DIRECT_REQ) &&
			!direct_msg_validate_arg2(x2)) {
		return spmc_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
	}

	/* Validate Sender is either the current SP or from the normal world. */
	if ((secure_origin && src_id != spmc_get_current_sp_ctx()->sp_id) ||
		(!secure_origin && !ffa_is_normal_world_id(src_id))) {
		ERROR("Invalid direct request source ID (0x%x).\n", src_id);
		return spmc_ffa_error_return(handle,
					FFA_ERROR_INVALID_PARAMETER);
	}

	el3_lp_descs = get_el3_lp_array();

	/* Check if the request is destined for a Logical Partition. */
	for (unsigned int i = 0U; i < MAX_EL3_LP_DESCS_COUNT; i++) {
		if (el3_lp_descs[i].sp_id == dst_id) {
			if (!direct_msg_receivable(el3_lp_descs[i].properties, dir_req_funcid)) {
				return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
			}

			/* LSP requests are handled synchronously in EL3. */
			uint64_t ret = el3_lp_descs[i].direct_req(
						smc_fid, secure_origin, x1, x2,
						x3, x4, cookie, handle, flags);
			/* A malformed LSP response is fatal. */
			if (!direct_msg_validate_lp_resp(src_id, dst_id,
							 handle)) {
				panic();
			}

			/* Message checks out. */
			return ret;
		}
	}

	/*
	 * If the request was not targeted to a LSP and from the secure world
	 * then it is invalid since a SP cannot call into the Normal world and
	 * there is no other SP to call into. If there are other SPs in future
	 * then the partition runtime model would need to be validated as well.
	 */
	if (secure_origin) {
		VERBOSE("Direct request not supported to the Normal World.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Check if the SP ID is valid. */
	sp = spmc_get_sp_ctx(dst_id);
	if (sp == NULL) {
		VERBOSE("Direct request to unknown partition ID (0x%x).\n",
			dst_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (!direct_msg_receivable(sp->properties, dir_req_funcid)) {
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* Protect the runtime state of a UP S-EL0 SP with a lock. */
	if (sp->runtime_el == S_EL0) {
		spin_lock(&sp->rt_state_lock);
	}

	/*
	 * Check that the target execution context is in a waiting state before
	 * forwarding the direct request to it.
	 */
	idx = get_ec_index(sp);
	if (sp->ec[idx].rt_state != RT_STATE_WAITING) {
		VERBOSE("SP context on core%u is not waiting (%u).\n",
			idx, sp->ec[idx].rt_model);

		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}

		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
	}

	/*
	 * Everything checks out so forward the request to the SP after updating
	 * its state and runtime model.
	 */
	sp->ec[idx].rt_state = RT_STATE_RUNNING;
	sp->ec[idx].rt_model = RT_MODEL_DIR_REQ;
	/* Record originator and ABI flavour so the response can be matched. */
	sp->ec[idx].dir_req_origin_id = src_id;
	sp->ec[idx].dir_req_funcid = dir_req_funcid;

	if (sp->runtime_el == S_EL0) {
		spin_unlock(&sp->rt_state_lock);
	}

	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
			       handle, cookie, flags, dst_id, sp->ffa_version);
}
496 
497 /*******************************************************************************
498  * Handle direct response messages and route to the appropriate destination.
499  ******************************************************************************/
500 static uint64_t direct_resp_smc_handler(uint32_t smc_fid,
501 					bool secure_origin,
502 					uint64_t x1,
503 					uint64_t x2,
504 					uint64_t x3,
505 					uint64_t x4,
506 					void *cookie,
507 					void *handle,
508 					uint64_t flags)
509 {
510 	uint16_t dst_id = ffa_endpoint_destination(x1);
511 	uint16_t dir_req_funcid;
512 	struct secure_partition_desc *sp;
513 	unsigned int idx;
514 
515 	dir_req_funcid = (smc_fid != FFA_MSG_SEND_DIRECT_RESP2_SMC64) ?
516 		FFA_FNUM_MSG_SEND_DIRECT_REQ : FFA_FNUM_MSG_SEND_DIRECT_REQ2;
517 
518 	/* Check if arg2 has been populated correctly based on message type. */
519 	if (!direct_msg_validate_arg2(x2)) {
520 		return spmc_ffa_error_return(handle,
521 					     FFA_ERROR_INVALID_PARAMETER);
522 	}
523 
524 	/* Check that the response did not originate from the Normal world. */
525 	if (!secure_origin) {
526 		VERBOSE("Direct Response not supported from Normal World.\n");
527 		return spmc_ffa_error_return(handle,
528 					     FFA_ERROR_INVALID_PARAMETER);
529 	}
530 
531 	/*
532 	 * Check that the response is either targeted to the Normal world or the
533 	 * SPMC e.g. a PM response.
534 	 */
535 	if (!direct_msg_validate_dst_id(dst_id)) {
536 		VERBOSE("Direct response to invalid partition ID (0x%x).\n",
537 			dst_id);
538 		return spmc_ffa_error_return(handle,
539 					     FFA_ERROR_INVALID_PARAMETER);
540 	}
541 
542 	/* Obtain the SP descriptor and update its runtime state. */
543 	sp = spmc_get_sp_ctx(ffa_endpoint_source(x1));
544 	if (sp == NULL) {
545 		VERBOSE("Direct response to unknown partition ID (0x%x).\n",
546 			dst_id);
547 		return spmc_ffa_error_return(handle,
548 					     FFA_ERROR_INVALID_PARAMETER);
549 	}
550 
551 	if (sp->runtime_el == S_EL0) {
552 		spin_lock(&sp->rt_state_lock);
553 	}
554 
555 	/* Sanity check state is being tracked correctly in the SPMC. */
556 	idx = get_ec_index(sp);
557 	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
558 
559 	/* Ensure SP execution context was in the right runtime model. */
560 	if (sp->ec[idx].rt_model != RT_MODEL_DIR_REQ) {
561 		VERBOSE("SP context on core%u not handling direct req (%u).\n",
562 			idx, sp->ec[idx].rt_model);
563 		if (sp->runtime_el == S_EL0) {
564 			spin_unlock(&sp->rt_state_lock);
565 		}
566 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
567 	}
568 
569 	if (dir_req_funcid != sp->ec[idx].dir_req_funcid) {
570 		WARN("Unmatched direct req/resp func id. req:%x, resp:%x on core%u.\n",
571 		     sp->ec[idx].dir_req_funcid, (smc_fid & FUNCID_NUM_MASK), idx);
572 		if (sp->runtime_el == S_EL0) {
573 			spin_unlock(&sp->rt_state_lock);
574 		}
575 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
576 	}
577 
578 	if (sp->ec[idx].dir_req_origin_id != dst_id) {
579 		WARN("Invalid direct resp partition ID 0x%x != 0x%x on core%u.\n",
580 		     dst_id, sp->ec[idx].dir_req_origin_id, idx);
581 		if (sp->runtime_el == S_EL0) {
582 			spin_unlock(&sp->rt_state_lock);
583 		}
584 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
585 	}
586 
587 	/* Update the state of the SP execution context. */
588 	sp->ec[idx].rt_state = RT_STATE_WAITING;
589 
590 	/* Clear the ongoing direct request ID. */
591 	sp->ec[idx].dir_req_origin_id = INV_SP_ID;
592 
593 	/* Clear the ongoing direct request message version. */
594 	sp->ec[idx].dir_req_funcid = 0U;
595 
596 	if (sp->runtime_el == S_EL0) {
597 		spin_unlock(&sp->rt_state_lock);
598 	}
599 
600 	/*
601 	 * If the receiver is not the SPMC then forward the response to the
602 	 * Normal world.
603 	 */
604 	if (dst_id == FFA_SPMC_ID) {
605 		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
606 		/* Should not get here. */
607 		panic();
608 	}
609 
610 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
611 			       handle, cookie, flags, dst_id, sp->ffa_version);
612 }
613 
/*******************************************************************************
 * This function handles the FFA_MSG_WAIT SMC to allow an SP to relinquish its
 * cycles.
 *
 * Only callable from the secure world. Behaviour depends on the SP's runtime
 * model: during initialisation a synchronous exit back to the SPMC is
 * performed; after secure interrupt handling the normal world is resumed;
 * in a direct request the call is denied; otherwise the call is forwarded to
 * the normal world.
 ******************************************************************************/
static uint64_t msg_wait_handler(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags)
{
	struct secure_partition_desc *sp;
	unsigned int idx;

	/*
	 * Check that the response did not originate from the Normal world as
	 * only the secure world can call this ABI.
	 */
	if (!secure_origin) {
		VERBOSE("Normal world cannot call FFA_MSG_WAIT.\n");
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/* Get the descriptor of the SP that invoked FFA_MSG_WAIT. */
	sp = spmc_get_current_sp_ctx();
	if (sp == NULL) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/*
	 * Get the execution context of the SP that invoked FFA_MSG_WAIT.
	 */
	idx = get_ec_index(sp);
	/* Protect the runtime state of a S-EL0 SP with a lock. */
	if (sp->runtime_el == S_EL0) {
		spin_lock(&sp->rt_state_lock);
	}

	/* Ensure SP execution context was in the right runtime model. */
	if (sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) {
		/* FFA_MSG_WAIT is not a valid reply to a direct request. */
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* Sanity check the state is being tracked correctly in the SPMC. */
	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);

	/*
	 * Perform a synchronous exit if the partition was initialising. The
	 * state is updated after the exit.
	 */
	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}
		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
		/* Should not get here */
		panic();
	}

	/* Update the state of the SP execution context. */
	sp->ec[idx].rt_state = RT_STATE_WAITING;

	/* Resume normal world if a secure interrupt was handled. */
	if (sp->ec[idx].rt_model == RT_MODEL_INTR) {
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}

		return spmd_smc_switch_state(FFA_NORMAL_WORLD_RESUME, secure_origin,
					     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					     handle, flags, sp->ffa_version);
	}

	/* Protect the runtime state of a S-EL0 SP with a lock. */
	if (sp->runtime_el == S_EL0) {
		spin_unlock(&sp->rt_state_lock);
	}

	/* Forward the response to the Normal world. */
	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
			       handle, cookie, flags, FFA_NWD_ID, sp->ffa_version);
}
703 
704 static uint64_t ffa_error_handler(uint32_t smc_fid,
705 				 bool secure_origin,
706 				 uint64_t x1,
707 				 uint64_t x2,
708 				 uint64_t x3,
709 				 uint64_t x4,
710 				 void *cookie,
711 				 void *handle,
712 				 uint64_t flags)
713 {
714 	struct secure_partition_desc *sp;
715 	unsigned int idx;
716 	uint16_t dst_id = ffa_endpoint_destination(x1);
717 	bool cancel_dir_req = false;
718 
719 	/* Check that the response did not originate from the Normal world. */
720 	if (!secure_origin) {
721 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
722 	}
723 
724 	/* Get the descriptor of the SP that invoked FFA_ERROR. */
725 	sp = spmc_get_current_sp_ctx();
726 	if (sp == NULL) {
727 		return spmc_ffa_error_return(handle,
728 					     FFA_ERROR_INVALID_PARAMETER);
729 	}
730 
731 	/* Get the execution context of the SP that invoked FFA_ERROR. */
732 	idx = get_ec_index(sp);
733 
734 	/*
735 	 * We only expect FFA_ERROR to be received during SP initialisation
736 	 * otherwise this is an invalid call.
737 	 */
738 	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
739 		ERROR("SP 0x%x failed to initialize.\n", sp->sp_id);
740 		spmc_sp_synchronous_exit(&sp->ec[idx], x2);
741 		/* Should not get here. */
742 		panic();
743 	}
744 
745 	if (sp->runtime_el == S_EL0) {
746 		spin_lock(&sp->rt_state_lock);
747 	}
748 
749 	if (sp->ec[idx].rt_state == RT_STATE_RUNNING &&
750 			sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) {
751 		sp->ec[idx].rt_state = RT_STATE_WAITING;
752 		sp->ec[idx].dir_req_origin_id = INV_SP_ID;
753 		sp->ec[idx].dir_req_funcid = 0x00;
754 		cancel_dir_req = true;
755 	}
756 
757 	if (sp->runtime_el == S_EL0) {
758 		spin_unlock(&sp->rt_state_lock);
759 	}
760 
761 	if (cancel_dir_req) {
762 		if (dst_id == FFA_SPMC_ID) {
763 			spmc_sp_synchronous_exit(&sp->ec[idx], x4);
764 			/* Should not get here. */
765 			panic();
766 		} else
767 			return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
768 					       handle, cookie, flags, dst_id, sp->ffa_version);
769 	}
770 
771 	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
772 }
773 
774 static uint64_t ffa_version_handler(uint32_t smc_fid,
775 				    bool secure_origin,
776 				    uint64_t x1,
777 				    uint64_t x2,
778 				    uint64_t x3,
779 				    uint64_t x4,
780 				    void *cookie,
781 				    void *handle,
782 				    uint64_t flags)
783 {
784 	uint32_t requested_version = x1 & FFA_VERSION_MASK;
785 
786 	if (requested_version & FFA_VERSION_BIT31_MASK) {
787 		/* Invalid encoding, return an error. */
788 		SMC_RET1(handle, FFA_ERROR_NOT_SUPPORTED);
789 		/* Execution stops here. */
790 	}
791 
792 	/* Determine the caller to store the requested version. */
793 	if (secure_origin) {
794 		/*
795 		 * Ensure that the SP is reporting the same version as
796 		 * specified in its manifest. If these do not match there is
797 		 * something wrong with the SP.
798 		 * TODO: Should we abort the SP? For now assert this is not
799 		 *       case.
800 		 */
801 		assert(requested_version ==
802 		       spmc_get_current_sp_ctx()->ffa_version);
803 	} else {
804 		/*
805 		 * If this is called by the normal world, record this
806 		 * information in its descriptor.
807 		 */
808 		spmc_get_hyp_ctx()->ffa_version = requested_version;
809 	}
810 
811 	SMC_RET1(handle, MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
812 					  FFA_VERSION_MINOR));
813 }
814 
/*******************************************************************************
 * Handle FFA_RXTX_MAP: map the caller's RX/TX buffer pair into the SPMC's
 * translation regime and record it in the mailbox descriptor.
 * x1: TX buffer base address, x2: RX buffer base address,
 * x3[5:0]: buffer size in 4K FF-A pages.
 * The TX buffer is mapped read-only (SPMC only reads from it), the RX buffer
 * read-write. On any mapping failure the partial mapping is undone.
 ******************************************************************************/
static uint64_t rxtx_map_handler(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags)
{
	int ret;
	uint32_t error_code;
	/* Buffers are mapped secure or non-secure based on the caller. */
	uint32_t mem_atts = secure_origin ? MT_SECURE : MT_NS;
	struct mailbox *mbox;
	uintptr_t tx_address = x1;
	uintptr_t rx_address = x2;
	uint32_t page_count = x3 & FFA_RXTX_PAGE_COUNT_MASK; /* Bits [5:0] */
	uint32_t buf_size = page_count * FFA_PAGE_SIZE;

	/*
	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
	 * ABI on behalf of a VM and reject it if this is the case.
	 */
	if (tx_address == 0 || rx_address == 0) {
		WARN("Mapping RX/TX Buffers on behalf of VM not supported.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Ensure the specified buffers are not the same. */
	if (tx_address == rx_address) {
		WARN("TX Buffer must not be the same as RX Buffer.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Ensure the buffer size is not 0. */
	if (buf_size == 0U) {
		WARN("Buffer size must not be 0\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/*
	 * Ensure the buffer size is a multiple of the translation granule size
	 * in TF-A.
	 */
	if (buf_size % PAGE_SIZE != 0U) {
		WARN("Buffer size must be aligned to translation granule.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Obtain the RX/TX buffer pair descriptor. */
	mbox = spmc_get_mbox_desc(secure_origin);

	spin_lock(&mbox->lock);

	/* Check if buffers have already been mapped. */
	if (mbox->rx_buffer != 0 || mbox->tx_buffer != 0) {
		WARN("RX/TX Buffers already mapped (%p/%p)\n",
		     (void *) mbox->rx_buffer, (void *)mbox->tx_buffer);
		error_code = FFA_ERROR_DENIED;
		goto err;
	}

	/* memmap the TX buffer as read only. */
	ret = mmap_add_dynamic_region(tx_address, /* PA */
			tx_address, /* VA */
			buf_size, /* size */
			mem_atts | MT_RO_DATA); /* attrs */
	if (ret != 0) {
		/* Return the correct error code. */
		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
						FFA_ERROR_INVALID_PARAMETER;
		WARN("Unable to map TX buffer: %d\n", error_code);
		goto err;
	}

	/* memmap the RX buffer as read write. */
	ret = mmap_add_dynamic_region(rx_address, /* PA */
			rx_address, /* VA */
			buf_size, /* size */
			mem_atts | MT_RW_DATA); /* attrs */

	if (ret != 0) {
		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
						FFA_ERROR_INVALID_PARAMETER;
		WARN("Unable to map RX buffer: %d\n", error_code);
		/* Unmap the TX buffer again. */
		mmap_remove_dynamic_region(tx_address, buf_size);
		goto err;
	}

	/* Record the pair in the mailbox descriptor under the lock. */
	mbox->tx_buffer = (void *) tx_address;
	mbox->rx_buffer = (void *) rx_address;
	mbox->rxtx_page_count = page_count;
	spin_unlock(&mbox->lock);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);
	/* Execution stops here. */
err:
	spin_unlock(&mbox->lock);
	return spmc_ffa_error_return(handle, error_code);
}
921 
/*
 * Handler for FFA_RXTX_UNMAP: unmap and forget the RX/TX buffer pair the
 * calling endpoint previously registered via FFA_RXTX_MAP.
 * On success returns FFA_SUCCESS; otherwise FFA_ERROR with
 * FFA_ERROR_INVALID_PARAMETER.
 */
static uint64_t rxtx_unmap_handler(uint32_t smc_fid,
				   bool secure_origin,
				   uint64_t x1,
				   uint64_t x2,
				   uint64_t x3,
				   uint64_t x4,
				   void *cookie,
				   void *handle,
				   uint64_t flags)
{
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	uint32_t buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;

	/*
	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
	 * ABI on behalf of a VM and reject it if this is the case.
	 */
	if (x1 != 0UL) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&mbox->lock);

	/* Check if buffers are currently mapped. */
	if (mbox->rx_buffer == 0 || mbox->tx_buffer == 0) {
		spin_unlock(&mbox->lock);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Unmap RX Buffer. A failure is logged but deliberately not fatal. */
	if (mmap_remove_dynamic_region((uintptr_t) mbox->rx_buffer,
				       buf_size) != 0) {
		WARN("Unable to unmap RX buffer!\n");
	}

	mbox->rx_buffer = 0;

	/* Unmap TX Buffer. Best-effort again; the descriptor is reset anyway. */
	if (mmap_remove_dynamic_region((uintptr_t) mbox->tx_buffer,
				       buf_size) != 0) {
		WARN("Unable to unmap TX buffer!\n");
	}

	mbox->tx_buffer = 0;
	mbox->rxtx_page_count = 0;

	spin_unlock(&mbox->lock);
	SMC_RET1(handle, FFA_SUCCESS_SMC32);
}
974 
975 /*
976  * Helper function to populate the properties field of a Partition Info Get
977  * descriptor.
978  */
979 static uint32_t
980 partition_info_get_populate_properties(uint32_t sp_properties,
981 				       enum sp_execution_state sp_ec_state)
982 {
983 	uint32_t properties = sp_properties;
984 	uint32_t ec_state;
985 
986 	/* Determine the execution state of the SP. */
987 	ec_state = sp_ec_state == SP_STATE_AARCH64 ?
988 		   FFA_PARTITION_INFO_GET_AARCH64_STATE :
989 		   FFA_PARTITION_INFO_GET_AARCH32_STATE;
990 
991 	properties |= ec_state << FFA_PARTITION_INFO_GET_EXEC_STATE_SHIFT;
992 
993 	return properties;
994 }
995 
996 /*
997  * Collate the partition information in a v1.1 partition information
998  * descriptor format, this will be converter later if required.
999  */
1000 static int partition_info_get_handler_v1_1(uint32_t *uuid,
1001 					   struct ffa_partition_info_v1_1
1002 						  *partitions,
1003 					   uint32_t max_partitions,
1004 					   uint32_t *partition_count)
1005 {
1006 	uint32_t index;
1007 	struct ffa_partition_info_v1_1 *desc;
1008 	bool null_uuid = is_null_uuid(uuid);
1009 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
1010 
1011 	/* Deal with Logical Partitions. */
1012 	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
1013 		if (null_uuid || uuid_match(uuid, el3_lp_descs[index].uuid)) {
1014 			/* Found a matching UUID, populate appropriately. */
1015 			if (*partition_count >= max_partitions) {
1016 				return FFA_ERROR_NO_MEMORY;
1017 			}
1018 
1019 			desc = &partitions[*partition_count];
1020 			desc->ep_id = el3_lp_descs[index].sp_id;
1021 			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
1022 			/* LSPs must be AArch64. */
1023 			desc->properties =
1024 				partition_info_get_populate_properties(
1025 					el3_lp_descs[index].properties,
1026 					SP_STATE_AARCH64);
1027 
1028 			if (null_uuid) {
1029 				copy_uuid(desc->uuid, el3_lp_descs[index].uuid);
1030 			}
1031 			(*partition_count)++;
1032 		}
1033 	}
1034 
1035 	/* Deal with physical SP's. */
1036 	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
1037 		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
1038 			/* Found a matching UUID, populate appropriately. */
1039 			if (*partition_count >= max_partitions) {
1040 				return FFA_ERROR_NO_MEMORY;
1041 			}
1042 
1043 			desc = &partitions[*partition_count];
1044 			desc->ep_id = sp_desc[index].sp_id;
1045 			/*
1046 			 * Execution context count must match No. cores for
1047 			 * S-EL1 SPs.
1048 			 */
1049 			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
1050 			desc->properties =
1051 				partition_info_get_populate_properties(
1052 					sp_desc[index].properties,
1053 					sp_desc[index].execution_state);
1054 
1055 			if (null_uuid) {
1056 				copy_uuid(desc->uuid, sp_desc[index].uuid);
1057 			}
1058 			(*partition_count)++;
1059 		}
1060 	}
1061 	return 0;
1062 }
1063 
1064 /*
1065  * Handle the case where that caller only wants the count of partitions
1066  * matching a given UUID and does not want the corresponding descriptors
1067  * populated.
1068  */
1069 static uint32_t partition_info_get_handler_count_only(uint32_t *uuid)
1070 {
1071 	uint32_t index = 0;
1072 	uint32_t partition_count = 0;
1073 	bool null_uuid = is_null_uuid(uuid);
1074 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
1075 
1076 	/* Deal with Logical Partitions. */
1077 	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
1078 		if (null_uuid ||
1079 		    uuid_match(uuid, el3_lp_descs[index].uuid)) {
1080 			(partition_count)++;
1081 		}
1082 	}
1083 
1084 	/* Deal with physical SP's. */
1085 	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
1086 		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
1087 			(partition_count)++;
1088 		}
1089 	}
1090 	return partition_count;
1091 }
1092 
1093 /*
1094  * If the caller of the PARTITION_INFO_GET ABI was a v1.0 caller, populate
1095  * the corresponding descriptor format from the v1.1 descriptor array.
1096  */
1097 static uint64_t partition_info_populate_v1_0(struct ffa_partition_info_v1_1
1098 					     *partitions,
1099 					     struct mailbox *mbox,
1100 					     int partition_count)
1101 {
1102 	uint32_t index;
1103 	uint32_t buf_size;
1104 	uint32_t descriptor_size;
1105 	struct ffa_partition_info_v1_0 *v1_0_partitions =
1106 		(struct ffa_partition_info_v1_0 *) mbox->rx_buffer;
1107 
1108 	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1109 	descriptor_size = partition_count *
1110 			  sizeof(struct ffa_partition_info_v1_0);
1111 
1112 	if (descriptor_size > buf_size) {
1113 		return FFA_ERROR_NO_MEMORY;
1114 	}
1115 
1116 	for (index = 0U; index < partition_count; index++) {
1117 		v1_0_partitions[index].ep_id = partitions[index].ep_id;
1118 		v1_0_partitions[index].execution_ctx_count =
1119 			partitions[index].execution_ctx_count;
1120 		/* Only report v1.0 properties. */
1121 		v1_0_partitions[index].properties =
1122 			(partitions[index].properties &
1123 			FFA_PARTITION_INFO_GET_PROPERTIES_V1_0_MASK);
1124 	}
1125 	return 0;
1126 }
1127 
1128 /*
1129  * Main handler for FFA_PARTITION_INFO_GET which supports both FF-A v1.1 and
1130  * v1.0 implementations.
1131  */
1132 static uint64_t partition_info_get_handler(uint32_t smc_fid,
1133 					   bool secure_origin,
1134 					   uint64_t x1,
1135 					   uint64_t x2,
1136 					   uint64_t x3,
1137 					   uint64_t x4,
1138 					   void *cookie,
1139 					   void *handle,
1140 					   uint64_t flags)
1141 {
1142 	int ret;
1143 	uint32_t partition_count = 0;
1144 	uint32_t size = 0;
1145 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1146 	struct mailbox *mbox;
1147 	uint64_t info_get_flags;
1148 	bool count_only;
1149 	uint32_t uuid[4];
1150 
1151 	uuid[0] = x1;
1152 	uuid[1] = x2;
1153 	uuid[2] = x3;
1154 	uuid[3] = x4;
1155 
1156 	/* Determine if the Partition descriptors should be populated. */
1157 	info_get_flags = SMC_GET_GP(handle, CTX_GPREG_X5);
1158 	count_only = (info_get_flags & FFA_PARTITION_INFO_GET_COUNT_FLAG_MASK);
1159 
1160 	/* Handle the case where we don't need to populate the descriptors. */
1161 	if (count_only) {
1162 		partition_count = partition_info_get_handler_count_only(uuid);
1163 		if (partition_count == 0) {
1164 			return spmc_ffa_error_return(handle,
1165 						FFA_ERROR_INVALID_PARAMETER);
1166 		}
1167 	} else {
1168 		struct ffa_partition_info_v1_1 partitions[MAX_SP_LP_PARTITIONS];
1169 
1170 		/*
1171 		 * Handle the case where the partition descriptors are required,
1172 		 * check we have the buffers available and populate the
1173 		 * appropriate structure version.
1174 		 */
1175 
1176 		/* Obtain the v1.1 format of the descriptors. */
1177 		ret = partition_info_get_handler_v1_1(uuid, partitions,
1178 						      MAX_SP_LP_PARTITIONS,
1179 						      &partition_count);
1180 
1181 		/* Check if an error occurred during discovery. */
1182 		if (ret != 0) {
1183 			goto err;
1184 		}
1185 
1186 		/* If we didn't find any matches the UUID is unknown. */
1187 		if (partition_count == 0) {
1188 			ret = FFA_ERROR_INVALID_PARAMETER;
1189 			goto err;
1190 		}
1191 
1192 		/* Obtain the partition mailbox RX/TX buffer pair descriptor. */
1193 		mbox = spmc_get_mbox_desc(secure_origin);
1194 
1195 		/*
1196 		 * If the caller has not bothered registering its RX/TX pair
1197 		 * then return an error code.
1198 		 */
1199 		spin_lock(&mbox->lock);
1200 		if (mbox->rx_buffer == NULL) {
1201 			ret = FFA_ERROR_BUSY;
1202 			goto err_unlock;
1203 		}
1204 
1205 		/* Ensure the RX buffer is currently free. */
1206 		if (mbox->state != MAILBOX_STATE_EMPTY) {
1207 			ret = FFA_ERROR_BUSY;
1208 			goto err_unlock;
1209 		}
1210 
1211 		/* Zero the RX buffer before populating. */
1212 		(void)memset(mbox->rx_buffer, 0,
1213 			     mbox->rxtx_page_count * FFA_PAGE_SIZE);
1214 
1215 		/*
1216 		 * Depending on the FF-A version of the requesting partition
1217 		 * we may need to convert to a v1.0 format otherwise we can copy
1218 		 * directly.
1219 		 */
1220 		if (ffa_version == MAKE_FFA_VERSION(U(1), U(0))) {
1221 			ret = partition_info_populate_v1_0(partitions,
1222 							   mbox,
1223 							   partition_count);
1224 			if (ret != 0) {
1225 				goto err_unlock;
1226 			}
1227 		} else {
1228 			uint32_t buf_size = mbox->rxtx_page_count *
1229 					    FFA_PAGE_SIZE;
1230 
1231 			/* Ensure the descriptor will fit in the buffer. */
1232 			size = sizeof(struct ffa_partition_info_v1_1);
1233 			if (partition_count * size  > buf_size) {
1234 				ret = FFA_ERROR_NO_MEMORY;
1235 				goto err_unlock;
1236 			}
1237 			memcpy(mbox->rx_buffer, partitions,
1238 			       partition_count * size);
1239 		}
1240 
1241 		mbox->state = MAILBOX_STATE_FULL;
1242 		spin_unlock(&mbox->lock);
1243 	}
1244 	SMC_RET4(handle, FFA_SUCCESS_SMC32, 0, partition_count, size);
1245 
1246 err_unlock:
1247 	spin_unlock(&mbox->lock);
1248 err:
1249 	return spmc_ffa_error_return(handle, ret);
1250 }
1251 
/* Return FFA_SUCCESS with the queried feature properties reported in w2. */
static uint64_t ffa_feature_success(void *handle, uint32_t arg2)
{
	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0, arg2);
}
1256 
/*
 * Handle an FFA_FEATURES query on FFA_MEM_RETRIEVE_REQ.
 * Validates the NS-bit request in input_properties against the caller's
 * world and FF-A version:
 *  - NWd callers must not request the NS bit.
 *  - v1.1 (or later as matched here) SPs must request it and get it
 *    reported back.
 *  - v1.0 SPs may optionally request it; the choice is recorded in the SP
 *    descriptor for use during later memory retrievals.
 */
static uint64_t ffa_features_retrieve_request(bool secure_origin,
					      uint32_t input_properties,
					      void *handle)
{
	/*
	 * If we're called by the normal world we don't support any
	 * additional features.
	 */
	if (!secure_origin) {
		if ((input_properties & FFA_FEATURES_RET_REQ_NS_BIT) != 0U) {
			return spmc_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

	} else {
		struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
		/*
		 * If v1.1 the NS bit must be set otherwise it is an invalid
		 * call. If v1.0 check and store whether the SP has requested
		 * the use of the NS bit.
		 */
		if (sp->ffa_version == MAKE_FFA_VERSION(1, 1)) {
			if ((input_properties &
			     FFA_FEATURES_RET_REQ_NS_BIT) == 0U) {
				return spmc_ffa_error_return(handle,
						       FFA_ERROR_NOT_SUPPORTED);
			}
			return ffa_feature_success(handle,
						   FFA_FEATURES_RET_REQ_NS_BIT);
		} else {
			/* v1.0 SP: remember whether the NS bit was asked for. */
			sp->ns_bit_requested = (input_properties &
					       FFA_FEATURES_RET_REQ_NS_BIT) !=
					       0U;
		}
		if (sp->ns_bit_requested) {
			return ffa_feature_success(handle,
						   FFA_FEATURES_RET_REQ_NS_BIT);
		}
	}
	/* No extra properties to report. */
	SMC_RET1(handle, FFA_SUCCESS_SMC32);
}
1298 
/*
 * Handler for FFA_FEATURES: report whether the FF-A ABI named in x1 is
 * implemented, and any additional properties it supports, taking into
 * account which world the query originated from.
 */
static uint64_t ffa_features_handler(uint32_t smc_fid,
				     bool secure_origin,
				     uint64_t x1,
				     uint64_t x2,
				     uint64_t x3,
				     uint64_t x4,
				     void *cookie,
				     void *handle,
				     uint64_t flags)
{
	uint32_t function_id = (uint32_t) x1;
	uint32_t input_properties = (uint32_t) x2;

	/*
	 * Check if a Feature ID was requested. Bit 31 clear denotes a
	 * feature ID rather than a function ID.
	 */
	if ((function_id & FFA_FEATURES_BIT31_MASK) == 0U) {
		/* We currently don't support any additional features. */
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/*
	 * Handle the cases where we have separate handlers due to additional
	 * properties.
	 */
	switch (function_id) {
	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
		return ffa_features_retrieve_request(secure_origin,
						     input_properties,
						     handle);
	}

	/*
	 * We don't currently support additional input properties for these
	 * other ABIs therefore ensure this value is set to 0.
	 */
	if (input_properties != 0U) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_NOT_SUPPORTED);
	}

	/* Report if any other FF-A ABI is supported. */
	switch (function_id) {
	/* Supported features from both worlds. */
	case FFA_ERROR:
	case FFA_SUCCESS_SMC32:
	case FFA_INTERRUPT:
	case FFA_SPM_ID_GET:
	case FFA_ID_GET:
	case FFA_FEATURES:
	case FFA_VERSION:
	case FFA_RX_RELEASE:
	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
	case FFA_MSG_SEND_DIRECT_REQ2_SMC64:
	case FFA_PARTITION_INFO_GET:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_MEM_FRAG_TX:
	case FFA_MSG_RUN:

		/*
		 * We are relying on the fact that the other registers
		 * will be set to 0 as these values align with the
		 * currently implemented features of the SPMC. If this
		 * changes this function must be extended to handle
		 * reporting the additional functionality.
		 */

		SMC_RET1(handle, FFA_SUCCESS_SMC32);
		/* Execution stops here. */

	/* Supported ABIs only from the secure world. */
	case FFA_MEM_PERM_GET_SMC32:
	case FFA_MEM_PERM_GET_SMC64:
	case FFA_MEM_PERM_SET_SMC32:
	case FFA_MEM_PERM_SET_SMC64:
	/*
	 * These ABIs are only supported from S-EL0 SPs; when the SPMC has no
	 * S-EL0 SP support they are reported unsupported, otherwise they fall
	 * through to the common secure-world check below.
	 */
	#if !(SPMC_AT_EL3_SEL0_SP)
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	#endif
	/* fall through */

	case FFA_SECONDARY_EP_REGISTER_SMC64:
	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
	case FFA_MSG_SEND_DIRECT_RESP2_SMC64:
	case FFA_MEM_RELINQUISH:
	case FFA_MSG_WAIT:
	case FFA_CONSOLE_LOG_SMC32:
	case FFA_CONSOLE_LOG_SMC64:
		if (!secure_origin) {
			return spmc_ffa_error_return(handle,
				FFA_ERROR_NOT_SUPPORTED);
		}
		SMC_RET1(handle, FFA_SUCCESS_SMC32);
		/* Execution stops here. */

	/* Supported features only from the normal world. */
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
	case FFA_MEM_RECLAIM:
	case FFA_MEM_FRAG_RX:

		if (secure_origin) {
			return spmc_ffa_error_return(handle,
					FFA_ERROR_NOT_SUPPORTED);
		}
		SMC_RET1(handle, FFA_SUCCESS_SMC32);
		/* Execution stops here. */

	default:
		return spmc_ffa_error_return(handle,
					FFA_ERROR_NOT_SUPPORTED);
	}
}
1417 
1418 static uint64_t ffa_id_get_handler(uint32_t smc_fid,
1419 				   bool secure_origin,
1420 				   uint64_t x1,
1421 				   uint64_t x2,
1422 				   uint64_t x3,
1423 				   uint64_t x4,
1424 				   void *cookie,
1425 				   void *handle,
1426 				   uint64_t flags)
1427 {
1428 	if (secure_origin) {
1429 		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1430 			 spmc_get_current_sp_ctx()->sp_id);
1431 	} else {
1432 		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1433 			 spmc_get_hyp_ctx()->ns_ep_id);
1434 	}
1435 }
1436 
1437 /*
1438  * Enable an SP to query the ID assigned to the SPMC.
1439  */
1440 static uint64_t ffa_spm_id_get_handler(uint32_t smc_fid,
1441 				       bool secure_origin,
1442 				       uint64_t x1,
1443 				       uint64_t x2,
1444 				       uint64_t x3,
1445 				       uint64_t x4,
1446 				       void *cookie,
1447 				       void *handle,
1448 				       uint64_t flags)
1449 {
1450 	assert(x1 == 0UL);
1451 	assert(x2 == 0UL);
1452 	assert(x3 == 0UL);
1453 	assert(x4 == 0UL);
1454 	assert(SMC_GET_GP(handle, CTX_GPREG_X5) == 0UL);
1455 	assert(SMC_GET_GP(handle, CTX_GPREG_X6) == 0UL);
1456 	assert(SMC_GET_GP(handle, CTX_GPREG_X7) == 0UL);
1457 
1458 	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0, FFA_SPMC_ID);
1459 }
1460 
/*
 * Handler for FFA_RUN: allow the normal world to donate CPU cycles to the
 * execution context of the secure partition identified in x1.
 * x1 encodes both the target endpoint ID and the vCPU ID to run.
 */
static uint64_t ffa_run_handler(uint32_t smc_fid,
				bool secure_origin,
				uint64_t x1,
				uint64_t x2,
				uint64_t x3,
				uint64_t x4,
				void *cookie,
				void *handle,
				uint64_t flags)
{
	struct secure_partition_desc *sp;
	uint16_t target_id = FFA_RUN_EP_ID(x1);
	uint16_t vcpu_id = FFA_RUN_VCPU_ID(x1);
	unsigned int idx;
	unsigned int *rt_state;
	unsigned int *rt_model;

	/* Can only be called from the normal world. */
	if (secure_origin) {
		ERROR("FFA_RUN can only be called from NWd.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Cannot run a Normal world partition. */
	if (ffa_is_normal_world_id(target_id)) {
		ERROR("Cannot run a NWd partition (0x%x).\n", target_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Check that the target SP exists. */
	sp = spmc_get_sp_ctx(target_id);
	if (sp == NULL) {
		ERROR("Unknown partition ID (0x%x).\n", target_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	idx = get_ec_index(sp);

	/* The requested vCPU must match the current core's context. */
	if (idx != vcpu_id) {
		ERROR("Cannot run vcpu %d != %d.\n", idx, vcpu_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}
	/* The run state of an S-EL0 SP is protected by a spinlock. */
	if (sp->runtime_el == S_EL0) {
		spin_lock(&sp->rt_state_lock);
	}
	rt_state = &((sp->ec[idx]).rt_state);
	rt_model = &((sp->ec[idx]).rt_model);
	if (*rt_state == RT_STATE_RUNNING) {
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}
		ERROR("Partition (0x%x) is already running.\n", target_id);
		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
	}

	/*
	 * Sanity check that if the execution context was not waiting then it
	 * was either in the direct request or the run partition runtime model.
	 */
	if (*rt_state == RT_STATE_PREEMPTED || *rt_state == RT_STATE_BLOCKED) {
		assert(*rt_model == RT_MODEL_RUN ||
		       *rt_model == RT_MODEL_DIR_REQ);
	}

	/*
	 * If the context was waiting then update the partition runtime model.
	 */
	if (*rt_state == RT_STATE_WAITING) {
		*rt_model = RT_MODEL_RUN;
	}

	/*
	 * Forward the request to the correct SP vCPU after updating
	 * its state.
	 */
	*rt_state = RT_STATE_RUNNING;

	if (sp->runtime_el == S_EL0) {
		spin_unlock(&sp->rt_state_lock);
	}

	return spmc_smc_return(smc_fid, secure_origin, x1, 0, 0, 0,
			       handle, cookie, flags, target_id, sp->ffa_version);
}
1549 
1550 static uint64_t rx_release_handler(uint32_t smc_fid,
1551 				   bool secure_origin,
1552 				   uint64_t x1,
1553 				   uint64_t x2,
1554 				   uint64_t x3,
1555 				   uint64_t x4,
1556 				   void *cookie,
1557 				   void *handle,
1558 				   uint64_t flags)
1559 {
1560 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1561 
1562 	spin_lock(&mbox->lock);
1563 
1564 	if (mbox->state != MAILBOX_STATE_FULL) {
1565 		spin_unlock(&mbox->lock);
1566 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1567 	}
1568 
1569 	mbox->state = MAILBOX_STATE_EMPTY;
1570 	spin_unlock(&mbox->lock);
1571 
1572 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1573 }
1574 
1575 static uint64_t spmc_ffa_console_log(uint32_t smc_fid,
1576 				     bool secure_origin,
1577 				     uint64_t x1,
1578 				     uint64_t x2,
1579 				     uint64_t x3,
1580 				     uint64_t x4,
1581 				     void *cookie,
1582 				     void *handle,
1583 				     uint64_t flags)
1584 {
1585 	/* Maximum number of characters is 48: 6 registers of 8 bytes each. */
1586 	char chars[48] = {0};
1587 	size_t chars_max;
1588 	size_t chars_count = x1;
1589 
1590 	/* Does not support request from Nwd. */
1591 	if (!secure_origin) {
1592 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1593 	}
1594 
1595 	assert(smc_fid == FFA_CONSOLE_LOG_SMC32 || smc_fid == FFA_CONSOLE_LOG_SMC64);
1596 	if (smc_fid == FFA_CONSOLE_LOG_SMC32) {
1597 		uint32_t *registers = (uint32_t *)chars;
1598 		registers[0] = (uint32_t)x2;
1599 		registers[1] = (uint32_t)x3;
1600 		registers[2] = (uint32_t)x4;
1601 		registers[3] = (uint32_t)SMC_GET_GP(handle, CTX_GPREG_X5);
1602 		registers[4] = (uint32_t)SMC_GET_GP(handle, CTX_GPREG_X6);
1603 		registers[5] = (uint32_t)SMC_GET_GP(handle, CTX_GPREG_X7);
1604 		chars_max = 6 * sizeof(uint32_t);
1605 	} else {
1606 		uint64_t *registers = (uint64_t *)chars;
1607 		registers[0] = x2;
1608 		registers[1] = x3;
1609 		registers[2] = x4;
1610 		registers[3] = SMC_GET_GP(handle, CTX_GPREG_X5);
1611 		registers[4] = SMC_GET_GP(handle, CTX_GPREG_X6);
1612 		registers[5] = SMC_GET_GP(handle, CTX_GPREG_X7);
1613 		chars_max = 6 * sizeof(uint64_t);
1614 	}
1615 
1616 	if ((chars_count == 0) || (chars_count > chars_max)) {
1617 		return spmc_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
1618 	}
1619 
1620 	for (size_t i = 0; (i < chars_count) && (chars[i] != '\0'); i++) {
1621 		putchar(chars[i]);
1622 	}
1623 
1624 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1625 }
1626 
1627 /*
1628  * Perform initial validation on the provided secondary entry point.
1629  * For now ensure it does not lie within the BL31 Image or the SP's
1630  * RX/TX buffers as these are mapped within EL3.
1631  * TODO: perform validation for additional invalid memory regions.
1632  */
1633 static int validate_secondary_ep(uintptr_t ep, struct secure_partition_desc *sp)
1634 {
1635 	struct mailbox *mb;
1636 	uintptr_t buffer_size;
1637 	uintptr_t sp_rx_buffer;
1638 	uintptr_t sp_tx_buffer;
1639 	uintptr_t sp_rx_buffer_limit;
1640 	uintptr_t sp_tx_buffer_limit;
1641 
1642 	mb = &sp->mailbox;
1643 	buffer_size = (uintptr_t) (mb->rxtx_page_count * FFA_PAGE_SIZE);
1644 	sp_rx_buffer = (uintptr_t) mb->rx_buffer;
1645 	sp_tx_buffer = (uintptr_t) mb->tx_buffer;
1646 	sp_rx_buffer_limit = sp_rx_buffer + buffer_size;
1647 	sp_tx_buffer_limit = sp_tx_buffer + buffer_size;
1648 
1649 	/*
1650 	 * Check if the entry point lies within BL31, or the
1651 	 * SP's RX or TX buffer.
1652 	 */
1653 	if ((ep >= BL31_BASE && ep < BL31_LIMIT) ||
1654 	    (ep >= sp_rx_buffer && ep < sp_rx_buffer_limit) ||
1655 	    (ep >= sp_tx_buffer && ep < sp_tx_buffer_limit)) {
1656 		return -EINVAL;
1657 	}
1658 	return 0;
1659 }
1660 
1661 /*******************************************************************************
1662  * This function handles the FFA_SECONDARY_EP_REGISTER SMC to allow an SP to
1663  *  register an entry point for initialization during a secondary cold boot.
1664  ******************************************************************************/
1665 static uint64_t ffa_sec_ep_register_handler(uint32_t smc_fid,
1666 					    bool secure_origin,
1667 					    uint64_t x1,
1668 					    uint64_t x2,
1669 					    uint64_t x3,
1670 					    uint64_t x4,
1671 					    void *cookie,
1672 					    void *handle,
1673 					    uint64_t flags)
1674 {
1675 	struct secure_partition_desc *sp;
1676 	struct sp_exec_ctx *sp_ctx;
1677 
1678 	/* This request cannot originate from the Normal world. */
1679 	if (!secure_origin) {
1680 		WARN("%s: Can only be called from SWd.\n", __func__);
1681 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1682 	}
1683 
1684 	/* Get the context of the current SP. */
1685 	sp = spmc_get_current_sp_ctx();
1686 	if (sp == NULL) {
1687 		WARN("%s: Cannot find SP context.\n", __func__);
1688 		return spmc_ffa_error_return(handle,
1689 					     FFA_ERROR_INVALID_PARAMETER);
1690 	}
1691 
1692 	/* Only an S-EL1 SP should be invoking this ABI. */
1693 	if (sp->runtime_el != S_EL1) {
1694 		WARN("%s: Can only be called for a S-EL1 SP.\n", __func__);
1695 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1696 	}
1697 
1698 	/* Ensure the SP is in its initialization state. */
1699 	sp_ctx = spmc_get_sp_ec(sp);
1700 	if (sp_ctx->rt_model != RT_MODEL_INIT) {
1701 		WARN("%s: Can only be called during SP initialization.\n",
1702 		     __func__);
1703 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1704 	}
1705 
1706 	/* Perform initial validation of the secondary entry point. */
1707 	if (validate_secondary_ep(x1, sp)) {
1708 		WARN("%s: Invalid entry point provided (0x%lx).\n",
1709 		     __func__, x1);
1710 		return spmc_ffa_error_return(handle,
1711 					     FFA_ERROR_INVALID_PARAMETER);
1712 	}
1713 
1714 	/*
1715 	 * Update the secondary entrypoint in SP context.
1716 	 * We don't need a lock here as during partition initialization there
1717 	 * will only be a single core online.
1718 	 */
1719 	sp->secondary_ep = x1;
1720 	VERBOSE("%s: 0x%lx\n", __func__, sp->secondary_ep);
1721 
1722 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1723 }
1724 
1725 /*******************************************************************************
1726  * Permissions are encoded using a different format in the FFA_MEM_PERM_* ABIs
1727  * than in the Trusted Firmware, where the mmap_attr_t enum type is used. This
1728  * function converts a permission value from the FF-A format to the mmap_attr_t
1729  * format by setting MT_RW/MT_RO, MT_USER/MT_PRIVILEGED and
1730  * MT_EXECUTE/MT_EXECUTE_NEVER. The other fields are left as 0 because they are
1731  * ignored by the function xlat_change_mem_attributes_ctx().
1732  ******************************************************************************/
1733 static unsigned int ffa_perm_to_mmap_perm(unsigned int perms)
1734 {
1735 	unsigned int tf_attr = 0U;
1736 	unsigned int access;
1737 
1738 	/* Deal with data access permissions first. */
1739 	access = (perms & FFA_MEM_PERM_DATA_MASK) >> FFA_MEM_PERM_DATA_SHIFT;
1740 
1741 	switch (access) {
1742 	case FFA_MEM_PERM_DATA_RW:
1743 		/* Return 0 if the execute is set with RW. */
1744 		if ((perms & FFA_MEM_PERM_INST_NON_EXEC) != 0) {
1745 			tf_attr |= MT_RW | MT_USER | MT_EXECUTE_NEVER;
1746 		}
1747 		break;
1748 
1749 	case FFA_MEM_PERM_DATA_RO:
1750 		tf_attr |= MT_RO | MT_USER;
1751 		/* Deal with the instruction access permissions next. */
1752 		if ((perms & FFA_MEM_PERM_INST_NON_EXEC) == 0) {
1753 			tf_attr |= MT_EXECUTE;
1754 		} else {
1755 			tf_attr |= MT_EXECUTE_NEVER;
1756 		}
1757 		break;
1758 
1759 	case FFA_MEM_PERM_DATA_NA:
1760 	default:
1761 		return tf_attr;
1762 	}
1763 
1764 	return tf_attr;
1765 }
1766 
1767 /*******************************************************************************
1768  * Handler to set the permissions of a set of contiguous pages of a S-EL0 SP
1769  ******************************************************************************/
1770 static uint64_t ffa_mem_perm_set_handler(uint32_t smc_fid,
1771 					 bool secure_origin,
1772 					 uint64_t x1,
1773 					 uint64_t x2,
1774 					 uint64_t x3,
1775 					 uint64_t x4,
1776 					 void *cookie,
1777 					 void *handle,
1778 					 uint64_t flags)
1779 {
1780 	struct secure_partition_desc *sp;
1781 	unsigned int idx;
1782 	uintptr_t base_va = (uintptr_t) x1;
1783 	size_t size = (size_t)(x2 * PAGE_SIZE);
1784 	uint32_t tf_attr;
1785 	int ret;
1786 
1787 	/* This request cannot originate from the Normal world. */
1788 	if (!secure_origin) {
1789 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1790 	}
1791 
1792 	if (size == 0) {
1793 		return spmc_ffa_error_return(handle,
1794 					     FFA_ERROR_INVALID_PARAMETER);
1795 	}
1796 
1797 	/* Get the context of the current SP. */
1798 	sp = spmc_get_current_sp_ctx();
1799 	if (sp == NULL) {
1800 		return spmc_ffa_error_return(handle,
1801 					     FFA_ERROR_INVALID_PARAMETER);
1802 	}
1803 
1804 	/* A S-EL1 SP has no business invoking this ABI. */
1805 	if (sp->runtime_el == S_EL1) {
1806 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1807 	}
1808 
1809 	if ((x3 & ~((uint64_t)FFA_MEM_PERM_MASK)) != 0) {
1810 		return spmc_ffa_error_return(handle,
1811 					     FFA_ERROR_INVALID_PARAMETER);
1812 	}
1813 
1814 	/* Get the execution context of the calling SP. */
1815 	idx = get_ec_index(sp);
1816 
1817 	/*
1818 	 * Ensure that the S-EL0 SP is initialising itself. We do not need to
1819 	 * synchronise this operation through a spinlock since a S-EL0 SP is UP
1820 	 * and can only be initialising on this cpu.
1821 	 */
1822 	if (sp->ec[idx].rt_model != RT_MODEL_INIT) {
1823 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1824 	}
1825 
1826 	VERBOSE("Setting memory permissions:\n");
1827 	VERBOSE("  Start address  : 0x%lx\n", base_va);
1828 	VERBOSE("  Number of pages: %lu (%zu bytes)\n", x2, size);
1829 	VERBOSE("  Attributes     : 0x%x\n", (uint32_t)x3);
1830 
1831 	/* Convert inbound permissions to TF-A permission attributes */
1832 	tf_attr = ffa_perm_to_mmap_perm((unsigned int)x3);
1833 	if (tf_attr == 0U) {
1834 		return spmc_ffa_error_return(handle,
1835 					     FFA_ERROR_INVALID_PARAMETER);
1836 	}
1837 
1838 	/* Request the change in permissions */
1839 	ret = xlat_change_mem_attributes_ctx(sp->xlat_ctx_handle,
1840 					     base_va, size, tf_attr);
1841 	if (ret != 0) {
1842 		return spmc_ffa_error_return(handle,
1843 					     FFA_ERROR_INVALID_PARAMETER);
1844 	}
1845 
1846 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1847 }
1848 
1849 /*******************************************************************************
1850  * Permissions are encoded using a different format in the FFA_MEM_PERM_* ABIs
1851  * than in the Trusted Firmware, where the mmap_attr_t enum type is used. This
1852  * function converts a permission value from the mmap_attr_t format to the FF-A
1853  * format.
1854  ******************************************************************************/
1855 static unsigned int mmap_perm_to_ffa_perm(unsigned int attr)
1856 {
1857 	unsigned int perms = 0U;
1858 	unsigned int data_access;
1859 
1860 	if ((attr & MT_USER) == 0) {
1861 		/* No access from EL0. */
1862 		data_access = FFA_MEM_PERM_DATA_NA;
1863 	} else {
1864 		if ((attr & MT_RW) != 0) {
1865 			data_access = FFA_MEM_PERM_DATA_RW;
1866 		} else {
1867 			data_access = FFA_MEM_PERM_DATA_RO;
1868 		}
1869 	}
1870 
1871 	perms |= (data_access & FFA_MEM_PERM_DATA_MASK)
1872 		<< FFA_MEM_PERM_DATA_SHIFT;
1873 
1874 	if ((attr & MT_EXECUTE_NEVER) != 0U) {
1875 		perms |= FFA_MEM_PERM_INST_NON_EXEC;
1876 	}
1877 
1878 	return perms;
1879 }
1880 
1881 /*******************************************************************************
1882  * Handler to get the permissions of a set of contiguous pages of a S-EL0 SP
1883  ******************************************************************************/
1884 static uint64_t ffa_mem_perm_get_handler(uint32_t smc_fid,
1885 					 bool secure_origin,
1886 					 uint64_t x1,
1887 					 uint64_t x2,
1888 					 uint64_t x3,
1889 					 uint64_t x4,
1890 					 void *cookie,
1891 					 void *handle,
1892 					 uint64_t flags)
1893 {
1894 	struct secure_partition_desc *sp;
1895 	unsigned int idx;
1896 	uintptr_t base_va = (uintptr_t)x1;
1897 	uint64_t max_page_count = x2 + 1;
1898 	uint64_t page_count = 0;
1899 	uint32_t base_page_attr = 0;
1900 	uint32_t page_attr = 0;
1901 	unsigned int table_level;
1902 	int ret;
1903 
1904 	/* This request cannot originate from the Normal world. */
1905 	if (!secure_origin) {
1906 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1907 	}
1908 
1909 	/* Get the context of the current SP. */
1910 	sp = spmc_get_current_sp_ctx();
1911 	if (sp == NULL) {
1912 		return spmc_ffa_error_return(handle,
1913 					     FFA_ERROR_INVALID_PARAMETER);
1914 	}
1915 
1916 	/* A S-EL1 SP has no business invoking this ABI. */
1917 	if (sp->runtime_el == S_EL1) {
1918 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1919 	}
1920 
1921 	/* Get the execution context of the calling SP. */
1922 	idx = get_ec_index(sp);
1923 
1924 	/*
1925 	 * Ensure that the S-EL0 SP is initialising itself. We do not need to
1926 	 * synchronise this operation through a spinlock since a S-EL0 SP is UP
1927 	 * and can only be initialising on this cpu.
1928 	 */
1929 	if (sp->ec[idx].rt_model != RT_MODEL_INIT) {
1930 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1931 	}
1932 
1933 	base_va &= ~(PAGE_SIZE_MASK);
1934 
1935 	/* Request the permissions */
1936 	ret = xlat_get_mem_attributes_ctx(sp->xlat_ctx_handle, base_va,
1937 			&base_page_attr, &table_level);
1938 	if (ret != 0) {
1939 		return spmc_ffa_error_return(handle,
1940 					     FFA_ERROR_INVALID_PARAMETER);
1941 	}
1942 
1943 	/*
1944 	 * Caculate how many pages in this block entry from base_va including
1945 	 * its page.
1946 	 */
1947 	page_count = ((XLAT_BLOCK_SIZE(table_level) -
1948 			(base_va & XLAT_BLOCK_MASK(table_level))) >> PAGE_SIZE_SHIFT);
1949 	base_va += XLAT_BLOCK_SIZE(table_level);
1950 
1951 	while ((page_count < max_page_count) && (base_va != 0x00)) {
1952 		ret = xlat_get_mem_attributes_ctx(sp->xlat_ctx_handle, base_va,
1953 				&page_attr, &table_level);
1954 		if (ret != 0) {
1955 			return spmc_ffa_error_return(handle,
1956 						     FFA_ERROR_INVALID_PARAMETER);
1957 		}
1958 
1959 		if (page_attr != base_page_attr) {
1960 			break;
1961 		}
1962 
1963 		base_va += XLAT_BLOCK_SIZE(table_level);
1964 		page_count += (XLAT_BLOCK_SIZE(table_level) >> PAGE_SIZE_SHIFT);
1965 	}
1966 
1967 	if (page_count > max_page_count) {
1968 		page_count = max_page_count;
1969 	}
1970 
1971 	/* Convert TF-A permission to FF-A permissions attributes. */
1972 	x2 = mmap_perm_to_ffa_perm(base_page_attr);
1973 
1974 	/* x3 should be page count - 1 */
1975 	SMC_RET4(handle, FFA_SUCCESS_SMC32, 0, x2, --page_count);
1976 }
1977 
1978 /*******************************************************************************
1979  * This function will parse the Secure Partition Manifest. From manifest, it
1980  * will fetch details for preparing Secure partition image context and secure
1981  * partition image boot arguments if any.
1982  ******************************************************************************/
1983 static int sp_manifest_parse(void *sp_manifest, int offset,
1984 			     struct secure_partition_desc *sp,
1985 			     entry_point_info_t *ep_info,
1986 			     int32_t *boot_info_reg)
1987 {
1988 	int32_t ret, node;
1989 	uint32_t config_32;
1990 
1991 	/*
1992 	 * Look for the mandatory fields that are expected to be present in
1993 	 * the SP manifests.
1994 	 */
1995 	node = fdt_path_offset(sp_manifest, "/");
1996 	if (node < 0) {
1997 		ERROR("Did not find root node.\n");
1998 		return node;
1999 	}
2000 
2001 	ret = fdt_read_uint32_array(sp_manifest, node, "uuid",
2002 				    ARRAY_SIZE(sp->uuid), sp->uuid);
2003 	if (ret != 0) {
2004 		ERROR("Missing Secure Partition UUID.\n");
2005 		return ret;
2006 	}
2007 
2008 	ret = fdt_read_uint32(sp_manifest, node, "exception-level", &config_32);
2009 	if (ret != 0) {
2010 		ERROR("Missing SP Exception Level information.\n");
2011 		return ret;
2012 	}
2013 
2014 	sp->runtime_el = config_32;
2015 
2016 	ret = fdt_read_uint32(sp_manifest, node, "ffa-version", &config_32);
2017 	if (ret != 0) {
2018 		ERROR("Missing Secure Partition FF-A Version.\n");
2019 		return ret;
2020 	}
2021 
2022 	sp->ffa_version = config_32;
2023 
2024 	ret = fdt_read_uint32(sp_manifest, node, "execution-state", &config_32);
2025 	if (ret != 0) {
2026 		ERROR("Missing Secure Partition Execution State.\n");
2027 		return ret;
2028 	}
2029 
2030 	sp->execution_state = config_32;
2031 
2032 	ret = fdt_read_uint32(sp_manifest, node,
2033 			      "messaging-method", &config_32);
2034 	if (ret != 0) {
2035 		ERROR("Missing Secure Partition messaging method.\n");
2036 		return ret;
2037 	}
2038 
2039 	/* Validate this entry, we currently only support direct messaging. */
2040 	if ((config_32 & ~(FFA_PARTITION_DIRECT_REQ_RECV |
2041 			  FFA_PARTITION_DIRECT_REQ_SEND |
2042 			  FFA_PARTITION_DIRECT_REQ2_RECV |
2043 			  FFA_PARTITION_DIRECT_REQ2_SEND)) != 0U) {
2044 		WARN("Invalid Secure Partition messaging method (0x%x)\n",
2045 		     config_32);
2046 		return -EINVAL;
2047 	}
2048 
2049 	sp->properties = config_32;
2050 
2051 	ret = fdt_read_uint32(sp_manifest, node,
2052 			      "execution-ctx-count", &config_32);
2053 
2054 	if (ret != 0) {
2055 		ERROR("Missing SP Execution Context Count.\n");
2056 		return ret;
2057 	}
2058 
2059 	/*
2060 	 * Ensure this field is set correctly in the manifest however
2061 	 * since this is currently a hardcoded value for S-EL1 partitions
2062 	 * we don't need to save it here, just validate.
2063 	 */
2064 	if ((sp->runtime_el == S_EL1) && (config_32 != PLATFORM_CORE_COUNT)) {
2065 		ERROR("SP Execution Context Count (%u) must be %u.\n",
2066 			config_32, PLATFORM_CORE_COUNT);
2067 		return -EINVAL;
2068 	}
2069 
2070 	/*
2071 	 * Look for the optional fields that are expected to be present in
2072 	 * an SP manifest.
2073 	 */
2074 	ret = fdt_read_uint32(sp_manifest, node, "id", &config_32);
2075 	if (ret != 0) {
2076 		WARN("Missing Secure Partition ID.\n");
2077 	} else {
2078 		if (!is_ffa_secure_id_valid(config_32)) {
2079 			ERROR("Invalid Secure Partition ID (0x%x).\n",
2080 			      config_32);
2081 			return -EINVAL;
2082 		}
2083 		sp->sp_id = config_32;
2084 	}
2085 
2086 	ret = fdt_read_uint32(sp_manifest, node,
2087 			      "power-management-messages", &config_32);
2088 	if (ret != 0) {
2089 		WARN("Missing Power Management Messages entry.\n");
2090 	} else {
2091 		if ((sp->runtime_el == S_EL0) && (config_32 != 0)) {
2092 			ERROR("Power messages not supported for S-EL0 SP\n");
2093 			return -EINVAL;
2094 		}
2095 
2096 		/*
2097 		 * Ensure only the currently supported power messages have
2098 		 * been requested.
2099 		 */
2100 		if (config_32 & ~(FFA_PM_MSG_SUB_CPU_OFF |
2101 				  FFA_PM_MSG_SUB_CPU_SUSPEND |
2102 				  FFA_PM_MSG_SUB_CPU_SUSPEND_RESUME)) {
2103 			ERROR("Requested unsupported PM messages (%x)\n",
2104 			      config_32);
2105 			return -EINVAL;
2106 		}
2107 		sp->pwr_mgmt_msgs = config_32;
2108 	}
2109 
2110 	ret = fdt_read_uint32(sp_manifest, node,
2111 			      "gp-register-num", &config_32);
2112 	if (ret != 0) {
2113 		WARN("Missing boot information register.\n");
2114 	} else {
2115 		/* Check if a register number between 0-3 is specified. */
2116 		if (config_32 < 4) {
2117 			*boot_info_reg = config_32;
2118 		} else {
2119 			WARN("Incorrect boot information register (%u).\n",
2120 			     config_32);
2121 		}
2122 	}
2123 
2124 	ret = fdt_read_uint32(sp_manifest, node,
2125 			      "vm-availability-messages", &config_32);
2126 	if (ret != 0) {
2127 		WARN("Missing VM availability messaging.\n");
2128 	} else if ((sp->properties & FFA_PARTITION_DIRECT_REQ_RECV) == 0) {
2129 		ERROR("VM availability messaging requested without "
2130 		      "direct message receive support.\n");
2131 		return -EINVAL;
2132 	} else {
2133 		/* Validate this entry. */
2134 		if ((config_32 & ~(FFA_VM_AVAILABILITY_CREATED |
2135 				  FFA_VM_AVAILABILITY_DESTROYED)) != 0U) {
2136 			WARN("Invalid VM availability messaging (0x%x)\n",
2137 			     config_32);
2138 			return -EINVAL;
2139 		}
2140 
2141 		if ((config_32 & FFA_VM_AVAILABILITY_CREATED) != 0U) {
2142 			sp->properties |= FFA_PARTITION_VM_CREATED;
2143 		}
2144 		if ((config_32 & FFA_VM_AVAILABILITY_DESTROYED) != 0U) {
2145 			sp->properties |= FFA_PARTITION_VM_DESTROYED;
2146 		}
2147 	}
2148 
2149 	return 0;
2150 }
2151 
2152 /*******************************************************************************
2153  * This function gets the Secure Partition Manifest base and maps the manifest
2154  * region.
2155  * Currently only one Secure Partition manifest is considered which is used to
2156  * prepare the context for the single Secure Partition.
2157  ******************************************************************************/
2158 static int find_and_prepare_sp_context(void)
2159 {
2160 	void *sp_manifest;
2161 	uintptr_t manifest_base;
2162 	uintptr_t manifest_base_align __maybe_unused;
2163 	entry_point_info_t *next_image_ep_info;
2164 	int32_t ret, boot_info_reg = -1;
2165 	struct secure_partition_desc *sp;
2166 	struct transfer_list_header *tl __maybe_unused;
2167 	struct transfer_list_entry *te __maybe_unused;
2168 
2169 	next_image_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
2170 	if (next_image_ep_info == NULL) {
2171 		WARN("No Secure Partition image provided by BL2.\n");
2172 		return -ENOENT;
2173 	}
2174 
2175 
2176 #if TRANSFER_LIST && !RESET_TO_BL31
2177 	tl = (struct transfer_list_header *)next_image_ep_info->args.arg3;
2178 	te = transfer_list_find(tl, TL_TAG_DT_FFA_MANIFEST);
2179 	if (te == NULL) {
2180 		WARN("Secure Partition manifest absent.\n");
2181 		return -ENOENT;
2182 	}
2183 
2184 	sp_manifest = (void *)transfer_list_entry_data(te);
2185 	manifest_base = (uintptr_t)sp_manifest;
2186 #else
2187 	sp_manifest = (void *)next_image_ep_info->args.arg0;
2188 	if (sp_manifest == NULL) {
2189 		WARN("Secure Partition manifest absent.\n");
2190 		return -ENOENT;
2191 	}
2192 
2193 	manifest_base = (uintptr_t)sp_manifest;
2194 	manifest_base_align = page_align(manifest_base, DOWN);
2195 
2196 	/*
2197 	 * Map the secure partition manifest region in the EL3 translation
2198 	 * regime.
2199 	 * Map an area equal to (2 * PAGE_SIZE) for now. During manifest base
2200 	 * alignment the region of 1 PAGE_SIZE from manifest align base may
2201 	 * not completely accommodate the secure partition manifest region.
2202 	 */
2203 	ret = mmap_add_dynamic_region((unsigned long long)manifest_base_align,
2204 				      manifest_base_align,
2205 				      PAGE_SIZE * 2,
2206 				      MT_RO_DATA);
2207 	if (ret != 0) {
2208 		ERROR("Error while mapping SP manifest (%d).\n", ret);
2209 		return ret;
2210 	}
2211 #endif
2212 
2213 	ret = fdt_node_offset_by_compatible(sp_manifest, -1,
2214 					    "arm,ffa-manifest-1.0");
2215 	if (ret < 0) {
2216 		ERROR("Error happened in SP manifest reading.\n");
2217 		return -EINVAL;
2218 	}
2219 
2220 	/*
2221 	 * Store the size of the manifest so that it can be used later to pass
2222 	 * the manifest as boot information later.
2223 	 */
2224 	next_image_ep_info->args.arg1 = fdt_totalsize(sp_manifest);
2225 	INFO("Manifest adr = %lx , size = %lu bytes\n", manifest_base,
2226 	     next_image_ep_info->args.arg1);
2227 
2228 	/*
2229 	 * Select an SP descriptor for initialising the partition's execution
2230 	 * context on the primary CPU.
2231 	 */
2232 	sp = spmc_get_current_sp_ctx();
2233 
2234 #if SPMC_AT_EL3_SEL0_SP
2235 	/* Assign translation tables context. */
2236 	sp_desc->xlat_ctx_handle = spm_get_sp_xlat_context();
2237 
2238 #endif /* SPMC_AT_EL3_SEL0_SP */
2239 	/* Initialize entry point information for the SP */
2240 	SET_PARAM_HEAD(next_image_ep_info, PARAM_EP, VERSION_1,
2241 		       SECURE | EP_ST_ENABLE);
2242 
2243 	/* Parse the SP manifest. */
2244 	ret = sp_manifest_parse(sp_manifest, ret, sp, next_image_ep_info,
2245 				&boot_info_reg);
2246 	if (ret != 0) {
2247 		ERROR("Error in Secure Partition manifest parsing.\n");
2248 		return ret;
2249 	}
2250 
2251 	/* Perform any common initialisation. */
2252 	spmc_sp_common_setup(sp, next_image_ep_info, boot_info_reg);
2253 
2254 	/* Perform any initialisation specific to S-EL1 SPs. */
2255 	if (sp->runtime_el == S_EL1) {
2256 		spmc_el1_sp_setup(sp, next_image_ep_info);
2257 		spmc_sp_common_ep_commit(sp, next_image_ep_info);
2258 	}
2259 #if SPMC_AT_EL3_SEL0_SP
2260 	/* Perform any initialisation specific to S-EL0 SPs. */
2261 	else if (sp->runtime_el == S_EL0) {
2262 		/* Setup spsr in endpoint info for common context management routine. */
2263 		spmc_el0_sp_spsr_setup(next_image_ep_info);
2264 
2265 		spmc_sp_common_ep_commit(sp, next_image_ep_info);
2266 
2267 		/*
2268 		 * Perform any initialisation specific to S-EL0 not set by common
2269 		 * context management routine.
2270 		 */
2271 		spmc_el0_sp_setup(sp, boot_info_reg, sp_manifest);
2272 	}
2273 #endif /* SPMC_AT_EL3_SEL0_SP */
2274 	else {
2275 		ERROR("Unexpected runtime EL: %u\n", sp->runtime_el);
2276 		return -EINVAL;
2277 	}
2278 
2279 	return 0;
2280 }
2281 
2282 /*******************************************************************************
2283  * This function takes an SP context pointer and performs a synchronous entry
2284  * into it.
2285  ******************************************************************************/
2286 static int32_t logical_sp_init(void)
2287 {
2288 	int32_t rc = 0;
2289 	struct el3_lp_desc *el3_lp_descs;
2290 
2291 	/* Perform initial validation of the Logical Partitions. */
2292 	rc = el3_sp_desc_validate();
2293 	if (rc != 0) {
2294 		ERROR("Logical Partition validation failed!\n");
2295 		return rc;
2296 	}
2297 
2298 	el3_lp_descs = get_el3_lp_array();
2299 
2300 	INFO("Logical Secure Partition init start.\n");
2301 	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
2302 		rc = el3_lp_descs[i].init();
2303 		if (rc != 0) {
2304 			ERROR("Logical SP (0x%x) Failed to Initialize\n",
2305 			      el3_lp_descs[i].sp_id);
2306 			return rc;
2307 		}
2308 		VERBOSE("Logical SP (0x%x) Initialized\n",
2309 			      el3_lp_descs[i].sp_id);
2310 	}
2311 
2312 	INFO("Logical Secure Partition init completed.\n");
2313 
2314 	return rc;
2315 }
2316 
/*******************************************************************************
 * This function takes an SP execution context and performs a synchronous
 * entry into it, returning the value the SP hands back on exit.
 ******************************************************************************/
uint64_t spmc_sp_synchronous_entry(struct sp_exec_ctx *ec)
{
	uint64_t rc;

	assert(ec != NULL);

	/* Assign the context of the SP to this CPU */
	cm_set_context(&(ec->cpu_ctx), SECURE);

	/* Restore the context assigned above */
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);

	/* Invalidate TLBs at EL1. */
	tlbivmalle1();
	dsbish();

	/* Enter Secure Partition */
	rc = spm_secure_partition_enter(&ec->c_rt_ctx);

	/* Save secure state */
	cm_el1_sysregs_context_save(SECURE);

	return rc;
}
2342 
2343 /*******************************************************************************
2344  * SPMC Helper Functions.
2345  ******************************************************************************/
2346 static int32_t sp_init(void)
2347 {
2348 	uint64_t rc;
2349 	struct secure_partition_desc *sp;
2350 	struct sp_exec_ctx *ec;
2351 
2352 	sp = spmc_get_current_sp_ctx();
2353 	ec = spmc_get_sp_ec(sp);
2354 	ec->rt_model = RT_MODEL_INIT;
2355 	ec->rt_state = RT_STATE_RUNNING;
2356 
2357 	INFO("Secure Partition (0x%x) init start.\n", sp->sp_id);
2358 
2359 	rc = spmc_sp_synchronous_entry(ec);
2360 	if (rc != 0) {
2361 		/* Indicate SP init was not successful. */
2362 		ERROR("SP (0x%x) failed to initialize (%lu).\n",
2363 		      sp->sp_id, rc);
2364 		return 0;
2365 	}
2366 
2367 	ec->rt_state = RT_STATE_WAITING;
2368 	INFO("Secure Partition initialized.\n");
2369 
2370 	return 1;
2371 }
2372 
2373 static void initalize_sp_descs(void)
2374 {
2375 	struct secure_partition_desc *sp;
2376 
2377 	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
2378 		sp = &sp_desc[i];
2379 		sp->sp_id = INV_SP_ID;
2380 		sp->mailbox.rx_buffer = NULL;
2381 		sp->mailbox.tx_buffer = NULL;
2382 		sp->mailbox.state = MAILBOX_STATE_EMPTY;
2383 		sp->secondary_ep = 0;
2384 	}
2385 }
2386 
2387 static void initalize_ns_ep_descs(void)
2388 {
2389 	struct ns_endpoint_desc *ns_ep;
2390 
2391 	for (unsigned int i = 0U; i < NS_PARTITION_COUNT; i++) {
2392 		ns_ep = &ns_ep_desc[i];
2393 		/*
2394 		 * Clashes with the Hypervisor ID but will not be a
2395 		 * problem in practice.
2396 		 */
2397 		ns_ep->ns_ep_id = 0;
2398 		ns_ep->ffa_version = 0;
2399 		ns_ep->mailbox.rx_buffer = NULL;
2400 		ns_ep->mailbox.tx_buffer = NULL;
2401 		ns_ep->mailbox.state = MAILBOX_STATE_EMPTY;
2402 	}
2403 }
2404 
2405 /*******************************************************************************
2406  * Initialize SPMC attributes for the SPMD.
2407  ******************************************************************************/
2408 void spmc_populate_attrs(spmc_manifest_attribute_t *spmc_attrs)
2409 {
2410 	spmc_attrs->major_version = FFA_VERSION_MAJOR;
2411 	spmc_attrs->minor_version = FFA_VERSION_MINOR;
2412 	spmc_attrs->exec_state = MODE_RW_64;
2413 	spmc_attrs->spmc_id = FFA_SPMC_ID;
2414 }
2415 
2416 /*******************************************************************************
2417  * Initialize contexts of all Secure Partitions.
2418  ******************************************************************************/
2419 int32_t spmc_setup(void)
2420 {
2421 	int32_t ret;
2422 	uint32_t flags;
2423 
2424 	/* Initialize endpoint descriptors */
2425 	initalize_sp_descs();
2426 	initalize_ns_ep_descs();
2427 
2428 	/*
2429 	 * Retrieve the information of the datastore for tracking shared memory
2430 	 * requests allocated by platform code and zero the region if available.
2431 	 */
2432 	ret = plat_spmc_shmem_datastore_get(&spmc_shmem_obj_state.data,
2433 					    &spmc_shmem_obj_state.data_size);
2434 	if (ret != 0) {
2435 		ERROR("Failed to obtain memory descriptor backing store!\n");
2436 		return ret;
2437 	}
2438 	memset(spmc_shmem_obj_state.data, 0, spmc_shmem_obj_state.data_size);
2439 
2440 	/* Setup logical SPs. */
2441 	ret = logical_sp_init();
2442 	if (ret != 0) {
2443 		ERROR("Failed to initialize Logical Partitions.\n");
2444 		return ret;
2445 	}
2446 
2447 	/* Perform physical SP setup. */
2448 
2449 	/* Disable MMU at EL1 (initialized by BL2) */
2450 	disable_mmu_icache_el1();
2451 
2452 	/* Initialize context of the SP */
2453 	INFO("Secure Partition context setup start.\n");
2454 
2455 	ret = find_and_prepare_sp_context();
2456 	if (ret != 0) {
2457 		ERROR("Error in SP finding and context preparation.\n");
2458 		return ret;
2459 	}
2460 
2461 	/* Register power management hooks with PSCI */
2462 	psci_register_spd_pm_hook(&spmc_pm);
2463 
2464 	/*
2465 	 * Register an interrupt handler for S-EL1 interrupts
2466 	 * when generated during code executing in the
2467 	 * non-secure state.
2468 	 */
2469 	flags = 0;
2470 	set_interrupt_rm_flag(flags, NON_SECURE);
2471 	ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
2472 					      spmc_sp_interrupt_handler,
2473 					      flags);
2474 	if (ret != 0) {
2475 		ERROR("Failed to register interrupt handler! (%d)\n", ret);
2476 		panic();
2477 	}
2478 
2479 	/* Register init function for deferred init.  */
2480 	bl31_register_bl32_init(&sp_init);
2481 
2482 	INFO("Secure Partition setup done.\n");
2483 
2484 	return 0;
2485 }
2486 
2487 /*******************************************************************************
2488  * Secure Partition Manager SMC handler.
2489  ******************************************************************************/
2490 uint64_t spmc_smc_handler(uint32_t smc_fid,
2491 			  bool secure_origin,
2492 			  uint64_t x1,
2493 			  uint64_t x2,
2494 			  uint64_t x3,
2495 			  uint64_t x4,
2496 			  void *cookie,
2497 			  void *handle,
2498 			  uint64_t flags)
2499 {
2500 	switch (smc_fid) {
2501 
2502 	case FFA_VERSION:
2503 		return ffa_version_handler(smc_fid, secure_origin, x1, x2, x3,
2504 					   x4, cookie, handle, flags);
2505 
2506 	case FFA_SPM_ID_GET:
2507 		return ffa_spm_id_get_handler(smc_fid, secure_origin, x1, x2,
2508 					     x3, x4, cookie, handle, flags);
2509 
2510 	case FFA_ID_GET:
2511 		return ffa_id_get_handler(smc_fid, secure_origin, x1, x2, x3,
2512 					  x4, cookie, handle, flags);
2513 
2514 	case FFA_FEATURES:
2515 		return ffa_features_handler(smc_fid, secure_origin, x1, x2, x3,
2516 					    x4, cookie, handle, flags);
2517 
2518 	case FFA_SECONDARY_EP_REGISTER_SMC64:
2519 		return ffa_sec_ep_register_handler(smc_fid, secure_origin, x1,
2520 						   x2, x3, x4, cookie, handle,
2521 						   flags);
2522 
2523 	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
2524 	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
2525 	case FFA_MSG_SEND_DIRECT_REQ2_SMC64:
2526 		return direct_req_smc_handler(smc_fid, secure_origin, x1, x2,
2527 					      x3, x4, cookie, handle, flags);
2528 
2529 	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
2530 	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
2531 	case FFA_MSG_SEND_DIRECT_RESP2_SMC64:
2532 		return direct_resp_smc_handler(smc_fid, secure_origin, x1, x2,
2533 					       x3, x4, cookie, handle, flags);
2534 
2535 	case FFA_RXTX_MAP_SMC32:
2536 	case FFA_RXTX_MAP_SMC64:
2537 		return rxtx_map_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2538 					cookie, handle, flags);
2539 
2540 	case FFA_RXTX_UNMAP:
2541 		return rxtx_unmap_handler(smc_fid, secure_origin, x1, x2, x3,
2542 					  x4, cookie, handle, flags);
2543 
2544 	case FFA_PARTITION_INFO_GET:
2545 		return partition_info_get_handler(smc_fid, secure_origin, x1,
2546 						  x2, x3, x4, cookie, handle,
2547 						  flags);
2548 
2549 	case FFA_RX_RELEASE:
2550 		return rx_release_handler(smc_fid, secure_origin, x1, x2, x3,
2551 					  x4, cookie, handle, flags);
2552 
2553 	case FFA_MSG_WAIT:
2554 		return msg_wait_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2555 					cookie, handle, flags);
2556 
2557 	case FFA_ERROR:
2558 		return ffa_error_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2559 					cookie, handle, flags);
2560 
2561 	case FFA_MSG_RUN:
2562 		return ffa_run_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2563 				       cookie, handle, flags);
2564 
2565 	case FFA_MEM_SHARE_SMC32:
2566 	case FFA_MEM_SHARE_SMC64:
2567 	case FFA_MEM_LEND_SMC32:
2568 	case FFA_MEM_LEND_SMC64:
2569 		return spmc_ffa_mem_send(smc_fid, secure_origin, x1, x2, x3, x4,
2570 					 cookie, handle, flags);
2571 
2572 	case FFA_MEM_FRAG_TX:
2573 		return spmc_ffa_mem_frag_tx(smc_fid, secure_origin, x1, x2, x3,
2574 					    x4, cookie, handle, flags);
2575 
2576 	case FFA_MEM_FRAG_RX:
2577 		return spmc_ffa_mem_frag_rx(smc_fid, secure_origin, x1, x2, x3,
2578 					    x4, cookie, handle, flags);
2579 
2580 	case FFA_MEM_RETRIEVE_REQ_SMC32:
2581 	case FFA_MEM_RETRIEVE_REQ_SMC64:
2582 		return spmc_ffa_mem_retrieve_req(smc_fid, secure_origin, x1, x2,
2583 						 x3, x4, cookie, handle, flags);
2584 
2585 	case FFA_MEM_RELINQUISH:
2586 		return spmc_ffa_mem_relinquish(smc_fid, secure_origin, x1, x2,
2587 					       x3, x4, cookie, handle, flags);
2588 
2589 	case FFA_MEM_RECLAIM:
2590 		return spmc_ffa_mem_reclaim(smc_fid, secure_origin, x1, x2, x3,
2591 						x4, cookie, handle, flags);
2592 	case FFA_CONSOLE_LOG_SMC32:
2593 	case FFA_CONSOLE_LOG_SMC64:
2594 		return spmc_ffa_console_log(smc_fid, secure_origin, x1, x2, x3,
2595 						x4, cookie, handle, flags);
2596 
2597 	case FFA_MEM_PERM_GET_SMC32:
2598 	case FFA_MEM_PERM_GET_SMC64:
2599 		return ffa_mem_perm_get_handler(smc_fid, secure_origin, x1, x2,
2600 						x3, x4, cookie, handle, flags);
2601 
2602 	case FFA_MEM_PERM_SET_SMC32:
2603 	case FFA_MEM_PERM_SET_SMC64:
2604 		return ffa_mem_perm_set_handler(smc_fid, secure_origin, x1, x2,
2605 						x3, x4, cookie, handle, flags);
2606 
2607 	default:
2608 		WARN("Unsupported FF-A call 0x%08x.\n", smc_fid);
2609 		break;
2610 	}
2611 	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
2612 }
2613 
2614 /*******************************************************************************
2615  * This function is the handler registered for S-EL1 interrupts by the SPMC. It
2616  * validates the interrupt and upon success arranges entry into the SP for
2617  * handling the interrupt.
2618  ******************************************************************************/
2619 static uint64_t spmc_sp_interrupt_handler(uint32_t id,
2620 					  uint32_t flags,
2621 					  void *handle,
2622 					  void *cookie)
2623 {
2624 	struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
2625 	struct sp_exec_ctx *ec;
2626 	uint32_t linear_id = plat_my_core_pos();
2627 
2628 	/* Sanity check for a NULL pointer dereference. */
2629 	assert(sp != NULL);
2630 
2631 	/* Check the security state when the exception was generated. */
2632 	assert(get_interrupt_src_ss(flags) == NON_SECURE);
2633 
2634 	/* Panic if not an S-EL1 Partition. */
2635 	if (sp->runtime_el != S_EL1) {
2636 		ERROR("Interrupt received for a non S-EL1 SP on core%u.\n",
2637 		      linear_id);
2638 		panic();
2639 	}
2640 
2641 	/* Obtain a reference to the SP execution context. */
2642 	ec = spmc_get_sp_ec(sp);
2643 
2644 	/* Ensure that the execution context is in waiting state else panic. */
2645 	if (ec->rt_state != RT_STATE_WAITING) {
2646 		ERROR("SP EC on core%u is not waiting (%u), it is (%u).\n",
2647 		      linear_id, RT_STATE_WAITING, ec->rt_state);
2648 		panic();
2649 	}
2650 
2651 	/* Update the runtime model and state of the partition. */
2652 	ec->rt_model = RT_MODEL_INTR;
2653 	ec->rt_state = RT_STATE_RUNNING;
2654 
2655 	VERBOSE("SP (0x%x) interrupt start on core%u.\n", sp->sp_id, linear_id);
2656 
2657 	/*
2658 	 * Forward the interrupt to the S-EL1 SP. The interrupt ID is not
2659 	 * populated as the SP can determine this by itself.
2660 	 * The flags field is forced to 0 mainly to pass the SVE hint bit
2661 	 * cleared for consumption by the lower EL.
2662 	 */
2663 	return spmd_smc_switch_state(FFA_INTERRUPT, false,
2664 				     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
2665 				     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
2666 				     handle, 0ULL, sp->ffa_version);
2667 }
2668