xref: /rk3399_ARM-atf/services/std_svc/spm/el3_spmc/spmc_main.c (revision 05d22c3045e2e972c2262b9ccd6c82cb7545bf83)
1 /*
2  * Copyright (c) 2022-2025, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <errno.h>
9 #include <stdio.h>
10 
11 #include <arch_helpers.h>
12 #include <bl31/bl31.h>
13 #include <bl31/ehf.h>
14 #include <bl31/interrupt_mgmt.h>
15 #include <common/debug.h>
16 #include <common/fdt_wrappers.h>
17 #include <common/runtime_svc.h>
18 #include <common/uuid.h>
19 #include <lib/el3_runtime/context_mgmt.h>
20 #include <lib/smccc.h>
21 #include <lib/utils.h>
22 #include <lib/xlat_tables/xlat_tables_v2.h>
23 #include <libfdt.h>
24 #include <plat/common/platform.h>
25 #include <services/el3_spmc_logical_sp.h>
26 #include <services/ffa_svc.h>
27 #include <services/spmc_svc.h>
28 #include <services/spmd_svc.h>
29 #include "spmc.h"
30 #include "spmc_shared_mem.h"
31 #if TRANSFER_LIST
32 #include <transfer_list.h>
33 #endif
34 
35 #include <platform_def.h>
36 
37 /* FFA_MEM_PERM_* helpers */
38 #define FFA_MEM_PERM_MASK		U(7)
39 #define FFA_MEM_PERM_DATA_MASK		U(3)
40 #define FFA_MEM_PERM_DATA_SHIFT		U(0)
41 #define FFA_MEM_PERM_DATA_NA		U(0)
42 #define FFA_MEM_PERM_DATA_RW		U(1)
43 #define FFA_MEM_PERM_DATA_RES		U(2)
44 #define FFA_MEM_PERM_DATA_RO		U(3)
45 #define FFA_MEM_PERM_INST_EXEC          (U(0) << 2)
46 #define FFA_MEM_PERM_INST_NON_EXEC      (U(1) << 2)
47 
/*
 * Declare the maximum number of SPs and EL3 LPs.
 * Parenthesised so the macro expands safely inside larger expressions
 * (e.g. `MAX_SP_LP_PARTITIONS * n` would otherwise mis-associate).
 */
#define MAX_SP_LP_PARTITIONS (SECURE_PARTITION_COUNT + MAX_EL3_LP_DESCS_COUNT)
50 
51 #define FFA_VERSION_SPMC_MAJOR U(1)
52 #define FFA_VERSION_SPMC_MINOR U(2)
53 
54 /*
55  * Allocate a secure partition descriptor to describe each SP in the system that
56  * does not reside at EL3.
57  */
58 static struct secure_partition_desc sp_desc[SECURE_PARTITION_COUNT];
59 
60 /*
61  * Allocate an NS endpoint descriptor to describe each VM and the Hypervisor in
62  * the system that interacts with a SP. It is used to track the Hypervisor
63  * buffer pair, version and ID for now. It could be extended to track VM
64  * properties when the SPMC supports indirect messaging.
65  */
66 static struct ns_endpoint_desc ns_ep_desc[NS_PARTITION_COUNT];
67 
68 static uint64_t spmc_sp_interrupt_handler(uint32_t id,
69 					  uint32_t flags,
70 					  void *handle,
71 					  void *cookie);
72 
73 /*
74  * Helper function to obtain the array storing the EL3
75  * Logical Partition descriptors.
76  */
77 struct el3_lp_desc *get_el3_lp_array(void)
78 {
79 	return (struct el3_lp_desc *) EL3_LP_DESCS_START;
80 }
81 
82 /*
83  * Helper function to obtain the descriptor of the last SP to whom control was
84  * handed to on this physical cpu. Currently, we assume there is only one SP.
85  * TODO: Expand to track multiple partitions when required.
86  */
87 struct secure_partition_desc *spmc_get_current_sp_ctx(void)
88 {
89 	return &(sp_desc[ACTIVE_SP_DESC_INDEX]);
90 }
91 
92 /*
93  * Helper function to obtain the execution context of an SP on the
94  * current physical cpu.
95  */
96 struct sp_exec_ctx *spmc_get_sp_ec(struct secure_partition_desc *sp)
97 {
98 	return &(sp->ec[get_ec_index(sp)]);
99 }
100 
101 /* Helper function to get pointer to SP context from its ID. */
102 struct secure_partition_desc *spmc_get_sp_ctx(uint16_t id)
103 {
104 	/* Check for Secure World Partitions. */
105 	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
106 		if (sp_desc[i].sp_id == id) {
107 			return &(sp_desc[i]);
108 		}
109 	}
110 	return NULL;
111 }
112 
113 /*
114  * Helper function to obtain the descriptor of the Hypervisor or OS kernel.
115  * We assume that the first descriptor is reserved for this entity.
116  */
117 struct ns_endpoint_desc *spmc_get_hyp_ctx(void)
118 {
119 	return &(ns_ep_desc[0]);
120 }
121 
122 /*
123  * Helper function to obtain the RX/TX buffer pair descriptor of the Hypervisor
124  * or OS kernel in the normal world or the last SP that was run.
125  */
126 struct mailbox *spmc_get_mbox_desc(bool secure_origin)
127 {
128 	/* Obtain the RX/TX buffer pair descriptor. */
129 	if (secure_origin) {
130 		return &(spmc_get_current_sp_ctx()->mailbox);
131 	} else {
132 		return &(spmc_get_hyp_ctx()->mailbox);
133 	}
134 }
135 
/******************************************************************************
 * This function returns to the place where spmc_sp_synchronous_entry() was
 * called originally. It never returns to its caller (__dead2).
 *
 * ec - execution context holding the saved C runtime context (c_rt_ctx).
 * rc - value placed in x0 of the restored context.
 ******************************************************************************/
__dead2 void spmc_sp_synchronous_exit(struct sp_exec_ctx *ec, uint64_t rc)
{
	/*
	 * The SPM must have initiated the original request through a
	 * synchronous entry into the secure partition. Jump back to the
	 * original C runtime context with the value of rc in x0;
	 */
	spm_secure_partition_exit(ec->c_rt_ctx, rc);

	/* spm_secure_partition_exit() must not return. */
	panic();
}
151 
/*******************************************************************************
 * Return FFA_ERROR with specified error code.
 *
 * handle     - SMC context of the caller to populate.
 * error_code - FFA_ERROR_* status placed in w2 per the FF-A convention.
 ******************************************************************************/
uint64_t spmc_ffa_error_return(void *handle, int error_code)
{
	/* w1 (target info) and w3-w7 must be zero for FFA_ERROR. */
	SMC_RET8(handle, FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}
162 
163 /******************************************************************************
164  * Helper function to validate a secure partition ID to ensure it does not
165  * conflict with any other FF-A component and follows the convention to
166  * indicate it resides within the secure world.
167  ******************************************************************************/
168 bool is_ffa_secure_id_valid(uint16_t partition_id)
169 {
170 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
171 
172 	/* Ensure the ID is not the invalid partition ID. */
173 	if (partition_id == INV_SP_ID) {
174 		return false;
175 	}
176 
177 	/* Ensure the ID is not the SPMD ID. */
178 	if (partition_id == SPMD_DIRECT_MSG_ENDPOINT_ID) {
179 		return false;
180 	}
181 
182 	/*
183 	 * Ensure the ID follows the convention to indicate it resides
184 	 * in the secure world.
185 	 */
186 	if (!ffa_is_secure_world_id(partition_id)) {
187 		return false;
188 	}
189 
190 	/* Ensure we don't conflict with the SPMC partition ID. */
191 	if (partition_id == FFA_SPMC_ID) {
192 		return false;
193 	}
194 
195 	/* Ensure we do not already have an SP context with this ID. */
196 	if (spmc_get_sp_ctx(partition_id)) {
197 		return false;
198 	}
199 
200 	/* Ensure we don't clash with any Logical SP's. */
201 	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
202 		if (el3_lp_descs[i].sp_id == partition_id) {
203 			return false;
204 		}
205 	}
206 
207 	return true;
208 }
209 
/*******************************************************************************
 * Route a completed FF-A call: either forward the request to the other world
 * or ERET directly, depending on the origin of the call and its destination.
 * Messages for EL3 logical SPs have already been handled before this point,
 * so the destination is always at a lower exception level.
 ******************************************************************************/
static uint64_t spmc_smc_return(uint32_t smc_fid,
				bool secure_origin,
				uint64_t x1,
				uint64_t x2,
				uint64_t x3,
				uint64_t x4,
				void *handle,
				void *cookie,
				uint64_t flags,
				uint16_t dst_id,
				uint32_t sp_ffa_version)
{
	/* A normal world destination is always reached via the SPMD. */
	if (ffa_is_normal_world_id(dst_id)) {
		return spmd_smc_handler(smc_fid, x1, x2, x3, x4,
					cookie, handle, flags, sp_ffa_version);
	}

	if (ffa_is_secure_world_id(dst_id)) {
		/* Secure to secure: ERET straight back. */
		if (secure_origin) {
			SMC_RET5(handle, smc_fid, x1, x2, x3, x4);
		}

		/* Normal world caller targeting an SP: switch contexts. */
		return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2,
					     x3, x4, handle, flags,
					     sp_ffa_version);
	}

	/* Unknown State. */
	panic();

	/* Shouldn't be Reached. */
	return 0;
}
253 
254 /*******************************************************************************
255  * FF-A ABI Handlers.
256  ******************************************************************************/
257 
258 /*******************************************************************************
259  * Helper function to validate arg2 as part of a direct message.
260  ******************************************************************************/
261 static inline bool direct_msg_validate_arg2(uint64_t x2)
262 {
263 	/* Check message type. */
264 	if (x2 & FFA_FWK_MSG_BIT) {
265 		/* We have a framework message, ensure it is a known message. */
266 		if (x2 & ~(FFA_FWK_MSG_MASK | FFA_FWK_MSG_BIT)) {
267 			VERBOSE("Invalid message format 0x%lx.\n", x2);
268 			return false;
269 		}
270 	} else {
271 		/* We have a partition messages, ensure x2 is not set. */
272 		if (x2 != (uint64_t) 0) {
273 			VERBOSE("Arg2 MBZ for partition messages. (0x%lx).\n",
274 				x2);
275 			return false;
276 		}
277 	}
278 	return true;
279 }
280 
281 /*******************************************************************************
282  * Helper function to validate the destination ID of a direct response.
283  ******************************************************************************/
284 static bool direct_msg_validate_dst_id(uint16_t dst_id)
285 {
286 	struct secure_partition_desc *sp;
287 
288 	/* Check if we're targeting a normal world partition. */
289 	if (ffa_is_normal_world_id(dst_id)) {
290 		return true;
291 	}
292 
293 	/* Or directed to the SPMC itself.*/
294 	if (dst_id == FFA_SPMC_ID) {
295 		return true;
296 	}
297 
298 	/* Otherwise ensure the SP exists. */
299 	sp = spmc_get_sp_ctx(dst_id);
300 	if (sp != NULL) {
301 		return true;
302 	}
303 
304 	return false;
305 }
306 
307 /*******************************************************************************
308  * Helper function to validate the response from a Logical Partition.
309  ******************************************************************************/
310 static bool direct_msg_validate_lp_resp(uint16_t origin_id, uint16_t lp_id,
311 					void *handle)
312 {
313 	/* Retrieve populated Direct Response Arguments. */
314 	uint64_t smc_fid = SMC_GET_GP(handle, CTX_GPREG_X0);
315 	uint64_t x1 = SMC_GET_GP(handle, CTX_GPREG_X1);
316 	uint64_t x2 = SMC_GET_GP(handle, CTX_GPREG_X2);
317 	uint16_t src_id = ffa_endpoint_source(x1);
318 	uint16_t dst_id = ffa_endpoint_destination(x1);
319 
320 	if (src_id != lp_id) {
321 		ERROR("Invalid EL3 LP source ID (0x%x).\n", src_id);
322 		return false;
323 	}
324 
325 	/*
326 	 * Check the destination ID is valid and ensure the LP is responding to
327 	 * the original request.
328 	 */
329 	if ((!direct_msg_validate_dst_id(dst_id)) || (dst_id != origin_id)) {
330 		ERROR("Invalid EL3 LP destination ID (0x%x).\n", dst_id);
331 		return false;
332 	}
333 
334 	if ((smc_fid != FFA_MSG_SEND_DIRECT_RESP2_SMC64) &&
335 			!direct_msg_validate_arg2(x2)) {
336 		ERROR("Invalid EL3 LP message encoding.\n");
337 		return false;
338 	}
339 	return true;
340 }
341 
342 /*******************************************************************************
343  * Helper function to check that partition can receive direct msg or not.
344  ******************************************************************************/
345 static bool direct_msg_receivable(uint32_t properties, uint16_t dir_req_fnum)
346 {
347 	if ((dir_req_fnum == FFA_FNUM_MSG_SEND_DIRECT_REQ &&
348 			((properties & FFA_PARTITION_DIRECT_REQ_RECV) == 0U)) ||
349 			(dir_req_fnum == FFA_FNUM_MSG_SEND_DIRECT_REQ2 &&
350 			((properties & FFA_PARTITION_DIRECT_REQ2_RECV) == 0U))) {
351 		return false;
352 	}
353 
354 	return true;
355 }
356 
357 /*******************************************************************************
358  * Helper function to obtain the FF-A version of the calling partition.
359  ******************************************************************************/
360 uint32_t get_partition_ffa_version(bool secure_origin)
361 {
362 	if (secure_origin) {
363 		return spmc_get_current_sp_ctx()->ffa_version;
364 	} else {
365 		return spmc_get_hyp_ctx()->ffa_version;
366 	}
367 }
368 
/*******************************************************************************
 * Handle direct request messages and route to the appropriate destination:
 * an EL3 Logical Partition, or a lower-EL secure partition. A request from
 * the secure world that does not target an LSP is rejected, as are requests
 * the target is not configured (or not in the right state) to receive.
 ******************************************************************************/
static uint64_t direct_req_smc_handler(uint32_t smc_fid,
				       bool secure_origin,
				       uint64_t x1,
				       uint64_t x2,
				       uint64_t x3,
				       uint64_t x4,
				       void *cookie,
				       void *handle,
				       uint64_t flags)
{
	uint16_t src_id = ffa_endpoint_source(x1);
	uint16_t dst_id = ffa_endpoint_destination(x1);
	uint16_t dir_req_funcid;
	struct el3_lp_desc *el3_lp_descs;
	struct secure_partition_desc *sp;
	unsigned int idx;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);

	/* Classify the request: legacy DIRECT_REQ vs v1.2 DIRECT_REQ2. */
	dir_req_funcid = (smc_fid != FFA_MSG_SEND_DIRECT_REQ2_SMC64) ?
		FFA_FNUM_MSG_SEND_DIRECT_REQ : FFA_FNUM_MSG_SEND_DIRECT_REQ2;

	/* DIRECT_REQ2 requires the caller to implement FF-A v1.2 or later. */
	if ((dir_req_funcid == FFA_FNUM_MSG_SEND_DIRECT_REQ2) &&
			ffa_version < MAKE_FFA_VERSION(U(1), U(2))) {
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/*
	 * Sanity check for DIRECT_REQ:
	 * Check if arg2 has been populated correctly based on message type
	 */
	if ((dir_req_funcid == FFA_FNUM_MSG_SEND_DIRECT_REQ) &&
			!direct_msg_validate_arg2(x2)) {
		return spmc_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
	}

	/* Validate Sender is either the current SP or from the normal world. */
	if ((secure_origin && src_id != spmc_get_current_sp_ctx()->sp_id) ||
		(!secure_origin && !ffa_is_normal_world_id(src_id))) {
		ERROR("Invalid direct request source ID (0x%x).\n", src_id);
		return spmc_ffa_error_return(handle,
					FFA_ERROR_INVALID_PARAMETER);
	}

	el3_lp_descs = get_el3_lp_array();

	/* Check if the request is destined for a Logical Partition. */
	for (unsigned int i = 0U; i < MAX_EL3_LP_DESCS_COUNT; i++) {
		if (el3_lp_descs[i].sp_id == dst_id) {
			/* The LP must advertise support for this req kind. */
			if (!direct_msg_receivable(el3_lp_descs[i].properties, dir_req_funcid)) {
				return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
			}

			/* Invoke the LP handler synchronously at EL3. */
			uint64_t ret = el3_lp_descs[i].direct_req(
						smc_fid, secure_origin, x1, x2,
						x3, x4, cookie, handle, flags);
			/* A malformed LP response is a fatal SPMC error. */
			if (!direct_msg_validate_lp_resp(src_id, dst_id,
							 handle)) {
				panic();
			}

			/* Message checks out. */
			return ret;
		}
	}

	/*
	 * If the request was not targeted to a LSP and from the secure world
	 * then it is invalid since a SP cannot call into the Normal world and
	 * there is no other SP to call into. If there are other SPs in future
	 * then the partition runtime model would need to be validated as well.
	 */
	if (secure_origin) {
		VERBOSE("Direct request not supported to the Normal World.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Check if the SP ID is valid. */
	sp = spmc_get_sp_ctx(dst_id);
	if (sp == NULL) {
		VERBOSE("Direct request to unknown partition ID (0x%x).\n",
			dst_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* The SP must advertise support for this kind of direct request. */
	if (!direct_msg_receivable(sp->properties, dir_req_funcid)) {
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* Protect the runtime state of a UP S-EL0 SP with a lock. */
	if (sp->runtime_el == S_EL0) {
		spin_lock(&sp->rt_state_lock);
	}

	/*
	 * Check that the target execution context is in a waiting state before
	 * forwarding the direct request to it.
	 */
	idx = get_ec_index(sp);
	if (sp->ec[idx].rt_state != RT_STATE_WAITING) {
		VERBOSE("SP context on core%u is not waiting (%u).\n",
			idx, sp->ec[idx].rt_model);

		/* Drop the lock before returning the error. */
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}

		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
	}

	/*
	 * Everything checks out so forward the request to the SP after updating
	 * its state and runtime model. The origin ID and function ID are
	 * recorded so the matching direct response can be validated later.
	 */
	sp->ec[idx].rt_state = RT_STATE_RUNNING;
	sp->ec[idx].rt_model = RT_MODEL_DIR_REQ;
	sp->ec[idx].dir_req_origin_id = src_id;
	sp->ec[idx].dir_req_funcid = dir_req_funcid;

	if (sp->runtime_el == S_EL0) {
		spin_unlock(&sp->rt_state_lock);
	}

	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
			       handle, cookie, flags, dst_id, sp->ffa_version);
}
499 
/*******************************************************************************
 * Handle direct response messages and route to the appropriate destination.
 * Only the secure world may send a response; it must match the recorded
 * in-flight direct request (same function ID, same originating endpoint).
 ******************************************************************************/
static uint64_t direct_resp_smc_handler(uint32_t smc_fid,
					bool secure_origin,
					uint64_t x1,
					uint64_t x2,
					uint64_t x3,
					uint64_t x4,
					void *cookie,
					void *handle,
					uint64_t flags)
{
	uint16_t dst_id = ffa_endpoint_destination(x1);
	uint16_t dir_req_funcid;
	struct secure_partition_desc *sp;
	unsigned int idx;

	/* Map the response fid onto the request fnum it must pair with. */
	dir_req_funcid = (smc_fid != FFA_MSG_SEND_DIRECT_RESP2_SMC64) ?
		FFA_FNUM_MSG_SEND_DIRECT_REQ : FFA_FNUM_MSG_SEND_DIRECT_REQ2;

	/* Check if arg2 has been populated correctly based on message type. */
	if (!direct_msg_validate_arg2(x2)) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Check that the response did not originate from the Normal world. */
	if (!secure_origin) {
		VERBOSE("Direct Response not supported from Normal World.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/*
	 * Check that the response is either targeted to the Normal world or the
	 * SPMC e.g. a PM response.
	 */
	if (!direct_msg_validate_dst_id(dst_id)) {
		VERBOSE("Direct response to invalid partition ID (0x%x).\n",
			dst_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/*
	 * Obtain the SP descriptor and update its runtime state.
	 * NOTE(review): the lookup below keys on the SOURCE endpoint in x1,
	 * while the log message prints dst_id — looks like it should print
	 * the source ID instead; confirm intent before changing.
	 */
	sp = spmc_get_sp_ctx(ffa_endpoint_source(x1));
	if (sp == NULL) {
		VERBOSE("Direct response to unknown partition ID (0x%x).\n",
			dst_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Protect the runtime state of a S-EL0 SP with a lock. */
	if (sp->runtime_el == S_EL0) {
		spin_lock(&sp->rt_state_lock);
	}

	/* Sanity check state is being tracked correctly in the SPMC. */
	idx = get_ec_index(sp);
	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);

	/* Ensure SP execution context was in the right runtime model. */
	if (sp->ec[idx].rt_model != RT_MODEL_DIR_REQ) {
		VERBOSE("SP context on core%u not handling direct req (%u).\n",
			idx, sp->ec[idx].rt_model);
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* The response kind must match the recorded request kind. */
	if (dir_req_funcid != sp->ec[idx].dir_req_funcid) {
		WARN("Unmatched direct req/resp func id. req:%x, resp:%x on core%u.\n",
		     sp->ec[idx].dir_req_funcid, (smc_fid & FUNCID_NUM_MASK), idx);
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* The response must target the endpoint that sent the request. */
	if (sp->ec[idx].dir_req_origin_id != dst_id) {
		WARN("Invalid direct resp partition ID 0x%x != 0x%x on core%u.\n",
		     dst_id, sp->ec[idx].dir_req_origin_id, idx);
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* Update the state of the SP execution context. */
	sp->ec[idx].rt_state = RT_STATE_WAITING;

	/* Clear the ongoing direct request ID. */
	sp->ec[idx].dir_req_origin_id = INV_SP_ID;

	/* Clear the ongoing direct request message version. */
	sp->ec[idx].dir_req_funcid = 0U;

	if (sp->runtime_el == S_EL0) {
		spin_unlock(&sp->rt_state_lock);
	}

	/*
	 * If the receiver is not the SPMC then forward the response to the
	 * Normal world.
	 */
	if (dst_id == FFA_SPMC_ID) {
		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
		/* Should not get here. */
		panic();
	}

	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
			       handle, cookie, flags, dst_id, sp->ffa_version);
}
616 
/*******************************************************************************
 * This function handles the FFA_MSG_WAIT SMC to allow an SP to relinquish its
 * cycles. Depending on the execution context's runtime model this either
 * completes a synchronous initialisation entry, resumes the normal world
 * after a secure interrupt, or simply hands the cycles back.
 ******************************************************************************/
static uint64_t msg_wait_handler(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags)
{
	struct secure_partition_desc *sp;
	unsigned int idx;

	/*
	 * Check that the response did not originate from the Normal world as
	 * only the secure world can call this ABI.
	 */
	if (!secure_origin) {
		VERBOSE("Normal world cannot call FFA_MSG_WAIT.\n");
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/* Get the descriptor of the SP that invoked FFA_MSG_WAIT. */
	sp = spmc_get_current_sp_ctx();
	if (sp == NULL) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/*
	 * Get the execution context of the SP that invoked FFA_MSG_WAIT.
	 */
	idx = get_ec_index(sp);

	/* Protect the runtime state of a S-EL0 SP with a lock. */
	if (sp->runtime_el == S_EL0) {
		spin_lock(&sp->rt_state_lock);
	}

	/*
	 * Ensure SP execution context was in the right runtime model:
	 * FFA_MSG_WAIT may not complete a direct request.
	 */
	if (sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) {
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* Sanity check the state is being tracked correctly in the SPMC. */
	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);

	/*
	 * Perform a synchronous exit if the partition was initialising. The
	 * state is updated after the exit.
	 */
	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}
		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
		/* Should not get here */
		panic();
	}

	/* Update the state of the SP execution context. */
	sp->ec[idx].rt_state = RT_STATE_WAITING;

	/* Resume normal world if a secure interrupt was handled. */
	if (sp->ec[idx].rt_model == RT_MODEL_INTR) {
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}

		return spmd_smc_switch_state(FFA_NORMAL_WORLD_RESUME, secure_origin,
					     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					     handle, flags, sp->ffa_version);
	}

	/* Release the S-EL0 runtime state lock before leaving. */
	if (sp->runtime_el == S_EL0) {
		spin_unlock(&sp->rt_state_lock);
	}

	/* Forward the response to the Normal world. */
	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
			       handle, cookie, flags, FFA_NWD_ID, sp->ffa_version);
}
706 
707 static uint64_t ffa_error_handler(uint32_t smc_fid,
708 				 bool secure_origin,
709 				 uint64_t x1,
710 				 uint64_t x2,
711 				 uint64_t x3,
712 				 uint64_t x4,
713 				 void *cookie,
714 				 void *handle,
715 				 uint64_t flags)
716 {
717 	struct secure_partition_desc *sp;
718 	unsigned int idx;
719 	uint16_t dst_id = ffa_endpoint_destination(x1);
720 	bool cancel_dir_req = false;
721 
722 	/* Check that the response did not originate from the Normal world. */
723 	if (!secure_origin) {
724 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
725 	}
726 
727 	/* Get the descriptor of the SP that invoked FFA_ERROR. */
728 	sp = spmc_get_current_sp_ctx();
729 	if (sp == NULL) {
730 		return spmc_ffa_error_return(handle,
731 					     FFA_ERROR_INVALID_PARAMETER);
732 	}
733 
734 	/* Get the execution context of the SP that invoked FFA_ERROR. */
735 	idx = get_ec_index(sp);
736 
737 	/*
738 	 * We only expect FFA_ERROR to be received during SP initialisation
739 	 * otherwise this is an invalid call.
740 	 */
741 	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
742 		ERROR("SP 0x%x failed to initialize.\n", sp->sp_id);
743 		spmc_sp_synchronous_exit(&sp->ec[idx], x2);
744 		/* Should not get here. */
745 		panic();
746 	}
747 
748 	if (sp->runtime_el == S_EL0) {
749 		spin_lock(&sp->rt_state_lock);
750 	}
751 
752 	if (sp->ec[idx].rt_state == RT_STATE_RUNNING &&
753 			sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) {
754 		sp->ec[idx].rt_state = RT_STATE_WAITING;
755 		sp->ec[idx].dir_req_origin_id = INV_SP_ID;
756 		sp->ec[idx].dir_req_funcid = 0x00;
757 		cancel_dir_req = true;
758 	}
759 
760 	if (sp->runtime_el == S_EL0) {
761 		spin_unlock(&sp->rt_state_lock);
762 	}
763 
764 	if (cancel_dir_req) {
765 		if (dst_id == FFA_SPMC_ID) {
766 			spmc_sp_synchronous_exit(&sp->ec[idx], x4);
767 			/* Should not get here. */
768 			panic();
769 		} else
770 			return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
771 					       handle, cookie, flags, dst_id, sp->ffa_version);
772 	}
773 
774 	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
775 }
776 
/*******************************************************************************
 * FFA_VERSION handler: record the caller's requested FF-A version and report
 * the version implemented by this SPMC.
 ******************************************************************************/
static uint64_t ffa_version_handler(uint32_t smc_fid,
				    bool secure_origin,
				    uint64_t x1,
				    uint64_t x2,
				    uint64_t x3,
				    uint64_t x4,
				    void *cookie,
				    void *handle,
				    uint64_t flags)
{
	uint32_t requested_version = x1 & FFA_VERSION_MASK;

	/* Bit 31 of the input version must be zero per the FF-A spec. */
	if (requested_version & FFA_VERSION_BIT31_MASK) {
		/* Invalid encoding, return an error. */
		SMC_RET1(handle, FFA_ERROR_NOT_SUPPORTED);
		/* Execution stops here. */
	}

	/* Determine the caller to store the requested version. */
	if (secure_origin) {
		/*
		 * Ensure that the SP is reporting the same version as
		 * specified in its manifest. If these do not match there is
		 * something wrong with the SP.
		 * TODO: Should we abort the SP? For now assert this is not
		 *       case.
		 */
		assert(requested_version ==
		       spmc_get_current_sp_ctx()->ffa_version);
	} else {
		/*
		 * If this is called by the normal world, record this
		 * information in its descriptor.
		 */
		spmc_get_hyp_ctx()->ffa_version = requested_version;
	}

	/* Report the version this SPMC implements (currently v1.2). */
	SMC_RET1(handle, MAKE_FFA_VERSION(FFA_VERSION_SPMC_MAJOR,
					  FFA_VERSION_SPMC_MINOR));
}
817 
/*******************************************************************************
 * FFA_RXTX_MAP handler: validate and dynamically map the caller's RX/TX
 * buffer pair into the SPMC's translation regime (TX read-only, RX
 * read-write), recording the mapping in the caller's mailbox descriptor.
 ******************************************************************************/
static uint64_t rxtx_map_handler(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags)
{
	int ret;
	uint32_t error_code;
	/* Buffers are mapped secure or non-secure to match the caller. */
	uint32_t mem_atts = secure_origin ? MT_SECURE : MT_NS;
	struct mailbox *mbox;
	uintptr_t tx_address = x1;
	uintptr_t rx_address = x2;
	uint32_t page_count = x3 & FFA_RXTX_PAGE_COUNT_MASK; /* Bits [5:0] */
	uint32_t buf_size = page_count * FFA_PAGE_SIZE;

	/*
	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
	 * ABI on behalf of a VM and reject it if this is the case.
	 */
	if (tx_address == 0 || rx_address == 0) {
		WARN("Mapping RX/TX Buffers on behalf of VM not supported.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Ensure the specified buffers are not the same. */
	if (tx_address == rx_address) {
		WARN("TX Buffer must not be the same as RX Buffer.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Ensure the buffer size is not 0. */
	if (buf_size == 0U) {
		WARN("Buffer size must not be 0\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/*
	 * Ensure the buffer size is a multiple of the translation granule size
	 * in TF-A.
	 */
	if (buf_size % PAGE_SIZE != 0U) {
		WARN("Buffer size must be aligned to translation granule.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Obtain the RX/TX buffer pair descriptor. */
	mbox = spmc_get_mbox_desc(secure_origin);

	spin_lock(&mbox->lock);

	/* Check if buffers have already been mapped. */
	if (mbox->rx_buffer != 0 || mbox->tx_buffer != 0) {
		WARN("RX/TX Buffers already mapped (%p/%p)\n",
		     (void *) mbox->rx_buffer, (void *)mbox->tx_buffer);
		error_code = FFA_ERROR_DENIED;
		goto err;
	}

	/* memmap the TX buffer as read only. */
	ret = mmap_add_dynamic_region(tx_address, /* PA */
			tx_address, /* VA */
			buf_size, /* size */
			mem_atts | MT_RO_DATA); /* attrs */
	if (ret != 0) {
		/* Return the correct error code. */
		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
						FFA_ERROR_INVALID_PARAMETER;
		WARN("Unable to map TX buffer: %d\n", error_code);
		goto err;
	}

	/* memmap the RX buffer as read write. */
	ret = mmap_add_dynamic_region(rx_address, /* PA */
			rx_address, /* VA */
			buf_size, /* size */
			mem_atts | MT_RW_DATA); /* attrs */

	if (ret != 0) {
		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
						FFA_ERROR_INVALID_PARAMETER;
		WARN("Unable to map RX buffer: %d\n", error_code);
		/* Unmap the TX buffer again. */
		mmap_remove_dynamic_region(tx_address, buf_size);
		goto err;
	}

	/* Both mappings succeeded: record them in the mailbox descriptor. */
	mbox->tx_buffer = (void *) tx_address;
	mbox->rx_buffer = (void *) rx_address;
	mbox->rxtx_page_count = page_count;
	spin_unlock(&mbox->lock);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);
	/* Execution stops here. */
err:
	spin_unlock(&mbox->lock);
	return spmc_ffa_error_return(handle, error_code);
}
924 
925 static uint64_t rxtx_unmap_handler(uint32_t smc_fid,
926 				   bool secure_origin,
927 				   uint64_t x1,
928 				   uint64_t x2,
929 				   uint64_t x3,
930 				   uint64_t x4,
931 				   void *cookie,
932 				   void *handle,
933 				   uint64_t flags)
934 {
935 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
936 	uint32_t buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
937 
938 	/*
939 	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
940 	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
941 	 * ABI on behalf of a VM and reject it if this is the case.
942 	 */
943 	if (x1 != 0UL) {
944 		return spmc_ffa_error_return(handle,
945 					     FFA_ERROR_INVALID_PARAMETER);
946 	}
947 
948 	spin_lock(&mbox->lock);
949 
950 	/* Check if buffers are currently mapped. */
951 	if (mbox->rx_buffer == 0 || mbox->tx_buffer == 0) {
952 		spin_unlock(&mbox->lock);
953 		return spmc_ffa_error_return(handle,
954 					     FFA_ERROR_INVALID_PARAMETER);
955 	}
956 
957 	/* Unmap RX Buffer */
958 	if (mmap_remove_dynamic_region((uintptr_t) mbox->rx_buffer,
959 				       buf_size) != 0) {
960 		WARN("Unable to unmap RX buffer!\n");
961 	}
962 
963 	mbox->rx_buffer = 0;
964 
965 	/* Unmap TX Buffer */
966 	if (mmap_remove_dynamic_region((uintptr_t) mbox->tx_buffer,
967 				       buf_size) != 0) {
968 		WARN("Unable to unmap TX buffer!\n");
969 	}
970 
971 	mbox->tx_buffer = 0;
972 	mbox->rxtx_page_count = 0;
973 
974 	spin_unlock(&mbox->lock);
975 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
976 }
977 
978 /*
979  * Helper function to populate the properties field of a Partition Info Get
980  * descriptor.
981  */
982 static uint32_t
983 partition_info_get_populate_properties(uint32_t sp_properties,
984 				       enum sp_execution_state sp_ec_state)
985 {
986 	uint32_t properties = sp_properties;
987 	uint32_t ec_state;
988 
989 	/* Determine the execution state of the SP. */
990 	ec_state = sp_ec_state == SP_STATE_AARCH64 ?
991 		   FFA_PARTITION_INFO_GET_AARCH64_STATE :
992 		   FFA_PARTITION_INFO_GET_AARCH32_STATE;
993 
994 	properties |= ec_state << FFA_PARTITION_INFO_GET_EXEC_STATE_SHIFT;
995 
996 	return properties;
997 }
998 
999 /*
1000  * Collate the partition information in a v1.1 partition information
1001  * descriptor format, this will be converter later if required.
1002  */
1003 static int partition_info_get_handler_v1_1(uint32_t *uuid,
1004 					   struct ffa_partition_info_v1_1
1005 						  *partitions,
1006 					   uint32_t max_partitions,
1007 					   uint32_t *partition_count)
1008 {
1009 	uint32_t index;
1010 	struct ffa_partition_info_v1_1 *desc;
1011 	bool null_uuid = is_null_uuid(uuid);
1012 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
1013 
1014 	/* Deal with Logical Partitions. */
1015 	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
1016 		if (null_uuid || uuid_match(uuid, el3_lp_descs[index].uuid)) {
1017 			/* Found a matching UUID, populate appropriately. */
1018 			if (*partition_count >= max_partitions) {
1019 				return FFA_ERROR_NO_MEMORY;
1020 			}
1021 
1022 			desc = &partitions[*partition_count];
1023 			desc->ep_id = el3_lp_descs[index].sp_id;
1024 			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
1025 			/* LSPs must be AArch64. */
1026 			desc->properties =
1027 				partition_info_get_populate_properties(
1028 					el3_lp_descs[index].properties,
1029 					SP_STATE_AARCH64);
1030 
1031 			if (null_uuid) {
1032 				copy_uuid(desc->uuid, el3_lp_descs[index].uuid);
1033 			}
1034 			(*partition_count)++;
1035 		}
1036 	}
1037 
1038 	/* Deal with physical SP's. */
1039 	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
1040 		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
1041 			/* Found a matching UUID, populate appropriately. */
1042 			if (*partition_count >= max_partitions) {
1043 				return FFA_ERROR_NO_MEMORY;
1044 			}
1045 
1046 			desc = &partitions[*partition_count];
1047 			desc->ep_id = sp_desc[index].sp_id;
1048 			/*
1049 			 * Execution context count must match No. cores for
1050 			 * S-EL1 SPs.
1051 			 */
1052 			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
1053 			desc->properties =
1054 				partition_info_get_populate_properties(
1055 					sp_desc[index].properties,
1056 					sp_desc[index].execution_state);
1057 
1058 			if (null_uuid) {
1059 				copy_uuid(desc->uuid, sp_desc[index].uuid);
1060 			}
1061 			(*partition_count)++;
1062 		}
1063 	}
1064 	return 0;
1065 }
1066 
1067 /*
1068  * Handle the case where that caller only wants the count of partitions
1069  * matching a given UUID and does not want the corresponding descriptors
1070  * populated.
1071  */
1072 static uint32_t partition_info_get_handler_count_only(uint32_t *uuid)
1073 {
1074 	uint32_t index = 0;
1075 	uint32_t partition_count = 0;
1076 	bool null_uuid = is_null_uuid(uuid);
1077 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
1078 
1079 	/* Deal with Logical Partitions. */
1080 	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
1081 		if (null_uuid ||
1082 		    uuid_match(uuid, el3_lp_descs[index].uuid)) {
1083 			(partition_count)++;
1084 		}
1085 	}
1086 
1087 	/* Deal with physical SP's. */
1088 	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
1089 		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
1090 			(partition_count)++;
1091 		}
1092 	}
1093 	return partition_count;
1094 }
1095 
1096 /*
1097  * If the caller of the PARTITION_INFO_GET ABI was a v1.0 caller, populate
1098  * the corresponding descriptor format from the v1.1 descriptor array.
1099  */
1100 static uint64_t partition_info_populate_v1_0(struct ffa_partition_info_v1_1
1101 					     *partitions,
1102 					     struct mailbox *mbox,
1103 					     int partition_count)
1104 {
1105 	uint32_t index;
1106 	uint32_t buf_size;
1107 	uint32_t descriptor_size;
1108 	struct ffa_partition_info_v1_0 *v1_0_partitions =
1109 		(struct ffa_partition_info_v1_0 *) mbox->rx_buffer;
1110 
1111 	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1112 	descriptor_size = partition_count *
1113 			  sizeof(struct ffa_partition_info_v1_0);
1114 
1115 	if (descriptor_size > buf_size) {
1116 		return FFA_ERROR_NO_MEMORY;
1117 	}
1118 
1119 	for (index = 0U; index < partition_count; index++) {
1120 		v1_0_partitions[index].ep_id = partitions[index].ep_id;
1121 		v1_0_partitions[index].execution_ctx_count =
1122 			partitions[index].execution_ctx_count;
1123 		/* Only report v1.0 properties. */
1124 		v1_0_partitions[index].properties =
1125 			(partitions[index].properties &
1126 			FFA_PARTITION_INFO_GET_PROPERTIES_V1_0_MASK);
1127 	}
1128 	return 0;
1129 }
1130 
1131 /*
1132  * Main handler for FFA_PARTITION_INFO_GET which supports both FF-A v1.1 and
1133  * v1.0 implementations.
1134  */
static uint64_t partition_info_get_handler(uint32_t smc_fid,
					   bool secure_origin,
					   uint64_t x1,
					   uint64_t x2,
					   uint64_t x3,
					   uint64_t x4,
					   void *cookie,
					   void *handle,
					   uint64_t flags)
{
	int ret;
	uint32_t partition_count = 0;
	/* Descriptor size returned in w3; stays 0 for count-only/v1.0. */
	uint32_t size = 0;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
	struct mailbox *mbox;
	uint64_t info_get_flags;
	bool count_only;
	uint32_t uuid[4];

	/* The target UUID is passed as four 32-bit words in w1-w4. */
	uuid[0] = x1;
	uuid[1] = x2;
	uuid[2] = x3;
	uuid[3] = x4;

	/* Determine if the Partition descriptors should be populated. */
	info_get_flags = SMC_GET_GP(handle, CTX_GPREG_X5);
	count_only = (info_get_flags & FFA_PARTITION_INFO_GET_COUNT_FLAG_MASK);

	/* Handle the case where we don't need to populate the descriptors. */
	if (count_only) {
		partition_count = partition_info_get_handler_count_only(uuid);
		if (partition_count == 0) {
			/* No partition matched: the UUID is unknown. */
			return spmc_ffa_error_return(handle,
						FFA_ERROR_INVALID_PARAMETER);
		}
	} else {
		/*
		 * Stack-allocated staging array; sized to hold every SP and
		 * EL3 LP in the system so discovery cannot overflow it.
		 */
		struct ffa_partition_info_v1_1 partitions[MAX_SP_LP_PARTITIONS];

		/*
		 * Handle the case where the partition descriptors are required,
		 * check we have the buffers available and populate the
		 * appropriate structure version.
		 */

		/* Obtain the v1.1 format of the descriptors. */
		ret = partition_info_get_handler_v1_1(uuid, partitions,
						      MAX_SP_LP_PARTITIONS,
						      &partition_count);

		/* Check if an error occurred during discovery. */
		if (ret != 0) {
			/* NB: mbox not yet acquired, so plain err path. */
			goto err;
		}

		/* If we didn't find any matches the UUID is unknown. */
		if (partition_count == 0) {
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err;
		}

		/* Obtain the partition mailbox RX/TX buffer pair descriptor. */
		mbox = spmc_get_mbox_desc(secure_origin);

		/*
		 * If the caller has not bothered registering its RX/TX pair
		 * then return an error code.
		 */
		spin_lock(&mbox->lock);
		if (mbox->rx_buffer == NULL) {
			ret = FFA_ERROR_BUSY;
			goto err_unlock;
		}

		/* Ensure the RX buffer is currently free. */
		if (mbox->state != MAILBOX_STATE_EMPTY) {
			ret = FFA_ERROR_BUSY;
			goto err_unlock;
		}

		/* Zero the RX buffer before populating. */
		(void)memset(mbox->rx_buffer, 0,
			     mbox->rxtx_page_count * FFA_PAGE_SIZE);

		/*
		 * Depending on the FF-A version of the requesting partition
		 * we may need to convert to a v1.0 format otherwise we can copy
		 * directly.
		 */
		if (ffa_version == MAKE_FFA_VERSION(U(1), U(0))) {
			ret = partition_info_populate_v1_0(partitions,
							   mbox,
							   partition_count);
			if (ret != 0) {
				goto err_unlock;
			}
		} else {
			uint32_t buf_size = mbox->rxtx_page_count *
					    FFA_PAGE_SIZE;

			/* Ensure the descriptor will fit in the buffer. */
			size = sizeof(struct ffa_partition_info_v1_1);
			if (partition_count * size  > buf_size) {
				ret = FFA_ERROR_NO_MEMORY;
				goto err_unlock;
			}
			memcpy(mbox->rx_buffer, partitions,
			       partition_count * size);
		}

		/* The RX buffer now belongs to the caller until released. */
		mbox->state = MAILBOX_STATE_FULL;
		spin_unlock(&mbox->lock);
	}
	SMC_RET4(handle, FFA_SUCCESS_SMC32, 0, partition_count, size);

err_unlock:
	spin_unlock(&mbox->lock);
err:
	return spmc_ffa_error_return(handle, ret);
}
1254 
/* Helper to report FFA_SUCCESS with a single payload value in w2. */
static uint64_t ffa_feature_success(void *handle, uint32_t arg2)
{
	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0, arg2);
}
1259 
1260 static uint64_t ffa_features_retrieve_request(bool secure_origin,
1261 					      uint32_t input_properties,
1262 					      void *handle)
1263 {
1264 	/*
1265 	 * If we're called by the normal world we don't support any
1266 	 * additional features.
1267 	 */
1268 	if (!secure_origin) {
1269 		if ((input_properties & FFA_FEATURES_RET_REQ_NS_BIT) != 0U) {
1270 			return spmc_ffa_error_return(handle,
1271 						     FFA_ERROR_NOT_SUPPORTED);
1272 		}
1273 
1274 	} else {
1275 		struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
1276 		/*
1277 		 * If v1.1 the NS bit must be set otherwise it is an invalid
1278 		 * call. If v1.0 check and store whether the SP has requested
1279 		 * the use of the NS bit.
1280 		 */
1281 		if (sp->ffa_version == MAKE_FFA_VERSION(1, 1)) {
1282 			if ((input_properties &
1283 			     FFA_FEATURES_RET_REQ_NS_BIT) == 0U) {
1284 				return spmc_ffa_error_return(handle,
1285 						       FFA_ERROR_NOT_SUPPORTED);
1286 			}
1287 			return ffa_feature_success(handle,
1288 						   FFA_FEATURES_RET_REQ_NS_BIT);
1289 		} else {
1290 			sp->ns_bit_requested = (input_properties &
1291 					       FFA_FEATURES_RET_REQ_NS_BIT) !=
1292 					       0U;
1293 		}
1294 		if (sp->ns_bit_requested) {
1295 			return ffa_feature_success(handle,
1296 						   FFA_FEATURES_RET_REQ_NS_BIT);
1297 		}
1298 	}
1299 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1300 }
1301 
/*
 * FFA_FEATURES handler: reports whether a queried FF-A function ID (or
 * Feature ID) is implemented by this SPMC for the calling world.
 */
static uint64_t ffa_features_handler(uint32_t smc_fid,
				     bool secure_origin,
				     uint64_t x1,
				     uint64_t x2,
				     uint64_t x3,
				     uint64_t x4,
				     void *cookie,
				     void *handle,
				     uint64_t flags)
{
	uint32_t function_id = (uint32_t) x1;
	uint32_t input_properties = (uint32_t) x2;

	/* Check if a Feature ID was requested. */
	if ((function_id & FFA_FEATURES_BIT31_MASK) == 0U) {
		/* We currently don't support any additional features. */
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/*
	 * Handle the cases where we have separate handlers due to additional
	 * properties.
	 */
	switch (function_id) {
	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
		return ffa_features_retrieve_request(secure_origin,
						     input_properties,
						     handle);
	}

	/*
	 * We don't currently support additional input properties for these
	 * other ABIs therefore ensure this value is set to 0.
	 */
	if (input_properties != 0U) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_NOT_SUPPORTED);
	}

	/* Report if any other FF-A ABI is supported. */
	switch (function_id) {
	/* Supported features from both worlds. */
	case FFA_ERROR:
	case FFA_SUCCESS_SMC32:
	case FFA_INTERRUPT:
	case FFA_SPM_ID_GET:
	case FFA_ID_GET:
	case FFA_FEATURES:
	case FFA_VERSION:
	case FFA_RX_RELEASE:
	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
	case FFA_MSG_SEND_DIRECT_REQ2_SMC64:
	case FFA_PARTITION_INFO_GET:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_MEM_FRAG_TX:
	case FFA_MSG_RUN:

		/*
		 * We are relying on the fact that the other registers
		 * will be set to 0 as these values align with the
		 * currently implemented features of the SPMC. If this
		 * changes this function must be extended to handle
		 * reporting the additional functionality.
		 */

		SMC_RET1(handle, FFA_SUCCESS_SMC32);
		/* Execution stops here. */

	/* Supported ABIs only from the secure world. */
	case FFA_MEM_PERM_GET_SMC32:
	case FFA_MEM_PERM_GET_SMC64:
	case FFA_MEM_PERM_SET_SMC32:
	case FFA_MEM_PERM_SET_SMC64:
	/*
	 * these ABIs are only supported from S-EL0 SPs: when the build does
	 * not include S-EL0 SP support they are rejected outright; otherwise
	 * they fall through to the common secure-origin check below.
	 */
	#if !(SPMC_AT_EL3_SEL0_SP)
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	#endif
	/* fall through */

	case FFA_SECONDARY_EP_REGISTER_SMC64:
	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
	case FFA_MSG_SEND_DIRECT_RESP2_SMC64:
	case FFA_MEM_RELINQUISH:
	case FFA_MSG_WAIT:
	case FFA_CONSOLE_LOG_SMC32:
	case FFA_CONSOLE_LOG_SMC64:
		if (!secure_origin) {
			return spmc_ffa_error_return(handle,
				FFA_ERROR_NOT_SUPPORTED);
		}
		SMC_RET1(handle, FFA_SUCCESS_SMC32);
		/* Execution stops here. */

	/* Supported features only from the normal world. */
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
	case FFA_MEM_RECLAIM:
	case FFA_MEM_FRAG_RX:

		if (secure_origin) {
			return spmc_ffa_error_return(handle,
					FFA_ERROR_NOT_SUPPORTED);
		}
		SMC_RET1(handle, FFA_SUCCESS_SMC32);
		/* Execution stops here. */

	default:
		return spmc_ffa_error_return(handle,
					FFA_ERROR_NOT_SUPPORTED);
	}
}
1420 
1421 static uint64_t ffa_id_get_handler(uint32_t smc_fid,
1422 				   bool secure_origin,
1423 				   uint64_t x1,
1424 				   uint64_t x2,
1425 				   uint64_t x3,
1426 				   uint64_t x4,
1427 				   void *cookie,
1428 				   void *handle,
1429 				   uint64_t flags)
1430 {
1431 	if (secure_origin) {
1432 		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1433 			 spmc_get_current_sp_ctx()->sp_id);
1434 	} else {
1435 		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1436 			 spmc_get_hyp_ctx()->ns_ep_id);
1437 	}
1438 }
1439 
1440 /*
1441  * Enable an SP to query the ID assigned to the SPMC.
1442  */
static uint64_t ffa_spm_id_get_handler(uint32_t smc_fid,
				       bool secure_origin,
				       uint64_t x1,
				       uint64_t x2,
				       uint64_t x3,
				       uint64_t x4,
				       void *cookie,
				       void *handle,
				       uint64_t flags)
{
	/*
	 * Debug-build sanity checks: all argument registers (x1-x7) are
	 * expected to be zero for this call.
	 */
	assert(x1 == 0UL);
	assert(x2 == 0UL);
	assert(x3 == 0UL);
	assert(x4 == 0UL);
	assert(SMC_GET_GP(handle, CTX_GPREG_X5) == 0UL);
	assert(SMC_GET_GP(handle, CTX_GPREG_X6) == 0UL);
	assert(SMC_GET_GP(handle, CTX_GPREG_X7) == 0UL);

	/* Return the SPMC's own endpoint ID in w2. */
	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0, FFA_SPMC_ID);
}
1463 
/*
 * FFA_RUN handler: resume execution of a target SP execution context on
 * behalf of a normal world caller.
 */
static uint64_t ffa_run_handler(uint32_t smc_fid,
				bool secure_origin,
				uint64_t x1,
				uint64_t x2,
				uint64_t x3,
				uint64_t x4,
				void *cookie,
				void *handle,
				uint64_t flags)
{
	struct secure_partition_desc *sp;
	/* w1 packs the target endpoint ID and vCPU ID. */
	uint16_t target_id = FFA_RUN_EP_ID(x1);
	uint16_t vcpu_id = FFA_RUN_VCPU_ID(x1);
	unsigned int idx;
	unsigned int *rt_state;
	unsigned int *rt_model;

	/* Can only be called from the normal world. */
	if (secure_origin) {
		ERROR("FFA_RUN can only be called from NWd.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Cannot run a Normal world partition. */
	if (ffa_is_normal_world_id(target_id)) {
		ERROR("Cannot run a NWd partition (0x%x).\n", target_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Check that the target SP exists. */
	sp = spmc_get_sp_ctx(target_id);
	if (sp == NULL) {
		ERROR("Unknown partition ID (0x%x).\n", target_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	idx = get_ec_index(sp);

	/* The requested vCPU must be the one bound to this physical core. */
	if (idx != vcpu_id) {
		ERROR("Cannot run vcpu %d != %d.\n", idx, vcpu_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}
	/* The runtime state lock is only taken for S-EL0 SPs. */
	if (sp->runtime_el == S_EL0) {
		spin_lock(&sp->rt_state_lock);
	}
	rt_state = &((sp->ec[idx]).rt_state);
	rt_model = &((sp->ec[idx]).rt_model);
	if (*rt_state == RT_STATE_RUNNING) {
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}
		ERROR("Partition (0x%x) is already running.\n", target_id);
		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
	}

	/*
	 * Sanity check that if the execution context was not waiting then it
	 * was either in the direct request or the run partition runtime model.
	 */
	if (*rt_state == RT_STATE_PREEMPTED || *rt_state == RT_STATE_BLOCKED) {
		assert(*rt_model == RT_MODEL_RUN ||
		       *rt_model == RT_MODEL_DIR_REQ);
	}

	/*
	 * If the context was waiting then update the partition runtime model.
	 */
	if (*rt_state == RT_STATE_WAITING) {
		*rt_model = RT_MODEL_RUN;
	}

	/*
	 * Forward the request to the correct SP vCPU after updating
	 * its state.
	 */
	*rt_state = RT_STATE_RUNNING;

	if (sp->runtime_el == S_EL0) {
		spin_unlock(&sp->rt_state_lock);
	}

	/* Hand control to the target SP via the world-switch helper. */
	return spmc_smc_return(smc_fid, secure_origin, x1, 0, 0, 0,
			       handle, cookie, flags, target_id, sp->ffa_version);
}
1552 
1553 static uint64_t rx_release_handler(uint32_t smc_fid,
1554 				   bool secure_origin,
1555 				   uint64_t x1,
1556 				   uint64_t x2,
1557 				   uint64_t x3,
1558 				   uint64_t x4,
1559 				   void *cookie,
1560 				   void *handle,
1561 				   uint64_t flags)
1562 {
1563 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1564 
1565 	spin_lock(&mbox->lock);
1566 
1567 	if (mbox->state != MAILBOX_STATE_FULL) {
1568 		spin_unlock(&mbox->lock);
1569 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1570 	}
1571 
1572 	mbox->state = MAILBOX_STATE_EMPTY;
1573 	spin_unlock(&mbox->lock);
1574 
1575 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1576 }
1577 
1578 static uint64_t spmc_ffa_console_log(uint32_t smc_fid,
1579 				     bool secure_origin,
1580 				     uint64_t x1,
1581 				     uint64_t x2,
1582 				     uint64_t x3,
1583 				     uint64_t x4,
1584 				     void *cookie,
1585 				     void *handle,
1586 				     uint64_t flags)
1587 {
1588 	/* Maximum number of characters is 48: 6 registers of 8 bytes each. */
1589 	char chars[48] = {0};
1590 	size_t chars_max;
1591 	size_t chars_count = x1;
1592 
1593 	/* Does not support request from Nwd. */
1594 	if (!secure_origin) {
1595 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1596 	}
1597 
1598 	assert(smc_fid == FFA_CONSOLE_LOG_SMC32 || smc_fid == FFA_CONSOLE_LOG_SMC64);
1599 	if (smc_fid == FFA_CONSOLE_LOG_SMC32) {
1600 		uint32_t *registers = (uint32_t *)chars;
1601 		registers[0] = (uint32_t)x2;
1602 		registers[1] = (uint32_t)x3;
1603 		registers[2] = (uint32_t)x4;
1604 		registers[3] = (uint32_t)SMC_GET_GP(handle, CTX_GPREG_X5);
1605 		registers[4] = (uint32_t)SMC_GET_GP(handle, CTX_GPREG_X6);
1606 		registers[5] = (uint32_t)SMC_GET_GP(handle, CTX_GPREG_X7);
1607 		chars_max = 6 * sizeof(uint32_t);
1608 	} else {
1609 		uint64_t *registers = (uint64_t *)chars;
1610 		registers[0] = x2;
1611 		registers[1] = x3;
1612 		registers[2] = x4;
1613 		registers[3] = SMC_GET_GP(handle, CTX_GPREG_X5);
1614 		registers[4] = SMC_GET_GP(handle, CTX_GPREG_X6);
1615 		registers[5] = SMC_GET_GP(handle, CTX_GPREG_X7);
1616 		chars_max = 6 * sizeof(uint64_t);
1617 	}
1618 
1619 	if ((chars_count == 0) || (chars_count > chars_max)) {
1620 		return spmc_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
1621 	}
1622 
1623 	for (size_t i = 0; (i < chars_count) && (chars[i] != '\0'); i++) {
1624 		putchar(chars[i]);
1625 	}
1626 
1627 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1628 }
1629 
1630 /*
1631  * Perform initial validation on the provided secondary entry point.
1632  * For now ensure it does not lie within the BL31 Image or the SP's
1633  * RX/TX buffers as these are mapped within EL3.
1634  * TODO: perform validation for additional invalid memory regions.
1635  */
1636 static int validate_secondary_ep(uintptr_t ep, struct secure_partition_desc *sp)
1637 {
1638 	struct mailbox *mb;
1639 	uintptr_t buffer_size;
1640 	uintptr_t sp_rx_buffer;
1641 	uintptr_t sp_tx_buffer;
1642 	uintptr_t sp_rx_buffer_limit;
1643 	uintptr_t sp_tx_buffer_limit;
1644 
1645 	mb = &sp->mailbox;
1646 	buffer_size = (uintptr_t) (mb->rxtx_page_count * FFA_PAGE_SIZE);
1647 	sp_rx_buffer = (uintptr_t) mb->rx_buffer;
1648 	sp_tx_buffer = (uintptr_t) mb->tx_buffer;
1649 	sp_rx_buffer_limit = sp_rx_buffer + buffer_size;
1650 	sp_tx_buffer_limit = sp_tx_buffer + buffer_size;
1651 
1652 	/*
1653 	 * Check if the entry point lies within BL31, or the
1654 	 * SP's RX or TX buffer.
1655 	 */
1656 	if ((ep >= BL31_BASE && ep < BL31_LIMIT) ||
1657 	    (ep >= sp_rx_buffer && ep < sp_rx_buffer_limit) ||
1658 	    (ep >= sp_tx_buffer && ep < sp_tx_buffer_limit)) {
1659 		return -EINVAL;
1660 	}
1661 	return 0;
1662 }
1663 
1664 /*******************************************************************************
1665  * This function handles the FFA_SECONDARY_EP_REGISTER SMC to allow an SP to
1666  *  register an entry point for initialization during a secondary cold boot.
1667  ******************************************************************************/
1668 static uint64_t ffa_sec_ep_register_handler(uint32_t smc_fid,
1669 					    bool secure_origin,
1670 					    uint64_t x1,
1671 					    uint64_t x2,
1672 					    uint64_t x3,
1673 					    uint64_t x4,
1674 					    void *cookie,
1675 					    void *handle,
1676 					    uint64_t flags)
1677 {
1678 	struct secure_partition_desc *sp;
1679 	struct sp_exec_ctx *sp_ctx;
1680 
1681 	/* This request cannot originate from the Normal world. */
1682 	if (!secure_origin) {
1683 		WARN("%s: Can only be called from SWd.\n", __func__);
1684 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1685 	}
1686 
1687 	/* Get the context of the current SP. */
1688 	sp = spmc_get_current_sp_ctx();
1689 	if (sp == NULL) {
1690 		WARN("%s: Cannot find SP context.\n", __func__);
1691 		return spmc_ffa_error_return(handle,
1692 					     FFA_ERROR_INVALID_PARAMETER);
1693 	}
1694 
1695 	/* Only an S-EL1 SP should be invoking this ABI. */
1696 	if (sp->runtime_el != S_EL1) {
1697 		WARN("%s: Can only be called for a S-EL1 SP.\n", __func__);
1698 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1699 	}
1700 
1701 	/* Ensure the SP is in its initialization state. */
1702 	sp_ctx = spmc_get_sp_ec(sp);
1703 	if (sp_ctx->rt_model != RT_MODEL_INIT) {
1704 		WARN("%s: Can only be called during SP initialization.\n",
1705 		     __func__);
1706 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1707 	}
1708 
1709 	/* Perform initial validation of the secondary entry point. */
1710 	if (validate_secondary_ep(x1, sp)) {
1711 		WARN("%s: Invalid entry point provided (0x%lx).\n",
1712 		     __func__, x1);
1713 		return spmc_ffa_error_return(handle,
1714 					     FFA_ERROR_INVALID_PARAMETER);
1715 	}
1716 
1717 	/*
1718 	 * Update the secondary entrypoint in SP context.
1719 	 * We don't need a lock here as during partition initialization there
1720 	 * will only be a single core online.
1721 	 */
1722 	sp->secondary_ep = x1;
1723 	VERBOSE("%s: 0x%lx\n", __func__, sp->secondary_ep);
1724 
1725 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1726 }
1727 
1728 /*******************************************************************************
1729  * Permissions are encoded using a different format in the FFA_MEM_PERM_* ABIs
1730  * than in the Trusted Firmware, where the mmap_attr_t enum type is used. This
1731  * function converts a permission value from the FF-A format to the mmap_attr_t
1732  * format by setting MT_RW/MT_RO, MT_USER/MT_PRIVILEGED and
1733  * MT_EXECUTE/MT_EXECUTE_NEVER. The other fields are left as 0 because they are
1734  * ignored by the function xlat_change_mem_attributes_ctx().
1735  ******************************************************************************/
1736 static unsigned int ffa_perm_to_mmap_perm(unsigned int perms)
1737 {
1738 	unsigned int tf_attr = 0U;
1739 	unsigned int access;
1740 
1741 	/* Deal with data access permissions first. */
1742 	access = (perms & FFA_MEM_PERM_DATA_MASK) >> FFA_MEM_PERM_DATA_SHIFT;
1743 
1744 	switch (access) {
1745 	case FFA_MEM_PERM_DATA_RW:
1746 		/* Return 0 if the execute is set with RW. */
1747 		if ((perms & FFA_MEM_PERM_INST_NON_EXEC) != 0) {
1748 			tf_attr |= MT_RW | MT_USER | MT_EXECUTE_NEVER;
1749 		}
1750 		break;
1751 
1752 	case FFA_MEM_PERM_DATA_RO:
1753 		tf_attr |= MT_RO | MT_USER;
1754 		/* Deal with the instruction access permissions next. */
1755 		if ((perms & FFA_MEM_PERM_INST_NON_EXEC) == 0) {
1756 			tf_attr |= MT_EXECUTE;
1757 		} else {
1758 			tf_attr |= MT_EXECUTE_NEVER;
1759 		}
1760 		break;
1761 
1762 	case FFA_MEM_PERM_DATA_NA:
1763 	default:
1764 		return tf_attr;
1765 	}
1766 
1767 	return tf_attr;
1768 }
1769 
1770 /*******************************************************************************
1771  * Handler to set the permissions of a set of contiguous pages of a S-EL0 SP
1772  ******************************************************************************/
1773 static uint64_t ffa_mem_perm_set_handler(uint32_t smc_fid,
1774 					 bool secure_origin,
1775 					 uint64_t x1,
1776 					 uint64_t x2,
1777 					 uint64_t x3,
1778 					 uint64_t x4,
1779 					 void *cookie,
1780 					 void *handle,
1781 					 uint64_t flags)
1782 {
1783 	struct secure_partition_desc *sp;
1784 	unsigned int idx;
1785 	uintptr_t base_va = (uintptr_t) x1;
1786 	size_t size = (size_t)(x2 * PAGE_SIZE);
1787 	uint32_t tf_attr;
1788 	int ret;
1789 
1790 	/* This request cannot originate from the Normal world. */
1791 	if (!secure_origin) {
1792 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1793 	}
1794 
1795 	if (size == 0) {
1796 		return spmc_ffa_error_return(handle,
1797 					     FFA_ERROR_INVALID_PARAMETER);
1798 	}
1799 
1800 	/* Get the context of the current SP. */
1801 	sp = spmc_get_current_sp_ctx();
1802 	if (sp == NULL) {
1803 		return spmc_ffa_error_return(handle,
1804 					     FFA_ERROR_INVALID_PARAMETER);
1805 	}
1806 
1807 	/* A S-EL1 SP has no business invoking this ABI. */
1808 	if (sp->runtime_el == S_EL1) {
1809 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1810 	}
1811 
1812 	if ((x3 & ~((uint64_t)FFA_MEM_PERM_MASK)) != 0) {
1813 		return spmc_ffa_error_return(handle,
1814 					     FFA_ERROR_INVALID_PARAMETER);
1815 	}
1816 
1817 	/* Get the execution context of the calling SP. */
1818 	idx = get_ec_index(sp);
1819 
1820 	/*
1821 	 * Ensure that the S-EL0 SP is initialising itself. We do not need to
1822 	 * synchronise this operation through a spinlock since a S-EL0 SP is UP
1823 	 * and can only be initialising on this cpu.
1824 	 */
1825 	if (sp->ec[idx].rt_model != RT_MODEL_INIT) {
1826 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1827 	}
1828 
1829 	VERBOSE("Setting memory permissions:\n");
1830 	VERBOSE("  Start address  : 0x%lx\n", base_va);
1831 	VERBOSE("  Number of pages: %lu (%zu bytes)\n", x2, size);
1832 	VERBOSE("  Attributes     : 0x%x\n", (uint32_t)x3);
1833 
1834 	/* Convert inbound permissions to TF-A permission attributes */
1835 	tf_attr = ffa_perm_to_mmap_perm((unsigned int)x3);
1836 	if (tf_attr == 0U) {
1837 		return spmc_ffa_error_return(handle,
1838 					     FFA_ERROR_INVALID_PARAMETER);
1839 	}
1840 
1841 	/* Request the change in permissions */
1842 	ret = xlat_change_mem_attributes_ctx(sp->xlat_ctx_handle,
1843 					     base_va, size, tf_attr);
1844 	if (ret != 0) {
1845 		return spmc_ffa_error_return(handle,
1846 					     FFA_ERROR_INVALID_PARAMETER);
1847 	}
1848 
1849 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1850 }
1851 
1852 /*******************************************************************************
1853  * Permissions are encoded using a different format in the FFA_MEM_PERM_* ABIs
1854  * than in the Trusted Firmware, where the mmap_attr_t enum type is used. This
1855  * function converts a permission value from the mmap_attr_t format to the FF-A
1856  * format.
1857  ******************************************************************************/
1858 static unsigned int mmap_perm_to_ffa_perm(unsigned int attr)
1859 {
1860 	unsigned int perms = 0U;
1861 	unsigned int data_access;
1862 
1863 	if ((attr & MT_USER) == 0) {
1864 		/* No access from EL0. */
1865 		data_access = FFA_MEM_PERM_DATA_NA;
1866 	} else {
1867 		if ((attr & MT_RW) != 0) {
1868 			data_access = FFA_MEM_PERM_DATA_RW;
1869 		} else {
1870 			data_access = FFA_MEM_PERM_DATA_RO;
1871 		}
1872 	}
1873 
1874 	perms |= (data_access & FFA_MEM_PERM_DATA_MASK)
1875 		<< FFA_MEM_PERM_DATA_SHIFT;
1876 
1877 	if ((attr & MT_EXECUTE_NEVER) != 0U) {
1878 		perms |= FFA_MEM_PERM_INST_NON_EXEC;
1879 	}
1880 
1881 	return perms;
1882 }
1883 
1884 /*******************************************************************************
1885  * Handler to get the permissions of a set of contiguous pages of a S-EL0 SP
1886  ******************************************************************************/
1887 static uint64_t ffa_mem_perm_get_handler(uint32_t smc_fid,
1888 					 bool secure_origin,
1889 					 uint64_t x1,
1890 					 uint64_t x2,
1891 					 uint64_t x3,
1892 					 uint64_t x4,
1893 					 void *cookie,
1894 					 void *handle,
1895 					 uint64_t flags)
1896 {
1897 	struct secure_partition_desc *sp;
1898 	unsigned int idx;
1899 	uintptr_t base_va = (uintptr_t)x1;
1900 	uint64_t max_page_count = x2 + 1;
1901 	uint64_t page_count = 0;
1902 	uint32_t base_page_attr = 0;
1903 	uint32_t page_attr = 0;
1904 	unsigned int table_level;
1905 	int ret;
1906 
1907 	/* This request cannot originate from the Normal world. */
1908 	if (!secure_origin) {
1909 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1910 	}
1911 
1912 	/* Get the context of the current SP. */
1913 	sp = spmc_get_current_sp_ctx();
1914 	if (sp == NULL) {
1915 		return spmc_ffa_error_return(handle,
1916 					     FFA_ERROR_INVALID_PARAMETER);
1917 	}
1918 
1919 	/* A S-EL1 SP has no business invoking this ABI. */
1920 	if (sp->runtime_el == S_EL1) {
1921 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1922 	}
1923 
1924 	/* Get the execution context of the calling SP. */
1925 	idx = get_ec_index(sp);
1926 
1927 	/*
1928 	 * Ensure that the S-EL0 SP is initialising itself. We do not need to
1929 	 * synchronise this operation through a spinlock since a S-EL0 SP is UP
1930 	 * and can only be initialising on this cpu.
1931 	 */
1932 	if (sp->ec[idx].rt_model != RT_MODEL_INIT) {
1933 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1934 	}
1935 
1936 	base_va &= ~(PAGE_SIZE_MASK);
1937 
1938 	/* Request the permissions */
1939 	ret = xlat_get_mem_attributes_ctx(sp->xlat_ctx_handle, base_va,
1940 			&base_page_attr, &table_level);
1941 	if (ret != 0) {
1942 		return spmc_ffa_error_return(handle,
1943 					     FFA_ERROR_INVALID_PARAMETER);
1944 	}
1945 
1946 	/*
1947 	 * Caculate how many pages in this block entry from base_va including
1948 	 * its page.
1949 	 */
1950 	page_count = ((XLAT_BLOCK_SIZE(table_level) -
1951 			(base_va & XLAT_BLOCK_MASK(table_level))) >> PAGE_SIZE_SHIFT);
1952 	base_va += XLAT_BLOCK_SIZE(table_level);
1953 
1954 	while ((page_count < max_page_count) && (base_va != 0x00)) {
1955 		ret = xlat_get_mem_attributes_ctx(sp->xlat_ctx_handle, base_va,
1956 				&page_attr, &table_level);
1957 		if (ret != 0) {
1958 			return spmc_ffa_error_return(handle,
1959 						     FFA_ERROR_INVALID_PARAMETER);
1960 		}
1961 
1962 		if (page_attr != base_page_attr) {
1963 			break;
1964 		}
1965 
1966 		base_va += XLAT_BLOCK_SIZE(table_level);
1967 		page_count += (XLAT_BLOCK_SIZE(table_level) >> PAGE_SIZE_SHIFT);
1968 	}
1969 
1970 	if (page_count > max_page_count) {
1971 		page_count = max_page_count;
1972 	}
1973 
1974 	/* Convert TF-A permission to FF-A permissions attributes. */
1975 	x2 = mmap_perm_to_ffa_perm(base_page_attr);
1976 
1977 	/* x3 should be page count - 1 */
1978 	SMC_RET4(handle, FFA_SUCCESS_SMC32, 0, x2, --page_count);
1979 }
1980 
1981 /*******************************************************************************
1982  * This function will parse the Secure Partition Manifest. From manifest, it
1983  * will fetch details for preparing Secure partition image context and secure
1984  * partition image boot arguments if any.
1985  ******************************************************************************/
1986 static int sp_manifest_parse(void *sp_manifest, int offset,
1987 			     struct secure_partition_desc *sp,
1988 			     entry_point_info_t *ep_info,
1989 			     int32_t *boot_info_reg)
1990 {
1991 	int32_t ret, node;
1992 	uint32_t config_32;
1993 
1994 	/*
1995 	 * Look for the mandatory fields that are expected to be present in
1996 	 * the SP manifests.
1997 	 */
1998 	node = fdt_path_offset(sp_manifest, "/");
1999 	if (node < 0) {
2000 		ERROR("Did not find root node.\n");
2001 		return node;
2002 	}
2003 
2004 	ret = fdt_read_uint32_array(sp_manifest, node, "uuid",
2005 				    ARRAY_SIZE(sp->uuid), sp->uuid);
2006 	if (ret != 0) {
2007 		ERROR("Missing Secure Partition UUID.\n");
2008 		return ret;
2009 	}
2010 
2011 	ret = fdt_read_uint32(sp_manifest, node, "exception-level", &config_32);
2012 	if (ret != 0) {
2013 		ERROR("Missing SP Exception Level information.\n");
2014 		return ret;
2015 	}
2016 
2017 	sp->runtime_el = config_32;
2018 
2019 	ret = fdt_read_uint32(sp_manifest, node, "ffa-version", &config_32);
2020 	if (ret != 0) {
2021 		ERROR("Missing Secure Partition FF-A Version.\n");
2022 		return ret;
2023 	}
2024 
2025 	sp->ffa_version = config_32;
2026 
2027 	ret = fdt_read_uint32(sp_manifest, node, "execution-state", &config_32);
2028 	if (ret != 0) {
2029 		ERROR("Missing Secure Partition Execution State.\n");
2030 		return ret;
2031 	}
2032 
2033 	sp->execution_state = config_32;
2034 
2035 	ret = fdt_read_uint32(sp_manifest, node,
2036 			      "messaging-method", &config_32);
2037 	if (ret != 0) {
2038 		ERROR("Missing Secure Partition messaging method.\n");
2039 		return ret;
2040 	}
2041 
2042 	/* Validate this entry, we currently only support direct messaging. */
2043 	if ((config_32 & ~(FFA_PARTITION_DIRECT_REQ_RECV |
2044 			  FFA_PARTITION_DIRECT_REQ_SEND |
2045 			  FFA_PARTITION_DIRECT_REQ2_RECV |
2046 			  FFA_PARTITION_DIRECT_REQ2_SEND)) != 0U) {
2047 		WARN("Invalid Secure Partition messaging method (0x%x)\n",
2048 		     config_32);
2049 		return -EINVAL;
2050 	}
2051 
2052 	sp->properties = config_32;
2053 
2054 	ret = fdt_read_uint32(sp_manifest, node,
2055 			      "execution-ctx-count", &config_32);
2056 
2057 	if (ret != 0) {
2058 		ERROR("Missing SP Execution Context Count.\n");
2059 		return ret;
2060 	}
2061 
2062 	/*
2063 	 * Ensure this field is set correctly in the manifest however
2064 	 * since this is currently a hardcoded value for S-EL1 partitions
2065 	 * we don't need to save it here, just validate.
2066 	 */
2067 	if ((sp->runtime_el == S_EL1) && (config_32 != PLATFORM_CORE_COUNT)) {
2068 		ERROR("SP Execution Context Count (%u) must be %u.\n",
2069 			config_32, PLATFORM_CORE_COUNT);
2070 		return -EINVAL;
2071 	}
2072 
2073 	/*
2074 	 * Look for the optional fields that are expected to be present in
2075 	 * an SP manifest.
2076 	 */
2077 	ret = fdt_read_uint32(sp_manifest, node, "id", &config_32);
2078 	if (ret != 0) {
2079 		WARN("Missing Secure Partition ID.\n");
2080 	} else {
2081 		if (!is_ffa_secure_id_valid(config_32)) {
2082 			ERROR("Invalid Secure Partition ID (0x%x).\n",
2083 			      config_32);
2084 			return -EINVAL;
2085 		}
2086 		sp->sp_id = config_32;
2087 	}
2088 
2089 	ret = fdt_read_uint32(sp_manifest, node,
2090 			      "power-management-messages", &config_32);
2091 	if (ret != 0) {
2092 		WARN("Missing Power Management Messages entry.\n");
2093 	} else {
2094 		if ((sp->runtime_el == S_EL0) && (config_32 != 0)) {
2095 			ERROR("Power messages not supported for S-EL0 SP\n");
2096 			return -EINVAL;
2097 		}
2098 
2099 		/*
2100 		 * Ensure only the currently supported power messages have
2101 		 * been requested.
2102 		 */
2103 		if (config_32 & ~(FFA_PM_MSG_SUB_CPU_OFF |
2104 				  FFA_PM_MSG_SUB_CPU_SUSPEND |
2105 				  FFA_PM_MSG_SUB_CPU_SUSPEND_RESUME)) {
2106 			ERROR("Requested unsupported PM messages (%x)\n",
2107 			      config_32);
2108 			return -EINVAL;
2109 		}
2110 		sp->pwr_mgmt_msgs = config_32;
2111 	}
2112 
2113 	ret = fdt_read_uint32(sp_manifest, node,
2114 			      "gp-register-num", &config_32);
2115 	if (ret != 0) {
2116 		WARN("Missing boot information register.\n");
2117 	} else {
2118 		/* Check if a register number between 0-3 is specified. */
2119 		if (config_32 < 4) {
2120 			*boot_info_reg = config_32;
2121 		} else {
2122 			WARN("Incorrect boot information register (%u).\n",
2123 			     config_32);
2124 		}
2125 	}
2126 
2127 	ret = fdt_read_uint32(sp_manifest, node,
2128 			      "vm-availability-messages", &config_32);
2129 	if (ret != 0) {
2130 		WARN("Missing VM availability messaging.\n");
2131 	} else if ((sp->properties & FFA_PARTITION_DIRECT_REQ_RECV) == 0) {
2132 		ERROR("VM availability messaging requested without "
2133 		      "direct message receive support.\n");
2134 		return -EINVAL;
2135 	} else {
2136 		/* Validate this entry. */
2137 		if ((config_32 & ~(FFA_VM_AVAILABILITY_CREATED |
2138 				  FFA_VM_AVAILABILITY_DESTROYED)) != 0U) {
2139 			WARN("Invalid VM availability messaging (0x%x)\n",
2140 			     config_32);
2141 			return -EINVAL;
2142 		}
2143 
2144 		if ((config_32 & FFA_VM_AVAILABILITY_CREATED) != 0U) {
2145 			sp->properties |= FFA_PARTITION_VM_CREATED;
2146 		}
2147 		if ((config_32 & FFA_VM_AVAILABILITY_DESTROYED) != 0U) {
2148 			sp->properties |= FFA_PARTITION_VM_DESTROYED;
2149 		}
2150 	}
2151 
2152 	return 0;
2153 }
2154 
2155 /*******************************************************************************
2156  * This function gets the Secure Partition Manifest base and maps the manifest
2157  * region.
2158  * Currently only one Secure Partition manifest is considered which is used to
2159  * prepare the context for the single Secure Partition.
2160  ******************************************************************************/
2161 static int find_and_prepare_sp_context(void)
2162 {
2163 	void *sp_manifest;
2164 	uintptr_t manifest_base;
2165 	uintptr_t manifest_base_align __maybe_unused;
2166 	entry_point_info_t *next_image_ep_info;
2167 	int32_t ret, boot_info_reg = -1;
2168 	struct secure_partition_desc *sp;
2169 	struct transfer_list_header *tl __maybe_unused;
2170 	struct transfer_list_entry *te __maybe_unused;
2171 
2172 	next_image_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
2173 	if (next_image_ep_info == NULL) {
2174 		WARN("No Secure Partition image provided by BL2.\n");
2175 		return -ENOENT;
2176 	}
2177 
2178 
2179 #if TRANSFER_LIST && !RESET_TO_BL31
2180 	tl = (struct transfer_list_header *)next_image_ep_info->args.arg3;
2181 	te = transfer_list_find(tl, TL_TAG_DT_FFA_MANIFEST);
2182 	if (te == NULL) {
2183 		WARN("Secure Partition manifest absent.\n");
2184 		return -ENOENT;
2185 	}
2186 
2187 	sp_manifest = (void *)transfer_list_entry_data(te);
2188 	manifest_base = (uintptr_t)sp_manifest;
2189 #else
2190 	sp_manifest = (void *)next_image_ep_info->args.arg0;
2191 	if (sp_manifest == NULL) {
2192 		WARN("Secure Partition manifest absent.\n");
2193 		return -ENOENT;
2194 	}
2195 
2196 	manifest_base = (uintptr_t)sp_manifest;
2197 	manifest_base_align = page_align(manifest_base, DOWN);
2198 
2199 	/*
2200 	 * Map the secure partition manifest region in the EL3 translation
2201 	 * regime.
2202 	 * Map an area equal to (2 * PAGE_SIZE) for now. During manifest base
2203 	 * alignment the region of 1 PAGE_SIZE from manifest align base may
2204 	 * not completely accommodate the secure partition manifest region.
2205 	 */
2206 	ret = mmap_add_dynamic_region((unsigned long long)manifest_base_align,
2207 				      manifest_base_align,
2208 				      PAGE_SIZE * 2,
2209 				      MT_RO_DATA);
2210 	if (ret != 0) {
2211 		ERROR("Error while mapping SP manifest (%d).\n", ret);
2212 		return ret;
2213 	}
2214 #endif
2215 
2216 	ret = fdt_node_offset_by_compatible(sp_manifest, -1,
2217 					    "arm,ffa-manifest-1.0");
2218 	if (ret < 0) {
2219 		ERROR("Error happened in SP manifest reading.\n");
2220 		return -EINVAL;
2221 	}
2222 
2223 	/*
2224 	 * Store the size of the manifest so that it can be used later to pass
2225 	 * the manifest as boot information later.
2226 	 */
2227 	next_image_ep_info->args.arg1 = fdt_totalsize(sp_manifest);
2228 	INFO("Manifest adr = %lx , size = %lu bytes\n", manifest_base,
2229 	     next_image_ep_info->args.arg1);
2230 
2231 	/*
2232 	 * Select an SP descriptor for initialising the partition's execution
2233 	 * context on the primary CPU.
2234 	 */
2235 	sp = spmc_get_current_sp_ctx();
2236 
2237 #if SPMC_AT_EL3_SEL0_SP
2238 	/* Assign translation tables context. */
2239 	sp_desc->xlat_ctx_handle = spm_get_sp_xlat_context();
2240 
2241 #endif /* SPMC_AT_EL3_SEL0_SP */
2242 	/* Initialize entry point information for the SP */
2243 	SET_PARAM_HEAD(next_image_ep_info, PARAM_EP, VERSION_1,
2244 		       SECURE | EP_ST_ENABLE);
2245 
2246 	/* Parse the SP manifest. */
2247 	ret = sp_manifest_parse(sp_manifest, ret, sp, next_image_ep_info,
2248 				&boot_info_reg);
2249 	if (ret != 0) {
2250 		ERROR("Error in Secure Partition manifest parsing.\n");
2251 		return ret;
2252 	}
2253 
2254 	/* Perform any common initialisation. */
2255 	spmc_sp_common_setup(sp, next_image_ep_info, boot_info_reg);
2256 
2257 	/* Perform any initialisation specific to S-EL1 SPs. */
2258 	if (sp->runtime_el == S_EL1) {
2259 		spmc_el1_sp_setup(sp, next_image_ep_info);
2260 		spmc_sp_common_ep_commit(sp, next_image_ep_info);
2261 	}
2262 #if SPMC_AT_EL3_SEL0_SP
2263 	/* Perform any initialisation specific to S-EL0 SPs. */
2264 	else if (sp->runtime_el == S_EL0) {
2265 		/* Setup spsr in endpoint info for common context management routine. */
2266 		spmc_el0_sp_spsr_setup(next_image_ep_info);
2267 
2268 		spmc_sp_common_ep_commit(sp, next_image_ep_info);
2269 
2270 		/*
2271 		 * Perform any initialisation specific to S-EL0 not set by common
2272 		 * context management routine.
2273 		 */
2274 		spmc_el0_sp_setup(sp, boot_info_reg, sp_manifest);
2275 	}
2276 #endif /* SPMC_AT_EL3_SEL0_SP */
2277 	else {
2278 		ERROR("Unexpected runtime EL: %u\n", sp->runtime_el);
2279 		return -EINVAL;
2280 	}
2281 
2282 	return 0;
2283 }
2284 
2285 /*******************************************************************************
2286  * This function takes an SP context pointer and performs a synchronous entry
2287  * into it.
2288  ******************************************************************************/
2289 static int32_t logical_sp_init(void)
2290 {
2291 	int32_t rc = 0;
2292 	struct el3_lp_desc *el3_lp_descs;
2293 
2294 	/* Perform initial validation of the Logical Partitions. */
2295 	rc = el3_sp_desc_validate();
2296 	if (rc != 0) {
2297 		ERROR("Logical Partition validation failed!\n");
2298 		return rc;
2299 	}
2300 
2301 	el3_lp_descs = get_el3_lp_array();
2302 
2303 	INFO("Logical Secure Partition init start.\n");
2304 	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
2305 		rc = el3_lp_descs[i].init();
2306 		if (rc != 0) {
2307 			ERROR("Logical SP (0x%x) Failed to Initialize\n",
2308 			      el3_lp_descs[i].sp_id);
2309 			return rc;
2310 		}
2311 		VERBOSE("Logical SP (0x%x) Initialized\n",
2312 			      el3_lp_descs[i].sp_id);
2313 	}
2314 
2315 	INFO("Logical Secure Partition init completed.\n");
2316 
2317 	return rc;
2318 }
2319 
/*******************************************************************************
 * Perform a synchronous entry into the Secure Partition whose execution
 * context is 'ec' on the current CPU. The secure EL1 system register context
 * is restored before entry and saved again on exit.
 * Returns the value handed back by the SP when it exits.
 ******************************************************************************/
uint64_t spmc_sp_synchronous_entry(struct sp_exec_ctx *ec)
{
	uint64_t rc;

	assert(ec != NULL);

	/* Assign the context of the SP to this CPU */
	cm_set_context(&(ec->cpu_ctx), SECURE);

	/* Restore the context assigned above */
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);

	/* Invalidate TLBs at EL1. */
	tlbivmalle1();
	dsbish();

	/* Enter Secure Partition */
	rc = spm_secure_partition_enter(&ec->c_rt_ctx);

	/* Save secure state */
	cm_el1_sysregs_context_save(SECURE);

	return rc;
}
2345 
2346 /*******************************************************************************
2347  * SPMC Helper Functions.
2348  ******************************************************************************/
2349 static int32_t sp_init(void)
2350 {
2351 	uint64_t rc;
2352 	struct secure_partition_desc *sp;
2353 	struct sp_exec_ctx *ec;
2354 
2355 	sp = spmc_get_current_sp_ctx();
2356 	ec = spmc_get_sp_ec(sp);
2357 	ec->rt_model = RT_MODEL_INIT;
2358 	ec->rt_state = RT_STATE_RUNNING;
2359 
2360 	INFO("Secure Partition (0x%x) init start.\n", sp->sp_id);
2361 
2362 	rc = spmc_sp_synchronous_entry(ec);
2363 	if (rc != 0) {
2364 		/* Indicate SP init was not successful. */
2365 		ERROR("SP (0x%x) failed to initialize (%lu).\n",
2366 		      sp->sp_id, rc);
2367 		return 0;
2368 	}
2369 
2370 	ec->rt_state = RT_STATE_WAITING;
2371 	INFO("Secure Partition initialized.\n");
2372 
2373 	return 1;
2374 }
2375 
2376 static void initalize_sp_descs(void)
2377 {
2378 	struct secure_partition_desc *sp;
2379 
2380 	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
2381 		sp = &sp_desc[i];
2382 		sp->sp_id = INV_SP_ID;
2383 		sp->mailbox.rx_buffer = NULL;
2384 		sp->mailbox.tx_buffer = NULL;
2385 		sp->mailbox.state = MAILBOX_STATE_EMPTY;
2386 		sp->secondary_ep = 0;
2387 	}
2388 }
2389 
2390 static void initalize_ns_ep_descs(void)
2391 {
2392 	struct ns_endpoint_desc *ns_ep;
2393 
2394 	for (unsigned int i = 0U; i < NS_PARTITION_COUNT; i++) {
2395 		ns_ep = &ns_ep_desc[i];
2396 		/*
2397 		 * Clashes with the Hypervisor ID but will not be a
2398 		 * problem in practice.
2399 		 */
2400 		ns_ep->ns_ep_id = 0;
2401 		ns_ep->ffa_version = 0;
2402 		ns_ep->mailbox.rx_buffer = NULL;
2403 		ns_ep->mailbox.tx_buffer = NULL;
2404 		ns_ep->mailbox.state = MAILBOX_STATE_EMPTY;
2405 	}
2406 }
2407 
2408 /*******************************************************************************
2409  * Initialize SPMC attributes for the SPMD.
2410  ******************************************************************************/
2411 void spmc_populate_attrs(spmc_manifest_attribute_t *spmc_attrs)
2412 {
2413 	spmc_attrs->major_version = FFA_VERSION_SPMC_MAJOR;
2414 	spmc_attrs->minor_version = FFA_VERSION_SPMC_MINOR;
2415 	spmc_attrs->exec_state = MODE_RW_64;
2416 	spmc_attrs->spmc_id = FFA_SPMC_ID;
2417 }
2418 
2419 /*******************************************************************************
2420  * Initialize contexts of all Secure Partitions.
2421  ******************************************************************************/
2422 int32_t spmc_setup(void)
2423 {
2424 	int32_t ret;
2425 	uint32_t flags;
2426 
2427 	/* Initialize endpoint descriptors */
2428 	initalize_sp_descs();
2429 	initalize_ns_ep_descs();
2430 
2431 	/*
2432 	 * Retrieve the information of the datastore for tracking shared memory
2433 	 * requests allocated by platform code and zero the region if available.
2434 	 */
2435 	ret = plat_spmc_shmem_datastore_get(&spmc_shmem_obj_state.data,
2436 					    &spmc_shmem_obj_state.data_size);
2437 	if (ret != 0) {
2438 		ERROR("Failed to obtain memory descriptor backing store!\n");
2439 		return ret;
2440 	}
2441 	memset(spmc_shmem_obj_state.data, 0, spmc_shmem_obj_state.data_size);
2442 
2443 	/* Setup logical SPs. */
2444 	ret = logical_sp_init();
2445 	if (ret != 0) {
2446 		ERROR("Failed to initialize Logical Partitions.\n");
2447 		return ret;
2448 	}
2449 
2450 	/* Perform physical SP setup. */
2451 
2452 	/* Disable MMU at EL1 (initialized by BL2) */
2453 	disable_mmu_icache_el1();
2454 
2455 	/* Initialize context of the SP */
2456 	INFO("Secure Partition context setup start.\n");
2457 
2458 	ret = find_and_prepare_sp_context();
2459 	if (ret != 0) {
2460 		ERROR("Error in SP finding and context preparation.\n");
2461 		return ret;
2462 	}
2463 
2464 	/* Register power management hooks with PSCI */
2465 	psci_register_spd_pm_hook(&spmc_pm);
2466 
2467 	/*
2468 	 * Register an interrupt handler for S-EL1 interrupts
2469 	 * when generated during code executing in the
2470 	 * non-secure state.
2471 	 */
2472 	flags = 0;
2473 	set_interrupt_rm_flag(flags, NON_SECURE);
2474 	ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
2475 					      spmc_sp_interrupt_handler,
2476 					      flags);
2477 	if (ret != 0) {
2478 		ERROR("Failed to register interrupt handler! (%d)\n", ret);
2479 		panic();
2480 	}
2481 
2482 	/* Register init function for deferred init.  */
2483 	bl31_register_bl32_init(&sp_init);
2484 
2485 	INFO("Secure Partition setup done.\n");
2486 
2487 	return 0;
2488 }
2489 
2490 /*******************************************************************************
2491  * Secure Partition Manager SMC handler.
2492  ******************************************************************************/
2493 uint64_t spmc_smc_handler(uint32_t smc_fid,
2494 			  bool secure_origin,
2495 			  uint64_t x1,
2496 			  uint64_t x2,
2497 			  uint64_t x3,
2498 			  uint64_t x4,
2499 			  void *cookie,
2500 			  void *handle,
2501 			  uint64_t flags)
2502 {
2503 	switch (smc_fid) {
2504 
2505 	case FFA_VERSION:
2506 		return ffa_version_handler(smc_fid, secure_origin, x1, x2, x3,
2507 					   x4, cookie, handle, flags);
2508 
2509 	case FFA_SPM_ID_GET:
2510 		return ffa_spm_id_get_handler(smc_fid, secure_origin, x1, x2,
2511 					     x3, x4, cookie, handle, flags);
2512 
2513 	case FFA_ID_GET:
2514 		return ffa_id_get_handler(smc_fid, secure_origin, x1, x2, x3,
2515 					  x4, cookie, handle, flags);
2516 
2517 	case FFA_FEATURES:
2518 		return ffa_features_handler(smc_fid, secure_origin, x1, x2, x3,
2519 					    x4, cookie, handle, flags);
2520 
2521 	case FFA_SECONDARY_EP_REGISTER_SMC64:
2522 		return ffa_sec_ep_register_handler(smc_fid, secure_origin, x1,
2523 						   x2, x3, x4, cookie, handle,
2524 						   flags);
2525 
2526 	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
2527 	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
2528 	case FFA_MSG_SEND_DIRECT_REQ2_SMC64:
2529 		return direct_req_smc_handler(smc_fid, secure_origin, x1, x2,
2530 					      x3, x4, cookie, handle, flags);
2531 
2532 	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
2533 	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
2534 	case FFA_MSG_SEND_DIRECT_RESP2_SMC64:
2535 		return direct_resp_smc_handler(smc_fid, secure_origin, x1, x2,
2536 					       x3, x4, cookie, handle, flags);
2537 
2538 	case FFA_RXTX_MAP_SMC32:
2539 	case FFA_RXTX_MAP_SMC64:
2540 		return rxtx_map_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2541 					cookie, handle, flags);
2542 
2543 	case FFA_RXTX_UNMAP:
2544 		return rxtx_unmap_handler(smc_fid, secure_origin, x1, x2, x3,
2545 					  x4, cookie, handle, flags);
2546 
2547 	case FFA_PARTITION_INFO_GET:
2548 		return partition_info_get_handler(smc_fid, secure_origin, x1,
2549 						  x2, x3, x4, cookie, handle,
2550 						  flags);
2551 
2552 	case FFA_RX_RELEASE:
2553 		return rx_release_handler(smc_fid, secure_origin, x1, x2, x3,
2554 					  x4, cookie, handle, flags);
2555 
2556 	case FFA_MSG_WAIT:
2557 		return msg_wait_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2558 					cookie, handle, flags);
2559 
2560 	case FFA_ERROR:
2561 		return ffa_error_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2562 					cookie, handle, flags);
2563 
2564 	case FFA_MSG_RUN:
2565 		return ffa_run_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2566 				       cookie, handle, flags);
2567 
2568 	case FFA_MEM_SHARE_SMC32:
2569 	case FFA_MEM_SHARE_SMC64:
2570 	case FFA_MEM_LEND_SMC32:
2571 	case FFA_MEM_LEND_SMC64:
2572 		return spmc_ffa_mem_send(smc_fid, secure_origin, x1, x2, x3, x4,
2573 					 cookie, handle, flags);
2574 
2575 	case FFA_MEM_FRAG_TX:
2576 		return spmc_ffa_mem_frag_tx(smc_fid, secure_origin, x1, x2, x3,
2577 					    x4, cookie, handle, flags);
2578 
2579 	case FFA_MEM_FRAG_RX:
2580 		return spmc_ffa_mem_frag_rx(smc_fid, secure_origin, x1, x2, x3,
2581 					    x4, cookie, handle, flags);
2582 
2583 	case FFA_MEM_RETRIEVE_REQ_SMC32:
2584 	case FFA_MEM_RETRIEVE_REQ_SMC64:
2585 		return spmc_ffa_mem_retrieve_req(smc_fid, secure_origin, x1, x2,
2586 						 x3, x4, cookie, handle, flags);
2587 
2588 	case FFA_MEM_RELINQUISH:
2589 		return spmc_ffa_mem_relinquish(smc_fid, secure_origin, x1, x2,
2590 					       x3, x4, cookie, handle, flags);
2591 
2592 	case FFA_MEM_RECLAIM:
2593 		return spmc_ffa_mem_reclaim(smc_fid, secure_origin, x1, x2, x3,
2594 						x4, cookie, handle, flags);
2595 	case FFA_CONSOLE_LOG_SMC32:
2596 	case FFA_CONSOLE_LOG_SMC64:
2597 		return spmc_ffa_console_log(smc_fid, secure_origin, x1, x2, x3,
2598 						x4, cookie, handle, flags);
2599 
2600 	case FFA_MEM_PERM_GET_SMC32:
2601 	case FFA_MEM_PERM_GET_SMC64:
2602 		return ffa_mem_perm_get_handler(smc_fid, secure_origin, x1, x2,
2603 						x3, x4, cookie, handle, flags);
2604 
2605 	case FFA_MEM_PERM_SET_SMC32:
2606 	case FFA_MEM_PERM_SET_SMC64:
2607 		return ffa_mem_perm_set_handler(smc_fid, secure_origin, x1, x2,
2608 						x3, x4, cookie, handle, flags);
2609 
2610 	default:
2611 		WARN("Unsupported FF-A call 0x%08x.\n", smc_fid);
2612 		break;
2613 	}
2614 	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
2615 }
2616 
2617 /*******************************************************************************
2618  * This function is the handler registered for S-EL1 interrupts by the SPMC. It
2619  * validates the interrupt and upon success arranges entry into the SP for
2620  * handling the interrupt.
2621  ******************************************************************************/
2622 static uint64_t spmc_sp_interrupt_handler(uint32_t id,
2623 					  uint32_t flags,
2624 					  void *handle,
2625 					  void *cookie)
2626 {
2627 	struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
2628 	struct sp_exec_ctx *ec;
2629 	uint32_t linear_id = plat_my_core_pos();
2630 
2631 	/* Sanity check for a NULL pointer dereference. */
2632 	assert(sp != NULL);
2633 
2634 	/* Check the security state when the exception was generated. */
2635 	assert(get_interrupt_src_ss(flags) == NON_SECURE);
2636 
2637 	/* Panic if not an S-EL1 Partition. */
2638 	if (sp->runtime_el != S_EL1) {
2639 		ERROR("Interrupt received for a non S-EL1 SP on core%u.\n",
2640 		      linear_id);
2641 		panic();
2642 	}
2643 
2644 	/* Obtain a reference to the SP execution context. */
2645 	ec = spmc_get_sp_ec(sp);
2646 
2647 	/* Ensure that the execution context is in waiting state else panic. */
2648 	if (ec->rt_state != RT_STATE_WAITING) {
2649 		ERROR("SP EC on core%u is not waiting (%u), it is (%u).\n",
2650 		      linear_id, RT_STATE_WAITING, ec->rt_state);
2651 		panic();
2652 	}
2653 
2654 	/* Update the runtime model and state of the partition. */
2655 	ec->rt_model = RT_MODEL_INTR;
2656 	ec->rt_state = RT_STATE_RUNNING;
2657 
2658 	VERBOSE("SP (0x%x) interrupt start on core%u.\n", sp->sp_id, linear_id);
2659 
2660 	/*
2661 	 * Forward the interrupt to the S-EL1 SP. The interrupt ID is not
2662 	 * populated as the SP can determine this by itself.
2663 	 * The flags field is forced to 0 mainly to pass the SVE hint bit
2664 	 * cleared for consumption by the lower EL.
2665 	 */
2666 	return spmd_smc_switch_state(FFA_INTERRUPT, false,
2667 				     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
2668 				     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
2669 				     handle, 0ULL, sp->ffa_version);
2670 }
2671