xref: /rk3399_ARM-atf/services/std_svc/spm/el3_spmc/spmc_main.c (revision fa0df1bd76b176f7832031c1fa3a0044aacf4e37)
1 /*
2  * Copyright (c) 2022-2025, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <errno.h>
9 #include <stdio.h>
10 
11 #include <arch_helpers.h>
12 #include <bl31/bl31.h>
13 #include <bl31/ehf.h>
14 #include <bl31/interrupt_mgmt.h>
15 #include <common/debug.h>
16 #include <common/fdt_wrappers.h>
17 #include <common/runtime_svc.h>
18 #include <common/uuid.h>
19 #include <lib/el3_runtime/context_mgmt.h>
20 #include <lib/smccc.h>
21 #include <lib/utils.h>
22 #include <lib/xlat_tables/xlat_tables_v2.h>
23 #include <libfdt.h>
24 #include <plat/common/platform.h>
25 #include <services/el3_spmc_logical_sp.h>
26 #include <services/ffa_svc.h>
27 #include <services/spmc_svc.h>
28 #include <services/spmd_svc.h>
29 #include "spmc.h"
30 #include "spmc_shared_mem.h"
31 
32 #include <platform_def.h>
33 
34 /* FFA_MEM_PERM_* helpers */
35 #define FFA_MEM_PERM_MASK		U(7)
36 #define FFA_MEM_PERM_DATA_MASK		U(3)
37 #define FFA_MEM_PERM_DATA_SHIFT		U(0)
38 #define FFA_MEM_PERM_DATA_NA		U(0)
39 #define FFA_MEM_PERM_DATA_RW		U(1)
40 #define FFA_MEM_PERM_DATA_RES		U(2)
41 #define FFA_MEM_PERM_DATA_RO		U(3)
42 #define FFA_MEM_PERM_INST_EXEC          (U(0) << 2)
43 #define FFA_MEM_PERM_INST_NON_EXEC      (U(1) << 2)
44 
45 /* Declare the maximum number of SPs and El3 LPs. */
46 #define MAX_SP_LP_PARTITIONS SECURE_PARTITION_COUNT + MAX_EL3_LP_DESCS_COUNT
47 
48 /*
49  * Allocate a secure partition descriptor to describe each SP in the system that
50  * does not reside at EL3.
51  */
52 static struct secure_partition_desc sp_desc[SECURE_PARTITION_COUNT];
53 
54 /*
55  * Allocate an NS endpoint descriptor to describe each VM and the Hypervisor in
56  * the system that interacts with a SP. It is used to track the Hypervisor
57  * buffer pair, version and ID for now. It could be extended to track VM
58  * properties when the SPMC supports indirect messaging.
59  */
60 static struct ns_endpoint_desc ns_ep_desc[NS_PARTITION_COUNT];
61 
62 static uint64_t spmc_sp_interrupt_handler(uint32_t id,
63 					  uint32_t flags,
64 					  void *handle,
65 					  void *cookie);
66 
67 /*
68  * Helper function to obtain the array storing the EL3
69  * Logical Partition descriptors.
70  */
71 struct el3_lp_desc *get_el3_lp_array(void)
72 {
73 	return (struct el3_lp_desc *) EL3_LP_DESCS_START;
74 }
75 
76 /*
77  * Helper function to obtain the descriptor of the last SP to whom control was
78  * handed to on this physical cpu. Currently, we assume there is only one SP.
79  * TODO: Expand to track multiple partitions when required.
80  */
81 struct secure_partition_desc *spmc_get_current_sp_ctx(void)
82 {
83 	return &(sp_desc[ACTIVE_SP_DESC_INDEX]);
84 }
85 
86 /*
87  * Helper function to obtain the execution context of an SP on the
88  * current physical cpu.
89  */
90 struct sp_exec_ctx *spmc_get_sp_ec(struct secure_partition_desc *sp)
91 {
92 	return &(sp->ec[get_ec_index(sp)]);
93 }
94 
95 /* Helper function to get pointer to SP context from its ID. */
96 struct secure_partition_desc *spmc_get_sp_ctx(uint16_t id)
97 {
98 	/* Check for Secure World Partitions. */
99 	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
100 		if (sp_desc[i].sp_id == id) {
101 			return &(sp_desc[i]);
102 		}
103 	}
104 	return NULL;
105 }
106 
107 /*
108  * Helper function to obtain the descriptor of the Hypervisor or OS kernel.
109  * We assume that the first descriptor is reserved for this entity.
110  */
111 struct ns_endpoint_desc *spmc_get_hyp_ctx(void)
112 {
113 	return &(ns_ep_desc[0]);
114 }
115 
116 /*
117  * Helper function to obtain the RX/TX buffer pair descriptor of the Hypervisor
118  * or OS kernel in the normal world or the last SP that was run.
119  */
120 struct mailbox *spmc_get_mbox_desc(bool secure_origin)
121 {
122 	/* Obtain the RX/TX buffer pair descriptor. */
123 	if (secure_origin) {
124 		return &(spmc_get_current_sp_ctx()->mailbox);
125 	} else {
126 		return &(spmc_get_hyp_ctx()->mailbox);
127 	}
128 }
129 
130 /******************************************************************************
131  * This function returns to the place where spmc_sp_synchronous_entry() was
132  * called originally.
133  ******************************************************************************/
/*
 * Return to the point where spmc_sp_synchronous_entry() was originally
 * called, restoring the saved C runtime context.
 *
 * ec  Execution context holding the saved C runtime context (c_rt_ctx).
 * rc  Value placed in x0 of the restored context.
 *
 * This function does not return (__dead2); the trailing panic() only fires
 * if spm_secure_partition_exit() unexpectedly falls through.
 */
__dead2 void spmc_sp_synchronous_exit(struct sp_exec_ctx *ec, uint64_t rc)
{
	/*
	 * The SPM must have initiated the original request through a
	 * synchronous entry into the secure partition. Jump back to the
	 * original C runtime context with the value of rc in x0;
	 */
	spm_secure_partition_exit(ec->c_rt_ctx, rc);

	panic();
}
145 
146 /*******************************************************************************
147  * Return FFA_ERROR with specified error code.
148  ******************************************************************************/
/*******************************************************************************
 * Populate the SMC return registers in 'handle' with an FFA_ERROR carrying
 * 'error_code' in w2; all other argument registers are MBZ. SMC_RET8 ERETs
 * to the caller, so this call does not return to C in the normal sense.
 ******************************************************************************/
uint64_t spmc_ffa_error_return(void *handle, int error_code)
{
	SMC_RET8(handle, FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}
156 
157 /******************************************************************************
158  * Helper function to validate a secure partition ID to ensure it does not
159  * conflict with any other FF-A component and follows the convention to
160  * indicate it resides within the secure world.
161  ******************************************************************************/
162 bool is_ffa_secure_id_valid(uint16_t partition_id)
163 {
164 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
165 
166 	/* Ensure the ID is not the invalid partition ID. */
167 	if (partition_id == INV_SP_ID) {
168 		return false;
169 	}
170 
171 	/* Ensure the ID is not the SPMD ID. */
172 	if (partition_id == SPMD_DIRECT_MSG_ENDPOINT_ID) {
173 		return false;
174 	}
175 
176 	/*
177 	 * Ensure the ID follows the convention to indicate it resides
178 	 * in the secure world.
179 	 */
180 	if (!ffa_is_secure_world_id(partition_id)) {
181 		return false;
182 	}
183 
184 	/* Ensure we don't conflict with the SPMC partition ID. */
185 	if (partition_id == FFA_SPMC_ID) {
186 		return false;
187 	}
188 
189 	/* Ensure we do not already have an SP context with this ID. */
190 	if (spmc_get_sp_ctx(partition_id)) {
191 		return false;
192 	}
193 
194 	/* Ensure we don't clash with any Logical SP's. */
195 	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
196 		if (el3_lp_descs[i].sp_id == partition_id) {
197 			return false;
198 		}
199 	}
200 
201 	return true;
202 }
203 
204 /*******************************************************************************
205  * This function either forwards the request to the other world or returns
206  * with an ERET depending on the source of the call.
207  * We can assume that the destination is for an entity at a lower exception
208  * level as any messages destined for a logical SP resident in EL3 will have
209  * already been taken care of by the SPMC before entering this function.
210  ******************************************************************************/
static uint64_t spmc_smc_return(uint32_t smc_fid,
				bool secure_origin,
				uint64_t x1,
				uint64_t x2,
				uint64_t x3,
				uint64_t x4,
				void *handle,
				void *cookie,
				uint64_t flags,
				uint16_t dst_id,
				uint32_t sp_ffa_version)
{
	/* If the destination is in the normal world always go via the SPMD. */
	if (ffa_is_normal_world_id(dst_id)) {
		return spmd_smc_handler(smc_fid, x1, x2, x3, x4,
					cookie, handle, flags, sp_ffa_version);
	}
	/*
	 * If the caller is secure and we want to return to the secure world,
	 * ERET directly. SMC_RET5 populates the context and does not fall
	 * through.
	 */
	else if (secure_origin && ffa_is_secure_world_id(dst_id)) {
		SMC_RET5(handle, smc_fid, x1, x2, x3, x4);
	}
	/* If we originated in the normal world then switch contexts. */
	else if (!secure_origin && ffa_is_secure_world_id(dst_id)) {
		return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2,
					     x3, x4, handle, flags, sp_ffa_version);
	} else {
		/* Unknown State. */
		panic();
	}

	/* Shouldn't be Reached. All branches above return, ERET or panic. */
	return 0;
}
247 
248 /*******************************************************************************
249  * FF-A ABI Handlers.
250  ******************************************************************************/
251 
252 /*******************************************************************************
253  * Helper function to validate arg2 as part of a direct message.
254  ******************************************************************************/
255 static inline bool direct_msg_validate_arg2(uint64_t x2)
256 {
257 	/* Check message type. */
258 	if (x2 & FFA_FWK_MSG_BIT) {
259 		/* We have a framework message, ensure it is a known message. */
260 		if (x2 & ~(FFA_FWK_MSG_MASK | FFA_FWK_MSG_BIT)) {
261 			VERBOSE("Invalid message format 0x%lx.\n", x2);
262 			return false;
263 		}
264 	} else {
265 		/* We have a partition messages, ensure x2 is not set. */
266 		if (x2 != (uint64_t) 0) {
267 			VERBOSE("Arg2 MBZ for partition messages. (0x%lx).\n",
268 				x2);
269 			return false;
270 		}
271 	}
272 	return true;
273 }
274 
275 /*******************************************************************************
276  * Helper function to validate the destination ID of a direct response.
277  ******************************************************************************/
278 static bool direct_msg_validate_dst_id(uint16_t dst_id)
279 {
280 	struct secure_partition_desc *sp;
281 
282 	/* Check if we're targeting a normal world partition. */
283 	if (ffa_is_normal_world_id(dst_id)) {
284 		return true;
285 	}
286 
287 	/* Or directed to the SPMC itself.*/
288 	if (dst_id == FFA_SPMC_ID) {
289 		return true;
290 	}
291 
292 	/* Otherwise ensure the SP exists. */
293 	sp = spmc_get_sp_ctx(dst_id);
294 	if (sp != NULL) {
295 		return true;
296 	}
297 
298 	return false;
299 }
300 
301 /*******************************************************************************
302  * Helper function to validate the response from a Logical Partition.
303  ******************************************************************************/
/*
 * Validate the direct response an EL3 Logical Partition has written into the
 * SMC context: the source must be the LP itself, the destination must be a
 * valid endpoint matching the original requester, and (for pre-RESP2 ABIs)
 * arg2 must be well-formed.
 */
static bool direct_msg_validate_lp_resp(uint16_t origin_id, uint16_t lp_id,
					void *handle)
{
	/* Retrieve populated Direct Response Arguments. */
	uint64_t smc_fid = SMC_GET_GP(handle, CTX_GPREG_X0);
	uint64_t x1 = SMC_GET_GP(handle, CTX_GPREG_X1);
	uint64_t x2 = SMC_GET_GP(handle, CTX_GPREG_X2);
	uint16_t src_id = ffa_endpoint_source(x1);
	uint16_t dst_id = ffa_endpoint_destination(x1);

	if (src_id != lp_id) {
		ERROR("Invalid EL3 LP source ID (0x%x).\n", src_id);
		return false;
	}

	/*
	 * Check the destination ID is valid and ensure the LP is responding to
	 * the original request.
	 */
	if ((!direct_msg_validate_dst_id(dst_id)) || (dst_id != origin_id)) {
		ERROR("Invalid EL3 LP destination ID (0x%x).\n", dst_id);
		return false;
	}

	/* RESP2 does not encode a message type in arg2, so skip that check. */
	if ((smc_fid != FFA_MSG_SEND_DIRECT_RESP2_SMC64) &&
			!direct_msg_validate_arg2(x2)) {
		ERROR("Invalid EL3 LP message encoding.\n");
		return false;
	}
	return true;
}
335 
336 /*******************************************************************************
337  * Helper function to check that partition can receive direct msg or not.
338  ******************************************************************************/
339 static bool direct_msg_receivable(uint32_t properties, uint16_t dir_req_fnum)
340 {
341 	if ((dir_req_fnum == FFA_FNUM_MSG_SEND_DIRECT_REQ &&
342 			((properties & FFA_PARTITION_DIRECT_REQ_RECV) == 0U)) ||
343 			(dir_req_fnum == FFA_FNUM_MSG_SEND_DIRECT_REQ2 &&
344 			((properties & FFA_PARTITION_DIRECT_REQ2_RECV) == 0U))) {
345 		return false;
346 	}
347 
348 	return true;
349 }
350 
351 /*******************************************************************************
352  * Helper function to obtain the FF-A version of the calling partition.
353  ******************************************************************************/
354 uint32_t get_partition_ffa_version(bool secure_origin)
355 {
356 	if (secure_origin) {
357 		return spmc_get_current_sp_ctx()->ffa_version;
358 	} else {
359 		return spmc_get_hyp_ctx()->ffa_version;
360 	}
361 }
362 
363 /*******************************************************************************
364  * Handle direct request messages and route to the appropriate destination.
365  ******************************************************************************/
static uint64_t direct_req_smc_handler(uint32_t smc_fid,
				       bool secure_origin,
				       uint64_t x1,
				       uint64_t x2,
				       uint64_t x3,
				       uint64_t x4,
				       void *cookie,
				       void *handle,
				       uint64_t flags)
{
	uint16_t src_id = ffa_endpoint_source(x1);
	uint16_t dst_id = ffa_endpoint_destination(x1);
	uint16_t dir_req_funcid;
	struct el3_lp_desc *el3_lp_descs;
	struct secure_partition_desc *sp;
	unsigned int idx;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);

	/* Distinguish the original DIRECT_REQ ABI from the v1.2 REQ2 ABI. */
	dir_req_funcid = (smc_fid != FFA_MSG_SEND_DIRECT_REQ2_SMC64) ?
		FFA_FNUM_MSG_SEND_DIRECT_REQ : FFA_FNUM_MSG_SEND_DIRECT_REQ2;

	/* DIRECT_REQ2 requires the caller to implement FF-A v1.2 or later. */
	if ((dir_req_funcid == FFA_FNUM_MSG_SEND_DIRECT_REQ2) &&
			ffa_version < MAKE_FFA_VERSION(U(1), U(2))) {
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/*
	 * Sanity check for DIRECT_REQ:
	 * Check if arg2 has been populated correctly based on message type
	 */
	if ((dir_req_funcid == FFA_FNUM_MSG_SEND_DIRECT_REQ) &&
			!direct_msg_validate_arg2(x2)) {
		return spmc_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
	}

	/* Validate Sender is either the current SP or from the normal world. */
	if ((secure_origin && src_id != spmc_get_current_sp_ctx()->sp_id) ||
		(!secure_origin && !ffa_is_normal_world_id(src_id))) {
		ERROR("Invalid direct request source ID (0x%x).\n", src_id);
		return spmc_ffa_error_return(handle,
					FFA_ERROR_INVALID_PARAMETER);
	}

	el3_lp_descs = get_el3_lp_array();

	/* Check if the request is destined for a Logical Partition. */
	for (unsigned int i = 0U; i < MAX_EL3_LP_DESCS_COUNT; i++) {
		if (el3_lp_descs[i].sp_id == dst_id) {
			/* The LP must advertise support for this ABI. */
			if (!direct_msg_receivable(el3_lp_descs[i].properties, dir_req_funcid)) {
				return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
			}

			/* LPs are handled synchronously within EL3. */
			uint64_t ret = el3_lp_descs[i].direct_req(
						smc_fid, secure_origin, x1, x2,
						x3, x4, cookie, handle, flags);
			/* A malformed LP response is a fatal SPMC error. */
			if (!direct_msg_validate_lp_resp(src_id, dst_id,
							 handle)) {
				panic();
			}

			/* Message checks out. */
			return ret;
		}
	}

	/*
	 * If the request was not targeted to a LSP and from the secure world
	 * then it is invalid since a SP cannot call into the Normal world and
	 * there is no other SP to call into. If there are other SPs in future
	 * then the partition runtime model would need to be validated as well.
	 */
	if (secure_origin) {
		VERBOSE("Direct request not supported to the Normal World.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Check if the SP ID is valid. */
	sp = spmc_get_sp_ctx(dst_id);
	if (sp == NULL) {
		VERBOSE("Direct request to unknown partition ID (0x%x).\n",
			dst_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* The SP must advertise support for receiving this ABI. */
	if (!direct_msg_receivable(sp->properties, dir_req_funcid)) {
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* Protect the runtime state of a UP S-EL0 SP with a lock. */
	if (sp->runtime_el == S_EL0) {
		spin_lock(&sp->rt_state_lock);
	}

	/*
	 * Check that the target execution context is in a waiting state before
	 * forwarding the direct request to it.
	 */
	idx = get_ec_index(sp);
	if (sp->ec[idx].rt_state != RT_STATE_WAITING) {
		VERBOSE("SP context on core%u is not waiting (%u).\n",
			idx, sp->ec[idx].rt_model);

		/* Drop the lock before returning the BUSY error. */
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}

		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
	}

	/*
	 * Everything checks out so forward the request to the SP after updating
	 * its state and runtime model. The origin ID and function ID are
	 * recorded so the eventual direct response can be matched against this
	 * request.
	 */
	sp->ec[idx].rt_state = RT_STATE_RUNNING;
	sp->ec[idx].rt_model = RT_MODEL_DIR_REQ;
	sp->ec[idx].dir_req_origin_id = src_id;
	sp->ec[idx].dir_req_funcid = dir_req_funcid;

	if (sp->runtime_el == S_EL0) {
		spin_unlock(&sp->rt_state_lock);
	}

	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
			       handle, cookie, flags, dst_id, sp->ffa_version);
}
493 
494 /*******************************************************************************
495  * Handle direct response messages and route to the appropriate destination.
496  ******************************************************************************/
497 static uint64_t direct_resp_smc_handler(uint32_t smc_fid,
498 					bool secure_origin,
499 					uint64_t x1,
500 					uint64_t x2,
501 					uint64_t x3,
502 					uint64_t x4,
503 					void *cookie,
504 					void *handle,
505 					uint64_t flags)
506 {
507 	uint16_t dst_id = ffa_endpoint_destination(x1);
508 	uint16_t dir_req_funcid;
509 	struct secure_partition_desc *sp;
510 	unsigned int idx;
511 
512 	dir_req_funcid = (smc_fid != FFA_MSG_SEND_DIRECT_RESP2_SMC64) ?
513 		FFA_FNUM_MSG_SEND_DIRECT_REQ : FFA_FNUM_MSG_SEND_DIRECT_REQ2;
514 
515 	/* Check if arg2 has been populated correctly based on message type. */
516 	if (!direct_msg_validate_arg2(x2)) {
517 		return spmc_ffa_error_return(handle,
518 					     FFA_ERROR_INVALID_PARAMETER);
519 	}
520 
521 	/* Check that the response did not originate from the Normal world. */
522 	if (!secure_origin) {
523 		VERBOSE("Direct Response not supported from Normal World.\n");
524 		return spmc_ffa_error_return(handle,
525 					     FFA_ERROR_INVALID_PARAMETER);
526 	}
527 
528 	/*
529 	 * Check that the response is either targeted to the Normal world or the
530 	 * SPMC e.g. a PM response.
531 	 */
532 	if (!direct_msg_validate_dst_id(dst_id)) {
533 		VERBOSE("Direct response to invalid partition ID (0x%x).\n",
534 			dst_id);
535 		return spmc_ffa_error_return(handle,
536 					     FFA_ERROR_INVALID_PARAMETER);
537 	}
538 
539 	/* Obtain the SP descriptor and update its runtime state. */
540 	sp = spmc_get_sp_ctx(ffa_endpoint_source(x1));
541 	if (sp == NULL) {
542 		VERBOSE("Direct response to unknown partition ID (0x%x).\n",
543 			dst_id);
544 		return spmc_ffa_error_return(handle,
545 					     FFA_ERROR_INVALID_PARAMETER);
546 	}
547 
548 	if (sp->runtime_el == S_EL0) {
549 		spin_lock(&sp->rt_state_lock);
550 	}
551 
552 	/* Sanity check state is being tracked correctly in the SPMC. */
553 	idx = get_ec_index(sp);
554 	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
555 
556 	/* Ensure SP execution context was in the right runtime model. */
557 	if (sp->ec[idx].rt_model != RT_MODEL_DIR_REQ) {
558 		VERBOSE("SP context on core%u not handling direct req (%u).\n",
559 			idx, sp->ec[idx].rt_model);
560 		if (sp->runtime_el == S_EL0) {
561 			spin_unlock(&sp->rt_state_lock);
562 		}
563 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
564 	}
565 
566 	if (dir_req_funcid != sp->ec[idx].dir_req_funcid) {
567 		WARN("Unmatched direct req/resp func id. req:%x, resp:%x on core%u.\n",
568 		     sp->ec[idx].dir_req_funcid, (smc_fid & FUNCID_NUM_MASK), idx);
569 		if (sp->runtime_el == S_EL0) {
570 			spin_unlock(&sp->rt_state_lock);
571 		}
572 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
573 	}
574 
575 	if (sp->ec[idx].dir_req_origin_id != dst_id) {
576 		WARN("Invalid direct resp partition ID 0x%x != 0x%x on core%u.\n",
577 		     dst_id, sp->ec[idx].dir_req_origin_id, idx);
578 		if (sp->runtime_el == S_EL0) {
579 			spin_unlock(&sp->rt_state_lock);
580 		}
581 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
582 	}
583 
584 	/* Update the state of the SP execution context. */
585 	sp->ec[idx].rt_state = RT_STATE_WAITING;
586 
587 	/* Clear the ongoing direct request ID. */
588 	sp->ec[idx].dir_req_origin_id = INV_SP_ID;
589 
590 	/* Clear the ongoing direct request message version. */
591 	sp->ec[idx].dir_req_funcid = 0U;
592 
593 	if (sp->runtime_el == S_EL0) {
594 		spin_unlock(&sp->rt_state_lock);
595 	}
596 
597 	/*
598 	 * If the receiver is not the SPMC then forward the response to the
599 	 * Normal world.
600 	 */
601 	if (dst_id == FFA_SPMC_ID) {
602 		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
603 		/* Should not get here. */
604 		panic();
605 	}
606 
607 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
608 			       handle, cookie, flags, dst_id, sp->ffa_version);
609 }
610 
611 /*******************************************************************************
612  * This function handles the FFA_MSG_WAIT SMC to allow an SP to relinquish its
613  * cycles.
614  ******************************************************************************/
static uint64_t msg_wait_handler(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags)
{
	struct secure_partition_desc *sp;
	unsigned int idx;

	/*
	 * Check that the response did not originate from the Normal world as
	 * only the secure world can call this ABI.
	 */
	if (!secure_origin) {
		VERBOSE("Normal world cannot call FFA_MSG_WAIT.\n");
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/* Get the descriptor of the SP that invoked FFA_MSG_WAIT. */
	sp = spmc_get_current_sp_ctx();
	if (sp == NULL) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/*
	 * Get the execution context of the SP that invoked FFA_MSG_WAIT.
	 * Take the runtime state lock for UP S-EL0 SPs before inspecting it.
	 */
	idx = get_ec_index(sp);
	if (sp->runtime_el == S_EL0) {
		spin_lock(&sp->rt_state_lock);
	}

	/*
	 * Ensure SP execution context was in the right runtime model:
	 * an EC handling a direct request must respond with DIRECT_RESP,
	 * not MSG_WAIT.
	 */
	if (sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) {
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* Sanity check the state is being tracked correctly in the SPMC. */
	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);

	/*
	 * Perform a synchronous exit if the partition was initialising. The
	 * state is updated after the exit.
	 */
	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}
		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
		/* Should not get here */
		panic();
	}

	/* Update the state of the SP execution context. */
	sp->ec[idx].rt_state = RT_STATE_WAITING;

	/* Resume normal world if a secure interrupt was handled. */
	if (sp->ec[idx].rt_model == RT_MODEL_INTR) {
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}

		return spmd_smc_switch_state(FFA_NORMAL_WORLD_RESUME, secure_origin,
					     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					     handle, flags, sp->ffa_version);
	}

	/* Release the S-EL0 SP runtime state lock taken above. */
	if (sp->runtime_el == S_EL0) {
		spin_unlock(&sp->rt_state_lock);
	}

	/* Forward the response to the Normal world. */
	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
			       handle, cookie, flags, FFA_NWD_ID, sp->ffa_version);
}
700 
701 static uint64_t ffa_error_handler(uint32_t smc_fid,
702 				 bool secure_origin,
703 				 uint64_t x1,
704 				 uint64_t x2,
705 				 uint64_t x3,
706 				 uint64_t x4,
707 				 void *cookie,
708 				 void *handle,
709 				 uint64_t flags)
710 {
711 	struct secure_partition_desc *sp;
712 	unsigned int idx;
713 	uint16_t dst_id = ffa_endpoint_destination(x1);
714 	bool cancel_dir_req = false;
715 
716 	/* Check that the response did not originate from the Normal world. */
717 	if (!secure_origin) {
718 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
719 	}
720 
721 	/* Get the descriptor of the SP that invoked FFA_ERROR. */
722 	sp = spmc_get_current_sp_ctx();
723 	if (sp == NULL) {
724 		return spmc_ffa_error_return(handle,
725 					     FFA_ERROR_INVALID_PARAMETER);
726 	}
727 
728 	/* Get the execution context of the SP that invoked FFA_ERROR. */
729 	idx = get_ec_index(sp);
730 
731 	/*
732 	 * We only expect FFA_ERROR to be received during SP initialisation
733 	 * otherwise this is an invalid call.
734 	 */
735 	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
736 		ERROR("SP 0x%x failed to initialize.\n", sp->sp_id);
737 		spmc_sp_synchronous_exit(&sp->ec[idx], x2);
738 		/* Should not get here. */
739 		panic();
740 	}
741 
742 	if (sp->runtime_el == S_EL0) {
743 		spin_lock(&sp->rt_state_lock);
744 	}
745 
746 	if (sp->ec[idx].rt_state == RT_STATE_RUNNING &&
747 			sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) {
748 		sp->ec[idx].rt_state = RT_STATE_WAITING;
749 		sp->ec[idx].dir_req_origin_id = INV_SP_ID;
750 		sp->ec[idx].dir_req_funcid = 0x00;
751 		cancel_dir_req = true;
752 	}
753 
754 	if (sp->runtime_el == S_EL0) {
755 		spin_unlock(&sp->rt_state_lock);
756 	}
757 
758 	if (cancel_dir_req) {
759 		if (dst_id == FFA_SPMC_ID) {
760 			spmc_sp_synchronous_exit(&sp->ec[idx], x4);
761 			/* Should not get here. */
762 			panic();
763 		} else
764 			return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
765 					       handle, cookie, flags, dst_id, sp->ffa_version);
766 	}
767 
768 	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
769 }
770 
/*
 * Handle FFA_VERSION: record the caller's requested version (normal world)
 * or sanity-check it against the SP manifest (secure world), then report the
 * version implemented by this SPMC.
 */
static uint64_t ffa_version_handler(uint32_t smc_fid,
				    bool secure_origin,
				    uint64_t x1,
				    uint64_t x2,
				    uint64_t x3,
				    uint64_t x4,
				    void *cookie,
				    void *handle,
				    uint64_t flags)
{
	uint32_t requested_version = x1 & FFA_VERSION_MASK;

	/* Bit 31 of the input version MBZ per the FF-A spec. */
	if (requested_version & FFA_VERSION_BIT31_MASK) {
		/* Invalid encoding, return an error. */
		SMC_RET1(handle, FFA_ERROR_NOT_SUPPORTED);
		/* Execution stops here. */
	}

	/* Determine the caller to store the requested version. */
	if (secure_origin) {
		/*
		 * Ensure that the SP is reporting the same version as
		 * specified in its manifest. If these do not match there is
		 * something wrong with the SP.
		 * TODO: Should we abort the SP? For now assert this is not
		 *       case.
		 */
		assert(requested_version ==
		       spmc_get_current_sp_ctx()->ffa_version);
	} else {
		/*
		 * If this is called by the normal world, record this
		 * information in its descriptor.
		 */
		spmc_get_hyp_ctx()->ffa_version = requested_version;
	}

	SMC_RET1(handle, MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
					  FFA_VERSION_MINOR));
}
811 
/*
 * Handle FFA_RXTX_MAP: validate the caller-supplied TX/RX buffer pair,
 * dynamically map TX read-only and RX read-write in the SPMC's translation
 * regime, and record them in the caller's mailbox descriptor.
 */
static uint64_t rxtx_map_handler(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags)
{
	int ret;
	uint32_t error_code;
	uint32_t mem_atts = secure_origin ? MT_SECURE : MT_NS;
	struct mailbox *mbox;
	uintptr_t tx_address = x1;
	uintptr_t rx_address = x2;
	uint32_t page_count = x3 & FFA_RXTX_PAGE_COUNT_MASK; /* Bits [5:0] */
	uint32_t buf_size = page_count * FFA_PAGE_SIZE;

	/*
	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
	 * ABI on behalf of a VM and reject it if this is the case.
	 */
	if (tx_address == 0 || rx_address == 0) {
		WARN("Mapping RX/TX Buffers on behalf of VM not supported.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Ensure the specified buffers are not the same. */
	if (tx_address == rx_address) {
		WARN("TX Buffer must not be the same as RX Buffer.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Ensure the buffer size is not 0. */
	if (buf_size == 0U) {
		WARN("Buffer size must not be 0\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/*
	 * Ensure the buffer size is a multiple of the translation granule size
	 * in TF-A.
	 */
	if (buf_size % PAGE_SIZE != 0U) {
		WARN("Buffer size must be aligned to translation granule.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Obtain the RX/TX buffer pair descriptor. */
	mbox = spmc_get_mbox_desc(secure_origin);

	spin_lock(&mbox->lock);

	/* Check if buffers have already been mapped. */
	if (mbox->rx_buffer != 0 || mbox->tx_buffer != 0) {
		WARN("RX/TX Buffers already mapped (%p/%p)\n",
		     (void *) mbox->rx_buffer, (void *)mbox->tx_buffer);
		error_code = FFA_ERROR_DENIED;
		goto err;
	}

	/* memmap the TX buffer as read only. */
	ret = mmap_add_dynamic_region(tx_address, /* PA */
			tx_address, /* VA */
			buf_size, /* size */
			mem_atts | MT_RO_DATA); /* attrs */
	if (ret != 0) {
		/* Return the correct error code. */
		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
						FFA_ERROR_INVALID_PARAMETER;
		WARN("Unable to map TX buffer: %d\n", error_code);
		goto err;
	}

	/* memmap the RX buffer as read write. */
	ret = mmap_add_dynamic_region(rx_address, /* PA */
			rx_address, /* VA */
			buf_size, /* size */
			mem_atts | MT_RW_DATA); /* attrs */

	if (ret != 0) {
		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
						FFA_ERROR_INVALID_PARAMETER;
		WARN("Unable to map RX buffer: %d\n", error_code);
		/* Unmap the TX buffer again. */
		mmap_remove_dynamic_region(tx_address, buf_size);
		goto err;
	}

	/* Both mappings succeeded: publish them in the mailbox descriptor. */
	mbox->tx_buffer = (void *) tx_address;
	mbox->rx_buffer = (void *) rx_address;
	mbox->rxtx_page_count = page_count;
	spin_unlock(&mbox->lock);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);
	/* Execution stops here. */
err:
	spin_unlock(&mbox->lock);
	return spmc_ffa_error_return(handle, error_code);
}
918 
919 static uint64_t rxtx_unmap_handler(uint32_t smc_fid,
920 				   bool secure_origin,
921 				   uint64_t x1,
922 				   uint64_t x2,
923 				   uint64_t x3,
924 				   uint64_t x4,
925 				   void *cookie,
926 				   void *handle,
927 				   uint64_t flags)
928 {
929 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
930 	uint32_t buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
931 
932 	/*
933 	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
934 	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
935 	 * ABI on behalf of a VM and reject it if this is the case.
936 	 */
937 	if (x1 != 0UL) {
938 		return spmc_ffa_error_return(handle,
939 					     FFA_ERROR_INVALID_PARAMETER);
940 	}
941 
942 	spin_lock(&mbox->lock);
943 
944 	/* Check if buffers are currently mapped. */
945 	if (mbox->rx_buffer == 0 || mbox->tx_buffer == 0) {
946 		spin_unlock(&mbox->lock);
947 		return spmc_ffa_error_return(handle,
948 					     FFA_ERROR_INVALID_PARAMETER);
949 	}
950 
951 	/* Unmap RX Buffer */
952 	if (mmap_remove_dynamic_region((uintptr_t) mbox->rx_buffer,
953 				       buf_size) != 0) {
954 		WARN("Unable to unmap RX buffer!\n");
955 	}
956 
957 	mbox->rx_buffer = 0;
958 
959 	/* Unmap TX Buffer */
960 	if (mmap_remove_dynamic_region((uintptr_t) mbox->tx_buffer,
961 				       buf_size) != 0) {
962 		WARN("Unable to unmap TX buffer!\n");
963 	}
964 
965 	mbox->tx_buffer = 0;
966 	mbox->rxtx_page_count = 0;
967 
968 	spin_unlock(&mbox->lock);
969 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
970 }
971 
972 /*
973  * Helper function to populate the properties field of a Partition Info Get
974  * descriptor.
975  */
976 static uint32_t
977 partition_info_get_populate_properties(uint32_t sp_properties,
978 				       enum sp_execution_state sp_ec_state)
979 {
980 	uint32_t properties = sp_properties;
981 	uint32_t ec_state;
982 
983 	/* Determine the execution state of the SP. */
984 	ec_state = sp_ec_state == SP_STATE_AARCH64 ?
985 		   FFA_PARTITION_INFO_GET_AARCH64_STATE :
986 		   FFA_PARTITION_INFO_GET_AARCH32_STATE;
987 
988 	properties |= ec_state << FFA_PARTITION_INFO_GET_EXEC_STATE_SHIFT;
989 
990 	return properties;
991 }
992 
993 /*
994  * Collate the partition information in a v1.1 partition information
995  * descriptor format, this will be converter later if required.
996  */
997 static int partition_info_get_handler_v1_1(uint32_t *uuid,
998 					   struct ffa_partition_info_v1_1
999 						  *partitions,
1000 					   uint32_t max_partitions,
1001 					   uint32_t *partition_count)
1002 {
1003 	uint32_t index;
1004 	struct ffa_partition_info_v1_1 *desc;
1005 	bool null_uuid = is_null_uuid(uuid);
1006 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
1007 
1008 	/* Deal with Logical Partitions. */
1009 	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
1010 		if (null_uuid || uuid_match(uuid, el3_lp_descs[index].uuid)) {
1011 			/* Found a matching UUID, populate appropriately. */
1012 			if (*partition_count >= max_partitions) {
1013 				return FFA_ERROR_NO_MEMORY;
1014 			}
1015 
1016 			desc = &partitions[*partition_count];
1017 			desc->ep_id = el3_lp_descs[index].sp_id;
1018 			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
1019 			/* LSPs must be AArch64. */
1020 			desc->properties =
1021 				partition_info_get_populate_properties(
1022 					el3_lp_descs[index].properties,
1023 					SP_STATE_AARCH64);
1024 
1025 			if (null_uuid) {
1026 				copy_uuid(desc->uuid, el3_lp_descs[index].uuid);
1027 			}
1028 			(*partition_count)++;
1029 		}
1030 	}
1031 
1032 	/* Deal with physical SP's. */
1033 	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
1034 		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
1035 			/* Found a matching UUID, populate appropriately. */
1036 			if (*partition_count >= max_partitions) {
1037 				return FFA_ERROR_NO_MEMORY;
1038 			}
1039 
1040 			desc = &partitions[*partition_count];
1041 			desc->ep_id = sp_desc[index].sp_id;
1042 			/*
1043 			 * Execution context count must match No. cores for
1044 			 * S-EL1 SPs.
1045 			 */
1046 			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
1047 			desc->properties =
1048 				partition_info_get_populate_properties(
1049 					sp_desc[index].properties,
1050 					sp_desc[index].execution_state);
1051 
1052 			if (null_uuid) {
1053 				copy_uuid(desc->uuid, sp_desc[index].uuid);
1054 			}
1055 			(*partition_count)++;
1056 		}
1057 	}
1058 	return 0;
1059 }
1060 
1061 /*
1062  * Handle the case where that caller only wants the count of partitions
1063  * matching a given UUID and does not want the corresponding descriptors
1064  * populated.
1065  */
1066 static uint32_t partition_info_get_handler_count_only(uint32_t *uuid)
1067 {
1068 	uint32_t index = 0;
1069 	uint32_t partition_count = 0;
1070 	bool null_uuid = is_null_uuid(uuid);
1071 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
1072 
1073 	/* Deal with Logical Partitions. */
1074 	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
1075 		if (null_uuid ||
1076 		    uuid_match(uuid, el3_lp_descs[index].uuid)) {
1077 			(partition_count)++;
1078 		}
1079 	}
1080 
1081 	/* Deal with physical SP's. */
1082 	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
1083 		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
1084 			(partition_count)++;
1085 		}
1086 	}
1087 	return partition_count;
1088 }
1089 
1090 /*
1091  * If the caller of the PARTITION_INFO_GET ABI was a v1.0 caller, populate
1092  * the corresponding descriptor format from the v1.1 descriptor array.
1093  */
1094 static uint64_t partition_info_populate_v1_0(struct ffa_partition_info_v1_1
1095 					     *partitions,
1096 					     struct mailbox *mbox,
1097 					     int partition_count)
1098 {
1099 	uint32_t index;
1100 	uint32_t buf_size;
1101 	uint32_t descriptor_size;
1102 	struct ffa_partition_info_v1_0 *v1_0_partitions =
1103 		(struct ffa_partition_info_v1_0 *) mbox->rx_buffer;
1104 
1105 	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1106 	descriptor_size = partition_count *
1107 			  sizeof(struct ffa_partition_info_v1_0);
1108 
1109 	if (descriptor_size > buf_size) {
1110 		return FFA_ERROR_NO_MEMORY;
1111 	}
1112 
1113 	for (index = 0U; index < partition_count; index++) {
1114 		v1_0_partitions[index].ep_id = partitions[index].ep_id;
1115 		v1_0_partitions[index].execution_ctx_count =
1116 			partitions[index].execution_ctx_count;
1117 		/* Only report v1.0 properties. */
1118 		v1_0_partitions[index].properties =
1119 			(partitions[index].properties &
1120 			FFA_PARTITION_INFO_GET_PROPERTIES_V1_0_MASK);
1121 	}
1122 	return 0;
1123 }
1124 
1125 /*
1126  * Main handler for FFA_PARTITION_INFO_GET which supports both FF-A v1.1 and
1127  * v1.0 implementations.
1128  */
1129 static uint64_t partition_info_get_handler(uint32_t smc_fid,
1130 					   bool secure_origin,
1131 					   uint64_t x1,
1132 					   uint64_t x2,
1133 					   uint64_t x3,
1134 					   uint64_t x4,
1135 					   void *cookie,
1136 					   void *handle,
1137 					   uint64_t flags)
1138 {
1139 	int ret;
1140 	uint32_t partition_count = 0;
1141 	uint32_t size = 0;
1142 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1143 	struct mailbox *mbox;
1144 	uint64_t info_get_flags;
1145 	bool count_only;
1146 	uint32_t uuid[4];
1147 
1148 	uuid[0] = x1;
1149 	uuid[1] = x2;
1150 	uuid[2] = x3;
1151 	uuid[3] = x4;
1152 
1153 	/* Determine if the Partition descriptors should be populated. */
1154 	info_get_flags = SMC_GET_GP(handle, CTX_GPREG_X5);
1155 	count_only = (info_get_flags & FFA_PARTITION_INFO_GET_COUNT_FLAG_MASK);
1156 
1157 	/* Handle the case where we don't need to populate the descriptors. */
1158 	if (count_only) {
1159 		partition_count = partition_info_get_handler_count_only(uuid);
1160 		if (partition_count == 0) {
1161 			return spmc_ffa_error_return(handle,
1162 						FFA_ERROR_INVALID_PARAMETER);
1163 		}
1164 	} else {
1165 		struct ffa_partition_info_v1_1 partitions[MAX_SP_LP_PARTITIONS];
1166 
1167 		/*
1168 		 * Handle the case where the partition descriptors are required,
1169 		 * check we have the buffers available and populate the
1170 		 * appropriate structure version.
1171 		 */
1172 
1173 		/* Obtain the v1.1 format of the descriptors. */
1174 		ret = partition_info_get_handler_v1_1(uuid, partitions,
1175 						      MAX_SP_LP_PARTITIONS,
1176 						      &partition_count);
1177 
1178 		/* Check if an error occurred during discovery. */
1179 		if (ret != 0) {
1180 			goto err;
1181 		}
1182 
1183 		/* If we didn't find any matches the UUID is unknown. */
1184 		if (partition_count == 0) {
1185 			ret = FFA_ERROR_INVALID_PARAMETER;
1186 			goto err;
1187 		}
1188 
1189 		/* Obtain the partition mailbox RX/TX buffer pair descriptor. */
1190 		mbox = spmc_get_mbox_desc(secure_origin);
1191 
1192 		/*
1193 		 * If the caller has not bothered registering its RX/TX pair
1194 		 * then return an error code.
1195 		 */
1196 		spin_lock(&mbox->lock);
1197 		if (mbox->rx_buffer == NULL) {
1198 			ret = FFA_ERROR_BUSY;
1199 			goto err_unlock;
1200 		}
1201 
1202 		/* Ensure the RX buffer is currently free. */
1203 		if (mbox->state != MAILBOX_STATE_EMPTY) {
1204 			ret = FFA_ERROR_BUSY;
1205 			goto err_unlock;
1206 		}
1207 
1208 		/* Zero the RX buffer before populating. */
1209 		(void)memset(mbox->rx_buffer, 0,
1210 			     mbox->rxtx_page_count * FFA_PAGE_SIZE);
1211 
1212 		/*
1213 		 * Depending on the FF-A version of the requesting partition
1214 		 * we may need to convert to a v1.0 format otherwise we can copy
1215 		 * directly.
1216 		 */
1217 		if (ffa_version == MAKE_FFA_VERSION(U(1), U(0))) {
1218 			ret = partition_info_populate_v1_0(partitions,
1219 							   mbox,
1220 							   partition_count);
1221 			if (ret != 0) {
1222 				goto err_unlock;
1223 			}
1224 		} else {
1225 			uint32_t buf_size = mbox->rxtx_page_count *
1226 					    FFA_PAGE_SIZE;
1227 
1228 			/* Ensure the descriptor will fit in the buffer. */
1229 			size = sizeof(struct ffa_partition_info_v1_1);
1230 			if (partition_count * size  > buf_size) {
1231 				ret = FFA_ERROR_NO_MEMORY;
1232 				goto err_unlock;
1233 			}
1234 			memcpy(mbox->rx_buffer, partitions,
1235 			       partition_count * size);
1236 		}
1237 
1238 		mbox->state = MAILBOX_STATE_FULL;
1239 		spin_unlock(&mbox->lock);
1240 	}
1241 	SMC_RET4(handle, FFA_SUCCESS_SMC32, 0, partition_count, size);
1242 
1243 err_unlock:
1244 	spin_unlock(&mbox->lock);
1245 err:
1246 	return spmc_ffa_error_return(handle, ret);
1247 }
1248 
/* Helper to return FFA_SUCCESS with a feature-specific value in w2. */
static uint64_t ffa_feature_success(void *handle, uint32_t arg2)
{
	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0, arg2);
}
1253 
/*
 * FFA_FEATURES sub-handler for FFA_MEM_RETRIEVE_REQ: negotiate use of the
 * NS bit in memory transaction descriptors with the caller. For a v1.1 SP
 * the NS bit is mandatory; for a v1.0 SP the caller's preference is recorded
 * in its descriptor (ns_bit_requested) for later retrieve operations.
 */
static uint64_t ffa_features_retrieve_request(bool secure_origin,
					      uint32_t input_properties,
					      void *handle)
{
	/*
	 * If we're called by the normal world we don't support any
	 * additional features.
	 */
	if (!secure_origin) {
		/* Requesting the NS bit from NWd is rejected. */
		if ((input_properties & FFA_FEATURES_RET_REQ_NS_BIT) != 0U) {
			return spmc_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

	} else {
		struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
		/*
		 * If v1.1 the NS bit must be set otherwise it is an invalid
		 * call. If v1.0 check and store whether the SP has requested
		 * the use of the NS bit.
		 */
		if (sp->ffa_version == MAKE_FFA_VERSION(1, 1)) {
			if ((input_properties &
			     FFA_FEATURES_RET_REQ_NS_BIT) == 0U) {
				return spmc_ffa_error_return(handle,
						       FFA_ERROR_NOT_SUPPORTED);
			}
			/* v1.1 callers always get the NS bit reported. */
			return ffa_feature_success(handle,
						   FFA_FEATURES_RET_REQ_NS_BIT);
		} else {
			/* v1.0: remember the SP's preference for later. */
			sp->ns_bit_requested = (input_properties &
					       FFA_FEATURES_RET_REQ_NS_BIT) !=
					       0U;
		}
		/* Echo the NS bit back only if the v1.0 SP asked for it. */
		if (sp->ns_bit_requested) {
			return ffa_feature_success(handle,
						   FFA_FEATURES_RET_REQ_NS_BIT);
		}
	}
	SMC_RET1(handle, FFA_SUCCESS_SMC32);
}
1295 
/*
 * Handler for FFA_FEATURES: report whether a given FF-A function ID is
 * implemented by this SPMC, taking into account the caller's world
 * (some ABIs are only valid from the secure or the normal world).
 * Feature IDs (bit 31 clear in w1) are not supported.
 */
static uint64_t ffa_features_handler(uint32_t smc_fid,
				     bool secure_origin,
				     uint64_t x1,
				     uint64_t x2,
				     uint64_t x3,
				     uint64_t x4,
				     void *cookie,
				     void *handle,
				     uint64_t flags)
{
	uint32_t function_id = (uint32_t) x1;
	uint32_t input_properties = (uint32_t) x2;

	/* Check if a Feature ID was requested. */
	if ((function_id & FFA_FEATURES_BIT31_MASK) == 0U) {
		/* We currently don't support any additional features. */
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/*
	 * Handle the cases where we have separate handlers due to additional
	 * properties.
	 */
	switch (function_id) {
	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
		/* NS-bit negotiation is delegated to a dedicated helper. */
		return ffa_features_retrieve_request(secure_origin,
						     input_properties,
						     handle);
	}

	/*
	 * We don't currently support additional input properties for these
	 * other ABIs therefore ensure this value is set to 0.
	 */
	if (input_properties != 0U) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_NOT_SUPPORTED);
	}

	/* Report if any other FF-A ABI is supported. */
	switch (function_id) {
	/* Supported features from both worlds. */
	case FFA_ERROR:
	case FFA_SUCCESS_SMC32:
	case FFA_INTERRUPT:
	case FFA_SPM_ID_GET:
	case FFA_ID_GET:
	case FFA_FEATURES:
	case FFA_VERSION:
	case FFA_RX_RELEASE:
	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
	case FFA_MSG_SEND_DIRECT_REQ2_SMC64:
	case FFA_PARTITION_INFO_GET:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_MEM_FRAG_TX:
	case FFA_MSG_RUN:

		/*
		 * We are relying on the fact that the other registers
		 * will be set to 0 as these values align with the
		 * currently implemented features of the SPMC. If this
		 * changes this function must be extended to handle
		 * reporting the additional functionality.
		 */

		SMC_RET1(handle, FFA_SUCCESS_SMC32);
		/* Execution stops here. */

	/* Supported ABIs only from the secure world. */
	case FFA_MEM_PERM_GET_SMC32:
	case FFA_MEM_PERM_GET_SMC64:
	case FFA_MEM_PERM_SET_SMC32:
	case FFA_MEM_PERM_SET_SMC64:
	/* these ABIs are only supported from S-EL0 SPs */
	/*
	 * Without S-EL0 SP support the MEM_PERM ABIs are reported as
	 * unsupported for every caller; with it, these cases fall through
	 * into the secure-world-only group below.
	 */
	#if !(SPMC_AT_EL3_SEL0_SP)
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	#endif
	/* fall through */

	case FFA_SECONDARY_EP_REGISTER_SMC64:
	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
	case FFA_MSG_SEND_DIRECT_RESP2_SMC64:
	case FFA_MEM_RELINQUISH:
	case FFA_MSG_WAIT:
	case FFA_CONSOLE_LOG_SMC32:
	case FFA_CONSOLE_LOG_SMC64:
		if (!secure_origin) {
			return spmc_ffa_error_return(handle,
				FFA_ERROR_NOT_SUPPORTED);
		}
		SMC_RET1(handle, FFA_SUCCESS_SMC32);
		/* Execution stops here. */

	/* Supported features only from the normal world. */
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
	case FFA_MEM_RECLAIM:
	case FFA_MEM_FRAG_RX:

		if (secure_origin) {
			return spmc_ffa_error_return(handle,
					FFA_ERROR_NOT_SUPPORTED);
		}
		SMC_RET1(handle, FFA_SUCCESS_SMC32);
		/* Execution stops here. */

	default:
		return spmc_ffa_error_return(handle,
					FFA_ERROR_NOT_SUPPORTED);
	}
}
1414 
1415 static uint64_t ffa_id_get_handler(uint32_t smc_fid,
1416 				   bool secure_origin,
1417 				   uint64_t x1,
1418 				   uint64_t x2,
1419 				   uint64_t x3,
1420 				   uint64_t x4,
1421 				   void *cookie,
1422 				   void *handle,
1423 				   uint64_t flags)
1424 {
1425 	if (secure_origin) {
1426 		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1427 			 spmc_get_current_sp_ctx()->sp_id);
1428 	} else {
1429 		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1430 			 spmc_get_hyp_ctx()->ns_ep_id);
1431 	}
1432 }
1433 
1434 /*
1435  * Enable an SP to query the ID assigned to the SPMC.
1436  */
1437 static uint64_t ffa_spm_id_get_handler(uint32_t smc_fid,
1438 				       bool secure_origin,
1439 				       uint64_t x1,
1440 				       uint64_t x2,
1441 				       uint64_t x3,
1442 				       uint64_t x4,
1443 				       void *cookie,
1444 				       void *handle,
1445 				       uint64_t flags)
1446 {
1447 	assert(x1 == 0UL);
1448 	assert(x2 == 0UL);
1449 	assert(x3 == 0UL);
1450 	assert(x4 == 0UL);
1451 	assert(SMC_GET_GP(handle, CTX_GPREG_X5) == 0UL);
1452 	assert(SMC_GET_GP(handle, CTX_GPREG_X6) == 0UL);
1453 	assert(SMC_GET_GP(handle, CTX_GPREG_X7) == 0UL);
1454 
1455 	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0, FFA_SPMC_ID);
1456 }
1457 
/*
 * Handler for FFA_RUN: allow the normal world to resume execution of a
 * target SP execution context (vCPU). Validates the target endpoint and
 * vCPU ID, checks/updates the execution context's runtime state under the
 * appropriate synchronization, then forwards execution to the SP.
 */
static uint64_t ffa_run_handler(uint32_t smc_fid,
				bool secure_origin,
				uint64_t x1,
				uint64_t x2,
				uint64_t x3,
				uint64_t x4,
				void *cookie,
				void *handle,
				uint64_t flags)
{
	struct secure_partition_desc *sp;
	/* w1 packs the target endpoint ID and vCPU ID. */
	uint16_t target_id = FFA_RUN_EP_ID(x1);
	uint16_t vcpu_id = FFA_RUN_VCPU_ID(x1);
	unsigned int idx;
	unsigned int *rt_state;
	unsigned int *rt_model;

	/* Can only be called from the normal world. */
	if (secure_origin) {
		ERROR("FFA_RUN can only be called from NWd.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Cannot run a Normal world partition. */
	if (ffa_is_normal_world_id(target_id)) {
		ERROR("Cannot run a NWd partition (0x%x).\n", target_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Check that the target SP exists. */
	sp = spmc_get_sp_ctx(target_id);
	if (sp == NULL) {
		ERROR("Unknown partition ID (0x%x).\n", target_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	idx = get_ec_index(sp);

	/* The requested vCPU must match the execution context index. */
	if (idx != vcpu_id) {
		ERROR("Cannot run vcpu %d != %d.\n", idx, vcpu_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}
	/* Runtime state of an S-EL0 SP is protected by its spinlock. */
	if (sp->runtime_el == S_EL0) {
		spin_lock(&sp->rt_state_lock);
	}
	rt_state = &((sp->ec[idx]).rt_state);
	rt_model = &((sp->ec[idx]).rt_model);
	if (*rt_state == RT_STATE_RUNNING) {
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}
		ERROR("Partition (0x%x) is already running.\n", target_id);
		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
	}

	/*
	 * Sanity check that if the execution context was not waiting then it
	 * was either in the direct request or the run partition runtime model.
	 */
	if (*rt_state == RT_STATE_PREEMPTED || *rt_state == RT_STATE_BLOCKED) {
		assert(*rt_model == RT_MODEL_RUN ||
		       *rt_model == RT_MODEL_DIR_REQ);
	}

	/*
	 * If the context was waiting then update the partition runtime model.
	 */
	if (*rt_state == RT_STATE_WAITING) {
		*rt_model = RT_MODEL_RUN;
	}

	/*
	 * Forward the request to the correct SP vCPU after updating
	 * its state.
	 */
	*rt_state = RT_STATE_RUNNING;

	/* Release the lock before entering the SP. */
	if (sp->runtime_el == S_EL0) {
		spin_unlock(&sp->rt_state_lock);
	}

	return spmc_smc_return(smc_fid, secure_origin, x1, 0, 0, 0,
			       handle, cookie, flags, target_id, sp->ffa_version);
}
1546 
1547 static uint64_t rx_release_handler(uint32_t smc_fid,
1548 				   bool secure_origin,
1549 				   uint64_t x1,
1550 				   uint64_t x2,
1551 				   uint64_t x3,
1552 				   uint64_t x4,
1553 				   void *cookie,
1554 				   void *handle,
1555 				   uint64_t flags)
1556 {
1557 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1558 
1559 	spin_lock(&mbox->lock);
1560 
1561 	if (mbox->state != MAILBOX_STATE_FULL) {
1562 		spin_unlock(&mbox->lock);
1563 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1564 	}
1565 
1566 	mbox->state = MAILBOX_STATE_EMPTY;
1567 	spin_unlock(&mbox->lock);
1568 
1569 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1570 }
1571 
1572 static uint64_t spmc_ffa_console_log(uint32_t smc_fid,
1573 				     bool secure_origin,
1574 				     uint64_t x1,
1575 				     uint64_t x2,
1576 				     uint64_t x3,
1577 				     uint64_t x4,
1578 				     void *cookie,
1579 				     void *handle,
1580 				     uint64_t flags)
1581 {
1582 	/* Maximum number of characters is 48: 6 registers of 8 bytes each. */
1583 	char chars[48] = {0};
1584 	size_t chars_max;
1585 	size_t chars_count = x1;
1586 
1587 	/* Does not support request from Nwd. */
1588 	if (!secure_origin) {
1589 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1590 	}
1591 
1592 	assert(smc_fid == FFA_CONSOLE_LOG_SMC32 || smc_fid == FFA_CONSOLE_LOG_SMC64);
1593 	if (smc_fid == FFA_CONSOLE_LOG_SMC32) {
1594 		uint32_t *registers = (uint32_t *)chars;
1595 		registers[0] = (uint32_t)x2;
1596 		registers[1] = (uint32_t)x3;
1597 		registers[2] = (uint32_t)x4;
1598 		registers[3] = (uint32_t)SMC_GET_GP(handle, CTX_GPREG_X5);
1599 		registers[4] = (uint32_t)SMC_GET_GP(handle, CTX_GPREG_X6);
1600 		registers[5] = (uint32_t)SMC_GET_GP(handle, CTX_GPREG_X7);
1601 		chars_max = 6 * sizeof(uint32_t);
1602 	} else {
1603 		uint64_t *registers = (uint64_t *)chars;
1604 		registers[0] = x2;
1605 		registers[1] = x3;
1606 		registers[2] = x4;
1607 		registers[3] = SMC_GET_GP(handle, CTX_GPREG_X5);
1608 		registers[4] = SMC_GET_GP(handle, CTX_GPREG_X6);
1609 		registers[5] = SMC_GET_GP(handle, CTX_GPREG_X7);
1610 		chars_max = 6 * sizeof(uint64_t);
1611 	}
1612 
1613 	if ((chars_count == 0) || (chars_count > chars_max)) {
1614 		return spmc_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
1615 	}
1616 
1617 	for (size_t i = 0; (i < chars_count) && (chars[i] != '\0'); i++) {
1618 		putchar(chars[i]);
1619 	}
1620 
1621 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1622 }
1623 
1624 /*
1625  * Perform initial validation on the provided secondary entry point.
1626  * For now ensure it does not lie within the BL31 Image or the SP's
1627  * RX/TX buffers as these are mapped within EL3.
1628  * TODO: perform validation for additional invalid memory regions.
1629  */
1630 static int validate_secondary_ep(uintptr_t ep, struct secure_partition_desc *sp)
1631 {
1632 	struct mailbox *mb;
1633 	uintptr_t buffer_size;
1634 	uintptr_t sp_rx_buffer;
1635 	uintptr_t sp_tx_buffer;
1636 	uintptr_t sp_rx_buffer_limit;
1637 	uintptr_t sp_tx_buffer_limit;
1638 
1639 	mb = &sp->mailbox;
1640 	buffer_size = (uintptr_t) (mb->rxtx_page_count * FFA_PAGE_SIZE);
1641 	sp_rx_buffer = (uintptr_t) mb->rx_buffer;
1642 	sp_tx_buffer = (uintptr_t) mb->tx_buffer;
1643 	sp_rx_buffer_limit = sp_rx_buffer + buffer_size;
1644 	sp_tx_buffer_limit = sp_tx_buffer + buffer_size;
1645 
1646 	/*
1647 	 * Check if the entry point lies within BL31, or the
1648 	 * SP's RX or TX buffer.
1649 	 */
1650 	if ((ep >= BL31_BASE && ep < BL31_LIMIT) ||
1651 	    (ep >= sp_rx_buffer && ep < sp_rx_buffer_limit) ||
1652 	    (ep >= sp_tx_buffer && ep < sp_tx_buffer_limit)) {
1653 		return -EINVAL;
1654 	}
1655 	return 0;
1656 }
1657 
1658 /*******************************************************************************
1659  * This function handles the FFA_SECONDARY_EP_REGISTER SMC to allow an SP to
1660  *  register an entry point for initialization during a secondary cold boot.
1661  ******************************************************************************/
1662 static uint64_t ffa_sec_ep_register_handler(uint32_t smc_fid,
1663 					    bool secure_origin,
1664 					    uint64_t x1,
1665 					    uint64_t x2,
1666 					    uint64_t x3,
1667 					    uint64_t x4,
1668 					    void *cookie,
1669 					    void *handle,
1670 					    uint64_t flags)
1671 {
1672 	struct secure_partition_desc *sp;
1673 	struct sp_exec_ctx *sp_ctx;
1674 
1675 	/* This request cannot originate from the Normal world. */
1676 	if (!secure_origin) {
1677 		WARN("%s: Can only be called from SWd.\n", __func__);
1678 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1679 	}
1680 
1681 	/* Get the context of the current SP. */
1682 	sp = spmc_get_current_sp_ctx();
1683 	if (sp == NULL) {
1684 		WARN("%s: Cannot find SP context.\n", __func__);
1685 		return spmc_ffa_error_return(handle,
1686 					     FFA_ERROR_INVALID_PARAMETER);
1687 	}
1688 
1689 	/* Only an S-EL1 SP should be invoking this ABI. */
1690 	if (sp->runtime_el != S_EL1) {
1691 		WARN("%s: Can only be called for a S-EL1 SP.\n", __func__);
1692 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1693 	}
1694 
1695 	/* Ensure the SP is in its initialization state. */
1696 	sp_ctx = spmc_get_sp_ec(sp);
1697 	if (sp_ctx->rt_model != RT_MODEL_INIT) {
1698 		WARN("%s: Can only be called during SP initialization.\n",
1699 		     __func__);
1700 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1701 	}
1702 
1703 	/* Perform initial validation of the secondary entry point. */
1704 	if (validate_secondary_ep(x1, sp)) {
1705 		WARN("%s: Invalid entry point provided (0x%lx).\n",
1706 		     __func__, x1);
1707 		return spmc_ffa_error_return(handle,
1708 					     FFA_ERROR_INVALID_PARAMETER);
1709 	}
1710 
1711 	/*
1712 	 * Update the secondary entrypoint in SP context.
1713 	 * We don't need a lock here as during partition initialization there
1714 	 * will only be a single core online.
1715 	 */
1716 	sp->secondary_ep = x1;
1717 	VERBOSE("%s: 0x%lx\n", __func__, sp->secondary_ep);
1718 
1719 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1720 }
1721 
1722 /*******************************************************************************
1723  * Permissions are encoded using a different format in the FFA_MEM_PERM_* ABIs
1724  * than in the Trusted Firmware, where the mmap_attr_t enum type is used. This
1725  * function converts a permission value from the FF-A format to the mmap_attr_t
1726  * format by setting MT_RW/MT_RO, MT_USER/MT_PRIVILEGED and
1727  * MT_EXECUTE/MT_EXECUTE_NEVER. The other fields are left as 0 because they are
1728  * ignored by the function xlat_change_mem_attributes_ctx().
1729  ******************************************************************************/
1730 static unsigned int ffa_perm_to_mmap_perm(unsigned int perms)
1731 {
1732 	unsigned int tf_attr = 0U;
1733 	unsigned int access;
1734 
1735 	/* Deal with data access permissions first. */
1736 	access = (perms & FFA_MEM_PERM_DATA_MASK) >> FFA_MEM_PERM_DATA_SHIFT;
1737 
1738 	switch (access) {
1739 	case FFA_MEM_PERM_DATA_RW:
1740 		/* Return 0 if the execute is set with RW. */
1741 		if ((perms & FFA_MEM_PERM_INST_NON_EXEC) != 0) {
1742 			tf_attr |= MT_RW | MT_USER | MT_EXECUTE_NEVER;
1743 		}
1744 		break;
1745 
1746 	case FFA_MEM_PERM_DATA_RO:
1747 		tf_attr |= MT_RO | MT_USER;
1748 		/* Deal with the instruction access permissions next. */
1749 		if ((perms & FFA_MEM_PERM_INST_NON_EXEC) == 0) {
1750 			tf_attr |= MT_EXECUTE;
1751 		} else {
1752 			tf_attr |= MT_EXECUTE_NEVER;
1753 		}
1754 		break;
1755 
1756 	case FFA_MEM_PERM_DATA_NA:
1757 	default:
1758 		return tf_attr;
1759 	}
1760 
1761 	return tf_attr;
1762 }
1763 
1764 /*******************************************************************************
1765  * Handler to set the permissions of a set of contiguous pages of a S-EL0 SP
1766  ******************************************************************************/
1767 static uint64_t ffa_mem_perm_set_handler(uint32_t smc_fid,
1768 					 bool secure_origin,
1769 					 uint64_t x1,
1770 					 uint64_t x2,
1771 					 uint64_t x3,
1772 					 uint64_t x4,
1773 					 void *cookie,
1774 					 void *handle,
1775 					 uint64_t flags)
1776 {
1777 	struct secure_partition_desc *sp;
1778 	unsigned int idx;
1779 	uintptr_t base_va = (uintptr_t) x1;
1780 	size_t size = (size_t)(x2 * PAGE_SIZE);
1781 	uint32_t tf_attr;
1782 	int ret;
1783 
1784 	/* This request cannot originate from the Normal world. */
1785 	if (!secure_origin) {
1786 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1787 	}
1788 
1789 	if (size == 0) {
1790 		return spmc_ffa_error_return(handle,
1791 					     FFA_ERROR_INVALID_PARAMETER);
1792 	}
1793 
1794 	/* Get the context of the current SP. */
1795 	sp = spmc_get_current_sp_ctx();
1796 	if (sp == NULL) {
1797 		return spmc_ffa_error_return(handle,
1798 					     FFA_ERROR_INVALID_PARAMETER);
1799 	}
1800 
1801 	/* A S-EL1 SP has no business invoking this ABI. */
1802 	if (sp->runtime_el == S_EL1) {
1803 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1804 	}
1805 
1806 	if ((x3 & ~((uint64_t)FFA_MEM_PERM_MASK)) != 0) {
1807 		return spmc_ffa_error_return(handle,
1808 					     FFA_ERROR_INVALID_PARAMETER);
1809 	}
1810 
1811 	/* Get the execution context of the calling SP. */
1812 	idx = get_ec_index(sp);
1813 
1814 	/*
1815 	 * Ensure that the S-EL0 SP is initialising itself. We do not need to
1816 	 * synchronise this operation through a spinlock since a S-EL0 SP is UP
1817 	 * and can only be initialising on this cpu.
1818 	 */
1819 	if (sp->ec[idx].rt_model != RT_MODEL_INIT) {
1820 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1821 	}
1822 
1823 	VERBOSE("Setting memory permissions:\n");
1824 	VERBOSE("  Start address  : 0x%lx\n", base_va);
1825 	VERBOSE("  Number of pages: %lu (%zu bytes)\n", x2, size);
1826 	VERBOSE("  Attributes     : 0x%x\n", (uint32_t)x3);
1827 
1828 	/* Convert inbound permissions to TF-A permission attributes */
1829 	tf_attr = ffa_perm_to_mmap_perm((unsigned int)x3);
1830 	if (tf_attr == 0U) {
1831 		return spmc_ffa_error_return(handle,
1832 					     FFA_ERROR_INVALID_PARAMETER);
1833 	}
1834 
1835 	/* Request the change in permissions */
1836 	ret = xlat_change_mem_attributes_ctx(sp->xlat_ctx_handle,
1837 					     base_va, size, tf_attr);
1838 	if (ret != 0) {
1839 		return spmc_ffa_error_return(handle,
1840 					     FFA_ERROR_INVALID_PARAMETER);
1841 	}
1842 
1843 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1844 }
1845 
1846 /*******************************************************************************
1847  * Permissions are encoded using a different format in the FFA_MEM_PERM_* ABIs
1848  * than in the Trusted Firmware, where the mmap_attr_t enum type is used. This
1849  * function converts a permission value from the mmap_attr_t format to the FF-A
1850  * format.
1851  ******************************************************************************/
1852 static unsigned int mmap_perm_to_ffa_perm(unsigned int attr)
1853 {
1854 	unsigned int perms = 0U;
1855 	unsigned int data_access;
1856 
1857 	if ((attr & MT_USER) == 0) {
1858 		/* No access from EL0. */
1859 		data_access = FFA_MEM_PERM_DATA_NA;
1860 	} else {
1861 		if ((attr & MT_RW) != 0) {
1862 			data_access = FFA_MEM_PERM_DATA_RW;
1863 		} else {
1864 			data_access = FFA_MEM_PERM_DATA_RO;
1865 		}
1866 	}
1867 
1868 	perms |= (data_access & FFA_MEM_PERM_DATA_MASK)
1869 		<< FFA_MEM_PERM_DATA_SHIFT;
1870 
1871 	if ((attr & MT_EXECUTE_NEVER) != 0U) {
1872 		perms |= FFA_MEM_PERM_INST_NON_EXEC;
1873 	}
1874 
1875 	return perms;
1876 }
1877 
1878 /*******************************************************************************
1879  * Handler to get the permissions of a set of contiguous pages of a S-EL0 SP
1880  ******************************************************************************/
1881 static uint64_t ffa_mem_perm_get_handler(uint32_t smc_fid,
1882 					 bool secure_origin,
1883 					 uint64_t x1,
1884 					 uint64_t x2,
1885 					 uint64_t x3,
1886 					 uint64_t x4,
1887 					 void *cookie,
1888 					 void *handle,
1889 					 uint64_t flags)
1890 {
1891 	struct secure_partition_desc *sp;
1892 	unsigned int idx;
1893 	uintptr_t base_va = (uintptr_t)x1;
1894 	uint64_t max_page_count = x2 + 1;
1895 	uint64_t page_count = 0;
1896 	uint32_t base_page_attr = 0;
1897 	uint32_t page_attr = 0;
1898 	unsigned int table_level;
1899 	int ret;
1900 
1901 	/* This request cannot originate from the Normal world. */
1902 	if (!secure_origin) {
1903 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1904 	}
1905 
1906 	/* Get the context of the current SP. */
1907 	sp = spmc_get_current_sp_ctx();
1908 	if (sp == NULL) {
1909 		return spmc_ffa_error_return(handle,
1910 					     FFA_ERROR_INVALID_PARAMETER);
1911 	}
1912 
1913 	/* A S-EL1 SP has no business invoking this ABI. */
1914 	if (sp->runtime_el == S_EL1) {
1915 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1916 	}
1917 
1918 	/* Get the execution context of the calling SP. */
1919 	idx = get_ec_index(sp);
1920 
1921 	/*
1922 	 * Ensure that the S-EL0 SP is initialising itself. We do not need to
1923 	 * synchronise this operation through a spinlock since a S-EL0 SP is UP
1924 	 * and can only be initialising on this cpu.
1925 	 */
1926 	if (sp->ec[idx].rt_model != RT_MODEL_INIT) {
1927 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1928 	}
1929 
1930 	base_va &= ~(PAGE_SIZE_MASK);
1931 
1932 	/* Request the permissions */
1933 	ret = xlat_get_mem_attributes_ctx(sp->xlat_ctx_handle, base_va,
1934 			&base_page_attr, &table_level);
1935 	if (ret != 0) {
1936 		return spmc_ffa_error_return(handle,
1937 					     FFA_ERROR_INVALID_PARAMETER);
1938 	}
1939 
1940 	/*
1941 	 * Caculate how many pages in this block entry from base_va including
1942 	 * its page.
1943 	 */
1944 	page_count = ((XLAT_BLOCK_SIZE(table_level) -
1945 			(base_va & XLAT_BLOCK_MASK(table_level))) >> PAGE_SIZE_SHIFT);
1946 	base_va += XLAT_BLOCK_SIZE(table_level);
1947 
1948 	while ((page_count < max_page_count) && (base_va != 0x00)) {
1949 		ret = xlat_get_mem_attributes_ctx(sp->xlat_ctx_handle, base_va,
1950 				&page_attr, &table_level);
1951 		if (ret != 0) {
1952 			return spmc_ffa_error_return(handle,
1953 						     FFA_ERROR_INVALID_PARAMETER);
1954 		}
1955 
1956 		if (page_attr != base_page_attr) {
1957 			break;
1958 		}
1959 
1960 		base_va += XLAT_BLOCK_SIZE(table_level);
1961 		page_count += (XLAT_BLOCK_SIZE(table_level) >> PAGE_SIZE_SHIFT);
1962 	}
1963 
1964 	if (page_count > max_page_count) {
1965 		page_count = max_page_count;
1966 	}
1967 
1968 	/* Convert TF-A permission to FF-A permissions attributes. */
1969 	x2 = mmap_perm_to_ffa_perm(base_page_attr);
1970 
1971 	/* x3 should be page count - 1 */
1972 	SMC_RET4(handle, FFA_SUCCESS_SMC32, 0, x2, --page_count);
1973 }
1974 
1975 /*******************************************************************************
1976  * This function will parse the Secure Partition Manifest. From manifest, it
1977  * will fetch details for preparing Secure partition image context and secure
1978  * partition image boot arguments if any.
1979  ******************************************************************************/
1980 static int sp_manifest_parse(void *sp_manifest, int offset,
1981 			     struct secure_partition_desc *sp,
1982 			     entry_point_info_t *ep_info,
1983 			     int32_t *boot_info_reg)
1984 {
1985 	int32_t ret, node;
1986 	uint32_t config_32;
1987 
1988 	/*
1989 	 * Look for the mandatory fields that are expected to be present in
1990 	 * the SP manifests.
1991 	 */
1992 	node = fdt_path_offset(sp_manifest, "/");
1993 	if (node < 0) {
1994 		ERROR("Did not find root node.\n");
1995 		return node;
1996 	}
1997 
1998 	ret = fdt_read_uint32_array(sp_manifest, node, "uuid",
1999 				    ARRAY_SIZE(sp->uuid), sp->uuid);
2000 	if (ret != 0) {
2001 		ERROR("Missing Secure Partition UUID.\n");
2002 		return ret;
2003 	}
2004 
2005 	ret = fdt_read_uint32(sp_manifest, node, "exception-level", &config_32);
2006 	if (ret != 0) {
2007 		ERROR("Missing SP Exception Level information.\n");
2008 		return ret;
2009 	}
2010 
2011 	sp->runtime_el = config_32;
2012 
2013 	ret = fdt_read_uint32(sp_manifest, node, "ffa-version", &config_32);
2014 	if (ret != 0) {
2015 		ERROR("Missing Secure Partition FF-A Version.\n");
2016 		return ret;
2017 	}
2018 
2019 	sp->ffa_version = config_32;
2020 
2021 	ret = fdt_read_uint32(sp_manifest, node, "execution-state", &config_32);
2022 	if (ret != 0) {
2023 		ERROR("Missing Secure Partition Execution State.\n");
2024 		return ret;
2025 	}
2026 
2027 	sp->execution_state = config_32;
2028 
2029 	ret = fdt_read_uint32(sp_manifest, node,
2030 			      "messaging-method", &config_32);
2031 	if (ret != 0) {
2032 		ERROR("Missing Secure Partition messaging method.\n");
2033 		return ret;
2034 	}
2035 
2036 	/* Validate this entry, we currently only support direct messaging. */
2037 	if ((config_32 & ~(FFA_PARTITION_DIRECT_REQ_RECV |
2038 			  FFA_PARTITION_DIRECT_REQ_SEND |
2039 			  FFA_PARTITION_DIRECT_REQ2_RECV |
2040 			  FFA_PARTITION_DIRECT_REQ2_SEND)) != 0U) {
2041 		WARN("Invalid Secure Partition messaging method (0x%x)\n",
2042 		     config_32);
2043 		return -EINVAL;
2044 	}
2045 
2046 	sp->properties = config_32;
2047 
2048 	ret = fdt_read_uint32(sp_manifest, node,
2049 			      "execution-ctx-count", &config_32);
2050 
2051 	if (ret != 0) {
2052 		ERROR("Missing SP Execution Context Count.\n");
2053 		return ret;
2054 	}
2055 
2056 	/*
2057 	 * Ensure this field is set correctly in the manifest however
2058 	 * since this is currently a hardcoded value for S-EL1 partitions
2059 	 * we don't need to save it here, just validate.
2060 	 */
2061 	if ((sp->runtime_el == S_EL1) && (config_32 != PLATFORM_CORE_COUNT)) {
2062 		ERROR("SP Execution Context Count (%u) must be %u.\n",
2063 			config_32, PLATFORM_CORE_COUNT);
2064 		return -EINVAL;
2065 	}
2066 
2067 	/*
2068 	 * Look for the optional fields that are expected to be present in
2069 	 * an SP manifest.
2070 	 */
2071 	ret = fdt_read_uint32(sp_manifest, node, "id", &config_32);
2072 	if (ret != 0) {
2073 		WARN("Missing Secure Partition ID.\n");
2074 	} else {
2075 		if (!is_ffa_secure_id_valid(config_32)) {
2076 			ERROR("Invalid Secure Partition ID (0x%x).\n",
2077 			      config_32);
2078 			return -EINVAL;
2079 		}
2080 		sp->sp_id = config_32;
2081 	}
2082 
2083 	ret = fdt_read_uint32(sp_manifest, node,
2084 			      "power-management-messages", &config_32);
2085 	if (ret != 0) {
2086 		WARN("Missing Power Management Messages entry.\n");
2087 	} else {
2088 		if ((sp->runtime_el == S_EL0) && (config_32 != 0)) {
2089 			ERROR("Power messages not supported for S-EL0 SP\n");
2090 			return -EINVAL;
2091 		}
2092 
2093 		/*
2094 		 * Ensure only the currently supported power messages have
2095 		 * been requested.
2096 		 */
2097 		if (config_32 & ~(FFA_PM_MSG_SUB_CPU_OFF |
2098 				  FFA_PM_MSG_SUB_CPU_SUSPEND |
2099 				  FFA_PM_MSG_SUB_CPU_SUSPEND_RESUME)) {
2100 			ERROR("Requested unsupported PM messages (%x)\n",
2101 			      config_32);
2102 			return -EINVAL;
2103 		}
2104 		sp->pwr_mgmt_msgs = config_32;
2105 	}
2106 
2107 	ret = fdt_read_uint32(sp_manifest, node,
2108 			      "gp-register-num", &config_32);
2109 	if (ret != 0) {
2110 		WARN("Missing boot information register.\n");
2111 	} else {
2112 		/* Check if a register number between 0-3 is specified. */
2113 		if (config_32 < 4) {
2114 			*boot_info_reg = config_32;
2115 		} else {
2116 			WARN("Incorrect boot information register (%u).\n",
2117 			     config_32);
2118 		}
2119 	}
2120 
2121 	return 0;
2122 }
2123 
2124 /*******************************************************************************
2125  * This function gets the Secure Partition Manifest base and maps the manifest
2126  * region.
2127  * Currently only one Secure Partition manifest is considered which is used to
2128  * prepare the context for the single Secure Partition.
2129  ******************************************************************************/
2130 static int find_and_prepare_sp_context(void)
2131 {
2132 	void *sp_manifest;
2133 	uintptr_t manifest_base;
2134 	uintptr_t manifest_base_align;
2135 	entry_point_info_t *next_image_ep_info;
2136 	int32_t ret, boot_info_reg = -1;
2137 	struct secure_partition_desc *sp;
2138 
2139 	next_image_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
2140 	if (next_image_ep_info == NULL) {
2141 		WARN("No Secure Partition image provided by BL2.\n");
2142 		return -ENOENT;
2143 	}
2144 
2145 	sp_manifest = (void *)next_image_ep_info->args.arg0;
2146 	if (sp_manifest == NULL) {
2147 		WARN("Secure Partition manifest absent.\n");
2148 		return -ENOENT;
2149 	}
2150 
2151 	manifest_base = (uintptr_t)sp_manifest;
2152 	manifest_base_align = page_align(manifest_base, DOWN);
2153 
2154 	/*
2155 	 * Map the secure partition manifest region in the EL3 translation
2156 	 * regime.
2157 	 * Map an area equal to (2 * PAGE_SIZE) for now. During manifest base
2158 	 * alignment the region of 1 PAGE_SIZE from manifest align base may
2159 	 * not completely accommodate the secure partition manifest region.
2160 	 */
2161 	ret = mmap_add_dynamic_region((unsigned long long)manifest_base_align,
2162 				      manifest_base_align,
2163 				      PAGE_SIZE * 2,
2164 				      MT_RO_DATA);
2165 	if (ret != 0) {
2166 		ERROR("Error while mapping SP manifest (%d).\n", ret);
2167 		return ret;
2168 	}
2169 
2170 	ret = fdt_node_offset_by_compatible(sp_manifest, -1,
2171 					    "arm,ffa-manifest-1.0");
2172 	if (ret < 0) {
2173 		ERROR("Error happened in SP manifest reading.\n");
2174 		return -EINVAL;
2175 	}
2176 
2177 	/*
2178 	 * Store the size of the manifest so that it can be used later to pass
2179 	 * the manifest as boot information later.
2180 	 */
2181 	next_image_ep_info->args.arg1 = fdt_totalsize(sp_manifest);
2182 	INFO("Manifest adr = %lx , size = %lu bytes\n", manifest_base,
2183 	     next_image_ep_info->args.arg1);
2184 
2185 	/*
2186 	 * Select an SP descriptor for initialising the partition's execution
2187 	 * context on the primary CPU.
2188 	 */
2189 	sp = spmc_get_current_sp_ctx();
2190 
2191 #if SPMC_AT_EL3_SEL0_SP
2192 	/* Assign translation tables context. */
2193 	sp_desc->xlat_ctx_handle = spm_get_sp_xlat_context();
2194 
2195 #endif /* SPMC_AT_EL3_SEL0_SP */
2196 	/* Initialize entry point information for the SP */
2197 	SET_PARAM_HEAD(next_image_ep_info, PARAM_EP, VERSION_1,
2198 		       SECURE | EP_ST_ENABLE);
2199 
2200 	/* Parse the SP manifest. */
2201 	ret = sp_manifest_parse(sp_manifest, ret, sp, next_image_ep_info,
2202 				&boot_info_reg);
2203 	if (ret != 0) {
2204 		ERROR("Error in Secure Partition manifest parsing.\n");
2205 		return ret;
2206 	}
2207 
2208 	/* Perform any common initialisation. */
2209 	spmc_sp_common_setup(sp, next_image_ep_info, boot_info_reg);
2210 
2211 	/* Perform any initialisation specific to S-EL1 SPs. */
2212 	if (sp->runtime_el == S_EL1) {
2213 		spmc_el1_sp_setup(sp, next_image_ep_info);
2214 		spmc_sp_common_ep_commit(sp, next_image_ep_info);
2215 	}
2216 #if SPMC_AT_EL3_SEL0_SP
2217 	/* Perform any initialisation specific to S-EL0 SPs. */
2218 	else if (sp->runtime_el == S_EL0) {
2219 		/* Setup spsr in endpoint info for common context management routine. */
2220 		spmc_el0_sp_spsr_setup(next_image_ep_info);
2221 
2222 		spmc_sp_common_ep_commit(sp, next_image_ep_info);
2223 
2224 		/*
2225 		 * Perform any initialisation specific to S-EL0 not set by common
2226 		 * context management routine.
2227 		 */
2228 		spmc_el0_sp_setup(sp, boot_info_reg, sp_manifest);
2229 	}
2230 #endif /* SPMC_AT_EL3_SEL0_SP */
2231 	else {
2232 		ERROR("Unexpected runtime EL: %u\n", sp->runtime_el);
2233 		return -EINVAL;
2234 	}
2235 
2236 	return 0;
2237 }
2238 
2239 /*******************************************************************************
2240  * This function takes an SP context pointer and performs a synchronous entry
2241  * into it.
2242  ******************************************************************************/
2243 static int32_t logical_sp_init(void)
2244 {
2245 	int32_t rc = 0;
2246 	struct el3_lp_desc *el3_lp_descs;
2247 
2248 	/* Perform initial validation of the Logical Partitions. */
2249 	rc = el3_sp_desc_validate();
2250 	if (rc != 0) {
2251 		ERROR("Logical Partition validation failed!\n");
2252 		return rc;
2253 	}
2254 
2255 	el3_lp_descs = get_el3_lp_array();
2256 
2257 	INFO("Logical Secure Partition init start.\n");
2258 	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
2259 		rc = el3_lp_descs[i].init();
2260 		if (rc != 0) {
2261 			ERROR("Logical SP (0x%x) Failed to Initialize\n",
2262 			      el3_lp_descs[i].sp_id);
2263 			return rc;
2264 		}
2265 		VERBOSE("Logical SP (0x%x) Initialized\n",
2266 			      el3_lp_descs[i].sp_id);
2267 	}
2268 
2269 	INFO("Logical Secure Partition init completed.\n");
2270 
2271 	return rc;
2272 }
2273 
/*******************************************************************************
 * Perform a synchronous entry into the SP execution context 'ec' on the
 * current CPU. Returns the value produced by spm_secure_partition_enter()
 * when the SP exits back to EL3.
 ******************************************************************************/
uint64_t spmc_sp_synchronous_entry(struct sp_exec_ctx *ec)
{
	uint64_t rc;

	assert(ec != NULL);

	/* Assign the context of the SP to this CPU */
	cm_set_context(&(ec->cpu_ctx), SECURE);

	/* Restore the context assigned above */
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);

	/* Invalidate TLBs at EL1. */
	tlbivmalle1();
	dsbish();

	/* Enter Secure Partition */
	rc = spm_secure_partition_enter(&ec->c_rt_ctx);

	/* Save secure state */
	cm_el1_sysregs_context_save(SECURE);

	return rc;
}
2299 
2300 /*******************************************************************************
2301  * SPMC Helper Functions.
2302  ******************************************************************************/
2303 static int32_t sp_init(void)
2304 {
2305 	uint64_t rc;
2306 	struct secure_partition_desc *sp;
2307 	struct sp_exec_ctx *ec;
2308 
2309 	sp = spmc_get_current_sp_ctx();
2310 	ec = spmc_get_sp_ec(sp);
2311 	ec->rt_model = RT_MODEL_INIT;
2312 	ec->rt_state = RT_STATE_RUNNING;
2313 
2314 	INFO("Secure Partition (0x%x) init start.\n", sp->sp_id);
2315 
2316 	rc = spmc_sp_synchronous_entry(ec);
2317 	if (rc != 0) {
2318 		/* Indicate SP init was not successful. */
2319 		ERROR("SP (0x%x) failed to initialize (%lu).\n",
2320 		      sp->sp_id, rc);
2321 		return 0;
2322 	}
2323 
2324 	ec->rt_state = RT_STATE_WAITING;
2325 	INFO("Secure Partition initialized.\n");
2326 
2327 	return 1;
2328 }
2329 
2330 static void initalize_sp_descs(void)
2331 {
2332 	struct secure_partition_desc *sp;
2333 
2334 	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
2335 		sp = &sp_desc[i];
2336 		sp->sp_id = INV_SP_ID;
2337 		sp->mailbox.rx_buffer = NULL;
2338 		sp->mailbox.tx_buffer = NULL;
2339 		sp->mailbox.state = MAILBOX_STATE_EMPTY;
2340 		sp->secondary_ep = 0;
2341 	}
2342 }
2343 
2344 static void initalize_ns_ep_descs(void)
2345 {
2346 	struct ns_endpoint_desc *ns_ep;
2347 
2348 	for (unsigned int i = 0U; i < NS_PARTITION_COUNT; i++) {
2349 		ns_ep = &ns_ep_desc[i];
2350 		/*
2351 		 * Clashes with the Hypervisor ID but will not be a
2352 		 * problem in practice.
2353 		 */
2354 		ns_ep->ns_ep_id = 0;
2355 		ns_ep->ffa_version = 0;
2356 		ns_ep->mailbox.rx_buffer = NULL;
2357 		ns_ep->mailbox.tx_buffer = NULL;
2358 		ns_ep->mailbox.state = MAILBOX_STATE_EMPTY;
2359 	}
2360 }
2361 
2362 /*******************************************************************************
2363  * Initialize SPMC attributes for the SPMD.
2364  ******************************************************************************/
2365 void spmc_populate_attrs(spmc_manifest_attribute_t *spmc_attrs)
2366 {
2367 	spmc_attrs->major_version = FFA_VERSION_MAJOR;
2368 	spmc_attrs->minor_version = FFA_VERSION_MINOR;
2369 	spmc_attrs->exec_state = MODE_RW_64;
2370 	spmc_attrs->spmc_id = FFA_SPMC_ID;
2371 }
2372 
2373 /*******************************************************************************
2374  * Initialize contexts of all Secure Partitions.
2375  ******************************************************************************/
2376 int32_t spmc_setup(void)
2377 {
2378 	int32_t ret;
2379 	uint32_t flags;
2380 
2381 	/* Initialize endpoint descriptors */
2382 	initalize_sp_descs();
2383 	initalize_ns_ep_descs();
2384 
2385 	/*
2386 	 * Retrieve the information of the datastore for tracking shared memory
2387 	 * requests allocated by platform code and zero the region if available.
2388 	 */
2389 	ret = plat_spmc_shmem_datastore_get(&spmc_shmem_obj_state.data,
2390 					    &spmc_shmem_obj_state.data_size);
2391 	if (ret != 0) {
2392 		ERROR("Failed to obtain memory descriptor backing store!\n");
2393 		return ret;
2394 	}
2395 	memset(spmc_shmem_obj_state.data, 0, spmc_shmem_obj_state.data_size);
2396 
2397 	/* Setup logical SPs. */
2398 	ret = logical_sp_init();
2399 	if (ret != 0) {
2400 		ERROR("Failed to initialize Logical Partitions.\n");
2401 		return ret;
2402 	}
2403 
2404 	/* Perform physical SP setup. */
2405 
2406 	/* Disable MMU at EL1 (initialized by BL2) */
2407 	disable_mmu_icache_el1();
2408 
2409 	/* Initialize context of the SP */
2410 	INFO("Secure Partition context setup start.\n");
2411 
2412 	ret = find_and_prepare_sp_context();
2413 	if (ret != 0) {
2414 		ERROR("Error in SP finding and context preparation.\n");
2415 		return ret;
2416 	}
2417 
2418 	/* Register power management hooks with PSCI */
2419 	psci_register_spd_pm_hook(&spmc_pm);
2420 
2421 	/*
2422 	 * Register an interrupt handler for S-EL1 interrupts
2423 	 * when generated during code executing in the
2424 	 * non-secure state.
2425 	 */
2426 	flags = 0;
2427 	set_interrupt_rm_flag(flags, NON_SECURE);
2428 	ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
2429 					      spmc_sp_interrupt_handler,
2430 					      flags);
2431 	if (ret != 0) {
2432 		ERROR("Failed to register interrupt handler! (%d)\n", ret);
2433 		panic();
2434 	}
2435 
2436 	/* Register init function for deferred init.  */
2437 	bl31_register_bl32_init(&sp_init);
2438 
2439 	INFO("Secure Partition setup done.\n");
2440 
2441 	return 0;
2442 }
2443 
2444 /*******************************************************************************
2445  * Secure Partition Manager SMC handler.
2446  ******************************************************************************/
2447 uint64_t spmc_smc_handler(uint32_t smc_fid,
2448 			  bool secure_origin,
2449 			  uint64_t x1,
2450 			  uint64_t x2,
2451 			  uint64_t x3,
2452 			  uint64_t x4,
2453 			  void *cookie,
2454 			  void *handle,
2455 			  uint64_t flags)
2456 {
2457 	switch (smc_fid) {
2458 
2459 	case FFA_VERSION:
2460 		return ffa_version_handler(smc_fid, secure_origin, x1, x2, x3,
2461 					   x4, cookie, handle, flags);
2462 
2463 	case FFA_SPM_ID_GET:
2464 		return ffa_spm_id_get_handler(smc_fid, secure_origin, x1, x2,
2465 					     x3, x4, cookie, handle, flags);
2466 
2467 	case FFA_ID_GET:
2468 		return ffa_id_get_handler(smc_fid, secure_origin, x1, x2, x3,
2469 					  x4, cookie, handle, flags);
2470 
2471 	case FFA_FEATURES:
2472 		return ffa_features_handler(smc_fid, secure_origin, x1, x2, x3,
2473 					    x4, cookie, handle, flags);
2474 
2475 	case FFA_SECONDARY_EP_REGISTER_SMC64:
2476 		return ffa_sec_ep_register_handler(smc_fid, secure_origin, x1,
2477 						   x2, x3, x4, cookie, handle,
2478 						   flags);
2479 
2480 	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
2481 	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
2482 	case FFA_MSG_SEND_DIRECT_REQ2_SMC64:
2483 		return direct_req_smc_handler(smc_fid, secure_origin, x1, x2,
2484 					      x3, x4, cookie, handle, flags);
2485 
2486 	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
2487 	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
2488 	case FFA_MSG_SEND_DIRECT_RESP2_SMC64:
2489 		return direct_resp_smc_handler(smc_fid, secure_origin, x1, x2,
2490 					       x3, x4, cookie, handle, flags);
2491 
2492 	case FFA_RXTX_MAP_SMC32:
2493 	case FFA_RXTX_MAP_SMC64:
2494 		return rxtx_map_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2495 					cookie, handle, flags);
2496 
2497 	case FFA_RXTX_UNMAP:
2498 		return rxtx_unmap_handler(smc_fid, secure_origin, x1, x2, x3,
2499 					  x4, cookie, handle, flags);
2500 
2501 	case FFA_PARTITION_INFO_GET:
2502 		return partition_info_get_handler(smc_fid, secure_origin, x1,
2503 						  x2, x3, x4, cookie, handle,
2504 						  flags);
2505 
2506 	case FFA_RX_RELEASE:
2507 		return rx_release_handler(smc_fid, secure_origin, x1, x2, x3,
2508 					  x4, cookie, handle, flags);
2509 
2510 	case FFA_MSG_WAIT:
2511 		return msg_wait_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2512 					cookie, handle, flags);
2513 
2514 	case FFA_ERROR:
2515 		return ffa_error_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2516 					cookie, handle, flags);
2517 
2518 	case FFA_MSG_RUN:
2519 		return ffa_run_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2520 				       cookie, handle, flags);
2521 
2522 	case FFA_MEM_SHARE_SMC32:
2523 	case FFA_MEM_SHARE_SMC64:
2524 	case FFA_MEM_LEND_SMC32:
2525 	case FFA_MEM_LEND_SMC64:
2526 		return spmc_ffa_mem_send(smc_fid, secure_origin, x1, x2, x3, x4,
2527 					 cookie, handle, flags);
2528 
2529 	case FFA_MEM_FRAG_TX:
2530 		return spmc_ffa_mem_frag_tx(smc_fid, secure_origin, x1, x2, x3,
2531 					    x4, cookie, handle, flags);
2532 
2533 	case FFA_MEM_FRAG_RX:
2534 		return spmc_ffa_mem_frag_rx(smc_fid, secure_origin, x1, x2, x3,
2535 					    x4, cookie, handle, flags);
2536 
2537 	case FFA_MEM_RETRIEVE_REQ_SMC32:
2538 	case FFA_MEM_RETRIEVE_REQ_SMC64:
2539 		return spmc_ffa_mem_retrieve_req(smc_fid, secure_origin, x1, x2,
2540 						 x3, x4, cookie, handle, flags);
2541 
2542 	case FFA_MEM_RELINQUISH:
2543 		return spmc_ffa_mem_relinquish(smc_fid, secure_origin, x1, x2,
2544 					       x3, x4, cookie, handle, flags);
2545 
2546 	case FFA_MEM_RECLAIM:
2547 		return spmc_ffa_mem_reclaim(smc_fid, secure_origin, x1, x2, x3,
2548 						x4, cookie, handle, flags);
2549 	case FFA_CONSOLE_LOG_SMC32:
2550 	case FFA_CONSOLE_LOG_SMC64:
2551 		return spmc_ffa_console_log(smc_fid, secure_origin, x1, x2, x3,
2552 						x4, cookie, handle, flags);
2553 
2554 	case FFA_MEM_PERM_GET_SMC32:
2555 	case FFA_MEM_PERM_GET_SMC64:
2556 		return ffa_mem_perm_get_handler(smc_fid, secure_origin, x1, x2,
2557 						x3, x4, cookie, handle, flags);
2558 
2559 	case FFA_MEM_PERM_SET_SMC32:
2560 	case FFA_MEM_PERM_SET_SMC64:
2561 		return ffa_mem_perm_set_handler(smc_fid, secure_origin, x1, x2,
2562 						x3, x4, cookie, handle, flags);
2563 
2564 	default:
2565 		WARN("Unsupported FF-A call 0x%08x.\n", smc_fid);
2566 		break;
2567 	}
2568 	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
2569 }
2570 
2571 /*******************************************************************************
2572  * This function is the handler registered for S-EL1 interrupts by the SPMC. It
2573  * validates the interrupt and upon success arranges entry into the SP for
2574  * handling the interrupt.
2575  ******************************************************************************/
2576 static uint64_t spmc_sp_interrupt_handler(uint32_t id,
2577 					  uint32_t flags,
2578 					  void *handle,
2579 					  void *cookie)
2580 {
2581 	struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
2582 	struct sp_exec_ctx *ec;
2583 	uint32_t linear_id = plat_my_core_pos();
2584 
2585 	/* Sanity check for a NULL pointer dereference. */
2586 	assert(sp != NULL);
2587 
2588 	/* Check the security state when the exception was generated. */
2589 	assert(get_interrupt_src_ss(flags) == NON_SECURE);
2590 
2591 	/* Panic if not an S-EL1 Partition. */
2592 	if (sp->runtime_el != S_EL1) {
2593 		ERROR("Interrupt received for a non S-EL1 SP on core%u.\n",
2594 		      linear_id);
2595 		panic();
2596 	}
2597 
2598 	/* Obtain a reference to the SP execution context. */
2599 	ec = spmc_get_sp_ec(sp);
2600 
2601 	/* Ensure that the execution context is in waiting state else panic. */
2602 	if (ec->rt_state != RT_STATE_WAITING) {
2603 		ERROR("SP EC on core%u is not waiting (%u), it is (%u).\n",
2604 		      linear_id, RT_STATE_WAITING, ec->rt_state);
2605 		panic();
2606 	}
2607 
2608 	/* Update the runtime model and state of the partition. */
2609 	ec->rt_model = RT_MODEL_INTR;
2610 	ec->rt_state = RT_STATE_RUNNING;
2611 
2612 	VERBOSE("SP (0x%x) interrupt start on core%u.\n", sp->sp_id, linear_id);
2613 
2614 	/*
2615 	 * Forward the interrupt to the S-EL1 SP. The interrupt ID is not
2616 	 * populated as the SP can determine this by itself.
2617 	 * The flags field is forced to 0 mainly to pass the SVE hint bit
2618 	 * cleared for consumption by the lower EL.
2619 	 */
2620 	return spmd_smc_switch_state(FFA_INTERRUPT, false,
2621 				     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
2622 				     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
2623 				     handle, 0ULL, sp->ffa_version);
2624 }
2625