xref: /rk3399_ARM-atf/services/std_svc/spm/el3_spmc/spmc_main.c (revision 2d3b44e3073e8d6ec49dde45ec353d6f41290917)
1 /*
2  * Copyright (c) 2022-2024, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <errno.h>
9 #include <stdio.h>
10 
11 #include <arch_helpers.h>
12 #include <bl31/bl31.h>
13 #include <bl31/ehf.h>
14 #include <bl31/interrupt_mgmt.h>
15 #include <common/debug.h>
16 #include <common/fdt_wrappers.h>
17 #include <common/runtime_svc.h>
18 #include <common/uuid.h>
19 #include <lib/el3_runtime/context_mgmt.h>
20 #include <lib/smccc.h>
21 #include <lib/utils.h>
22 #include <lib/xlat_tables/xlat_tables_v2.h>
23 #include <libfdt.h>
24 #include <plat/common/platform.h>
25 #include <services/el3_spmc_logical_sp.h>
26 #include <services/ffa_svc.h>
27 #include <services/spmc_svc.h>
28 #include <services/spmd_svc.h>
29 #include "spmc.h"
30 #include "spmc_shared_mem.h"
31 
32 #include <platform_def.h>
33 
34 /* FFA_MEM_PERM_* helpers */
35 #define FFA_MEM_PERM_MASK		U(7)
36 #define FFA_MEM_PERM_DATA_MASK		U(3)
37 #define FFA_MEM_PERM_DATA_SHIFT		U(0)
38 #define FFA_MEM_PERM_DATA_NA		U(0)
39 #define FFA_MEM_PERM_DATA_RW		U(1)
40 #define FFA_MEM_PERM_DATA_RES		U(2)
41 #define FFA_MEM_PERM_DATA_RO		U(3)
42 #define FFA_MEM_PERM_INST_EXEC          (U(0) << 2)
43 #define FFA_MEM_PERM_INST_NON_EXEC      (U(1) << 2)
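/*
 * Encoding note (derived from the fields above): bits[1:0] hold the data
 * access permission and bit[2] the instruction access permission. For
 * example, read-only executable memory is encoded as
 * (FFA_MEM_PERM_DATA_RO | FFA_MEM_PERM_INST_EXEC) == U(3) and read-write
 * non-executable memory as
 * (FFA_MEM_PERM_DATA_RW | FFA_MEM_PERM_INST_NON_EXEC) == U(5).
 */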
44 
45 /* Define the maximum number of SPs and EL3 LPs. */
46 #define MAX_SP_LP_PARTITIONS (SECURE_PARTITION_COUNT + MAX_EL3_LP_DESCS_COUNT)
47 
48 /*
49  * Allocate a secure partition descriptor to describe each SP in the system that
50  * does not reside at EL3.
51  */
52 static struct secure_partition_desc sp_desc[SECURE_PARTITION_COUNT];
53 
54 /*
55  * Allocate an NS endpoint descriptor to describe each VM and the Hypervisor in
56  * the system that interacts with a SP. It is used to track the Hypervisor
57  * buffer pair, version and ID for now. It could be extended to track VM
58  * properties when the SPMC supports indirect messaging.
59  */
60 static struct ns_endpoint_desc ns_ep_desc[NS_PARTITION_COUNT];
61 
62 static uint64_t spmc_sp_interrupt_handler(uint32_t id,
63 					  uint32_t flags,
64 					  void *handle,
65 					  void *cookie);
66 
67 /*
68  * Helper function to obtain the array storing the EL3
69  * Logical Partition descriptors.
70  */
71 struct el3_lp_desc *get_el3_lp_array(void)
72 {
73 	return (struct el3_lp_desc *) EL3_LP_DESCS_START;
74 }
75 
76 /*
77  * Helper function to obtain the descriptor of the last SP to which control
78  * was handed on this physical CPU. Currently, we assume there is only one SP.
79  * TODO: Expand to track multiple partitions when required.
80  */
81 struct secure_partition_desc *spmc_get_current_sp_ctx(void)
82 {
83 	return &(sp_desc[ACTIVE_SP_DESC_INDEX]);
84 }
85 
86 /*
87  * Helper function to obtain the execution context of an SP on the
88  * current physical cpu.
89  */
90 struct sp_exec_ctx *spmc_get_sp_ec(struct secure_partition_desc *sp)
91 {
92 	return &(sp->ec[get_ec_index(sp)]);
93 }
94 
95 /* Helper function to get pointer to SP context from its ID. */
96 struct secure_partition_desc *spmc_get_sp_ctx(uint16_t id)
97 {
98 	/* Check for Secure World Partitions. */
99 	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
100 		if (sp_desc[i].sp_id == id) {
101 			return &(sp_desc[i]);
102 		}
103 	}
104 	return NULL;
105 }
106 
107 /*
108  * Helper function to obtain the descriptor of the Hypervisor or OS kernel.
109  * We assume that the first descriptor is reserved for this entity.
110  */
111 struct ns_endpoint_desc *spmc_get_hyp_ctx(void)
112 {
113 	return &(ns_ep_desc[0]);
114 }
115 
116 /*
117  * Helper function to obtain the RX/TX buffer pair descriptor of the Hypervisor
118  * or OS kernel in the normal world or the last SP that was run.
119  */
120 struct mailbox *spmc_get_mbox_desc(bool secure_origin)
121 {
122 	/* Obtain the RX/TX buffer pair descriptor. */
123 	if (secure_origin) {
124 		return &(spmc_get_current_sp_ctx()->mailbox);
125 	} else {
126 		return &(spmc_get_hyp_ctx()->mailbox);
127 	}
128 }
129 
130 /******************************************************************************
131  * This function returns to the place where spmc_sp_synchronous_entry() was
132  * called originally.
133  ******************************************************************************/
134 __dead2 void spmc_sp_synchronous_exit(struct sp_exec_ctx *ec, uint64_t rc)
135 {
136 	/*
137 	 * The SPM must have initiated the original request through a
138 	 * synchronous entry into the secure partition. Jump back to the
139 	 * original C runtime context with the value of rc in x0.
140 	 */
141 	spm_secure_partition_exit(ec->c_rt_ctx, rc);
142 
143 	panic();
144 }
145 
146 /*******************************************************************************
147  * Return FFA_ERROR with specified error code.
148  ******************************************************************************/
149 uint64_t spmc_ffa_error_return(void *handle, int error_code)
150 {
151 	SMC_RET8(handle, FFA_ERROR,
152 		 FFA_TARGET_INFO_MBZ, error_code,
153 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
154 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
155 }
156 
157 /******************************************************************************
158  * Helper function to validate a secure partition ID to ensure it does not
159  * conflict with any other FF-A component and follows the convention to
160  * indicate it resides within the secure world.
161  ******************************************************************************/
162 bool is_ffa_secure_id_valid(uint16_t partition_id)
163 {
164 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
165 
166 	/* Ensure the ID is not the invalid partition ID. */
167 	if (partition_id == INV_SP_ID) {
168 		return false;
169 	}
170 
171 	/* Ensure the ID is not the SPMD ID. */
172 	if (partition_id == SPMD_DIRECT_MSG_ENDPOINT_ID) {
173 		return false;
174 	}
175 
176 	/*
177 	 * Ensure the ID follows the convention to indicate it resides
178 	 * in the secure world.
179 	 */
180 	if (!ffa_is_secure_world_id(partition_id)) {
181 		return false;
182 	}
183 
184 	/* Ensure we don't conflict with the SPMC partition ID. */
185 	if (partition_id == FFA_SPMC_ID) {
186 		return false;
187 	}
188 
189 	/* Ensure we do not already have an SP context with this ID. */
190 	if (spmc_get_sp_ctx(partition_id)) {
191 		return false;
192 	}
193 
194 	/* Ensure we don't clash with any Logical SPs. */
195 	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
196 		if (el3_lp_descs[i].sp_id == partition_id) {
197 			return false;
198 		}
199 	}
200 
201 	return true;
202 }
203 
204 /*******************************************************************************
205  * This function either forwards the request to the other world or returns
206  * with an ERET depending on the source of the call.
207  * We can assume that the destination is for an entity at a lower exception
208  * level as any messages destined for a logical SP resident in EL3 will have
209  * already been taken care of by the SPMC before entering this function.
210  ******************************************************************************/
211 static uint64_t spmc_smc_return(uint32_t smc_fid,
212 				bool secure_origin,
213 				uint64_t x1,
214 				uint64_t x2,
215 				uint64_t x3,
216 				uint64_t x4,
217 				void *handle,
218 				void *cookie,
219 				uint64_t flags,
220 				uint16_t dst_id)
221 {
222 	/* If the destination is in the normal world always go via the SPMD. */
223 	if (ffa_is_normal_world_id(dst_id)) {
224 		return spmd_smc_handler(smc_fid, x1, x2, x3, x4,
225 					cookie, handle, flags);
226 	}
227 	/*
228 	 * If the caller is secure and we want to return to the secure world,
229 	 * ERET directly.
230 	 */
231 	else if (secure_origin && ffa_is_secure_world_id(dst_id)) {
232 		SMC_RET5(handle, smc_fid, x1, x2, x3, x4);
233 	}
234 	/* If we originated in the normal world then switch contexts. */
235 	else if (!secure_origin && ffa_is_secure_world_id(dst_id)) {
236 		return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2,
237 					     x3, x4, handle, flags);
238 	} else {
239 		/* Unknown State. */
240 		panic();
241 	}
242 
243 	/* Shouldn't be Reached. */
244 	return 0;
245 }
246 
247 /*******************************************************************************
248  * FF-A ABI Handlers.
249  ******************************************************************************/
250 
251 /*******************************************************************************
252  * Helper function to validate arg2 as part of a direct message.
253  ******************************************************************************/
254 static inline bool direct_msg_validate_arg2(uint64_t x2)
255 {
256 	/* Check message type. */
257 	if (x2 & FFA_FWK_MSG_BIT) {
258 		/* We have a framework message, ensure it is a known message. */
259 		if (x2 & ~(FFA_FWK_MSG_MASK | FFA_FWK_MSG_BIT)) {
260 			VERBOSE("Invalid message format 0x%lx.\n", x2);
261 			return false;
262 		}
263 	} else {
264 		/* We have a partition message; ensure x2 is not set. */
265 		if (x2 != (uint64_t) 0) {
266 			VERBOSE("Arg2 MBZ for partition messages. (0x%lx).\n",
267 				x2);
268 			return false;
269 		}
270 	}
271 	return true;
272 }
273 
274 /*******************************************************************************
275  * Helper function to validate the destination ID of a direct response.
276  ******************************************************************************/
277 static bool direct_msg_validate_dst_id(uint16_t dst_id)
278 {
279 	struct secure_partition_desc *sp;
280 
281 	/* Check if we're targeting a normal world partition. */
282 	if (ffa_is_normal_world_id(dst_id)) {
283 		return true;
284 	}
285 
286 	/* Or directed to the SPMC itself. */
287 	if (dst_id == FFA_SPMC_ID) {
288 		return true;
289 	}
290 
291 	/* Otherwise ensure the SP exists. */
292 	sp = spmc_get_sp_ctx(dst_id);
293 	if (sp != NULL) {
294 		return true;
295 	}
296 
297 	return false;
298 }
299 
300 /*******************************************************************************
301  * Helper function to validate the response from a Logical Partition.
302  ******************************************************************************/
303 static bool direct_msg_validate_lp_resp(uint16_t origin_id, uint16_t lp_id,
304 					void *handle)
305 {
306 	/* Retrieve populated Direct Response Arguments. */
307 	uint64_t smc_fid = SMC_GET_GP(handle, CTX_GPREG_X0);
308 	uint64_t x1 = SMC_GET_GP(handle, CTX_GPREG_X1);
309 	uint64_t x2 = SMC_GET_GP(handle, CTX_GPREG_X2);
310 	uint16_t src_id = ffa_endpoint_source(x1);
311 	uint16_t dst_id = ffa_endpoint_destination(x1);
312 
313 	if (src_id != lp_id) {
314 		ERROR("Invalid EL3 LP source ID (0x%x).\n", src_id);
315 		return false;
316 	}
317 
318 	/*
319 	 * Check the destination ID is valid and ensure the LP is responding to
320 	 * the original request.
321 	 */
322 	if ((!direct_msg_validate_dst_id(dst_id)) || (dst_id != origin_id)) {
323 		ERROR("Invalid EL3 LP destination ID (0x%x).\n", dst_id);
324 		return false;
325 	}
326 
327 	if ((smc_fid != FFA_MSG_SEND_DIRECT_RESP2_SMC64) &&
328 			!direct_msg_validate_arg2(x2)) {
329 		ERROR("Invalid EL3 LP message encoding.\n");
330 		return false;
331 	}
332 	return true;
333 }
334 
335 /*******************************************************************************
336  * Helper function to check whether a partition can receive a direct message.
337  ******************************************************************************/
338 static bool direct_msg_receivable(uint32_t properties, uint16_t dir_req_fnum)
339 {
340 	if ((dir_req_fnum == FFA_FNUM_MSG_SEND_DIRECT_REQ &&
341 			((properties & FFA_PARTITION_DIRECT_REQ_RECV) == 0U)) ||
342 			(dir_req_fnum == FFA_FNUM_MSG_SEND_DIRECT_REQ2 &&
343 			((properties & FFA_PARTITION_DIRECT_REQ2_RECV) == 0U))) {
344 		return false;
345 	}
346 
347 	return true;
348 }
349 
350 /*******************************************************************************
351  * Handle direct request messages and route to the appropriate destination.
352  ******************************************************************************/
353 static uint64_t direct_req_smc_handler(uint32_t smc_fid,
354 				       bool secure_origin,
355 				       uint64_t x1,
356 				       uint64_t x2,
357 				       uint64_t x3,
358 				       uint64_t x4,
359 				       void *cookie,
360 				       void *handle,
361 				       uint64_t flags)
362 {
363 	uint16_t src_id = ffa_endpoint_source(x1);
364 	uint16_t dst_id = ffa_endpoint_destination(x1);
365 	uint16_t dir_req_funcid;
366 	struct el3_lp_desc *el3_lp_descs;
367 	struct secure_partition_desc *sp;
368 	unsigned int idx;
369 
370 	dir_req_funcid = (smc_fid != FFA_MSG_SEND_DIRECT_REQ2_SMC64) ?
371 		FFA_FNUM_MSG_SEND_DIRECT_REQ : FFA_FNUM_MSG_SEND_DIRECT_REQ2;
372 
373 	/*
374 	 * Sanity check for DIRECT_REQ:
375 	 * Check if arg2 has been populated correctly based on message type
376 	 */
377 	if ((dir_req_funcid == FFA_FNUM_MSG_SEND_DIRECT_REQ) &&
378 			!direct_msg_validate_arg2(x2)) {
379 		return spmc_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
380 	}
381 
382 	/* Validate Sender is either the current SP or from the normal world. */
383 	if ((secure_origin && src_id != spmc_get_current_sp_ctx()->sp_id) ||
384 		(!secure_origin && !ffa_is_normal_world_id(src_id))) {
385 		ERROR("Invalid direct request source ID (0x%x).\n", src_id);
386 		return spmc_ffa_error_return(handle,
387 					FFA_ERROR_INVALID_PARAMETER);
388 	}
389 
390 	el3_lp_descs = get_el3_lp_array();
391 
392 	/* Check if the request is destined for a Logical Partition. */
393 	for (unsigned int i = 0U; i < MAX_EL3_LP_DESCS_COUNT; i++) {
394 		if (el3_lp_descs[i].sp_id == dst_id) {
395 			if (!direct_msg_receivable(el3_lp_descs[i].properties, dir_req_funcid)) {
396 				return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
397 			}
398 
399 			uint64_t ret = el3_lp_descs[i].direct_req(
400 						smc_fid, secure_origin, x1, x2,
401 						x3, x4, cookie, handle, flags);
402 			if (!direct_msg_validate_lp_resp(src_id, dst_id,
403 							 handle)) {
404 				panic();
405 			}
406 
407 			/* Message checks out. */
408 			return ret;
409 		}
410 	}
411 
412 	/*
413 	 * If the request was not targeted to a LSP and from the secure world
414 	 * then it is invalid since a SP cannot call into the Normal world and
415 	 * there is no other SP to call into. If there are other SPs in future
416 	 * then the partition runtime model would need to be validated as well.
417 	 */
418 	if (secure_origin) {
419 		VERBOSE("Direct request not supported to the Normal World.\n");
420 		return spmc_ffa_error_return(handle,
421 					     FFA_ERROR_INVALID_PARAMETER);
422 	}
423 
424 	/* Check if the SP ID is valid. */
425 	sp = spmc_get_sp_ctx(dst_id);
426 	if (sp == NULL) {
427 		VERBOSE("Direct request to unknown partition ID (0x%x).\n",
428 			dst_id);
429 		return spmc_ffa_error_return(handle,
430 					     FFA_ERROR_INVALID_PARAMETER);
431 	}
432 
433 	if (!direct_msg_receivable(sp->properties, dir_req_funcid)) {
434 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
435 	}
436 
437 	/* Protect the runtime state of a UP S-EL0 SP with a lock. */
438 	if (sp->runtime_el == S_EL0) {
439 		spin_lock(&sp->rt_state_lock);
440 	}
441 
442 	/*
443 	 * Check that the target execution context is in a waiting state before
444 	 * forwarding the direct request to it.
445 	 */
446 	idx = get_ec_index(sp);
447 	if (sp->ec[idx].rt_state != RT_STATE_WAITING) {
448 		VERBOSE("SP context on core%u is not waiting (%u).\n",
449 			idx, sp->ec[idx].rt_model);
450 
451 		if (sp->runtime_el == S_EL0) {
452 			spin_unlock(&sp->rt_state_lock);
453 		}
454 
455 		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
456 	}
457 
458 	/*
459 	 * Everything checks out so forward the request to the SP after updating
460 	 * its state and runtime model.
461 	 */
462 	sp->ec[idx].rt_state = RT_STATE_RUNNING;
463 	sp->ec[idx].rt_model = RT_MODEL_DIR_REQ;
464 	sp->ec[idx].dir_req_origin_id = src_id;
465 	sp->ec[idx].dir_req_funcid = dir_req_funcid;
466 
467 	if (sp->runtime_el == S_EL0) {
468 		spin_unlock(&sp->rt_state_lock);
469 	}
470 
471 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
472 			       handle, cookie, flags, dst_id);
473 }
474 
475 /*******************************************************************************
476  * Handle direct response messages and route to the appropriate destination.
477  ******************************************************************************/
478 static uint64_t direct_resp_smc_handler(uint32_t smc_fid,
479 					bool secure_origin,
480 					uint64_t x1,
481 					uint64_t x2,
482 					uint64_t x3,
483 					uint64_t x4,
484 					void *cookie,
485 					void *handle,
486 					uint64_t flags)
487 {
488 	uint16_t dst_id = ffa_endpoint_destination(x1);
489 	uint16_t dir_req_funcid;
490 	struct secure_partition_desc *sp;
491 	unsigned int idx;
492 
493 	dir_req_funcid = (smc_fid != FFA_MSG_SEND_DIRECT_RESP2_SMC64) ?
494 		FFA_FNUM_MSG_SEND_DIRECT_REQ : FFA_FNUM_MSG_SEND_DIRECT_REQ2;
495 
496 	/* Check if arg2 has been populated correctly based on message type. */
497 	if (!direct_msg_validate_arg2(x2)) {
498 		return spmc_ffa_error_return(handle,
499 					     FFA_ERROR_INVALID_PARAMETER);
500 	}
501 
502 	/* Check that the response did not originate from the Normal world. */
503 	if (!secure_origin) {
504 		VERBOSE("Direct Response not supported from Normal World.\n");
505 		return spmc_ffa_error_return(handle,
506 					     FFA_ERROR_INVALID_PARAMETER);
507 	}
508 
509 	/*
510 	 * Check that the response is either targeted to the Normal world or the
511 	 * SPMC e.g. a PM response.
512 	 */
513 	if (!direct_msg_validate_dst_id(dst_id)) {
514 		VERBOSE("Direct response to invalid partition ID (0x%x).\n",
515 			dst_id);
516 		return spmc_ffa_error_return(handle,
517 					     FFA_ERROR_INVALID_PARAMETER);
518 	}
519 
520 	/* Obtain the SP descriptor and update its runtime state. */
521 	sp = spmc_get_sp_ctx(ffa_endpoint_source(x1));
522 	if (sp == NULL) {
523 		VERBOSE("Direct response to unknown partition ID (0x%x).\n",
524 			dst_id);
525 		return spmc_ffa_error_return(handle,
526 					     FFA_ERROR_INVALID_PARAMETER);
527 	}
528 
529 	if (sp->runtime_el == S_EL0) {
530 		spin_lock(&sp->rt_state_lock);
531 	}
532 
533 	/* Sanity check state is being tracked correctly in the SPMC. */
534 	idx = get_ec_index(sp);
535 	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
536 
537 	/* Ensure SP execution context was in the right runtime model. */
538 	if (sp->ec[idx].rt_model != RT_MODEL_DIR_REQ) {
539 		VERBOSE("SP context on core%u not handling direct req (%u).\n",
540 			idx, sp->ec[idx].rt_model);
541 		if (sp->runtime_el == S_EL0) {
542 			spin_unlock(&sp->rt_state_lock);
543 		}
544 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
545 	}
546 
547 	if (dir_req_funcid != sp->ec[idx].dir_req_funcid) {
548 		WARN("Unmatched direct req/resp func id. req:%x, resp:%x on core%u.\n",
549 		     sp->ec[idx].dir_req_funcid, (smc_fid & FUNCID_NUM_MASK), idx);
550 		if (sp->runtime_el == S_EL0) {
551 			spin_unlock(&sp->rt_state_lock);
552 		}
553 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
554 	}
555 
556 	if (sp->ec[idx].dir_req_origin_id != dst_id) {
557 		WARN("Invalid direct resp partition ID 0x%x != 0x%x on core%u.\n",
558 		     dst_id, sp->ec[idx].dir_req_origin_id, idx);
559 		if (sp->runtime_el == S_EL0) {
560 			spin_unlock(&sp->rt_state_lock);
561 		}
562 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
563 	}
564 
565 	/* Update the state of the SP execution context. */
566 	sp->ec[idx].rt_state = RT_STATE_WAITING;
567 
568 	/* Clear the ongoing direct request ID. */
569 	sp->ec[idx].dir_req_origin_id = INV_SP_ID;
570 
571 	/* Clear the ongoing direct request message version. */
572 	sp->ec[idx].dir_req_funcid = 0U;
573 
574 	if (sp->runtime_el == S_EL0) {
575 		spin_unlock(&sp->rt_state_lock);
576 	}
577 
578 	/*
579 	 * If the receiver is the SPMC itself, perform a synchronous exit;
580 	 * otherwise forward the response to the Normal world.
581 	 */
582 	if (dst_id == FFA_SPMC_ID) {
583 		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
584 		/* Should not get here. */
585 		panic();
586 	}
587 
588 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
589 			       handle, cookie, flags, dst_id);
590 }
591 
592 /*******************************************************************************
593  * This function handles the FFA_MSG_WAIT SMC to allow an SP to relinquish its
594  * cycles.
595  ******************************************************************************/
596 static uint64_t msg_wait_handler(uint32_t smc_fid,
597 				 bool secure_origin,
598 				 uint64_t x1,
599 				 uint64_t x2,
600 				 uint64_t x3,
601 				 uint64_t x4,
602 				 void *cookie,
603 				 void *handle,
604 				 uint64_t flags)
605 {
606 	struct secure_partition_desc *sp;
607 	unsigned int idx;
608 
609 	/*
610 	 * Check that the call did not originate from the Normal world as
611 	 * only the secure world can call this ABI.
612 	 */
613 	if (!secure_origin) {
614 		VERBOSE("Normal world cannot call FFA_MSG_WAIT.\n");
615 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
616 	}
617 
618 	/* Get the descriptor of the SP that invoked FFA_MSG_WAIT. */
619 	sp = spmc_get_current_sp_ctx();
620 	if (sp == NULL) {
621 		return spmc_ffa_error_return(handle,
622 					     FFA_ERROR_INVALID_PARAMETER);
623 	}
624 
625 	/*
626 	 * Get the execution context of the SP that invoked FFA_MSG_WAIT.
627 	 */
628 	idx = get_ec_index(sp);
629 	if (sp->runtime_el == S_EL0) {
630 		spin_lock(&sp->rt_state_lock);
631 	}
632 
633 	/* Ensure SP execution context was in the right runtime model. */
634 	if (sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) {
635 		if (sp->runtime_el == S_EL0) {
636 			spin_unlock(&sp->rt_state_lock);
637 		}
638 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
639 	}
640 
641 	/* Sanity check the state is being tracked correctly in the SPMC. */
642 	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
643 
644 	/*
645 	 * Perform a synchronous exit if the partition was initialising. The
646 	 * state is updated after the exit.
647 	 */
648 	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
649 		if (sp->runtime_el == S_EL0) {
650 			spin_unlock(&sp->rt_state_lock);
651 		}
652 		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
653 		/* Should not get here */
654 		panic();
655 	}
656 
657 	/* Update the state of the SP execution context. */
658 	sp->ec[idx].rt_state = RT_STATE_WAITING;
659 
660 	/* Resume normal world if a secure interrupt was handled. */
661 	if (sp->ec[idx].rt_model == RT_MODEL_INTR) {
662 		if (sp->runtime_el == S_EL0) {
663 			spin_unlock(&sp->rt_state_lock);
664 		}
665 
666 		return spmd_smc_switch_state(FFA_NORMAL_WORLD_RESUME, secure_origin,
667 					     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
668 					     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
669 					     handle, flags);
670 	}
671 
672 	/* Release the lock protecting the runtime state of a S-EL0 SP. */
673 	if (sp->runtime_el == S_EL0) {
674 		spin_unlock(&sp->rt_state_lock);
675 	}
676 
677 	/* Forward the response to the Normal world. */
678 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
679 			       handle, cookie, flags, FFA_NWD_ID);
680 }
681 
682 static uint64_t ffa_error_handler(uint32_t smc_fid,
683 				 bool secure_origin,
684 				 uint64_t x1,
685 				 uint64_t x2,
686 				 uint64_t x3,
687 				 uint64_t x4,
688 				 void *cookie,
689 				 void *handle,
690 				 uint64_t flags)
691 {
692 	struct secure_partition_desc *sp;
693 	unsigned int idx;
694 	uint16_t dst_id = ffa_endpoint_destination(x1);
695 	bool cancel_dir_req = false;
696 
697 	/* Check that the call did not originate from the Normal world. */
698 	if (!secure_origin) {
699 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
700 	}
701 
702 	/* Get the descriptor of the SP that invoked FFA_ERROR. */
703 	sp = spmc_get_current_sp_ctx();
704 	if (sp == NULL) {
705 		return spmc_ffa_error_return(handle,
706 					     FFA_ERROR_INVALID_PARAMETER);
707 	}
708 
709 	/* Get the execution context of the SP that invoked FFA_ERROR. */
710 	idx = get_ec_index(sp);
711 
712 	/*
713 	 * We only expect FFA_ERROR to be received during SP initialisation
714 	 * otherwise this is an invalid call.
715 	 */
716 	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
717 		ERROR("SP 0x%x failed to initialize.\n", sp->sp_id);
718 		spmc_sp_synchronous_exit(&sp->ec[idx], x2);
719 		/* Should not get here. */
720 		panic();
721 	}
722 
723 	if (sp->runtime_el == S_EL0) {
724 		spin_lock(&sp->rt_state_lock);
725 	}
726 
727 	if (sp->ec[idx].rt_state == RT_STATE_RUNNING &&
728 			sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) {
729 		sp->ec[idx].rt_state = RT_STATE_WAITING;
730 		sp->ec[idx].dir_req_origin_id = INV_SP_ID;
731 		sp->ec[idx].dir_req_funcid = 0x00;
732 		cancel_dir_req = true;
733 	}
734 
735 	if (sp->runtime_el == S_EL0) {
736 		spin_unlock(&sp->rt_state_lock);
737 	}
738 
739 	if (cancel_dir_req) {
740 		if (dst_id == FFA_SPMC_ID) {
741 			spmc_sp_synchronous_exit(&sp->ec[idx], x4);
742 			/* Should not get here. */
743 			panic();
744 		} else {
745 			return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
746 					       handle, cookie, flags, dst_id);
747 		}
747 	}
748 
749 	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
750 }
751 
752 static uint64_t ffa_version_handler(uint32_t smc_fid,
753 				    bool secure_origin,
754 				    uint64_t x1,
755 				    uint64_t x2,
756 				    uint64_t x3,
757 				    uint64_t x4,
758 				    void *cookie,
759 				    void *handle,
760 				    uint64_t flags)
761 {
762 	uint32_t requested_version = x1 & FFA_VERSION_MASK;
763 
764 	if (requested_version & FFA_VERSION_BIT31_MASK) {
765 		/* Invalid encoding, return an error. */
766 		SMC_RET1(handle, FFA_ERROR_NOT_SUPPORTED);
767 		/* Execution stops here. */
768 	}
769 
770 	/* Determine the caller to store the requested version. */
771 	if (secure_origin) {
772 		/*
773 		 * Ensure that the SP is reporting the same version as
774 		 * specified in its manifest. If these do not match there is
775 		 * something wrong with the SP.
776 		 * TODO: Should we abort the SP? For now assert this is not
777 		 *       the case.
778 		 */
779 		assert(requested_version ==
780 		       spmc_get_current_sp_ctx()->ffa_version);
781 	} else {
782 		/*
783 		 * If this is called by the normal world, record this
784 		 * information in its descriptor.
785 		 */
786 		spmc_get_hyp_ctx()->ffa_version = requested_version;
787 	}
788 
789 	SMC_RET1(handle, MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
790 					  FFA_VERSION_MINOR));
791 }
792 
793 /*******************************************************************************
794  * Helper function to obtain the FF-A version of the calling partition.
795  ******************************************************************************/
796 uint32_t get_partition_ffa_version(bool secure_origin)
797 {
798 	if (secure_origin) {
799 		return spmc_get_current_sp_ctx()->ffa_version;
800 	} else {
801 		return spmc_get_hyp_ctx()->ffa_version;
802 	}
803 }
804 
805 static uint64_t rxtx_map_handler(uint32_t smc_fid,
806 				 bool secure_origin,
807 				 uint64_t x1,
808 				 uint64_t x2,
809 				 uint64_t x3,
810 				 uint64_t x4,
811 				 void *cookie,
812 				 void *handle,
813 				 uint64_t flags)
814 {
815 	int ret;
816 	uint32_t error_code;
817 	uint32_t mem_atts = secure_origin ? MT_SECURE : MT_NS;
818 	struct mailbox *mbox;
819 	uintptr_t tx_address = x1;
820 	uintptr_t rx_address = x2;
821 	uint32_t page_count = x3 & FFA_RXTX_PAGE_COUNT_MASK; /* Bits [5:0] */
822 	uint32_t buf_size = page_count * FFA_PAGE_SIZE;
823 
824 	/*
825 	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
826 	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
827 	 * ABI on behalf of a VM and reject it if this is the case.
828 	 */
829 	if (tx_address == 0 || rx_address == 0) {
830 		WARN("Mapping RX/TX Buffers on behalf of VM not supported.\n");
831 		return spmc_ffa_error_return(handle,
832 					     FFA_ERROR_INVALID_PARAMETER);
833 	}
834 
835 	/* Ensure the specified buffers are not the same. */
836 	if (tx_address == rx_address) {
837 		WARN("TX Buffer must not be the same as RX Buffer.\n");
838 		return spmc_ffa_error_return(handle,
839 					     FFA_ERROR_INVALID_PARAMETER);
840 	}
841 
842 	/* Ensure the buffer size is not 0. */
843 	if (buf_size == 0U) {
844 		WARN("Buffer size must not be 0\n");
845 		return spmc_ffa_error_return(handle,
846 					     FFA_ERROR_INVALID_PARAMETER);
847 	}
848 
849 	/*
850 	 * Ensure the buffer size is a multiple of the translation granule size
851 	 * in TF-A.
852 	 */
853 	if (buf_size % PAGE_SIZE != 0U) {
854 		WARN("Buffer size must be aligned to translation granule.\n");
855 		return spmc_ffa_error_return(handle,
856 					     FFA_ERROR_INVALID_PARAMETER);
857 	}
858 
859 	/* Obtain the RX/TX buffer pair descriptor. */
860 	mbox = spmc_get_mbox_desc(secure_origin);
861 
862 	spin_lock(&mbox->lock);
863 
864 	/* Check if buffers have already been mapped. */
865 	if (mbox->rx_buffer != 0 || mbox->tx_buffer != 0) {
866 		WARN("RX/TX Buffers already mapped (%p/%p)\n",
867 		     (void *) mbox->rx_buffer, (void *)mbox->tx_buffer);
868 		error_code = FFA_ERROR_DENIED;
869 		goto err;
870 	}
871 
872 	/* memmap the TX buffer as read only. */
873 	ret = mmap_add_dynamic_region(tx_address, /* PA */
874 			tx_address, /* VA */
875 			buf_size, /* size */
876 			mem_atts | MT_RO_DATA); /* attrs */
877 	if (ret != 0) {
878 		/* Return the correct error code. */
879 		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
880 						FFA_ERROR_INVALID_PARAMETER;
881 		WARN("Unable to map TX buffer: %d\n", error_code);
882 		goto err;
883 	}
884 
885 	/* memmap the RX buffer as read write. */
886 	ret = mmap_add_dynamic_region(rx_address, /* PA */
887 			rx_address, /* VA */
888 			buf_size, /* size */
889 			mem_atts | MT_RW_DATA); /* attrs */
890 
891 	if (ret != 0) {
892 		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
893 						FFA_ERROR_INVALID_PARAMETER;
894 		WARN("Unable to map RX buffer: %d\n", error_code);
895 		/* Unmap the TX buffer again. */
896 		mmap_remove_dynamic_region(tx_address, buf_size);
897 		goto err;
898 	}
899 
900 	mbox->tx_buffer = (void *) tx_address;
901 	mbox->rx_buffer = (void *) rx_address;
902 	mbox->rxtx_page_count = page_count;
903 	spin_unlock(&mbox->lock);
904 
905 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
906 	/* Execution stops here. */
907 err:
908 	spin_unlock(&mbox->lock);
909 	return spmc_ffa_error_return(handle, error_code);
910 }
911 
912 static uint64_t rxtx_unmap_handler(uint32_t smc_fid,
913 				   bool secure_origin,
914 				   uint64_t x1,
915 				   uint64_t x2,
916 				   uint64_t x3,
917 				   uint64_t x4,
918 				   void *cookie,
919 				   void *handle,
920 				   uint64_t flags)
921 {
922 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
923 	uint32_t buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
924 
925 	/*
926 	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
927 	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
928 	 * ABI on behalf of a VM and reject it if this is the case.
929 	 */
930 	if (x1 != 0UL) {
931 		return spmc_ffa_error_return(handle,
932 					     FFA_ERROR_INVALID_PARAMETER);
933 	}
934 
935 	spin_lock(&mbox->lock);
936 
937 	/* Check if buffers are currently mapped. */
938 	if (mbox->rx_buffer == 0 || mbox->tx_buffer == 0) {
939 		spin_unlock(&mbox->lock);
940 		return spmc_ffa_error_return(handle,
941 					     FFA_ERROR_INVALID_PARAMETER);
942 	}
943 
944 	/* Unmap RX Buffer */
945 	if (mmap_remove_dynamic_region((uintptr_t) mbox->rx_buffer,
946 				       buf_size) != 0) {
947 		WARN("Unable to unmap RX buffer!\n");
948 	}
949 
950 	mbox->rx_buffer = 0;
951 
952 	/* Unmap TX Buffer */
953 	if (mmap_remove_dynamic_region((uintptr_t) mbox->tx_buffer,
954 				       buf_size) != 0) {
955 		WARN("Unable to unmap TX buffer!\n");
956 	}
957 
958 	mbox->tx_buffer = 0;
959 	mbox->rxtx_page_count = 0;
960 
961 	spin_unlock(&mbox->lock);
962 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
963 }
964 
965 /*
966  * Helper function to populate the properties field of a Partition Info Get
967  * descriptor.
968  */
969 static uint32_t
970 partition_info_get_populate_properties(uint32_t sp_properties,
971 				       enum sp_execution_state sp_ec_state)
972 {
973 	uint32_t properties = sp_properties;
974 	uint32_t ec_state;
975 
976 	/* Determine the execution state of the SP. */
977 	ec_state = sp_ec_state == SP_STATE_AARCH64 ?
978 		   FFA_PARTITION_INFO_GET_AARCH64_STATE :
979 		   FFA_PARTITION_INFO_GET_AARCH32_STATE;
980 
981 	properties |= ec_state << FFA_PARTITION_INFO_GET_EXEC_STATE_SHIFT;
982 
983 	return properties;
984 }
985 
986 /*
987  * Collate the partition information in a v1.1 partition information
988  * descriptor format; this will be converted later if required.
989  */
990 static int partition_info_get_handler_v1_1(uint32_t *uuid,
991 					   struct ffa_partition_info_v1_1
992 						  *partitions,
993 					   uint32_t max_partitions,
994 					   uint32_t *partition_count)
995 {
996 	uint32_t index;
997 	struct ffa_partition_info_v1_1 *desc;
998 	bool null_uuid = is_null_uuid(uuid);
999 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
1000 
1001 	/* Deal with Logical Partitions. */
1002 	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
1003 		if (null_uuid || uuid_match(uuid, el3_lp_descs[index].uuid)) {
1004 			/* Found a matching UUID, populate appropriately. */
1005 			if (*partition_count >= max_partitions) {
1006 				return FFA_ERROR_NO_MEMORY;
1007 			}
1008 
1009 			desc = &partitions[*partition_count];
1010 			desc->ep_id = el3_lp_descs[index].sp_id;
1011 			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
1012 			/* LSPs must be AArch64. */
1013 			desc->properties =
1014 				partition_info_get_populate_properties(
1015 					el3_lp_descs[index].properties,
1016 					SP_STATE_AARCH64);
1017 
1018 			if (null_uuid) {
1019 				copy_uuid(desc->uuid, el3_lp_descs[index].uuid);
1020 			}
1021 			(*partition_count)++;
1022 		}
1023 	}
1024 
1025 	/* Deal with physical SPs. */
1026 	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
1027 		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
1028 			/* Found a matching UUID, populate appropriately. */
1029 			if (*partition_count >= max_partitions) {
1030 				return FFA_ERROR_NO_MEMORY;
1031 			}
1032 
1033 			desc = &partitions[*partition_count];
1034 			desc->ep_id = sp_desc[index].sp_id;
1035 			/*
1036 			 * Execution context count must match the number of
1037 			 * cores for S-EL1 SPs.
1038 			 */
1039 			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
1040 			desc->properties =
1041 				partition_info_get_populate_properties(
1042 					sp_desc[index].properties,
1043 					sp_desc[index].execution_state);
1044 
1045 			if (null_uuid) {
1046 				copy_uuid(desc->uuid, sp_desc[index].uuid);
1047 			}
1048 			(*partition_count)++;
1049 		}
1050 	}
1051 	return 0;
1052 }
1053 
1054 /*
1055  * Handle the case where the caller only wants the count of partitions
1056  * matching a given UUID and does not want the corresponding descriptors
1057  * populated.
1058  */
1059 static uint32_t partition_info_get_handler_count_only(uint32_t *uuid)
1060 {
1061 	uint32_t index = 0;
1062 	uint32_t partition_count = 0;
1063 	bool null_uuid = is_null_uuid(uuid);
1064 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
1065 
1066 	/* Deal with Logical Partitions. */
1067 	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
1068 		if (null_uuid ||
1069 		    uuid_match(uuid, el3_lp_descs[index].uuid)) {
1070 			(partition_count)++;
1071 		}
1072 	}
1073 
1074 	/* Deal with physical SPs. */
1075 	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
1076 		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
1077 			(partition_count)++;
1078 		}
1079 	}
1080 	return partition_count;
1081 }
1082 
1083 /*
1084  * If the caller of the PARTITION_INFO_GET ABI was a v1.0 caller, populate
1085  * the corresponding descriptor format from the v1.1 descriptor array.
1086  */
1087 static uint64_t partition_info_populate_v1_0(struct ffa_partition_info_v1_1
1088 					     *partitions,
1089 					     struct mailbox *mbox,
1090 					     int partition_count)
1091 {
1092 	uint32_t index;
1093 	uint32_t buf_size;
1094 	uint32_t descriptor_size;
1095 	struct ffa_partition_info_v1_0 *v1_0_partitions =
1096 		(struct ffa_partition_info_v1_0 *) mbox->rx_buffer;
1097 
1098 	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1099 	descriptor_size = partition_count *
1100 			  sizeof(struct ffa_partition_info_v1_0);
1101 
1102 	if (descriptor_size > buf_size) {
1103 		return FFA_ERROR_NO_MEMORY;
1104 	}
1105 
1106 	for (index = 0U; index < partition_count; index++) {
1107 		v1_0_partitions[index].ep_id = partitions[index].ep_id;
1108 		v1_0_partitions[index].execution_ctx_count =
1109 			partitions[index].execution_ctx_count;
1110 		/* Only report v1.0 properties. */
1111 		v1_0_partitions[index].properties =
1112 			(partitions[index].properties &
1113 			FFA_PARTITION_INFO_GET_PROPERTIES_V1_0_MASK);
1114 	}
1115 	return 0;
1116 }
1117 
1118 /*
1119  * Main handler for FFA_PARTITION_INFO_GET which supports both FF-A v1.1 and
1120  * v1.0 implementations.
1121  */
1122 static uint64_t partition_info_get_handler(uint32_t smc_fid,
1123 					   bool secure_origin,
1124 					   uint64_t x1,
1125 					   uint64_t x2,
1126 					   uint64_t x3,
1127 					   uint64_t x4,
1128 					   void *cookie,
1129 					   void *handle,
1130 					   uint64_t flags)
1131 {
1132 	int ret;
1133 	uint32_t partition_count = 0;
1134 	uint32_t size = 0;
1135 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1136 	struct mailbox *mbox;
1137 	uint64_t info_get_flags;
1138 	bool count_only;
1139 	uint32_t uuid[4];
1140 
1141 	uuid[0] = x1;
1142 	uuid[1] = x2;
1143 	uuid[2] = x3;
1144 	uuid[3] = x4;
1145 
1146 	/* Determine if the Partition descriptors should be populated. */
1147 	info_get_flags = SMC_GET_GP(handle, CTX_GPREG_X5);
1148 	count_only = (info_get_flags & FFA_PARTITION_INFO_GET_COUNT_FLAG_MASK);
1149 
1150 	/* Handle the case where we don't need to populate the descriptors. */
1151 	if (count_only) {
1152 		partition_count = partition_info_get_handler_count_only(uuid);
1153 		if (partition_count == 0) {
1154 			return spmc_ffa_error_return(handle,
1155 						FFA_ERROR_INVALID_PARAMETER);
1156 		}
1157 	} else {
1158 		struct ffa_partition_info_v1_1 partitions[MAX_SP_LP_PARTITIONS];
1159 
1160 		/*
1161 		 * Handle the case where the partition descriptors are required,
1162 		 * check we have the buffers available and populate the
1163 		 * appropriate structure version.
1164 		 */
1165 
1166 		/* Obtain the v1.1 format of the descriptors. */
1167 		ret = partition_info_get_handler_v1_1(uuid, partitions,
1168 						      MAX_SP_LP_PARTITIONS,
1169 						      &partition_count);
1170 
1171 		/* Check if an error occurred during discovery. */
1172 		if (ret != 0) {
1173 			goto err;
1174 		}
1175 
1176 		/* If we didn't find any matches the UUID is unknown. */
1177 		if (partition_count == 0) {
1178 			ret = FFA_ERROR_INVALID_PARAMETER;
1179 			goto err;
1180 		}
1181 
1182 		/* Obtain the partition mailbox RX/TX buffer pair descriptor. */
1183 		mbox = spmc_get_mbox_desc(secure_origin);
1184 
1185 		/*
1186 		 * If the caller has not bothered registering its RX/TX pair
1187 		 * then return an error code.
1188 		 */
1189 		spin_lock(&mbox->lock);
1190 		if (mbox->rx_buffer == NULL) {
1191 			ret = FFA_ERROR_BUSY;
1192 			goto err_unlock;
1193 		}
1194 
1195 		/* Ensure the RX buffer is currently free. */
1196 		if (mbox->state != MAILBOX_STATE_EMPTY) {
1197 			ret = FFA_ERROR_BUSY;
1198 			goto err_unlock;
1199 		}
1200 
1201 		/* Zero the RX buffer before populating. */
1202 		(void)memset(mbox->rx_buffer, 0,
1203 			     mbox->rxtx_page_count * FFA_PAGE_SIZE);
1204 
1205 		/*
1206 		 * Depending on the FF-A version of the requesting partition
1207 		 * we may need to convert to a v1.0 format otherwise we can copy
1208 		 * directly.
1209 		 */
1210 		if (ffa_version == MAKE_FFA_VERSION(U(1), U(0))) {
1211 			ret = partition_info_populate_v1_0(partitions,
1212 							   mbox,
1213 							   partition_count);
1214 			if (ret != 0) {
1215 				goto err_unlock;
1216 			}
1217 		} else {
1218 			uint32_t buf_size = mbox->rxtx_page_count *
1219 					    FFA_PAGE_SIZE;
1220 
1221 			/* Ensure the descriptor will fit in the buffer. */
1222 			size = sizeof(struct ffa_partition_info_v1_1);
1223 			if (partition_count * size  > buf_size) {
1224 				ret = FFA_ERROR_NO_MEMORY;
1225 				goto err_unlock;
1226 			}
1227 			memcpy(mbox->rx_buffer, partitions,
1228 			       partition_count * size);
1229 		}
1230 
1231 		mbox->state = MAILBOX_STATE_FULL;
1232 		spin_unlock(&mbox->lock);
1233 	}
1234 	SMC_RET4(handle, FFA_SUCCESS_SMC32, 0, partition_count, size);
1235 
1236 err_unlock:
1237 	spin_unlock(&mbox->lock);
1238 err:
1239 	return spmc_ffa_error_return(handle, ret);
1240 }
1241 
1242 static uint64_t ffa_feature_success(void *handle, uint32_t arg2)
1243 {
1244 	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0, arg2);
1245 }
1246 
1247 static uint64_t ffa_features_retrieve_request(bool secure_origin,
1248 					      uint32_t input_properties,
1249 					      void *handle)
1250 {
1251 	/*
1252 	 * If we're called by the normal world we don't support any
1253 	 * additional features.
1254 	 */
1255 	if (!secure_origin) {
1256 		if ((input_properties & FFA_FEATURES_RET_REQ_NS_BIT) != 0U) {
1257 			return spmc_ffa_error_return(handle,
1258 						     FFA_ERROR_NOT_SUPPORTED);
1259 		}
1260 
1261 	} else {
1262 		struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
1263 		/*
1264 		 * If v1.1, the NS bit must be set; otherwise it is an invalid
1265 		 * call. If v1.0, check and store whether the SP has requested
1266 		 * the use of the NS bit.
1267 		 */
1268 		if (sp->ffa_version == MAKE_FFA_VERSION(1, 1)) {
1269 			if ((input_properties &
1270 			     FFA_FEATURES_RET_REQ_NS_BIT) == 0U) {
1271 				return spmc_ffa_error_return(handle,
1272 						       FFA_ERROR_NOT_SUPPORTED);
1273 			}
1274 			return ffa_feature_success(handle,
1275 						   FFA_FEATURES_RET_REQ_NS_BIT);
1276 		} else {
1277 			sp->ns_bit_requested = (input_properties &
1278 					       FFA_FEATURES_RET_REQ_NS_BIT) !=
1279 					       0U;
1280 		}
1281 		if (sp->ns_bit_requested) {
1282 			return ffa_feature_success(handle,
1283 						   FFA_FEATURES_RET_REQ_NS_BIT);
1284 		}
1285 	}
1286 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1287 }
1288 
1289 static uint64_t ffa_features_handler(uint32_t smc_fid,
1290 				     bool secure_origin,
1291 				     uint64_t x1,
1292 				     uint64_t x2,
1293 				     uint64_t x3,
1294 				     uint64_t x4,
1295 				     void *cookie,
1296 				     void *handle,
1297 				     uint64_t flags)
1298 {
1299 	uint32_t function_id = (uint32_t) x1;
1300 	uint32_t input_properties = (uint32_t) x2;
1301 
1302 	/* Check if a Feature ID was requested. */
1303 	if ((function_id & FFA_FEATURES_BIT31_MASK) == 0U) {
1304 		/* We currently don't support any additional features. */
1305 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1306 	}
1307 
1308 	/*
1309 	 * Handle the cases where we have separate handlers due to additional
1310 	 * properties.
1311 	 */
1312 	switch (function_id) {
1313 	case FFA_MEM_RETRIEVE_REQ_SMC32:
1314 	case FFA_MEM_RETRIEVE_REQ_SMC64:
1315 		return ffa_features_retrieve_request(secure_origin,
1316 						     input_properties,
1317 						     handle);
1318 	}
1319 
1320 	/*
1321 	 * We don't currently support additional input properties for these
1322 	 * other ABIs therefore ensure this value is set to 0.
1323 	 */
1324 	if (input_properties != 0U) {
1325 		return spmc_ffa_error_return(handle,
1326 					     FFA_ERROR_NOT_SUPPORTED);
1327 	}
1328 
1329 	/* Report if any other FF-A ABI is supported. */
1330 	switch (function_id) {
1331 	/* Supported features from both worlds. */
1332 	case FFA_ERROR:
1333 	case FFA_SUCCESS_SMC32:
1334 	case FFA_INTERRUPT:
1335 	case FFA_SPM_ID_GET:
1336 	case FFA_ID_GET:
1337 	case FFA_FEATURES:
1338 	case FFA_VERSION:
1339 	case FFA_RX_RELEASE:
1340 	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
1341 	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
1342 	case FFA_MSG_SEND_DIRECT_REQ2_SMC64:
1343 	case FFA_PARTITION_INFO_GET:
1344 	case FFA_RXTX_MAP_SMC32:
1345 	case FFA_RXTX_MAP_SMC64:
1346 	case FFA_RXTX_UNMAP:
1347 	case FFA_MEM_FRAG_TX:
1348 	case FFA_MSG_RUN:
1349 
1350 		/*
1351 		 * We are relying on the fact that the other registers
1352 		 * will be set to 0 as these values align with the
1353 		 * currently implemented features of the SPMC. If this
1354 		 * changes this function must be extended to handle
1355 		 * reporting the additional functionality.
1356 		 */
1357 
1358 		SMC_RET1(handle, FFA_SUCCESS_SMC32);
1359 		/* Execution stops here. */
1360 
1361 	/* Supported ABIs only from the secure world. */
1362 	case FFA_MEM_PERM_GET_SMC32:
1363 	case FFA_MEM_PERM_GET_SMC64:
1364 	case FFA_MEM_PERM_SET_SMC32:
1365 	case FFA_MEM_PERM_SET_SMC64:
1366 	/* these ABIs are only supported from S-EL0 SPs */
1367 	#if !(SPMC_AT_EL3_SEL0_SP)
1368 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1369 	#endif
1370 	/* fall through */
1371 
1372 	case FFA_SECONDARY_EP_REGISTER_SMC64:
1373 	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
1374 	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
1375 	case FFA_MSG_SEND_DIRECT_RESP2_SMC64:
1376 	case FFA_MEM_RELINQUISH:
1377 	case FFA_MSG_WAIT:
1378 	case FFA_CONSOLE_LOG_SMC32:
1379 	case FFA_CONSOLE_LOG_SMC64:
1380 		if (!secure_origin) {
1381 			return spmc_ffa_error_return(handle,
1382 				FFA_ERROR_NOT_SUPPORTED);
1383 		}
1384 		SMC_RET1(handle, FFA_SUCCESS_SMC32);
1385 		/* Execution stops here. */
1386 
1387 	/* Supported features only from the normal world. */
1388 	case FFA_MEM_SHARE_SMC32:
1389 	case FFA_MEM_SHARE_SMC64:
1390 	case FFA_MEM_LEND_SMC32:
1391 	case FFA_MEM_LEND_SMC64:
1392 	case FFA_MEM_RECLAIM:
1393 	case FFA_MEM_FRAG_RX:
1394 
1395 		if (secure_origin) {
1396 			return spmc_ffa_error_return(handle,
1397 					FFA_ERROR_NOT_SUPPORTED);
1398 		}
1399 		SMC_RET1(handle, FFA_SUCCESS_SMC32);
1400 		/* Execution stops here. */
1401 
1402 	default:
1403 		return spmc_ffa_error_return(handle,
1404 					FFA_ERROR_NOT_SUPPORTED);
1405 	}
1406 }
1407 
1408 static uint64_t ffa_id_get_handler(uint32_t smc_fid,
1409 				   bool secure_origin,
1410 				   uint64_t x1,
1411 				   uint64_t x2,
1412 				   uint64_t x3,
1413 				   uint64_t x4,
1414 				   void *cookie,
1415 				   void *handle,
1416 				   uint64_t flags)
1417 {
1418 	if (secure_origin) {
1419 		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1420 			 spmc_get_current_sp_ctx()->sp_id);
1421 	} else {
1422 		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1423 			 spmc_get_hyp_ctx()->ns_ep_id);
1424 	}
1425 }
1426 
1427 /*
1428  * Enable an SP to query the ID assigned to the SPMC.
1429  */
1430 static uint64_t ffa_spm_id_get_handler(uint32_t smc_fid,
1431 				       bool secure_origin,
1432 				       uint64_t x1,
1433 				       uint64_t x2,
1434 				       uint64_t x3,
1435 				       uint64_t x4,
1436 				       void *cookie,
1437 				       void *handle,
1438 				       uint64_t flags)
1439 {
1440 	assert(x1 == 0UL);
1441 	assert(x2 == 0UL);
1442 	assert(x3 == 0UL);
1443 	assert(x4 == 0UL);
1444 	assert(SMC_GET_GP(handle, CTX_GPREG_X5) == 0UL);
1445 	assert(SMC_GET_GP(handle, CTX_GPREG_X6) == 0UL);
1446 	assert(SMC_GET_GP(handle, CTX_GPREG_X7) == 0UL);
1447 
1448 	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0, FFA_SPMC_ID);
1449 }
1450 
1451 static uint64_t ffa_run_handler(uint32_t smc_fid,
1452 				bool secure_origin,
1453 				uint64_t x1,
1454 				uint64_t x2,
1455 				uint64_t x3,
1456 				uint64_t x4,
1457 				void *cookie,
1458 				void *handle,
1459 				uint64_t flags)
1460 {
1461 	struct secure_partition_desc *sp;
1462 	uint16_t target_id = FFA_RUN_EP_ID(x1);
1463 	uint16_t vcpu_id = FFA_RUN_VCPU_ID(x1);
1464 	unsigned int idx;
1465 	unsigned int *rt_state;
1466 	unsigned int *rt_model;
1467 
1468 	/* Can only be called from the normal world. */
1469 	if (secure_origin) {
1470 		ERROR("FFA_RUN can only be called from NWd.\n");
1471 		return spmc_ffa_error_return(handle,
1472 					     FFA_ERROR_INVALID_PARAMETER);
1473 	}
1474 
1475 	/* Cannot run a Normal world partition. */
1476 	if (ffa_is_normal_world_id(target_id)) {
1477 		ERROR("Cannot run a NWd partition (0x%x).\n", target_id);
1478 		return spmc_ffa_error_return(handle,
1479 					     FFA_ERROR_INVALID_PARAMETER);
1480 	}
1481 
1482 	/* Check that the target SP exists. */
1483 	sp = spmc_get_sp_ctx(target_id);
1484 	if (sp == NULL) {
1485 		ERROR("Unknown partition ID (0x%x).\n", target_id);
1486 		return spmc_ffa_error_return(handle,
1487 					     FFA_ERROR_INVALID_PARAMETER);
1488 	}
1489 
1490 	idx = get_ec_index(sp);
1491 
1492 	if (idx != vcpu_id) {
1493 		ERROR("Cannot run vcpu %d != %d.\n", idx, vcpu_id);
1494 		return spmc_ffa_error_return(handle,
1495 					     FFA_ERROR_INVALID_PARAMETER);
1496 	}
1497 	if (sp->runtime_el == S_EL0) {
1498 		spin_lock(&sp->rt_state_lock);
1499 	}
1500 	rt_state = &((sp->ec[idx]).rt_state);
1501 	rt_model = &((sp->ec[idx]).rt_model);
1502 	if (*rt_state == RT_STATE_RUNNING) {
1503 		if (sp->runtime_el == S_EL0) {
1504 			spin_unlock(&sp->rt_state_lock);
1505 		}
1506 		ERROR("Partition (0x%x) is already running.\n", target_id);
1507 		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
1508 	}
1509 
1510 	/*
1511 	 * Sanity check that if the execution context was not waiting then it
1512 	 * was either in the direct request or the run partition runtime model.
1513 	 */
1514 	if (*rt_state == RT_STATE_PREEMPTED || *rt_state == RT_STATE_BLOCKED) {
1515 		assert(*rt_model == RT_MODEL_RUN ||
1516 		       *rt_model == RT_MODEL_DIR_REQ);
1517 	}
1518 
1519 	/*
1520 	 * If the context was waiting then update the partition runtime model.
1521 	 */
1522 	if (*rt_state == RT_STATE_WAITING) {
1523 		*rt_model = RT_MODEL_RUN;
1524 	}
1525 
1526 	/*
1527 	 * Forward the request to the correct SP vCPU after updating
1528 	 * its state.
1529 	 */
1530 	*rt_state = RT_STATE_RUNNING;
1531 
1532 	if (sp->runtime_el == S_EL0) {
1533 		spin_unlock(&sp->rt_state_lock);
1534 	}
1535 
1536 	return spmc_smc_return(smc_fid, secure_origin, x1, 0, 0, 0,
1537 			       handle, cookie, flags, target_id);
1538 }
1539 
1540 static uint64_t rx_release_handler(uint32_t smc_fid,
1541 				   bool secure_origin,
1542 				   uint64_t x1,
1543 				   uint64_t x2,
1544 				   uint64_t x3,
1545 				   uint64_t x4,
1546 				   void *cookie,
1547 				   void *handle,
1548 				   uint64_t flags)
1549 {
1550 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1551 
1552 	spin_lock(&mbox->lock);
1553 
1554 	if (mbox->state != MAILBOX_STATE_FULL) {
1555 		spin_unlock(&mbox->lock);
1556 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1557 	}
1558 
1559 	mbox->state = MAILBOX_STATE_EMPTY;
1560 	spin_unlock(&mbox->lock);
1561 
1562 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1563 }
1564 
1565 static uint64_t spmc_ffa_console_log(uint32_t smc_fid,
1566 				     bool secure_origin,
1567 				     uint64_t x1,
1568 				     uint64_t x2,
1569 				     uint64_t x3,
1570 				     uint64_t x4,
1571 				     void *cookie,
1572 				     void *handle,
1573 				     uint64_t flags)
1574 {
1575 	/* Maximum number of characters is 48: 6 registers of 8 bytes each. */
1576 	char chars[48] = {0};
1577 	size_t chars_max;
1578 	size_t chars_count = x1;
1579 
1580 	/* Requests from the NWd are not supported. */
1581 	if (!secure_origin) {
1582 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1583 	}
1584 
1585 	assert(smc_fid == FFA_CONSOLE_LOG_SMC32 || smc_fid == FFA_CONSOLE_LOG_SMC64);
1586 	if (smc_fid == FFA_CONSOLE_LOG_SMC32) {
1587 		uint32_t *registers = (uint32_t *)chars;
1588 		registers[0] = (uint32_t)x2;
1589 		registers[1] = (uint32_t)x3;
1590 		registers[2] = (uint32_t)x4;
1591 		registers[3] = (uint32_t)SMC_GET_GP(handle, CTX_GPREG_X5);
1592 		registers[4] = (uint32_t)SMC_GET_GP(handle, CTX_GPREG_X6);
1593 		registers[5] = (uint32_t)SMC_GET_GP(handle, CTX_GPREG_X7);
1594 		chars_max = 6 * sizeof(uint32_t);
1595 	} else {
1596 		uint64_t *registers = (uint64_t *)chars;
1597 		registers[0] = x2;
1598 		registers[1] = x3;
1599 		registers[2] = x4;
1600 		registers[3] = SMC_GET_GP(handle, CTX_GPREG_X5);
1601 		registers[4] = SMC_GET_GP(handle, CTX_GPREG_X6);
1602 		registers[5] = SMC_GET_GP(handle, CTX_GPREG_X7);
1603 		chars_max = 6 * sizeof(uint64_t);
1604 	}
1605 
1606 	if ((chars_count == 0) || (chars_count > chars_max)) {
1607 		return spmc_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
1608 	}
1609 
1610 	for (size_t i = 0; (i < chars_count) && (chars[i] != '\0'); i++) {
1611 		putchar(chars[i]);
1612 	}
1613 
1614 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1615 }
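/*
 * Note: the characters are packed into the SMC argument registers x2-x7
 * (w2-w7 for the SMC32 variant), so a single FFA_CONSOLE_LOG call carries at
 * most 24 bytes (SMC32) or 48 bytes (SMC64) of output, with x1 holding the
 * number of valid characters.
 */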
1616 
1617 /*
1618  * Perform initial validation on the provided secondary entry point.
1619  * For now ensure it does not lie within the BL31 Image or the SP's
1620  * RX/TX buffers as these are mapped within EL3.
1621  * TODO: perform validation for additional invalid memory regions.
1622  */
1623 static int validate_secondary_ep(uintptr_t ep, struct secure_partition_desc *sp)
1624 {
1625 	struct mailbox *mb;
1626 	uintptr_t buffer_size;
1627 	uintptr_t sp_rx_buffer;
1628 	uintptr_t sp_tx_buffer;
1629 	uintptr_t sp_rx_buffer_limit;
1630 	uintptr_t sp_tx_buffer_limit;
1631 
1632 	mb = &sp->mailbox;
1633 	buffer_size = (uintptr_t) (mb->rxtx_page_count * FFA_PAGE_SIZE);
1634 	sp_rx_buffer = (uintptr_t) mb->rx_buffer;
1635 	sp_tx_buffer = (uintptr_t) mb->tx_buffer;
1636 	sp_rx_buffer_limit = sp_rx_buffer + buffer_size;
1637 	sp_tx_buffer_limit = sp_tx_buffer + buffer_size;
1638 
1639 	/*
1640 	 * Check if the entry point lies within BL31, or the
1641 	 * SP's RX or TX buffer.
1642 	 */
1643 	if ((ep >= BL31_BASE && ep < BL31_LIMIT) ||
1644 	    (ep >= sp_rx_buffer && ep < sp_rx_buffer_limit) ||
1645 	    (ep >= sp_tx_buffer && ep < sp_tx_buffer_limit)) {
1646 		return -EINVAL;
1647 	}
1648 	return 0;
1649 }
1650 
1651 /*******************************************************************************
1652  * This function handles the FFA_SECONDARY_EP_REGISTER SMC to allow an SP to
1653  *  register an entry point for initialization during a secondary cold boot.
1654  ******************************************************************************/
1655 static uint64_t ffa_sec_ep_register_handler(uint32_t smc_fid,
1656 					    bool secure_origin,
1657 					    uint64_t x1,
1658 					    uint64_t x2,
1659 					    uint64_t x3,
1660 					    uint64_t x4,
1661 					    void *cookie,
1662 					    void *handle,
1663 					    uint64_t flags)
1664 {
1665 	struct secure_partition_desc *sp;
1666 	struct sp_exec_ctx *sp_ctx;
1667 
1668 	/* This request cannot originate from the Normal world. */
1669 	if (!secure_origin) {
1670 		WARN("%s: Can only be called from SWd.\n", __func__);
1671 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1672 	}
1673 
1674 	/* Get the context of the current SP. */
1675 	sp = spmc_get_current_sp_ctx();
1676 	if (sp == NULL) {
1677 		WARN("%s: Cannot find SP context.\n", __func__);
1678 		return spmc_ffa_error_return(handle,
1679 					     FFA_ERROR_INVALID_PARAMETER);
1680 	}
1681 
1682 	/* Only an S-EL1 SP should be invoking this ABI. */
1683 	if (sp->runtime_el != S_EL1) {
1684 		WARN("%s: Can only be called for a S-EL1 SP.\n", __func__);
1685 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1686 	}
1687 
1688 	/* Ensure the SP is in its initialization state. */
1689 	sp_ctx = spmc_get_sp_ec(sp);
1690 	if (sp_ctx->rt_model != RT_MODEL_INIT) {
1691 		WARN("%s: Can only be called during SP initialization.\n",
1692 		     __func__);
1693 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1694 	}
1695 
1696 	/* Perform initial validation of the secondary entry point. */
1697 	if (validate_secondary_ep(x1, sp)) {
1698 		WARN("%s: Invalid entry point provided (0x%lx).\n",
1699 		     __func__, x1);
1700 		return spmc_ffa_error_return(handle,
1701 					     FFA_ERROR_INVALID_PARAMETER);
1702 	}
1703 
1704 	/*
1705 	 * Update the secondary entrypoint in SP context.
1706 	 * We don't need a lock here as during partition initialization there
1707 	 * will only be a single core online.
1708 	 */
1709 	sp->secondary_ep = x1;
1710 	VERBOSE("%s: 0x%lx\n", __func__, sp->secondary_ep);
1711 
1712 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1713 }
1714 
1715 /*******************************************************************************
1716  * Permissions are encoded using a different format in the FFA_MEM_PERM_* ABIs
1717  * than in the Trusted Firmware, where the mmap_attr_t enum type is used. This
1718  * function converts a permission value from the FF-A format to the mmap_attr_t
1719  * format by setting MT_RW/MT_RO, MT_USER/MT_PRIVILEGED and
1720  * MT_EXECUTE/MT_EXECUTE_NEVER. The other fields are left as 0 because they are
1721  * ignored by the function xlat_change_mem_attributes_ctx().
1722  ******************************************************************************/
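/*
 * Summary of the conversion performed below (0 is treated as invalid by the
 * caller):
 *   RW + non-executable -> MT_RW | MT_USER | MT_EXECUTE_NEVER
 *   RO + executable     -> MT_RO | MT_USER | MT_EXECUTE
 *   RO + non-executable -> MT_RO | MT_USER | MT_EXECUTE_NEVER
 *   RW + executable, no-access or reserved -> 0
 */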
1723 static unsigned int ffa_perm_to_mmap_perm(unsigned int perms)
1724 {
1725 	unsigned int tf_attr = 0U;
1726 	unsigned int access;
1727 
1728 	/* Deal with data access permissions first. */
1729 	access = (perms & FFA_MEM_PERM_DATA_MASK) >> FFA_MEM_PERM_DATA_SHIFT;
1730 
1731 	switch (access) {
1732 	case FFA_MEM_PERM_DATA_RW:
1733 		/* Leave tf_attr as 0 (rejected) if execute is requested with RW. */
1734 		if ((perms & FFA_MEM_PERM_INST_NON_EXEC) != 0) {
1735 			tf_attr |= MT_RW | MT_USER | MT_EXECUTE_NEVER;
1736 		}
1737 		break;
1738 
1739 	case FFA_MEM_PERM_DATA_RO:
1740 		tf_attr |= MT_RO | MT_USER;
1741 		/* Deal with the instruction access permissions next. */
1742 		if ((perms & FFA_MEM_PERM_INST_NON_EXEC) == 0) {
1743 			tf_attr |= MT_EXECUTE;
1744 		} else {
1745 			tf_attr |= MT_EXECUTE_NEVER;
1746 		}
1747 		break;
1748 
1749 	case FFA_MEM_PERM_DATA_NA:
1750 	default:
1751 		return tf_attr;
1752 	}
1753 
1754 	return tf_attr;
1755 }
1756 
1757 /*******************************************************************************
1758  * Handler to set the permissions of a set of contiguous pages of a S-EL0 SP
1759  ******************************************************************************/
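/*
 * As seen by the handler below: x1 carries the base VA, x2 the number of pages
 * (in PAGE_SIZE units) and x3 the FF-A memory permissions. As an illustrative
 * sketch (derived from the code, not a normative ABI description): x2 = 1 with
 * x3 = FFA_MEM_PERM_DATA_RO and instruction access left executable results in
 * the page at x1 being remapped as MT_RO | MT_USER | MT_EXECUTE.
 */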
1760 static uint64_t ffa_mem_perm_set_handler(uint32_t smc_fid,
1761 					 bool secure_origin,
1762 					 uint64_t x1,
1763 					 uint64_t x2,
1764 					 uint64_t x3,
1765 					 uint64_t x4,
1766 					 void *cookie,
1767 					 void *handle,
1768 					 uint64_t flags)
1769 {
1770 	struct secure_partition_desc *sp;
1771 	unsigned int idx;
1772 	uintptr_t base_va = (uintptr_t) x1;
1773 	size_t size = (size_t)(x2 * PAGE_SIZE);
1774 	uint32_t tf_attr;
1775 	int ret;
1776 
1777 	/* This request cannot originate from the Normal world. */
1778 	if (!secure_origin) {
1779 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1780 	}
1781 
1782 	if (size == 0) {
1783 		return spmc_ffa_error_return(handle,
1784 					     FFA_ERROR_INVALID_PARAMETER);
1785 	}
1786 
1787 	/* Get the context of the current SP. */
1788 	sp = spmc_get_current_sp_ctx();
1789 	if (sp == NULL) {
1790 		return spmc_ffa_error_return(handle,
1791 					     FFA_ERROR_INVALID_PARAMETER);
1792 	}
1793 
1794 	/* An S-EL1 SP has no business invoking this ABI. */
1795 	if (sp->runtime_el == S_EL1) {
1796 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1797 	}
1798 
1799 	if ((x3 & ~((uint64_t)FFA_MEM_PERM_MASK)) != 0) {
1800 		return spmc_ffa_error_return(handle,
1801 					     FFA_ERROR_INVALID_PARAMETER);
1802 	}
1803 
1804 	/* Get the execution context of the calling SP. */
1805 	idx = get_ec_index(sp);
1806 
1807 	/*
1808 	 * Ensure that the S-EL0 SP is initialising itself. We do not need to
1809 	 * synchronise this operation through a spinlock since an S-EL0 SP is UP
1810 	 * and can only be initialising on this CPU.
1811 	 */
1812 	if (sp->ec[idx].rt_model != RT_MODEL_INIT) {
1813 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1814 	}
1815 
1816 	VERBOSE("Setting memory permissions:\n");
1817 	VERBOSE("  Start address  : 0x%lx\n", base_va);
1818 	VERBOSE("  Number of pages: %lu (%zu bytes)\n", x2, size);
1819 	VERBOSE("  Attributes     : 0x%x\n", (uint32_t)x3);
1820 
1821 	/* Convert inbound permissions to TF-A permission attributes */
1822 	tf_attr = ffa_perm_to_mmap_perm((unsigned int)x3);
1823 	if (tf_attr == 0U) {
1824 		return spmc_ffa_error_return(handle,
1825 					     FFA_ERROR_INVALID_PARAMETER);
1826 	}
1827 
1828 	/* Request the change in permissions */
1829 	ret = xlat_change_mem_attributes_ctx(sp->xlat_ctx_handle,
1830 					     base_va, size, tf_attr);
1831 	if (ret != 0) {
1832 		return spmc_ffa_error_return(handle,
1833 					     FFA_ERROR_INVALID_PARAMETER);
1834 	}
1835 
1836 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1837 }
1838 
1839 /*******************************************************************************
1840  * Permissions are encoded using a different format in the FFA_MEM_PERM_* ABIs
1841  * than in the Trusted Firmware, where the mmap_attr_t enum type is used. This
1842  * function converts a permission value from the mmap_attr_t format to the FF-A
1843  * format.
1844  ******************************************************************************/
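/*
 * For example, MT_RO | MT_USER | MT_EXECUTE_NEVER is returned as
 * FFA_MEM_PERM_DATA_RO | FFA_MEM_PERM_INST_NON_EXEC, while a mapping without
 * MT_USER is reported as FFA_MEM_PERM_DATA_NA.
 */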
1845 static unsigned int mmap_perm_to_ffa_perm(unsigned int attr)
1846 {
1847 	unsigned int perms = 0U;
1848 	unsigned int data_access;
1849 
1850 	if ((attr & MT_USER) == 0) {
1851 		/* No access from EL0. */
1852 		data_access = FFA_MEM_PERM_DATA_NA;
1853 	} else {
1854 		if ((attr & MT_RW) != 0) {
1855 			data_access = FFA_MEM_PERM_DATA_RW;
1856 		} else {
1857 			data_access = FFA_MEM_PERM_DATA_RO;
1858 		}
1859 	}
1860 
1861 	perms |= (data_access & FFA_MEM_PERM_DATA_MASK)
1862 		<< FFA_MEM_PERM_DATA_SHIFT;
1863 
1864 	if ((attr & MT_EXECUTE_NEVER) != 0U) {
1865 		perms |= FFA_MEM_PERM_INST_NON_EXEC;
1866 	}
1867 
1868 	return perms;
1869 }
1870 
1871 /*******************************************************************************
1872  * Handler to get the permissions of a set of contiguous pages of a S-EL0 SP
1873  ******************************************************************************/
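/*
 * As seen by the handler below: x1 carries the VA to query; on success the
 * FF-A encoded permissions are returned in x2 of FFA_SUCCESS.
 */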
1874 static uint64_t ffa_mem_perm_get_handler(uint32_t smc_fid,
1875 					 bool secure_origin,
1876 					 uint64_t x1,
1877 					 uint64_t x2,
1878 					 uint64_t x3,
1879 					 uint64_t x4,
1880 					 void *cookie,
1881 					 void *handle,
1882 					 uint64_t flags)
1883 {
1884 	struct secure_partition_desc *sp;
1885 	unsigned int idx;
1886 	uintptr_t base_va = (uintptr_t)x1;
1887 	uint32_t tf_attr = 0;
1888 	int ret;
1889 
1890 	/* This request cannot originate from the Normal world. */
1891 	if (!secure_origin) {
1892 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1893 	}
1894 
1895 	/* Get the context of the current SP. */
1896 	sp = spmc_get_current_sp_ctx();
1897 	if (sp == NULL) {
1898 		return spmc_ffa_error_return(handle,
1899 					     FFA_ERROR_INVALID_PARAMETER);
1900 	}
1901 
1902 	/* An S-EL1 SP has no business invoking this ABI. */
1903 	if (sp->runtime_el == S_EL1) {
1904 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1905 	}
1906 
1907 	/* Get the execution context of the calling SP. */
1908 	idx = get_ec_index(sp);
1909 
1910 	/*
1911 	 * Ensure that the S-EL0 SP is initialising itself. We do not need to
1912 	 * synchronise this operation through a spinlock since an S-EL0 SP is UP
1913 	 * and can only be initialising on this CPU.
1914 	 */
1915 	if (sp->ec[idx].rt_model != RT_MODEL_INIT) {
1916 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1917 	}
1918 
1919 	/* Request the permissions */
1920 	ret = xlat_get_mem_attributes_ctx(sp->xlat_ctx_handle, base_va, &tf_attr);
1921 	if (ret != 0) {
1922 		return spmc_ffa_error_return(handle,
1923 					     FFA_ERROR_INVALID_PARAMETER);
1924 	}
1925 
1926 	/* Convert TF-A permission to FF-A permissions attributes. */
1927 	x2 = mmap_perm_to_ffa_perm(tf_attr);
1928 
1929 	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0, x2);
1930 }
1931 
1932 /*******************************************************************************
1933  * This function parses the Secure Partition manifest. From the manifest, it
1934  * fetches the details required to prepare the Secure Partition image context
1935  * and the Secure Partition image boot arguments, if any.
1936  ******************************************************************************/
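/*
 * Manifest properties consumed below (mandatory): "uuid" (u32 array),
 * "exception-level", "ffa-version", "execution-state", "messaging-method" and
 * "execution-ctx-count". Optional: "id", "power-management-messages" and
 * "gp-register-num".
 */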
1937 static int sp_manifest_parse(void *sp_manifest, int offset,
1938 			     struct secure_partition_desc *sp,
1939 			     entry_point_info_t *ep_info,
1940 			     int32_t *boot_info_reg)
1941 {
1942 	int32_t ret, node;
1943 	uint32_t config_32;
1944 
1945 	/*
1946 	 * Look for the mandatory fields that are expected to be present in
1947 	 * the SP manifests.
1948 	 */
1949 	node = fdt_path_offset(sp_manifest, "/");
1950 	if (node < 0) {
1951 		ERROR("Did not find root node.\n");
1952 		return node;
1953 	}
1954 
1955 	ret = fdt_read_uint32_array(sp_manifest, node, "uuid",
1956 				    ARRAY_SIZE(sp->uuid), sp->uuid);
1957 	if (ret != 0) {
1958 		ERROR("Missing Secure Partition UUID.\n");
1959 		return ret;
1960 	}
1961 
1962 	ret = fdt_read_uint32(sp_manifest, node, "exception-level", &config_32);
1963 	if (ret != 0) {
1964 		ERROR("Missing SP Exception Level information.\n");
1965 		return ret;
1966 	}
1967 
1968 	sp->runtime_el = config_32;
1969 
1970 	ret = fdt_read_uint32(sp_manifest, node, "ffa-version", &config_32);
1971 	if (ret != 0) {
1972 		ERROR("Missing Secure Partition FF-A Version.\n");
1973 		return ret;
1974 	}
1975 
1976 	sp->ffa_version = config_32;
1977 
1978 	ret = fdt_read_uint32(sp_manifest, node, "execution-state", &config_32);
1979 	if (ret != 0) {
1980 		ERROR("Missing Secure Partition Execution State.\n");
1981 		return ret;
1982 	}
1983 
1984 	sp->execution_state = config_32;
1985 
1986 	ret = fdt_read_uint32(sp_manifest, node,
1987 			      "messaging-method", &config_32);
1988 	if (ret != 0) {
1989 		ERROR("Missing Secure Partition messaging method.\n");
1990 		return ret;
1991 	}
1992 
1993 	/* Validate this entry, we currently only support direct messaging. */
1994 	if ((config_32 & ~(FFA_PARTITION_DIRECT_REQ_RECV |
1995 			  FFA_PARTITION_DIRECT_REQ_SEND |
1996 			  FFA_PARTITION_DIRECT_REQ2_RECV |
1997 			  FFA_PARTITION_DIRECT_REQ2_SEND)) != 0U) {
1998 		WARN("Invalid Secure Partition messaging method (0x%x)\n",
1999 		     config_32);
2000 		return -EINVAL;
2001 	}
2002 
2003 	sp->properties = config_32;
2004 
2005 	ret = fdt_read_uint32(sp_manifest, node,
2006 			      "execution-ctx-count", &config_32);
2007 
2008 	if (ret != 0) {
2009 		ERROR("Missing SP Execution Context Count.\n");
2010 		return ret;
2011 	}
2012 
2013 	/*
2014 	 * Ensure this field is set correctly in the manifest. However, since
2015 	 * this is currently a hardcoded value for S-EL1 partitions, we do not
2016 	 * need to save it here; just validate it.
2017 	 */
2018 	if ((sp->runtime_el == S_EL1) && (config_32 != PLATFORM_CORE_COUNT)) {
2019 		ERROR("SP Execution Context Count (%u) must be %u.\n",
2020 			config_32, PLATFORM_CORE_COUNT);
2021 		return -EINVAL;
2022 	}
2023 
2024 	/*
2025 	 * Look for the optional fields that may be present in
2026 	 * an SP manifest.
2027 	 */
2028 	ret = fdt_read_uint32(sp_manifest, node, "id", &config_32);
2029 	if (ret != 0) {
2030 		WARN("Missing Secure Partition ID.\n");
2031 	} else {
2032 		if (!is_ffa_secure_id_valid(config_32)) {
2033 			ERROR("Invalid Secure Partition ID (0x%x).\n",
2034 			      config_32);
2035 			return -EINVAL;
2036 		}
2037 		sp->sp_id = config_32;
2038 	}
2039 
2040 	ret = fdt_read_uint32(sp_manifest, node,
2041 			      "power-management-messages", &config_32);
2042 	if (ret != 0) {
2043 		WARN("Missing Power Management Messages entry.\n");
2044 	} else {
2045 		if ((sp->runtime_el == S_EL0) && (config_32 != 0)) {
2046 			ERROR("Power messages not supported for S-EL0 SP\n");
2047 			return -EINVAL;
2048 		}
2049 
2050 		/*
2051 		 * Ensure only the currently supported power messages have
2052 		 * been requested.
2053 		 */
2054 		if (config_32 & ~(FFA_PM_MSG_SUB_CPU_OFF |
2055 				  FFA_PM_MSG_SUB_CPU_SUSPEND |
2056 				  FFA_PM_MSG_SUB_CPU_SUSPEND_RESUME)) {
2057 			ERROR("Requested unsupported PM messages (%x)\n",
2058 			      config_32);
2059 			return -EINVAL;
2060 		}
2061 		sp->pwr_mgmt_msgs = config_32;
2062 	}
2063 
2064 	ret = fdt_read_uint32(sp_manifest, node,
2065 			      "gp-register-num", &config_32);
2066 	if (ret != 0) {
2067 		WARN("Missing boot information register.\n");
2068 	} else {
2069 		/* Check if a register number between 0-3 is specified. */
2070 		if (config_32 < 4) {
2071 			*boot_info_reg = config_32;
2072 		} else {
2073 			WARN("Incorrect boot information register (%u).\n",
2074 			     config_32);
2075 		}
2076 	}
2077 
2078 	return 0;
2079 }
2080 
2081 /*******************************************************************************
2082  * This function gets the Secure Partition Manifest base and maps the manifest
2083  * region.
2084  * Currently only one Secure Partition manifest is considered, and it is used
2085  * to prepare the context for the single Secure Partition.
2086  ******************************************************************************/
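/*
 * Outline of the steps performed below: retrieve the BL2-provided entry point
 * information, map the manifest region, locate the "arm,ffa-manifest-1.0"
 * node, parse the manifest and then complete the S-EL1 or S-EL0 specific
 * setup for the partition.
 */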
2087 static int find_and_prepare_sp_context(void)
2088 {
2089 	void *sp_manifest;
2090 	uintptr_t manifest_base;
2091 	uintptr_t manifest_base_align;
2092 	entry_point_info_t *next_image_ep_info;
2093 	int32_t ret, boot_info_reg = -1;
2094 	struct secure_partition_desc *sp;
2095 
2096 	next_image_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
2097 	if (next_image_ep_info == NULL) {
2098 		WARN("No Secure Partition image provided by BL2.\n");
2099 		return -ENOENT;
2100 	}
2101 
2102 	sp_manifest = (void *)next_image_ep_info->args.arg0;
2103 	if (sp_manifest == NULL) {
2104 		WARN("Secure Partition manifest absent.\n");
2105 		return -ENOENT;
2106 	}
2107 
2108 	manifest_base = (uintptr_t)sp_manifest;
2109 	manifest_base_align = page_align(manifest_base, DOWN);
2110 
2111 	/*
2112 	 * Map the secure partition manifest region in the EL3 translation
2113 	 * regime.
2114 	 * Map an area of (2 * PAGE_SIZE) for now. After the manifest base is
2115 	 * aligned down to a page boundary, a single page starting from the
2116 	 * aligned base may not fully contain the manifest region.
2117 	 */
2118 	ret = mmap_add_dynamic_region((unsigned long long)manifest_base_align,
2119 				      manifest_base_align,
2120 				      PAGE_SIZE * 2,
2121 				      MT_RO_DATA);
2122 	if (ret != 0) {
2123 		ERROR("Error while mapping SP manifest (%d).\n", ret);
2124 		return ret;
2125 	}
2126 
2127 	ret = fdt_node_offset_by_compatible(sp_manifest, -1,
2128 					    "arm,ffa-manifest-1.0");
2129 	if (ret < 0) {
2130 		ERROR("Error happened in SP manifest reading.\n");
2131 		return -EINVAL;
2132 	}
2133 
2134 	/*
2135 	 * Store the size of the manifest so that it can later be used to pass
2136 	 * the manifest as boot information.
2137 	 */
2138 	next_image_ep_info->args.arg1 = fdt_totalsize(sp_manifest);
2139 	INFO("Manifest addr = %lx, size = %lu bytes\n", manifest_base,
2140 	     next_image_ep_info->args.arg1);
2141 
2142 	/*
2143 	 * Select an SP descriptor for initialising the partition's execution
2144 	 * context on the primary CPU.
2145 	 */
2146 	sp = spmc_get_current_sp_ctx();
2147 
2148 #if SPMC_AT_EL3_SEL0_SP
2149 	/* Assign the translation tables context to the selected SP. */
2150 	sp->xlat_ctx_handle = spm_get_sp_xlat_context();
2151 
2152 #endif /* SPMC_AT_EL3_SEL0_SP */
2153 	/* Initialize entry point information for the SP */
2154 	SET_PARAM_HEAD(next_image_ep_info, PARAM_EP, VERSION_1,
2155 		       SECURE | EP_ST_ENABLE);
2156 
2157 	/* Parse the SP manifest. */
2158 	ret = sp_manifest_parse(sp_manifest, ret, sp, next_image_ep_info,
2159 				&boot_info_reg);
2160 	if (ret != 0) {
2161 		ERROR("Error in Secure Partition manifest parsing.\n");
2162 		return ret;
2163 	}
2164 
2165 	/* Perform any common initialisation. */
2166 	spmc_sp_common_setup(sp, next_image_ep_info, boot_info_reg);
2167 
2168 	/* Perform any initialisation specific to S-EL1 SPs. */
2169 	if (sp->runtime_el == S_EL1) {
2170 		spmc_el1_sp_setup(sp, next_image_ep_info);
2171 		spmc_sp_common_ep_commit(sp, next_image_ep_info);
2172 	}
2173 #if SPMC_AT_EL3_SEL0_SP
2174 	/* Perform any initialisation specific to S-EL0 SPs. */
2175 	else if (sp->runtime_el == S_EL0) {
2176 		/* Setup spsr in endpoint info for common context management routine. */
2177 		spmc_el0_sp_spsr_setup(next_image_ep_info);
2178 
2179 		spmc_sp_common_ep_commit(sp, next_image_ep_info);
2180 
2181 		/*
2182 		 * Perform any initialisation specific to S-EL0 not set by common
2183 		 * context management routine.
2184 		 */
2185 		spmc_el0_sp_setup(sp, boot_info_reg, sp_manifest);
2186 	}
2187 #endif /* SPMC_AT_EL3_SEL0_SP */
2188 	else {
2189 		ERROR("Unexpected runtime EL: %u\n", sp->runtime_el);
2190 		return -EINVAL;
2191 	}
2192 
2193 	return 0;
2194 }
2195 
2196 /*******************************************************************************
2197  * This function validates the EL3 Logical Partition descriptors and then
2198  * invokes the initialisation handler of each EL3 Logical Partition.
2199  ******************************************************************************/
2200 static int32_t logical_sp_init(void)
2201 {
2202 	int32_t rc = 0;
2203 	struct el3_lp_desc *el3_lp_descs;
2204 
2205 	/* Perform initial validation of the Logical Partitions. */
2206 	rc = el3_sp_desc_validate();
2207 	if (rc != 0) {
2208 		ERROR("Logical Partition validation failed!\n");
2209 		return rc;
2210 	}
2211 
2212 	el3_lp_descs = get_el3_lp_array();
2213 
2214 	INFO("Logical Secure Partition init start.\n");
2215 	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
2216 		rc = el3_lp_descs[i].init();
2217 		if (rc != 0) {
2218 			ERROR("Logical SP (0x%x) Failed to Initialize\n",
2219 			      el3_lp_descs[i].sp_id);
2220 			return rc;
2221 		}
2222 		VERBOSE("Logical SP (0x%x) Initialized\n",
2223 			      el3_lp_descs[i].sp_id);
2224 	}
2225 
2226 	INFO("Logical Secure Partition init completed.\n");
2227 
2228 	return rc;
2229 }
2230 
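/*******************************************************************************
 * This function takes an SP execution context pointer and performs a
 * synchronous entry into it on the current CPU: the SP context is installed,
 * EL1 system registers are restored, EL1 TLBs are invalidated and the SP is
 * entered; the secure state is saved again once the SP exits.
 ******************************************************************************/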
2231 uint64_t spmc_sp_synchronous_entry(struct sp_exec_ctx *ec)
2232 {
2233 	uint64_t rc;
2234 
2235 	assert(ec != NULL);
2236 
2237 	/* Assign the context of the SP to this CPU */
2238 	cm_set_context(&(ec->cpu_ctx), SECURE);
2239 
2240 	/* Restore the context assigned above */
2241 	cm_el1_sysregs_context_restore(SECURE);
2242 	cm_set_next_eret_context(SECURE);
2243 
2244 	/* Invalidate TLBs at EL1. */
2245 	tlbivmalle1();
2246 	dsbish();
2247 
2248 	/* Enter Secure Partition */
2249 	rc = spm_secure_partition_enter(&ec->c_rt_ctx);
2250 
2251 	/* Save secure state */
2252 	cm_el1_sysregs_context_save(SECURE);
2253 
2254 	return rc;
2255 }
2256 
2257 /*******************************************************************************
2258  * SPMC Helper Functions.
2259  ******************************************************************************/
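/*
 * sp_init() performs the deferred initialisation of the physical SP via a
 * synchronous entry and returns 1 if the SP initialised successfully, 0
 * otherwise; it is registered as the deferred BL32 init hook in spmc_setup().
 */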
2260 static int32_t sp_init(void)
2261 {
2262 	uint64_t rc;
2263 	struct secure_partition_desc *sp;
2264 	struct sp_exec_ctx *ec;
2265 
2266 	sp = spmc_get_current_sp_ctx();
2267 	ec = spmc_get_sp_ec(sp);
2268 	ec->rt_model = RT_MODEL_INIT;
2269 	ec->rt_state = RT_STATE_RUNNING;
2270 
2271 	INFO("Secure Partition (0x%x) init start.\n", sp->sp_id);
2272 
2273 	rc = spmc_sp_synchronous_entry(ec);
2274 	if (rc != 0) {
2275 		/* Indicate SP init was not successful. */
2276 		ERROR("SP (0x%x) failed to initialize (%lu).\n",
2277 		      sp->sp_id, rc);
2278 		return 0;
2279 	}
2280 
2281 	ec->rt_state = RT_STATE_WAITING;
2282 	INFO("Secure Partition initialized.\n");
2283 
2284 	return 1;
2285 }
2286 
2287 static void initialize_sp_descs(void)
2288 {
2289 	struct secure_partition_desc *sp;
2290 
2291 	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
2292 		sp = &sp_desc[i];
2293 		sp->sp_id = INV_SP_ID;
2294 		sp->mailbox.rx_buffer = NULL;
2295 		sp->mailbox.tx_buffer = NULL;
2296 		sp->mailbox.state = MAILBOX_STATE_EMPTY;
2297 		sp->secondary_ep = 0;
2298 	}
2299 }
2300 
2301 static void initialize_ns_ep_descs(void)
2302 {
2303 	struct ns_endpoint_desc *ns_ep;
2304 
2305 	for (unsigned int i = 0U; i < NS_PARTITION_COUNT; i++) {
2306 		ns_ep = &ns_ep_desc[i];
2307 		/*
2308 		 * Clashes with the Hypervisor ID but will not be a
2309 		 * problem in practice.
2310 		 */
2311 		ns_ep->ns_ep_id = 0;
2312 		ns_ep->ffa_version = 0;
2313 		ns_ep->mailbox.rx_buffer = NULL;
2314 		ns_ep->mailbox.tx_buffer = NULL;
2315 		ns_ep->mailbox.state = MAILBOX_STATE_EMPTY;
2316 	}
2317 }
2318 
2319 /*******************************************************************************
2320  * Initialize SPMC attributes for the SPMD.
2321  ******************************************************************************/
2322 void spmc_populate_attrs(spmc_manifest_attribute_t *spmc_attrs)
2323 {
2324 	spmc_attrs->major_version = FFA_VERSION_MAJOR;
2325 	spmc_attrs->minor_version = FFA_VERSION_MINOR;
2326 	spmc_attrs->exec_state = MODE_RW_64;
2327 	spmc_attrs->spmc_id = FFA_SPMC_ID;
2328 }
2329 
2330 /*******************************************************************************
2331  * Initialize contexts of all Secure Partitions.
2332  ******************************************************************************/
2333 int32_t spmc_setup(void)
2334 {
2335 	int32_t ret;
2336 	uint32_t flags;
2337 
2338 	/* Initialize endpoint descriptors */
2339 	initialize_sp_descs();
2340 	initialize_ns_ep_descs();
2341 
2342 	/*
2343 	 * Retrieve the datastore allocated by platform code for tracking shared
2344 	 * memory requests and zero the region once it has been obtained.
2345 	 */
2346 	ret = plat_spmc_shmem_datastore_get(&spmc_shmem_obj_state.data,
2347 					    &spmc_shmem_obj_state.data_size);
2348 	if (ret != 0) {
2349 		ERROR("Failed to obtain memory descriptor backing store!\n");
2350 		return ret;
2351 	}
2352 	memset(spmc_shmem_obj_state.data, 0, spmc_shmem_obj_state.data_size);
2353 
2354 	/* Setup logical SPs. */
2355 	ret = logical_sp_init();
2356 	if (ret != 0) {
2357 		ERROR("Failed to initialize Logical Partitions.\n");
2358 		return ret;
2359 	}
2360 
2361 	/* Perform physical SP setup. */
2362 
2363 	/* Disable MMU at EL1 (initialized by BL2) */
2364 	disable_mmu_icache_el1();
2365 
2366 	/* Initialize context of the SP */
2367 	INFO("Secure Partition context setup start.\n");
2368 
2369 	ret = find_and_prepare_sp_context();
2370 	if (ret != 0) {
2371 		ERROR("Error in SP finding and context preparation.\n");
2372 		return ret;
2373 	}
2374 
2375 	/* Register power management hooks with PSCI */
2376 	psci_register_spd_pm_hook(&spmc_pm);
2377 
2378 	/*
2379 	 * Register an interrupt handler for S-EL1 interrupts
2380 	 * when generated during code executing in the
2381 	 * non-secure state.
2382 	 */
2383 	flags = 0;
2384 	set_interrupt_rm_flag(flags, NON_SECURE);
2385 	ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
2386 					      spmc_sp_interrupt_handler,
2387 					      flags);
2388 	if (ret != 0) {
2389 		ERROR("Failed to register interrupt handler! (%d)\n", ret);
2390 		panic();
2391 	}
2392 
2393 	/* Register the SP init function for deferred initialization. */
2394 	bl31_register_bl32_init(&sp_init);
2395 
2396 	INFO("Secure Partition setup done.\n");
2397 
2398 	return 0;
2399 }
2400 
2401 /*******************************************************************************
2402  * Secure Partition Manager SMC handler.
2403  ******************************************************************************/
2404 uint64_t spmc_smc_handler(uint32_t smc_fid,
2405 			  bool secure_origin,
2406 			  uint64_t x1,
2407 			  uint64_t x2,
2408 			  uint64_t x3,
2409 			  uint64_t x4,
2410 			  void *cookie,
2411 			  void *handle,
2412 			  uint64_t flags)
2413 {
2414 	switch (smc_fid) {
2415 
2416 	case FFA_VERSION:
2417 		return ffa_version_handler(smc_fid, secure_origin, x1, x2, x3,
2418 					   x4, cookie, handle, flags);
2419 
2420 	case FFA_SPM_ID_GET:
2421 		return ffa_spm_id_get_handler(smc_fid, secure_origin, x1, x2,
2422 					     x3, x4, cookie, handle, flags);
2423 
2424 	case FFA_ID_GET:
2425 		return ffa_id_get_handler(smc_fid, secure_origin, x1, x2, x3,
2426 					  x4, cookie, handle, flags);
2427 
2428 	case FFA_FEATURES:
2429 		return ffa_features_handler(smc_fid, secure_origin, x1, x2, x3,
2430 					    x4, cookie, handle, flags);
2431 
2432 	case FFA_SECONDARY_EP_REGISTER_SMC64:
2433 		return ffa_sec_ep_register_handler(smc_fid, secure_origin, x1,
2434 						   x2, x3, x4, cookie, handle,
2435 						   flags);
2436 
2437 	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
2438 	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
2439 	case FFA_MSG_SEND_DIRECT_REQ2_SMC64:
2440 		return direct_req_smc_handler(smc_fid, secure_origin, x1, x2,
2441 					      x3, x4, cookie, handle, flags);
2442 
2443 	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
2444 	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
2445 	case FFA_MSG_SEND_DIRECT_RESP2_SMC64:
2446 		return direct_resp_smc_handler(smc_fid, secure_origin, x1, x2,
2447 					       x3, x4, cookie, handle, flags);
2448 
2449 	case FFA_RXTX_MAP_SMC32:
2450 	case FFA_RXTX_MAP_SMC64:
2451 		return rxtx_map_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2452 					cookie, handle, flags);
2453 
2454 	case FFA_RXTX_UNMAP:
2455 		return rxtx_unmap_handler(smc_fid, secure_origin, x1, x2, x3,
2456 					  x4, cookie, handle, flags);
2457 
2458 	case FFA_PARTITION_INFO_GET:
2459 		return partition_info_get_handler(smc_fid, secure_origin, x1,
2460 						  x2, x3, x4, cookie, handle,
2461 						  flags);
2462 
2463 	case FFA_RX_RELEASE:
2464 		return rx_release_handler(smc_fid, secure_origin, x1, x2, x3,
2465 					  x4, cookie, handle, flags);
2466 
2467 	case FFA_MSG_WAIT:
2468 		return msg_wait_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2469 					cookie, handle, flags);
2470 
2471 	case FFA_ERROR:
2472 		return ffa_error_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2473 					cookie, handle, flags);
2474 
2475 	case FFA_MSG_RUN:
2476 		return ffa_run_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2477 				       cookie, handle, flags);
2478 
2479 	case FFA_MEM_SHARE_SMC32:
2480 	case FFA_MEM_SHARE_SMC64:
2481 	case FFA_MEM_LEND_SMC32:
2482 	case FFA_MEM_LEND_SMC64:
2483 		return spmc_ffa_mem_send(smc_fid, secure_origin, x1, x2, x3, x4,
2484 					 cookie, handle, flags);
2485 
2486 	case FFA_MEM_FRAG_TX:
2487 		return spmc_ffa_mem_frag_tx(smc_fid, secure_origin, x1, x2, x3,
2488 					    x4, cookie, handle, flags);
2489 
2490 	case FFA_MEM_FRAG_RX:
2491 		return spmc_ffa_mem_frag_rx(smc_fid, secure_origin, x1, x2, x3,
2492 					    x4, cookie, handle, flags);
2493 
2494 	case FFA_MEM_RETRIEVE_REQ_SMC32:
2495 	case FFA_MEM_RETRIEVE_REQ_SMC64:
2496 		return spmc_ffa_mem_retrieve_req(smc_fid, secure_origin, x1, x2,
2497 						 x3, x4, cookie, handle, flags);
2498 
2499 	case FFA_MEM_RELINQUISH:
2500 		return spmc_ffa_mem_relinquish(smc_fid, secure_origin, x1, x2,
2501 					       x3, x4, cookie, handle, flags);
2502 
2503 	case FFA_MEM_RECLAIM:
2504 		return spmc_ffa_mem_reclaim(smc_fid, secure_origin, x1, x2, x3,
2505 						x4, cookie, handle, flags);

2506 	case FFA_CONSOLE_LOG_SMC32:
2507 	case FFA_CONSOLE_LOG_SMC64:
2508 		return spmc_ffa_console_log(smc_fid, secure_origin, x1, x2, x3,
2509 						x4, cookie, handle, flags);
2510 
2511 	case FFA_MEM_PERM_GET_SMC32:
2512 	case FFA_MEM_PERM_GET_SMC64:
2513 		return ffa_mem_perm_get_handler(smc_fid, secure_origin, x1, x2,
2514 						x3, x4, cookie, handle, flags);
2515 
2516 	case FFA_MEM_PERM_SET_SMC32:
2517 	case FFA_MEM_PERM_SET_SMC64:
2518 		return ffa_mem_perm_set_handler(smc_fid, secure_origin, x1, x2,
2519 						x3, x4, cookie, handle, flags);
2520 
2521 	default:
2522 		WARN("Unsupported FF-A call 0x%08x.\n", smc_fid);
2523 		break;
2524 	}
2525 	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
2526 }
2527 
2528 /*******************************************************************************
2529  * This function is the handler registered for S-EL1 interrupts by the SPMC. It
2530  * validates the interrupt and upon success arranges entry into the SP for
2531  * handling the interrupt.
2532  ******************************************************************************/
2533 static uint64_t spmc_sp_interrupt_handler(uint32_t id,
2534 					  uint32_t flags,
2535 					  void *handle,
2536 					  void *cookie)
2537 {
2538 	struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
2539 	struct sp_exec_ctx *ec;
2540 	uint32_t linear_id = plat_my_core_pos();
2541 
2542 	/* Sanity check for a NULL pointer dereference. */
2543 	assert(sp != NULL);
2544 
2545 	/* Check the security state when the exception was generated. */
2546 	assert(get_interrupt_src_ss(flags) == NON_SECURE);
2547 
2548 	/* Panic if not an S-EL1 Partition. */
2549 	if (sp->runtime_el != S_EL1) {
2550 		ERROR("Interrupt received for a non-S-EL1 SP on core%u.\n",
2551 		      linear_id);
2552 		panic();
2553 	}
2554 
2555 	/* Obtain a reference to the SP execution context. */
2556 	ec = spmc_get_sp_ec(sp);
2557 
2558 	/* Ensure that the execution context is in waiting state else panic. */
2559 	if (ec->rt_state != RT_STATE_WAITING) {
2560 		ERROR("SP EC on core%u is not waiting (%u), it is (%u).\n",
2561 		      linear_id, RT_STATE_WAITING, ec->rt_state);
2562 		panic();
2563 	}
2564 
2565 	/* Update the runtime model and state of the partition. */
2566 	ec->rt_model = RT_MODEL_INTR;
2567 	ec->rt_state = RT_STATE_RUNNING;
2568 
2569 	VERBOSE("SP (0x%x) interrupt start on core%u.\n", sp->sp_id, linear_id);
2570 
2571 	/*
2572 	 * Forward the interrupt to the S-EL1 SP. The interrupt ID is not
2573 	 * populated as the SP can determine this by itself.
2574 	 * The flags field is forced to 0 mainly so that the SVE hint bit is
2575 	 * passed as cleared for consumption by the lower EL.
2576 	 */
2577 	return spmd_smc_switch_state(FFA_INTERRUPT, false,
2578 				     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
2579 				     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
2580 				     handle, 0ULL);
2581 }
2582