xref: /rk3399_ARM-atf/services/std_svc/spm/el3_spmc/spmc_main.c (revision ffb7742125def3e0acca4c7e4d3215af5ce25a31)
1 /*
2  * Copyright (c) 2022-2024, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <errno.h>
9 #include <stdio.h>
10 
11 #include <arch_helpers.h>
12 #include <bl31/bl31.h>
13 #include <bl31/ehf.h>
14 #include <bl31/interrupt_mgmt.h>
15 #include <common/debug.h>
16 #include <common/fdt_wrappers.h>
17 #include <common/runtime_svc.h>
18 #include <common/uuid.h>
19 #include <lib/el3_runtime/context_mgmt.h>
20 #include <lib/smccc.h>
21 #include <lib/utils.h>
22 #include <lib/xlat_tables/xlat_tables_v2.h>
23 #include <libfdt.h>
24 #include <plat/common/platform.h>
25 #include <services/el3_spmc_logical_sp.h>
26 #include <services/ffa_svc.h>
27 #include <services/spmc_svc.h>
28 #include <services/spmd_svc.h>
29 #include "spmc.h"
30 #include "spmc_shared_mem.h"
31 
32 #include <platform_def.h>
33 
34 /* FFA_MEM_PERM_* helpers */
35 #define FFA_MEM_PERM_MASK		U(7)
36 #define FFA_MEM_PERM_DATA_MASK		U(3)
37 #define FFA_MEM_PERM_DATA_SHIFT		U(0)
38 #define FFA_MEM_PERM_DATA_NA		U(0)
39 #define FFA_MEM_PERM_DATA_RW		U(1)
40 #define FFA_MEM_PERM_DATA_RES		U(2)
41 #define FFA_MEM_PERM_DATA_RO		U(3)
42 #define FFA_MEM_PERM_INST_EXEC          (U(0) << 2)
43 #define FFA_MEM_PERM_INST_NON_EXEC      (U(1) << 2)
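/*
 * In the encoding above, bits[1:0] of the permission value carry the data
 * access permission and bit[2] the instruction access permission; all other
 * bits must be zero (see FFA_MEM_PERM_MASK).
 */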
44 
45 /* Declare the maximum number of SPs and EL3 LPs. */
46 #define MAX_SP_LP_PARTITIONS (SECURE_PARTITION_COUNT + MAX_EL3_LP_DESCS_COUNT)
47 
48 /*
49  * Allocate a secure partition descriptor to describe each SP in the system that
50  * does not reside at EL3.
51  */
52 static struct secure_partition_desc sp_desc[SECURE_PARTITION_COUNT];
53 
54 /*
55  * Allocate an NS endpoint descriptor to describe each VM and the Hypervisor in
56  * the system that interacts with a SP. It is used to track the Hypervisor
57  * buffer pair, version and ID for now. It could be extended to track VM
58  * properties when the SPMC supports indirect messaging.
59  */
60 static struct ns_endpoint_desc ns_ep_desc[NS_PARTITION_COUNT];
61 
62 static uint64_t spmc_sp_interrupt_handler(uint32_t id,
63 					  uint32_t flags,
64 					  void *handle,
65 					  void *cookie);
66 
67 /*
68  * Helper function to obtain the array storing the EL3
69  * Logical Partition descriptors.
70  */
71 struct el3_lp_desc *get_el3_lp_array(void)
72 {
73 	return (struct el3_lp_desc *) EL3_LP_DESCS_START;
74 }
75 
76 /*
77  * Helper function to obtain the descriptor of the last SP to which control
78  * was handed on this physical cpu. Currently, we assume there is only one SP.
79  * TODO: Expand to track multiple partitions when required.
80  */
81 struct secure_partition_desc *spmc_get_current_sp_ctx(void)
82 {
83 	return &(sp_desc[ACTIVE_SP_DESC_INDEX]);
84 }
85 
86 /*
87  * Helper function to obtain the execution context of an SP on the
88  * current physical cpu.
89  */
90 struct sp_exec_ctx *spmc_get_sp_ec(struct secure_partition_desc *sp)
91 {
92 	return &(sp->ec[get_ec_index(sp)]);
93 }
94 
95 /* Helper function to get pointer to SP context from its ID. */
96 struct secure_partition_desc *spmc_get_sp_ctx(uint16_t id)
97 {
98 	/* Check for Secure World Partitions. */
99 	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
100 		if (sp_desc[i].sp_id == id) {
101 			return &(sp_desc[i]);
102 		}
103 	}
104 	return NULL;
105 }
106 
107 /*
108  * Helper function to obtain the descriptor of the Hypervisor or OS kernel.
109  * We assume that the first descriptor is reserved for this entity.
110  */
111 struct ns_endpoint_desc *spmc_get_hyp_ctx(void)
112 {
113 	return &(ns_ep_desc[0]);
114 }
115 
116 /*
117  * Helper function to obtain the RX/TX buffer pair descriptor of the Hypervisor
118  * or OS kernel in the normal world or the last SP that was run.
119  */
120 struct mailbox *spmc_get_mbox_desc(bool secure_origin)
121 {
122 	/* Obtain the RX/TX buffer pair descriptor. */
123 	if (secure_origin) {
124 		return &(spmc_get_current_sp_ctx()->mailbox);
125 	} else {
126 		return &(spmc_get_hyp_ctx()->mailbox);
127 	}
128 }
129 
130 /******************************************************************************
131  * This function returns to the place where spmc_sp_synchronous_entry() was
132  * called originally.
133  ******************************************************************************/
134 __dead2 void spmc_sp_synchronous_exit(struct sp_exec_ctx *ec, uint64_t rc)
135 {
136 	/*
137 	 * The SPM must have initiated the original request through a
138 	 * synchronous entry into the secure partition. Jump back to the
139 	 * original C runtime context with the value of rc in x0;
140 	 */
141 	spm_secure_partition_exit(ec->c_rt_ctx, rc);
142 
143 	panic();
144 }
145 
146 /*******************************************************************************
147  * Return FFA_ERROR with specified error code.
148  ******************************************************************************/
149 uint64_t spmc_ffa_error_return(void *handle, int error_code)
150 {
151 	SMC_RET8(handle, FFA_ERROR,
152 		 FFA_TARGET_INFO_MBZ, error_code,
153 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
154 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
155 }
156 
157 /******************************************************************************
158  * Helper function to validate a secure partition ID to ensure it does not
159  * conflict with any other FF-A component and follows the convention to
160  * indicate it resides within the secure world.
161  ******************************************************************************/
162 bool is_ffa_secure_id_valid(uint16_t partition_id)
163 {
164 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
165 
166 	/* Ensure the ID is not the invalid partition ID. */
167 	if (partition_id == INV_SP_ID) {
168 		return false;
169 	}
170 
171 	/* Ensure the ID is not the SPMD ID. */
172 	if (partition_id == SPMD_DIRECT_MSG_ENDPOINT_ID) {
173 		return false;
174 	}
175 
176 	/*
177 	 * Ensure the ID follows the convention to indicate it resides
178 	 * in the secure world.
179 	 */
180 	if (!ffa_is_secure_world_id(partition_id)) {
181 		return false;
182 	}
183 
184 	/* Ensure we don't conflict with the SPMC partition ID. */
185 	if (partition_id == FFA_SPMC_ID) {
186 		return false;
187 	}
188 
189 	/* Ensure we do not already have an SP context with this ID. */
190 	if (spmc_get_sp_ctx(partition_id)) {
191 		return false;
192 	}
193 
194 	/* Ensure we don't clash with any Logical SPs. */
195 	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
196 		if (el3_lp_descs[i].sp_id == partition_id) {
197 			return false;
198 		}
199 	}
200 
201 	return true;
202 }
203 
204 /*******************************************************************************
205  * This function either forwards the request to the other world or returns
206  * with an ERET depending on the source of the call.
207  * We can assume that the destination is for an entity at a lower exception
208  * level as any messages destined for a logical SP resident in EL3 will have
209  * already been taken care of by the SPMC before entering this function.
210  ******************************************************************************/
211 static uint64_t spmc_smc_return(uint32_t smc_fid,
212 				bool secure_origin,
213 				uint64_t x1,
214 				uint64_t x2,
215 				uint64_t x3,
216 				uint64_t x4,
217 				void *handle,
218 				void *cookie,
219 				uint64_t flags,
220 				uint16_t dst_id)
221 {
222 	/* If the destination is in the normal world always go via the SPMD. */
223 	if (ffa_is_normal_world_id(dst_id)) {
224 		return spmd_smc_handler(smc_fid, x1, x2, x3, x4,
225 					cookie, handle, flags);
226 	}
227 	/*
228 	 * If the caller is secure and we want to return to the secure world,
229 	 * ERET directly.
230 	 */
231 	else if (secure_origin && ffa_is_secure_world_id(dst_id)) {
232 		SMC_RET5(handle, smc_fid, x1, x2, x3, x4);
233 	}
234 	/* If we originated in the normal world then switch contexts. */
235 	else if (!secure_origin && ffa_is_secure_world_id(dst_id)) {
236 		return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2,
237 					     x3, x4, handle);
238 	} else {
239 		/* Unknown State. */
240 		panic();
241 	}
242 
243 	/* Should not be reached. */
244 	return 0;
245 }
246 
247 /*******************************************************************************
248  * FF-A ABI Handlers.
249  ******************************************************************************/
250 
251 /*******************************************************************************
252  * Helper function to validate arg2 as part of a direct message.
253  ******************************************************************************/
254 static inline bool direct_msg_validate_arg2(uint64_t x2)
255 {
256 	/* Check message type. */
257 	if (x2 & FFA_FWK_MSG_BIT) {
258 		/* We have a framework message, ensure it is a known message. */
259 		if (x2 & ~(FFA_FWK_MSG_MASK | FFA_FWK_MSG_BIT)) {
260 			VERBOSE("Invalid message format 0x%lx.\n", x2);
261 			return false;
262 		}
263 	} else {
264 		/* We have a partition message; ensure x2 is not set. */
265 		if (x2 != (uint64_t) 0) {
266 			VERBOSE("Arg2 MBZ for partition messages. (0x%lx).\n",
267 				x2);
268 			return false;
269 		}
270 	}
271 	return true;
272 }
273 
274 /*******************************************************************************
275  * Helper function to validate the destination ID of a direct response.
276  ******************************************************************************/
277 static bool direct_msg_validate_dst_id(uint16_t dst_id)
278 {
279 	struct secure_partition_desc *sp;
280 
281 	/* Check if we're targeting a normal world partition. */
282 	if (ffa_is_normal_world_id(dst_id)) {
283 		return true;
284 	}
285 
286 	/* Or directed to the SPMC itself. */
287 	if (dst_id == FFA_SPMC_ID) {
288 		return true;
289 	}
290 
291 	/* Otherwise ensure the SP exists. */
292 	sp = spmc_get_sp_ctx(dst_id);
293 	if (sp != NULL) {
294 		return true;
295 	}
296 
297 	return false;
298 }
299 
300 /*******************************************************************************
301  * Helper function to validate the response from a Logical Partition.
302  ******************************************************************************/
303 static bool direct_msg_validate_lp_resp(uint16_t origin_id, uint16_t lp_id,
304 					void *handle)
305 {
306 	/* Retrieve populated Direct Response Arguments. */
307 	uint64_t x1 = SMC_GET_GP(handle, CTX_GPREG_X1);
308 	uint64_t x2 = SMC_GET_GP(handle, CTX_GPREG_X2);
309 	uint16_t src_id = ffa_endpoint_source(x1);
310 	uint16_t dst_id = ffa_endpoint_destination(x1);
311 
312 	if (src_id != lp_id) {
313 		ERROR("Invalid EL3 LP source ID (0x%x).\n", src_id);
314 		return false;
315 	}
316 
317 	/*
318 	 * Check the destination ID is valid and ensure the LP is responding to
319 	 * the original request.
320 	 */
321 	if ((!direct_msg_validate_dst_id(dst_id)) || (dst_id != origin_id)) {
322 		ERROR("Invalid EL3 LP destination ID (0x%x).\n", dst_id);
323 		return false;
324 	}
325 
326 	if (!direct_msg_validate_arg2(x2)) {
327 		ERROR("Invalid EL3 LP message encoding.\n");
328 		return false;
329 	}
330 	return true;
331 }
332 
333 /*******************************************************************************
334  * Handle direct request messages and route to the appropriate destination.
335  ******************************************************************************/
336 static uint64_t direct_req_smc_handler(uint32_t smc_fid,
337 				       bool secure_origin,
338 				       uint64_t x1,
339 				       uint64_t x2,
340 				       uint64_t x3,
341 				       uint64_t x4,
342 				       void *cookie,
343 				       void *handle,
344 				       uint64_t flags)
345 {
346 	uint16_t src_id = ffa_endpoint_source(x1);
347 	uint16_t dst_id = ffa_endpoint_destination(x1);
348 	struct el3_lp_desc *el3_lp_descs;
349 	struct secure_partition_desc *sp;
350 	unsigned int idx;
351 
352 	/* Check if arg2 has been populated correctly based on message type. */
353 	if (!direct_msg_validate_arg2(x2)) {
354 		return spmc_ffa_error_return(handle,
355 					     FFA_ERROR_INVALID_PARAMETER);
356 	}
357 
358 	/* Validate Sender is either the current SP or from the normal world. */
359 	if ((secure_origin && src_id != spmc_get_current_sp_ctx()->sp_id) ||
360 		(!secure_origin && !ffa_is_normal_world_id(src_id))) {
361 		ERROR("Invalid direct request source ID (0x%x).\n", src_id);
362 		return spmc_ffa_error_return(handle,
363 					FFA_ERROR_INVALID_PARAMETER);
364 	}
365 
366 	el3_lp_descs = get_el3_lp_array();
367 
368 	/* Check if the request is destined for a Logical Partition. */
369 	for (unsigned int i = 0U; i < MAX_EL3_LP_DESCS_COUNT; i++) {
370 		if (el3_lp_descs[i].sp_id == dst_id) {
371 			uint64_t ret = el3_lp_descs[i].direct_req(
372 						smc_fid, secure_origin, x1, x2,
373 						x3, x4, cookie, handle, flags);
374 			if (!direct_msg_validate_lp_resp(src_id, dst_id,
375 							 handle)) {
376 				panic();
377 			}
378 
379 			/* Message checks out. */
380 			return ret;
381 		}
382 	}
383 
384 	/*
385 	 * If the request was not targeted to a LSP and from the secure world
386 	 * then it is invalid since a SP cannot call into the Normal world and
387 	 * there is no other SP to call into. If there are other SPs in future
388 	 * then the partition runtime model would need to be validated as well.
389 	 */
390 	if (secure_origin) {
391 		VERBOSE("Direct request not supported to the Normal World.\n");
392 		return spmc_ffa_error_return(handle,
393 					     FFA_ERROR_INVALID_PARAMETER);
394 	}
395 
396 	/* Check if the SP ID is valid. */
397 	sp = spmc_get_sp_ctx(dst_id);
398 	if (sp == NULL) {
399 		VERBOSE("Direct request to unknown partition ID (0x%x).\n",
400 			dst_id);
401 		return spmc_ffa_error_return(handle,
402 					     FFA_ERROR_INVALID_PARAMETER);
403 	}
404 
405 	/* Protect the runtime state of a UP S-EL0 SP with a lock. */
406 	if (sp->runtime_el == S_EL0) {
407 		spin_lock(&sp->rt_state_lock);
408 	}
409 
410 	/*
411 	 * Check that the target execution context is in a waiting state before
412 	 * forwarding the direct request to it.
413 	 */
414 	idx = get_ec_index(sp);
415 	if (sp->ec[idx].rt_state != RT_STATE_WAITING) {
416 		VERBOSE("SP context on core%u is not waiting (%u).\n",
417 			idx, sp->ec[idx].rt_model);
418 
419 		if (sp->runtime_el == S_EL0) {
420 			spin_unlock(&sp->rt_state_lock);
421 		}
422 
423 		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
424 	}
425 
426 	/*
427 	 * Everything checks out so forward the request to the SP after updating
428 	 * its state and runtime model.
429 	 */
430 	sp->ec[idx].rt_state = RT_STATE_RUNNING;
431 	sp->ec[idx].rt_model = RT_MODEL_DIR_REQ;
432 	sp->ec[idx].dir_req_origin_id = src_id;
433 
434 	if (sp->runtime_el == S_EL0) {
435 		spin_unlock(&sp->rt_state_lock);
436 	}
437 
438 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
439 			       handle, cookie, flags, dst_id);
440 }
441 
442 /*******************************************************************************
443  * Handle direct response messages and route to the appropriate destination.
444  ******************************************************************************/
445 static uint64_t direct_resp_smc_handler(uint32_t smc_fid,
446 					bool secure_origin,
447 					uint64_t x1,
448 					uint64_t x2,
449 					uint64_t x3,
450 					uint64_t x4,
451 					void *cookie,
452 					void *handle,
453 					uint64_t flags)
454 {
455 	uint16_t dst_id = ffa_endpoint_destination(x1);
456 	struct secure_partition_desc *sp;
457 	unsigned int idx;
458 
459 	/* Check if arg2 has been populated correctly based on message type. */
460 	if (!direct_msg_validate_arg2(x2)) {
461 		return spmc_ffa_error_return(handle,
462 					     FFA_ERROR_INVALID_PARAMETER);
463 	}
464 
465 	/* Check that the response did not originate from the Normal world. */
466 	if (!secure_origin) {
467 		VERBOSE("Direct Response not supported from Normal World.\n");
468 		return spmc_ffa_error_return(handle,
469 					     FFA_ERROR_INVALID_PARAMETER);
470 	}
471 
472 	/*
473 	 * Check that the response is either targeted to the Normal world or the
474 	 * SPMC, e.g. a PM response.
475 	 */
476 	if (!direct_msg_validate_dst_id(dst_id)) {
477 		VERBOSE("Direct response to invalid partition ID (0x%x).\n",
478 			dst_id);
479 		return spmc_ffa_error_return(handle,
480 					     FFA_ERROR_INVALID_PARAMETER);
481 	}
482 
483 	/* Obtain the SP descriptor and update its runtime state. */
484 	sp = spmc_get_sp_ctx(ffa_endpoint_source(x1));
485 	if (sp == NULL) {
486 		VERBOSE("Direct response to unknown partition ID (0x%x).\n",
487 			dst_id);
488 		return spmc_ffa_error_return(handle,
489 					     FFA_ERROR_INVALID_PARAMETER);
490 	}
491 
492 	if (sp->runtime_el == S_EL0) {
493 		spin_lock(&sp->rt_state_lock);
494 	}
495 
496 	/* Sanity check state is being tracked correctly in the SPMC. */
497 	idx = get_ec_index(sp);
498 	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
499 
500 	/* Ensure SP execution context was in the right runtime model. */
501 	if (sp->ec[idx].rt_model != RT_MODEL_DIR_REQ) {
502 		VERBOSE("SP context on core%u not handling direct req (%u).\n",
503 			idx, sp->ec[idx].rt_model);
504 		if (sp->runtime_el == S_EL0) {
505 			spin_unlock(&sp->rt_state_lock);
506 		}
507 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
508 	}
509 
510 	if (sp->ec[idx].dir_req_origin_id != dst_id) {
511 		WARN("Invalid direct resp partition ID 0x%x != 0x%x on core%u.\n",
512 		     dst_id, sp->ec[idx].dir_req_origin_id, idx);
513 		if (sp->runtime_el == S_EL0) {
514 			spin_unlock(&sp->rt_state_lock);
515 		}
516 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
517 	}
518 
519 	/* Update the state of the SP execution context. */
520 	sp->ec[idx].rt_state = RT_STATE_WAITING;
521 
522 	/* Clear the ongoing direct request ID. */
523 	sp->ec[idx].dir_req_origin_id = INV_SP_ID;
524 
525 	if (sp->runtime_el == S_EL0) {
526 		spin_unlock(&sp->rt_state_lock);
527 	}
528 
529 	/*
530 	 * If the receiver is not the SPMC then forward the response to the
531 	 * Normal world.
532 	 */
533 	if (dst_id == FFA_SPMC_ID) {
534 		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
535 		/* Should not get here. */
536 		panic();
537 	}
538 
539 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
540 			       handle, cookie, flags, dst_id);
541 }
542 
543 /*******************************************************************************
544  * This function handles the FFA_MSG_WAIT SMC to allow an SP to relinquish its
545  * cycles.
546  ******************************************************************************/
547 static uint64_t msg_wait_handler(uint32_t smc_fid,
548 				 bool secure_origin,
549 				 uint64_t x1,
550 				 uint64_t x2,
551 				 uint64_t x3,
552 				 uint64_t x4,
553 				 void *cookie,
554 				 void *handle,
555 				 uint64_t flags)
556 {
557 	struct secure_partition_desc *sp;
558 	unsigned int idx;
559 
560 	/*
561 	 * Check that the call did not originate from the Normal world as
562 	 * only the secure world can call this ABI.
563 	 */
564 	if (!secure_origin) {
565 		VERBOSE("Normal world cannot call FFA_MSG_WAIT.\n");
566 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
567 	}
568 
569 	/* Get the descriptor of the SP that invoked FFA_MSG_WAIT. */
570 	sp = spmc_get_current_sp_ctx();
571 	if (sp == NULL) {
572 		return spmc_ffa_error_return(handle,
573 					     FFA_ERROR_INVALID_PARAMETER);
574 	}
575 
576 	/*
577 	 * Get the execution context of the SP that invoked FFA_MSG_WAIT.
578 	 */
579 	idx = get_ec_index(sp);
580 	if (sp->runtime_el == S_EL0) {
581 		spin_lock(&sp->rt_state_lock);
582 	}
583 
584 	/* Ensure SP execution context was in the right runtime model. */
585 	if (sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) {
586 		if (sp->runtime_el == S_EL0) {
587 			spin_unlock(&sp->rt_state_lock);
588 		}
589 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
590 	}
591 
592 	/* Sanity check the state is being tracked correctly in the SPMC. */
593 	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
594 
595 	/*
596 	 * Perform a synchronous exit if the partition was initialising. The
597 	 * state is updated after the exit.
598 	 */
599 	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
600 		if (sp->runtime_el == S_EL0) {
601 			spin_unlock(&sp->rt_state_lock);
602 		}
603 		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
604 		/* Should not get here */
605 		panic();
606 	}
607 
608 	/* Update the state of the SP execution context. */
609 	sp->ec[idx].rt_state = RT_STATE_WAITING;
610 
611 	/* Resume normal world if a secure interrupt was handled. */
612 	if (sp->ec[idx].rt_model == RT_MODEL_INTR) {
613 		/* FFA_MSG_WAIT can only be called from the secure world. */
614 		unsigned int secure_state_in = SECURE;
615 		unsigned int secure_state_out = NON_SECURE;
616 
617 		cm_el1_sysregs_context_save(secure_state_in);
618 		cm_el1_sysregs_context_restore(secure_state_out);
619 		cm_set_next_eret_context(secure_state_out);
620 
621 		if (sp->runtime_el == S_EL0) {
622 			spin_unlock(&sp->rt_state_lock);
623 		}
624 
625 		SMC_RET0(cm_get_context(secure_state_out));
626 	}
627 
628 	/* Release the lock protecting the runtime state of a S-EL0 SP. */
629 	if (sp->runtime_el == S_EL0) {
630 		spin_unlock(&sp->rt_state_lock);
631 	}
632 
633 	/* Forward the response to the Normal world. */
634 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
635 			       handle, cookie, flags, FFA_NWD_ID);
636 }
637 
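/*******************************************************************************
 * Handle the FFA_ERROR ABI. This is only expected from an SP that failed to
 * initialise, in which case a synchronous exit back to the SPMC is performed;
 * any other usage is reported as not supported.
 ******************************************************************************/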
638 static uint64_t ffa_error_handler(uint32_t smc_fid,
639 				 bool secure_origin,
640 				 uint64_t x1,
641 				 uint64_t x2,
642 				 uint64_t x3,
643 				 uint64_t x4,
644 				 void *cookie,
645 				 void *handle,
646 				 uint64_t flags)
647 {
648 	struct secure_partition_desc *sp;
649 	unsigned int idx;
650 
651 	/* Check that the response did not originate from the Normal world. */
652 	if (!secure_origin) {
653 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
654 	}
655 
656 	/* Get the descriptor of the SP that invoked FFA_ERROR. */
657 	sp = spmc_get_current_sp_ctx();
658 	if (sp == NULL) {
659 		return spmc_ffa_error_return(handle,
660 					     FFA_ERROR_INVALID_PARAMETER);
661 	}
662 
663 	/* Get the execution context of the SP that invoked FFA_ERROR. */
664 	idx = get_ec_index(sp);
665 
666 	/*
667 	 * We only expect FFA_ERROR to be received during SP initialisation
668 	 * otherwise this is an invalid call.
669 	 */
670 	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
671 		ERROR("SP 0x%x failed to initialize.\n", sp->sp_id);
672 		spmc_sp_synchronous_exit(&sp->ec[idx], x2);
673 		/* Should not get here. */
674 		panic();
675 	}
676 
677 	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
678 }
679 
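/*******************************************************************************
 * Handle the FFA_VERSION ABI. Record the FF-A version requested by a normal
 * world caller and report the version implemented by the SPMC.
 ******************************************************************************/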
680 static uint64_t ffa_version_handler(uint32_t smc_fid,
681 				    bool secure_origin,
682 				    uint64_t x1,
683 				    uint64_t x2,
684 				    uint64_t x3,
685 				    uint64_t x4,
686 				    void *cookie,
687 				    void *handle,
688 				    uint64_t flags)
689 {
690 	uint32_t requested_version = x1 & FFA_VERSION_MASK;
691 
692 	if (requested_version & FFA_VERSION_BIT31_MASK) {
693 		/* Invalid encoding, return an error. */
694 		SMC_RET1(handle, FFA_ERROR_NOT_SUPPORTED);
695 		/* Execution stops here. */
696 	}
697 
698 	/* Determine the caller to store the requested version. */
699 	if (secure_origin) {
700 		/*
701 		 * Ensure that the SP is reporting the same version as
702 		 * specified in its manifest. If these do not match there is
703 		 * something wrong with the SP.
704 		 * TODO: Should we abort the SP? For now assert this is not
705 		 *       the case.
706 		 */
707 		assert(requested_version ==
708 		       spmc_get_current_sp_ctx()->ffa_version);
709 	} else {
710 		/*
711 		 * If this is called by the normal world, record this
712 		 * information in its descriptor.
713 		 */
714 		spmc_get_hyp_ctx()->ffa_version = requested_version;
715 	}
716 
717 	SMC_RET1(handle, MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
718 					  FFA_VERSION_MINOR));
719 }
720 
721 /*******************************************************************************
722  * Helper function to obtain the FF-A version of the calling partition.
723  ******************************************************************************/
724 uint32_t get_partition_ffa_version(bool secure_origin)
725 {
726 	if (secure_origin) {
727 		return spmc_get_current_sp_ctx()->ffa_version;
728 	} else {
729 		return spmc_get_hyp_ctx()->ffa_version;
730 	}
731 }
732 
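/*******************************************************************************
 * Handle the FFA_RXTX_MAP ABI. Validate the RX/TX buffer pair provided by the
 * caller and map the TX buffer read-only and the RX buffer read-write in the
 * SPMC's translation regime.
 ******************************************************************************/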
733 static uint64_t rxtx_map_handler(uint32_t smc_fid,
734 				 bool secure_origin,
735 				 uint64_t x1,
736 				 uint64_t x2,
737 				 uint64_t x3,
738 				 uint64_t x4,
739 				 void *cookie,
740 				 void *handle,
741 				 uint64_t flags)
742 {
743 	int ret;
744 	uint32_t error_code;
745 	uint32_t mem_atts = secure_origin ? MT_SECURE : MT_NS;
746 	struct mailbox *mbox;
747 	uintptr_t tx_address = x1;
748 	uintptr_t rx_address = x2;
749 	uint32_t page_count = x3 & FFA_RXTX_PAGE_COUNT_MASK; /* Bits [5:0] */
750 	uint32_t buf_size = page_count * FFA_PAGE_SIZE;
751 
752 	/*
753 	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
754 	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
755 	 * ABI on behalf of a VM and reject it if this is the case.
756 	 */
757 	if (tx_address == 0 || rx_address == 0) {
758 		WARN("Mapping RX/TX Buffers on behalf of VM not supported.\n");
759 		return spmc_ffa_error_return(handle,
760 					     FFA_ERROR_INVALID_PARAMETER);
761 	}
762 
763 	/* Ensure the specified buffers are not the same. */
764 	if (tx_address == rx_address) {
765 		WARN("TX Buffer must not be the same as RX Buffer.\n");
766 		return spmc_ffa_error_return(handle,
767 					     FFA_ERROR_INVALID_PARAMETER);
768 	}
769 
770 	/* Ensure the buffer size is not 0. */
771 	if (buf_size == 0U) {
772 		WARN("Buffer size must not be 0\n");
773 		return spmc_ffa_error_return(handle,
774 					     FFA_ERROR_INVALID_PARAMETER);
775 	}
776 
777 	/*
778 	 * Ensure the buffer size is a multiple of the translation granule size
779 	 * in TF-A.
780 	 */
781 	if (buf_size % PAGE_SIZE != 0U) {
782 		WARN("Buffer size must be aligned to translation granule.\n");
783 		return spmc_ffa_error_return(handle,
784 					     FFA_ERROR_INVALID_PARAMETER);
785 	}
786 
787 	/* Obtain the RX/TX buffer pair descriptor. */
788 	mbox = spmc_get_mbox_desc(secure_origin);
789 
790 	spin_lock(&mbox->lock);
791 
792 	/* Check if buffers have already been mapped. */
793 	if (mbox->rx_buffer != 0 || mbox->tx_buffer != 0) {
794 		WARN("RX/TX Buffers already mapped (%p/%p)\n",
795 		     (void *) mbox->rx_buffer, (void *)mbox->tx_buffer);
796 		error_code = FFA_ERROR_DENIED;
797 		goto err;
798 	}
799 
800 	/* memmap the TX buffer as read only. */
801 	ret = mmap_add_dynamic_region(tx_address, /* PA */
802 			tx_address, /* VA */
803 			buf_size, /* size */
804 			mem_atts | MT_RO_DATA); /* attrs */
805 	if (ret != 0) {
806 		/* Return the correct error code. */
807 		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
808 						FFA_ERROR_INVALID_PARAMETER;
809 		WARN("Unable to map TX buffer: %d\n", error_code);
810 		goto err;
811 	}
812 
813 	/* memmap the RX buffer as read write. */
814 	ret = mmap_add_dynamic_region(rx_address, /* PA */
815 			rx_address, /* VA */
816 			buf_size, /* size */
817 			mem_atts | MT_RW_DATA); /* attrs */
818 
819 	if (ret != 0) {
820 		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
821 						FFA_ERROR_INVALID_PARAMETER;
822 		WARN("Unable to map RX buffer: %d\n", error_code);
823 		/* Unmap the TX buffer again. */
824 		mmap_remove_dynamic_region(tx_address, buf_size);
825 		goto err;
826 	}
827 
828 	mbox->tx_buffer = (void *) tx_address;
829 	mbox->rx_buffer = (void *) rx_address;
830 	mbox->rxtx_page_count = page_count;
831 	spin_unlock(&mbox->lock);
832 
833 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
834 	/* Execution stops here. */
835 err:
836 	spin_unlock(&mbox->lock);
837 	return spmc_ffa_error_return(handle, error_code);
838 }
839 
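/*******************************************************************************
 * Handle the FFA_RXTX_UNMAP ABI. Unmap the caller's RX/TX buffer pair from the
 * SPMC's translation regime and clear the associated mailbox descriptor.
 ******************************************************************************/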
840 static uint64_t rxtx_unmap_handler(uint32_t smc_fid,
841 				   bool secure_origin,
842 				   uint64_t x1,
843 				   uint64_t x2,
844 				   uint64_t x3,
845 				   uint64_t x4,
846 				   void *cookie,
847 				   void *handle,
848 				   uint64_t flags)
849 {
850 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
851 	uint32_t buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
852 
853 	/*
854 	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
855 	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
856 	 * ABI on behalf of a VM and reject it if this is the case.
857 	 */
858 	if (x1 != 0UL) {
859 		return spmc_ffa_error_return(handle,
860 					     FFA_ERROR_INVALID_PARAMETER);
861 	}
862 
863 	spin_lock(&mbox->lock);
864 
865 	/* Check if buffers are currently mapped. */
866 	if (mbox->rx_buffer == 0 || mbox->tx_buffer == 0) {
867 		spin_unlock(&mbox->lock);
868 		return spmc_ffa_error_return(handle,
869 					     FFA_ERROR_INVALID_PARAMETER);
870 	}
871 
872 	/* Unmap RX Buffer */
873 	if (mmap_remove_dynamic_region((uintptr_t) mbox->rx_buffer,
874 				       buf_size) != 0) {
875 		WARN("Unable to unmap RX buffer!\n");
876 	}
877 
878 	mbox->rx_buffer = 0;
879 
880 	/* Unmap TX Buffer */
881 	if (mmap_remove_dynamic_region((uintptr_t) mbox->tx_buffer,
882 				       buf_size) != 0) {
883 		WARN("Unable to unmap TX buffer!\n");
884 	}
885 
886 	mbox->tx_buffer = 0;
887 	mbox->rxtx_page_count = 0;
888 
889 	spin_unlock(&mbox->lock);
890 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
891 }
892 
893 /*
894  * Helper function to populate the properties field of a Partition Info Get
895  * descriptor.
896  */
897 static uint32_t
898 partition_info_get_populate_properties(uint32_t sp_properties,
899 				       enum sp_execution_state sp_ec_state)
900 {
901 	uint32_t properties = sp_properties;
902 	uint32_t ec_state;
903 
904 	/* Determine the execution state of the SP. */
905 	ec_state = sp_ec_state == SP_STATE_AARCH64 ?
906 		   FFA_PARTITION_INFO_GET_AARCH64_STATE :
907 		   FFA_PARTITION_INFO_GET_AARCH32_STATE;
908 
909 	properties |= ec_state << FFA_PARTITION_INFO_GET_EXEC_STATE_SHIFT;
910 
911 	return properties;
912 }
913 
914 /*
915  * Collate the partition information in a v1.1 partition information
916  * descriptor format; this will be converted later if required.
917  */
918 static int partition_info_get_handler_v1_1(uint32_t *uuid,
919 					   struct ffa_partition_info_v1_1
920 						  *partitions,
921 					   uint32_t max_partitions,
922 					   uint32_t *partition_count)
923 {
924 	uint32_t index;
925 	struct ffa_partition_info_v1_1 *desc;
926 	bool null_uuid = is_null_uuid(uuid);
927 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
928 
929 	/* Deal with Logical Partitions. */
930 	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
931 		if (null_uuid || uuid_match(uuid, el3_lp_descs[index].uuid)) {
932 			/* Found a matching UUID, populate appropriately. */
933 			if (*partition_count >= max_partitions) {
934 				return FFA_ERROR_NO_MEMORY;
935 			}
936 
937 			desc = &partitions[*partition_count];
938 			desc->ep_id = el3_lp_descs[index].sp_id;
939 			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
940 			/* LSPs must be AArch64. */
941 			desc->properties =
942 				partition_info_get_populate_properties(
943 					el3_lp_descs[index].properties,
944 					SP_STATE_AARCH64);
945 
946 			if (null_uuid) {
947 				copy_uuid(desc->uuid, el3_lp_descs[index].uuid);
948 			}
949 			(*partition_count)++;
950 		}
951 	}
952 
953 	/* Deal with physical SPs. */
954 	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
955 		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
956 			/* Found a matching UUID, populate appropriately. */
957 			if (*partition_count >= max_partitions) {
958 				return FFA_ERROR_NO_MEMORY;
959 			}
960 
961 			desc = &partitions[*partition_count];
962 			desc->ep_id = sp_desc[index].sp_id;
963 			/*
964 			 * Execution context count must match the number of
965 			 * cores for S-EL1 SPs.
966 			 */
967 			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
968 			desc->properties =
969 				partition_info_get_populate_properties(
970 					sp_desc[index].properties,
971 					sp_desc[index].execution_state);
972 
973 			if (null_uuid) {
974 				copy_uuid(desc->uuid, sp_desc[index].uuid);
975 			}
976 			(*partition_count)++;
977 		}
978 	}
979 	return 0;
980 }
981 
982 /*
983  * Handle the case where the caller only wants the count of partitions
984  * matching a given UUID and does not want the corresponding descriptors
985  * populated.
986  */
987 static uint32_t partition_info_get_handler_count_only(uint32_t *uuid)
988 {
989 	uint32_t index = 0;
990 	uint32_t partition_count = 0;
991 	bool null_uuid = is_null_uuid(uuid);
992 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
993 
994 	/* Deal with Logical Partitions. */
995 	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
996 		if (null_uuid ||
997 		    uuid_match(uuid, el3_lp_descs[index].uuid)) {
998 			(partition_count)++;
999 		}
1000 	}
1001 
1002 	/* Deal with physical SPs. */
1003 	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
1004 		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
1005 			(partition_count)++;
1006 		}
1007 	}
1008 	return partition_count;
1009 }
1010 
1011 /*
1012  * If the caller of the PARTITION_INFO_GET ABI was a v1.0 caller, populate
1013  * the corresponding descriptor format from the v1.1 descriptor array.
1014  */
1015 static uint64_t partition_info_populate_v1_0(struct ffa_partition_info_v1_1
1016 					     *partitions,
1017 					     struct mailbox *mbox,
1018 					     int partition_count)
1019 {
1020 	uint32_t index;
1021 	uint32_t buf_size;
1022 	uint32_t descriptor_size;
1023 	struct ffa_partition_info_v1_0 *v1_0_partitions =
1024 		(struct ffa_partition_info_v1_0 *) mbox->rx_buffer;
1025 
1026 	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1027 	descriptor_size = partition_count *
1028 			  sizeof(struct ffa_partition_info_v1_0);
1029 
1030 	if (descriptor_size > buf_size) {
1031 		return FFA_ERROR_NO_MEMORY;
1032 	}
1033 
1034 	for (index = 0U; index < partition_count; index++) {
1035 		v1_0_partitions[index].ep_id = partitions[index].ep_id;
1036 		v1_0_partitions[index].execution_ctx_count =
1037 			partitions[index].execution_ctx_count;
1038 		/* Only report v1.0 properties. */
1039 		v1_0_partitions[index].properties =
1040 			(partitions[index].properties &
1041 			FFA_PARTITION_INFO_GET_PROPERTIES_V1_0_MASK);
1042 	}
1043 	return 0;
1044 }
1045 
1046 /*
1047  * Main handler for FFA_PARTITION_INFO_GET which supports both FF-A v1.1 and
1048  * v1.0 implementations.
1049  */
1050 static uint64_t partition_info_get_handler(uint32_t smc_fid,
1051 					   bool secure_origin,
1052 					   uint64_t x1,
1053 					   uint64_t x2,
1054 					   uint64_t x3,
1055 					   uint64_t x4,
1056 					   void *cookie,
1057 					   void *handle,
1058 					   uint64_t flags)
1059 {
1060 	int ret;
1061 	uint32_t partition_count = 0;
1062 	uint32_t size = 0;
1063 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1064 	struct mailbox *mbox;
1065 	uint64_t info_get_flags;
1066 	bool count_only;
1067 	uint32_t uuid[4];
1068 
1069 	uuid[0] = x1;
1070 	uuid[1] = x2;
1071 	uuid[2] = x3;
1072 	uuid[3] = x4;
1073 
1074 	/* Determine if the Partition descriptors should be populated. */
1075 	info_get_flags = SMC_GET_GP(handle, CTX_GPREG_X5);
1076 	count_only = (info_get_flags & FFA_PARTITION_INFO_GET_COUNT_FLAG_MASK);
1077 
1078 	/* Handle the case where we don't need to populate the descriptors. */
1079 	if (count_only) {
1080 		partition_count = partition_info_get_handler_count_only(uuid);
1081 		if (partition_count == 0) {
1082 			return spmc_ffa_error_return(handle,
1083 						FFA_ERROR_INVALID_PARAMETER);
1084 		}
1085 	} else {
1086 		struct ffa_partition_info_v1_1 partitions[MAX_SP_LP_PARTITIONS];
1087 
1088 		/*
1089 		 * Handle the case where the partition descriptors are required,
1090 		 * check we have the buffers available and populate the
1091 		 * appropriate structure version.
1092 		 */
1093 
1094 		/* Obtain the v1.1 format of the descriptors. */
1095 		ret = partition_info_get_handler_v1_1(uuid, partitions,
1096 						      MAX_SP_LP_PARTITIONS,
1097 						      &partition_count);
1098 
1099 		/* Check if an error occurred during discovery. */
1100 		if (ret != 0) {
1101 			goto err;
1102 		}
1103 
1104 		/* If we didn't find any matches the UUID is unknown. */
1105 		if (partition_count == 0) {
1106 			ret = FFA_ERROR_INVALID_PARAMETER;
1107 			goto err;
1108 		}
1109 
1110 		/* Obtain the partition mailbox RX/TX buffer pair descriptor. */
1111 		mbox = spmc_get_mbox_desc(secure_origin);
1112 
1113 		/*
1114 		 * If the caller has not bothered registering its RX/TX pair
1115 		 * then return an error code.
1116 		 */
1117 		spin_lock(&mbox->lock);
1118 		if (mbox->rx_buffer == NULL) {
1119 			ret = FFA_ERROR_BUSY;
1120 			goto err_unlock;
1121 		}
1122 
1123 		/* Ensure the RX buffer is currently free. */
1124 		if (mbox->state != MAILBOX_STATE_EMPTY) {
1125 			ret = FFA_ERROR_BUSY;
1126 			goto err_unlock;
1127 		}
1128 
1129 		/* Zero the RX buffer before populating. */
1130 		(void)memset(mbox->rx_buffer, 0,
1131 			     mbox->rxtx_page_count * FFA_PAGE_SIZE);
1132 
1133 		/*
1134 		 * Depending on the FF-A version of the requesting partition
1135 		 * we may need to convert to a v1.0 format otherwise we can copy
1136 		 * directly.
1137 		 */
1138 		if (ffa_version == MAKE_FFA_VERSION(U(1), U(0))) {
1139 			ret = partition_info_populate_v1_0(partitions,
1140 							   mbox,
1141 							   partition_count);
1142 			if (ret != 0) {
1143 				goto err_unlock;
1144 			}
1145 		} else {
1146 			uint32_t buf_size = mbox->rxtx_page_count *
1147 					    FFA_PAGE_SIZE;
1148 
1149 			/* Ensure the descriptor will fit in the buffer. */
1150 			size = sizeof(struct ffa_partition_info_v1_1);
1151 			if (partition_count * size  > buf_size) {
1152 				ret = FFA_ERROR_NO_MEMORY;
1153 				goto err_unlock;
1154 			}
1155 			memcpy(mbox->rx_buffer, partitions,
1156 			       partition_count * size);
1157 		}
1158 
1159 		mbox->state = MAILBOX_STATE_FULL;
1160 		spin_unlock(&mbox->lock);
1161 	}
1162 	SMC_RET4(handle, FFA_SUCCESS_SMC32, 0, partition_count, size);
1163 
1164 err_unlock:
1165 	spin_unlock(&mbox->lock);
1166 err:
1167 	return spmc_ffa_error_return(handle, ret);
1168 }
1169 
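/* Helper to return FFA_SUCCESS with the queried feature properties in w2. */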
1170 static uint64_t ffa_feature_success(void *handle, uint32_t arg2)
1171 {
1172 	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0, arg2);
1173 }
1174 
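/*
 * Helper to handle an FFA_FEATURES query for the FFA_MEM_RETRIEVE_REQ ABI,
 * which carries additional input properties to negotiate use of the NS bit.
 */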
1175 static uint64_t ffa_features_retrieve_request(bool secure_origin,
1176 					      uint32_t input_properties,
1177 					      void *handle)
1178 {
1179 	/*
1180 	 * If we're called by the normal world we don't support any
1181 	 * additional features.
1182 	 */
1183 	if (!secure_origin) {
1184 		if ((input_properties & FFA_FEATURES_RET_REQ_NS_BIT) != 0U) {
1185 			return spmc_ffa_error_return(handle,
1186 						     FFA_ERROR_NOT_SUPPORTED);
1187 		}
1188 
1189 	} else {
1190 		struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
1191 		/*
1192 		 * If v1.1 the NS bit must be set otherwise it is an invalid
1193 		 * call. If v1.0 check and store whether the SP has requested
1194 		 * the use of the NS bit.
1195 		 */
1196 		if (sp->ffa_version == MAKE_FFA_VERSION(1, 1)) {
1197 			if ((input_properties &
1198 			     FFA_FEATURES_RET_REQ_NS_BIT) == 0U) {
1199 				return spmc_ffa_error_return(handle,
1200 						       FFA_ERROR_NOT_SUPPORTED);
1201 			}
1202 			return ffa_feature_success(handle,
1203 						   FFA_FEATURES_RET_REQ_NS_BIT);
1204 		} else {
1205 			sp->ns_bit_requested = (input_properties &
1206 					       FFA_FEATURES_RET_REQ_NS_BIT) !=
1207 					       0U;
1208 		}
1209 		if (sp->ns_bit_requested) {
1210 			return ffa_feature_success(handle,
1211 						   FFA_FEATURES_RET_REQ_NS_BIT);
1212 		}
1213 	}
1214 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1215 }
1216 
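/*******************************************************************************
 * Handle the FFA_FEATURES ABI. Report whether a queried FF-A ABI is
 * implemented by the SPMC, taking into account which world the query
 * originated from for ABIs that are only available from one side.
 ******************************************************************************/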
1217 static uint64_t ffa_features_handler(uint32_t smc_fid,
1218 				     bool secure_origin,
1219 				     uint64_t x1,
1220 				     uint64_t x2,
1221 				     uint64_t x3,
1222 				     uint64_t x4,
1223 				     void *cookie,
1224 				     void *handle,
1225 				     uint64_t flags)
1226 {
1227 	uint32_t function_id = (uint32_t) x1;
1228 	uint32_t input_properties = (uint32_t) x2;
1229 
1230 	/* Check if a Feature ID was requested. */
1231 	if ((function_id & FFA_FEATURES_BIT31_MASK) == 0U) {
1232 		/* We currently don't support any additional features. */
1233 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1234 	}
1235 
1236 	/*
1237 	 * Handle the cases where we have separate handlers due to additional
1238 	 * properties.
1239 	 */
1240 	switch (function_id) {
1241 	case FFA_MEM_RETRIEVE_REQ_SMC32:
1242 	case FFA_MEM_RETRIEVE_REQ_SMC64:
1243 		return ffa_features_retrieve_request(secure_origin,
1244 						     input_properties,
1245 						     handle);
1246 	}
1247 
1248 	/*
1249 	 * We don't currently support additional input properties for these
1250 	 * other ABIs therefore ensure this value is set to 0.
1251 	 */
1252 	if (input_properties != 0U) {
1253 		return spmc_ffa_error_return(handle,
1254 					     FFA_ERROR_NOT_SUPPORTED);
1255 	}
1256 
1257 	/* Report if any other FF-A ABI is supported. */
1258 	switch (function_id) {
1259 	/* Supported features from both worlds. */
1260 	case FFA_ERROR:
1261 	case FFA_SUCCESS_SMC32:
1262 	case FFA_INTERRUPT:
1263 	case FFA_SPM_ID_GET:
1264 	case FFA_ID_GET:
1265 	case FFA_FEATURES:
1266 	case FFA_VERSION:
1267 	case FFA_RX_RELEASE:
1268 	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
1269 	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
1270 	case FFA_PARTITION_INFO_GET:
1271 	case FFA_RXTX_MAP_SMC32:
1272 	case FFA_RXTX_MAP_SMC64:
1273 	case FFA_RXTX_UNMAP:
1274 	case FFA_MEM_FRAG_TX:
1275 	case FFA_MSG_RUN:
1276 
1277 		/*
1278 		 * We are relying on the fact that the other registers
1279 		 * will be set to 0 as these values align with the
1280 		 * currently implemented features of the SPMC. If this
1281 		 * changes this function must be extended to handle
1282 		 * reporting the additional functionality.
1283 		 */
1284 
1285 		SMC_RET1(handle, FFA_SUCCESS_SMC32);
1286 		/* Execution stops here. */
1287 
1288 	/* Supported ABIs only from the secure world. */
1289 	case FFA_SECONDARY_EP_REGISTER_SMC64:
1290 	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
1291 	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
1292 	case FFA_MEM_RELINQUISH:
1293 	case FFA_MSG_WAIT:
1294 	case FFA_CONSOLE_LOG_SMC32:
1295 	case FFA_CONSOLE_LOG_SMC64:
1296 
1297 		if (!secure_origin) {
1298 			return spmc_ffa_error_return(handle,
1299 				FFA_ERROR_NOT_SUPPORTED);
1300 		}
1301 		SMC_RET1(handle, FFA_SUCCESS_SMC32);
1302 		/* Execution stops here. */
1303 
1304 	/* Supported features only from the normal world. */
1305 	case FFA_MEM_SHARE_SMC32:
1306 	case FFA_MEM_SHARE_SMC64:
1307 	case FFA_MEM_LEND_SMC32:
1308 	case FFA_MEM_LEND_SMC64:
1309 	case FFA_MEM_RECLAIM:
1310 	case FFA_MEM_FRAG_RX:
1311 
1312 		if (secure_origin) {
1313 			return spmc_ffa_error_return(handle,
1314 					FFA_ERROR_NOT_SUPPORTED);
1315 		}
1316 		SMC_RET1(handle, FFA_SUCCESS_SMC32);
1317 		/* Execution stops here. */
1318 
1319 	default:
1320 		return spmc_ffa_error_return(handle,
1321 					FFA_ERROR_NOT_SUPPORTED);
1322 	}
1323 }
1324 
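/*
 * Handle the FFA_ID_GET ABI. Return the ID of the calling SP or, for a normal
 * world caller, the ID recorded for the NS endpoint.
 */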
1325 static uint64_t ffa_id_get_handler(uint32_t smc_fid,
1326 				   bool secure_origin,
1327 				   uint64_t x1,
1328 				   uint64_t x2,
1329 				   uint64_t x3,
1330 				   uint64_t x4,
1331 				   void *cookie,
1332 				   void *handle,
1333 				   uint64_t flags)
1334 {
1335 	if (secure_origin) {
1336 		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1337 			 spmc_get_current_sp_ctx()->sp_id);
1338 	} else {
1339 		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1340 			 spmc_get_hyp_ctx()->ns_ep_id);
1341 	}
1342 }
1343 
1344 /*
1345  * Enable an SP to query the ID assigned to the SPMC.
1346  */
1347 static uint64_t ffa_spm_id_get_handler(uint32_t smc_fid,
1348 				       bool secure_origin,
1349 				       uint64_t x1,
1350 				       uint64_t x2,
1351 				       uint64_t x3,
1352 				       uint64_t x4,
1353 				       void *cookie,
1354 				       void *handle,
1355 				       uint64_t flags)
1356 {
1357 	assert(x1 == 0UL);
1358 	assert(x2 == 0UL);
1359 	assert(x3 == 0UL);
1360 	assert(x4 == 0UL);
1361 	assert(SMC_GET_GP(handle, CTX_GPREG_X5) == 0UL);
1362 	assert(SMC_GET_GP(handle, CTX_GPREG_X6) == 0UL);
1363 	assert(SMC_GET_GP(handle, CTX_GPREG_X7) == 0UL);
1364 
1365 	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0, FFA_SPMC_ID);
1366 }
1367 
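/*******************************************************************************
 * Handle the FFA_RUN ABI. Allow the normal world to donate cycles to a target
 * SP execution context after validating its identity and runtime state.
 ******************************************************************************/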
1368 static uint64_t ffa_run_handler(uint32_t smc_fid,
1369 				bool secure_origin,
1370 				uint64_t x1,
1371 				uint64_t x2,
1372 				uint64_t x3,
1373 				uint64_t x4,
1374 				void *cookie,
1375 				void *handle,
1376 				uint64_t flags)
1377 {
1378 	struct secure_partition_desc *sp;
1379 	uint16_t target_id = FFA_RUN_EP_ID(x1);
1380 	uint16_t vcpu_id = FFA_RUN_VCPU_ID(x1);
1381 	unsigned int idx;
1382 	unsigned int *rt_state;
1383 	unsigned int *rt_model;
1384 
1385 	/* Can only be called from the normal world. */
1386 	if (secure_origin) {
1387 		ERROR("FFA_RUN can only be called from NWd.\n");
1388 		return spmc_ffa_error_return(handle,
1389 					     FFA_ERROR_INVALID_PARAMETER);
1390 	}
1391 
1392 	/* Cannot run a Normal world partition. */
1393 	if (ffa_is_normal_world_id(target_id)) {
1394 		ERROR("Cannot run a NWd partition (0x%x).\n", target_id);
1395 		return spmc_ffa_error_return(handle,
1396 					     FFA_ERROR_INVALID_PARAMETER);
1397 	}
1398 
1399 	/* Check that the target SP exists. */
1400 	sp = spmc_get_sp_ctx(target_id);
1401 	if (sp == NULL) {
1402 		ERROR("Unknown partition ID (0x%x).\n", target_id);
1403 		return spmc_ffa_error_return(handle,
1404 					     FFA_ERROR_INVALID_PARAMETER);
1405 	}
1406 
1407 	idx = get_ec_index(sp);
1408 
1409 	if (idx != vcpu_id) {
1410 		ERROR("Cannot run vcpu %d != %d.\n", idx, vcpu_id);
1411 		return spmc_ffa_error_return(handle,
1412 					     FFA_ERROR_INVALID_PARAMETER);
1413 	}
1414 	if (sp->runtime_el == S_EL0) {
1415 		spin_lock(&sp->rt_state_lock);
1416 	}
1417 	rt_state = &((sp->ec[idx]).rt_state);
1418 	rt_model = &((sp->ec[idx]).rt_model);
1419 	if (*rt_state == RT_STATE_RUNNING) {
1420 		if (sp->runtime_el == S_EL0) {
1421 			spin_unlock(&sp->rt_state_lock);
1422 		}
1423 		ERROR("Partition (0x%x) is already running.\n", target_id);
1424 		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
1425 	}
1426 
1427 	/*
1428 	 * Sanity check that if the execution context was not waiting then it
1429 	 * was either in the direct request or the run partition runtime model.
1430 	 */
1431 	if (*rt_state == RT_STATE_PREEMPTED || *rt_state == RT_STATE_BLOCKED) {
1432 		assert(*rt_model == RT_MODEL_RUN ||
1433 		       *rt_model == RT_MODEL_DIR_REQ);
1434 	}
1435 
1436 	/*
1437 	 * If the context was waiting then update the partition runtime model.
1438 	 */
1439 	if (*rt_state == RT_STATE_WAITING) {
1440 		*rt_model = RT_MODEL_RUN;
1441 	}
1442 
1443 	/*
1444 	 * Forward the request to the correct SP vCPU after updating
1445 	 * its state.
1446 	 */
1447 	*rt_state = RT_STATE_RUNNING;
1448 
1449 	if (sp->runtime_el == S_EL0) {
1450 		spin_unlock(&sp->rt_state_lock);
1451 	}
1452 
1453 	return spmc_smc_return(smc_fid, secure_origin, x1, 0, 0, 0,
1454 			       handle, cookie, flags, target_id);
1455 }
1456 
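/*******************************************************************************
 * Handle the FFA_RX_RELEASE ABI. Mark the caller's RX buffer as empty so that
 * the SPMC may populate it again.
 ******************************************************************************/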
1457 static uint64_t rx_release_handler(uint32_t smc_fid,
1458 				   bool secure_origin,
1459 				   uint64_t x1,
1460 				   uint64_t x2,
1461 				   uint64_t x3,
1462 				   uint64_t x4,
1463 				   void *cookie,
1464 				   void *handle,
1465 				   uint64_t flags)
1466 {
1467 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1468 
1469 	spin_lock(&mbox->lock);
1470 
1471 	if (mbox->state != MAILBOX_STATE_FULL) {
1472 		spin_unlock(&mbox->lock);
1473 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1474 	}
1475 
1476 	mbox->state = MAILBOX_STATE_EMPTY;
1477 	spin_unlock(&mbox->lock);
1478 
1479 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1480 }
1481 
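/*******************************************************************************
 * Handle the FFA_CONSOLE_LOG ABI. Print the characters packed into x2-x7 by a
 * secure world caller to the EL3 debug console.
 ******************************************************************************/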
1482 static uint64_t spmc_ffa_console_log(uint32_t smc_fid,
1483 				     bool secure_origin,
1484 				     uint64_t x1,
1485 				     uint64_t x2,
1486 				     uint64_t x3,
1487 				     uint64_t x4,
1488 				     void *cookie,
1489 				     void *handle,
1490 				     uint64_t flags)
1491 {
1492 	char *chars;
1493 	size_t chars_max;
1494 	size_t chars_count = x1;
1495 
1496 	/* Requests from the NWd are not supported. */
1497 	if (!secure_origin) {
1498 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1499 	}
1500 
1501 	assert(smc_fid == FFA_CONSOLE_LOG_SMC32 || smc_fid == FFA_CONSOLE_LOG_SMC64);
1502 	if (smc_fid == FFA_CONSOLE_LOG_SMC32) {
1503 		uint32_t registers[] = {
1504 			(uint32_t)x2,
1505 			(uint32_t)x3,
1506 			(uint32_t)x4,
1507 			(uint32_t)SMC_GET_GP(handle, CTX_GPREG_X5),
1508 			(uint32_t)SMC_GET_GP(handle, CTX_GPREG_X6),
1509 			(uint32_t)SMC_GET_GP(handle, CTX_GPREG_X7),
1510 		};
1511 		chars_max = ARRAY_SIZE(registers) * sizeof(uint32_t);
1512 		chars = (char *)registers;
1513 	} else {
1514 		uint64_t registers[] = {
1515 			x2,
1516 			x3,
1517 			x4,
1518 			SMC_GET_GP(handle, CTX_GPREG_X5),
1519 			SMC_GET_GP(handle, CTX_GPREG_X6),
1520 			SMC_GET_GP(handle, CTX_GPREG_X7),
1521 		};
1522 		chars_max = ARRAY_SIZE(registers) * sizeof(uint64_t);
1523 		chars = (char *)registers;
1524 	}
1525 
1526 	if ((chars_count == 0) || (chars_count > chars_max)) {
1527 		return spmc_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
1528 	}
1529 
1530 	for (size_t i = 0; (i < chars_count) && (chars[i] != '\0'); i++) {
1531 		putchar(chars[i]);
1532 	}
1533 
1534 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1535 }
1536 
1537 /*
1538  * Perform initial validation on the provided secondary entry point.
1539  * For now ensure it does not lie within the BL31 Image or the SP's
1540  * RX/TX buffers as these are mapped within EL3.
1541  * TODO: perform validation for additional invalid memory regions.
1542  */
1543 static int validate_secondary_ep(uintptr_t ep, struct secure_partition_desc *sp)
1544 {
1545 	struct mailbox *mb;
1546 	uintptr_t buffer_size;
1547 	uintptr_t sp_rx_buffer;
1548 	uintptr_t sp_tx_buffer;
1549 	uintptr_t sp_rx_buffer_limit;
1550 	uintptr_t sp_tx_buffer_limit;
1551 
1552 	mb = &sp->mailbox;
1553 	buffer_size = (uintptr_t) (mb->rxtx_page_count * FFA_PAGE_SIZE);
1554 	sp_rx_buffer = (uintptr_t) mb->rx_buffer;
1555 	sp_tx_buffer = (uintptr_t) mb->tx_buffer;
1556 	sp_rx_buffer_limit = sp_rx_buffer + buffer_size;
1557 	sp_tx_buffer_limit = sp_tx_buffer + buffer_size;
1558 
1559 	/*
1560 	 * Check if the entry point lies within BL31, or the
1561 	 * SP's RX or TX buffer.
1562 	 */
1563 	if ((ep >= BL31_BASE && ep < BL31_LIMIT) ||
1564 	    (ep >= sp_rx_buffer && ep < sp_rx_buffer_limit) ||
1565 	    (ep >= sp_tx_buffer && ep < sp_tx_buffer_limit)) {
1566 		return -EINVAL;
1567 	}
1568 	return 0;
1569 }
1570 
1571 /*******************************************************************************
1572  * This function handles the FFA_SECONDARY_EP_REGISTER SMC to allow an SP to
1573  * register an entry point for initialization during a secondary cold boot.
1574  ******************************************************************************/
1575 static uint64_t ffa_sec_ep_register_handler(uint32_t smc_fid,
1576 					    bool secure_origin,
1577 					    uint64_t x1,
1578 					    uint64_t x2,
1579 					    uint64_t x3,
1580 					    uint64_t x4,
1581 					    void *cookie,
1582 					    void *handle,
1583 					    uint64_t flags)
1584 {
1585 	struct secure_partition_desc *sp;
1586 	struct sp_exec_ctx *sp_ctx;
1587 
1588 	/* This request cannot originate from the Normal world. */
1589 	if (!secure_origin) {
1590 		WARN("%s: Can only be called from SWd.\n", __func__);
1591 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1592 	}
1593 
1594 	/* Get the context of the current SP. */
1595 	sp = spmc_get_current_sp_ctx();
1596 	if (sp == NULL) {
1597 		WARN("%s: Cannot find SP context.\n", __func__);
1598 		return spmc_ffa_error_return(handle,
1599 					     FFA_ERROR_INVALID_PARAMETER);
1600 	}
1601 
1602 	/* Only an S-EL1 SP should be invoking this ABI. */
1603 	if (sp->runtime_el != S_EL1) {
1604 		WARN("%s: Can only be called for a S-EL1 SP.\n", __func__);
1605 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1606 	}
1607 
1608 	/* Ensure the SP is in its initialization state. */
1609 	sp_ctx = spmc_get_sp_ec(sp);
1610 	if (sp_ctx->rt_model != RT_MODEL_INIT) {
1611 		WARN("%s: Can only be called during SP initialization.\n",
1612 		     __func__);
1613 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1614 	}
1615 
1616 	/* Perform initial validation of the secondary entry point. */
1617 	if (validate_secondary_ep(x1, sp)) {
1618 		WARN("%s: Invalid entry point provided (0x%lx).\n",
1619 		     __func__, x1);
1620 		return spmc_ffa_error_return(handle,
1621 					     FFA_ERROR_INVALID_PARAMETER);
1622 	}
1623 
1624 	/*
1625 	 * Update the secondary entrypoint in SP context.
1626 	 * We don't need a lock here as during partition initialization there
1627 	 * will only be a single core online.
1628 	 */
1629 	sp->secondary_ep = x1;
1630 	VERBOSE("%s: 0x%lx\n", __func__, sp->secondary_ep);
1631 
1632 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1633 }
1634 
1635 /*******************************************************************************
1636  * Permissions are encoded using a different format in the FFA_MEM_PERM_* ABIs
1637  * than in the Trusted Firmware, where the mmap_attr_t enum type is used. This
1638  * function converts a permission value from the FF-A format to the mmap_attr_t
1639  * format by setting MT_RW/MT_RO, MT_USER/MT_PRIVILEGED and
1640  * MT_EXECUTE/MT_EXECUTE_NEVER. The other fields are left as 0 because they are
1641  * ignored by the function xlat_change_mem_attributes_ctx().
1642  ******************************************************************************/
1643 static unsigned int ffa_perm_to_mmap_perm(unsigned int perms)
1644 {
1645 	unsigned int tf_attr = 0U;
1646 	unsigned int access;
1647 
1648 	/* Deal with data access permissions first. */
1649 	access = (perms & FFA_MEM_PERM_DATA_MASK) >> FFA_MEM_PERM_DATA_SHIFT;
1650 
1651 	switch (access) {
1652 	case FFA_MEM_PERM_DATA_RW:
1653 		/* RW with execute is not allowed; leave tf_attr as 0 (invalid). */
1654 		if ((perms & FFA_MEM_PERM_INST_NON_EXEC) != 0) {
1655 			tf_attr |= MT_RW | MT_USER | MT_EXECUTE_NEVER;
1656 		}
1657 		break;
1658 
1659 	case FFA_MEM_PERM_DATA_RO:
1660 		tf_attr |= MT_RO | MT_USER;
1661 		/* Deal with the instruction access permissions next. */
1662 		if ((perms & FFA_MEM_PERM_INST_NON_EXEC) == 0) {
1663 			tf_attr |= MT_EXECUTE;
1664 		} else {
1665 			tf_attr |= MT_EXECUTE_NEVER;
1666 		}
1667 		break;
1668 
1669 	case FFA_MEM_PERM_DATA_NA:
1670 	default:
1671 		return tf_attr;
1672 	}
1673 
1674 	return tf_attr;
1675 }
1676 
1677 /*******************************************************************************
1678  * Handler to set the permissions of a set of contiguous pages of a S-EL0 SP
1679  ******************************************************************************/
1680 static uint64_t ffa_mem_perm_set_handler(uint32_t smc_fid,
1681 					 bool secure_origin,
1682 					 uint64_t x1,
1683 					 uint64_t x2,
1684 					 uint64_t x3,
1685 					 uint64_t x4,
1686 					 void *cookie,
1687 					 void *handle,
1688 					 uint64_t flags)
1689 {
1690 	struct secure_partition_desc *sp;
1691 	unsigned int idx;
1692 	uintptr_t base_va = (uintptr_t) x1;
1693 	size_t size = (size_t)(x2 * PAGE_SIZE);
1694 	uint32_t tf_attr;
1695 	int ret;
1696 
1697 	/* This request cannot originate from the Normal world. */
1698 	if (!secure_origin) {
1699 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1700 	}
1701 
1702 	if (size == 0) {
1703 		return spmc_ffa_error_return(handle,
1704 					     FFA_ERROR_INVALID_PARAMETER);
1705 	}
1706 
1707 	/* Get the context of the current SP. */
1708 	sp = spmc_get_current_sp_ctx();
1709 	if (sp == NULL) {
1710 		return spmc_ffa_error_return(handle,
1711 					     FFA_ERROR_INVALID_PARAMETER);
1712 	}
1713 
1714 	/* A S-EL1 SP has no business invoking this ABI. */
1715 	if (sp->runtime_el == S_EL1) {
1716 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1717 	}
1718 
1719 	if ((x3 & ~((uint64_t)FFA_MEM_PERM_MASK)) != 0) {
1720 		return spmc_ffa_error_return(handle,
1721 					     FFA_ERROR_INVALID_PARAMETER);
1722 	}
1723 
1724 	/* Get the execution context of the calling SP. */
1725 	idx = get_ec_index(sp);
1726 
1727 	/*
1728 	 * Ensure that the S-EL0 SP is initialising itself. We do not need to
	 * synchronise this operation through a spinlock since an S-EL0 SP is
	 * UP and can only be initialising on this CPU.
1731 	 */
1732 	if (sp->ec[idx].rt_model != RT_MODEL_INIT) {
1733 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1734 	}
1735 
1736 	VERBOSE("Setting memory permissions:\n");
1737 	VERBOSE("  Start address  : 0x%lx\n", base_va);
1738 	VERBOSE("  Number of pages: %lu (%zu bytes)\n", x2, size);
1739 	VERBOSE("  Attributes     : 0x%x\n", (uint32_t)x3);
1740 
1741 	/* Convert inbound permissions to TF-A permission attributes */
1742 	tf_attr = ffa_perm_to_mmap_perm((unsigned int)x3);
1743 	if (tf_attr == 0U) {
1744 		return spmc_ffa_error_return(handle,
1745 					     FFA_ERROR_INVALID_PARAMETER);
1746 	}
1747 
1748 	/* Request the change in permissions */
1749 	ret = xlat_change_mem_attributes_ctx(sp->xlat_ctx_handle,
1750 					     base_va, size, tf_attr);
1751 	if (ret != 0) {
1752 		return spmc_ffa_error_return(handle,
1753 					     FFA_ERROR_INVALID_PARAMETER);
1754 	}
1755 
1756 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1757 }
1758 
1759 /*******************************************************************************
1760  * Permissions are encoded using a different format in the FFA_MEM_PERM_* ABIs
1761  * than in the Trusted Firmware, where the mmap_attr_t enum type is used. This
1762  * function converts a permission value from the mmap_attr_t format to the FF-A
1763  * format.
1764  ******************************************************************************/
1765 static unsigned int mmap_perm_to_ffa_perm(unsigned int attr)
1766 {
1767 	unsigned int perms = 0U;
1768 	unsigned int data_access;
1769 
1770 	if ((attr & MT_USER) == 0) {
1771 		/* No access from EL0. */
1772 		data_access = FFA_MEM_PERM_DATA_NA;
1773 	} else {
1774 		if ((attr & MT_RW) != 0) {
1775 			data_access = FFA_MEM_PERM_DATA_RW;
1776 		} else {
1777 			data_access = FFA_MEM_PERM_DATA_RO;
1778 		}
1779 	}
1780 
1781 	perms |= (data_access & FFA_MEM_PERM_DATA_MASK)
1782 		<< FFA_MEM_PERM_DATA_SHIFT;
1783 
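	/* Executable is the default; only flag the non-executable case. */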
1784 	if ((attr & MT_EXECUTE_NEVER) != 0U) {
1785 		perms |= FFA_MEM_PERM_INST_NON_EXEC;
1786 	}
1787 
1788 	return perms;
1789 }
1790 
1791 /*******************************************************************************
1792  * Handler to get the permissions of a set of contiguous pages of a S-EL0 SP
1793  ******************************************************************************/
1794 static uint64_t ffa_mem_perm_get_handler(uint32_t smc_fid,
1795 					 bool secure_origin,
1796 					 uint64_t x1,
1797 					 uint64_t x2,
1798 					 uint64_t x3,
1799 					 uint64_t x4,
1800 					 void *cookie,
1801 					 void *handle,
1802 					 uint64_t flags)
1803 {
1804 	struct secure_partition_desc *sp;
1805 	unsigned int idx;
1806 	uintptr_t base_va = (uintptr_t)x1;
1807 	uint32_t tf_attr = 0;
1808 	int ret;
1809 
1810 	/* This request cannot originate from the Normal world. */
1811 	if (!secure_origin) {
1812 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1813 	}
1814 
1815 	/* Get the context of the current SP. */
1816 	sp = spmc_get_current_sp_ctx();
1817 	if (sp == NULL) {
1818 		return spmc_ffa_error_return(handle,
1819 					     FFA_ERROR_INVALID_PARAMETER);
1820 	}
1821 
	/* An S-EL1 SP has no business invoking this ABI. */
1823 	if (sp->runtime_el == S_EL1) {
1824 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1825 	}
1826 
1827 	/* Get the execution context of the calling SP. */
1828 	idx = get_ec_index(sp);
1829 
1830 	/*
1831 	 * Ensure that the S-EL0 SP is initialising itself. We do not need to
	 * synchronise this operation through a spinlock since an S-EL0 SP is
	 * UP and can only be initialising on this CPU.
1834 	 */
1835 	if (sp->ec[idx].rt_model != RT_MODEL_INIT) {
1836 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1837 	}
1838 
1839 	/* Request the permissions */
	ret = xlat_get_mem_attributes_ctx(sp->xlat_ctx_handle, base_va,
					  &tf_attr);
1841 	if (ret != 0) {
1842 		return spmc_ffa_error_return(handle,
1843 					     FFA_ERROR_INVALID_PARAMETER);
1844 	}
1845 
1846 	/* Convert TF-A permission to FF-A permissions attributes. */
1847 	x2 = mmap_perm_to_ffa_perm(tf_attr);
1848 
1849 	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0, x2);
1850 }
1851 
1852 /*******************************************************************************
 * This function parses the Secure Partition manifest. From the manifest, it
 * fetches the details required to prepare the Secure Partition image context
 * and the Secure Partition image boot arguments, if any.
1856  ******************************************************************************/
1857 static int sp_manifest_parse(void *sp_manifest, int offset,
1858 			     struct secure_partition_desc *sp,
1859 			     entry_point_info_t *ep_info,
1860 			     int32_t *boot_info_reg)
1861 {
1862 	int32_t ret, node;
1863 	uint32_t config_32;
1864 
1865 	/*
1866 	 * Look for the mandatory fields that are expected to be present in
1867 	 * the SP manifests.
1868 	 */
1869 	node = fdt_path_offset(sp_manifest, "/");
1870 	if (node < 0) {
1871 		ERROR("Did not find root node.\n");
1872 		return node;
1873 	}
1874 
1875 	ret = fdt_read_uint32_array(sp_manifest, node, "uuid",
1876 				    ARRAY_SIZE(sp->uuid), sp->uuid);
1877 	if (ret != 0) {
1878 		ERROR("Missing Secure Partition UUID.\n");
1879 		return ret;
1880 	}
1881 
1882 	ret = fdt_read_uint32(sp_manifest, node, "exception-level", &config_32);
1883 	if (ret != 0) {
1884 		ERROR("Missing SP Exception Level information.\n");
1885 		return ret;
1886 	}
1887 
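	/* The runtime EL is validated by the caller (S_EL0 or S_EL1 only). */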
1888 	sp->runtime_el = config_32;
1889 
1890 	ret = fdt_read_uint32(sp_manifest, node, "ffa-version", &config_32);
1891 	if (ret != 0) {
1892 		ERROR("Missing Secure Partition FF-A Version.\n");
1893 		return ret;
1894 	}
1895 
1896 	sp->ffa_version = config_32;
1897 
1898 	ret = fdt_read_uint32(sp_manifest, node, "execution-state", &config_32);
1899 	if (ret != 0) {
1900 		ERROR("Missing Secure Partition Execution State.\n");
1901 		return ret;
1902 	}
1903 
1904 	sp->execution_state = config_32;
1905 
1906 	ret = fdt_read_uint32(sp_manifest, node,
1907 			      "messaging-method", &config_32);
1908 	if (ret != 0) {
1909 		ERROR("Missing Secure Partition messaging method.\n");
1910 		return ret;
1911 	}
1912 
	/* Validate this entry; we currently only support direct messaging. */
1914 	if ((config_32 & ~(FFA_PARTITION_DIRECT_REQ_RECV |
1915 			  FFA_PARTITION_DIRECT_REQ_SEND)) != 0U) {
1916 		WARN("Invalid Secure Partition messaging method (0x%x)\n",
1917 		     config_32);
1918 		return -EINVAL;
1919 	}
1920 
1921 	sp->properties = config_32;
1922 
1923 	ret = fdt_read_uint32(sp_manifest, node,
1924 			      "execution-ctx-count", &config_32);
1925 
1926 	if (ret != 0) {
1927 		ERROR("Missing SP Execution Context Count.\n");
1928 		return ret;
1929 	}
1930 
1931 	/*
	 * Ensure this field is set correctly in the manifest. However, since
	 * this is currently a hardcoded value for S-EL1 partitions, we don't
	 * need to save it here; just validate it.
1935 	 */
1936 	if ((sp->runtime_el == S_EL1) && (config_32 != PLATFORM_CORE_COUNT)) {
1937 		ERROR("SP Execution Context Count (%u) must be %u.\n",
1938 			config_32, PLATFORM_CORE_COUNT);
1939 		return -EINVAL;
1940 	}
1941 
1942 	/*
1943 	 * Look for the optional fields that are expected to be present in
1944 	 * an SP manifest.
1945 	 */
1946 	ret = fdt_read_uint32(sp_manifest, node, "id", &config_32);
1947 	if (ret != 0) {
1948 		WARN("Missing Secure Partition ID.\n");
1949 	} else {
1950 		if (!is_ffa_secure_id_valid(config_32)) {
1951 			ERROR("Invalid Secure Partition ID (0x%x).\n",
1952 			      config_32);
1953 			return -EINVAL;
1954 		}
1955 		sp->sp_id = config_32;
1956 	}
1957 
1958 	ret = fdt_read_uint32(sp_manifest, node,
1959 			      "power-management-messages", &config_32);
1960 	if (ret != 0) {
1961 		WARN("Missing Power Management Messages entry.\n");
1962 	} else {
1963 		if ((sp->runtime_el == S_EL0) && (config_32 != 0)) {
1964 			ERROR("Power messages not supported for S-EL0 SP\n");
1965 			return -EINVAL;
1966 		}
1967 
1968 		/*
1969 		 * Ensure only the currently supported power messages have
1970 		 * been requested.
1971 		 */
1972 		if (config_32 & ~(FFA_PM_MSG_SUB_CPU_OFF |
1973 				  FFA_PM_MSG_SUB_CPU_SUSPEND |
1974 				  FFA_PM_MSG_SUB_CPU_SUSPEND_RESUME)) {
1975 			ERROR("Requested unsupported PM messages (%x)\n",
1976 			      config_32);
1977 			return -EINVAL;
1978 		}
1979 		sp->pwr_mgmt_msgs = config_32;
1980 	}
1981 
1982 	ret = fdt_read_uint32(sp_manifest, node,
1983 			      "gp-register-num", &config_32);
1984 	if (ret != 0) {
1985 		WARN("Missing boot information register.\n");
1986 	} else {
1987 		/* Check if a register number between 0-3 is specified. */
1988 		if (config_32 < 4) {
1989 			*boot_info_reg = config_32;
1990 		} else {
1991 			WARN("Incorrect boot information register (%u).\n",
1992 			     config_32);
1993 		}
1994 	}
1995 
1996 	return 0;
1997 }
1998 
1999 /*******************************************************************************
2000  * This function gets the Secure Partition Manifest base and maps the manifest
2001  * region.
 * Currently only one Secure Partition manifest is supported; it is used to
 * prepare the context for the single Secure Partition.
2004  ******************************************************************************/
2005 static int find_and_prepare_sp_context(void)
2006 {
2007 	void *sp_manifest;
2008 	uintptr_t manifest_base;
2009 	uintptr_t manifest_base_align;
2010 	entry_point_info_t *next_image_ep_info;
2011 	int32_t ret, boot_info_reg = -1;
2012 	struct secure_partition_desc *sp;
2013 
2014 	next_image_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
2015 	if (next_image_ep_info == NULL) {
2016 		WARN("No Secure Partition image provided by BL2.\n");
2017 		return -ENOENT;
2018 	}
2019 
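	/* BL2 provides the SP manifest address in arg0 of the SP's ep_info. */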
2020 	sp_manifest = (void *)next_image_ep_info->args.arg0;
2021 	if (sp_manifest == NULL) {
2022 		WARN("Secure Partition manifest absent.\n");
2023 		return -ENOENT;
2024 	}
2025 
2026 	manifest_base = (uintptr_t)sp_manifest;
2027 	manifest_base_align = page_align(manifest_base, DOWN);
2028 
2029 	/*
2030 	 * Map the secure partition manifest region in the EL3 translation
2031 	 * regime.
	 * Map an area of (2 * PAGE_SIZE) for now: once the manifest base has
	 * been aligned down to a page boundary, a single page starting at the
	 * aligned base may not fully cover the manifest region.
2035 	 */
2036 	ret = mmap_add_dynamic_region((unsigned long long)manifest_base_align,
2037 				      manifest_base_align,
2038 				      PAGE_SIZE * 2,
2039 				      MT_RO_DATA);
2040 	if (ret != 0) {
2041 		ERROR("Error while mapping SP manifest (%d).\n", ret);
2042 		return ret;
2043 	}
2044 
2045 	ret = fdt_node_offset_by_compatible(sp_manifest, -1,
2046 					    "arm,ffa-manifest-1.0");
2047 	if (ret < 0) {
2048 		ERROR("Error happened in SP manifest reading.\n");
2049 		return -EINVAL;
2050 	}
2051 
2052 	/*
	 * Store the size of the manifest so that it can be used later when
	 * passing the manifest as boot information.
2055 	 */
2056 	next_image_ep_info->args.arg1 = fdt_totalsize(sp_manifest);
2057 	INFO("Manifest adr = %lx , size = %lu bytes\n", manifest_base,
2058 	     next_image_ep_info->args.arg1);
2059 
2060 	/*
2061 	 * Select an SP descriptor for initialising the partition's execution
2062 	 * context on the primary CPU.
2063 	 */
2064 	sp = spmc_get_current_sp_ctx();
2065 
2066 #if SPMC_AT_EL3_SEL0_SP
2067 	/* Assign translation tables context. */
	sp->xlat_ctx_handle = spm_get_sp_xlat_context();
2069 
2070 #endif /* SPMC_AT_EL3_SEL0_SP */
2071 	/* Initialize entry point information for the SP */
2072 	SET_PARAM_HEAD(next_image_ep_info, PARAM_EP, VERSION_1,
2073 		       SECURE | EP_ST_ENABLE);
2074 
2075 	/* Parse the SP manifest. */
2076 	ret = sp_manifest_parse(sp_manifest, ret, sp, next_image_ep_info,
2077 				&boot_info_reg);
2078 	if (ret != 0) {
2079 		ERROR("Error in Secure Partition manifest parsing.\n");
2080 		return ret;
2081 	}
2082 
	/* Check that the runtime EL read from the manifest is valid. */
2084 	if (sp->runtime_el != S_EL0 && sp->runtime_el != S_EL1) {
2085 		ERROR("Unexpected runtime EL: %d\n", sp->runtime_el);
2086 		return -EINVAL;
2087 	}
2088 
2089 	/* Perform any common initialisation. */
2090 	spmc_sp_common_setup(sp, next_image_ep_info, boot_info_reg);
2091 
2092 	/* Perform any initialisation specific to S-EL1 SPs. */
2093 	if (sp->runtime_el == S_EL1) {
2094 		spmc_el1_sp_setup(sp, next_image_ep_info);
2095 	}
2096 
2097 #if SPMC_AT_EL3_SEL0_SP
2098 	/* Setup spsr in endpoint info for common context management routine. */
2099 	if (sp->runtime_el == S_EL0) {
2100 		spmc_el0_sp_spsr_setup(next_image_ep_info);
2101 	}
2102 #endif /* SPMC_AT_EL3_SEL0_SP */
2103 
2104 	/* Initialize the SP context with the required ep info. */
2105 	spmc_sp_common_ep_commit(sp, next_image_ep_info);
2106 
2107 #if SPMC_AT_EL3_SEL0_SP
2108 	/*
2109 	 * Perform any initialisation specific to S-EL0 not set by common
2110 	 * context management routine.
2111 	 */
2112 	if (sp->runtime_el == S_EL0) {
2113 		spmc_el0_sp_setup(sp, boot_info_reg, sp_manifest);
2114 	}
2115 #endif /* SPMC_AT_EL3_SEL0_SP */
2116 	return 0;
2117 }
2118 
2119 /*******************************************************************************
 * This function validates the EL3 Logical Partition descriptors and runs the
 * initialisation handler of each EL3 Logical Partition.
2122  ******************************************************************************/
2123 static int32_t logical_sp_init(void)
2124 {
2125 	int32_t rc = 0;
2126 	struct el3_lp_desc *el3_lp_descs;
2127 
2128 	/* Perform initial validation of the Logical Partitions. */
2129 	rc = el3_sp_desc_validate();
2130 	if (rc != 0) {
2131 		ERROR("Logical Partition validation failed!\n");
2132 		return rc;
2133 	}
2134 
2135 	el3_lp_descs = get_el3_lp_array();
2136 
2137 	INFO("Logical Secure Partition init start.\n");
2138 	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
2139 		rc = el3_lp_descs[i].init();
2140 		if (rc != 0) {
2141 			ERROR("Logical SP (0x%x) Failed to Initialize\n",
2142 			      el3_lp_descs[i].sp_id);
2143 			return rc;
2144 		}
2145 		VERBOSE("Logical SP (0x%x) Initialized\n",
2146 			      el3_lp_descs[i].sp_id);
2147 	}
2148 
2149 	INFO("Logical Secure Partition init completed.\n");
2150 
2151 	return rc;
2152 }
2153 
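/*******************************************************************************
 * This function takes an SP execution context pointer and performs a
 * synchronous entry into the SP on the current physical cpu.
 ******************************************************************************/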
2154 uint64_t spmc_sp_synchronous_entry(struct sp_exec_ctx *ec)
2155 {
2156 	uint64_t rc;
2157 
2158 	assert(ec != NULL);
2159 
2160 	/* Assign the context of the SP to this CPU */
2161 	cm_set_context(&(ec->cpu_ctx), SECURE);
2162 
2163 	/* Restore the context assigned above */
2164 	cm_el1_sysregs_context_restore(SECURE);
2165 	cm_set_next_eret_context(SECURE);
2166 
2167 	/* Invalidate TLBs at EL1. */
2168 	tlbivmalle1();
2169 	dsbish();
2170 
2171 	/* Enter Secure Partition */
2172 	rc = spm_secure_partition_enter(&ec->c_rt_ctx);
2173 
2174 	/* Save secure state */
2175 	cm_el1_sysregs_context_save(SECURE);
2176 
2177 	return rc;
2178 }
2179 
2180 /*******************************************************************************
2181  * SPMC Helper Functions.
2182  ******************************************************************************/
2183 static int32_t sp_init(void)
2184 {
2185 	uint64_t rc;
2186 	struct secure_partition_desc *sp;
2187 	struct sp_exec_ctx *ec;
2188 
2189 	sp = spmc_get_current_sp_ctx();
2190 	ec = spmc_get_sp_ec(sp);
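	/* Mark the execution context as running the SP's initialisation. */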
2191 	ec->rt_model = RT_MODEL_INIT;
2192 	ec->rt_state = RT_STATE_RUNNING;
2193 
2194 	INFO("Secure Partition (0x%x) init start.\n", sp->sp_id);
2195 
2196 	rc = spmc_sp_synchronous_entry(ec);
2197 	if (rc != 0) {
2198 		/* Indicate SP init was not successful. */
2199 		ERROR("SP (0x%x) failed to initialize (%lu).\n",
2200 		      sp->sp_id, rc);
2201 		return 0;
2202 	}
2203 
2204 	ec->rt_state = RT_STATE_WAITING;
2205 	INFO("Secure Partition initialized.\n");
2206 
2207 	return 1;
2208 }
2209 
static void initialize_sp_descs(void)
2211 {
2212 	struct secure_partition_desc *sp;
2213 
2214 	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
2215 		sp = &sp_desc[i];
2216 		sp->sp_id = INV_SP_ID;
2217 		sp->mailbox.rx_buffer = NULL;
2218 		sp->mailbox.tx_buffer = NULL;
2219 		sp->mailbox.state = MAILBOX_STATE_EMPTY;
2220 		sp->secondary_ep = 0;
2221 	}
2222 }
2223 
static void initialize_ns_ep_descs(void)
2225 {
2226 	struct ns_endpoint_desc *ns_ep;
2227 
2228 	for (unsigned int i = 0U; i < NS_PARTITION_COUNT; i++) {
2229 		ns_ep = &ns_ep_desc[i];
2230 		/*
2231 		 * Clashes with the Hypervisor ID but will not be a
2232 		 * problem in practice.
2233 		 */
2234 		ns_ep->ns_ep_id = 0;
2235 		ns_ep->ffa_version = 0;
2236 		ns_ep->mailbox.rx_buffer = NULL;
2237 		ns_ep->mailbox.tx_buffer = NULL;
2238 		ns_ep->mailbox.state = MAILBOX_STATE_EMPTY;
2239 	}
2240 }
2241 
2242 /*******************************************************************************
2243  * Initialize SPMC attributes for the SPMD.
2244  ******************************************************************************/
2245 void spmc_populate_attrs(spmc_manifest_attribute_t *spmc_attrs)
2246 {
2247 	spmc_attrs->major_version = FFA_VERSION_MAJOR;
2248 	spmc_attrs->minor_version = FFA_VERSION_MINOR;
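	/* The EL3 SPMC executes in the AArch64 execution state. */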
2249 	spmc_attrs->exec_state = MODE_RW_64;
2250 	spmc_attrs->spmc_id = FFA_SPMC_ID;
2251 }
2252 
2253 /*******************************************************************************
2254  * Initialize contexts of all Secure Partitions.
2255  ******************************************************************************/
2256 int32_t spmc_setup(void)
2257 {
2258 	int32_t ret;
2259 	uint32_t flags;
2260 
2261 	/* Initialize endpoint descriptors */
	initialize_sp_descs();
	initialize_ns_ep_descs();
2264 
2265 	/*
	 * Retrieve the datastore allocated by platform code for tracking
	 * shared memory requests, and zero the region if available.
2268 	 */
2269 	ret = plat_spmc_shmem_datastore_get(&spmc_shmem_obj_state.data,
2270 					    &spmc_shmem_obj_state.data_size);
2271 	if (ret != 0) {
2272 		ERROR("Failed to obtain memory descriptor backing store!\n");
2273 		return ret;
2274 	}
2275 	memset(spmc_shmem_obj_state.data, 0, spmc_shmem_obj_state.data_size);
2276 
2277 	/* Setup logical SPs. */
2278 	ret = logical_sp_init();
2279 	if (ret != 0) {
2280 		ERROR("Failed to initialize Logical Partitions.\n");
2281 		return ret;
2282 	}
2283 
2284 	/* Perform physical SP setup. */
2285 
2286 	/* Disable MMU at EL1 (initialized by BL2) */
2287 	disable_mmu_icache_el1();
2288 
2289 	/* Initialize context of the SP */
2290 	INFO("Secure Partition context setup start.\n");
2291 
2292 	ret = find_and_prepare_sp_context();
2293 	if (ret != 0) {
2294 		ERROR("Error in SP finding and context preparation.\n");
2295 		return ret;
2296 	}
2297 
2298 	/* Register power management hooks with PSCI */
2299 	psci_register_spd_pm_hook(&spmc_pm);
2300 
2301 	/*
2302 	 * Register an interrupt handler for S-EL1 interrupts
	 * generated while execution is in the non-secure state.
2305 	 */
2306 	flags = 0;
2307 	set_interrupt_rm_flag(flags, NON_SECURE);
2308 	ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
2309 					      spmc_sp_interrupt_handler,
2310 					      flags);
2311 	if (ret != 0) {
2312 		ERROR("Failed to register interrupt handler! (%d)\n", ret);
2313 		panic();
2314 	}
2315 
	/* Register init function for deferred init. */
2317 	bl31_register_bl32_init(&sp_init);
2318 
2319 	INFO("Secure Partition setup done.\n");
2320 
2321 	return 0;
2322 }
2323 
2324 /*******************************************************************************
2325  * Secure Partition Manager SMC handler.
2326  ******************************************************************************/
2327 uint64_t spmc_smc_handler(uint32_t smc_fid,
2328 			  bool secure_origin,
2329 			  uint64_t x1,
2330 			  uint64_t x2,
2331 			  uint64_t x3,
2332 			  uint64_t x4,
2333 			  void *cookie,
2334 			  void *handle,
2335 			  uint64_t flags)
2336 {
2337 	switch (smc_fid) {
2338 
2339 	case FFA_VERSION:
2340 		return ffa_version_handler(smc_fid, secure_origin, x1, x2, x3,
2341 					   x4, cookie, handle, flags);
2342 
2343 	case FFA_SPM_ID_GET:
2344 		return ffa_spm_id_get_handler(smc_fid, secure_origin, x1, x2,
2345 					     x3, x4, cookie, handle, flags);
2346 
2347 	case FFA_ID_GET:
2348 		return ffa_id_get_handler(smc_fid, secure_origin, x1, x2, x3,
2349 					  x4, cookie, handle, flags);
2350 
2351 	case FFA_FEATURES:
2352 		return ffa_features_handler(smc_fid, secure_origin, x1, x2, x3,
2353 					    x4, cookie, handle, flags);
2354 
2355 	case FFA_SECONDARY_EP_REGISTER_SMC64:
2356 		return ffa_sec_ep_register_handler(smc_fid, secure_origin, x1,
2357 						   x2, x3, x4, cookie, handle,
2358 						   flags);
2359 
2360 	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
2361 	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
2362 		return direct_req_smc_handler(smc_fid, secure_origin, x1, x2,
2363 					      x3, x4, cookie, handle, flags);
2364 
2365 	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
2366 	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
2367 		return direct_resp_smc_handler(smc_fid, secure_origin, x1, x2,
2368 					       x3, x4, cookie, handle, flags);
2369 
2370 	case FFA_RXTX_MAP_SMC32:
2371 	case FFA_RXTX_MAP_SMC64:
2372 		return rxtx_map_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2373 					cookie, handle, flags);
2374 
2375 	case FFA_RXTX_UNMAP:
2376 		return rxtx_unmap_handler(smc_fid, secure_origin, x1, x2, x3,
2377 					  x4, cookie, handle, flags);
2378 
2379 	case FFA_PARTITION_INFO_GET:
2380 		return partition_info_get_handler(smc_fid, secure_origin, x1,
2381 						  x2, x3, x4, cookie, handle,
2382 						  flags);
2383 
2384 	case FFA_RX_RELEASE:
2385 		return rx_release_handler(smc_fid, secure_origin, x1, x2, x3,
2386 					  x4, cookie, handle, flags);
2387 
2388 	case FFA_MSG_WAIT:
2389 		return msg_wait_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2390 					cookie, handle, flags);
2391 
2392 	case FFA_ERROR:
2393 		return ffa_error_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2394 					cookie, handle, flags);
2395 
2396 	case FFA_MSG_RUN:
2397 		return ffa_run_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2398 				       cookie, handle, flags);
2399 
2400 	case FFA_MEM_SHARE_SMC32:
2401 	case FFA_MEM_SHARE_SMC64:
2402 	case FFA_MEM_LEND_SMC32:
2403 	case FFA_MEM_LEND_SMC64:
2404 		return spmc_ffa_mem_send(smc_fid, secure_origin, x1, x2, x3, x4,
2405 					 cookie, handle, flags);
2406 
2407 	case FFA_MEM_FRAG_TX:
2408 		return spmc_ffa_mem_frag_tx(smc_fid, secure_origin, x1, x2, x3,
2409 					    x4, cookie, handle, flags);
2410 
2411 	case FFA_MEM_FRAG_RX:
2412 		return spmc_ffa_mem_frag_rx(smc_fid, secure_origin, x1, x2, x3,
2413 					    x4, cookie, handle, flags);
2414 
2415 	case FFA_MEM_RETRIEVE_REQ_SMC32:
2416 	case FFA_MEM_RETRIEVE_REQ_SMC64:
2417 		return spmc_ffa_mem_retrieve_req(smc_fid, secure_origin, x1, x2,
2418 						 x3, x4, cookie, handle, flags);
2419 
2420 	case FFA_MEM_RELINQUISH:
2421 		return spmc_ffa_mem_relinquish(smc_fid, secure_origin, x1, x2,
2422 					       x3, x4, cookie, handle, flags);
2423 
2424 	case FFA_MEM_RECLAIM:
2425 		return spmc_ffa_mem_reclaim(smc_fid, secure_origin, x1, x2, x3,
2426 						x4, cookie, handle, flags);
2427 	case FFA_CONSOLE_LOG_SMC32:
2428 	case FFA_CONSOLE_LOG_SMC64:
2429 		return spmc_ffa_console_log(smc_fid, secure_origin, x1, x2, x3,
2430 						x4, cookie, handle, flags);
2431 
2432 	case FFA_MEM_PERM_GET:
2433 		return ffa_mem_perm_get_handler(smc_fid, secure_origin, x1, x2,
2434 						x3, x4, cookie, handle, flags);
2435 
2436 	case FFA_MEM_PERM_SET:
2437 		return ffa_mem_perm_set_handler(smc_fid, secure_origin, x1, x2,
2438 						x3, x4, cookie, handle, flags);
2439 
2440 	default:
2441 		WARN("Unsupported FF-A call 0x%08x.\n", smc_fid);
2442 		break;
2443 	}
2444 	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
2445 }
2446 
2447 /*******************************************************************************
2448  * This function is the handler registered for S-EL1 interrupts by the SPMC. It
2449  * validates the interrupt and upon success arranges entry into the SP for
2450  * handling the interrupt.
2451  ******************************************************************************/
2452 static uint64_t spmc_sp_interrupt_handler(uint32_t id,
2453 					  uint32_t flags,
2454 					  void *handle,
2455 					  void *cookie)
2456 {
2457 	struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
2458 	struct sp_exec_ctx *ec;
2459 	uint32_t linear_id = plat_my_core_pos();
2460 
	/* Sanity check to avoid a NULL pointer dereference. */
2462 	assert(sp != NULL);
2463 
2464 	/* Check the security state when the exception was generated. */
2465 	assert(get_interrupt_src_ss(flags) == NON_SECURE);
2466 
2467 	/* Panic if not an S-EL1 Partition. */
2468 	if (sp->runtime_el != S_EL1) {
2469 		ERROR("Interrupt received for a non S-EL1 SP on core%u.\n",
2470 		      linear_id);
2471 		panic();
2472 	}
2473 
2474 	/* Obtain a reference to the SP execution context. */
2475 	ec = spmc_get_sp_ec(sp);
2476 
	/* Panic unless the execution context is in the waiting state. */
2478 	if (ec->rt_state != RT_STATE_WAITING) {
2479 		ERROR("SP EC on core%u is not waiting (%u), it is (%u).\n",
2480 		      linear_id, RT_STATE_WAITING, ec->rt_state);
2481 		panic();
2482 	}
2483 
2484 	/* Update the runtime model and state of the partition. */
2485 	ec->rt_model = RT_MODEL_INTR;
2486 	ec->rt_state = RT_STATE_RUNNING;
2487 
2488 	VERBOSE("SP (0x%x) interrupt start on core%u.\n", sp->sp_id, linear_id);
2489 
2490 	/*
2491 	 * Forward the interrupt to the S-EL1 SP. The interrupt ID is not
2492 	 * populated as the SP can determine this by itself.
2493 	 */
2494 	return spmd_smc_switch_state(FFA_INTERRUPT, false,
2495 				     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
2496 				     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
2497 				     handle);
2498 }
2499