xref: /rk3399_ARM-atf/services/std_svc/spm/el3_spmc/spmc_main.c (revision 83c3da7711a246e04f4d0a64593fc0ab46f08bad)
1 /*
2  * Copyright (c) 2022-2023, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <errno.h>
9 
10 #include <arch_helpers.h>
11 #include <bl31/bl31.h>
12 #include <bl31/ehf.h>
13 #include <bl31/interrupt_mgmt.h>
14 #include <common/debug.h>
15 #include <common/fdt_wrappers.h>
16 #include <common/runtime_svc.h>
17 #include <common/uuid.h>
18 #include <lib/el3_runtime/context_mgmt.h>
19 #include <lib/smccc.h>
20 #include <lib/utils.h>
21 #include <lib/xlat_tables/xlat_tables_v2.h>
22 #include <libfdt.h>
23 #include <plat/common/platform.h>
24 #include <services/el3_spmc_logical_sp.h>
25 #include <services/ffa_svc.h>
26 #include <services/spmc_svc.h>
27 #include <services/spmd_svc.h>
28 #include "spmc.h"
29 #include "spmc_shared_mem.h"
30 
31 #include <platform_def.h>
32 
33 /* FFA_MEM_PERM_* helpers */
34 #define FFA_MEM_PERM_MASK		U(7)
35 #define FFA_MEM_PERM_DATA_MASK		U(3)
36 #define FFA_MEM_PERM_DATA_SHIFT		U(0)
37 #define FFA_MEM_PERM_DATA_NA		U(0)
38 #define FFA_MEM_PERM_DATA_RW		U(1)
39 #define FFA_MEM_PERM_DATA_RES		U(2)
40 #define FFA_MEM_PERM_DATA_RO		U(3)
41 #define FFA_MEM_PERM_INST_EXEC          (U(0) << 2)
42 #define FFA_MEM_PERM_INST_NON_EXEC      (U(1) << 2)
43 
44 /* Declare the maximum number of SPs and EL3 LPs. */
45 #define MAX_SP_LP_PARTITIONS (SECURE_PARTITION_COUNT + MAX_EL3_LP_DESCS_COUNT)
46 
47 /*
48  * Allocate a secure partition descriptor to describe each SP in the system that
49  * does not reside at EL3.
50  */
51 static struct secure_partition_desc sp_desc[SECURE_PARTITION_COUNT];
52 
53 /*
54  * Allocate an NS endpoint descriptor to describe each VM and the Hypervisor in
55  * the system that interacts with a SP. It is used to track the Hypervisor
56  * buffer pair, version and ID for now. It could be extended to track VM
57  * properties when the SPMC supports indirect messaging.
58  */
59 static struct ns_endpoint_desc ns_ep_desc[NS_PARTITION_COUNT];
60 
61 static uint64_t spmc_sp_interrupt_handler(uint32_t id,
62 					  uint32_t flags,
63 					  void *handle,
64 					  void *cookie);
65 
66 /*
67  * Helper function to obtain the array storing the EL3
68  * Logical Partition descriptors.
69  */
70 struct el3_lp_desc *get_el3_lp_array(void)
71 {
72 	return (struct el3_lp_desc *) EL3_LP_DESCS_START;
73 }
74 
75 /*
76  * Helper function to obtain the descriptor of the last SP to which control
77  * was handed on this physical cpu. Currently, we assume there is only one SP.
78  * TODO: Expand to track multiple partitions when required.
79  */
80 struct secure_partition_desc *spmc_get_current_sp_ctx(void)
81 {
82 	return &(sp_desc[ACTIVE_SP_DESC_INDEX]);
83 }
84 
85 /*
86  * Helper function to obtain the execution context of an SP on the
87  * current physical cpu.
88  */
89 struct sp_exec_ctx *spmc_get_sp_ec(struct secure_partition_desc *sp)
90 {
91 	return &(sp->ec[get_ec_index(sp)]);
92 }
93 
94 /* Helper function to get pointer to SP context from its ID. */
95 struct secure_partition_desc *spmc_get_sp_ctx(uint16_t id)
96 {
97 	/* Check for Secure World Partitions. */
98 	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
99 		if (sp_desc[i].sp_id == id) {
100 			return &(sp_desc[i]);
101 		}
102 	}
103 	return NULL;
104 }
105 
106 /*
107  * Helper function to obtain the descriptor of the Hypervisor or OS kernel.
108  * We assume that the first descriptor is reserved for this entity.
109  */
110 struct ns_endpoint_desc *spmc_get_hyp_ctx(void)
111 {
112 	return &(ns_ep_desc[0]);
113 }
114 
115 /*
116  * Helper function to obtain the RX/TX buffer pair descriptor of the Hypervisor
117  * or OS kernel in the normal world or the last SP that was run.
118  */
119 struct mailbox *spmc_get_mbox_desc(bool secure_origin)
120 {
121 	/* Obtain the RX/TX buffer pair descriptor. */
122 	if (secure_origin) {
123 		return &(spmc_get_current_sp_ctx()->mailbox);
124 	} else {
125 		return &(spmc_get_hyp_ctx()->mailbox);
126 	}
127 }
128 
129 /******************************************************************************
130  * This function returns to the place where spmc_sp_synchronous_entry() was
131  * called originally.
132  ******************************************************************************/
133 __dead2 void spmc_sp_synchronous_exit(struct sp_exec_ctx *ec, uint64_t rc)
134 {
135 	/*
136 	 * The SPM must have initiated the original request through a
137 	 * synchronous entry into the secure partition. Jump back to the
138 	 * original C runtime context with the value of rc in x0.
139 	 */
140 	spm_secure_partition_exit(ec->c_rt_ctx, rc);
141 
142 	panic();
143 }
144 
145 /*******************************************************************************
146  * Return FFA_ERROR with specified error code.
147  ******************************************************************************/
148 uint64_t spmc_ffa_error_return(void *handle, int error_code)
149 {
150 	SMC_RET8(handle, FFA_ERROR,
151 		 FFA_TARGET_INFO_MBZ, error_code,
152 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
153 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
154 }
155 
156 /******************************************************************************
157  * Helper function to validate a secure partition ID to ensure it does not
158  * conflict with any other FF-A component and follows the convention to
159  * indicate it resides within the secure world.
160  ******************************************************************************/
161 bool is_ffa_secure_id_valid(uint16_t partition_id)
162 {
163 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
164 
165 	/* Ensure the ID is not the invalid partition ID. */
166 	if (partition_id == INV_SP_ID) {
167 		return false;
168 	}
169 
170 	/* Ensure the ID is not the SPMD ID. */
171 	if (partition_id == SPMD_DIRECT_MSG_ENDPOINT_ID) {
172 		return false;
173 	}
174 
175 	/*
176 	 * Ensure the ID follows the convention to indicate it resides
177 	 * in the secure world.
178 	 */
179 	if (!ffa_is_secure_world_id(partition_id)) {
180 		return false;
181 	}
182 
183 	/* Ensure we don't conflict with the SPMC partition ID. */
184 	if (partition_id == FFA_SPMC_ID) {
185 		return false;
186 	}
187 
188 	/* Ensure we do not already have an SP context with this ID. */
189 	if (spmc_get_sp_ctx(partition_id)) {
190 		return false;
191 	}
192 
193 	/* Ensure we don't clash with any Logical SPs. */
194 	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
195 		if (el3_lp_descs[i].sp_id == partition_id) {
196 			return false;
197 		}
198 	}
199 
200 	return true;
201 }
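/*
 * Note: the secure/normal world split of the 16-bit partition ID space is
 * encapsulated by the ffa_is_secure_world_id()/ffa_is_normal_world_id()
 * helpers, so the check above only needs to reject the reserved IDs
 * (INV_SP_ID, SPMD_DIRECT_MSG_ENDPOINT_ID, FFA_SPMC_ID) and collisions with
 * already registered SPs or EL3 Logical Partitions.
 */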
202 
203 /*******************************************************************************
204  * This function either forwards the request to the other world or returns
205  * with an ERET depending on the source of the call.
206  * We can assume that the destination is for an entity at a lower exception
207  * level as any messages destined for a logical SP resident in EL3 will have
208  * already been taken care of by the SPMC before entering this function.
209  ******************************************************************************/
210 static uint64_t spmc_smc_return(uint32_t smc_fid,
211 				bool secure_origin,
212 				uint64_t x1,
213 				uint64_t x2,
214 				uint64_t x3,
215 				uint64_t x4,
216 				void *handle,
217 				void *cookie,
218 				uint64_t flags,
219 				uint16_t dst_id)
220 {
221 	/* If the destination is in the normal world always go via the SPMD. */
222 	if (ffa_is_normal_world_id(dst_id)) {
223 		return spmd_smc_handler(smc_fid, x1, x2, x3, x4,
224 					cookie, handle, flags);
225 	}
226 	/*
227 	 * If the caller is secure and we want to return to the secure world,
228 	 * ERET directly.
229 	 */
230 	else if (secure_origin && ffa_is_secure_world_id(dst_id)) {
231 		SMC_RET5(handle, smc_fid, x1, x2, x3, x4);
232 	}
233 	/* If we originated in the normal world then switch contexts. */
234 	else if (!secure_origin && ffa_is_secure_world_id(dst_id)) {
235 		return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2,
236 					     x3, x4, handle);
237 	} else {
238 		/* Unknown State. */
239 		panic();
240 	}
241 
242 	/* Shouldn't be Reached. */
243 	return 0;
244 }
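/*
 * A condensed, illustrative summary of the routing performed above:
 *
 *   destination ID   | caller origin | action
 *   -----------------+---------------+---------------------------------------
 *   normal world ID  | any           | forward via spmd_smc_handler()
 *   secure world ID  | secure        | ERET directly back into the SP
 *   secure world ID  | normal        | spmd_smc_switch_state() into the SWd
 *   anything else    | any           | panic() - inconsistent state
 */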
245 
246 /*******************************************************************************
247  * FF-A ABI Handlers.
248  ******************************************************************************/
249 
250 /*******************************************************************************
251  * Helper function to validate arg2 as part of a direct message.
252  ******************************************************************************/
253 static inline bool direct_msg_validate_arg2(uint64_t x2)
254 {
255 	/* Check message type. */
256 	if (x2 & FFA_FWK_MSG_BIT) {
257 		/* We have a framework message, ensure it is a known message. */
258 		if (x2 & ~(FFA_FWK_MSG_MASK | FFA_FWK_MSG_BIT)) {
259 			VERBOSE("Invalid message format 0x%lx.\n", x2);
260 			return false;
261 		}
262 	} else {
263 		/* We have a partition message; ensure x2 is not set. */
264 		if (x2 != (uint64_t) 0) {
265 			VERBOSE("Arg2 MBZ for partition messages. (0x%lx).\n",
266 				x2);
267 			return false;
268 		}
269 	}
270 	return true;
271 }
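/*
 * Illustrative sketch of the x2/w2 encodings accepted by the helper above:
 *
 *   partition message : x2 == 0                              -> accepted
 *   framework message : x2 == FFA_FWK_MSG_BIT | message type -> accepted as
 *                       long as no bits outside FFA_FWK_MSG_MASK are set
 *   anything else     : rejected, callers return FFA_ERROR_INVALID_PARAMETER
 */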
272 
273 /*******************************************************************************
274  * Helper function to validate the destination ID of a direct response.
275  ******************************************************************************/
276 static bool direct_msg_validate_dst_id(uint16_t dst_id)
277 {
278 	struct secure_partition_desc *sp;
279 
280 	/* Check if we're targeting a normal world partition. */
281 	if (ffa_is_normal_world_id(dst_id)) {
282 		return true;
283 	}
284 
285 	/* Or directed to the SPMC itself. */
286 	if (dst_id == FFA_SPMC_ID) {
287 		return true;
288 	}
289 
290 	/* Otherwise ensure the SP exists. */
291 	sp = spmc_get_sp_ctx(dst_id);
292 	if (sp != NULL) {
293 		return true;
294 	}
295 
296 	return false;
297 }
298 
299 /*******************************************************************************
300  * Helper function to validate the response from a Logical Partition.
301  ******************************************************************************/
302 static bool direct_msg_validate_lp_resp(uint16_t origin_id, uint16_t lp_id,
303 					void *handle)
304 {
305 	/* Retrieve populated Direct Response Arguments. */
306 	uint64_t x1 = SMC_GET_GP(handle, CTX_GPREG_X1);
307 	uint64_t x2 = SMC_GET_GP(handle, CTX_GPREG_X2);
308 	uint16_t src_id = ffa_endpoint_source(x1);
309 	uint16_t dst_id = ffa_endpoint_destination(x1);
310 
311 	if (src_id != lp_id) {
312 		ERROR("Invalid EL3 LP source ID (0x%x).\n", src_id);
313 		return false;
314 	}
315 
316 	/*
317 	 * Check the destination ID is valid and ensure the LP is responding to
318 	 * the original request.
319 	 */
320 	if ((!direct_msg_validate_dst_id(dst_id)) || (dst_id != origin_id)) {
321 		ERROR("Invalid EL3 LP destination ID (0x%x).\n", dst_id);
322 		return false;
323 	}
324 
325 	if (!direct_msg_validate_arg2(x2)) {
326 		ERROR("Invalid EL3 LP message encoding.\n");
327 		return false;
328 	}
329 	return true;
330 }
331 
332 /*******************************************************************************
333  * Handle direct request messages and route to the appropriate destination.
334  ******************************************************************************/
335 static uint64_t direct_req_smc_handler(uint32_t smc_fid,
336 				       bool secure_origin,
337 				       uint64_t x1,
338 				       uint64_t x2,
339 				       uint64_t x3,
340 				       uint64_t x4,
341 				       void *cookie,
342 				       void *handle,
343 				       uint64_t flags)
344 {
345 	uint16_t src_id = ffa_endpoint_source(x1);
346 	uint16_t dst_id = ffa_endpoint_destination(x1);
347 	struct el3_lp_desc *el3_lp_descs;
348 	struct secure_partition_desc *sp;
349 	unsigned int idx;
350 
351 	/* Check if arg2 has been populated correctly based on message type. */
352 	if (!direct_msg_validate_arg2(x2)) {
353 		return spmc_ffa_error_return(handle,
354 					     FFA_ERROR_INVALID_PARAMETER);
355 	}
356 
357 	/* Validate Sender is either the current SP or from the normal world. */
358 	if ((secure_origin && src_id != spmc_get_current_sp_ctx()->sp_id) ||
359 		(!secure_origin && !ffa_is_normal_world_id(src_id))) {
360 		ERROR("Invalid direct request source ID (0x%x).\n", src_id);
361 		return spmc_ffa_error_return(handle,
362 					FFA_ERROR_INVALID_PARAMETER);
363 	}
364 
365 	el3_lp_descs = get_el3_lp_array();
366 
367 	/* Check if the request is destined for a Logical Partition. */
368 	for (unsigned int i = 0U; i < MAX_EL3_LP_DESCS_COUNT; i++) {
369 		if (el3_lp_descs[i].sp_id == dst_id) {
370 			uint64_t ret = el3_lp_descs[i].direct_req(
371 						smc_fid, secure_origin, x1, x2,
372 						x3, x4, cookie, handle, flags);
373 			if (!direct_msg_validate_lp_resp(src_id, dst_id,
374 							 handle)) {
375 				panic();
376 			}
377 
378 			/* Message checks out. */
379 			return ret;
380 		}
381 	}
382 
383 	/*
384 	 * If the request was not targeted to a LSP and from the secure world
385 	 * then it is invalid since a SP cannot call into the Normal world and
386 	 * there is no other SP to call into. If there are other SPs in future
387 	 * then the partition runtime model would need to be validated as well.
388 	 */
389 	if (secure_origin) {
390 		VERBOSE("Direct request not supported to the Normal World.\n");
391 		return spmc_ffa_error_return(handle,
392 					     FFA_ERROR_INVALID_PARAMETER);
393 	}
394 
395 	/* Check if the SP ID is valid. */
396 	sp = spmc_get_sp_ctx(dst_id);
397 	if (sp == NULL) {
398 		VERBOSE("Direct request to unknown partition ID (0x%x).\n",
399 			dst_id);
400 		return spmc_ffa_error_return(handle,
401 					     FFA_ERROR_INVALID_PARAMETER);
402 	}
403 
404 	/*
405 	 * Check that the target execution context is in a waiting state before
406 	 * forwarding the direct request to it.
407 	 */
408 	idx = get_ec_index(sp);
409 	if (sp->ec[idx].rt_state != RT_STATE_WAITING) {
410 		VERBOSE("SP context on core%u is not waiting (%u).\n",
411 			idx, sp->ec[idx].rt_model);
412 		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
413 	}
414 
415 	/*
416 	 * Everything checks out so forward the request to the SP after updating
417 	 * its state and runtime model.
418 	 */
419 	sp->ec[idx].rt_state = RT_STATE_RUNNING;
420 	sp->ec[idx].rt_model = RT_MODEL_DIR_REQ;
421 	sp->ec[idx].dir_req_origin_id = src_id;
422 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
423 			       handle, cookie, flags, dst_id);
424 }
425 
426 /*******************************************************************************
427  * Handle direct response messages and route to the appropriate destination.
428  ******************************************************************************/
429 static uint64_t direct_resp_smc_handler(uint32_t smc_fid,
430 					bool secure_origin,
431 					uint64_t x1,
432 					uint64_t x2,
433 					uint64_t x3,
434 					uint64_t x4,
435 					void *cookie,
436 					void *handle,
437 					uint64_t flags)
438 {
439 	uint16_t dst_id = ffa_endpoint_destination(x1);
440 	struct secure_partition_desc *sp;
441 	unsigned int idx;
442 
443 	/* Check if arg2 has been populated correctly based on message type. */
444 	if (!direct_msg_validate_arg2(x2)) {
445 		return spmc_ffa_error_return(handle,
446 					     FFA_ERROR_INVALID_PARAMETER);
447 	}
448 
449 	/* Check that the response did not originate from the Normal world. */
450 	if (!secure_origin) {
451 		VERBOSE("Direct Response not supported from Normal World.\n");
452 		return spmc_ffa_error_return(handle,
453 					     FFA_ERROR_INVALID_PARAMETER);
454 	}
455 
456 	/*
457 	 * Check that the response is either targeted to the Normal world or the
458 	 * SPMC e.g. a PM response.
459 	 */
460 	if (!direct_msg_validate_dst_id(dst_id)) {
461 		VERBOSE("Direct response to invalid partition ID (0x%x).\n",
462 			dst_id);
463 		return spmc_ffa_error_return(handle,
464 					     FFA_ERROR_INVALID_PARAMETER);
465 	}
466 
467 	/* Obtain the SP descriptor and update its runtime state. */
468 	sp = spmc_get_sp_ctx(ffa_endpoint_source(x1));
469 	if (sp == NULL) {
470 		VERBOSE("Direct response to unknown partition ID (0x%x).\n",
471 			dst_id);
472 		return spmc_ffa_error_return(handle,
473 					     FFA_ERROR_INVALID_PARAMETER);
474 	}
475 
476 	/* Sanity check state is being tracked correctly in the SPMC. */
477 	idx = get_ec_index(sp);
478 	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
479 
480 	/* Ensure SP execution context was in the right runtime model. */
481 	if (sp->ec[idx].rt_model != RT_MODEL_DIR_REQ) {
482 		VERBOSE("SP context on core%u not handling direct req (%u).\n",
483 			idx, sp->ec[idx].rt_model);
484 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
485 	}
486 
487 	if (sp->ec[idx].dir_req_origin_id != dst_id) {
488 		WARN("Invalid direct resp partition ID 0x%x != 0x%x on core%u.\n",
489 		     dst_id, sp->ec[idx].dir_req_origin_id, idx);
490 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
491 	}
492 
493 	/* Update the state of the SP execution context. */
494 	sp->ec[idx].rt_state = RT_STATE_WAITING;
495 
496 	/* Clear the ongoing direct request ID. */
497 	sp->ec[idx].dir_req_origin_id = INV_SP_ID;
498 
499 	/*
500 	 * If the receiver is the SPMC, perform a synchronous exit back into the
501 	 * SPMC; otherwise forward the response to the Normal world.
502 	 */
503 	if (dst_id == FFA_SPMC_ID) {
504 		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
505 		/* Should not get here. */
506 		panic();
507 	}
508 
509 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
510 			       handle, cookie, flags, dst_id);
511 }
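/*
 * Illustrative end-to-end flow covered by the two handlers above (partition
 * IDs are examples only):
 *
 *   NWd : FFA_MSG_SEND_DIRECT_REQ(src = 0x0000, dst = 0x8001, ...)
 *           -> direct_req_smc_handler(): ec rt_state WAITING -> RUNNING,
 *              rt_model = RT_MODEL_DIR_REQ, request forwarded to the SP.
 *   SP  : FFA_MSG_SEND_DIRECT_RESP(src = 0x8001, dst = 0x0000, ...)
 *           -> direct_resp_smc_handler(): ec rt_state RUNNING -> WAITING,
 *              response forwarded back to the normal world caller.
 */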
512 
513 /*******************************************************************************
514  * This function handles the FFA_MSG_WAIT SMC to allow an SP to relinquish its
515  * cycles.
516  ******************************************************************************/
517 static uint64_t msg_wait_handler(uint32_t smc_fid,
518 				 bool secure_origin,
519 				 uint64_t x1,
520 				 uint64_t x2,
521 				 uint64_t x3,
522 				 uint64_t x4,
523 				 void *cookie,
524 				 void *handle,
525 				 uint64_t flags)
526 {
527 	struct secure_partition_desc *sp;
528 	unsigned int idx;
529 
530 	/*
531 	 * Check that the request did not originate from the Normal world as
532 	 * only the secure world can call this ABI.
533 	 */
534 	if (!secure_origin) {
535 		VERBOSE("Normal world cannot call FFA_MSG_WAIT.\n");
536 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
537 	}
538 
539 	/* Get the descriptor of the SP that invoked FFA_MSG_WAIT. */
540 	sp = spmc_get_current_sp_ctx();
541 	if (sp == NULL) {
542 		return spmc_ffa_error_return(handle,
543 					     FFA_ERROR_INVALID_PARAMETER);
544 	}
545 
546 	/*
547 	 * Get the execution context of the SP that invoked FFA_MSG_WAIT.
548 	 */
549 	idx = get_ec_index(sp);
550 
551 	/* Ensure SP execution context was in the right runtime model. */
552 	if (sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) {
553 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
554 	}
555 
556 	/* Sanity check the state is being tracked correctly in the SPMC. */
557 	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
558 
559 	/*
560 	 * Perform a synchronous exit if the partition was initialising. The
561 	 * state is updated after the exit.
562 	 */
563 	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
564 		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
565 		/* Should not get here */
566 		panic();
567 	}
568 
569 	/* Update the state of the SP execution context. */
570 	sp->ec[idx].rt_state = RT_STATE_WAITING;
571 
572 	/* Resume normal world if a secure interrupt was handled. */
573 	if (sp->ec[idx].rt_model == RT_MODEL_INTR) {
574 		/* FFA_MSG_WAIT can only be called from the secure world. */
575 		unsigned int secure_state_in = SECURE;
576 		unsigned int secure_state_out = NON_SECURE;
577 
578 		cm_el1_sysregs_context_save(secure_state_in);
579 		cm_el1_sysregs_context_restore(secure_state_out);
580 		cm_set_next_eret_context(secure_state_out);
581 		SMC_RET0(cm_get_context(secure_state_out));
582 	}
583 
584 	/* Forward the response to the Normal world. */
585 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
586 			       handle, cookie, flags, FFA_NWD_ID);
587 }
588 
589 static uint64_t ffa_error_handler(uint32_t smc_fid,
590 				 bool secure_origin,
591 				 uint64_t x1,
592 				 uint64_t x2,
593 				 uint64_t x3,
594 				 uint64_t x4,
595 				 void *cookie,
596 				 void *handle,
597 				 uint64_t flags)
598 {
599 	struct secure_partition_desc *sp;
600 	unsigned int idx;
601 
602 	/* Check that the response did not originate from the Normal world. */
603 	if (!secure_origin) {
604 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
605 	}
606 
607 	/* Get the descriptor of the SP that invoked FFA_ERROR. */
608 	sp = spmc_get_current_sp_ctx();
609 	if (sp == NULL) {
610 		return spmc_ffa_error_return(handle,
611 					     FFA_ERROR_INVALID_PARAMETER);
612 	}
613 
614 	/* Get the execution context of the SP that invoked FFA_ERROR. */
615 	idx = get_ec_index(sp);
616 
617 	/*
618 	 * We only expect FFA_ERROR to be received during SP initialisation
619 	 * otherwise this is an invalid call.
620 	 */
621 	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
622 		ERROR("SP 0x%x failed to initialize.\n", sp->sp_id);
623 		spmc_sp_synchronous_exit(&sp->ec[idx], x2);
624 		/* Should not get here. */
625 		panic();
626 	}
627 
628 	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
629 }
630 
631 static uint64_t ffa_version_handler(uint32_t smc_fid,
632 				    bool secure_origin,
633 				    uint64_t x1,
634 				    uint64_t x2,
635 				    uint64_t x3,
636 				    uint64_t x4,
637 				    void *cookie,
638 				    void *handle,
639 				    uint64_t flags)
640 {
641 	uint32_t requested_version = x1 & FFA_VERSION_MASK;
642 
643 	if (requested_version & FFA_VERSION_BIT31_MASK) {
644 		/* Invalid encoding, return an error. */
645 		SMC_RET1(handle, FFA_ERROR_NOT_SUPPORTED);
646 		/* Execution stops here. */
647 	}
648 
649 	/* Determine the caller to store the requested version. */
650 	if (secure_origin) {
651 		/*
652 		 * Ensure that the SP is reporting the same version as
653 		 * specified in its manifest. If these do not match there is
654 		 * something wrong with the SP.
655 		 * TODO: Should we abort the SP? For now assert this is not
656 		 *       the case.
657 		 */
658 		assert(requested_version ==
659 		       spmc_get_current_sp_ctx()->ffa_version);
660 	} else {
661 		/*
662 		 * If this is called by the normal world, record this
663 		 * information in its descriptor.
664 		 */
665 		spmc_get_hyp_ctx()->ffa_version = requested_version;
666 	}
667 
668 	SMC_RET1(handle, MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
669 					  FFA_VERSION_MINOR));
670 }
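/*
 * For reference, FFA_VERSION values are assumed to encode the major version
 * in bits [30:16] and the minor version in bits [15:0] with bit [31] MBZ,
 * which is what MAKE_FFA_VERSION() constructs, e.g.
 *
 *   MAKE_FFA_VERSION(1, 1) == 0x00010001
 */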
671 
672 /*******************************************************************************
673  * Helper function to obtain the FF-A version of the calling partition.
674  ******************************************************************************/
675 uint32_t get_partition_ffa_version(bool secure_origin)
676 {
677 	if (secure_origin) {
678 		return spmc_get_current_sp_ctx()->ffa_version;
679 	} else {
680 		return spmc_get_hyp_ctx()->ffa_version;
681 	}
682 }
683 
684 static uint64_t rxtx_map_handler(uint32_t smc_fid,
685 				 bool secure_origin,
686 				 uint64_t x1,
687 				 uint64_t x2,
688 				 uint64_t x3,
689 				 uint64_t x4,
690 				 void *cookie,
691 				 void *handle,
692 				 uint64_t flags)
693 {
694 	int ret;
695 	uint32_t error_code;
696 	uint32_t mem_atts = secure_origin ? MT_SECURE : MT_NS;
697 	struct mailbox *mbox;
698 	uintptr_t tx_address = x1;
699 	uintptr_t rx_address = x2;
700 	uint32_t page_count = x3 & FFA_RXTX_PAGE_COUNT_MASK; /* Bits [5:0] */
701 	uint32_t buf_size = page_count * FFA_PAGE_SIZE;
702 
703 	/*
704 	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
705 	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
706 	 * ABI on behalf of a VM and reject it if this is the case.
707 	 */
708 	if (tx_address == 0 || rx_address == 0) {
709 		WARN("Mapping RX/TX Buffers on behalf of VM not supported.\n");
710 		return spmc_ffa_error_return(handle,
711 					     FFA_ERROR_INVALID_PARAMETER);
712 	}
713 
714 	/* Ensure the specified buffers are not the same. */
715 	if (tx_address == rx_address) {
716 		WARN("TX Buffer must not be the same as RX Buffer.\n");
717 		return spmc_ffa_error_return(handle,
718 					     FFA_ERROR_INVALID_PARAMETER);
719 	}
720 
721 	/* Ensure the buffer size is not 0. */
722 	if (buf_size == 0U) {
723 		WARN("Buffer size must not be 0\n");
724 		return spmc_ffa_error_return(handle,
725 					     FFA_ERROR_INVALID_PARAMETER);
726 	}
727 
728 	/*
729 	 * Ensure the buffer size is a multiple of the translation granule size
730 	 * in TF-A.
731 	 */
732 	if (buf_size % PAGE_SIZE != 0U) {
733 		WARN("Buffer size must be aligned to translation granule.\n");
734 		return spmc_ffa_error_return(handle,
735 					     FFA_ERROR_INVALID_PARAMETER);
736 	}
737 
738 	/* Obtain the RX/TX buffer pair descriptor. */
739 	mbox = spmc_get_mbox_desc(secure_origin);
740 
741 	spin_lock(&mbox->lock);
742 
743 	/* Check if buffers have already been mapped. */
744 	if (mbox->rx_buffer != 0 || mbox->tx_buffer != 0) {
745 		WARN("RX/TX Buffers already mapped (%p/%p)\n",
746 		     (void *) mbox->rx_buffer, (void *)mbox->tx_buffer);
747 		error_code = FFA_ERROR_DENIED;
748 		goto err;
749 	}
750 
751 	/* memmap the TX buffer as read only. */
752 	ret = mmap_add_dynamic_region(tx_address, /* PA */
753 			tx_address, /* VA */
754 			buf_size, /* size */
755 			mem_atts | MT_RO_DATA); /* attrs */
756 	if (ret != 0) {
757 		/* Return the correct error code. */
758 		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
759 						FFA_ERROR_INVALID_PARAMETER;
760 		WARN("Unable to map TX buffer: %d\n", error_code);
761 		goto err;
762 	}
763 
764 	/* memmap the RX buffer as read write. */
765 	ret = mmap_add_dynamic_region(rx_address, /* PA */
766 			rx_address, /* VA */
767 			buf_size, /* size */
768 			mem_atts | MT_RW_DATA); /* attrs */
769 
770 	if (ret != 0) {
771 		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
772 						FFA_ERROR_INVALID_PARAMETER;
773 		WARN("Unable to map RX buffer: %d\n", error_code);
774 		/* Unmap the TX buffer again. */
775 		mmap_remove_dynamic_region(tx_address, buf_size);
776 		goto err;
777 	}
778 
779 	mbox->tx_buffer = (void *) tx_address;
780 	mbox->rx_buffer = (void *) rx_address;
781 	mbox->rxtx_page_count = page_count;
782 	spin_unlock(&mbox->lock);
783 
784 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
785 	/* Execution stops here. */
786 err:
787 	spin_unlock(&mbox->lock);
788 	return spmc_ffa_error_return(handle, error_code);
789 }
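/*
 * Illustrative caller view of the handler above:
 *
 *   FFA_RXTX_MAP(x1 = TX buffer base, x2 = RX buffer base,
 *                x3 = buffer size in FFA_PAGE_SIZE pages)
 *
 * The two addresses must be non-zero and distinct, and the resulting buffer
 * size must be a non-zero multiple of the EL3 translation granule; remapping
 * an already registered pair fails with FFA_ERROR_DENIED.
 */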
790 
791 static uint64_t rxtx_unmap_handler(uint32_t smc_fid,
792 				   bool secure_origin,
793 				   uint64_t x1,
794 				   uint64_t x2,
795 				   uint64_t x3,
796 				   uint64_t x4,
797 				   void *cookie,
798 				   void *handle,
799 				   uint64_t flags)
800 {
801 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
802 	uint32_t buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
803 
804 	/*
805 	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
806 	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
807 	 * ABI on behalf of a VM and reject it if this is the case.
808 	 */
809 	if (x1 != 0UL) {
810 		return spmc_ffa_error_return(handle,
811 					     FFA_ERROR_INVALID_PARAMETER);
812 	}
813 
814 	spin_lock(&mbox->lock);
815 
816 	/* Check if buffers are currently mapped. */
817 	if (mbox->rx_buffer == 0 || mbox->tx_buffer == 0) {
818 		spin_unlock(&mbox->lock);
819 		return spmc_ffa_error_return(handle,
820 					     FFA_ERROR_INVALID_PARAMETER);
821 	}
822 
823 	/* Unmap RX Buffer */
824 	if (mmap_remove_dynamic_region((uintptr_t) mbox->rx_buffer,
825 				       buf_size) != 0) {
826 		WARN("Unable to unmap RX buffer!\n");
827 	}
828 
829 	mbox->rx_buffer = 0;
830 
831 	/* Unmap TX Buffer */
832 	if (mmap_remove_dynamic_region((uintptr_t) mbox->tx_buffer,
833 				       buf_size) != 0) {
834 		WARN("Unable to unmap TX buffer!\n");
835 	}
836 
837 	mbox->tx_buffer = 0;
838 	mbox->rxtx_page_count = 0;
839 
840 	spin_unlock(&mbox->lock);
841 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
842 }
843 
844 /*
845  * Helper function to populate the properties field of a Partition Info Get
846  * descriptor.
847  */
848 static uint32_t
849 partition_info_get_populate_properties(uint32_t sp_properties,
850 				       enum sp_execution_state sp_ec_state)
851 {
852 	uint32_t properties = sp_properties;
853 	uint32_t ec_state;
854 
855 	/* Determine the execution state of the SP. */
856 	ec_state = sp_ec_state == SP_STATE_AARCH64 ?
857 		   FFA_PARTITION_INFO_GET_AARCH64_STATE :
858 		   FFA_PARTITION_INFO_GET_AARCH32_STATE;
859 
860 	properties |= ec_state << FFA_PARTITION_INFO_GET_EXEC_STATE_SHIFT;
861 
862 	return properties;
863 }
864 
865 /*
866  * Collate the partition information in a v1.1 partition information
867  * descriptor format; this will be converted later if required.
868  */
869 static int partition_info_get_handler_v1_1(uint32_t *uuid,
870 					   struct ffa_partition_info_v1_1
871 						  *partitions,
872 					   uint32_t max_partitions,
873 					   uint32_t *partition_count)
874 {
875 	uint32_t index;
876 	struct ffa_partition_info_v1_1 *desc;
877 	bool null_uuid = is_null_uuid(uuid);
878 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
879 
880 	/* Deal with Logical Partitions. */
881 	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
882 		if (null_uuid || uuid_match(uuid, el3_lp_descs[index].uuid)) {
883 			/* Found a matching UUID, populate appropriately. */
884 			if (*partition_count >= max_partitions) {
885 				return FFA_ERROR_NO_MEMORY;
886 			}
887 
888 			desc = &partitions[*partition_count];
889 			desc->ep_id = el3_lp_descs[index].sp_id;
890 			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
891 			/* LSPs must be AArch64. */
892 			desc->properties =
893 				partition_info_get_populate_properties(
894 					el3_lp_descs[index].properties,
895 					SP_STATE_AARCH64);
896 
897 			if (null_uuid) {
898 				copy_uuid(desc->uuid, el3_lp_descs[index].uuid);
899 			}
900 			(*partition_count)++;
901 		}
902 	}
903 
904 	/* Deal with physical SPs. */
905 	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
906 		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
907 			/* Found a matching UUID, populate appropriately. */
908 			if (*partition_count >= max_partitions) {
909 				return FFA_ERROR_NO_MEMORY;
910 			}
911 
912 			desc = &partitions[*partition_count];
913 			desc->ep_id = sp_desc[index].sp_id;
914 			/*
915 			 * The execution context count must match the number of
916 			 * cores for S-EL1 SPs.
917 			 */
918 			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
919 			desc->properties =
920 				partition_info_get_populate_properties(
921 					sp_desc[index].properties,
922 					sp_desc[index].execution_state);
923 
924 			if (null_uuid) {
925 				copy_uuid(desc->uuid, sp_desc[index].uuid);
926 			}
927 			(*partition_count)++;
928 		}
929 	}
930 	return 0;
931 }
932 
933 /*
934  * Handle the case where the caller only wants the count of partitions
935  * matching a given UUID and does not want the corresponding descriptors
936  * populated.
937  */
938 static uint32_t partition_info_get_handler_count_only(uint32_t *uuid)
939 {
940 	uint32_t index = 0;
941 	uint32_t partition_count = 0;
942 	bool null_uuid = is_null_uuid(uuid);
943 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
944 
945 	/* Deal with Logical Partitions. */
946 	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
947 		if (null_uuid ||
948 		    uuid_match(uuid, el3_lp_descs[index].uuid)) {
949 			(partition_count)++;
950 		}
951 	}
952 
953 	/* Deal with physical SPs. */
954 	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
955 		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
956 			(partition_count)++;
957 		}
958 	}
959 	return partition_count;
960 }
961 
962 /*
963  * If the caller of the PARTITION_INFO_GET ABI was a v1.0 caller, populate
964  * the corresponding descriptor format from the v1.1 descriptor array.
965  */
966 static uint64_t partition_info_populate_v1_0(struct ffa_partition_info_v1_1
967 					     *partitions,
968 					     struct mailbox *mbox,
969 					     int partition_count)
970 {
971 	uint32_t index;
972 	uint32_t buf_size;
973 	uint32_t descriptor_size;
974 	struct ffa_partition_info_v1_0 *v1_0_partitions =
975 		(struct ffa_partition_info_v1_0 *) mbox->rx_buffer;
976 
977 	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
978 	descriptor_size = partition_count *
979 			  sizeof(struct ffa_partition_info_v1_0);
980 
981 	if (descriptor_size > buf_size) {
982 		return FFA_ERROR_NO_MEMORY;
983 	}
984 
985 	for (index = 0U; index < partition_count; index++) {
986 		v1_0_partitions[index].ep_id = partitions[index].ep_id;
987 		v1_0_partitions[index].execution_ctx_count =
988 			partitions[index].execution_ctx_count;
989 		/* Only report v1.0 properties. */
990 		v1_0_partitions[index].properties =
991 			(partitions[index].properties &
992 			FFA_PARTITION_INFO_GET_PROPERTIES_V1_0_MASK);
993 	}
994 	return 0;
995 }
996 
997 /*
998  * Main handler for FFA_PARTITION_INFO_GET which supports both FF-A v1.1 and
999  * v1.0 implementations.
1000  */
1001 static uint64_t partition_info_get_handler(uint32_t smc_fid,
1002 					   bool secure_origin,
1003 					   uint64_t x1,
1004 					   uint64_t x2,
1005 					   uint64_t x3,
1006 					   uint64_t x4,
1007 					   void *cookie,
1008 					   void *handle,
1009 					   uint64_t flags)
1010 {
1011 	int ret;
1012 	uint32_t partition_count = 0;
1013 	uint32_t size = 0;
1014 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1015 	struct mailbox *mbox;
1016 	uint64_t info_get_flags;
1017 	bool count_only;
1018 	uint32_t uuid[4];
1019 
1020 	uuid[0] = x1;
1021 	uuid[1] = x2;
1022 	uuid[2] = x3;
1023 	uuid[3] = x4;
1024 
1025 	/* Determine if the Partition descriptors should be populated. */
1026 	info_get_flags = SMC_GET_GP(handle, CTX_GPREG_X5);
1027 	count_only = (info_get_flags & FFA_PARTITION_INFO_GET_COUNT_FLAG_MASK);
1028 
1029 	/* Handle the case where we don't need to populate the descriptors. */
1030 	if (count_only) {
1031 		partition_count = partition_info_get_handler_count_only(uuid);
1032 		if (partition_count == 0) {
1033 			return spmc_ffa_error_return(handle,
1034 						FFA_ERROR_INVALID_PARAMETER);
1035 		}
1036 	} else {
1037 		struct ffa_partition_info_v1_1 partitions[MAX_SP_LP_PARTITIONS];
1038 
1039 		/*
1040 		 * Handle the case where the partition descriptors are required,
1041 		 * check we have the buffers available and populate the
1042 		 * appropriate structure version.
1043 		 */
1044 
1045 		/* Obtain the v1.1 format of the descriptors. */
1046 		ret = partition_info_get_handler_v1_1(uuid, partitions,
1047 						      MAX_SP_LP_PARTITIONS,
1048 						      &partition_count);
1049 
1050 		/* Check if an error occurred during discovery. */
1051 		if (ret != 0) {
1052 			goto err;
1053 		}
1054 
1055 		/* If we didn't find any matches the UUID is unknown. */
1056 		if (partition_count == 0) {
1057 			ret = FFA_ERROR_INVALID_PARAMETER;
1058 			goto err;
1059 		}
1060 
1061 		/* Obtain the partition mailbox RX/TX buffer pair descriptor. */
1062 		mbox = spmc_get_mbox_desc(secure_origin);
1063 
1064 		/*
1065 		 * If the caller has not bothered registering its RX/TX pair
1066 		 * then return an error code.
1067 		 */
1068 		spin_lock(&mbox->lock);
1069 		if (mbox->rx_buffer == NULL) {
1070 			ret = FFA_ERROR_BUSY;
1071 			goto err_unlock;
1072 		}
1073 
1074 		/* Ensure the RX buffer is currently free. */
1075 		if (mbox->state != MAILBOX_STATE_EMPTY) {
1076 			ret = FFA_ERROR_BUSY;
1077 			goto err_unlock;
1078 		}
1079 
1080 		/* Zero the RX buffer before populating. */
1081 		(void)memset(mbox->rx_buffer, 0,
1082 			     mbox->rxtx_page_count * FFA_PAGE_SIZE);
1083 
1084 		/*
1085 		 * Depending on the FF-A version of the requesting partition
1086 		 * we may need to convert to a v1.0 format otherwise we can copy
1087 		 * directly.
1088 		 */
1089 		if (ffa_version == MAKE_FFA_VERSION(U(1), U(0))) {
1090 			ret = partition_info_populate_v1_0(partitions,
1091 							   mbox,
1092 							   partition_count);
1093 			if (ret != 0) {
1094 				goto err_unlock;
1095 			}
1096 		} else {
1097 			uint32_t buf_size = mbox->rxtx_page_count *
1098 					    FFA_PAGE_SIZE;
1099 
1100 			/* Ensure the descriptor will fit in the buffer. */
1101 			size = sizeof(struct ffa_partition_info_v1_1);
1102 			if (partition_count * size  > buf_size) {
1103 				ret = FFA_ERROR_NO_MEMORY;
1104 				goto err_unlock;
1105 			}
1106 			memcpy(mbox->rx_buffer, partitions,
1107 			       partition_count * size);
1108 		}
1109 
1110 		mbox->state = MAILBOX_STATE_FULL;
1111 		spin_unlock(&mbox->lock);
1112 	}
1113 	SMC_RET4(handle, FFA_SUCCESS_SMC32, 0, partition_count, size);
1114 
1115 err_unlock:
1116 	spin_unlock(&mbox->lock);
1117 err:
1118 	return spmc_ffa_error_return(handle, ret);
1119 }
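/*
 * Illustrative caller view of the handler above:
 *
 *   FFA_PARTITION_INFO_GET(x1-x4 = target UUID, or all zeros for every
 *                          partition; x5 = count-only flag)
 *
 *   - Count-only query: only the number of matching partitions is returned
 *     in w2 and nothing is written to the RX buffer.
 *   - Full query: descriptors (v1.0 or v1.1 layout, depending on the
 *     caller's FF-A version) are written to the caller's RX buffer; w2 holds
 *     the count and, for a v1.1 caller, w3 the per-descriptor size. The
 *     caller then releases the buffer with FFA_RX_RELEASE.
 */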
1120 
1121 static uint64_t ffa_feature_success(void *handle, uint32_t arg2)
1122 {
1123 	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0, arg2);
1124 }
1125 
1126 static uint64_t ffa_features_retrieve_request(bool secure_origin,
1127 					      uint32_t input_properties,
1128 					      void *handle)
1129 {
1130 	/*
1131 	 * If we're called by the normal world we don't support any
1132 	 * additional features.
1133 	 */
1134 	if (!secure_origin) {
1135 		if ((input_properties & FFA_FEATURES_RET_REQ_NS_BIT) != 0U) {
1136 			return spmc_ffa_error_return(handle,
1137 						     FFA_ERROR_NOT_SUPPORTED);
1138 		}
1139 
1140 	} else {
1141 		struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
1142 		/*
1143 		 * If v1.1 the NS bit must be set otherwise it is an invalid
1144 		 * call. If v1.0 check and store whether the SP has requested
1145 		 * the use of the NS bit.
1146 		 */
1147 		if (sp->ffa_version == MAKE_FFA_VERSION(1, 1)) {
1148 			if ((input_properties &
1149 			     FFA_FEATURES_RET_REQ_NS_BIT) == 0U) {
1150 				return spmc_ffa_error_return(handle,
1151 						       FFA_ERROR_NOT_SUPPORTED);
1152 			}
1153 			return ffa_feature_success(handle,
1154 						   FFA_FEATURES_RET_REQ_NS_BIT);
1155 		} else {
1156 			sp->ns_bit_requested = (input_properties &
1157 					       FFA_FEATURES_RET_REQ_NS_BIT) !=
1158 					       0U;
1159 		}
1160 		if (sp->ns_bit_requested) {
1161 			return ffa_feature_success(handle,
1162 						   FFA_FEATURES_RET_REQ_NS_BIT);
1163 		}
1164 	}
1165 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1166 }
1167 
1168 static uint64_t ffa_features_handler(uint32_t smc_fid,
1169 				     bool secure_origin,
1170 				     uint64_t x1,
1171 				     uint64_t x2,
1172 				     uint64_t x3,
1173 				     uint64_t x4,
1174 				     void *cookie,
1175 				     void *handle,
1176 				     uint64_t flags)
1177 {
1178 	uint32_t function_id = (uint32_t) x1;
1179 	uint32_t input_properties = (uint32_t) x2;
1180 
1181 	/* Check if a Feature ID was requested. */
1182 	if ((function_id & FFA_FEATURES_BIT31_MASK) == 0U) {
1183 		/* We currently don't support any additional features. */
1184 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1185 	}
1186 
1187 	/*
1188 	 * Handle the cases where we have separate handlers due to additional
1189 	 * properties.
1190 	 */
1191 	switch (function_id) {
1192 	case FFA_MEM_RETRIEVE_REQ_SMC32:
1193 	case FFA_MEM_RETRIEVE_REQ_SMC64:
1194 		return ffa_features_retrieve_request(secure_origin,
1195 						     input_properties,
1196 						     handle);
1197 	}
1198 
1199 	/*
1200 	 * We don't currently support additional input properties for these
1201 	 * other ABIs therefore ensure this value is set to 0.
1202 	 */
1203 	if (input_properties != 0U) {
1204 		return spmc_ffa_error_return(handle,
1205 					     FFA_ERROR_NOT_SUPPORTED);
1206 	}
1207 
1208 	/* Report if any other FF-A ABI is supported. */
1209 	switch (function_id) {
1210 	/* Supported features from both worlds. */
1211 	case FFA_ERROR:
1212 	case FFA_SUCCESS_SMC32:
1213 	case FFA_INTERRUPT:
1214 	case FFA_SPM_ID_GET:
1215 	case FFA_ID_GET:
1216 	case FFA_FEATURES:
1217 	case FFA_VERSION:
1218 	case FFA_RX_RELEASE:
1219 	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
1220 	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
1221 	case FFA_PARTITION_INFO_GET:
1222 	case FFA_RXTX_MAP_SMC32:
1223 	case FFA_RXTX_MAP_SMC64:
1224 	case FFA_RXTX_UNMAP:
1225 	case FFA_MEM_FRAG_TX:
1226 	case FFA_MSG_RUN:
1227 
1228 		/*
1229 		 * We are relying on the fact that the other registers
1230 		 * will be set to 0 as these values align with the
1231 		 * currently implemented features of the SPMC. If this
1232 		 * changes this function must be extended to handle
1233 		 * reporting the additional functionality.
1234 		 */
1235 
1236 		SMC_RET1(handle, FFA_SUCCESS_SMC32);
1237 		/* Execution stops here. */
1238 
1239 	/* Supported ABIs only from the secure world. */
1240 	case FFA_SECONDARY_EP_REGISTER_SMC64:
1241 	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
1242 	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
1243 	case FFA_MEM_RELINQUISH:
1244 	case FFA_MSG_WAIT:
1245 
1246 		if (!secure_origin) {
1247 			return spmc_ffa_error_return(handle,
1248 				FFA_ERROR_NOT_SUPPORTED);
1249 		}
1250 		SMC_RET1(handle, FFA_SUCCESS_SMC32);
1251 		/* Execution stops here. */
1252 
1253 	/* Supported features only from the normal world. */
1254 	case FFA_MEM_SHARE_SMC32:
1255 	case FFA_MEM_SHARE_SMC64:
1256 	case FFA_MEM_LEND_SMC32:
1257 	case FFA_MEM_LEND_SMC64:
1258 	case FFA_MEM_RECLAIM:
1259 	case FFA_MEM_FRAG_RX:
1260 
1261 		if (secure_origin) {
1262 			return spmc_ffa_error_return(handle,
1263 					FFA_ERROR_NOT_SUPPORTED);
1264 		}
1265 		SMC_RET1(handle, FFA_SUCCESS_SMC32);
1266 		/* Execution stops here. */
1267 
1268 	default:
1269 		return spmc_ffa_error_return(handle,
1270 					FFA_ERROR_NOT_SUPPORTED);
1271 	}
1272 }
1273 
1274 static uint64_t ffa_id_get_handler(uint32_t smc_fid,
1275 				   bool secure_origin,
1276 				   uint64_t x1,
1277 				   uint64_t x2,
1278 				   uint64_t x3,
1279 				   uint64_t x4,
1280 				   void *cookie,
1281 				   void *handle,
1282 				   uint64_t flags)
1283 {
1284 	if (secure_origin) {
1285 		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1286 			 spmc_get_current_sp_ctx()->sp_id);
1287 	} else {
1288 		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1289 			 spmc_get_hyp_ctx()->ns_ep_id);
1290 	}
1291 }
1292 
1293 /*
1294  * Enable an SP to query the ID assigned to the SPMC.
1295  */
1296 static uint64_t ffa_spm_id_get_handler(uint32_t smc_fid,
1297 				       bool secure_origin,
1298 				       uint64_t x1,
1299 				       uint64_t x2,
1300 				       uint64_t x3,
1301 				       uint64_t x4,
1302 				       void *cookie,
1303 				       void *handle,
1304 				       uint64_t flags)
1305 {
1306 	assert(x1 == 0UL);
1307 	assert(x2 == 0UL);
1308 	assert(x3 == 0UL);
1309 	assert(x4 == 0UL);
1310 	assert(SMC_GET_GP(handle, CTX_GPREG_X5) == 0UL);
1311 	assert(SMC_GET_GP(handle, CTX_GPREG_X6) == 0UL);
1312 	assert(SMC_GET_GP(handle, CTX_GPREG_X7) == 0UL);
1313 
1314 	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0, FFA_SPMC_ID);
1315 }
1316 
1317 static uint64_t ffa_run_handler(uint32_t smc_fid,
1318 				bool secure_origin,
1319 				uint64_t x1,
1320 				uint64_t x2,
1321 				uint64_t x3,
1322 				uint64_t x4,
1323 				void *cookie,
1324 				void *handle,
1325 				uint64_t flags)
1326 {
1327 	struct secure_partition_desc *sp;
1328 	uint16_t target_id = FFA_RUN_EP_ID(x1);
1329 	uint16_t vcpu_id = FFA_RUN_VCPU_ID(x1);
1330 	unsigned int idx;
1331 	unsigned int *rt_state;
1332 	unsigned int *rt_model;
1333 
1334 	/* Can only be called from the normal world. */
1335 	if (secure_origin) {
1336 		ERROR("FFA_RUN can only be called from NWd.\n");
1337 		return spmc_ffa_error_return(handle,
1338 					     FFA_ERROR_INVALID_PARAMETER);
1339 	}
1340 
1341 	/* Cannot run a Normal world partition. */
1342 	if (ffa_is_normal_world_id(target_id)) {
1343 		ERROR("Cannot run a NWd partition (0x%x).\n", target_id);
1344 		return spmc_ffa_error_return(handle,
1345 					     FFA_ERROR_INVALID_PARAMETER);
1346 	}
1347 
1348 	/* Check that the target SP exists. */
1349 	sp = spmc_get_sp_ctx(target_id);
1350 	if (sp == NULL) {
1351 		ERROR("Unknown partition ID (0x%x).\n", target_id);
1352 		return spmc_ffa_error_return(handle,
1353 					     FFA_ERROR_INVALID_PARAMETER);
1354 	}
1355 
1356 	idx = get_ec_index(sp);
1357 	if (idx != vcpu_id) {
1358 		ERROR("Cannot run vcpu %d != %d.\n", idx, vcpu_id);
1359 		return spmc_ffa_error_return(handle,
1360 					     FFA_ERROR_INVALID_PARAMETER);
1361 	}
1362 	rt_state = &((sp->ec[idx]).rt_state);
1363 	rt_model = &((sp->ec[idx]).rt_model);
1364 	if (*rt_state == RT_STATE_RUNNING) {
1365 		ERROR("Partition (0x%x) is already running.\n", target_id);
1366 		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
1367 	}
1368 
1369 	/*
1370 	 * Sanity check that if the execution context was not waiting then it
1371 	 * was either in the direct request or the run partition runtime model.
1372 	 */
1373 	if (*rt_state == RT_STATE_PREEMPTED || *rt_state == RT_STATE_BLOCKED) {
1374 		assert(*rt_model == RT_MODEL_RUN ||
1375 		       *rt_model == RT_MODEL_DIR_REQ);
1376 	}
1377 
1378 	/*
1379 	 * If the context was waiting then update the partition runtime model.
1380 	 */
1381 	if (*rt_state == RT_STATE_WAITING) {
1382 		*rt_model = RT_MODEL_RUN;
1383 	}
1384 
1385 	/*
1386 	 * Forward the request to the correct SP vCPU after updating
1387 	 * its state.
1388 	 */
1389 	*rt_state = RT_STATE_RUNNING;
1390 
1391 	return spmc_smc_return(smc_fid, secure_origin, x1, 0, 0, 0,
1392 			       handle, cookie, flags, target_id);
1393 }
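/*
 * Note: FFA_RUN packs both identifiers into w1, unpacked above with
 * FFA_RUN_EP_ID() and FFA_RUN_VCPU_ID(); assuming the usual FF-A encoding of
 * the target endpoint ID in bits [31:16] and the vCPU ID in bits [15:0],
 * running vCPU 0 of an SP with ID 0x8001 would use x1 = 0x80010000.
 */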
1394 
1395 static uint64_t rx_release_handler(uint32_t smc_fid,
1396 				   bool secure_origin,
1397 				   uint64_t x1,
1398 				   uint64_t x2,
1399 				   uint64_t x3,
1400 				   uint64_t x4,
1401 				   void *cookie,
1402 				   void *handle,
1403 				   uint64_t flags)
1404 {
1405 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1406 
1407 	spin_lock(&mbox->lock);
1408 
1409 	if (mbox->state != MAILBOX_STATE_FULL) {
1410 		spin_unlock(&mbox->lock);
1411 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1412 	}
1413 
1414 	mbox->state = MAILBOX_STATE_EMPTY;
1415 	spin_unlock(&mbox->lock);
1416 
1417 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1418 }
1419 
1420 /*
1421  * Perform initial validation on the provided secondary entry point.
1422  * For now ensure it does not lie within the BL31 Image or the SP's
1423  * RX/TX buffers as these are mapped within EL3.
1424  * TODO: perform validation for additional invalid memory regions.
1425  */
1426 static int validate_secondary_ep(uintptr_t ep, struct secure_partition_desc *sp)
1427 {
1428 	struct mailbox *mb;
1429 	uintptr_t buffer_size;
1430 	uintptr_t sp_rx_buffer;
1431 	uintptr_t sp_tx_buffer;
1432 	uintptr_t sp_rx_buffer_limit;
1433 	uintptr_t sp_tx_buffer_limit;
1434 
1435 	mb = &sp->mailbox;
1436 	buffer_size = (uintptr_t) (mb->rxtx_page_count * FFA_PAGE_SIZE);
1437 	sp_rx_buffer = (uintptr_t) mb->rx_buffer;
1438 	sp_tx_buffer = (uintptr_t) mb->tx_buffer;
1439 	sp_rx_buffer_limit = sp_rx_buffer + buffer_size;
1440 	sp_tx_buffer_limit = sp_tx_buffer + buffer_size;
1441 
1442 	/*
1443 	 * Check if the entry point lies within BL31, or the
1444 	 * SP's RX or TX buffer.
1445 	 */
1446 	if ((ep >= BL31_BASE && ep < BL31_LIMIT) ||
1447 	    (ep >= sp_rx_buffer && ep < sp_rx_buffer_limit) ||
1448 	    (ep >= sp_tx_buffer && ep < sp_tx_buffer_limit)) {
1449 		return -EINVAL;
1450 	}
1451 	return 0;
1452 }
1453 
1454 /*******************************************************************************
1455  * This function handles the FFA_SECONDARY_EP_REGISTER SMC to allow an SP to
1456  * register an entry point for initialization during a secondary cold boot.
1457  ******************************************************************************/
1458 static uint64_t ffa_sec_ep_register_handler(uint32_t smc_fid,
1459 					    bool secure_origin,
1460 					    uint64_t x1,
1461 					    uint64_t x2,
1462 					    uint64_t x3,
1463 					    uint64_t x4,
1464 					    void *cookie,
1465 					    void *handle,
1466 					    uint64_t flags)
1467 {
1468 	struct secure_partition_desc *sp;
1469 	struct sp_exec_ctx *sp_ctx;
1470 
1471 	/* This request cannot originate from the Normal world. */
1472 	if (!secure_origin) {
1473 		WARN("%s: Can only be called from SWd.\n", __func__);
1474 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1475 	}
1476 
1477 	/* Get the context of the current SP. */
1478 	sp = spmc_get_current_sp_ctx();
1479 	if (sp == NULL) {
1480 		WARN("%s: Cannot find SP context.\n", __func__);
1481 		return spmc_ffa_error_return(handle,
1482 					     FFA_ERROR_INVALID_PARAMETER);
1483 	}
1484 
1485 	/* Only an S-EL1 SP should be invoking this ABI. */
1486 	if (sp->runtime_el != S_EL1) {
1487 		WARN("%s: Can only be called for a S-EL1 SP.\n", __func__);
1488 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1489 	}
1490 
1491 	/* Ensure the SP is in its initialization state. */
1492 	sp_ctx = spmc_get_sp_ec(sp);
1493 	if (sp_ctx->rt_model != RT_MODEL_INIT) {
1494 		WARN("%s: Can only be called during SP initialization.\n",
1495 		     __func__);
1496 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1497 	}
1498 
1499 	/* Perform initial validation of the secondary entry point. */
1500 	if (validate_secondary_ep(x1, sp)) {
1501 		WARN("%s: Invalid entry point provided (0x%lx).\n",
1502 		     __func__, x1);
1503 		return spmc_ffa_error_return(handle,
1504 					     FFA_ERROR_INVALID_PARAMETER);
1505 	}
1506 
1507 	/*
1508 	 * Update the secondary entrypoint in SP context.
1509 	 * We don't need a lock here as during partition initialization there
1510 	 * will only be a single core online.
1511 	 */
1512 	sp->secondary_ep = x1;
1513 	VERBOSE("%s: 0x%lx\n", __func__, sp->secondary_ep);
1514 
1515 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1516 }
1517 
1518 /*******************************************************************************
1519  * Permissions are encoded using a different format in the FFA_MEM_PERM_* ABIs
1520  * than in the Trusted Firmware, where the mmap_attr_t enum type is used. This
1521  * function converts a permission value from the FF-A format to the mmap_attr_t
1522  * format by setting MT_RW/MT_RO, MT_USER/MT_PRIVILEGED and
1523  * MT_EXECUTE/MT_EXECUTE_NEVER. The other fields are left as 0 because they are
1524  * ignored by the function xlat_change_mem_attributes_ctx().
1525  ******************************************************************************/
1526 static unsigned int ffa_perm_to_mmap_perm(unsigned int perms)
1527 {
1528 	unsigned int tf_attr = 0U;
1529 	unsigned int access;
1530 
1531 	/* Deal with data access permissions first. */
1532 	access = (perms & FFA_MEM_PERM_DATA_MASK) >> FFA_MEM_PERM_DATA_SHIFT;
1533 
1534 	switch (access) {
1535 	case FFA_MEM_PERM_DATA_RW:
1536 		/* Return 0 (invalid) if execute permission is requested with RW. */
1537 		if ((perms & FFA_MEM_PERM_INST_NON_EXEC) != 0) {
1538 			tf_attr |= MT_RW | MT_USER | MT_EXECUTE_NEVER;
1539 		}
1540 		break;
1541 
1542 	case FFA_MEM_PERM_DATA_RO:
1543 		tf_attr |= MT_RO | MT_USER;
1544 		/* Deal with the instruction access permissions next. */
1545 		if ((perms & FFA_MEM_PERM_INST_NON_EXEC) == 0) {
1546 			tf_attr |= MT_EXECUTE;
1547 		} else {
1548 			tf_attr |= MT_EXECUTE_NEVER;
1549 		}
1550 		break;
1551 
1552 	case FFA_MEM_PERM_DATA_NA:
1553 	default:
1554 		return tf_attr;
1555 	}
1556 
1557 	return tf_attr;
1558 }
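/*
 * Worked example for the conversion above: a request for read-only,
 * executable memory,
 *
 *   perms = FFA_MEM_PERM_DATA_RO | FFA_MEM_PERM_INST_EXEC    (== 0x3)
 *
 * maps to MT_RO | MT_USER | MT_EXECUTE, whereas read-write, executable
 * memory (FFA_MEM_PERM_DATA_RW without FFA_MEM_PERM_INST_NON_EXEC) yields 0
 * and is rejected by ffa_mem_perm_set_handler().
 */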
1559 
1560 /*******************************************************************************
1561  * Handler to set the permissions of a set of contiguous pages of a S-EL0 SP
1562  ******************************************************************************/
1563 static uint64_t ffa_mem_perm_set_handler(uint32_t smc_fid,
1564 					 bool secure_origin,
1565 					 uint64_t x1,
1566 					 uint64_t x2,
1567 					 uint64_t x3,
1568 					 uint64_t x4,
1569 					 void *cookie,
1570 					 void *handle,
1571 					 uint64_t flags)
1572 {
1573 	struct secure_partition_desc *sp;
1574 	unsigned int idx;
1575 	uintptr_t base_va = (uintptr_t) x1;
1576 	size_t size = (size_t)(x2 * PAGE_SIZE);
1577 	uint32_t tf_attr;
1578 	int ret;
1579 
1580 	/* This request cannot originate from the Normal world. */
1581 	if (!secure_origin) {
1582 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1583 	}
1584 
1585 	if (size == 0) {
1586 		return spmc_ffa_error_return(handle,
1587 					     FFA_ERROR_INVALID_PARAMETER);
1588 	}
1589 
1590 	/* Get the context of the current SP. */
1591 	sp = spmc_get_current_sp_ctx();
1592 	if (sp == NULL) {
1593 		return spmc_ffa_error_return(handle,
1594 					     FFA_ERROR_INVALID_PARAMETER);
1595 	}
1596 
1597 	/* A S-EL1 SP has no business invoking this ABI. */
1598 	if (sp->runtime_el == S_EL1) {
1599 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1600 	}
1601 
1602 	if ((x3 & ~((uint64_t)FFA_MEM_PERM_MASK)) != 0) {
1603 		return spmc_ffa_error_return(handle,
1604 					     FFA_ERROR_INVALID_PARAMETER);
1605 	}
1606 
1607 	/* Get the execution context of the calling SP. */
1608 	idx = get_ec_index(sp);
1609 
1610 	/*
1611 	 * Ensure that the S-EL0 SP is initialising itself. We do not need to
1612 	 * synchronise this operation through a spinlock since a S-EL0 SP is UP
1613 	 * and can only be initialising on this cpu.
1614 	 */
1615 	if (sp->ec[idx].rt_model != RT_MODEL_INIT) {
1616 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1617 	}
1618 
1619 	VERBOSE("Setting memory permissions:\n");
1620 	VERBOSE("  Start address  : 0x%lx\n", base_va);
1621 	VERBOSE("  Number of pages: %lu (%zu bytes)\n", x2, size);
1622 	VERBOSE("  Attributes     : 0x%x\n", (uint32_t)x3);
1623 
1624 	/* Convert inbound permissions to TF-A permission attributes */
1625 	tf_attr = ffa_perm_to_mmap_perm((unsigned int)x3);
1626 	if (tf_attr == 0U) {
1627 		return spmc_ffa_error_return(handle,
1628 					     FFA_ERROR_INVALID_PARAMETER);
1629 	}
1630 
1631 	/* Request the change in permissions */
1632 	ret = xlat_change_mem_attributes_ctx(sp->xlat_ctx_handle,
1633 					     base_va, size, tf_attr);
1634 	if (ret != 0) {
1635 		return spmc_ffa_error_return(handle,
1636 					     FFA_ERROR_INVALID_PARAMETER);
1637 	}
1638 
1639 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1640 }
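/*
 * Illustrative caller view of the handler above, invoked by an S-EL0 SP
 * while still executing in the RT_MODEL_INIT runtime model:
 *
 *   FFA_MEM_PERM_SET(x1 = base VA of the region,
 *                    x2 = number of pages (region size is x2 * PAGE_SIZE),
 *                    x3 = FFA_MEM_PERM_* permissions, e.g.
 *                         FFA_MEM_PERM_DATA_RO | FFA_MEM_PERM_INST_EXEC)
 *
 * Calls from an S-EL1 SP, or made once initialisation has completed, are
 * rejected with FFA_ERROR_DENIED.
 */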
1641 
1642 /*******************************************************************************
1643  * Permissions are encoded using a different format in the FFA_MEM_PERM_* ABIs
1644  * than in the Trusted Firmware, where the mmap_attr_t enum type is used. This
1645  * function converts a permission value from the mmap_attr_t format to the FF-A
1646  * format.
1647  ******************************************************************************/
1648 static unsigned int mmap_perm_to_ffa_perm(unsigned int attr)
1649 {
1650 	unsigned int perms = 0U;
1651 	unsigned int data_access;
1652 
1653 	if ((attr & MT_USER) == 0) {
1654 		/* No access from EL0. */
1655 		data_access = FFA_MEM_PERM_DATA_NA;
1656 	} else {
1657 		if ((attr & MT_RW) != 0) {
1658 			data_access = FFA_MEM_PERM_DATA_RW;
1659 		} else {
1660 			data_access = FFA_MEM_PERM_DATA_RO;
1661 		}
1662 	}
1663 
1664 	perms |= (data_access & FFA_MEM_PERM_DATA_MASK)
1665 		<< FFA_MEM_PERM_DATA_SHIFT;
1666 
1667 	if ((attr & MT_EXECUTE_NEVER) != 0U) {
1668 		perms |= FFA_MEM_PERM_INST_NON_EXEC;
1669 	}
1670 
1671 	return perms;
1672 }
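/*
 * Worked example for the reverse conversion above:
 *
 *   MT_RW | MT_USER | MT_EXECUTE_NEVER
 *     -> FFA_MEM_PERM_DATA_RW | FFA_MEM_PERM_INST_NON_EXEC   (== 0x5)
 *
 * Attributes without MT_USER map to FFA_MEM_PERM_DATA_NA since the page is
 * not accessible from S-EL0 at all.
 */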
1673 
1674 /*******************************************************************************
1675  * Handler to get the permissions of a set of contiguous pages of a S-EL0 SP
1676  ******************************************************************************/
1677 static uint64_t ffa_mem_perm_get_handler(uint32_t smc_fid,
1678 					 bool secure_origin,
1679 					 uint64_t x1,
1680 					 uint64_t x2,
1681 					 uint64_t x3,
1682 					 uint64_t x4,
1683 					 void *cookie,
1684 					 void *handle,
1685 					 uint64_t flags)
1686 {
1687 	struct secure_partition_desc *sp;
1688 	unsigned int idx;
1689 	uintptr_t base_va = (uintptr_t)x1;
1690 	uint32_t tf_attr = 0;
1691 	int ret;
1692 
1693 	/* This request cannot originate from the Normal world. */
1694 	if (!secure_origin) {
1695 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1696 	}
1697 
1698 	/* Get the context of the current SP. */
1699 	sp = spmc_get_current_sp_ctx();
1700 	if (sp == NULL) {
1701 		return spmc_ffa_error_return(handle,
1702 					     FFA_ERROR_INVALID_PARAMETER);
1703 	}
1704 
1705 	/* An S-EL1 SP has no business invoking this ABI. */
1706 	if (sp->runtime_el == S_EL1) {
1707 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1708 	}
1709 
1710 	/* Get the execution context of the calling SP. */
1711 	idx = get_ec_index(sp);
1712 
1713 	/*
1714 	 * Ensure that the S-EL0 SP is initialising itself. We do not need to
1715 	 * synchronise this operation through a spinlock since an S-EL0 SP is UP
1716 	 * and can only be initialising on this CPU.
1717 	 */
1718 	if (sp->ec[idx].rt_model != RT_MODEL_INIT) {
1719 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1720 	}
1721 
1722 	/* Request the permissions */
1723 	ret = xlat_get_mem_attributes_ctx(sp->xlat_ctx_handle, base_va, &tf_attr);
1724 	if (ret != 0) {
1725 		return spmc_ffa_error_return(handle,
1726 					     FFA_ERROR_INVALID_PARAMETER);
1727 	}
1728 
1729 	/* Convert the TF-A permissions to FF-A permission attributes. */
1730 	x2 = mmap_perm_to_ffa_perm(tf_attr);
1731 
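	/* Return the FF-A encoded permissions to the caller in x2/w2. */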
1732 	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0, x2);
1733 }
1734 
1735 /*******************************************************************************
1736  * This function parses the Secure Partition manifest. From the manifest it
1737  * fetches the details needed to prepare the Secure Partition image context
1738  * and, if present, the Secure Partition image boot arguments.
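 *
 * A hypothetical manifest fragment naming the properties parsed below; the
 * values are illustrative placeholders only, not a recommendation (refer to
 * the FF-A manifest binding documentation for the exact encodings):
 *
 *     uuid = <0x6b43b460 0x74a24b78 0xade24502 0x40682886>;
 *     exception-level = <2>;
 *     ffa-version = <0x00010001>;
 *     execution-state = <0>;
 *     messaging-method = <3>;
 *     execution-ctx-count = <8>;
 *     id = <0x8001>;
 *     power-management-messages = <0>;
 *     gp-register-num = <0>;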
1739  ******************************************************************************/
1740 static int sp_manifest_parse(void *sp_manifest, int offset,
1741 			     struct secure_partition_desc *sp,
1742 			     entry_point_info_t *ep_info,
1743 			     int32_t *boot_info_reg)
1744 {
1745 	int32_t ret, node;
1746 	uint32_t config_32;
1747 
1748 	/*
1749 	 * Look for the mandatory fields that are expected to be present in
1750 	 * the SP manifests.
1751 	 */
1752 	node = fdt_path_offset(sp_manifest, "/");
1753 	if (node < 0) {
1754 		ERROR("Did not find root node.\n");
1755 		return node;
1756 	}
1757 
1758 	ret = fdt_read_uint32_array(sp_manifest, node, "uuid",
1759 				    ARRAY_SIZE(sp->uuid), sp->uuid);
1760 	if (ret != 0) {
1761 		ERROR("Missing Secure Partition UUID.\n");
1762 		return ret;
1763 	}
1764 
1765 	ret = fdt_read_uint32(sp_manifest, node, "exception-level", &config_32);
1766 	if (ret != 0) {
1767 		ERROR("Missing SP Exception Level information.\n");
1768 		return ret;
1769 	}
1770 
1771 	sp->runtime_el = config_32;
1772 
1773 	ret = fdt_read_uint32(sp_manifest, node, "ffa-version", &config_32);
1774 	if (ret != 0) {
1775 		ERROR("Missing Secure Partition FF-A Version.\n");
1776 		return ret;
1777 	}
1778 
1779 	sp->ffa_version = config_32;
1780 
1781 	ret = fdt_read_uint32(sp_manifest, node, "execution-state", &config_32);
1782 	if (ret != 0) {
1783 		ERROR("Missing Secure Partition Execution State.\n");
1784 		return ret;
1785 	}
1786 
1787 	sp->execution_state = config_32;
1788 
1789 	ret = fdt_read_uint32(sp_manifest, node,
1790 			      "messaging-method", &config_32);
1791 	if (ret != 0) {
1792 		ERROR("Missing Secure Partition messaging method.\n");
1793 		return ret;
1794 	}
1795 
1796 	/* Validate this entry; we currently only support direct messaging. */
1797 	if ((config_32 & ~(FFA_PARTITION_DIRECT_REQ_RECV |
1798 			  FFA_PARTITION_DIRECT_REQ_SEND)) != 0U) {
1799 		WARN("Invalid Secure Partition messaging method (0x%x)\n",
1800 		     config_32);
1801 		return -EINVAL;
1802 	}
1803 
1804 	sp->properties = config_32;
1805 
1806 	ret = fdt_read_uint32(sp_manifest, node,
1807 			      "execution-ctx-count", &config_32);
1808 
1809 	if (ret != 0) {
1810 		ERROR("Missing SP Execution Context Count.\n");
1811 		return ret;
1812 	}
1813 
1814 	/*
1815 	 * Ensure this field is set correctly in the manifest. However, since
1816 	 * this is currently a hardcoded value for S-EL1 partitions, we don't
1817 	 * need to save it here; just validate it.
1818 	 */
1819 	if ((sp->runtime_el == S_EL1) && (config_32 != PLATFORM_CORE_COUNT)) {
1820 		ERROR("SP Execution Context Count (%u) must be %u.\n",
1821 			config_32, PLATFORM_CORE_COUNT);
1822 		return -EINVAL;
1823 	}
1824 
1825 	/*
1826 	 * Look for the optional fields that are expected to be present in
1827 	 * an SP manifest.
1828 	 */
1829 	ret = fdt_read_uint32(sp_manifest, node, "id", &config_32);
1830 	if (ret != 0) {
1831 		WARN("Missing Secure Partition ID.\n");
1832 	} else {
1833 		if (!is_ffa_secure_id_valid(config_32)) {
1834 			ERROR("Invalid Secure Partition ID (0x%x).\n",
1835 			      config_32);
1836 			return -EINVAL;
1837 		}
1838 		sp->sp_id = config_32;
1839 	}
1840 
1841 	ret = fdt_read_uint32(sp_manifest, node,
1842 			      "power-management-messages", &config_32);
1843 	if (ret != 0) {
1844 		WARN("Missing Power Management Messages entry.\n");
1845 	} else {
1846 		/*
1847 		 * Ensure only the currently supported power messages have
1848 		 * been requested.
1849 		 */
1850 		if (config_32 & ~(FFA_PM_MSG_SUB_CPU_OFF |
1851 				  FFA_PM_MSG_SUB_CPU_SUSPEND |
1852 				  FFA_PM_MSG_SUB_CPU_SUSPEND_RESUME)) {
1853 			ERROR("Requested unsupported PM messages (%x)\n",
1854 			      config_32);
1855 			return -EINVAL;
1856 		}
1857 		sp->pwr_mgmt_msgs = config_32;
1858 	}
1859 
1860 	ret = fdt_read_uint32(sp_manifest, node,
1861 			      "gp-register-num", &config_32);
1862 	if (ret != 0) {
1863 		WARN("Missing boot information register.\n");
1864 	} else {
1865 		/* Check that a register number between 0 and 3 is specified. */
1866 		if (config_32 < 4) {
1867 			*boot_info_reg = config_32;
1868 		} else {
1869 			WARN("Incorrect boot information register (%u).\n",
1870 			     config_32);
1871 		}
1872 	}
1873 
1874 	return 0;
1875 }
1876 
1877 /*******************************************************************************
1878  * This function gets the Secure Partition Manifest base and maps the manifest
1879  * region.
1880  * Currently only one Secure Partition manifest is considered; it is used to
1881  * prepare the context for the single Secure Partition.
1882  ******************************************************************************/
1883 static int find_and_prepare_sp_context(void)
1884 {
1885 	void *sp_manifest;
1886 	uintptr_t manifest_base;
1887 	uintptr_t manifest_base_align;
1888 	entry_point_info_t *next_image_ep_info;
1889 	int32_t ret, boot_info_reg = -1;
1890 	struct secure_partition_desc *sp;
1891 
1892 	next_image_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
1893 	if (next_image_ep_info == NULL) {
1894 		WARN("No Secure Partition image provided by BL2.\n");
1895 		return -ENOENT;
1896 	}
1897 
1898 	sp_manifest = (void *)next_image_ep_info->args.arg0;
1899 	if (sp_manifest == NULL) {
1900 		WARN("Secure Partition manifest absent.\n");
1901 		return -ENOENT;
1902 	}
1903 
1904 	manifest_base = (uintptr_t)sp_manifest;
1905 	manifest_base_align = page_align(manifest_base, DOWN);
1906 
1907 	/*
1908 	 * Map the secure partition manifest region in the EL3 translation
1909 	 * regime.
1910 	 * Map an area of (2 * PAGE_SIZE) for now: once the manifest base is
1911 	 * aligned down to a page boundary, a single PAGE_SIZE region starting
1912 	 * at the aligned base may not fully cover the manifest.
1913 	 */
1914 	ret = mmap_add_dynamic_region((unsigned long long)manifest_base_align,
1915 				      manifest_base_align,
1916 				      PAGE_SIZE * 2,
1917 				      MT_RO_DATA);
1918 	if (ret != 0) {
1919 		ERROR("Error while mapping SP manifest (%d).\n", ret);
1920 		return ret;
1921 	}
1922 
1923 	ret = fdt_node_offset_by_compatible(sp_manifest, -1,
1924 					    "arm,ffa-manifest-1.0");
1925 	if (ret < 0) {
1926 		ERROR("Error while reading the SP manifest.\n");
1927 		return -EINVAL;
1928 	}
1929 
1930 	/*
1931 	 * Store the size of the manifest so that it can be used later to pass
1932 	 * the manifest as boot information.
1933 	 */
1934 	next_image_ep_info->args.arg1 = fdt_totalsize(sp_manifest);
1935 	INFO("Manifest address = 0x%lx, size = %lu bytes\n", manifest_base,
1936 	     next_image_ep_info->args.arg1);
1937 
1938 	/*
1939 	 * Select an SP descriptor for initialising the partition's execution
1940 	 * context on the primary CPU.
1941 	 */
1942 	sp = spmc_get_current_sp_ctx();
1943 
1944 #if SPMC_AT_EL3_SEL0_SP
1945 	/* Assign the S-EL0 translation tables context to the SP descriptor. */
1946 	sp->xlat_ctx_handle = spm_get_sp_xlat_context();
1948 #endif /* SPMC_AT_EL3_SEL0_SP */
1949 	/* Initialize entry point information for the SP */
1950 	SET_PARAM_HEAD(next_image_ep_info, PARAM_EP, VERSION_1,
1951 		       SECURE | EP_ST_ENABLE);
1952 
1953 	/* Parse the SP manifest; 'ret' holds the node offset found above. */
1954 	ret = sp_manifest_parse(sp_manifest, ret, sp, next_image_ep_info,
1955 				&boot_info_reg);
1956 	if (ret != 0) {
1957 		ERROR("Error in Secure Partition manifest parsing.\n");
1958 		return ret;
1959 	}
1960 
1961 	/* Check that the runtime EL specified in the manifest is valid. */
1962 	if (sp->runtime_el != S_EL0 && sp->runtime_el != S_EL1) {
1963 		ERROR("Unexpected runtime EL: %d\n", sp->runtime_el);
1964 		return -EINVAL;
1965 	}
1966 
1967 	/* Perform any common initialisation. */
1968 	spmc_sp_common_setup(sp, next_image_ep_info, boot_info_reg);
1969 
1970 	/* Perform any initialisation specific to S-EL1 SPs. */
1971 	if (sp->runtime_el == S_EL1) {
1972 		spmc_el1_sp_setup(sp, next_image_ep_info);
1973 	}
1974 
1975 #if SPMC_AT_EL3_SEL0_SP
1976 	/* Set up the SPSR in the endpoint info for common context management. */
1977 	if (sp->runtime_el == S_EL0) {
1978 		spmc_el0_sp_spsr_setup(next_image_ep_info);
1979 	}
1980 #endif /* SPMC_AT_EL3_SEL0_SP */
1981 
1982 	/* Initialize the SP context with the required ep info. */
1983 	spmc_sp_common_ep_commit(sp, next_image_ep_info);
1984 
1985 #if SPMC_AT_EL3_SEL0_SP
1986 	/*
1987 	 * Perform any S-EL0 specific initialisation that is not handled by the
1988 	 * common context management routine.
1989 	 */
1990 	if (sp->runtime_el == S_EL0) {
1991 		spmc_el0_sp_setup(sp, boot_info_reg, sp_manifest);
1992 	}
1993 #endif /* SPMC_AT_EL3_SEL0_SP */
1994 	return 0;
1995 }
1996 
1997 /*******************************************************************************
1998  * This function performs initial validation of the EL3 Logical Partition
1999  * descriptors and then initialises each Logical Partition in turn.
2000  ******************************************************************************/
2001 static int32_t logical_sp_init(void)
2002 {
2003 	int32_t rc = 0;
2004 	struct el3_lp_desc *el3_lp_descs;
2005 
2006 	/* Perform initial validation of the Logical Partitions. */
2007 	rc = el3_sp_desc_validate();
2008 	if (rc != 0) {
2009 		ERROR("Logical Partition validation failed!\n");
2010 		return rc;
2011 	}
2012 
2013 	el3_lp_descs = get_el3_lp_array();
2014 
2015 	INFO("Logical Secure Partition init start.\n");
2016 	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
2017 		rc = el3_lp_descs[i].init();
2018 		if (rc != 0) {
2019 			ERROR("Logical SP (0x%x) failed to initialize\n",
2020 			      el3_lp_descs[i].sp_id);
2021 			return rc;
2022 		}
2023 		VERBOSE("Logical SP (0x%x) Initialized\n",
2024 			      el3_lp_descs[i].sp_id);
2025 	}
2026 
2027 	INFO("Logical Secure Partition init completed.\n");
2028 
2029 	return rc;
2030 }
2031 
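/*******************************************************************************
 * This function takes an SP execution context pointer and performs a
 * synchronous entry into the Secure Partition on the current CPU.
 ******************************************************************************/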
2032 uint64_t spmc_sp_synchronous_entry(struct sp_exec_ctx *ec)
2033 {
2034 	uint64_t rc;
2035 
2036 	assert(ec != NULL);
2037 
2038 	/* Assign the context of the SP to this CPU */
2039 	cm_set_context(&(ec->cpu_ctx), SECURE);
2040 
2041 	/* Restore the context assigned above */
2042 	cm_el1_sysregs_context_restore(SECURE);
2043 	cm_set_next_eret_context(SECURE);
2044 
2045 	/* Invalidate TLBs at EL1. */
2046 	tlbivmalle1();
2047 	dsbish();
2048 
2049 	/* Enter Secure Partition */
2050 	rc = spm_secure_partition_enter(&ec->c_rt_ctx);
2051 
2052 	/* Save secure state */
2053 	cm_el1_sysregs_context_save(SECURE);
2054 
2055 	return rc;
2056 }
2057 
2058 /*******************************************************************************
2059  * SPMC Helper Functions.
2060  ******************************************************************************/
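/*
 * Perform the deferred initialisation of the Secure Partition on the primary
 * CPU. This is the routine registered with bl31_register_bl32_init() below;
 * it is assumed to follow that interface's convention of returning a non-zero
 * value on success and 0 on failure.
 */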
2061 static int32_t sp_init(void)
2062 {
2063 	uint64_t rc;
2064 	struct secure_partition_desc *sp;
2065 	struct sp_exec_ctx *ec;
2066 
2067 	sp = spmc_get_current_sp_ctx();
2068 	ec = spmc_get_sp_ec(sp);
2069 	ec->rt_model = RT_MODEL_INIT;
2070 	ec->rt_state = RT_STATE_RUNNING;
2071 
2072 	INFO("Secure Partition (0x%x) init start.\n", sp->sp_id);
2073 
2074 	rc = spmc_sp_synchronous_entry(ec);
2075 	if (rc != 0) {
2076 		/* Indicate SP init was not successful. */
2077 		ERROR("SP (0x%x) failed to initialize (%lu).\n",
2078 		      sp->sp_id, rc);
2079 		return 0;
2080 	}
2081 
2082 	ec->rt_state = RT_STATE_WAITING;
2083 	INFO("Secure Partition initialized.\n");
2084 
2085 	return 1;
2086 }
2087 
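/* Reset each SP descriptor to a known initial state before manifest parsing. */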
2088 static void initialize_sp_descs(void)
2089 {
2090 	struct secure_partition_desc *sp;
2091 
2092 	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
2093 		sp = &sp_desc[i];
2094 		sp->sp_id = INV_SP_ID;
2095 		sp->mailbox.rx_buffer = NULL;
2096 		sp->mailbox.tx_buffer = NULL;
2097 		sp->mailbox.state = MAILBOX_STATE_EMPTY;
2098 		sp->secondary_ep = 0;
2099 	}
2100 }
2101 
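/* Reset each NS endpoint descriptor to a known initial state. */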
2102 static void initialize_ns_ep_descs(void)
2103 {
2104 	struct ns_endpoint_desc *ns_ep;
2105 
2106 	for (unsigned int i = 0U; i < NS_PARTITION_COUNT; i++) {
2107 		ns_ep = &ns_ep_desc[i];
2108 		/*
2109 		 * Clashes with the Hypervisor ID but will not be a
2110 		 * problem in practice.
2111 		 */
2112 		ns_ep->ns_ep_id = 0;
2113 		ns_ep->ffa_version = 0;
2114 		ns_ep->mailbox.rx_buffer = NULL;
2115 		ns_ep->mailbox.tx_buffer = NULL;
2116 		ns_ep->mailbox.state = MAILBOX_STATE_EMPTY;
2117 	}
2118 }
2119 
2120 /*******************************************************************************
2121  * Initialize SPMC attributes for the SPMD.
2122  ******************************************************************************/
2123 void spmc_populate_attrs(spmc_manifest_attribute_t *spmc_attrs)
2124 {
2125 	spmc_attrs->major_version = FFA_VERSION_MAJOR;
2126 	spmc_attrs->minor_version = FFA_VERSION_MINOR;
2127 	spmc_attrs->exec_state = MODE_RW_64;
2128 	spmc_attrs->spmc_id = FFA_SPMC_ID;
2129 }
2130 
2131 /*******************************************************************************
2132  * Initialize contexts of all Secure Partitions.
2133  ******************************************************************************/
2134 int32_t spmc_setup(void)
2135 {
2136 	int32_t ret;
2137 	uint32_t flags;
2138 
2139 	/* Initialize endpoint descriptors */
2140 	initialize_sp_descs();
2141 	initialize_ns_ep_descs();
2142 
2143 	/*
2144 	 * Retrieve the datastore allocated by platform code for tracking shared
2145 	 * memory requests, and zero the region if it is available.
2146 	 */
2147 	ret = plat_spmc_shmem_datastore_get(&spmc_shmem_obj_state.data,
2148 					    &spmc_shmem_obj_state.data_size);
2149 	if (ret != 0) {
2150 		ERROR("Failed to obtain memory descriptor backing store!\n");
2151 		return ret;
2152 	}
2153 	memset(spmc_shmem_obj_state.data, 0, spmc_shmem_obj_state.data_size);
2154 
2155 	/* Setup logical SPs. */
2156 	ret = logical_sp_init();
2157 	if (ret != 0) {
2158 		ERROR("Failed to initialize Logical Partitions.\n");
2159 		return ret;
2160 	}
2161 
2162 	/* Perform physical SP setup. */
2163 
2164 	/* Disable MMU at EL1 (initialized by BL2) */
2165 	disable_mmu_icache_el1();
2166 
2167 	/* Initialize context of the SP */
2168 	INFO("Secure Partition context setup start.\n");
2169 
2170 	ret = find_and_prepare_sp_context();
2171 	if (ret != 0) {
2172 		ERROR("Error finding the SP and preparing its context.\n");
2173 		return ret;
2174 	}
2175 
2176 	/* Register power management hooks with PSCI */
2177 	psci_register_spd_pm_hook(&spmc_pm);
2178 
2179 	/*
2180 	 * Register an interrupt handler for S-EL1 interrupts
2181 	 * that are generated while execution is in the
2182 	 * non-secure state.
2183 	 */
2184 	flags = 0;
2185 	set_interrupt_rm_flag(flags, NON_SECURE);
2186 	ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
2187 					      spmc_sp_interrupt_handler,
2188 					      flags);
2189 	if (ret != 0) {
2190 		ERROR("Failed to register interrupt handler! (%d)\n", ret);
2191 		panic();
2192 	}
2193 
2194 	/* Register the SP init function for deferred initialisation. */
2195 	bl31_register_bl32_init(&sp_init);
2196 
2197 	INFO("Secure Partition setup done.\n");
2198 
2199 	return 0;
2200 }
2201 
2202 /*******************************************************************************
2203  * Secure Partition Manager SMC handler.
2204  ******************************************************************************/
2205 uint64_t spmc_smc_handler(uint32_t smc_fid,
2206 			  bool secure_origin,
2207 			  uint64_t x1,
2208 			  uint64_t x2,
2209 			  uint64_t x3,
2210 			  uint64_t x4,
2211 			  void *cookie,
2212 			  void *handle,
2213 			  uint64_t flags)
2214 {
2215 	switch (smc_fid) {
2216 
2217 	case FFA_VERSION:
2218 		return ffa_version_handler(smc_fid, secure_origin, x1, x2, x3,
2219 					   x4, cookie, handle, flags);
2220 
2221 	case FFA_SPM_ID_GET:
2222 		return ffa_spm_id_get_handler(smc_fid, secure_origin, x1, x2,
2223 					     x3, x4, cookie, handle, flags);
2224 
2225 	case FFA_ID_GET:
2226 		return ffa_id_get_handler(smc_fid, secure_origin, x1, x2, x3,
2227 					  x4, cookie, handle, flags);
2228 
2229 	case FFA_FEATURES:
2230 		return ffa_features_handler(smc_fid, secure_origin, x1, x2, x3,
2231 					    x4, cookie, handle, flags);
2232 
2233 	case FFA_SECONDARY_EP_REGISTER_SMC64:
2234 		return ffa_sec_ep_register_handler(smc_fid, secure_origin, x1,
2235 						   x2, x3, x4, cookie, handle,
2236 						   flags);
2237 
2238 	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
2239 	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
2240 		return direct_req_smc_handler(smc_fid, secure_origin, x1, x2,
2241 					      x3, x4, cookie, handle, flags);
2242 
2243 	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
2244 	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
2245 		return direct_resp_smc_handler(smc_fid, secure_origin, x1, x2,
2246 					       x3, x4, cookie, handle, flags);
2247 
2248 	case FFA_RXTX_MAP_SMC32:
2249 	case FFA_RXTX_MAP_SMC64:
2250 		return rxtx_map_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2251 					cookie, handle, flags);
2252 
2253 	case FFA_RXTX_UNMAP:
2254 		return rxtx_unmap_handler(smc_fid, secure_origin, x1, x2, x3,
2255 					  x4, cookie, handle, flags);
2256 
2257 	case FFA_PARTITION_INFO_GET:
2258 		return partition_info_get_handler(smc_fid, secure_origin, x1,
2259 						  x2, x3, x4, cookie, handle,
2260 						  flags);
2261 
2262 	case FFA_RX_RELEASE:
2263 		return rx_release_handler(smc_fid, secure_origin, x1, x2, x3,
2264 					  x4, cookie, handle, flags);
2265 
2266 	case FFA_MSG_WAIT:
2267 		return msg_wait_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2268 					cookie, handle, flags);
2269 
2270 	case FFA_ERROR:
2271 		return ffa_error_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2272 					cookie, handle, flags);
2273 
2274 	case FFA_MSG_RUN:
2275 		return ffa_run_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2276 				       cookie, handle, flags);
2277 
2278 	case FFA_MEM_SHARE_SMC32:
2279 	case FFA_MEM_SHARE_SMC64:
2280 	case FFA_MEM_LEND_SMC32:
2281 	case FFA_MEM_LEND_SMC64:
2282 		return spmc_ffa_mem_send(smc_fid, secure_origin, x1, x2, x3, x4,
2283 					 cookie, handle, flags);
2284 
2285 	case FFA_MEM_FRAG_TX:
2286 		return spmc_ffa_mem_frag_tx(smc_fid, secure_origin, x1, x2, x3,
2287 					    x4, cookie, handle, flags);
2288 
2289 	case FFA_MEM_FRAG_RX:
2290 		return spmc_ffa_mem_frag_rx(smc_fid, secure_origin, x1, x2, x3,
2291 					    x4, cookie, handle, flags);
2292 
2293 	case FFA_MEM_RETRIEVE_REQ_SMC32:
2294 	case FFA_MEM_RETRIEVE_REQ_SMC64:
2295 		return spmc_ffa_mem_retrieve_req(smc_fid, secure_origin, x1, x2,
2296 						 x3, x4, cookie, handle, flags);
2297 
2298 	case FFA_MEM_RELINQUISH:
2299 		return spmc_ffa_mem_relinquish(smc_fid, secure_origin, x1, x2,
2300 					       x3, x4, cookie, handle, flags);
2301 
2302 	case FFA_MEM_RECLAIM:
2303 		return spmc_ffa_mem_reclaim(smc_fid, secure_origin, x1, x2, x3,
2304 					    x4, cookie, handle, flags);
2305 
2306 	case FFA_MEM_PERM_GET:
2307 		return ffa_mem_perm_get_handler(smc_fid, secure_origin, x1, x2,
2308 						x3, x4, cookie, handle, flags);
2309 
2310 	case FFA_MEM_PERM_SET:
2311 		return ffa_mem_perm_set_handler(smc_fid, secure_origin, x1, x2,
2312 						x3, x4, cookie, handle, flags);
2313 
2314 	default:
2315 		WARN("Unsupported FF-A call 0x%08x.\n", smc_fid);
2316 		break;
2317 	}
2318 	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
2319 }
2320 
2321 /*******************************************************************************
2322  * This function is the handler registered for S-EL1 interrupts by the SPMC. It
2323  * validates the interrupt and upon success arranges entry into the SP for
2324  * handling the interrupt.
2325  ******************************************************************************/
2326 static uint64_t spmc_sp_interrupt_handler(uint32_t id,
2327 					  uint32_t flags,
2328 					  void *handle,
2329 					  void *cookie)
2330 {
2331 	struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
2332 	struct sp_exec_ctx *ec;
2333 	uint32_t linear_id = plat_my_core_pos();
2334 
2335 	/* Sanity check to avoid a NULL pointer dereference. */
2336 	assert(sp != NULL);
2337 
2338 	/* Check the security state when the exception was generated. */
2339 	assert(get_interrupt_src_ss(flags) == NON_SECURE);
2340 
2341 	/* Panic if not an S-EL1 Partition. */
2342 	if (sp->runtime_el != S_EL1) {
2343 		ERROR("Interrupt received for a non S-EL1 SP on core%u.\n",
2344 		      linear_id);
2345 		panic();
2346 	}
2347 
2348 	/* Obtain a reference to the SP execution context. */
2349 	ec = spmc_get_sp_ec(sp);
2350 
2351 	/* Ensure the execution context is in the waiting state, else panic. */
2352 	if (ec->rt_state != RT_STATE_WAITING) {
2353 		ERROR("SP EC on core%u is not waiting (%u), it is (%u).\n",
2354 		      linear_id, RT_STATE_WAITING, ec->rt_state);
2355 		panic();
2356 	}
2357 
2358 	/* Update the runtime model and state of the partition. */
2359 	ec->rt_model = RT_MODEL_INTR;
2360 	ec->rt_state = RT_STATE_RUNNING;
2361 
2362 	VERBOSE("SP (0x%x) interrupt start on core%u.\n", sp->sp_id, linear_id);
2363 
2364 	/*
2365 	 * Forward the interrupt to the S-EL1 SP. The interrupt ID is not
2366 	 * populated as the SP can determine this by itself.
2367 	 */
2368 	return spmd_smc_switch_state(FFA_INTERRUPT, false,
2369 				     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
2370 				     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
2371 				     handle);
2372 }
2373