xref: /rk3399_ARM-atf/services/std_svc/spm/el3_spmc/spmc_main.c (revision 59bd2ad83c13ed3c84bb9b841032c95927358890)
1 /*
2  * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <errno.h>
9 
10 #include <arch_helpers.h>
11 #include <bl31/bl31.h>
12 #include <bl31/ehf.h>
13 #include <common/debug.h>
14 #include <common/fdt_wrappers.h>
15 #include <common/runtime_svc.h>
16 #include <common/uuid.h>
17 #include <lib/el3_runtime/context_mgmt.h>
18 #include <lib/smccc.h>
19 #include <lib/utils.h>
20 #include <lib/xlat_tables/xlat_tables_v2.h>
21 #include <libfdt.h>
22 #include <plat/common/platform.h>
23 #include <services/el3_spmc_logical_sp.h>
24 #include <services/ffa_svc.h>
25 #include <services/spmc_svc.h>
26 #include <services/spmd_svc.h>
27 #include "spmc.h"
28 
29 #include <platform_def.h>
30 
31 /* Declare the maximum number of SPs and El3 LPs. */
32 #define MAX_SP_LP_PARTITIONS SECURE_PARTITION_COUNT + MAX_EL3_LP_DESCS_COUNT
33 
34 /*
35  * Allocate a secure partition descriptor to describe each SP in the system that
36  * does not reside at EL3.
37  */
38 static struct secure_partition_desc sp_desc[SECURE_PARTITION_COUNT];
39 
40 /*
41  * Allocate an NS endpoint descriptor to describe each VM and the Hypervisor in
42  * the system that interacts with a SP. It is used to track the Hypervisor
43  * buffer pair, version and ID for now. It could be extended to track VM
44  * properties when the SPMC supports indirect messaging.
45  */
46 static struct ns_endpoint_desc ns_ep_desc[NS_PARTITION_COUNT];
47 
48 /*
49  * Helper function to obtain the array storing the EL3
50  * Logical Partition descriptors.
51  */
52 struct el3_lp_desc *get_el3_lp_array(void)
53 {
54 	return (struct el3_lp_desc *) EL3_LP_DESCS_START;
55 }
56 
57 /*
58  * Helper function to obtain the descriptor of the last SP to whom control was
59  * handed to on this physical cpu. Currently, we assume there is only one SP.
60  * TODO: Expand to track multiple partitions when required.
61  */
62 struct secure_partition_desc *spmc_get_current_sp_ctx(void)
63 {
64 	return &(sp_desc[ACTIVE_SP_DESC_INDEX]);
65 }
66 
67 /*
68  * Helper function to obtain the execution context of an SP on the
69  * current physical cpu.
70  */
71 struct sp_exec_ctx *spmc_get_sp_ec(struct secure_partition_desc *sp)
72 {
73 	return &(sp->ec[get_ec_index(sp)]);
74 }
75 
76 /* Helper function to get pointer to SP context from its ID. */
77 struct secure_partition_desc *spmc_get_sp_ctx(uint16_t id)
78 {
79 	/* Check for Secure World Partitions. */
80 	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
81 		if (sp_desc[i].sp_id == id) {
82 			return &(sp_desc[i]);
83 		}
84 	}
85 	return NULL;
86 }
87 
88 /*
89  * Helper function to obtain the descriptor of the Hypervisor or OS kernel.
90  * We assume that the first descriptor is reserved for this entity.
91  */
92 struct ns_endpoint_desc *spmc_get_hyp_ctx(void)
93 {
94 	return &(ns_ep_desc[0]);
95 }
96 
97 /*
98  * Helper function to obtain the RX/TX buffer pair descriptor of the Hypervisor
99  * or OS kernel in the normal world or the last SP that was run.
100  */
101 struct mailbox *spmc_get_mbox_desc(bool secure_origin)
102 {
103 	/* Obtain the RX/TX buffer pair descriptor. */
104 	if (secure_origin) {
105 		return &(spmc_get_current_sp_ctx()->mailbox);
106 	} else {
107 		return &(spmc_get_hyp_ctx()->mailbox);
108 	}
109 }
110 
111 /******************************************************************************
112  * This function returns to the place where spmc_sp_synchronous_entry() was
113  * called originally.
114  ******************************************************************************/
115 __dead2 void spmc_sp_synchronous_exit(struct sp_exec_ctx *ec, uint64_t rc)
116 {
117 	/*
118 	 * The SPM must have initiated the original request through a
119 	 * synchronous entry into the secure partition. Jump back to the
120 	 * original C runtime context with the value of rc in x0;
121 	 */
122 	spm_secure_partition_exit(ec->c_rt_ctx, rc);
123 
124 	panic();
125 }
126 
127 /*******************************************************************************
128  * Return FFA_ERROR with specified error code.
129  ******************************************************************************/
130 uint64_t spmc_ffa_error_return(void *handle, int error_code)
131 {
132 	SMC_RET8(handle, FFA_ERROR,
133 		 FFA_TARGET_INFO_MBZ, error_code,
134 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
135 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
136 }
137 
138 /******************************************************************************
139  * Helper function to validate a secure partition ID to ensure it does not
140  * conflict with any other FF-A component and follows the convention to
141  * indicate it resides within the secure world.
142  ******************************************************************************/
143 bool is_ffa_secure_id_valid(uint16_t partition_id)
144 {
145 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
146 
147 	/* Ensure the ID is not the invalid partition ID. */
148 	if (partition_id == INV_SP_ID) {
149 		return false;
150 	}
151 
152 	/* Ensure the ID is not the SPMD ID. */
153 	if (partition_id == SPMD_DIRECT_MSG_ENDPOINT_ID) {
154 		return false;
155 	}
156 
157 	/*
158 	 * Ensure the ID follows the convention to indicate it resides
159 	 * in the secure world.
160 	 */
161 	if (!ffa_is_secure_world_id(partition_id)) {
162 		return false;
163 	}
164 
165 	/* Ensure we don't conflict with the SPMC partition ID. */
166 	if (partition_id == FFA_SPMC_ID) {
167 		return false;
168 	}
169 
170 	/* Ensure we do not already have an SP context with this ID. */
171 	if (spmc_get_sp_ctx(partition_id)) {
172 		return false;
173 	}
174 
175 	/* Ensure we don't clash with any Logical SP's. */
176 	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
177 		if (el3_lp_descs[i].sp_id == partition_id) {
178 			return false;
179 		}
180 	}
181 
182 	return true;
183 }
184 
185 /*******************************************************************************
186  * This function either forwards the request to the other world or returns
187  * with an ERET depending on the source of the call.
188  * We can assume that the destination is for an entity at a lower exception
189  * level as any messages destined for a logical SP resident in EL3 will have
190  * already been taken care of by the SPMC before entering this function.
191  ******************************************************************************/
192 static uint64_t spmc_smc_return(uint32_t smc_fid,
193 				bool secure_origin,
194 				uint64_t x1,
195 				uint64_t x2,
196 				uint64_t x3,
197 				uint64_t x4,
198 				void *handle,
199 				void *cookie,
200 				uint64_t flags,
201 				uint16_t dst_id)
202 {
203 	/* If the destination is in the normal world always go via the SPMD. */
204 	if (ffa_is_normal_world_id(dst_id)) {
205 		return spmd_smc_handler(smc_fid, x1, x2, x3, x4,
206 					cookie, handle, flags);
207 	}
208 	/*
209 	 * If the caller is secure and we want to return to the secure world,
210 	 * ERET directly.
211 	 */
212 	else if (secure_origin && ffa_is_secure_world_id(dst_id)) {
213 		SMC_RET5(handle, smc_fid, x1, x2, x3, x4);
214 	}
215 	/* If we originated in the normal world then switch contexts. */
216 	else if (!secure_origin && ffa_is_secure_world_id(dst_id)) {
217 		return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2,
218 					     x3, x4, handle);
219 	} else {
220 		/* Unknown State. */
221 		panic();
222 	}
223 
224 	/* Shouldn't be Reached. */
225 	return 0;
226 }
227 
228 /*******************************************************************************
229  * FF-A ABI Handlers.
230  ******************************************************************************/
231 
232 /*******************************************************************************
233  * Helper function to validate arg2 as part of a direct message.
234  ******************************************************************************/
235 static inline bool direct_msg_validate_arg2(uint64_t x2)
236 {
237 	/* Check message type. */
238 	if (x2 & FFA_FWK_MSG_BIT) {
239 		/* We have a framework message, ensure it is a known message. */
240 		if (x2 & ~(FFA_FWK_MSG_MASK | FFA_FWK_MSG_BIT)) {
241 			VERBOSE("Invalid message format 0x%lx.\n", x2);
242 			return false;
243 		}
244 	} else {
245 		/* We have a partition messages, ensure x2 is not set. */
246 		if (x2 != (uint64_t) 0) {
247 			VERBOSE("Arg2 MBZ for partition messages. (0x%lx).\n",
248 				x2);
249 			return false;
250 		}
251 	}
252 	return true;
253 }
254 
255 /*******************************************************************************
256  * Handle direct request messages and route to the appropriate destination.
257  ******************************************************************************/
258 static uint64_t direct_req_smc_handler(uint32_t smc_fid,
259 				       bool secure_origin,
260 				       uint64_t x1,
261 				       uint64_t x2,
262 				       uint64_t x3,
263 				       uint64_t x4,
264 				       void *cookie,
265 				       void *handle,
266 				       uint64_t flags)
267 {
268 	uint16_t dst_id = ffa_endpoint_destination(x1);
269 	struct el3_lp_desc *el3_lp_descs;
270 	struct secure_partition_desc *sp;
271 	unsigned int idx;
272 
273 	/* Check if arg2 has been populated correctly based on message type. */
274 	if (!direct_msg_validate_arg2(x2)) {
275 		return spmc_ffa_error_return(handle,
276 					     FFA_ERROR_INVALID_PARAMETER);
277 	}
278 
279 	el3_lp_descs = get_el3_lp_array();
280 
281 	/* Check if the request is destined for a Logical Partition. */
282 	for (unsigned int i = 0U; i < MAX_EL3_LP_DESCS_COUNT; i++) {
283 		if (el3_lp_descs[i].sp_id == dst_id) {
284 			return el3_lp_descs[i].direct_req(
285 					smc_fid, secure_origin, x1, x2, x3, x4,
286 					cookie, handle, flags);
287 		}
288 	}
289 
290 	/*
291 	 * If the request was not targeted to a LSP and from the secure world
292 	 * then it is invalid since a SP cannot call into the Normal world and
293 	 * there is no other SP to call into. If there are other SPs in future
294 	 * then the partition runtime model would need to be validated as well.
295 	 */
296 	if (secure_origin) {
297 		VERBOSE("Direct request not supported to the Normal World.\n");
298 		return spmc_ffa_error_return(handle,
299 					     FFA_ERROR_INVALID_PARAMETER);
300 	}
301 
302 	/* Check if the SP ID is valid. */
303 	sp = spmc_get_sp_ctx(dst_id);
304 	if (sp == NULL) {
305 		VERBOSE("Direct request to unknown partition ID (0x%x).\n",
306 			dst_id);
307 		return spmc_ffa_error_return(handle,
308 					     FFA_ERROR_INVALID_PARAMETER);
309 	}
310 
311 	/*
312 	 * Check that the target execution context is in a waiting state before
313 	 * forwarding the direct request to it.
314 	 */
315 	idx = get_ec_index(sp);
316 	if (sp->ec[idx].rt_state != RT_STATE_WAITING) {
317 		VERBOSE("SP context on core%u is not waiting (%u).\n",
318 			idx, sp->ec[idx].rt_model);
319 		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
320 	}
321 
322 	/*
323 	 * Everything checks out so forward the request to the SP after updating
324 	 * its state and runtime model.
325 	 */
326 	sp->ec[idx].rt_state = RT_STATE_RUNNING;
327 	sp->ec[idx].rt_model = RT_MODEL_DIR_REQ;
328 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
329 			       handle, cookie, flags, dst_id);
330 }
331 
332 /*******************************************************************************
333  * Handle direct response messages and route to the appropriate destination.
334  ******************************************************************************/
335 static uint64_t direct_resp_smc_handler(uint32_t smc_fid,
336 					bool secure_origin,
337 					uint64_t x1,
338 					uint64_t x2,
339 					uint64_t x3,
340 					uint64_t x4,
341 					void *cookie,
342 					void *handle,
343 					uint64_t flags)
344 {
345 	uint16_t dst_id = ffa_endpoint_destination(x1);
346 	struct secure_partition_desc *sp;
347 	unsigned int idx;
348 
349 	/* Check if arg2 has been populated correctly based on message type. */
350 	if (!direct_msg_validate_arg2(x2)) {
351 		return spmc_ffa_error_return(handle,
352 					     FFA_ERROR_INVALID_PARAMETER);
353 	}
354 
355 	/* Check that the response did not originate from the Normal world. */
356 	if (!secure_origin) {
357 		VERBOSE("Direct Response not supported from Normal World.\n");
358 		return spmc_ffa_error_return(handle,
359 					     FFA_ERROR_INVALID_PARAMETER);
360 	}
361 
362 	/*
363 	 * Check that the response is either targeted to the Normal world or the
364 	 * SPMC e.g. a PM response.
365 	 */
366 	if ((dst_id != FFA_SPMC_ID) && ffa_is_secure_world_id(dst_id)) {
367 		VERBOSE("Direct response to invalid partition ID (0x%x).\n",
368 			dst_id);
369 		return spmc_ffa_error_return(handle,
370 					     FFA_ERROR_INVALID_PARAMETER);
371 	}
372 
373 	/* Obtain the SP descriptor and update its runtime state. */
374 	sp = spmc_get_sp_ctx(ffa_endpoint_source(x1));
375 	if (sp == NULL) {
376 		VERBOSE("Direct response to unknown partition ID (0x%x).\n",
377 			dst_id);
378 		return spmc_ffa_error_return(handle,
379 					     FFA_ERROR_INVALID_PARAMETER);
380 	}
381 
382 	/* Sanity check state is being tracked correctly in the SPMC. */
383 	idx = get_ec_index(sp);
384 	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
385 
386 	/* Ensure SP execution context was in the right runtime model. */
387 	if (sp->ec[idx].rt_model != RT_MODEL_DIR_REQ) {
388 		VERBOSE("SP context on core%u not handling direct req (%u).\n",
389 			idx, sp->ec[idx].rt_model);
390 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
391 	}
392 
393 	/* Update the state of the SP execution context. */
394 	sp->ec[idx].rt_state = RT_STATE_WAITING;
395 
396 	/*
397 	 * If the receiver is not the SPMC then forward the response to the
398 	 * Normal world.
399 	 */
400 	if (dst_id == FFA_SPMC_ID) {
401 		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
402 		/* Should not get here. */
403 		panic();
404 	}
405 
406 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
407 			       handle, cookie, flags, dst_id);
408 }
409 
410 /*******************************************************************************
411  * This function handles the FFA_MSG_WAIT SMC to allow an SP to relinquish its
412  * cycles.
413  ******************************************************************************/
414 static uint64_t msg_wait_handler(uint32_t smc_fid,
415 				 bool secure_origin,
416 				 uint64_t x1,
417 				 uint64_t x2,
418 				 uint64_t x3,
419 				 uint64_t x4,
420 				 void *cookie,
421 				 void *handle,
422 				 uint64_t flags)
423 {
424 	struct secure_partition_desc *sp;
425 	unsigned int idx;
426 
427 	/*
428 	 * Check that the response did not originate from the Normal world as
429 	 * only the secure world can call this ABI.
430 	 */
431 	if (!secure_origin) {
432 		VERBOSE("Normal world cannot call FFA_MSG_WAIT.\n");
433 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
434 	}
435 
436 	/* Get the descriptor of the SP that invoked FFA_MSG_WAIT. */
437 	sp = spmc_get_current_sp_ctx();
438 	if (sp == NULL) {
439 		return spmc_ffa_error_return(handle,
440 					     FFA_ERROR_INVALID_PARAMETER);
441 	}
442 
443 	/*
444 	 * Get the execution context of the SP that invoked FFA_MSG_WAIT.
445 	 */
446 	idx = get_ec_index(sp);
447 
448 	/* Ensure SP execution context was in the right runtime model. */
449 	if (sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) {
450 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
451 	}
452 
453 	/* Sanity check the state is being tracked correctly in the SPMC. */
454 	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
455 
456 	/*
457 	 * Perform a synchronous exit if the partition was initialising. The
458 	 * state is updated after the exit.
459 	 */
460 	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
461 		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
462 		/* Should not get here */
463 		panic();
464 	}
465 
466 	/* Update the state of the SP execution context. */
467 	sp->ec[idx].rt_state = RT_STATE_WAITING;
468 
469 	/* Resume normal world if a secure interrupt was handled. */
470 	if (sp->ec[idx].rt_model == RT_MODEL_INTR) {
471 		/* FFA_MSG_WAIT can only be called from the secure world. */
472 		unsigned int secure_state_in = SECURE;
473 		unsigned int secure_state_out = NON_SECURE;
474 
475 		cm_el1_sysregs_context_save(secure_state_in);
476 		cm_el1_sysregs_context_restore(secure_state_out);
477 		cm_set_next_eret_context(secure_state_out);
478 		SMC_RET0(cm_get_context(secure_state_out));
479 	}
480 
481 	/* Forward the response to the Normal world. */
482 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
483 			       handle, cookie, flags, FFA_NWD_ID);
484 }
485 
486 static uint64_t ffa_error_handler(uint32_t smc_fid,
487 				 bool secure_origin,
488 				 uint64_t x1,
489 				 uint64_t x2,
490 				 uint64_t x3,
491 				 uint64_t x4,
492 				 void *cookie,
493 				 void *handle,
494 				 uint64_t flags)
495 {
496 	struct secure_partition_desc *sp;
497 	unsigned int idx;
498 
499 	/* Check that the response did not originate from the Normal world. */
500 	if (!secure_origin) {
501 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
502 	}
503 
504 	/* Get the descriptor of the SP that invoked FFA_ERROR. */
505 	sp = spmc_get_current_sp_ctx();
506 	if (sp == NULL) {
507 		return spmc_ffa_error_return(handle,
508 					     FFA_ERROR_INVALID_PARAMETER);
509 	}
510 
511 	/* Get the execution context of the SP that invoked FFA_ERROR. */
512 	idx = get_ec_index(sp);
513 
514 	/*
515 	 * We only expect FFA_ERROR to be received during SP initialisation
516 	 * otherwise this is an invalid call.
517 	 */
518 	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
519 		ERROR("SP 0x%x failed to initialize.\n", sp->sp_id);
520 		spmc_sp_synchronous_exit(&sp->ec[idx], x2);
521 		/* Should not get here. */
522 		panic();
523 	}
524 
525 	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
526 }
527 
528 static uint64_t ffa_version_handler(uint32_t smc_fid,
529 				    bool secure_origin,
530 				    uint64_t x1,
531 				    uint64_t x2,
532 				    uint64_t x3,
533 				    uint64_t x4,
534 				    void *cookie,
535 				    void *handle,
536 				    uint64_t flags)
537 {
538 	uint32_t requested_version = x1 & FFA_VERSION_MASK;
539 
540 	if (requested_version & FFA_VERSION_BIT31_MASK) {
541 		/* Invalid encoding, return an error. */
542 		SMC_RET1(handle, FFA_ERROR_NOT_SUPPORTED);
543 		/* Execution stops here. */
544 	}
545 
546 	/* Determine the caller to store the requested version. */
547 	if (secure_origin) {
548 		/*
549 		 * Ensure that the SP is reporting the same version as
550 		 * specified in its manifest. If these do not match there is
551 		 * something wrong with the SP.
552 		 * TODO: Should we abort the SP? For now assert this is not
553 		 *       case.
554 		 */
555 		assert(requested_version ==
556 		       spmc_get_current_sp_ctx()->ffa_version);
557 	} else {
558 		/*
559 		 * If this is called by the normal world, record this
560 		 * information in its descriptor.
561 		 */
562 		spmc_get_hyp_ctx()->ffa_version = requested_version;
563 	}
564 
565 	SMC_RET1(handle, MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
566 					  FFA_VERSION_MINOR));
567 }
568 
569 /*******************************************************************************
570  * Helper function to obtain the FF-A version of the calling partition.
571  ******************************************************************************/
572 uint32_t get_partition_ffa_version(bool secure_origin)
573 {
574 	if (secure_origin) {
575 		return spmc_get_current_sp_ctx()->ffa_version;
576 	} else {
577 		return spmc_get_hyp_ctx()->ffa_version;
578 	}
579 }
580 
581 static uint64_t rxtx_map_handler(uint32_t smc_fid,
582 				 bool secure_origin,
583 				 uint64_t x1,
584 				 uint64_t x2,
585 				 uint64_t x3,
586 				 uint64_t x4,
587 				 void *cookie,
588 				 void *handle,
589 				 uint64_t flags)
590 {
591 	int ret;
592 	uint32_t error_code;
593 	uint32_t mem_atts = secure_origin ? MT_SECURE : MT_NS;
594 	struct mailbox *mbox;
595 	uintptr_t tx_address = x1;
596 	uintptr_t rx_address = x2;
597 	uint32_t page_count = x3 & FFA_RXTX_PAGE_COUNT_MASK; /* Bits [5:0] */
598 	uint32_t buf_size = page_count * FFA_PAGE_SIZE;
599 
600 	/*
601 	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
602 	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
603 	 * ABI on behalf of a VM and reject it if this is the case.
604 	 */
605 	if (tx_address == 0 || rx_address == 0) {
606 		WARN("Mapping RX/TX Buffers on behalf of VM not supported.\n");
607 		return spmc_ffa_error_return(handle,
608 					     FFA_ERROR_INVALID_PARAMETER);
609 	}
610 
611 	/* Ensure the specified buffers are not the same. */
612 	if (tx_address == rx_address) {
613 		WARN("TX Buffer must not be the same as RX Buffer.\n");
614 		return spmc_ffa_error_return(handle,
615 					     FFA_ERROR_INVALID_PARAMETER);
616 	}
617 
618 	/* Ensure the buffer size is not 0. */
619 	if (buf_size == 0U) {
620 		WARN("Buffer size must not be 0\n");
621 		return spmc_ffa_error_return(handle,
622 					     FFA_ERROR_INVALID_PARAMETER);
623 	}
624 
625 	/*
626 	 * Ensure the buffer size is a multiple of the translation granule size
627 	 * in TF-A.
628 	 */
629 	if (buf_size % PAGE_SIZE != 0U) {
630 		WARN("Buffer size must be aligned to translation granule.\n");
631 		return spmc_ffa_error_return(handle,
632 					     FFA_ERROR_INVALID_PARAMETER);
633 	}
634 
635 	/* Obtain the RX/TX buffer pair descriptor. */
636 	mbox = spmc_get_mbox_desc(secure_origin);
637 
638 	spin_lock(&mbox->lock);
639 
640 	/* Check if buffers have already been mapped. */
641 	if (mbox->rx_buffer != 0 || mbox->tx_buffer != 0) {
642 		WARN("RX/TX Buffers already mapped (%p/%p)\n",
643 		     (void *) mbox->rx_buffer, (void *)mbox->tx_buffer);
644 		error_code = FFA_ERROR_DENIED;
645 		goto err;
646 	}
647 
648 	/* memmap the TX buffer as read only. */
649 	ret = mmap_add_dynamic_region(tx_address, /* PA */
650 			tx_address, /* VA */
651 			buf_size, /* size */
652 			mem_atts | MT_RO_DATA); /* attrs */
653 	if (ret != 0) {
654 		/* Return the correct error code. */
655 		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
656 						FFA_ERROR_INVALID_PARAMETER;
657 		WARN("Unable to map TX buffer: %d\n", error_code);
658 		goto err;
659 	}
660 
661 	/* memmap the RX buffer as read write. */
662 	ret = mmap_add_dynamic_region(rx_address, /* PA */
663 			rx_address, /* VA */
664 			buf_size, /* size */
665 			mem_atts | MT_RW_DATA); /* attrs */
666 
667 	if (ret != 0) {
668 		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
669 						FFA_ERROR_INVALID_PARAMETER;
670 		WARN("Unable to map RX buffer: %d\n", error_code);
671 		/* Unmap the TX buffer again. */
672 		mmap_remove_dynamic_region(tx_address, buf_size);
673 		goto err;
674 	}
675 
676 	mbox->tx_buffer = (void *) tx_address;
677 	mbox->rx_buffer = (void *) rx_address;
678 	mbox->rxtx_page_count = page_count;
679 	spin_unlock(&mbox->lock);
680 
681 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
682 	/* Execution stops here. */
683 err:
684 	spin_unlock(&mbox->lock);
685 	return spmc_ffa_error_return(handle, error_code);
686 }
687 
688 static uint64_t rxtx_unmap_handler(uint32_t smc_fid,
689 				   bool secure_origin,
690 				   uint64_t x1,
691 				   uint64_t x2,
692 				   uint64_t x3,
693 				   uint64_t x4,
694 				   void *cookie,
695 				   void *handle,
696 				   uint64_t flags)
697 {
698 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
699 	uint32_t buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
700 
701 	/*
702 	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
703 	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
704 	 * ABI on behalf of a VM and reject it if this is the case.
705 	 */
706 	if (x1 != 0UL) {
707 		return spmc_ffa_error_return(handle,
708 					     FFA_ERROR_INVALID_PARAMETER);
709 	}
710 
711 	spin_lock(&mbox->lock);
712 
713 	/* Check if buffers are currently mapped. */
714 	if (mbox->rx_buffer == 0 || mbox->tx_buffer == 0) {
715 		spin_unlock(&mbox->lock);
716 		return spmc_ffa_error_return(handle,
717 					     FFA_ERROR_INVALID_PARAMETER);
718 	}
719 
720 	/* Unmap RX Buffer */
721 	if (mmap_remove_dynamic_region((uintptr_t) mbox->rx_buffer,
722 				       buf_size) != 0) {
723 		WARN("Unable to unmap RX buffer!\n");
724 	}
725 
726 	mbox->rx_buffer = 0;
727 
728 	/* Unmap TX Buffer */
729 	if (mmap_remove_dynamic_region((uintptr_t) mbox->tx_buffer,
730 				       buf_size) != 0) {
731 		WARN("Unable to unmap TX buffer!\n");
732 	}
733 
734 	mbox->tx_buffer = 0;
735 	mbox->rxtx_page_count = 0;
736 
737 	spin_unlock(&mbox->lock);
738 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
739 }
740 
741 /*
742  * Collate the partition information in a v1.1 partition information
743  * descriptor format, this will be converter later if required.
744  */
745 static int partition_info_get_handler_v1_1(uint32_t *uuid,
746 					   struct ffa_partition_info_v1_1
747 						  *partitions,
748 					   uint32_t max_partitions,
749 					   uint32_t *partition_count)
750 {
751 	uint32_t index;
752 	struct ffa_partition_info_v1_1 *desc;
753 	bool null_uuid = is_null_uuid(uuid);
754 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
755 
756 	/* Deal with Logical Partitions. */
757 	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
758 		if (null_uuid || uuid_match(uuid, el3_lp_descs[index].uuid)) {
759 			/* Found a matching UUID, populate appropriately. */
760 			if (*partition_count >= max_partitions) {
761 				return FFA_ERROR_NO_MEMORY;
762 			}
763 
764 			desc = &partitions[*partition_count];
765 			desc->ep_id = el3_lp_descs[index].sp_id;
766 			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
767 			desc->properties = el3_lp_descs[index].properties;
768 			if (null_uuid) {
769 				copy_uuid(desc->uuid, el3_lp_descs[index].uuid);
770 			}
771 			(*partition_count)++;
772 		}
773 	}
774 
775 	/* Deal with physical SP's. */
776 	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
777 		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
778 			/* Found a matching UUID, populate appropriately. */
779 			if (*partition_count >= max_partitions) {
780 				return FFA_ERROR_NO_MEMORY;
781 			}
782 
783 			desc = &partitions[*partition_count];
784 			desc->ep_id = sp_desc[index].sp_id;
785 			/*
786 			 * Execution context count must match No. cores for
787 			 * S-EL1 SPs.
788 			 */
789 			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
790 			desc->properties = sp_desc[index].properties;
791 			if (null_uuid) {
792 				copy_uuid(desc->uuid, sp_desc[index].uuid);
793 			}
794 			(*partition_count)++;
795 		}
796 	}
797 	return 0;
798 }
799 
800 /*
801  * Handle the case where that caller only wants the count of partitions
802  * matching a given UUID and does not want the corresponding descriptors
803  * populated.
804  */
805 static uint32_t partition_info_get_handler_count_only(uint32_t *uuid)
806 {
807 	uint32_t index = 0;
808 	uint32_t partition_count = 0;
809 	bool null_uuid = is_null_uuid(uuid);
810 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
811 
812 	/* Deal with Logical Partitions. */
813 	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
814 		if (null_uuid ||
815 		    uuid_match(uuid, el3_lp_descs[index].uuid)) {
816 			(partition_count)++;
817 		}
818 	}
819 
820 	/* Deal with physical SP's. */
821 	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
822 		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
823 			(partition_count)++;
824 		}
825 	}
826 	return partition_count;
827 }
828 
829 /*
830  * If the caller of the PARTITION_INFO_GET ABI was a v1.0 caller, populate
831  * the coresponding descriptor format from the v1.1 descriptor array.
832  */
833 static uint64_t partition_info_populate_v1_0(struct ffa_partition_info_v1_1
834 					     *partitions,
835 					     struct mailbox *mbox,
836 					     int partition_count)
837 {
838 	uint32_t index;
839 	uint32_t buf_size;
840 	uint32_t descriptor_size;
841 	struct ffa_partition_info_v1_0 *v1_0_partitions =
842 		(struct ffa_partition_info_v1_0 *) mbox->rx_buffer;
843 
844 	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
845 	descriptor_size = partition_count *
846 			  sizeof(struct ffa_partition_info_v1_0);
847 
848 	if (descriptor_size > buf_size) {
849 		return FFA_ERROR_NO_MEMORY;
850 	}
851 
852 	for (index = 0U; index < partition_count; index++) {
853 		v1_0_partitions[index].ep_id = partitions[index].ep_id;
854 		v1_0_partitions[index].execution_ctx_count =
855 			partitions[index].execution_ctx_count;
856 		v1_0_partitions[index].properties =
857 			partitions[index].properties;
858 	}
859 	return 0;
860 }
861 
862 /*
863  * Main handler for FFA_PARTITION_INFO_GET which supports both FF-A v1.1 and
864  * v1.0 implementations.
865  */
866 static uint64_t partition_info_get_handler(uint32_t smc_fid,
867 					   bool secure_origin,
868 					   uint64_t x1,
869 					   uint64_t x2,
870 					   uint64_t x3,
871 					   uint64_t x4,
872 					   void *cookie,
873 					   void *handle,
874 					   uint64_t flags)
875 {
876 	int ret;
877 	uint32_t partition_count = 0;
878 	uint32_t size = 0;
879 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
880 	struct mailbox *mbox;
881 	uint64_t info_get_flags;
882 	bool count_only;
883 	uint32_t uuid[4];
884 
885 	uuid[0] = x1;
886 	uuid[1] = x2;
887 	uuid[2] = x3;
888 	uuid[3] = x4;
889 
890 	/* Determine if the Partition descriptors should be populated. */
891 	info_get_flags = SMC_GET_GP(handle, CTX_GPREG_X5);
892 	count_only = (info_get_flags & FFA_PARTITION_INFO_GET_COUNT_FLAG_MASK);
893 
894 	/* Handle the case where we don't need to populate the descriptors. */
895 	if (count_only) {
896 		partition_count = partition_info_get_handler_count_only(uuid);
897 		if (partition_count == 0) {
898 			return spmc_ffa_error_return(handle,
899 						FFA_ERROR_INVALID_PARAMETER);
900 		}
901 	} else {
902 		struct ffa_partition_info_v1_1 partitions[MAX_SP_LP_PARTITIONS];
903 
904 		/*
905 		 * Handle the case where the partition descriptors are required,
906 		 * check we have the buffers available and populate the
907 		 * appropriate structure version.
908 		 */
909 
910 		/* Obtain the v1.1 format of the descriptors. */
911 		ret = partition_info_get_handler_v1_1(uuid, partitions,
912 						      MAX_SP_LP_PARTITIONS,
913 						      &partition_count);
914 
915 		/* Check if an error occurred during discovery. */
916 		if (ret != 0) {
917 			goto err;
918 		}
919 
920 		/* If we didn't find any matches the UUID is unknown. */
921 		if (partition_count == 0) {
922 			ret = FFA_ERROR_INVALID_PARAMETER;
923 			goto err;
924 		}
925 
926 		/* Obtain the partition mailbox RX/TX buffer pair descriptor. */
927 		mbox = spmc_get_mbox_desc(secure_origin);
928 
929 		/*
930 		 * If the caller has not bothered registering its RX/TX pair
931 		 * then return an error code.
932 		 */
933 		spin_lock(&mbox->lock);
934 		if (mbox->rx_buffer == NULL) {
935 			ret = FFA_ERROR_BUSY;
936 			goto err_unlock;
937 		}
938 
939 		/* Ensure the RX buffer is currently free. */
940 		if (mbox->state != MAILBOX_STATE_EMPTY) {
941 			ret = FFA_ERROR_BUSY;
942 			goto err_unlock;
943 		}
944 
945 		/* Zero the RX buffer before populating. */
946 		(void)memset(mbox->rx_buffer, 0,
947 			     mbox->rxtx_page_count * FFA_PAGE_SIZE);
948 
949 		/*
950 		 * Depending on the FF-A version of the requesting partition
951 		 * we may need to convert to a v1.0 format otherwise we can copy
952 		 * directly.
953 		 */
954 		if (ffa_version == MAKE_FFA_VERSION(U(1), U(0))) {
955 			ret = partition_info_populate_v1_0(partitions,
956 							   mbox,
957 							   partition_count);
958 			if (ret != 0) {
959 				goto err_unlock;
960 			}
961 		} else {
962 			uint32_t buf_size = mbox->rxtx_page_count *
963 					    FFA_PAGE_SIZE;
964 
965 			/* Ensure the descriptor will fit in the buffer. */
966 			size = sizeof(struct ffa_partition_info_v1_1);
967 			if (partition_count * size  > buf_size) {
968 				ret = FFA_ERROR_NO_MEMORY;
969 				goto err_unlock;
970 			}
971 			memcpy(mbox->rx_buffer, partitions,
972 			       partition_count * size);
973 		}
974 
975 		mbox->state = MAILBOX_STATE_FULL;
976 		spin_unlock(&mbox->lock);
977 	}
978 	SMC_RET4(handle, FFA_SUCCESS_SMC32, 0, partition_count, size);
979 
980 err_unlock:
981 	spin_unlock(&mbox->lock);
982 err:
983 	return spmc_ffa_error_return(handle, ret);
984 }
985 
986 static uint64_t ffa_features_handler(uint32_t smc_fid,
987 				     bool secure_origin,
988 				     uint64_t x1,
989 				     uint64_t x2,
990 				     uint64_t x3,
991 				     uint64_t x4,
992 				     void *cookie,
993 				     void *handle,
994 				     uint64_t flags)
995 {
996 	uint32_t function_id = (uint32_t) x1;
997 	uint32_t input_properties = (uint32_t) x2;
998 
999 	/*
1000 	 * We don't currently support any additional input properties
1001 	 * for any ABI therefore ensure this value is always set to 0.
1002 	 */
1003 	if (input_properties != 0) {
1004 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1005 	}
1006 
1007 	/* Check if a Feature ID was requested. */
1008 	if ((function_id & FFA_FEATURES_BIT31_MASK) == 0U) {
1009 		/* We currently don't support any additional features. */
1010 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1011 	}
1012 
1013 	/* Report if an FF-A ABI is supported. */
1014 	switch (function_id) {
1015 	/* Supported features from both worlds. */
1016 	case FFA_ERROR:
1017 	case FFA_SUCCESS_SMC32:
1018 	case FFA_ID_GET:
1019 	case FFA_FEATURES:
1020 	case FFA_VERSION:
1021 	case FFA_RX_RELEASE:
1022 	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
1023 	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
1024 	case FFA_PARTITION_INFO_GET:
1025 	case FFA_RXTX_MAP_SMC32:
1026 	case FFA_RXTX_MAP_SMC64:
1027 	case FFA_RXTX_UNMAP:
1028 	case FFA_MSG_RUN:
1029 
1030 		/*
1031 		 * We are relying on the fact that the other registers
1032 		 * will be set to 0 as these values align with the
1033 		 * currently implemented features of the SPMC. If this
1034 		 * changes this function must be extended to handle
1035 		 * reporting the additional functionality.
1036 		 */
1037 
1038 		SMC_RET1(handle, FFA_SUCCESS_SMC32);
1039 		/* Execution stops here. */
1040 
1041 	/* Supported ABIs only from the secure world. */
1042 	case FFA_SECONDARY_EP_REGISTER_SMC64:
1043 	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
1044 	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
1045 	case FFA_MSG_WAIT:
1046 
1047 		if (!secure_origin) {
1048 			return spmc_ffa_error_return(handle,
1049 					FFA_ERROR_NOT_SUPPORTED);
1050 		}
1051 		SMC_RET1(handle, FFA_SUCCESS_SMC32);
1052 		/* Execution stops here. */
1053 
1054 	default:
1055 		return spmc_ffa_error_return(handle,
1056 					FFA_ERROR_NOT_SUPPORTED);
1057 	}
1058 }
1059 
1060 static uint64_t ffa_id_get_handler(uint32_t smc_fid,
1061 				   bool secure_origin,
1062 				   uint64_t x1,
1063 				   uint64_t x2,
1064 				   uint64_t x3,
1065 				   uint64_t x4,
1066 				   void *cookie,
1067 				   void *handle,
1068 				   uint64_t flags)
1069 {
1070 	if (secure_origin) {
1071 		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1072 			 spmc_get_current_sp_ctx()->sp_id);
1073 	} else {
1074 		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1075 			 spmc_get_hyp_ctx()->ns_ep_id);
1076 	}
1077 }
1078 
1079 static uint64_t ffa_run_handler(uint32_t smc_fid,
1080 				bool secure_origin,
1081 				uint64_t x1,
1082 				uint64_t x2,
1083 				uint64_t x3,
1084 				uint64_t x4,
1085 				void *cookie,
1086 				void *handle,
1087 				uint64_t flags)
1088 {
1089 	struct secure_partition_desc *sp;
1090 	uint16_t target_id = FFA_RUN_EP_ID(x1);
1091 	uint16_t vcpu_id = FFA_RUN_VCPU_ID(x1);
1092 	unsigned int idx;
1093 	unsigned int *rt_state;
1094 	unsigned int *rt_model;
1095 
1096 	/* Can only be called from the normal world. */
1097 	if (secure_origin) {
1098 		ERROR("FFA_RUN can only be called from NWd.\n");
1099 		return spmc_ffa_error_return(handle,
1100 					     FFA_ERROR_INVALID_PARAMETER);
1101 	}
1102 
1103 	/* Cannot run a Normal world partition. */
1104 	if (ffa_is_normal_world_id(target_id)) {
1105 		ERROR("Cannot run a NWd partition (0x%x).\n", target_id);
1106 		return spmc_ffa_error_return(handle,
1107 					     FFA_ERROR_INVALID_PARAMETER);
1108 	}
1109 
1110 	/* Check that the target SP exists. */
1111 	sp = spmc_get_sp_ctx(target_id);
1112 		ERROR("Unknown partition ID (0x%x).\n", target_id);
1113 	if (sp == NULL) {
1114 		return spmc_ffa_error_return(handle,
1115 					     FFA_ERROR_INVALID_PARAMETER);
1116 	}
1117 
1118 	idx = get_ec_index(sp);
1119 	if (idx != vcpu_id) {
1120 		ERROR("Cannot run vcpu %d != %d.\n", idx, vcpu_id);
1121 		return spmc_ffa_error_return(handle,
1122 					     FFA_ERROR_INVALID_PARAMETER);
1123 	}
1124 	rt_state = &((sp->ec[idx]).rt_state);
1125 	rt_model = &((sp->ec[idx]).rt_model);
1126 	if (*rt_state == RT_STATE_RUNNING) {
1127 		ERROR("Partition (0x%x) is already running.\n", target_id);
1128 		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
1129 	}
1130 
1131 	/*
1132 	 * Sanity check that if the execution context was not waiting then it
1133 	 * was either in the direct request or the run partition runtime model.
1134 	 */
1135 	if (*rt_state == RT_STATE_PREEMPTED || *rt_state == RT_STATE_BLOCKED) {
1136 		assert(*rt_model == RT_MODEL_RUN ||
1137 		       *rt_model == RT_MODEL_DIR_REQ);
1138 	}
1139 
1140 	/*
1141 	 * If the context was waiting then update the partition runtime model.
1142 	 */
1143 	if (*rt_state == RT_STATE_WAITING) {
1144 		*rt_model = RT_MODEL_RUN;
1145 	}
1146 
1147 	/*
1148 	 * Forward the request to the correct SP vCPU after updating
1149 	 * its state.
1150 	 */
1151 	*rt_state = RT_STATE_RUNNING;
1152 
1153 	return spmc_smc_return(smc_fid, secure_origin, x1, 0, 0, 0,
1154 			       handle, cookie, flags, target_id);
1155 }
1156 
1157 static uint64_t rx_release_handler(uint32_t smc_fid,
1158 				   bool secure_origin,
1159 				   uint64_t x1,
1160 				   uint64_t x2,
1161 				   uint64_t x3,
1162 				   uint64_t x4,
1163 				   void *cookie,
1164 				   void *handle,
1165 				   uint64_t flags)
1166 {
1167 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1168 
1169 	spin_lock(&mbox->lock);
1170 
1171 	if (mbox->state != MAILBOX_STATE_FULL) {
1172 		spin_unlock(&mbox->lock);
1173 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1174 	}
1175 
1176 	mbox->state = MAILBOX_STATE_EMPTY;
1177 	spin_unlock(&mbox->lock);
1178 
1179 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1180 }
1181 
1182 /*
1183  * Perform initial validation on the provided secondary entry point.
1184  * For now ensure it does not lie within the BL31 Image or the SP's
1185  * RX/TX buffers as these are mapped within EL3.
1186  * TODO: perform validation for additional invalid memory regions.
1187  */
1188 static int validate_secondary_ep(uintptr_t ep, struct secure_partition_desc *sp)
1189 {
1190 	struct mailbox *mb;
1191 	uintptr_t buffer_size;
1192 	uintptr_t sp_rx_buffer;
1193 	uintptr_t sp_tx_buffer;
1194 	uintptr_t sp_rx_buffer_limit;
1195 	uintptr_t sp_tx_buffer_limit;
1196 
1197 	mb = &sp->mailbox;
1198 	buffer_size = (uintptr_t) (mb->rxtx_page_count * FFA_PAGE_SIZE);
1199 	sp_rx_buffer = (uintptr_t) mb->rx_buffer;
1200 	sp_tx_buffer = (uintptr_t) mb->tx_buffer;
1201 	sp_rx_buffer_limit = sp_rx_buffer + buffer_size;
1202 	sp_tx_buffer_limit = sp_tx_buffer + buffer_size;
1203 
1204 	/*
1205 	 * Check if the entry point lies within BL31, or the
1206 	 * SP's RX or TX buffer.
1207 	 */
1208 	if ((ep >= BL31_BASE && ep < BL31_LIMIT) ||
1209 	    (ep >= sp_rx_buffer && ep < sp_rx_buffer_limit) ||
1210 	    (ep >= sp_tx_buffer && ep < sp_tx_buffer_limit)) {
1211 		return -EINVAL;
1212 	}
1213 	return 0;
1214 }
1215 
1216 /*******************************************************************************
1217  * This function handles the FFA_SECONDARY_EP_REGISTER SMC to allow an SP to
1218  *  register an entry point for initialization during a secondary cold boot.
1219  ******************************************************************************/
1220 static uint64_t ffa_sec_ep_register_handler(uint32_t smc_fid,
1221 					    bool secure_origin,
1222 					    uint64_t x1,
1223 					    uint64_t x2,
1224 					    uint64_t x3,
1225 					    uint64_t x4,
1226 					    void *cookie,
1227 					    void *handle,
1228 					    uint64_t flags)
1229 {
1230 	struct secure_partition_desc *sp;
1231 	struct sp_exec_ctx *sp_ctx;
1232 
1233 	/* This request cannot originate from the Normal world. */
1234 	if (!secure_origin) {
1235 		WARN("%s: Can only be called from SWd.\n", __func__);
1236 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1237 	}
1238 
1239 	/* Get the context of the current SP. */
1240 	sp = spmc_get_current_sp_ctx();
1241 	if (sp == NULL) {
1242 		WARN("%s: Cannot find SP context.\n", __func__);
1243 		return spmc_ffa_error_return(handle,
1244 					     FFA_ERROR_INVALID_PARAMETER);
1245 	}
1246 
1247 	/* Only an S-EL1 SP should be invoking this ABI. */
1248 	if (sp->runtime_el != S_EL1) {
1249 		WARN("%s: Can only be called for a S-EL1 SP.\n", __func__);
1250 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1251 	}
1252 
1253 	/* Ensure the SP is in its initialization state. */
1254 	sp_ctx = spmc_get_sp_ec(sp);
1255 	if (sp_ctx->rt_model != RT_MODEL_INIT) {
1256 		WARN("%s: Can only be called during SP initialization.\n",
1257 		     __func__);
1258 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1259 	}
1260 
1261 	/* Perform initial validation of the secondary entry point. */
1262 	if (validate_secondary_ep(x1, sp)) {
1263 		WARN("%s: Invalid entry point provided (0x%lx).\n",
1264 		     __func__, x1);
1265 		return spmc_ffa_error_return(handle,
1266 					     FFA_ERROR_INVALID_PARAMETER);
1267 	}
1268 
1269 	/*
1270 	 * Update the secondary entrypoint in SP context.
1271 	 * We don't need a lock here as during partition initialization there
1272 	 * will only be a single core online.
1273 	 */
1274 	sp->secondary_ep = x1;
1275 	VERBOSE("%s: 0x%lx\n", __func__, sp->secondary_ep);
1276 
1277 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1278 }
1279 
1280 /*******************************************************************************
1281  * This function will parse the Secure Partition Manifest. From manifest, it
1282  * will fetch details for preparing Secure partition image context and secure
1283  * partition image boot arguments if any.
1284  ******************************************************************************/
1285 static int sp_manifest_parse(void *sp_manifest, int offset,
1286 			     struct secure_partition_desc *sp,
1287 			     entry_point_info_t *ep_info)
1288 {
1289 	int32_t ret, node;
1290 	uint32_t config_32;
1291 
1292 	/*
1293 	 * Look for the mandatory fields that are expected to be present in
1294 	 * the SP manifests.
1295 	 */
1296 	node = fdt_path_offset(sp_manifest, "/");
1297 	if (node < 0) {
1298 		ERROR("Did not find root node.\n");
1299 		return node;
1300 	}
1301 
1302 	ret = fdt_read_uint32_array(sp_manifest, node, "uuid",
1303 				    ARRAY_SIZE(sp->uuid), sp->uuid);
1304 	if (ret != 0) {
1305 		ERROR("Missing Secure Partition UUID.\n");
1306 		return ret;
1307 	}
1308 
1309 	ret = fdt_read_uint32(sp_manifest, node, "exception-level", &config_32);
1310 	if (ret != 0) {
1311 		ERROR("Missing SP Exception Level information.\n");
1312 		return ret;
1313 	}
1314 
1315 	sp->runtime_el = config_32;
1316 
1317 	ret = fdt_read_uint32(sp_manifest, node, "ffa-version", &config_32);
1318 	if (ret != 0) {
1319 		ERROR("Missing Secure Partition FF-A Version.\n");
1320 		return ret;
1321 	}
1322 
1323 	sp->ffa_version = config_32;
1324 
1325 	ret = fdt_read_uint32(sp_manifest, node, "execution-state", &config_32);
1326 	if (ret != 0) {
1327 		ERROR("Missing Secure Partition Execution State.\n");
1328 		return ret;
1329 	}
1330 
1331 	sp->execution_state = config_32;
1332 
1333 	ret = fdt_read_uint32(sp_manifest, node,
1334 			      "messaging-method", &config_32);
1335 	if (ret != 0) {
1336 		ERROR("Missing Secure Partition messaging method.\n");
1337 		return ret;
1338 	}
1339 
1340 	/* Validate this entry, we currently only support direct messaging. */
1341 	if ((config_32 & ~(FFA_PARTITION_DIRECT_REQ_RECV |
1342 			  FFA_PARTITION_DIRECT_REQ_SEND)) != 0U) {
1343 		WARN("Invalid Secure Partition messaging method (0x%x)\n",
1344 		     config_32);
1345 		return -EINVAL;
1346 	}
1347 
1348 	sp->properties = config_32;
1349 
1350 	ret = fdt_read_uint32(sp_manifest, node,
1351 			      "execution-ctx-count", &config_32);
1352 
1353 	if (ret != 0) {
1354 		ERROR("Missing SP Execution Context Count.\n");
1355 		return ret;
1356 	}
1357 
1358 	/*
1359 	 * Ensure this field is set correctly in the manifest however
1360 	 * since this is currently a hardcoded value for S-EL1 partitions
1361 	 * we don't need to save it here, just validate.
1362 	 */
1363 	if (config_32 != PLATFORM_CORE_COUNT) {
1364 		ERROR("SP Execution Context Count (%u) must be %u.\n",
1365 			config_32, PLATFORM_CORE_COUNT);
1366 		return -EINVAL;
1367 	}
1368 
1369 	/*
1370 	 * Look for the optional fields that are expected to be present in
1371 	 * an SP manifest.
1372 	 */
1373 	ret = fdt_read_uint32(sp_manifest, node, "id", &config_32);
1374 	if (ret != 0) {
1375 		WARN("Missing Secure Partition ID.\n");
1376 	} else {
1377 		if (!is_ffa_secure_id_valid(config_32)) {
1378 			ERROR("Invalid Secure Partition ID (0x%x).\n",
1379 			      config_32);
1380 			return -EINVAL;
1381 		}
1382 		sp->sp_id = config_32;
1383 	}
1384 
1385 	ret = fdt_read_uint32(sp_manifest, node,
1386 			      "power-management-messages", &config_32);
1387 	if (ret != 0) {
1388 		WARN("Missing Power Management Messages entry.\n");
1389 	} else {
1390 		/*
1391 		 * Ensure only the currently supported power messages have
1392 		 * been requested.
1393 		 */
1394 		if (config_32 & ~(FFA_PM_MSG_SUB_CPU_OFF |
1395 				  FFA_PM_MSG_SUB_CPU_SUSPEND |
1396 				  FFA_PM_MSG_SUB_CPU_SUSPEND_RESUME)) {
1397 			ERROR("Requested unsupported PM messages (%x)\n",
1398 			      config_32);
1399 			return -EINVAL;
1400 		}
1401 		sp->pwr_mgmt_msgs = config_32;
1402 	}
1403 
1404 	return 0;
1405 }
1406 
1407 /*******************************************************************************
1408  * This function gets the Secure Partition Manifest base and maps the manifest
1409  * region.
1410  * Currently only one Secure Partition manifest is considered which is used to
1411  * prepare the context for the single Secure Partition.
1412  ******************************************************************************/
1413 static int find_and_prepare_sp_context(void)
1414 {
1415 	void *sp_manifest;
1416 	uintptr_t manifest_base;
1417 	uintptr_t manifest_base_align;
1418 	entry_point_info_t *next_image_ep_info;
1419 	int32_t ret;
1420 	struct secure_partition_desc *sp;
1421 
1422 	next_image_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
1423 	if (next_image_ep_info == NULL) {
1424 		WARN("No Secure Partition image provided by BL2.\n");
1425 		return -ENOENT;
1426 	}
1427 
1428 	sp_manifest = (void *)next_image_ep_info->args.arg0;
1429 	if (sp_manifest == NULL) {
1430 		WARN("Secure Partition manifest absent.\n");
1431 		return -ENOENT;
1432 	}
1433 
1434 	manifest_base = (uintptr_t)sp_manifest;
1435 	manifest_base_align = page_align(manifest_base, DOWN);
1436 
1437 	/*
1438 	 * Map the secure partition manifest region in the EL3 translation
1439 	 * regime.
1440 	 * Map an area equal to (2 * PAGE_SIZE) for now. During manifest base
1441 	 * alignment the region of 1 PAGE_SIZE from manifest align base may
1442 	 * not completely accommodate the secure partition manifest region.
1443 	 */
1444 	ret = mmap_add_dynamic_region((unsigned long long)manifest_base_align,
1445 				      manifest_base_align,
1446 				      PAGE_SIZE * 2,
1447 				      MT_RO_DATA);
1448 	if (ret != 0) {
1449 		ERROR("Error while mapping SP manifest (%d).\n", ret);
1450 		return ret;
1451 	}
1452 
1453 	ret = fdt_node_offset_by_compatible(sp_manifest, -1,
1454 					    "arm,ffa-manifest-1.0");
1455 	if (ret < 0) {
1456 		ERROR("Error happened in SP manifest reading.\n");
1457 		return -EINVAL;
1458 	}
1459 
1460 	/*
1461 	 * Store the size of the manifest so that it can be used later to pass
1462 	 * the manifest as boot information later.
1463 	 */
1464 	next_image_ep_info->args.arg1 = fdt_totalsize(sp_manifest);
1465 	INFO("Manifest size = %lu bytes.\n", next_image_ep_info->args.arg1);
1466 
1467 	/*
1468 	 * Select an SP descriptor for initialising the partition's execution
1469 	 * context on the primary CPU.
1470 	 */
1471 	sp = spmc_get_current_sp_ctx();
1472 
1473 	/* Initialize entry point information for the SP */
1474 	SET_PARAM_HEAD(next_image_ep_info, PARAM_EP, VERSION_1,
1475 		       SECURE | EP_ST_ENABLE);
1476 
1477 	/* Parse the SP manifest. */
1478 	ret = sp_manifest_parse(sp_manifest, ret, sp, next_image_ep_info);
1479 	if (ret != 0) {
1480 		ERROR("Error in Secure Partition manifest parsing.\n");
1481 		return ret;
1482 	}
1483 
1484 	/* Check that the runtime EL in the manifest was correct. */
1485 	if (sp->runtime_el != S_EL1) {
1486 		ERROR("Unexpected runtime EL: %d\n", sp->runtime_el);
1487 		return -EINVAL;
1488 	}
1489 
1490 	/* Perform any common initialisation. */
1491 	spmc_sp_common_setup(sp, next_image_ep_info);
1492 
1493 	/* Perform any initialisation specific to S-EL1 SPs. */
1494 	spmc_el1_sp_setup(sp, next_image_ep_info);
1495 
1496 	/* Initialize the SP context with the required ep info. */
1497 	spmc_sp_common_ep_commit(sp, next_image_ep_info);
1498 
1499 	return 0;
1500 }
1501 
1502 /*******************************************************************************
1503  * This function takes an SP context pointer and performs a synchronous entry
1504  * into it.
1505  ******************************************************************************/
1506 static int32_t logical_sp_init(void)
1507 {
1508 	int32_t rc = 0;
1509 	struct el3_lp_desc *el3_lp_descs;
1510 
1511 	/* Perform initial validation of the Logical Partitions. */
1512 	rc = el3_sp_desc_validate();
1513 	if (rc != 0) {
1514 		ERROR("Logical Partition validation failed!\n");
1515 		return rc;
1516 	}
1517 
1518 	el3_lp_descs = get_el3_lp_array();
1519 
1520 	INFO("Logical Secure Partition init start.\n");
1521 	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
1522 		rc = el3_lp_descs[i].init();
1523 		if (rc != 0) {
1524 			ERROR("Logical SP (0x%x) Failed to Initialize\n",
1525 			      el3_lp_descs[i].sp_id);
1526 			return rc;
1527 		}
1528 		VERBOSE("Logical SP (0x%x) Initialized\n",
1529 			      el3_lp_descs[i].sp_id);
1530 	}
1531 
1532 	INFO("Logical Secure Partition init completed.\n");
1533 
1534 	return rc;
1535 }
1536 
1537 uint64_t spmc_sp_synchronous_entry(struct sp_exec_ctx *ec)
1538 {
1539 	uint64_t rc;
1540 
1541 	assert(ec != NULL);
1542 
1543 	/* Assign the context of the SP to this CPU */
1544 	cm_set_context(&(ec->cpu_ctx), SECURE);
1545 
1546 	/* Restore the context assigned above */
1547 	cm_el1_sysregs_context_restore(SECURE);
1548 	cm_set_next_eret_context(SECURE);
1549 
1550 	/* Invalidate TLBs at EL1. */
1551 	tlbivmalle1();
1552 	dsbish();
1553 
1554 	/* Enter Secure Partition */
1555 	rc = spm_secure_partition_enter(&ec->c_rt_ctx);
1556 
1557 	/* Save secure state */
1558 	cm_el1_sysregs_context_save(SECURE);
1559 
1560 	return rc;
1561 }
1562 
1563 /*******************************************************************************
1564  * SPMC Helper Functions.
1565  ******************************************************************************/
1566 static int32_t sp_init(void)
1567 {
1568 	uint64_t rc;
1569 	struct secure_partition_desc *sp;
1570 	struct sp_exec_ctx *ec;
1571 
1572 	sp = spmc_get_current_sp_ctx();
1573 	ec = spmc_get_sp_ec(sp);
1574 	ec->rt_model = RT_MODEL_INIT;
1575 	ec->rt_state = RT_STATE_RUNNING;
1576 
1577 	INFO("Secure Partition (0x%x) init start.\n", sp->sp_id);
1578 
1579 	rc = spmc_sp_synchronous_entry(ec);
1580 	if (rc != 0) {
1581 		/* Indicate SP init was not successful. */
1582 		ERROR("SP (0x%x) failed to initialize (%lu).\n",
1583 		      sp->sp_id, rc);
1584 		return 0;
1585 	}
1586 
1587 	ec->rt_state = RT_STATE_WAITING;
1588 	INFO("Secure Partition initialized.\n");
1589 
1590 	return 1;
1591 }
1592 
1593 static void initalize_sp_descs(void)
1594 {
1595 	struct secure_partition_desc *sp;
1596 
1597 	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
1598 		sp = &sp_desc[i];
1599 		sp->sp_id = INV_SP_ID;
1600 		sp->mailbox.rx_buffer = NULL;
1601 		sp->mailbox.tx_buffer = NULL;
1602 		sp->mailbox.state = MAILBOX_STATE_EMPTY;
1603 		sp->secondary_ep = 0;
1604 	}
1605 }
1606 
1607 static void initalize_ns_ep_descs(void)
1608 {
1609 	struct ns_endpoint_desc *ns_ep;
1610 
1611 	for (unsigned int i = 0U; i < NS_PARTITION_COUNT; i++) {
1612 		ns_ep = &ns_ep_desc[i];
1613 		/*
1614 		 * Clashes with the Hypervisor ID but will not be a
1615 		 * problem in practice.
1616 		 */
1617 		ns_ep->ns_ep_id = 0;
1618 		ns_ep->ffa_version = 0;
1619 		ns_ep->mailbox.rx_buffer = NULL;
1620 		ns_ep->mailbox.tx_buffer = NULL;
1621 		ns_ep->mailbox.state = MAILBOX_STATE_EMPTY;
1622 	}
1623 }
1624 
1625 /*******************************************************************************
1626  * Initialize SPMC attributes for the SPMD.
1627  ******************************************************************************/
1628 void spmc_populate_attrs(spmc_manifest_attribute_t *spmc_attrs)
1629 {
1630 	spmc_attrs->major_version = FFA_VERSION_MAJOR;
1631 	spmc_attrs->minor_version = FFA_VERSION_MINOR;
1632 	spmc_attrs->exec_state = MODE_RW_64;
1633 	spmc_attrs->spmc_id = FFA_SPMC_ID;
1634 }
1635 
1636 /*******************************************************************************
1637  * Initialize contexts of all Secure Partitions.
1638  ******************************************************************************/
1639 int32_t spmc_setup(void)
1640 {
1641 	int32_t ret;
1642 
1643 	/* Initialize endpoint descriptors */
1644 	initalize_sp_descs();
1645 	initalize_ns_ep_descs();
1646 
1647 	/* Setup logical SPs. */
1648 	ret = logical_sp_init();
1649 	if (ret != 0) {
1650 		ERROR("Failed to initialize Logical Partitions.\n");
1651 		return ret;
1652 	}
1653 
1654 	/* Perform physical SP setup. */
1655 
1656 	/* Disable MMU at EL1 (initialized by BL2) */
1657 	disable_mmu_icache_el1();
1658 
1659 	/* Initialize context of the SP */
1660 	INFO("Secure Partition context setup start.\n");
1661 
1662 	ret = find_and_prepare_sp_context();
1663 	if (ret != 0) {
1664 		ERROR("Error in SP finding and context preparation.\n");
1665 		return ret;
1666 	}
1667 
1668 	/* Register power management hooks with PSCI */
1669 	psci_register_spd_pm_hook(&spmc_pm);
1670 
1671 	/* Register init function for deferred init.  */
1672 	bl31_register_bl32_init(&sp_init);
1673 
1674 	INFO("Secure Partition setup done.\n");
1675 
1676 	return 0;
1677 }
1678 
1679 /*******************************************************************************
1680  * Secure Partition Manager SMC handler.
1681  ******************************************************************************/
1682 uint64_t spmc_smc_handler(uint32_t smc_fid,
1683 			  bool secure_origin,
1684 			  uint64_t x1,
1685 			  uint64_t x2,
1686 			  uint64_t x3,
1687 			  uint64_t x4,
1688 			  void *cookie,
1689 			  void *handle,
1690 			  uint64_t flags)
1691 {
1692 	switch (smc_fid) {
1693 
1694 	case FFA_VERSION:
1695 		return ffa_version_handler(smc_fid, secure_origin, x1, x2, x3,
1696 					   x4, cookie, handle, flags);
1697 
1698 	case FFA_ID_GET:
1699 		return ffa_id_get_handler(smc_fid, secure_origin, x1, x2, x3,
1700 					  x4, cookie, handle, flags);
1701 
1702 	case FFA_FEATURES:
1703 		return ffa_features_handler(smc_fid, secure_origin, x1, x2, x3,
1704 					    x4, cookie, handle, flags);
1705 
1706 	case FFA_SECONDARY_EP_REGISTER_SMC64:
1707 		return ffa_sec_ep_register_handler(smc_fid, secure_origin, x1,
1708 						   x2, x3, x4, cookie, handle,
1709 						   flags);
1710 
1711 	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
1712 	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
1713 		return direct_req_smc_handler(smc_fid, secure_origin, x1, x2,
1714 					      x3, x4, cookie, handle, flags);
1715 
1716 	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
1717 	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
1718 		return direct_resp_smc_handler(smc_fid, secure_origin, x1, x2,
1719 					       x3, x4, cookie, handle, flags);
1720 
1721 	case FFA_RXTX_MAP_SMC32:
1722 	case FFA_RXTX_MAP_SMC64:
1723 		return rxtx_map_handler(smc_fid, secure_origin, x1, x2, x3, x4,
1724 					cookie, handle, flags);
1725 
1726 	case FFA_RXTX_UNMAP:
1727 		return rxtx_unmap_handler(smc_fid, secure_origin, x1, x2, x3,
1728 					  x4, cookie, handle, flags);
1729 
1730 	case FFA_PARTITION_INFO_GET:
1731 		return partition_info_get_handler(smc_fid, secure_origin, x1,
1732 						  x2, x3, x4, cookie, handle,
1733 						  flags);
1734 
1735 	case FFA_RX_RELEASE:
1736 		return rx_release_handler(smc_fid, secure_origin, x1, x2, x3,
1737 					  x4, cookie, handle, flags);
1738 
1739 	case FFA_MSG_WAIT:
1740 		return msg_wait_handler(smc_fid, secure_origin, x1, x2, x3, x4,
1741 					cookie, handle, flags);
1742 
1743 	case FFA_ERROR:
1744 		return ffa_error_handler(smc_fid, secure_origin, x1, x2, x3, x4,
1745 					cookie, handle, flags);
1746 
1747 	case FFA_MSG_RUN:
1748 		return ffa_run_handler(smc_fid, secure_origin, x1, x2, x3, x4,
1749 				       cookie, handle, flags);
1750 	default:
1751 		WARN("Unsupported FF-A call 0x%08x.\n", smc_fid);
1752 		break;
1753 	}
1754 	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1755 }
1756