xref: /rk3399_ARM-atf/services/std_svc/spm/el3_spmc/spmc_main.c (revision f74e27723bb54ad1318fa462fbcff70af555b2e6)
1 /*
2  * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <errno.h>
9 
10 #include <arch_helpers.h>
11 #include <bl31/bl31.h>
12 #include <bl31/ehf.h>
13 #include <common/debug.h>
14 #include <common/fdt_wrappers.h>
15 #include <common/runtime_svc.h>
16 #include <common/uuid.h>
17 #include <lib/el3_runtime/context_mgmt.h>
18 #include <lib/smccc.h>
19 #include <lib/utils.h>
20 #include <lib/xlat_tables/xlat_tables_v2.h>
21 #include <libfdt.h>
22 #include <plat/common/platform.h>
23 #include <services/el3_spmc_logical_sp.h>
24 #include <services/ffa_svc.h>
25 #include <services/spmc_svc.h>
26 #include <services/spmd_svc.h>
27 #include "spmc.h"
28 
29 #include <platform_def.h>
30 
31 /* Declare the maximum number of SPs and EL3 LPs. */
32 #define MAX_SP_LP_PARTITIONS (SECURE_PARTITION_COUNT + MAX_EL3_LP_DESCS_COUNT)
33 
34 /*
35  * Allocate a secure partition descriptor to describe each SP in the system that
36  * does not reside at EL3.
37  */
38 static struct secure_partition_desc sp_desc[SECURE_PARTITION_COUNT];
39 
40 /*
41  * Allocate an NS endpoint descriptor to describe each VM and the Hypervisor in
42  * the system that interacts with an SP. It is used to track the Hypervisor
43  * buffer pair, version and ID for now. It could be extended to track VM
44  * properties when the SPMC supports indirect messaging.
45  */
46 static struct ns_endpoint_desc ns_ep_desc[NS_PARTITION_COUNT];
47 
48 /*
49  * Helper function to obtain the array storing the EL3
50  * Logical Partition descriptors.
51  */
52 struct el3_lp_desc *get_el3_lp_array(void)
53 {
54 	return (struct el3_lp_desc *) EL3_LP_DESCS_START;
55 }
56 
57 /*
58  * Helper function to obtain the descriptor of the last SP to which control was
59  * handed on this physical cpu. Currently, we assume there is only one SP.
60  * TODO: Expand to track multiple partitions when required.
61  */
62 struct secure_partition_desc *spmc_get_current_sp_ctx(void)
63 {
64 	return &(sp_desc[ACTIVE_SP_DESC_INDEX]);
65 }
66 
67 /*
68  * Helper function to obtain the execution context of an SP on the
69  * current physical cpu.
70  */
71 struct sp_exec_ctx *spmc_get_sp_ec(struct secure_partition_desc *sp)
72 {
73 	return &(sp->ec[get_ec_index(sp)]);
74 }
75 
76 /* Helper function to get pointer to SP context from its ID. */
77 struct secure_partition_desc *spmc_get_sp_ctx(uint16_t id)
78 {
79 	/* Check for Secure World Partitions. */
80 	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
81 		if (sp_desc[i].sp_id == id) {
82 			return &(sp_desc[i]);
83 		}
84 	}
85 	return NULL;
86 }
87 
88 /*
89  * Helper function to obtain the descriptor of the Hypervisor or OS kernel.
90  * We assume that the first descriptor is reserved for this entity.
91  */
92 struct ns_endpoint_desc *spmc_get_hyp_ctx(void)
93 {
94 	return &(ns_ep_desc[0]);
95 }
96 
97 /*
98  * Helper function to obtain the RX/TX buffer pair descriptor of the Hypervisor
99  * or OS kernel in the normal world or the last SP that was run.
100  */
101 struct mailbox *spmc_get_mbox_desc(bool secure_origin)
102 {
103 	/* Obtain the RX/TX buffer pair descriptor. */
104 	if (secure_origin) {
105 		return &(spmc_get_current_sp_ctx()->mailbox);
106 	} else {
107 		return &(spmc_get_hyp_ctx()->mailbox);
108 	}
109 }
110 
111 /******************************************************************************
112  * This function returns to the place where spmc_sp_synchronous_entry() was
113  * called originally.
114  ******************************************************************************/
115 __dead2 void spmc_sp_synchronous_exit(struct sp_exec_ctx *ec, uint64_t rc)
116 {
117 	/*
118 	 * The SPM must have initiated the original request through a
119 	 * synchronous entry into the secure partition. Jump back to the
120 	 * original C runtime context with the value of rc in x0.
121 	 */
122 	spm_secure_partition_exit(ec->c_rt_ctx, rc);
123 
124 	panic();
125 }
126 
127 /*******************************************************************************
128  * Return FFA_ERROR with specified error code.
129  ******************************************************************************/
130 uint64_t spmc_ffa_error_return(void *handle, int error_code)
131 {
132 	SMC_RET8(handle, FFA_ERROR,
133 		 FFA_TARGET_INFO_MBZ, error_code,
134 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
135 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
136 }
137 
138 /******************************************************************************
139  * Helper function to validate a secure partition ID to ensure it does not
140  * conflict with any other FF-A component and follows the convention to
141  * indicate it resides within the secure world.
142  ******************************************************************************/
143 bool is_ffa_secure_id_valid(uint16_t partition_id)
144 {
145 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
146 
147 	/* Ensure the ID is not the invalid partition ID. */
148 	if (partition_id == INV_SP_ID) {
149 		return false;
150 	}
151 
152 	/* Ensure the ID is not the SPMD ID. */
153 	if (partition_id == SPMD_DIRECT_MSG_ENDPOINT_ID) {
154 		return false;
155 	}
156 
157 	/*
158 	 * Ensure the ID follows the convention to indicate it resides
159 	 * in the secure world.
160 	 */
161 	if (!ffa_is_secure_world_id(partition_id)) {
162 		return false;
163 	}
164 
165 	/* Ensure we don't conflict with the SPMC partition ID. */
166 	if (partition_id == FFA_SPMC_ID) {
167 		return false;
168 	}
169 
170 	/* Ensure we do not already have an SP context with this ID. */
171 	if (spmc_get_sp_ctx(partition_id)) {
172 		return false;
173 	}
174 
175 	/* Ensure we don't clash with any Logical SPs. */
176 	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
177 		if (el3_lp_descs[i].sp_id == partition_id) {
178 			return false;
179 		}
180 	}
181 
182 	return true;
183 }
184 
185 /*******************************************************************************
186  * This function either forwards the request to the other world or returns
187  * with an ERET depending on the source of the call.
188  * We can assume that the destination is for an entity at a lower exception
189  * level as any messages destined for a logical SP resident in EL3 will have
190  * already been taken care of by the SPMC before entering this function.
191  ******************************************************************************/
192 static uint64_t spmc_smc_return(uint32_t smc_fid,
193 				bool secure_origin,
194 				uint64_t x1,
195 				uint64_t x2,
196 				uint64_t x3,
197 				uint64_t x4,
198 				void *handle,
199 				void *cookie,
200 				uint64_t flags,
201 				uint16_t dst_id)
202 {
203 	/* If the destination is in the normal world always go via the SPMD. */
204 	if (ffa_is_normal_world_id(dst_id)) {
205 		return spmd_smc_handler(smc_fid, x1, x2, x3, x4,
206 					cookie, handle, flags);
207 	}
208 	/*
209 	 * If the caller is secure and we want to return to the secure world,
210 	 * ERET directly.
211 	 */
212 	else if (secure_origin && ffa_is_secure_world_id(dst_id)) {
213 		SMC_RET5(handle, smc_fid, x1, x2, x3, x4);
214 	}
215 	/* If we originated in the normal world then switch contexts. */
216 	else if (!secure_origin && ffa_is_secure_world_id(dst_id)) {
217 		return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2,
218 					     x3, x4, handle);
219 	} else {
220 		/* Unknown State. */
221 		panic();
222 	}
223 
224 	/* Shouldn't be Reached. */
225 	return 0;
226 }
227 
228 /*******************************************************************************
229  * FF-A ABI Handlers.
230  ******************************************************************************/
231 
232 /*******************************************************************************
233  * Helper function to validate arg2 as part of a direct message.
234  ******************************************************************************/
235 static inline bool direct_msg_validate_arg2(uint64_t x2)
236 {
237 	/*
238 	 * We currently only support partition messages, therefore ensure x2 is
239 	 * not set.
240 	 */
241 	if (x2 != (uint64_t) 0) {
242 		VERBOSE("Arg2 MBZ for partition messages (0x%lx).\n", x2);
243 		return false;
244 	}
245 	return true;
246 }
247 
248 /*******************************************************************************
249  * Handle direct request messages and route to the appropriate destination.
250  ******************************************************************************/
251 static uint64_t direct_req_smc_handler(uint32_t smc_fid,
252 				       bool secure_origin,
253 				       uint64_t x1,
254 				       uint64_t x2,
255 				       uint64_t x3,
256 				       uint64_t x4,
257 				       void *cookie,
258 				       void *handle,
259 				       uint64_t flags)
260 {
261 	uint16_t dst_id = ffa_endpoint_destination(x1);
262 	struct el3_lp_desc *el3_lp_descs;
263 	struct secure_partition_desc *sp;
264 	unsigned int idx;
265 
266 	/* Check if arg2 has been populated correctly based on message type. */
267 	if (!direct_msg_validate_arg2(x2)) {
268 		return spmc_ffa_error_return(handle,
269 					     FFA_ERROR_INVALID_PARAMETER);
270 	}
271 
272 	el3_lp_descs = get_el3_lp_array();
273 
274 	/* Check if the request is destined for a Logical Partition. */
275 	for (unsigned int i = 0U; i < MAX_EL3_LP_DESCS_COUNT; i++) {
276 		if (el3_lp_descs[i].sp_id == dst_id) {
277 			return el3_lp_descs[i].direct_req(
278 					smc_fid, secure_origin, x1, x2, x3, x4,
279 					cookie, handle, flags);
280 		}
281 	}
282 
283 	/*
284 	 * If the request was not targeted to an LSP and originated from the secure
285 	 * world then it is invalid, since an SP cannot call into the Normal world
286 	 * and there is no other SP to call into. If there are other SPs in future
287 	 * then the partition runtime model would need to be validated as well.
288 	 */
289 	if (secure_origin) {
290 		VERBOSE("Direct request not supported to the Normal World.\n");
291 		return spmc_ffa_error_return(handle,
292 					     FFA_ERROR_INVALID_PARAMETER);
293 	}
294 
295 	/* Check if the SP ID is valid. */
296 	sp = spmc_get_sp_ctx(dst_id);
297 	if (sp == NULL) {
298 		VERBOSE("Direct request to unknown partition ID (0x%x).\n",
299 			dst_id);
300 		return spmc_ffa_error_return(handle,
301 					     FFA_ERROR_INVALID_PARAMETER);
302 	}
303 
304 	/*
305 	 * Check that the target execution context is in a waiting state before
306 	 * forwarding the direct request to it.
307 	 */
308 	idx = get_ec_index(sp);
309 	if (sp->ec[idx].rt_state != RT_STATE_WAITING) {
310 		VERBOSE("SP context on core%u is not waiting (%u).\n",
311 			idx, sp->ec[idx].rt_model);
312 		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
313 	}
314 
315 	/*
316 	 * Everything checks out so forward the request to the SP after updating
317 	 * its state and runtime model.
318 	 */
319 	sp->ec[idx].rt_state = RT_STATE_RUNNING;
320 	sp->ec[idx].rt_model = RT_MODEL_DIR_REQ;
321 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
322 			       handle, cookie, flags, dst_id);
323 }
324 
325 /*******************************************************************************
326  * Handle direct response messages and route to the appropriate destination.
327  ******************************************************************************/
328 static uint64_t direct_resp_smc_handler(uint32_t smc_fid,
329 					bool secure_origin,
330 					uint64_t x1,
331 					uint64_t x2,
332 					uint64_t x3,
333 					uint64_t x4,
334 					void *cookie,
335 					void *handle,
336 					uint64_t flags)
337 {
338 	uint16_t dst_id = ffa_endpoint_destination(x1);
339 	struct secure_partition_desc *sp;
340 	unsigned int idx;
341 
342 	/* Check if arg2 has been populated correctly based on message type. */
343 	if (!direct_msg_validate_arg2(x2)) {
344 		return spmc_ffa_error_return(handle,
345 					     FFA_ERROR_INVALID_PARAMETER);
346 	}
347 
348 	/* Check that the response did not originate from the Normal world. */
349 	if (!secure_origin) {
350 		VERBOSE("Direct Response not supported from Normal World.\n");
351 		return spmc_ffa_error_return(handle,
352 					     FFA_ERROR_INVALID_PARAMETER);
353 	}
354 
355 	/*
356 	 * Check that the response is either targeted to the Normal world or the
357 	 * SPMC, e.g. a PM response.
358 	 */
359 	if ((dst_id != FFA_SPMC_ID) && ffa_is_secure_world_id(dst_id)) {
360 		VERBOSE("Direct response to invalid partition ID (0x%x).\n",
361 			dst_id);
362 		return spmc_ffa_error_return(handle,
363 					     FFA_ERROR_INVALID_PARAMETER);
364 	}
365 
366 	/* Obtain the SP descriptor and update its runtime state. */
367 	sp = spmc_get_sp_ctx(ffa_endpoint_source(x1));
368 	if (sp == NULL) {
369 		VERBOSE("Direct response to unknown partition ID (0x%x).\n",
370 			dst_id);
371 		return spmc_ffa_error_return(handle,
372 					     FFA_ERROR_INVALID_PARAMETER);
373 	}
374 
375 	/* Sanity check state is being tracked correctly in the SPMC. */
376 	idx = get_ec_index(sp);
377 	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
378 
379 	/* Ensure SP execution context was in the right runtime model. */
380 	if (sp->ec[idx].rt_model != RT_MODEL_DIR_REQ) {
381 		VERBOSE("SP context on core%u not handling direct req (%u).\n",
382 			idx, sp->ec[idx].rt_model);
383 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
384 	}
385 
386 	/* Update the state of the SP execution context. */
387 	sp->ec[idx].rt_state = RT_STATE_WAITING;
388 
389 	/*
390 	 * If the receiver is the SPMC then perform a synchronous exit back to
391 	 * where the SP was entered, otherwise forward the response to the Normal world.
392 	 */
393 	if (dst_id == FFA_SPMC_ID) {
394 		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
395 		/* Should not get here. */
396 		panic();
397 	}
398 
399 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
400 			       handle, cookie, flags, dst_id);
401 }
402 
403 /*******************************************************************************
404  * This function handles the FFA_MSG_WAIT SMC to allow an SP to relinquish its
405  * cycles.
406  ******************************************************************************/
407 static uint64_t msg_wait_handler(uint32_t smc_fid,
408 				 bool secure_origin,
409 				 uint64_t x1,
410 				 uint64_t x2,
411 				 uint64_t x3,
412 				 uint64_t x4,
413 				 void *cookie,
414 				 void *handle,
415 				 uint64_t flags)
416 {
417 	struct secure_partition_desc *sp;
418 	unsigned int idx;
419 
420 	/*
421 	 * Check that the call did not originate from the Normal world as
422 	 * only the secure world can call this ABI.
423 	 */
424 	if (!secure_origin) {
425 		VERBOSE("Normal world cannot call FFA_MSG_WAIT.\n");
426 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
427 	}
428 
429 	/* Get the descriptor of the SP that invoked FFA_MSG_WAIT. */
430 	sp = spmc_get_current_sp_ctx();
431 	if (sp == NULL) {
432 		return spmc_ffa_error_return(handle,
433 					     FFA_ERROR_INVALID_PARAMETER);
434 	}
435 
436 	/*
437 	 * Get the execution context of the SP that invoked FFA_MSG_WAIT.
438 	 */
439 	idx = get_ec_index(sp);
440 
441 	/* Ensure SP execution context was in the right runtime model. */
442 	if (sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) {
443 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
444 	}
445 
446 	/* Sanity check the state is being tracked correctly in the SPMC. */
447 	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
448 
449 	/*
450 	 * Perform a synchronous exit if the partition was initialising. The
451 	 * state is updated after the exit.
452 	 */
453 	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
454 		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
455 		/* Should not get here */
456 		panic();
457 	}
458 
459 	/* Update the state of the SP execution context. */
460 	sp->ec[idx].rt_state = RT_STATE_WAITING;
461 
462 	/* Resume normal world if a secure interrupt was handled. */
463 	if (sp->ec[idx].rt_model == RT_MODEL_INTR) {
464 		/* FFA_MSG_WAIT can only be called from the secure world. */
465 		unsigned int secure_state_in = SECURE;
466 		unsigned int secure_state_out = NON_SECURE;
467 
468 		cm_el1_sysregs_context_save(secure_state_in);
469 		cm_el1_sysregs_context_restore(secure_state_out);
470 		cm_set_next_eret_context(secure_state_out);
471 		SMC_RET0(cm_get_context(secure_state_out));
472 	}
473 
474 	/* Forward the response to the Normal world. */
475 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
476 			       handle, cookie, flags, FFA_NWD_ID);
477 }
478 
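/*******************************************************************************
 * Handle an FFA_ERROR invocation from an SP. This is only expected while an SP
 * is initialising, in which case a synchronous exit is performed so that the
 * original synchronous entry (sp_init()) can observe the failure; any other
 * use is rejected.
 ******************************************************************************/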
479 static uint64_t ffa_error_handler(uint32_t smc_fid,
480 				 bool secure_origin,
481 				 uint64_t x1,
482 				 uint64_t x2,
483 				 uint64_t x3,
484 				 uint64_t x4,
485 				 void *cookie,
486 				 void *handle,
487 				 uint64_t flags)
488 {
489 	struct secure_partition_desc *sp;
490 	unsigned int idx;
491 
492 	/* Check that the call did not originate from the Normal world. */
493 	if (!secure_origin) {
494 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
495 	}
496 
497 	/* Get the descriptor of the SP that invoked FFA_ERROR. */
498 	sp = spmc_get_current_sp_ctx();
499 	if (sp == NULL) {
500 		return spmc_ffa_error_return(handle,
501 					     FFA_ERROR_INVALID_PARAMETER);
502 	}
503 
504 	/* Get the execution context of the SP that invoked FFA_ERROR. */
505 	idx = get_ec_index(sp);
506 
507 	/*
508 	 * We only expect FFA_ERROR to be received during SP initialisation;
509 	 * otherwise this is an invalid call.
510 	 */
511 	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
512 		ERROR("SP 0x%x failed to initialize.\n", sp->sp_id);
513 		spmc_sp_synchronous_exit(&sp->ec[idx], x2);
514 		/* Should not get here. */
515 		panic();
516 	}
517 
518 	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
519 }
520 
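/*******************************************************************************
 * Handle an FFA_VERSION request. The requested version is validated and, for
 * normal world callers, recorded in the Hypervisor endpoint descriptor before
 * the version supported by this SPMC is returned.
 ******************************************************************************/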
521 static uint64_t ffa_version_handler(uint32_t smc_fid,
522 				    bool secure_origin,
523 				    uint64_t x1,
524 				    uint64_t x2,
525 				    uint64_t x3,
526 				    uint64_t x4,
527 				    void *cookie,
528 				    void *handle,
529 				    uint64_t flags)
530 {
531 	uint32_t requested_version = x1 & FFA_VERSION_MASK;
532 
533 	if (requested_version & FFA_VERSION_BIT31_MASK) {
534 		/* Invalid encoding, return an error. */
535 		SMC_RET1(handle, FFA_ERROR_NOT_SUPPORTED);
536 		/* Execution stops here. */
537 	}
538 
539 	/* Determine the caller to store the requested version. */
540 	if (secure_origin) {
541 		/*
542 		 * Ensure that the SP is reporting the same version as
543 		 * specified in its manifest. If these do not match there is
544 		 * something wrong with the SP.
545 		 * TODO: Should we abort the SP? For now assert this is not
546 		 *       the case.
547 		 */
548 		assert(requested_version ==
549 		       spmc_get_current_sp_ctx()->ffa_version);
550 	} else {
551 		/*
552 		 * If this is called by the normal world, record this
553 		 * information in its descriptor.
554 		 */
555 		spmc_get_hyp_ctx()->ffa_version = requested_version;
556 	}
557 
558 	SMC_RET1(handle, MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
559 					  FFA_VERSION_MINOR));
560 }
561 
562 /*******************************************************************************
563  * Helper function to obtain the FF-A version of the calling partition.
564  ******************************************************************************/
565 uint32_t get_partition_ffa_version(bool secure_origin)
566 {
567 	if (secure_origin) {
568 		return spmc_get_current_sp_ctx()->ffa_version;
569 	} else {
570 		return spmc_get_hyp_ctx()->ffa_version;
571 	}
572 }
573 
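/*******************************************************************************
 * Handle FFA_RXTX_MAP. The caller's TX and RX buffers are validated and then
 * mapped into the EL3 translation regime (TX read-only, RX read-write) before
 * being recorded in the caller's mailbox descriptor.
 ******************************************************************************/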
574 static uint64_t rxtx_map_handler(uint32_t smc_fid,
575 				 bool secure_origin,
576 				 uint64_t x1,
577 				 uint64_t x2,
578 				 uint64_t x3,
579 				 uint64_t x4,
580 				 void *cookie,
581 				 void *handle,
582 				 uint64_t flags)
583 {
584 	int ret;
585 	uint32_t error_code;
586 	uint32_t mem_atts = secure_origin ? MT_SECURE : MT_NS;
587 	struct mailbox *mbox;
588 	uintptr_t tx_address = x1;
589 	uintptr_t rx_address = x2;
590 	uint32_t page_count = x3 & FFA_RXTX_PAGE_COUNT_MASK; /* Bits [5:0] */
591 	uint32_t buf_size = page_count * FFA_PAGE_SIZE;
592 
593 	/*
594 	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
595 	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
596 	 * ABI on behalf of a VM and reject it if this is the case.
597 	 */
598 	if (tx_address == 0 || rx_address == 0) {
599 		WARN("Mapping RX/TX Buffers on behalf of VM not supported.\n");
600 		return spmc_ffa_error_return(handle,
601 					     FFA_ERROR_INVALID_PARAMETER);
602 	}
603 
604 	/* Ensure the specified buffers are not the same. */
605 	if (tx_address == rx_address) {
606 		WARN("TX Buffer must not be the same as RX Buffer.\n");
607 		return spmc_ffa_error_return(handle,
608 					     FFA_ERROR_INVALID_PARAMETER);
609 	}
610 
611 	/* Ensure the buffer size is not 0. */
612 	if (buf_size == 0U) {
613 		WARN("Buffer size must not be 0\n");
614 		return spmc_ffa_error_return(handle,
615 					     FFA_ERROR_INVALID_PARAMETER);
616 	}
617 
618 	/*
619 	 * Ensure the buffer size is a multiple of the translation granule size
620 	 * in TF-A.
621 	 */
622 	if (buf_size % PAGE_SIZE != 0U) {
623 		WARN("Buffer size must be aligned to translation granule.\n");
624 		return spmc_ffa_error_return(handle,
625 					     FFA_ERROR_INVALID_PARAMETER);
626 	}
627 
628 	/* Obtain the RX/TX buffer pair descriptor. */
629 	mbox = spmc_get_mbox_desc(secure_origin);
630 
631 	spin_lock(&mbox->lock);
632 
633 	/* Check if buffers have already been mapped. */
634 	if (mbox->rx_buffer != 0 || mbox->tx_buffer != 0) {
635 		WARN("RX/TX Buffers already mapped (%p/%p)\n",
636 		     (void *) mbox->rx_buffer, (void *)mbox->tx_buffer);
637 		error_code = FFA_ERROR_DENIED;
638 		goto err;
639 	}
640 
641 	/* memmap the TX buffer as read only. */
642 	ret = mmap_add_dynamic_region(tx_address, /* PA */
643 			tx_address, /* VA */
644 			buf_size, /* size */
645 			mem_atts | MT_RO_DATA); /* attrs */
646 	if (ret != 0) {
647 		/* Return the correct error code. */
648 		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
649 						FFA_ERROR_INVALID_PARAMETER;
650 		WARN("Unable to map TX buffer: %d\n", error_code);
651 		goto err;
652 	}
653 
654 	/* memmap the RX buffer as read write. */
655 	ret = mmap_add_dynamic_region(rx_address, /* PA */
656 			rx_address, /* VA */
657 			buf_size, /* size */
658 			mem_atts | MT_RW_DATA); /* attrs */
659 
660 	if (ret != 0) {
661 		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
662 						FFA_ERROR_INVALID_PARAMETER;
663 		WARN("Unable to map RX buffer: %d\n", error_code);
664 		/* Unmap the TX buffer again. */
665 		mmap_remove_dynamic_region(tx_address, buf_size);
666 		goto err;
667 	}
668 
669 	mbox->tx_buffer = (void *) tx_address;
670 	mbox->rx_buffer = (void *) rx_address;
671 	mbox->rxtx_page_count = page_count;
672 	spin_unlock(&mbox->lock);
673 
674 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
675 	/* Execution stops here. */
676 err:
677 	spin_unlock(&mbox->lock);
678 	return spmc_ffa_error_return(handle, error_code);
679 }
680 
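/*******************************************************************************
 * Handle FFA_RXTX_UNMAP. The caller's RX/TX buffer pair is unmapped from the
 * EL3 translation regime and the corresponding mailbox descriptor is reset.
 ******************************************************************************/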
681 static uint64_t rxtx_unmap_handler(uint32_t smc_fid,
682 				   bool secure_origin,
683 				   uint64_t x1,
684 				   uint64_t x2,
685 				   uint64_t x3,
686 				   uint64_t x4,
687 				   void *cookie,
688 				   void *handle,
689 				   uint64_t flags)
690 {
691 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
692 	uint32_t buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
693 
694 	/*
695 	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
696 	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
697 	 * ABI on behalf of a VM and reject it if this is the case.
698 	 */
699 	if (x1 != 0UL) {
700 		return spmc_ffa_error_return(handle,
701 					     FFA_ERROR_INVALID_PARAMETER);
702 	}
703 
704 	spin_lock(&mbox->lock);
705 
706 	/* Check if buffers are currently mapped. */
707 	if (mbox->rx_buffer == 0 || mbox->tx_buffer == 0) {
708 		spin_unlock(&mbox->lock);
709 		return spmc_ffa_error_return(handle,
710 					     FFA_ERROR_INVALID_PARAMETER);
711 	}
712 
713 	/* Unmap RX Buffer */
714 	if (mmap_remove_dynamic_region((uintptr_t) mbox->rx_buffer,
715 				       buf_size) != 0) {
716 		WARN("Unable to unmap RX buffer!\n");
717 	}
718 
719 	mbox->rx_buffer = 0;
720 
721 	/* Unmap TX Buffer */
722 	if (mmap_remove_dynamic_region((uintptr_t) mbox->tx_buffer,
723 				       buf_size) != 0) {
724 		WARN("Unable to unmap TX buffer!\n");
725 	}
726 
727 	mbox->tx_buffer = 0;
728 	mbox->rxtx_page_count = 0;
729 
730 	spin_unlock(&mbox->lock);
731 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
732 }
733 
734 /*
735  * Collate the partition information in the v1.1 partition information
736  * descriptor format; this will be converted later if required.
737  */
738 static int partition_info_get_handler_v1_1(uint32_t *uuid,
739 					   struct ffa_partition_info_v1_1
740 						  *partitions,
741 					   uint32_t max_partitions,
742 					   uint32_t *partition_count)
743 {
744 	uint32_t index;
745 	struct ffa_partition_info_v1_1 *desc;
746 	bool null_uuid = is_null_uuid(uuid);
747 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
748 
749 	/* Deal with Logical Partitions. */
750 	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
751 		if (null_uuid || uuid_match(uuid, el3_lp_descs[index].uuid)) {
752 			/* Found a matching UUID, populate appropriately. */
753 			if (*partition_count >= max_partitions) {
754 				return FFA_ERROR_NO_MEMORY;
755 			}
756 
757 			desc = &partitions[*partition_count];
758 			desc->ep_id = el3_lp_descs[index].sp_id;
759 			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
760 			desc->properties = el3_lp_descs[index].properties;
761 			if (null_uuid) {
762 				copy_uuid(desc->uuid, el3_lp_descs[index].uuid);
763 			}
764 			(*partition_count)++;
765 		}
766 	}
767 
768 	/* Deal with physical SP's. */
769 	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
770 		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
771 			/* Found a matching UUID, populate appropriately. */
772 			if (*partition_count >= max_partitions) {
773 				return FFA_ERROR_NO_MEMORY;
774 			}
775 
776 			desc = &partitions[*partition_count];
777 			desc->ep_id = sp_desc[index].sp_id;
778 			/*
779 			 * Execution context count must match the number of
780 			 * cores for S-EL1 SPs.
781 			 */
782 			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
783 			desc->properties = sp_desc[index].properties;
784 			if (null_uuid) {
785 				copy_uuid(desc->uuid, sp_desc[index].uuid);
786 			}
787 			(*partition_count)++;
788 		}
789 	}
790 	return 0;
791 }
792 
793 /*
794  * Handle the case where the caller only wants the count of partitions
795  * matching a given UUID and does not want the corresponding descriptors
796  * populated.
797  */
798 static uint32_t partition_info_get_handler_count_only(uint32_t *uuid)
799 {
800 	uint32_t index = 0;
801 	uint32_t partition_count = 0;
802 	bool null_uuid = is_null_uuid(uuid);
803 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
804 
805 	/* Deal with Logical Partitions. */
806 	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
807 		if (null_uuid ||
808 		    uuid_match(uuid, el3_lp_descs[index].uuid)) {
809 			(partition_count)++;
810 		}
811 	}
812 
813 	/* Deal with physical SP's. */
814 	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
815 		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
816 			(partition_count)++;
817 		}
818 	}
819 	return partition_count;
820 }
821 
822 /*
823  * If the caller of the PARTITION_INFO_GET ABI was a v1.0 caller, populate
824  * the corresponding descriptor format from the v1.1 descriptor array.
825  */
826 static uint64_t partition_info_populate_v1_0(struct ffa_partition_info_v1_1
827 					     *partitions,
828 					     struct mailbox *mbox,
829 					     int partition_count)
830 {
831 	uint32_t index;
832 	uint32_t buf_size;
833 	uint32_t descriptor_size;
834 	struct ffa_partition_info_v1_0 *v1_0_partitions =
835 		(struct ffa_partition_info_v1_0 *) mbox->rx_buffer;
836 
837 	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
838 	descriptor_size = partition_count *
839 			  sizeof(struct ffa_partition_info_v1_0);
840 
841 	if (descriptor_size > buf_size) {
842 		return FFA_ERROR_NO_MEMORY;
843 	}
844 
845 	for (index = 0U; index < partition_count; index++) {
846 		v1_0_partitions[index].ep_id = partitions[index].ep_id;
847 		v1_0_partitions[index].execution_ctx_count =
848 			partitions[index].execution_ctx_count;
849 		v1_0_partitions[index].properties =
850 			partitions[index].properties;
851 	}
852 	return 0;
853 }
854 
855 /*
856  * Main handler for FFA_PARTITION_INFO_GET which supports both FF-A v1.1 and
857  * v1.0 implementations.
858  */
859 static uint64_t partition_info_get_handler(uint32_t smc_fid,
860 					   bool secure_origin,
861 					   uint64_t x1,
862 					   uint64_t x2,
863 					   uint64_t x3,
864 					   uint64_t x4,
865 					   void *cookie,
866 					   void *handle,
867 					   uint64_t flags)
868 {
869 	int ret;
870 	uint32_t partition_count = 0;
871 	uint32_t size = 0;
872 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
873 	struct mailbox *mbox;
874 	uint64_t info_get_flags;
875 	bool count_only;
876 	uint32_t uuid[4];
877 
878 	uuid[0] = x1;
879 	uuid[1] = x2;
880 	uuid[2] = x3;
881 	uuid[3] = x4;
882 
883 	/* Determine if the Partition descriptors should be populated. */
884 	info_get_flags = SMC_GET_GP(handle, CTX_GPREG_X5);
885 	count_only = (info_get_flags & FFA_PARTITION_INFO_GET_COUNT_FLAG_MASK);
886 
887 	/* Handle the case where we don't need to populate the descriptors. */
888 	if (count_only) {
889 		partition_count = partition_info_get_handler_count_only(uuid);
890 		if (partition_count == 0) {
891 			return spmc_ffa_error_return(handle,
892 						FFA_ERROR_INVALID_PARAMETER);
893 		}
894 	} else {
895 		struct ffa_partition_info_v1_1 partitions[MAX_SP_LP_PARTITIONS];
896 
897 		/*
898 		 * Handle the case where the partition descriptors are required:
899 		 * check that the buffers are available and populate the
900 		 * appropriate structure version.
901 		 */
902 
903 		/* Obtain the v1.1 format of the descriptors. */
904 		ret = partition_info_get_handler_v1_1(uuid, partitions,
905 						      MAX_SP_LP_PARTITIONS,
906 						      &partition_count);
907 
908 		/* Check if an error occurred during discovery. */
909 		if (ret != 0) {
910 			goto err;
911 		}
912 
913 		/* If we didn't find any matches the UUID is unknown. */
914 		if (partition_count == 0) {
915 			ret = FFA_ERROR_INVALID_PARAMETER;
916 			goto err;
917 		}
918 
919 		/* Obtain the partition mailbox RX/TX buffer pair descriptor. */
920 		mbox = spmc_get_mbox_desc(secure_origin);
921 
922 		/*
923 		 * If the caller has not registered its RX/TX buffer pair
924 		 * then return an error code.
925 		 */
926 		spin_lock(&mbox->lock);
927 		if (mbox->rx_buffer == NULL) {
928 			ret = FFA_ERROR_BUSY;
929 			goto err_unlock;
930 		}
931 
932 		/* Ensure the RX buffer is currently free. */
933 		if (mbox->state != MAILBOX_STATE_EMPTY) {
934 			ret = FFA_ERROR_BUSY;
935 			goto err_unlock;
936 		}
937 
938 		/* Zero the RX buffer before populating. */
939 		(void)memset(mbox->rx_buffer, 0,
940 			     mbox->rxtx_page_count * FFA_PAGE_SIZE);
941 
942 		/*
943 		 * Depending on the FF-A version of the requesting partition
944 		 * we may need to convert to a v1.0 format otherwise we can copy
945 		 * directly.
946 		 */
947 		if (ffa_version == MAKE_FFA_VERSION(U(1), U(0))) {
948 			ret = partition_info_populate_v1_0(partitions,
949 							   mbox,
950 							   partition_count);
951 			if (ret != 0) {
952 				goto err_unlock;
953 			}
954 		} else {
955 			uint32_t buf_size = mbox->rxtx_page_count *
956 					    FFA_PAGE_SIZE;
957 
958 			/* Ensure the descriptor will fit in the buffer. */
959 			size = sizeof(struct ffa_partition_info_v1_1);
960 			if (partition_count * size > buf_size) {
961 				ret = FFA_ERROR_NO_MEMORY;
962 				goto err_unlock;
963 			}
964 			memcpy(mbox->rx_buffer, partitions,
965 			       partition_count * size);
966 		}
967 
968 		mbox->state = MAILBOX_STATE_FULL;
969 		spin_unlock(&mbox->lock);
970 	}
971 	SMC_RET4(handle, FFA_SUCCESS_SMC32, 0, partition_count, size);
972 
973 err_unlock:
974 	spin_unlock(&mbox->lock);
975 err:
976 	return spmc_ffa_error_return(handle, ret);
977 }
978 
979 /*******************************************************************************
980  * This function parses the Secure Partition Manifest. From the manifest it
981  * fetches the details required to prepare the Secure Partition image context
982  * and the Secure Partition boot arguments, if any.
983  ******************************************************************************/
984 static int sp_manifest_parse(void *sp_manifest, int offset,
985 			     struct secure_partition_desc *sp,
986 			     entry_point_info_t *ep_info)
987 {
988 	int32_t ret, node;
989 	uint32_t config_32;
990 
991 	/*
992 	 * Look for the mandatory fields that are expected to be present in
993 	 * the SP manifests.
994 	 */
995 	node = fdt_path_offset(sp_manifest, "/");
996 	if (node < 0) {
997 		ERROR("Did not find root node.\n");
998 		return node;
999 	}
1000 
1001 	ret = fdt_read_uint32_array(sp_manifest, node, "uuid",
1002 				    ARRAY_SIZE(sp->uuid), sp->uuid);
1003 	if (ret != 0) {
1004 		ERROR("Missing Secure Partition UUID.\n");
1005 		return ret;
1006 	}
1007 
1008 	ret = fdt_read_uint32(sp_manifest, node, "exception-level", &config_32);
1009 	if (ret != 0) {
1010 		ERROR("Missing SP Exception Level information.\n");
1011 		return ret;
1012 	}
1013 
1014 	sp->runtime_el = config_32;
1015 
1016 	ret = fdt_read_uint32(sp_manifest, node, "ffa-version", &config_32);
1017 	if (ret != 0) {
1018 		ERROR("Missing Secure Partition FF-A Version.\n");
1019 		return ret;
1020 	}
1021 
1022 	sp->ffa_version = config_32;
1023 
1024 	ret = fdt_read_uint32(sp_manifest, node, "execution-state", &config_32);
1025 	if (ret != 0) {
1026 		ERROR("Missing Secure Partition Execution State.\n");
1027 		return ret;
1028 	}
1029 
1030 	sp->execution_state = config_32;
1031 
1032 	ret = fdt_read_uint32(sp_manifest, node,
1033 			      "messaging-method", &config_32);
1034 	if (ret != 0) {
1035 		ERROR("Missing Secure Partition messaging method.\n");
1036 		return ret;
1037 	}
1038 
1039 	/* Validate this entry; we currently only support direct messaging. */
1040 	if ((config_32 & ~(FFA_PARTITION_DIRECT_REQ_RECV |
1041 			  FFA_PARTITION_DIRECT_REQ_SEND)) != 0U) {
1042 		WARN("Invalid Secure Partition messaging method (0x%x)\n",
1043 		     config_32);
1044 		return -EINVAL;
1045 	}
1046 
1047 	sp->properties = config_32;
1048 
1049 	ret = fdt_read_uint32(sp_manifest, node,
1050 			      "execution-ctx-count", &config_32);
1051 
1052 	if (ret != 0) {
1053 		ERROR("Missing SP Execution Context Count.\n");
1054 		return ret;
1055 	}
1056 
1057 	/*
1058 	 * Ensure this field is set correctly in the manifest. However,
1059 	 * since this is currently a hardcoded value for S-EL1 partitions
1060 	 * we don't need to save it here, only validate it.
1061 	 */
1062 	if (config_32 != PLATFORM_CORE_COUNT) {
1063 		ERROR("SP Execution Context Count (%u) must be %u.\n",
1064 			config_32, PLATFORM_CORE_COUNT);
1065 		return -EINVAL;
1066 	}
1067 
1068 	/*
1069 	 * Look for the optional fields that are expected to be present in
1070 	 * an SP manifest.
1071 	 */
1072 	ret = fdt_read_uint32(sp_manifest, node, "id", &config_32);
1073 	if (ret != 0) {
1074 		WARN("Missing Secure Partition ID.\n");
1075 	} else {
1076 		if (!is_ffa_secure_id_valid(config_32)) {
1077 			ERROR("Invalid Secure Partition ID (0x%x).\n",
1078 			      config_32);
1079 			return -EINVAL;
1080 		}
1081 		sp->sp_id = config_32;
1082 	}
1083 
1084 	return 0;
1085 }
1086 
1087 /*******************************************************************************
1088  * This function gets the Secure Partition Manifest base and maps the manifest
1089  * region.
1090  * Currently only one Secure Partition manifest is considered, which is used to
1091  * prepare the context for the single Secure Partition.
1092  ******************************************************************************/
1093 static int find_and_prepare_sp_context(void)
1094 {
1095 	void *sp_manifest;
1096 	uintptr_t manifest_base;
1097 	uintptr_t manifest_base_align;
1098 	entry_point_info_t *next_image_ep_info;
1099 	int32_t ret;
1100 	struct secure_partition_desc *sp;
1101 
1102 	next_image_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
1103 	if (next_image_ep_info == NULL) {
1104 		WARN("No Secure Partition image provided by BL2.\n");
1105 		return -ENOENT;
1106 	}
1107 
1108 	sp_manifest = (void *)next_image_ep_info->args.arg0;
1109 	if (sp_manifest == NULL) {
1110 		WARN("Secure Partition manifest absent.\n");
1111 		return -ENOENT;
1112 	}
1113 
1114 	manifest_base = (uintptr_t)sp_manifest;
1115 	manifest_base_align = page_align(manifest_base, DOWN);
1116 
1117 	/*
1118 	 * Map the secure partition manifest region in the EL3 translation
1119 	 * regime.
1120 	 * Map an area of (2 * PAGE_SIZE) for now. Since the manifest base is
1121 	 * aligned down to a page boundary, a single PAGE_SIZE region starting
1122 	 * at the aligned base may not fully contain the manifest.
1123 	 */
1124 	ret = mmap_add_dynamic_region((unsigned long long)manifest_base_align,
1125 				      manifest_base_align,
1126 				      PAGE_SIZE * 2,
1127 				      MT_RO_DATA);
1128 	if (ret != 0) {
1129 		ERROR("Error while mapping SP manifest (%d).\n", ret);
1130 		return ret;
1131 	}
1132 
1133 	ret = fdt_node_offset_by_compatible(sp_manifest, -1,
1134 					    "arm,ffa-manifest-1.0");
1135 	if (ret < 0) {
1136 		ERROR("Error while reading the SP manifest.\n");
1137 		return -EINVAL;
1138 	}
1139 
1140 	/*
1141 	 * Store the size of the manifest so that it can be used later when
1142 	 * passing the manifest as boot information.
1143 	 */
1144 	next_image_ep_info->args.arg1 = fdt_totalsize(sp_manifest);
1145 	INFO("Manifest size = %lu bytes.\n", next_image_ep_info->args.arg1);
1146 
1147 	/*
1148 	 * Select an SP descriptor for initialising the partition's execution
1149 	 * context on the primary CPU.
1150 	 */
1151 	sp = spmc_get_current_sp_ctx();
1152 
1153 	/* Initialize entry point information for the SP */
1154 	SET_PARAM_HEAD(next_image_ep_info, PARAM_EP, VERSION_1,
1155 		       SECURE | EP_ST_ENABLE);
1156 
1157 	/* Parse the SP manifest. */
1158 	ret = sp_manifest_parse(sp_manifest, ret, sp, next_image_ep_info);
1159 	if (ret != 0) {
1160 		ERROR("Error in Secure Partition manifest parsing.\n");
1161 		return ret;
1162 	}
1163 
1164 	/* Check that the runtime EL in the manifest was correct. */
1165 	if (sp->runtime_el != S_EL1) {
1166 		ERROR("Unexpected runtime EL: %d\n", sp->runtime_el);
1167 		return -EINVAL;
1168 	}
1169 
1170 	/* Perform any common initialisation. */
1171 	spmc_sp_common_setup(sp, next_image_ep_info);
1172 
1173 	/* Perform any initialisation specific to S-EL1 SPs. */
1174 	spmc_el1_sp_setup(sp, next_image_ep_info);
1175 
1176 	/* Initialize the SP context with the required ep info. */
1177 	spmc_sp_common_ep_commit(sp, next_image_ep_info);
1178 
1179 	return 0;
1180 }
1181 
1182 /*******************************************************************************
1183  * This function validates the EL3 Logical Partition descriptors and
1184  * initialises each EL3 Logical Partition in turn.
1185  ******************************************************************************/
1186 static int32_t logical_sp_init(void)
1187 {
1188 	int32_t rc = 0;
1189 	struct el3_lp_desc *el3_lp_descs;
1190 
1191 	/* Perform initial validation of the Logical Partitions. */
1192 	rc = el3_sp_desc_validate();
1193 	if (rc != 0) {
1194 		ERROR("Logical Partition validation failed!\n");
1195 		return rc;
1196 	}
1197 
1198 	el3_lp_descs = get_el3_lp_array();
1199 
1200 	INFO("Logical Secure Partition init start.\n");
1201 	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
1202 		rc = el3_lp_descs[i].init();
1203 		if (rc != 0) {
1204 			ERROR("Logical SP (0x%x) Failed to Initialize\n",
1205 			      el3_lp_descs[i].sp_id);
1206 			return rc;
1207 		}
1208 		VERBOSE("Logical SP (0x%x) Initialized\n",
1209 			      el3_lp_descs[i].sp_id);
1210 	}
1211 
1212 	INFO("Logical Secure Partition init completed.\n");
1213 
1214 	return rc;
1215 }
1216 
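/*******************************************************************************
 * This function takes an SP execution context pointer and performs a
 * synchronous entry into it on the current CPU. It returns the value passed
 * by the SP via spmc_sp_synchronous_exit().
 ******************************************************************************/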
1217 uint64_t spmc_sp_synchronous_entry(struct sp_exec_ctx *ec)
1218 {
1219 	uint64_t rc;
1220 
1221 	assert(ec != NULL);
1222 
1223 	/* Assign the context of the SP to this CPU */
1224 	cm_set_context(&(ec->cpu_ctx), SECURE);
1225 
1226 	/* Restore the context assigned above */
1227 	cm_el1_sysregs_context_restore(SECURE);
1228 	cm_set_next_eret_context(SECURE);
1229 
1230 	/* Invalidate TLBs at EL1. */
1231 	tlbivmalle1();
1232 	dsbish();
1233 
1234 	/* Enter Secure Partition */
1235 	rc = spm_secure_partition_enter(&ec->c_rt_ctx);
1236 
1237 	/* Save secure state */
1238 	cm_el1_sysregs_context_save(SECURE);
1239 
1240 	return rc;
1241 }
1242 
1243 /*******************************************************************************
1244  * SPMC Helper Functions.
1245  ******************************************************************************/
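/*
 * Initialise the physical SP by performing a synchronous entry into it on the
 * boot CPU. Returns 1 if the SP initialised successfully, 0 otherwise.
 */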
1246 static int32_t sp_init(void)
1247 {
1248 	uint64_t rc;
1249 	struct secure_partition_desc *sp;
1250 	struct sp_exec_ctx *ec;
1251 
1252 	sp = spmc_get_current_sp_ctx();
1253 	ec = spmc_get_sp_ec(sp);
1254 	ec->rt_model = RT_MODEL_INIT;
1255 	ec->rt_state = RT_STATE_RUNNING;
1256 
1257 	INFO("Secure Partition (0x%x) init start.\n", sp->sp_id);
1258 
1259 	rc = spmc_sp_synchronous_entry(ec);
1260 	if (rc != 0) {
1261 		/* Indicate SP init was not successful. */
1262 		ERROR("SP (0x%x) failed to initialize (%lu).\n",
1263 		      sp->sp_id, rc);
1264 		return 0;
1265 	}
1266 
1267 	ec->rt_state = RT_STATE_WAITING;
1268 	INFO("Secure Partition initialized.\n");
1269 
1270 	return 1;
1271 }
1272 
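/* Set all secure partition descriptors to their default (invalid) state. */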
1273 static void initialize_sp_descs(void)
1274 {
1275 	struct secure_partition_desc *sp;
1276 
1277 	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
1278 		sp = &sp_desc[i];
1279 		sp->sp_id = INV_SP_ID;
1280 		sp->mailbox.rx_buffer = NULL;
1281 		sp->mailbox.tx_buffer = NULL;
1282 		sp->mailbox.state = MAILBOX_STATE_EMPTY;
1283 		sp->secondary_ep = 0;
1284 	}
1285 }
1286 
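/* Set all NS endpoint descriptors to their default state. */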
1287 static void initialize_ns_ep_descs(void)
1288 {
1289 	struct ns_endpoint_desc *ns_ep;
1290 
1291 	for (unsigned int i = 0U; i < NS_PARTITION_COUNT; i++) {
1292 		ns_ep = &ns_ep_desc[i];
1293 		/*
1294 		 * Clashes with the Hypervisor ID but will not be a
1295 		 * problem in practice.
1296 		 */
1297 		ns_ep->ns_ep_id = 0;
1298 		ns_ep->ffa_version = 0;
1299 		ns_ep->mailbox.rx_buffer = NULL;
1300 		ns_ep->mailbox.tx_buffer = NULL;
1301 		ns_ep->mailbox.state = MAILBOX_STATE_EMPTY;
1302 	}
1303 }
1304 
1305 /*******************************************************************************
1306  * Initialize SPMC attributes for the SPMD.
1307  ******************************************************************************/
1308 void spmc_populate_attrs(spmc_manifest_attribute_t *spmc_attrs)
1309 {
1310 	spmc_attrs->major_version = FFA_VERSION_MAJOR;
1311 	spmc_attrs->minor_version = FFA_VERSION_MINOR;
1312 	spmc_attrs->exec_state = MODE_RW_64;
1313 	spmc_attrs->spmc_id = FFA_SPMC_ID;
1314 }
1315 
1316 /*******************************************************************************
1317  * Initialize contexts of all Secure Partitions.
1318  ******************************************************************************/
1319 int32_t spmc_setup(void)
1320 {
1321 	int32_t ret;
1322 
1323 	/* Initialize endpoint descriptors */
1324 	initialize_sp_descs();
1325 	initialize_ns_ep_descs();
1326 
1327 	/* Setup logical SPs. */
1328 	ret = logical_sp_init();
1329 	if (ret != 0) {
1330 		ERROR("Failed to initialize Logical Partitions.\n");
1331 		return ret;
1332 	}
1333 
1334 	/* Perform physical SP setup. */
1335 
1336 	/* Disable MMU at EL1 (initialized by BL2) */
1337 	disable_mmu_icache_el1();
1338 
1339 	/* Initialize context of the SP */
1340 	INFO("Secure Partition context setup start.\n");
1341 
1342 	ret = find_and_prepare_sp_context();
1343 	if (ret != 0) {
1344 		ERROR("Error in SP finding and context preparation.\n");
1345 		return ret;
1346 	}
1347 
1348 	/* Register init function for deferred init.  */
1349 	bl31_register_bl32_init(&sp_init);
1350 
1351 	INFO("Secure Partition setup done.\n");
1352 
1353 	return 0;
1354 }
1355 
1356 /*******************************************************************************
1357  * Secure Partition Manager SMC handler.
1358  ******************************************************************************/
1359 uint64_t spmc_smc_handler(uint32_t smc_fid,
1360 			  bool secure_origin,
1361 			  uint64_t x1,
1362 			  uint64_t x2,
1363 			  uint64_t x3,
1364 			  uint64_t x4,
1365 			  void *cookie,
1366 			  void *handle,
1367 			  uint64_t flags)
1368 {
1369 	switch (smc_fid) {
1370 
1371 	case FFA_VERSION:
1372 		return ffa_version_handler(smc_fid, secure_origin, x1, x2, x3,
1373 					   x4, cookie, handle, flags);
1374 
1375 	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
1376 	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
1377 		return direct_req_smc_handler(smc_fid, secure_origin, x1, x2,
1378 					      x3, x4, cookie, handle, flags);
1379 
1380 	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
1381 	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
1382 		return direct_resp_smc_handler(smc_fid, secure_origin, x1, x2,
1383 					       x3, x4, cookie, handle, flags);
1384 
1385 	case FFA_RXTX_MAP_SMC32:
1386 	case FFA_RXTX_MAP_SMC64:
1387 		return rxtx_map_handler(smc_fid, secure_origin, x1, x2, x3, x4,
1388 					cookie, handle, flags);
1389 
1390 	case FFA_RXTX_UNMAP:
1391 		return rxtx_unmap_handler(smc_fid, secure_origin, x1, x2, x3,
1392 					  x4, cookie, handle, flags);
1393 
1394 	case FFA_PARTITION_INFO_GET:
1395 		return partition_info_get_handler(smc_fid, secure_origin, x1,
1396 						  x2, x3, x4, cookie, handle,
1397 						  flags);
1398 
1399 	case FFA_MSG_WAIT:
1400 		return msg_wait_handler(smc_fid, secure_origin, x1, x2, x3, x4,
1401 					cookie, handle, flags);
1402 
1403 	case FFA_ERROR:
1404 		return ffa_error_handler(smc_fid, secure_origin, x1, x2, x3, x4,
1405 					cookie, handle, flags);
1406 
1407 	default:
1408 		WARN("Unsupported FF-A call 0x%08x.\n", smc_fid);
1409 		break;
1410 	}
1411 	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1412 }
1413