xref: /rk3399_ARM-atf/services/std_svc/spmd/spmd_main.c (revision 6da76075bf4b953d621aa15c379e62a5f785de3f)
1 /*
2  * Copyright (c) 2020-2022, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <errno.h>
9 #include <inttypes.h>
10 #include <stdint.h>
11 #include <string.h>
12 
13 #include <arch_helpers.h>
14 #include <arch/aarch64/arch_features.h>
15 #include <bl31/bl31.h>
16 #include <bl31/interrupt_mgmt.h>
17 #include <common/debug.h>
18 #include <common/runtime_svc.h>
19 #include <lib/el3_runtime/context_mgmt.h>
20 #include <lib/smccc.h>
21 #include <lib/spinlock.h>
22 #include <lib/utils.h>
23 #include <plat/common/common_def.h>
24 #include <plat/common/platform.h>
25 #include <platform_def.h>
26 #include <services/ffa_svc.h>
27 #include <services/spmc_svc.h>
28 #include <services/spmd_svc.h>
29 #include <smccc_helpers.h>
30 #include "spmd_private.h"
31 
/*******************************************************************************
 * SPM Core context information.
 * One context per physical core, indexed by the core's linear position
 * (see spmd_get_context_by_mpidr()).
 ******************************************************************************/
static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * SPM Core attribute information is read from its manifest if the SPMC is not
 * at EL3. Else, it is populated from the SPMC directly.
 ******************************************************************************/
static spmc_manifest_attribute_t spmc_attrs;

/*******************************************************************************
 * SPM Core entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *spmc_ep_info;
48 
49 /*******************************************************************************
50  * SPM Core context on CPU based on mpidr.
51  ******************************************************************************/
52 spmd_spm_core_context_t *spmd_get_context_by_mpidr(uint64_t mpidr)
53 {
54 	int core_idx = plat_core_pos_by_mpidr(mpidr);
55 
56 	if (core_idx < 0) {
57 		ERROR("Invalid mpidr: %" PRIx64 ", returned ID: %d\n", mpidr, core_idx);
58 		panic();
59 	}
60 
61 	return &spm_core_context[core_idx];
62 }
63 
/*******************************************************************************
 * SPM Core context on current CPU get helper.
 * Convenience wrapper resolving the calling core's MPIDR.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context(void)
{
	return spmd_get_context_by_mpidr(read_mpidr());
}
71 
/*******************************************************************************
 * SPM Core ID getter.
 * Returns the SPMC endpoint ID as read from the manifest (or populated by an
 * EL3-resident SPMC).
 ******************************************************************************/
uint16_t spmd_spmc_id_get(void)
{
	return spmc_attrs.spmc_id;
}
79 
/*******************************************************************************
 * Static function declaration.
 ******************************************************************************/
static int32_t spmd_init(void);
static int spmd_spmc_init(void *pm_addr);
static uint64_t spmd_ffa_error_return(void *handle,
				       int error_code);
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *handle);
94 
95 /******************************************************************************
96  * Builds an SPMD to SPMC direct message request.
97  *****************************************************************************/
98 void spmd_build_spmc_message(gp_regs_t *gpregs, uint8_t target_func,
99 			     unsigned long long message)
100 {
101 	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
102 	write_ctx_reg(gpregs, CTX_GPREG_X1,
103 		(SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
104 		 spmd_spmc_id_get());
105 	write_ctx_reg(gpregs, CTX_GPREG_X2, BIT(31) | target_func);
106 	write_ctx_reg(gpregs, CTX_GPREG_X3, message);
107 }
108 
109 
/*******************************************************************************
 * This function takes an SPMC context pointer and performs a synchronous
 * SPMC entry.
 *
 * spmc_ctx: per-core SPMC context to enter (must be non-NULL).
 * Returns the value the SPMC hands back via spmd_spm_core_sync_exit() (0 on
 * success by convention of the callers in this file).
 ******************************************************************************/
uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
{
	uint64_t rc;

	assert(spmc_ctx != NULL);

	/* Make this context the current secure-world context for this core. */
	cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);

	/* Restore the context assigned above */
#if SPMD_SPM_AT_SEL2
	/* SPMC runs at S-EL2: its S-EL2 system registers carry its state. */
	cm_el2_sysregs_context_restore(SECURE);
#else
	cm_el1_sysregs_context_restore(SECURE);
#endif
	cm_set_next_eret_context(SECURE);

	/* Enter SPMC; returns when the SPMC calls spmd_spm_core_sync_exit(). */
	rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);

	/* Save secure state */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(SECURE);
#else
	cm_el1_sysregs_context_save(SECURE);
#endif

	return rc;
}
142 
/*******************************************************************************
 * This function returns to the place where spmd_spm_core_sync_entry() was
 * called originally.
 *
 * rc: value delivered as the return value of the matching
 *     spmd_spm_core_sync_entry() call. Never returns.
 ******************************************************************************/
__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();

	/* Get current CPU context from SPMC context */
	assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));

	/*
	 * The SPMD must have initiated the original request through a
	 * synchronous entry into SPMC. Jump back to the original C runtime
	 * context with the value of rc in x0;
	 */
	spmd_spm_core_exit(ctx->c_rt_ctx, rc);

	/* Unreachable: spmd_spm_core_exit() does not return. */
	panic();
}
163 
164 /*******************************************************************************
165  * Jump to the SPM Core for the first time.
166  ******************************************************************************/
167 static int32_t spmd_init(void)
168 {
169 	spmd_spm_core_context_t *ctx = spmd_get_context();
170 	uint64_t rc;
171 
172 	VERBOSE("SPM Core init start.\n");
173 
174 	/* Primary boot core enters the SPMC for initialization. */
175 	ctx->state = SPMC_STATE_ON_PENDING;
176 
177 	rc = spmd_spm_core_sync_entry(ctx);
178 	if (rc != 0ULL) {
179 		ERROR("SPMC initialisation failed 0x%" PRIx64 "\n", rc);
180 		return 0;
181 	}
182 
183 	ctx->state = SPMC_STATE_ON;
184 
185 	VERBOSE("SPM Core init end.\n");
186 
187 	return 1;
188 }
189 
/*******************************************************************************
 * spmd_secure_interrupt_handler
 * Enter the SPMC for further handling of the secure interrupt by the SPMC
 * itself or a Secure Partition.
 *
 * Invoked by the EL3 interrupt framework when a secure interrupt fires while
 * the Normal world is running (registered in spmd_spmc_init()). Saves the
 * non-secure context, conveys FFA_INTERRUPT to the SPMC through a synchronous
 * entry, then resumes the Normal world where it was preempted.
 ******************************************************************************/
static uint64_t spmd_secure_interrupt_handler(uint32_t id,
					      uint32_t flags,
					      void *handle,
					      void *cookie)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
	unsigned int linear_id = plat_my_core_pos();
	int64_t rc;

	/* Sanity check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering SPMC */
	cm_el1_sysregs_context_save(NON_SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(NON_SECURE);
#endif

	/* Convey the event to the SPMC through the FFA_INTERRUPT interface. */
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_INTERRUPT);
	write_ctx_reg(gpregs, CTX_GPREG_X1, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X2, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X3, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X7, 0);

	/* Mark current core as handling a secure interrupt. */
	ctx->secure_interrupt_ongoing = true;

	/* Blocks until the SPMC signals completion (FFA_NORMAL_WORLD_RESUME). */
	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("%s failed (%" PRId64 ") on CPU%u\n", __func__, rc, linear_id);
	}

	ctx->secure_interrupt_ongoing = false;

	/* Restore the Normal world context saved on entry and resume it. */
	cm_el1_sysregs_context_restore(NON_SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(NON_SECURE);
#endif
	cm_set_next_eret_context(NON_SECURE);

	SMC_RET0(&ctx->cpu_ctx);
}
245 
/*******************************************************************************
 * Loads SPMC manifest and inits SPMC.
 *
 * pm_addr: address of the SPMC manifest handed over by BL2.
 * Returns 0 on success, a negative errno-style value on a rejected manifest,
 * or the platform loader's error code. Panics if the secure interrupt handler
 * cannot be registered.
 ******************************************************************************/
static int spmd_spmc_init(void *pm_addr)
{
	cpu_context_t *cpu_ctx;
	unsigned int core_id;
	uint32_t ep_attr, flags;
	int rc;

	/* Load the SPM Core manifest */
	rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
	if (rc != 0) {
		WARN("No or invalid SPM Core manifest image provided by BL2\n");
		return rc;
	}

	/*
	 * Ensure that the SPM Core version is compatible with the SPM
	 * Dispatcher version.
	 */
	if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
	    (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
		WARN("Unsupported FFA version (%u.%u)\n",
		     spmc_attrs.major_version, spmc_attrs.minor_version);
		return -EINVAL;
	}

	VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
	     spmc_attrs.minor_version);

	VERBOSE("SPM Core run time EL%x.\n",
	     SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);

	/* Validate the SPMC ID, Ensure high bit is set */
	if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
			SPMC_SECURE_ID_MASK) == 0U) {
		WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
		return -EINVAL;
	}

	/* Validate the SPM Core execution state */
	if ((spmc_attrs.exec_state != MODE_RW_64) &&
	    (spmc_attrs.exec_state != MODE_RW_32)) {
		WARN("Unsupported %s%x.\n", "SPM Core execution state 0x",
		     spmc_attrs.exec_state);
		return -EINVAL;
	}

	VERBOSE("%s%x.\n", "SPM Core execution state 0x",
		spmc_attrs.exec_state);

#if SPMD_SPM_AT_SEL2
	/* Ensure manifest has not requested AArch32 state in S-EL2 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		WARN("AArch32 state at S-EL2 is not supported.\n");
		return -EINVAL;
	}

	/*
	 * Check if S-EL2 is supported on this system if S-EL2
	 * is required for SPM
	 */
	if (!is_armv8_4_sel2_present()) {
		WARN("SPM Core run time S-EL2 is not supported.\n");
		return -EINVAL;
	}
#endif /* SPMD_SPM_AT_SEL2 */

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = SECURE | EP_ST_ENABLE;
	/* Match the SPMC's data endianness to EL3's current setting. */
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);

	/*
	 * Populate SPSR for SPM Core based upon validated parameters from the
	 * manifest.
	 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
						 SPSR_E_LITTLE,
						 DAIF_FIQ_BIT |
						 DAIF_IRQ_BIT |
						 DAIF_ABT_BIT);
	} else {

#if SPMD_SPM_AT_SEL2
		static const uint32_t runtime_el = MODE_EL2;
#else
		static const uint32_t runtime_el = MODE_EL1;
#endif
		spmc_ep_info->spsr = SPSR_64(runtime_el,
					     MODE_SP_ELX,
					     DISABLE_ALL_EXCEPTIONS);
	}

	/* Set an initial SPMC context state for all cores. */
	for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
		spm_core_context[core_id].state = SPMC_STATE_OFF;

		/* Setup an initial cpu context for the SPMC. */
		cpu_ctx = &spm_core_context[core_id].cpu_ctx;
		cm_setup_context(cpu_ctx, spmc_ep_info);

		/*
		 * Pass the core linear ID to the SPMC through x4.
		 * (TF-A implementation defined behavior helping
		 * a legacy TOS migration to adopt FF-A).
		 */
		write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4, core_id);
	}

	/* Register power management hooks with PSCI */
	psci_register_spd_pm_hook(&spmd_pm);

	/* Register init function for deferred init. */
	bl31_register_bl32_init(&spmd_init);

	INFO("SPM Core setup done.\n");

	/*
	 * Register an interrupt handler routing secure interrupts to SPMD
	 * while the NWd is running.
	 */
	flags = 0;
	set_interrupt_rm_flag(flags, NON_SECURE);
	rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
					     spmd_secure_interrupt_handler,
					     flags);
	if (rc != 0) {
		panic();
	}

	return 0;
}
384 
385 /*******************************************************************************
386  * Initialize context of SPM Core.
387  ******************************************************************************/
388 int spmd_setup(void)
389 {
390 	int rc;
391 	void *spmc_manifest;
392 
393 	/*
394 	 * If the SPMC is at EL3, then just initialise it directly. The
395 	 * shenanigans of when it is at a lower EL are not needed.
396 	 */
397 	if (is_spmc_at_el3()) {
398 		/* Allow the SPMC to populate its attributes directly. */
399 		spmc_populate_attrs(&spmc_attrs);
400 
401 		rc = spmc_setup();
402 		if (rc != 0) {
403 			ERROR("SPMC initialisation failed 0x%x.\n", rc);
404 		}
405 		return rc;
406 	}
407 
408 	spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
409 	if (spmc_ep_info == NULL) {
410 		WARN("No SPM Core image provided by BL2 boot loader.\n");
411 		return -EINVAL;
412 	}
413 
414 	/* Under no circumstances will this parameter be 0 */
415 	assert(spmc_ep_info->pc != 0ULL);
416 
417 	/*
418 	 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
419 	 * be used as a manifest for the SPM Core at the next lower EL/mode.
420 	 */
421 	spmc_manifest = (void *)spmc_ep_info->args.arg0;
422 	if (spmc_manifest == NULL) {
423 		ERROR("Invalid or absent SPM Core manifest.\n");
424 		return -EINVAL;
425 	}
426 
427 	/* Load manifest, init SPMC */
428 	rc = spmd_spmc_init(spmc_manifest);
429 	if (rc != 0) {
430 		WARN("Booting device without SPM initialization.\n");
431 	}
432 
433 	return rc;
434 }
435 
/*******************************************************************************
 * Forward SMC to the other security state
 *
 * Saves the originating world's context, restores the destination world's
 * context and ERETs into it with x0-x4 taken from the arguments and x5-x7
 * copied from the incoming handle.
 ******************************************************************************/
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *handle)
{
	unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
	unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;

	/* Save incoming security state */
#if SPMD_SPM_AT_SEL2
	/*
	 * NOTE(review): with the SPMC at S-EL2 the secure EL1 context is
	 * presumably managed by the SPMC itself, hence EL1 sysregs are only
	 * saved/restored here for the non-secure side — confirm against the
	 * context management library.
	 */
	if (secure_state_in == NON_SECURE) {
		cm_el1_sysregs_context_save(secure_state_in);
	}
	cm_el2_sysregs_context_save(secure_state_in);
#else
	cm_el1_sysregs_context_save(secure_state_in);
#endif

	/* Restore outgoing security state */
#if SPMD_SPM_AT_SEL2
	if (secure_state_out == NON_SECURE) {
		cm_el1_sysregs_context_restore(secure_state_out);
	}
	cm_el2_sysregs_context_restore(secure_state_out);
#else
	cm_el1_sysregs_context_restore(secure_state_out);
#endif
	cm_set_next_eret_context(secure_state_out);

	SMC_RET8(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
			SMC_GET_GP(handle, CTX_GPREG_X5),
			SMC_GET_GP(handle, CTX_GPREG_X6),
			SMC_GET_GP(handle, CTX_GPREG_X7));
}
476 
477 /*******************************************************************************
478  * Return FFA_ERROR with specified error code
479  ******************************************************************************/
480 static uint64_t spmd_ffa_error_return(void *handle, int error_code)
481 {
482 	SMC_RET8(handle, (uint32_t) FFA_ERROR,
483 		 FFA_TARGET_INFO_MBZ, (uint32_t)error_code,
484 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
485 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
486 }
487 
488 /*******************************************************************************
489  * spmd_check_address_in_binary_image
490  ******************************************************************************/
491 bool spmd_check_address_in_binary_image(uint64_t address)
492 {
493 	assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size));
494 
495 	return ((address >= spmc_attrs.load_address) &&
496 		(address < (spmc_attrs.load_address + spmc_attrs.binary_size)));
497 }
498 
499 /******************************************************************************
500  * spmd_is_spmc_message
501  *****************************************************************************/
502 static bool spmd_is_spmc_message(unsigned int ep)
503 {
504 	return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID)
505 		&& (ffa_endpoint_source(ep) == spmc_attrs.spmc_id));
506 }
507 
508 /******************************************************************************
509  * spmd_handle_spmc_message
510  *****************************************************************************/
511 static int spmd_handle_spmc_message(unsigned long long msg,
512 		unsigned long long parm1, unsigned long long parm2,
513 		unsigned long long parm3, unsigned long long parm4)
514 {
515 	VERBOSE("%s %llx %llx %llx %llx %llx\n", __func__,
516 		msg, parm1, parm2, parm3, parm4);
517 
518 	return -EINVAL;
519 }
520 
/*******************************************************************************
 * This function handles all SMCs in the range reserved for FFA. Each call is
 * either forwarded to the other security state or handled by the SPM dispatcher
 *
 * smc_fid: FF-A function identifier; x1-x4: call arguments; handle: GP
 * register context of the calling world; flags: carries the caller's security
 * state. Returns through SMC_RET*/spmd_smc_forward and therefore never falls
 * off the end on a recognised function id.
 ******************************************************************************/
uint64_t spmd_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	unsigned int linear_id = plat_my_core_pos();
	spmd_spm_core_context_t *ctx = spmd_get_context();
	bool secure_origin;
	int32_t ret;
	uint32_t input_version;

	/* Determine which security state this SMC originated from */
	secure_origin = is_caller_secure(flags);

	VERBOSE("SPM(%u): 0x%x 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64
		" 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
		    linear_id, smc_fid, x1, x2, x3, x4,
		    SMC_GET_GP(handle, CTX_GPREG_X5),
		    SMC_GET_GP(handle, CTX_GPREG_X6),
		    SMC_GET_GP(handle, CTX_GPREG_X7));

	switch (smc_fid) {
	case FFA_ERROR:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU. If so, then indicate that the SPM Core initialised
		 * unsuccessfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			/* x2 carries the FF-A error code back to spmd_init(). */
			spmd_spm_core_sync_exit(x2);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	case FFA_VERSION:
		input_version = (uint32_t)(0xFFFFFFFF & x1);
		/*
		 * If caller is secure and SPMC was initialized,
		 * return FFA_VERSION of SPMD.
		 * If caller is non secure and SPMC was initialized,
		 * return SPMC's version.
		 * Sanity check to "input_version".
		 */
		if ((input_version & FFA_VERSION_BIT31_MASK) ||
			(ctx->state == SPMC_STATE_RESET)) {
			ret = FFA_ERROR_NOT_SUPPORTED;
		} else if (!secure_origin) {
			gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
			uint64_t rc;

			/*
			 * An FF-A v1.0 SPMC does not understand the framework
			 * version request message; answer on its behalf.
			 */
			if (spmc_attrs.major_version == 1 &&
			    spmc_attrs.minor_version == 0) {
				ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
						       spmc_attrs.minor_version);
				SMC_RET8(handle, (uint32_t)ret,
					 FFA_TARGET_INFO_MBZ,
					 FFA_TARGET_INFO_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
				break;
			}
			/* Save non-secure system registers context */
			cm_el1_sysregs_context_save(NON_SECURE);
#if SPMD_SPM_AT_SEL2
			cm_el2_sysregs_context_save(NON_SECURE);
#endif

			/*
			 * The incoming request has FFA_VERSION as X0 smc_fid
			 * and requested version in x1. Prepare a direct request
			 * from SPMD to SPMC with FFA_VERSION framework function
			 * identifier in X2 and requested version in X3.
			 */
			spmd_build_spmc_message(gpregs,
						SPMD_FWK_MSG_FFA_VERSION_REQ,
						input_version);

			rc = spmd_spm_core_sync_entry(ctx);

			/* The SPMC must answer with a framework VERSION_RESP. */
			if ((rc != 0ULL) ||
			    (SMC_GET_GP(gpregs, CTX_GPREG_X0) !=
				FFA_MSG_SEND_DIRECT_RESP_SMC32) ||
			    (SMC_GET_GP(gpregs, CTX_GPREG_X2) !=
				(SPMD_FWK_MSG_BIT |
				 SPMD_FWK_MSG_FFA_VERSION_RESP))) {
				ERROR("Failed to forward FFA_VERSION\n");
				ret = FFA_ERROR_NOT_SUPPORTED;
			} else {
				ret = SMC_GET_GP(gpregs, CTX_GPREG_X3);
			}

			/*
			 * Return here after SPMC has handled FFA_VERSION.
			 * The returned SPMC version is held in X3.
			 * Forward this version in X0 to the non-secure caller.
			 */
			return spmd_smc_forward(ret, true, FFA_PARAM_MBZ,
						FFA_PARAM_MBZ, FFA_PARAM_MBZ,
						FFA_PARAM_MBZ, gpregs);
		} else {
			/* Secure caller: report the SPMD's own version. */
			ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
					       FFA_VERSION_MINOR);
		}

		SMC_RET8(handle, (uint32_t)ret, FFA_TARGET_INFO_MBZ,
			 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
		break; /* not reached */

	case FFA_FEATURES:
		/*
		 * This is an optional interface. Do the minimal checks and
		 * forward to SPM Core which will handle it if implemented.
		 */

		/* Forward SMC from Normal world to the SPM Core */
		if (!secure_origin) {
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, handle);
		}

		/*
		 * Return success if call was from secure world i.e. all
		 * FFA functions are supported. This is essentially a
		 * nop.
		 */
		SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));

		break; /* not reached */

	case FFA_ID_GET:
		/*
		 * Returns the ID of the calling FFA component.
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}

		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_SECONDARY_EP_REGISTER_SMC64:
		/* Only the SPMC may register a secondary entry point. */
		if (secure_origin) {
			ret = spmd_pm_secondary_ep_register(x1);

			if (ret < 0) {
				SMC_RET8(handle, FFA_ERROR_SMC64,
					FFA_TARGET_INFO_MBZ, ret,
					FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					FFA_PARAM_MBZ);
			} else {
				SMC_RET8(handle, FFA_SUCCESS_SMC64,
					FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ,
					FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					FFA_PARAM_MBZ);
			}
		}

		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		break; /* Not reached */

	case FFA_SPM_ID_GET:
		/* Only defined from FF-A v1.1 onwards. */
		if (MAKE_FFA_VERSION(1, 1) > FFA_VERSION_COMPILED) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}
		/*
		 * Returns the ID of the SPMC or SPMD depending on the FF-A
		 * instance where this function is invoked
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}
		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, SPMD_DIRECT_MSG_ENDPOINT_ID,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
		/* Intercept SPMC-to-SPMD messages; forward everything else. */
		if (secure_origin && spmd_is_spmc_message(x1)) {
			ret = spmd_handle_spmc_message(x3, x4,
				SMC_GET_GP(handle, CTX_GPREG_X5),
				SMC_GET_GP(handle, CTX_GPREG_X6),
				SMC_GET_GP(handle, CTX_GPREG_X7));

			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				FFA_TARGET_INFO_MBZ, ret,
				FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				FFA_PARAM_MBZ);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
				x1, x2, x3, x4, handle);
		}
		break; /* Not reached */

	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
		if (secure_origin && spmd_is_spmc_message(x1)) {
			spmd_spm_core_sync_exit(0ULL);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
				x1, x2, x3, x4, handle);
		}
		break; /* Not reached */

	case FFA_RX_RELEASE:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_PARTITION_INFO_GET:
#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
	case FFA_NOTIFICATION_BITMAP_CREATE:
	case FFA_NOTIFICATION_BITMAP_DESTROY:
	case FFA_NOTIFICATION_BIND:
	case FFA_NOTIFICATION_UNBIND:
	case FFA_NOTIFICATION_SET:
	case FFA_NOTIFICATION_GET:
	case FFA_NOTIFICATION_INFO_GET:
	case FFA_NOTIFICATION_INFO_GET_SMC64:
	case FFA_MSG_SEND2:
#endif
	case FFA_MSG_RUN:
		/*
		 * Above calls should be invoked only by the Normal world and
		 * must not be forwarded from Secure world to Normal world.
		 */
		if (secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Fall through to forward the call to the other world */
	case FFA_MSG_SEND:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
	case FFA_MEM_DONATE_SMC32:
	case FFA_MEM_DONATE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_RECLAIM:
	case FFA_SUCCESS_SMC32:
	case FFA_SUCCESS_SMC64:
		/*
		 * TODO: Assume that no requests originate from EL3 at the
		 * moment. This will change if a SP service is required in
		 * response to secure interrupts targeted to EL3. Until then
		 * simply forward the call to the Normal world.
		 */

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	case FFA_MSG_WAIT:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU from the Secure world. If so, then indicate that the
		 * SPM Core initialised successfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(0ULL);
		}

		/* Fall through to forward the call to the other world */
	case FFA_INTERRUPT:
	case FFA_MSG_YIELD:
		/* This interface must be invoked only by the Secure world */
		if (!secure_origin) {
			return spmd_ffa_error_return(handle,
						      FFA_ERROR_NOT_SUPPORTED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	case FFA_NORMAL_WORLD_RESUME:
		/* Completes the secure interrupt handling started in
		 * spmd_secure_interrupt_handler(). */
		if (secure_origin && ctx->secure_interrupt_ongoing) {
			spmd_spm_core_sync_exit(0ULL);
		} else {
			return spmd_ffa_error_return(handle, FFA_ERROR_DENIED);
		}
		break; /* Not reached */

	default:
		WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}
}
850