/* xref: /rk3399_ARM-atf/services/std_svc/spmd/spmd_main.c (revision f2de48cb143c20ccd7a9c141df3d34cae74049de) */
/*
 * Copyright (c) 2020-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch/aarch64/arch_features.h>
#include <bl31/bl31.h>
#include <bl31/interrupt_mgmt.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/ffa_svc.h>
#include <services/spmd_svc.h>
#include <smccc_helpers.h>
#include "spmd_private.h"

/*******************************************************************************
 * SPM Core context information.
 ******************************************************************************/
static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * SPM Core attribute information read from its manifest.
 ******************************************************************************/
static spmc_manifest_attribute_t spmc_attrs;

/*******************************************************************************
 * SPM Core entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *spmc_ep_info;
/*******************************************************************************
 * Helper to get the SPM Core context for the CPU identified by mpidr.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context_by_mpidr(uint64_t mpidr)
{
	int core_idx = plat_core_pos_by_mpidr(mpidr);

	if (core_idx < 0) {
		ERROR("Invalid mpidr: %" PRIx64 ", returned ID: %d\n", mpidr, core_idx);
		panic();
	}

	return &spm_core_context[core_idx];
}

/*******************************************************************************
 * Helper to get the SPM Core context for the current CPU.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context(void)
{
	return spmd_get_context_by_mpidr(read_mpidr());
}

/*******************************************************************************
 * SPM Core ID getter.
 ******************************************************************************/
uint16_t spmd_spmc_id_get(void)
{
	return spmc_attrs.spmc_id;
}

/*******************************************************************************
 * Static function declarations.
 ******************************************************************************/
static int32_t spmd_init(void);
static int spmd_spmc_init(void *pm_addr);
static uint64_t spmd_ffa_error_return(void *handle,
				       int error_code);
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *handle);

/******************************************************************************
 * Builds an SPMD to SPMC direct message request.
 *****************************************************************************/
void spmd_build_spmc_message(gp_regs_t *gpregs, uint8_t target_func,
			     unsigned long long message)
{
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
	write_ctx_reg(gpregs, CTX_GPREG_X1,
		(SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
		 spmd_spmc_id_get());
	/* Bit 31 of x2 marks this as a framework message. */
	write_ctx_reg(gpregs, CTX_GPREG_X2, BIT(31) | target_func);
	write_ctx_reg(gpregs, CTX_GPREG_X3, message);
}
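
/*
 * Usage sketch (illustrative only): the SPMD power management path can use
 * the helper above to signal a framework message to the SPMC and then enter
 * it synchronously. SPMD_FWK_MSG_PSCI is assumed to be the framework message
 * ID defined in spmd_private.h:
 *
 *	gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
 *
 *	spmd_build_spmc_message(gpregs, SPMD_FWK_MSG_PSCI, PSCI_CPU_OFF);
 *	rc = spmd_spm_core_sync_entry(ctx);
 */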

/*******************************************************************************
 * This function takes an SPMC context pointer and performs a synchronous
 * SPMC entry.
 ******************************************************************************/
uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
{
	uint64_t rc;

	assert(spmc_ctx != NULL);

	cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);

	/* Restore the context assigned above */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(SECURE);
#else
	cm_el1_sysregs_context_restore(SECURE);
#endif
	cm_set_next_eret_context(SECURE);

	/* Enter SPMC */
	rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);

	/* Save secure state */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(SECURE);
#else
	cm_el1_sysregs_context_save(SECURE);
#endif

	return rc;
}

/*******************************************************************************
 * This function returns to the place where spmd_spm_core_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();

	/* Get current CPU context from SPMC context */
	assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));

	/*
	 * The SPMD must have initiated the original request through a
	 * synchronous entry into the SPMC. Jump back to the original C runtime
	 * context with the value of rc in x0.
	 */
	spmd_spm_core_exit(ctx->c_rt_ctx, rc);

	panic();
}
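
/*
 * For reference, the entry/exit pair behaves like setjmp/longjmp. A sketch
 * of the round trip, assuming the SPMC signals completion through an SMC
 * such as FFA_MSG_WAIT on first boot:
 *
 *	rc = spmd_spm_core_sync_entry(ctx);
 *	// ... the SPMC runs, eventually traps into spmd_smc_handler(),
 *	// which calls spmd_spm_core_sync_exit(x) ...
 *	// execution resumes here with rc == x
 */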

/*******************************************************************************
 * Jump to the SPM Core for the first time.
 ******************************************************************************/
static int32_t spmd_init(void)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	uint64_t rc;

	VERBOSE("SPM Core init start.\n");

	/* Primary boot core enters the SPMC for initialization. */
	ctx->state = SPMC_STATE_ON_PENDING;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("SPMC initialisation failed 0x%" PRIx64 "\n", rc);
		return 0;
	}

	ctx->state = SPMC_STATE_ON;

	VERBOSE("SPM Core init end.\n");

	return 1;
}

/*******************************************************************************
 * spmd_secure_interrupt_handler
 * Enter the SPMC for further handling of the secure interrupt by the SPMC
 * itself or a Secure Partition.
 ******************************************************************************/
static uint64_t spmd_secure_interrupt_handler(uint32_t id,
					      uint32_t flags,
					      void *handle,
					      void *cookie)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
	unsigned int linear_id = plat_my_core_pos();
	int64_t rc;

	/* Sanity check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering SPMC */
	cm_el1_sysregs_context_save(NON_SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(NON_SECURE);
#endif

	/* Convey the event to the SPMC through the FFA_INTERRUPT interface. */
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_INTERRUPT);
	write_ctx_reg(gpregs, CTX_GPREG_X1, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X2, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X3, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X7, 0);

	/* Mark current core as handling a secure interrupt. */
	ctx->secure_interrupt_ongoing = true;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("%s failed (%" PRId64 ") on CPU%u\n", __func__, rc, linear_id);
	}

	ctx->secure_interrupt_ongoing = false;

	cm_el1_sysregs_context_restore(NON_SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(NON_SECURE);
#endif
	cm_set_next_eret_context(NON_SECURE);

	SMC_RET0(&ctx->cpu_ctx);
}
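
/*
 * Illustrative flow for the handler above: a secure interrupt fires while
 * the normal world runs, EL3 saves the NS context and enters the SPMC with
 * FFA_INTERRUPT in x0. Once the SPMC (or a Secure Partition) has handled
 * the interrupt, it is expected to issue FFA_NORMAL_WORLD_RESUME, which
 * spmd_smc_handler() turns into spmd_spm_core_sync_exit(), unwinding back
 * to the spmd_spm_core_sync_entry() call above so the NS context can be
 * restored.
 */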

/*******************************************************************************
 * Loads the SPMC manifest and initialises the SPMC.
 ******************************************************************************/
static int spmd_spmc_init(void *pm_addr)
{
	cpu_context_t *cpu_ctx;
	unsigned int core_id;
	uint32_t ep_attr, flags;
	int rc;

	/* Load the SPM Core manifest */
	rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
	if (rc != 0) {
		WARN("No or invalid SPM Core manifest image provided by BL2\n");
		return rc;
	}

	/*
	 * Ensure that the SPM Core version is compatible with the SPM
	 * Dispatcher version.
	 */
	if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
	    (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
		WARN("Unsupported FFA version (%u.%u)\n",
		     spmc_attrs.major_version, spmc_attrs.minor_version);
		return -EINVAL;
	}
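
	/*
	 * For reference, FF-A versions are encoded as (major << 16) | minor
	 * with bit 31 zero, e.g. MAKE_FFA_VERSION(1, 0) == 0x10000. The check
	 * above accepts any SPMC minor version not newer than the SPMD's, in
	 * line with FF-A version compatibility rules.
	 */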

	VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
	     spmc_attrs.minor_version);

	VERBOSE("SPM Core run time EL%x.\n",
	     SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);

	/* Validate the SPMC ID; ensure the high bit is set */
	if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
			SPMC_SECURE_ID_MASK) == 0U) {
		WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
		return -EINVAL;
	}
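
	/*
	 * Note (assuming SPMC_SECURE_ID_SHIFT selects the top bit of the
	 * 16-bit ID space): FF-A IDs of secure world endpoints have that bit
	 * set, so e.g. 0x8000 is a plausible SPMC ID while 0x0001 would be
	 * rejected by the check above.
	 */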

	/* Validate the SPM Core execution state */
	if ((spmc_attrs.exec_state != MODE_RW_64) &&
	    (spmc_attrs.exec_state != MODE_RW_32)) {
		WARN("Unsupported %s%x.\n", "SPM Core execution state 0x",
		     spmc_attrs.exec_state);
		return -EINVAL;
	}

	VERBOSE("%s%x.\n", "SPM Core execution state 0x",
		spmc_attrs.exec_state);

#if SPMD_SPM_AT_SEL2
	/* Ensure the manifest has not requested AArch32 state in S-EL2 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		WARN("AArch32 state at S-EL2 is not supported.\n");
		return -EINVAL;
	}

	/*
	 * Check that S-EL2 is supported on this system, since S-EL2 is
	 * required for SPM in this configuration.
	 */
	if (!is_armv8_4_sel2_present()) {
		WARN("SPM Core run time S-EL2 is not supported.\n");
		return -EINVAL;
	}
#endif /* SPMD_SPM_AT_SEL2 */

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = SECURE | EP_ST_ENABLE;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);

	/*
	 * Populate SPSR for SPM Core based upon validated parameters from the
	 * manifest.
	 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
						 SPSR_E_LITTLE,
						 DAIF_FIQ_BIT |
						 DAIF_IRQ_BIT |
						 DAIF_ABT_BIT);
	} else {
#if SPMD_SPM_AT_SEL2
		static const uint32_t runtime_el = MODE_EL2;
#else
		static const uint32_t runtime_el = MODE_EL1;
#endif
		spmc_ep_info->spsr = SPSR_64(runtime_el,
					     MODE_SP_ELX,
					     DISABLE_ALL_EXCEPTIONS);
	}

	/* Set an initial SPMC context state for all cores. */
	for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
		spm_core_context[core_id].state = SPMC_STATE_OFF;

		/* Setup an initial cpu context for the SPMC. */
		cpu_ctx = &spm_core_context[core_id].cpu_ctx;
		cm_setup_context(cpu_ctx, spmc_ep_info);

		/*
		 * Pass the core linear ID to the SPMC through x4.
		 * (TF-A implementation defined behavior helping
		 * a legacy TOS migration to adopt FF-A).
		 */
		write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4, core_id);
	}

	/* Register power management hooks with PSCI */
	psci_register_spd_pm_hook(&spmd_pm);

	/* Register init function for deferred init. */
	bl31_register_bl32_init(&spmd_init);

	INFO("SPM Core setup done.\n");

	/*
	 * Register an interrupt handler routing secure interrupts to SPMD
	 * while the NWd is running.
	 */
	flags = 0;
	set_interrupt_rm_flag(flags, NON_SECURE);
	rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
					     spmd_secure_interrupt_handler,
					     flags);
	if (rc != 0) {
		panic();
	}

	return 0;
}

/*******************************************************************************
 * Initialize context of SPM Core.
 ******************************************************************************/
int spmd_setup(void)
{
	void *spmc_manifest;
	int rc;

	spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (spmc_ep_info == NULL) {
		WARN("No SPM Core image provided by BL2 boot loader.\n");
		return -EINVAL;
	}

	/* Under no circumstances will this parameter be 0 */
	assert(spmc_ep_info->pc != 0ULL);

	/*
	 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
	 * be used as a manifest for the SPM Core at the next lower EL/mode.
	 */
	spmc_manifest = (void *)spmc_ep_info->args.arg0;
	if (spmc_manifest == NULL) {
		ERROR("Invalid or absent SPM Core manifest.\n");
		return -EINVAL;
	}

	/* Load manifest, init SPMC */
	rc = spmd_spmc_init(spmc_manifest);
	if (rc != 0) {
		WARN("Booting device without SPM initialization.\n");
	}

	return rc;
}

/*******************************************************************************
 * Forward SMC to the other security state
 ******************************************************************************/
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *handle)
{
	unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
	unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;

	/* Save incoming security state */
#if SPMD_SPM_AT_SEL2
	if (secure_state_in == NON_SECURE) {
		cm_el1_sysregs_context_save(secure_state_in);
	}
	cm_el2_sysregs_context_save(secure_state_in);
#else
	cm_el1_sysregs_context_save(secure_state_in);
#endif

	/* Restore outgoing security state */
#if SPMD_SPM_AT_SEL2
	if (secure_state_out == NON_SECURE) {
		cm_el1_sysregs_context_restore(secure_state_out);
	}
	cm_el2_sysregs_context_restore(secure_state_out);
#else
	cm_el1_sysregs_context_restore(secure_state_out);
#endif
	cm_set_next_eret_context(secure_state_out);

	SMC_RET8(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
			SMC_GET_GP(handle, CTX_GPREG_X5),
			SMC_GET_GP(handle, CTX_GPREG_X6),
			SMC_GET_GP(handle, CTX_GPREG_X7));
}
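
/*
 * Worked example for the forwarding path above: an FFA_MSG_SEND_DIRECT_REQ
 * arriving from the normal world (secure_origin == false) saves the NS EL1
 * (and, with SPMD_SPM_AT_SEL2, EL2) system registers, restores the secure
 * ones, and ERETs into the SPMC with x0-x7 carried over from the caller.
 * The mirrored path applies to a response travelling from the secure to
 * the normal world.
 */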

/*******************************************************************************
 * Return FFA_ERROR with specified error code
 ******************************************************************************/
static uint64_t spmd_ffa_error_return(void *handle, int error_code)
{
	SMC_RET8(handle, (uint32_t) FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, (uint32_t)error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

/*******************************************************************************
 * spmd_check_address_in_binary_image
 ******************************************************************************/
bool spmd_check_address_in_binary_image(uint64_t address)
{
	assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size));

	return ((address >= spmc_attrs.load_address) &&
		(address < (spmc_attrs.load_address + spmc_attrs.binary_size)));
}
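
/*
 * Usage sketch (illustrative): a caller such as the secondary entry point
 * registration path could validate an SPMC-supplied address against the
 * SPMC image boundaries before accepting it; entry_point is a hypothetical
 * variable name here:
 *
 *	if (!spmd_check_address_in_binary_image(entry_point)) {
 *		return -EINVAL;
 *	}
 */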

/******************************************************************************
 * spmd_is_spmc_message
 *****************************************************************************/
static bool spmd_is_spmc_message(unsigned int ep)
{
	return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID)
		&& (ffa_endpoint_source(ep) == spmc_attrs.spmc_id));
}
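
/*
 * For reference, a direct message carries the source endpoint ID in
 * x1[31:16] and the destination in x1[15:0] (see spmd_build_spmc_message()
 * above), so a message from the SPMC to the SPMD is encoded as:
 *
 *	x1 = (spmc_attrs.spmc_id << FFA_DIRECT_MSG_SOURCE_SHIFT) |
 *	     SPMD_DIRECT_MSG_ENDPOINT_ID;
 */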

/******************************************************************************
 * spmd_handle_spmc_message
 *****************************************************************************/
static int spmd_handle_spmc_message(unsigned long long msg,
		unsigned long long parm1, unsigned long long parm2,
		unsigned long long parm3, unsigned long long parm4)
{
	VERBOSE("%s %llx %llx %llx %llx %llx\n", __func__,
		msg, parm1, parm2, parm3, parm4);

	return -EINVAL;
}

/*******************************************************************************
 * This function handles all SMCs in the range reserved for FFA. Each call is
 * either forwarded to the other security state or handled by the SPM
 * dispatcher.
 ******************************************************************************/
uint64_t spmd_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	unsigned int linear_id = plat_my_core_pos();
	spmd_spm_core_context_t *ctx = spmd_get_context();
	bool secure_origin;
	int32_t ret;
	uint32_t input_version;

	/* Determine which security state this SMC originated from */
	secure_origin = is_caller_secure(flags);

	VERBOSE("SPM(%u): 0x%x 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64
		" 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
		    linear_id, smc_fid, x1, x2, x3, x4,
		    SMC_GET_GP(handle, CTX_GPREG_X5),
		    SMC_GET_GP(handle, CTX_GPREG_X6),
		    SMC_GET_GP(handle, CTX_GPREG_X7));
	switch (smc_fid) {
	case FFA_ERROR:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU. If so, then indicate that the SPM Core initialised
		 * unsuccessfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(x2);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	case FFA_VERSION:
		input_version = (uint32_t)(0xFFFFFFFF & x1);
		/*
		 * Sanity check the input_version first. If the caller is
		 * secure and the SPMC was initialized, return the SPMD's
		 * FFA_VERSION. If the caller is non-secure and the SPMC was
		 * initialized, return the SPMC's version.
		 */
		if ((input_version & FFA_VERSION_BIT31_MASK) ||
			(ctx->state == SPMC_STATE_RESET)) {
			ret = FFA_ERROR_NOT_SUPPORTED;
		} else if (!secure_origin) {
			gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
			uint64_t rc;

			if (spmc_attrs.major_version == 1 &&
			    spmc_attrs.minor_version == 0) {
				ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
						       spmc_attrs.minor_version);
				SMC_RET8(handle, (uint32_t)ret,
					 FFA_TARGET_INFO_MBZ,
					 FFA_TARGET_INFO_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
				break;
			}
			/* Save non-secure system registers context */
			cm_el1_sysregs_context_save(NON_SECURE);
#if SPMD_SPM_AT_SEL2
			cm_el2_sysregs_context_save(NON_SECURE);
#endif

			/*
			 * The incoming request has FFA_VERSION as X0 smc_fid
			 * and requested version in x1. Prepare a direct request
			 * from SPMD to SPMC with FFA_VERSION framework function
			 * identifier in X2 and requested version in X3.
			 */
			spmd_build_spmc_message(gpregs,
						SPMD_FWK_MSG_FFA_VERSION_REQ,
						input_version);

			rc = spmd_spm_core_sync_entry(ctx);

			if ((rc != 0ULL) ||
			    (SMC_GET_GP(gpregs, CTX_GPREG_X0) !=
				FFA_MSG_SEND_DIRECT_RESP_SMC32) ||
			    (SMC_GET_GP(gpregs, CTX_GPREG_X2) !=
				(SPMD_FWK_MSG_BIT |
				 SPMD_FWK_MSG_FFA_VERSION_RESP))) {
				ERROR("Failed to forward FFA_VERSION\n");
				ret = FFA_ERROR_NOT_SUPPORTED;
			} else {
				ret = SMC_GET_GP(gpregs, CTX_GPREG_X3);
			}

			/*
			 * Return here after SPMC has handled FFA_VERSION.
			 * The returned SPMC version is held in X3.
			 * Forward this version in X0 to the non-secure caller.
			 */
			return spmd_smc_forward(ret, true, FFA_PARAM_MBZ,
						FFA_PARAM_MBZ, FFA_PARAM_MBZ,
						FFA_PARAM_MBZ, gpregs);
		} else {
			ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
					       FFA_VERSION_MINOR);
		}

		SMC_RET8(handle, (uint32_t)ret, FFA_TARGET_INFO_MBZ,
			 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
		break; /* not reached */

	case FFA_FEATURES:
		/*
		 * This is an optional interface. Do the minimal checks and
		 * forward to SPM Core which will handle it if implemented.
		 */

		/* Forward SMC from Normal world to the SPM Core */
		if (!secure_origin) {
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, handle);
		}

		/*
		 * Return success if call was from secure world i.e. all
		 * FFA functions are supported. This is essentially a
		 * nop.
		 */
		SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));

		break; /* not reached */

	case FFA_ID_GET:
		/*
		 * Returns the ID of the calling FFA component.
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}

		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_SECONDARY_EP_REGISTER_SMC64:
		if (secure_origin) {
			ret = spmd_pm_secondary_ep_register(x1);

			if (ret < 0) {
				SMC_RET8(handle, FFA_ERROR_SMC64,
					FFA_TARGET_INFO_MBZ, ret,
					FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					FFA_PARAM_MBZ);
			} else {
				SMC_RET8(handle, FFA_SUCCESS_SMC64,
					FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ,
					FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					FFA_PARAM_MBZ);
			}
		}

		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		break; /* not reached */

	case FFA_SPM_ID_GET:
		if (MAKE_FFA_VERSION(1, 1) > FFA_VERSION_COMPILED) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}
		/*
		 * Returns the ID of the SPMC or SPMD depending on the FF-A
		 * instance where this function is invoked.
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}
		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, SPMD_DIRECT_MSG_ENDPOINT_ID,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
		if (secure_origin && spmd_is_spmc_message(x1)) {
			ret = spmd_handle_spmc_message(x3, x4,
				SMC_GET_GP(handle, CTX_GPREG_X5),
				SMC_GET_GP(handle, CTX_GPREG_X6),
				SMC_GET_GP(handle, CTX_GPREG_X7));

			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				FFA_TARGET_INFO_MBZ, ret,
				FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				FFA_PARAM_MBZ);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
				x1, x2, x3, x4, handle);
		}
		break; /* not reached */

	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
		if (secure_origin && spmd_is_spmc_message(x1)) {
			spmd_spm_core_sync_exit(0ULL);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
				x1, x2, x3, x4, handle);
		}
		break; /* not reached */

	case FFA_RX_RELEASE:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_PARTITION_INFO_GET:
#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
	case FFA_NOTIFICATION_BITMAP_CREATE:
	case FFA_NOTIFICATION_BITMAP_DESTROY:
	case FFA_NOTIFICATION_BIND:
	case FFA_NOTIFICATION_UNBIND:
	case FFA_NOTIFICATION_SET:
	case FFA_NOTIFICATION_GET:
	case FFA_NOTIFICATION_INFO_GET:
	case FFA_NOTIFICATION_INFO_GET_SMC64:
	case FFA_MSG_SEND2:
#endif
	case FFA_MSG_RUN:
		/*
		 * The above calls should be invoked only by the Normal world
		 * and must not be forwarded from the Secure world to the
		 * Normal world.
		 */
		if (secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Fall through to forward the call to the other world */
	case FFA_MSG_SEND:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
	case FFA_MEM_DONATE_SMC32:
	case FFA_MEM_DONATE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_RECLAIM:
	case FFA_SUCCESS_SMC32:
	case FFA_SUCCESS_SMC64:
		/*
		 * TODO: Assume that no requests originate from EL3 at the
		 * moment. This will change if an SP service is required in
		 * response to secure interrupts targeted to EL3. Until then,
		 * simply forward the call to the Normal world.
		 */
		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	case FFA_MSG_WAIT:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU from the Secure world. If so, then indicate that the
		 * SPM Core initialised successfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(0ULL);
		}

		/* Fall through to forward the call to the other world */
	case FFA_INTERRUPT:
	case FFA_MSG_YIELD:
		/* These interfaces must be invoked only by the Secure world */
		if (!secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	case FFA_NORMAL_WORLD_RESUME:
		if (secure_origin && ctx->secure_interrupt_ongoing) {
			spmd_spm_core_sync_exit(0ULL);
		} else {
			return spmd_ffa_error_return(handle, FFA_ERROR_DENIED);
		}
		break; /* not reached */

	default:
		WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}
}
833