/*
 * Copyright (c) 2020-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch/aarch64/arch_features.h>
#include <bl31/bl31.h>
#include <bl31/interrupt_mgmt.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <common/tbbr/tbbr_img_def.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/fconf/fconf.h>
#include <lib/fconf/fconf_dyn_cfg_getter.h>
#include <lib/per_cpu/per_cpu.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/el3_spmd_logical_sp.h>
#include <services/ffa_svc.h>
#include <services/spmc_svc.h>
#include <services/spmd_svc.h>
#include <smccc_helpers.h>
#include "spmd_private.h"
#if TRANSFER_LIST
#include <transfer_list.h>
#endif

/*******************************************************************************
 * SPM Core context information.
 ******************************************************************************/
static PER_CPU_DEFINE(spmd_spm_core_context_t, spm_core_context);

/*******************************************************************************
 * SPM Core attribute information is read from its manifest if the SPMC is not
 * at EL3. Otherwise, it is populated by the SPMC directly.
 ******************************************************************************/
static spmc_manifest_attribute_t spmc_attrs;

/*******************************************************************************
 * FFA version used by the non-secure endpoint.
 ******************************************************************************/
static uint32_t nonsecure_ffa_version;

/*******************************************************************************
 * Whether the normal world has finished negotiating its FFA version.
 ******************************************************************************/
static bool nonsecure_version_negotiated;

/*******************************************************************************
 * FFA version used by the SPMC, as seen by the normal world.
 ******************************************************************************/
static uint32_t spmc_nwd_ffa_version;

/*******************************************************************************
 * SPM Core entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *spmc_ep_info;

/*******************************************************************************
 * Helper returning the SPM Core context on the current CPU.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context(void)
{
	return PER_CPU_CUR(spm_core_context);
}

/*******************************************************************************
 * SPM Core ID getter.
 ******************************************************************************/
uint16_t spmd_spmc_id_get(void)
{
	return spmc_attrs.spmc_id;
}

/*******************************************************************************
 * Static function declarations.
 ******************************************************************************/
static int32_t spmd_init(void);
static int spmd_spmc_init(void *pm_addr);

static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags,
				 uint32_t secure_ffa_version);

/******************************************************************************
 * Builds an SPMD to SPMC direct message request.
 *****************************************************************************/
void spmd_build_spmc_message(gp_regs_t *gpregs, uint8_t target_func,
			     unsigned long long message)
{
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
	write_ctx_reg(gpregs, CTX_GPREG_X1,
		(SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
		 spmd_spmc_id_get());
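	/*
	 * BIT(31) in w2 marks this as a framework message; it matches the
	 * FFA_FWK_MSG_BIT checked when parsing the SPMC's direct response.
	 */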
	write_ctx_reg(gpregs, CTX_GPREG_X2, BIT(31) | target_func);
	write_ctx_reg(gpregs, CTX_GPREG_X3, message);

	/* Zero out x4-x7 for the direct request emitted towards the SPMC. */
	write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X7, 0);
}

/*******************************************************************************
 * This function takes an SPMC context pointer and performs a synchronous
 * SPMC entry.
 ******************************************************************************/
uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
{
	uint64_t rc;

	assert(spmc_ctx != NULL);

	cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);

	/* Restore the context assigned above */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(SECURE);
#else
	cm_el1_sysregs_context_restore(SECURE);
#endif
	cm_set_next_eret_context(SECURE);

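	/*
	 * spmd_spm_core_enter() saves the current EL3 C runtime context in
	 * c_rt_ctx; spmd_spm_core_sync_exit() later jumps back to it through
	 * spmd_spm_core_exit(), placing its rc argument in x0.
	 */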
	/* Enter SPMC */
	rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);

	/* Save secure state */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(SECURE);
#else
	cm_el1_sysregs_context_save(SECURE);
#endif

	return rc;
}

/*******************************************************************************
 * This function returns to the place where spmd_spm_core_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();

	/* Get current CPU context from SPMC context */
	assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));

	/*
	 * The SPMD must have initiated the original request through a
	 * synchronous entry into the SPMC. Jump back to the original C runtime
	 * context with the value of rc in x0.
	 */
	spmd_spm_core_exit(ctx->c_rt_ctx, rc);

	panic();
}

void spmd_setup_context(unsigned int core_id)
{
	cpu_context_t *cpu_ctx;

	PER_CPU_CUR(spm_core_context)->state = SPMC_STATE_OFF;

	/* Set up an initial cpu context for the SPMC. */
	cpu_ctx = &(PER_CPU_CUR(spm_core_context)->cpu_ctx);
	cm_setup_context(cpu_ctx, spmc_ep_info);

	/*
	 * Pass the core linear ID to the SPMC through x4.
	 * (TF-A implementation-defined behavior helping
	 * a legacy TOS migration to adopt FF-A.)
	 */
	write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4, core_id);
}

/*******************************************************************************
 * Jump to the SPM Core for the first time.
 ******************************************************************************/
static int32_t spmd_init(void)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	uint64_t rc;

	VERBOSE("SPM Core init start.\n");

	/* Primary boot core enters the SPMC for initialization. */
	ctx->state = SPMC_STATE_ON_PENDING;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("SPMC initialisation failed 0x%" PRIx64 "\n", rc);
		return 0;
	}

	ctx->state = SPMC_STATE_ON;

	VERBOSE("SPM Core init end.\n");

	spmd_logical_sp_set_spmc_initialized();
	rc = spmd_logical_sp_init();
	if (rc != 0) {
		WARN("SPMD logical partitions failed to initialise.\n");
	}

	return 1;
}

/*******************************************************************************
 * spmd_secure_interrupt_handler
 * Enter the SPMC for further handling of the secure interrupt by the SPMC
 * itself or a Secure Partition.
 ******************************************************************************/
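/*
 * Note: once it has handled the interrupt, the SPMC is expected to return to
 * the SPMD through FFA_NORMAL_WORLD_RESUME, which performs the matching
 * synchronous exit so the preempted normal world context can be resumed.
 */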
static uint64_t spmd_secure_interrupt_handler(uint32_t id,
					      uint32_t flags,
					      void *handle,
					      void *cookie)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
	int64_t rc;

	/* Sanity check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering SPMC */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(NON_SECURE);
#else
	cm_el1_sysregs_context_save(NON_SECURE);

#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
	/*
	 * The hint bit denoting absence of SVE live state is effectively false
	 * in this scenario where execution was trapped to EL3 due to FIQ.
	 */
	simd_ctx_save(NON_SECURE, false);
	simd_ctx_restore(SECURE);
#endif
#endif

	/* Convey the event to the SPMC through the FFA_INTERRUPT interface. */
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_INTERRUPT);
	write_ctx_reg(gpregs, CTX_GPREG_X1, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X2, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X3, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X7, 0);

	/* Mark current core as handling a secure interrupt. */
	ctx->secure_interrupt_ongoing = true;

	rc = spmd_spm_core_sync_entry(ctx);

	if (rc != 0ULL) {
		ERROR("%s failed (%" PRId64 ") on CPU%u\n", __func__, rc, plat_my_core_pos());
	}

	ctx->secure_interrupt_ongoing = false;

#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(NON_SECURE);
#else
	cm_el1_sysregs_context_restore(NON_SECURE);

#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
	simd_ctx_save(SECURE, false);
	simd_ctx_restore(NON_SECURE);
#endif
#endif
	cm_set_next_eret_context(NON_SECURE);

	SMC_RET0(&ctx->cpu_ctx);
}

#if (EL3_EXCEPTION_HANDLING == 0)
/*******************************************************************************
 * spmd_group0_interrupt_handler_nwd
 *
 * Group0 secure interrupts triggered while the normal world runs are trapped
 * to EL3. Delegate the handling of the interrupt to the platform handler, and
 * return only upon successfully handling the Group0 interrupt.
 *
 * NOTE: the generic handle_interrupt_exception entry calls
 * plat_ic_get_pending_interrupt_type to perform a first triage and route to
 * the corresponding interrupt handler based on the interrupt type.
 * A registered handler must not assume that the HPPI is unchanged between the
 * top level handler and the point where control reaches it. The first thing a
 * handler must do is attempt to acknowledge the interrupt and process it if it
 * is a valid INTID. In the meantime, the interrupt may have been acknowledged
 * by another PE, a higher-priority interrupt may have been asserted, or the
 * HPPI may have changed for any other valid reason. The reasoning is the same
 * for an interrupt delegated by a lower EL through the FFA_EL3_INTR_HANDLE
 * interface.
 * For a G0 interrupt triggered while the secure world runs, the first triage
 * is done by a lower EL, e.g. S-EL2, which routes it to EL3 for handling. Once
 * there, the HPPI might have changed so the same rules as above apply.
 ******************************************************************************/
static uint64_t spmd_group0_interrupt_handler_nwd(uint32_t id,
						  uint32_t flags,
						  void *handle,
						  void *cookie)
{
	uint32_t intid, intr_raw;

	/* Sanity check the security state when the exception was generated. */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context. */
	assert(handle == cm_get_context(NON_SECURE));

	assert(id == INTR_ID_UNAVAILABLE);

	intr_raw = plat_ic_acknowledge_interrupt();
	intid = plat_ic_get_interrupt_id(intr_raw);

	if (intid == INTR_ID_UNAVAILABLE) {
		return 0U;
	}

	if (plat_spmd_handle_group0_interrupt(intid) < 0) {
		ERROR("Group0 interrupt %u not handled\n", intid);
		panic();
	}

	/* Deactivate the corresponding Group0 interrupt. */
	plat_ic_end_of_interrupt(intid);

	return 0U;
}
#endif

/*******************************************************************************
 * spmd_handle_group0_intr_swd
 *
 * The SPMC delegates handling of a Group0 secure interrupt to EL3 firmware
 * using the FFA_EL3_INTR_HANDLE SMC call. Further, the SPMD delegates the
 * handling of the interrupt to the platform handler, and returns only upon
 * successfully handling the Group0 interrupt.
 *
 * NOTE: see the spmd_group0_interrupt_handler_nwd note section.
 ******************************************************************************/
static uint64_t spmd_handle_group0_intr_swd(void *handle)
{
	uint32_t intid, intr_raw;

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(SECURE));

	intr_raw = plat_ic_acknowledge_interrupt();
	intid = plat_ic_get_interrupt_id(intr_raw);

	if (intid == INTR_ID_UNAVAILABLE) {
		return 0U;
	}

	/*
	 * TODO: Currently, due to a limitation in the SPMD implementation, the
	 * platform handler is expected to not delegate handling to the NWd
	 * while processing a Group0 secure interrupt.
	 */
	if (plat_spmd_handle_group0_interrupt(intid) < 0) {
		/* Group0 interrupt was not handled by the platform. */
		ERROR("Group0 interrupt %u not handled\n", intid);
		panic();
	}

	/* Deactivate the corresponding Group0 interrupt. */
	plat_ic_end_of_interrupt(intid);

	/* Return success. */
	SMC_RET8(handle, FFA_SUCCESS_SMC32, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ);
}

#if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
static int spmd_dynamic_map_mem(uintptr_t base_addr, size_t size,
				 unsigned int attr, uintptr_t *align_addr,
				 size_t *align_size)
{
	uintptr_t base_addr_align;
	size_t mapped_size_align;
	int rc;

	/* Page-align the address and size if necessary. */
	base_addr_align = page_align(base_addr, DOWN);
	mapped_size_align = page_align(size, UP);

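	/*
	 * If the base address was rounded down while the size was already
	 * page-aligned, the region spills over into one additional page.
	 */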
	if ((base_addr != base_addr_align) &&
	    (size == mapped_size_align)) {
		mapped_size_align += PAGE_SIZE;
	}

	/*
	 * Dynamically map the given region using its aligned base address
	 * and size.
	 */
	rc = mmap_add_dynamic_region((unsigned long long)base_addr_align,
				     base_addr_align,
				     mapped_size_align,
				     attr);
	if (rc == 0) {
		*align_addr = base_addr_align;
		*align_size = mapped_size_align;
	}

	return rc;
}

static void spmd_do_sec_cpy(uintptr_t root_base_addr, uintptr_t sec_base_addr,
			    size_t size)
{
	uintptr_t root_base_addr_align, sec_base_addr_align;
	size_t root_mapped_size_align, sec_mapped_size_align;
	int rc;

	assert(root_base_addr != 0UL);
	assert(sec_base_addr != 0UL);
	assert(size != 0UL);

	/* Map the memory with required attributes */
	rc = spmd_dynamic_map_mem(root_base_addr, size, MT_RO_DATA | MT_ROOT,
				  &root_base_addr_align,
				  &root_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while mapping", "root region",
		      root_base_addr, rc);
		panic();
	}

	rc = spmd_dynamic_map_mem(sec_base_addr, size, MT_RW_DATA | MT_SECURE,
				  &sec_base_addr_align, &sec_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while mapping",
		      "secure region", sec_base_addr, rc);
		panic();
	}

	/* Do copy operation */
	(void)memcpy((void *)sec_base_addr, (void *)root_base_addr, size);

	/* Unmap root memory region */
	rc = mmap_remove_dynamic_region(root_base_addr_align,
					root_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while unmapping",
		      "root region", root_base_addr_align, rc);
		panic();
	}

	/* Unmap secure memory region */
	rc = mmap_remove_dynamic_region(sec_base_addr_align,
					sec_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while unmapping",
		      "secure region", sec_base_addr_align, rc);
		panic();
	}
}
#endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */

/*******************************************************************************
 * Loads the SPMC manifest and initialises the SPMC.
 ******************************************************************************/
static int spmd_spmc_init(void *pm_addr)
{
	uint32_t ep_attr, flags;
	int rc;
	const struct dyn_cfg_dtb_info_t *image_info __unused;

	/* Load the SPM Core manifest */
	rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
	if (rc != 0) {
		WARN("No or invalid SPM Core manifest image provided by BL2\n");
		return rc;
	}

	/*
	 * Ensure that the SPM Core version is compatible with the SPM
	 * Dispatcher version.
	 */
	if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
	    (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
		WARN("Unsupported FFA version (%u.%u)\n",
		     spmc_attrs.major_version, spmc_attrs.minor_version);
		return -EINVAL;
	}

	VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
		spmc_attrs.minor_version);

	VERBOSE("SPM Core run time EL%x.\n",
		SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);

	/* Validate the SPMC ID; ensure the high bit is set. */
	if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
			SPMC_SECURE_ID_MASK) == 0U) {
		WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
		return -EINVAL;
	}

	/* Validate the SPM Core execution state */
	if ((spmc_attrs.exec_state != MODE_RW_64) &&
	    (spmc_attrs.exec_state != MODE_RW_32)) {
		WARN("Unsupported %s%x.\n", "SPM Core execution state 0x",
		     spmc_attrs.exec_state);
		return -EINVAL;
	}

	VERBOSE("%s%x.\n", "SPM Core execution state 0x",
		spmc_attrs.exec_state);

#if SPMD_SPM_AT_SEL2
	/* Ensure the manifest has not requested AArch32 state in S-EL2. */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		WARN("AArch32 state at S-EL2 is not supported.\n");
		return -EINVAL;
	}

	/*
	 * Check that S-EL2 is supported on this system, since S-EL2
	 * is required for SPM.
	 */
	if (!is_feat_sel2_supported()) {
		WARN("SPM Core run time S-EL2 is not supported.\n");
		return -EINVAL;
	}
#endif /* SPMD_SPM_AT_SEL2 */

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = SECURE | EP_ST_ENABLE;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);

	/*
	 * Populate SPSR for SPM Core based upon validated parameters from the
	 * manifest.
	 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
						 SPSR_E_LITTLE,
						 DAIF_FIQ_BIT |
						 DAIF_IRQ_BIT |
						 DAIF_ABT_BIT);
	} else {
#if SPMD_SPM_AT_SEL2
		static const uint32_t runtime_el = MODE_EL2;
#else
		static const uint32_t runtime_el = MODE_EL1;
#endif
		spmc_ep_info->spsr = SPSR_64(runtime_el,
					     MODE_SP_ELX,
					     DISABLE_ALL_EXCEPTIONS);
	}

#if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
	image_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, TOS_FW_CONFIG_ID);
	assert(image_info != NULL);

	if ((image_info->config_addr == 0UL) ||
	    (image_info->secondary_config_addr == 0UL) ||
	    (image_info->config_max_size == 0UL)) {
		return -EINVAL;
	}

	/* Copy the manifest from the root to the secure region. */
	spmd_do_sec_cpy(image_info->config_addr,
			image_info->secondary_config_addr,
			image_info->config_max_size);

	/* Update the BL32 entry point info. */
	assert(spmc_ep_info != NULL);
	spmc_ep_info->args.arg0 = image_info->secondary_config_addr;
#endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */

	spmd_setup_context(plat_my_core_pos());

	/* Register power management hooks with PSCI */
	psci_register_spd_pm_hook(&spmd_pm);

	/* Register init function for deferred init. */
	bl31_register_bl32_init(&spmd_init);

	INFO("SPM Core setup done.\n");

	/*
	 * Register an interrupt handler routing secure interrupts to the SPMD
	 * while the NWd is running.
	 */
	flags = 0;
	set_interrupt_rm_flag(flags, NON_SECURE);
	rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
					     spmd_secure_interrupt_handler,
					     flags);
	if (rc != 0) {
		panic();
	}

	/*
	 * Permit configurations where the SPM resides at S-EL1/2 and, upon a
	 * Group0 interrupt triggering while the normal world runs, the
	 * interrupt is routed either through the EHF or directly to the SPMD:
	 *
	 * EL3_EXCEPTION_HANDLING=0: the Group0 interrupt is routed to the SPMD
	 *                   for handling by spmd_group0_interrupt_handler_nwd.
	 *
	 * EL3_EXCEPTION_HANDLING=1: the Group0 interrupt is routed to the EHF.
	 */
#if (EL3_EXCEPTION_HANDLING == 0)
	/*
	 * If EL3 interrupts are supported by the platform, register an
	 * interrupt handler routing Group0 interrupts to the SPMD while the
	 * NWd is running.
	 */
	if (plat_ic_has_interrupt_type(INTR_TYPE_EL3)) {
		rc = register_interrupt_type_handler(INTR_TYPE_EL3,
						     spmd_group0_interrupt_handler_nwd,
						     flags);
		if (rc != 0) {
			panic();
		}
	}
#endif

	return 0;
}

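/*
 * Note: spmd_spmc_init() only prepares the SPMC context. The first synchronous
 * entry into the SPMC happens later in spmd_init(), which BL31 invokes through
 * the bl31_register_bl32_init() hook registered above.
 */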
/*******************************************************************************
 * Initialize the context of the SPM Core.
 ******************************************************************************/
int spmd_setup(void)
{
	int rc;
	void *spmc_manifest;
	struct transfer_list_header *tl __maybe_unused;
	struct transfer_list_entry *te __maybe_unused;

	/*
	 * If the SPMC is at EL3, then just initialise it directly. The
	 * shenanigans of when it is at a lower EL are not needed.
	 */
	if (is_spmc_at_el3()) {
		/* Allow the SPMC to populate its attributes directly. */
		spmc_populate_attrs(&spmc_attrs);

		rc = spmc_setup();
		if (rc != 0) {
			WARN("SPMC initialisation failed 0x%x.\n", rc);
		}
		return 0;
	}

	spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (spmc_ep_info == NULL) {
		WARN("No SPM Core image provided by BL2 boot loader.\n");
		return 0;
	}

	/* Under no circumstances will this parameter be 0 */
	assert(spmc_ep_info->pc != 0ULL);

#if TRANSFER_LIST && !RESET_TO_BL31
	tl = (struct transfer_list_header *)spmc_ep_info->args.arg3;
	te = transfer_list_find(tl, TL_TAG_DT_SPMC_MANIFEST);
	if (te == NULL) {
		WARN("SPM Core manifest absent in TRANSFER_LIST.\n");
		return -ENOENT;
	}

	spmc_manifest = (void *)transfer_list_entry_data(te);

	/* Change the DT in the handoff */
	if (sizeof(spmc_ep_info->args.arg0) == sizeof(uint64_t)) {
		spmc_ep_info->args.arg0 = (uintptr_t)spmc_manifest;
	} else {
		spmc_ep_info->args.arg3 = (uintptr_t)spmc_manifest;
	}
#else
	/*
	 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
	 * be used as a manifest for the SPM Core at the next lower EL/mode.
	 */
	spmc_manifest = (void *)spmc_ep_info->args.arg0;
#endif

	if (spmc_manifest == NULL) {
		WARN("Invalid or absent SPM Core manifest.\n");
		return 0;
	}

	/* Load the manifest and initialise the SPMC. */
	rc = spmd_spmc_init(spmc_manifest);
	if (rc != 0) {
		WARN("Booting device without SPM initialization.\n");
	}

	return 0;
}

/*******************************************************************************
 * Forward FF-A SMCs to the other security state.
 ******************************************************************************/
uint64_t spmd_smc_switch_state(uint32_t smc_fid,
			       bool secure_origin,
			       uint64_t x1,
			       uint64_t x2,
			       uint64_t x3,
			       uint64_t x4,
			       void *handle,
			       uint64_t flags,
			       uint32_t secure_ffa_version)
{
	unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
	unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;
	uint32_t version_in = (secure_origin) ? secure_ffa_version : nonsecure_ffa_version;
	uint32_t version_out = (!secure_origin) ? secure_ffa_version : nonsecure_ffa_version;
	void *ctx_out;

#if SPMD_SPM_AT_SEL2
	if ((secure_state_out == SECURE) && (is_sve_hint_set(flags) == true)) {
		/*
		 * Set the SVE hint bit in x0 and pass to the lower secure EL,
		 * if it was set by the caller.
		 */
		smc_fid |= (FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT);
	}
#endif

	/* Save incoming security state */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(secure_state_in);
#else
	cm_el1_sysregs_context_save(secure_state_in);
#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
	/* Forward the hint bit denoting the absence of SVE live state. */
	simd_ctx_save(secure_state_in, (!secure_origin && (is_sve_hint_set(flags) == true)));
#endif
#endif

	/* Restore outgoing security state */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(secure_state_out);
#else
	cm_el1_sysregs_context_restore(secure_state_out);
#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
	simd_ctx_restore(secure_state_out);
#endif
#endif
	cm_set_next_eret_context(secure_state_out);

	ctx_out = cm_get_context(secure_state_out);
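	/*
	 * FFA_NORMAL_WORLD_RESUME carries no arguments: return to the outgoing
	 * context with x0-x7 left exactly as saved when it was preempted.
	 */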
	if (smc_fid == FFA_NORMAL_WORLD_RESUME) {
		SMC_RET0(ctx_out);
	}

	if ((GET_SMC_CC(smc_fid) == SMC_64) && (version_out >= MAKE_FFA_VERSION(U(1), U(2)))) {
		if (version_in < MAKE_FFA_VERSION(U(1), U(2))) {
			/* FFA version mismatch, with dest >= 1.2 - set outgoing x8-x17 to zero */
			SMC_RET18(ctx_out, smc_fid, x1, x2, x3, x4,
				  SMC_GET_GP(handle, CTX_GPREG_X5),
				  SMC_GET_GP(handle, CTX_GPREG_X6),
				  SMC_GET_GP(handle, CTX_GPREG_X7),
				  0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
		} else {
			/* Both FFA versions >= 1.2 - pass incoming x8-x17 to dest */
			SMC_RET18(ctx_out, smc_fid, x1, x2, x3, x4,
				  SMC_GET_GP(handle, CTX_GPREG_X5),
				  SMC_GET_GP(handle, CTX_GPREG_X6),
				  SMC_GET_GP(handle, CTX_GPREG_X7),
				  SMC_GET_GP(handle, CTX_GPREG_X8),
				  SMC_GET_GP(handle, CTX_GPREG_X9),
				  SMC_GET_GP(handle, CTX_GPREG_X10),
				  SMC_GET_GP(handle, CTX_GPREG_X11),
				  SMC_GET_GP(handle, CTX_GPREG_X12),
				  SMC_GET_GP(handle, CTX_GPREG_X13),
				  SMC_GET_GP(handle, CTX_GPREG_X14),
				  SMC_GET_GP(handle, CTX_GPREG_X15),
				  SMC_GET_GP(handle, CTX_GPREG_X16),
				  SMC_GET_GP(handle, CTX_GPREG_X17));
		}
	} else {
		/* 32 bit call or dest has FFA version < 1.2 or unknown */
		SMC_RET8(ctx_out, smc_fid, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));
	}
}

/*******************************************************************************
 * Forward SMCs to the other security state.
 ******************************************************************************/
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags,
				 uint32_t secure_ffa_version)
{
	if (is_spmc_at_el3() && !secure_origin) {
		return spmc_smc_handler(smc_fid, secure_origin, x1, x2, x3, x4,
					cookie, handle, flags);
	}

	return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2, x3, x4,
				     handle, flags, secure_ffa_version);
}

/*******************************************************************************
 * Return FFA_ERROR with the specified error code.
 ******************************************************************************/
uint64_t spmd_ffa_error_return(void *handle, int error_code)
{
	SMC_RET8(handle, (uint32_t) FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, (uint32_t)error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

/*******************************************************************************
 * spmd_check_address_in_binary_image
 ******************************************************************************/
bool spmd_check_address_in_binary_image(uint64_t address)
{
	assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size));

	return ((address >= spmc_attrs.load_address) &&
		(address < (spmc_attrs.load_address + spmc_attrs.binary_size)));
}

/******************************************************************************
 * spmd_is_spmc_message
 *****************************************************************************/
static bool spmd_is_spmc_message(unsigned int ep)
{
	if (is_spmc_at_el3()) {
		return false;
	}

	return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID)
		&& (ffa_endpoint_source(ep) == spmc_attrs.spmc_id));
}

/*******************************************************************************
 * This function forwards FF-A SMCs to either the main SPMD handler or the
 * SPMC at EL3, depending on the origin security state, if enabled.
 ******************************************************************************/
uint64_t spmd_ffa_smc_handler(uint32_t smc_fid,
			      uint64_t x1,
			      uint64_t x2,
			      uint64_t x3,
			      uint64_t x4,
			      void *cookie,
			      void *handle,
			      uint64_t flags)
{
	if (is_spmc_at_el3()) {
		/*
		 * If we have an SPMC at EL3, allow it to handle the SMC first.
		 * The SPMC will call back through to the SPMD handler if
		 * required.
		 */
		if (is_caller_secure(flags)) {
			return spmc_smc_handler(smc_fid,
						is_caller_secure(flags),
						x1, x2, x3, x4, cookie,
						handle, flags);
		}
	}
	return spmd_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
				handle, flags, spmc_nwd_ffa_version);
}

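/*******************************************************************************
 * Return the FFA version common to the two negotiating endpoints, i.e. the
 * lesser of the secure and non-secure versions.
 ******************************************************************************/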
static uint32_t get_common_ffa_version(uint32_t secure_ffa_version)
{
	if (secure_ffa_version <= nonsecure_ffa_version) {
		return secure_ffa_version;
	} else {
		return nonsecure_ffa_version;
	}
}

/*******************************************************************************
 * This function handles all SMCs in the range reserved for FFA. Each call is
 * either forwarded to the other security state or handled by the SPM
 * dispatcher.
 ******************************************************************************/
uint64_t spmd_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags,
			  uint32_t secure_ffa_version)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	bool secure_origin;
	int ret;
	uint32_t input_version;

	/* Determine which security state this SMC originated from */
	secure_origin = is_caller_secure(flags);

	VERBOSE("SPM(%u): 0x%x 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64
		" 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
		plat_my_core_pos(), smc_fid, x1, x2, x3, x4,
		SMC_GET_GP(handle, CTX_GPREG_X5),
		SMC_GET_GP(handle, CTX_GPREG_X6),
		SMC_GET_GP(handle, CTX_GPREG_X7));

	/*
	 * If there is an on-going FFA_PARTITION_INFO_GET_REGS request from an
	 * EL3 SPMD logical partition, unconditionally return; we don't expect
	 * any other FF-A ABI to be called between invocations of
	 * FFA_PARTITION_INFO_GET_REGS.
	 */
	if (is_spmd_logical_sp_info_regs_req_in_progress(ctx)) {
		assert(secure_origin);
		spmd_spm_core_sync_exit(0ULL);
	}

	if ((!secure_origin) && (smc_fid != FFA_VERSION)) {
		/*
		 * Once the caller invokes any FF-A ABI other than FFA_VERSION,
		 * the version negotiation phase is complete.
		 */
		nonsecure_version_negotiated = true;
	}

	switch (smc_fid) {
	case FFA_ERROR:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU. If so, then indicate that the SPM Core initialised
		 * unsuccessfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(x2);
		}

		/*
		 * Perform a synchronous exit if:
		 * 1. There is an SPMD logical partition direct request
		 * on-going; return to the SPMD logical partition so the error
		 * can be consumed.
		 * 2. The SPMC sent FFA_ERROR in response to a power management
		 * operation sent through a direct request.
		 */
		if (is_spmd_logical_sp_dir_req_in_progress(ctx) ||
		    ctx->psci_operation_ongoing) {
			assert(secure_origin);
			spmd_spm_core_sync_exit(0ULL);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags, secure_ffa_version);
		break; /* not reached */

	case FFA_VERSION:
		input_version = (uint32_t)(0xFFFFFFFF & x1);
		/*
		 * Sanity check "input_version": bit 31 must be zero.
		 * If the caller is secure and the SPMC was initialized,
		 * return the FFA_VERSION of the SPMD.
		 * If the caller is non-secure and the SPMC was initialized,
		 * forward to the EL3 SPMC if enabled; otherwise send a
		 * framework message to the SPMC at the lower EL to
		 * negotiate a version that is compatible between the
		 * normal world and the SPMC.
		 * If the EL3 SPMC is enabled, ignore the SPMC state as
		 * it is not used.
		 */
		if ((input_version & FFA_VERSION_BIT31_MASK) ||
		    (!is_spmc_at_el3() && (ctx->state == SPMC_STATE_RESET))) {
			ret = FFA_ERROR_NOT_SUPPORTED;
		} else if (!secure_origin) {
			if (!nonsecure_version_negotiated) {
				/*
				 * Once an FF-A version has been negotiated
				 * between a caller and a callee, the version
				 * may not be changed for the lifetime of
				 * the calling component.
				 */
				nonsecure_ffa_version = input_version;
			}

			if (is_spmc_at_el3()) {
				/*
				 * Forward the call directly to the EL3 SPMC,
				 * if enabled, as we don't need to wrap the
				 * call in a direct request.
				 */
				spmc_nwd_ffa_version =
					MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR);
				return spmc_smc_handler(smc_fid, secure_origin,
							x1, x2, x3, x4, cookie,
							handle, flags);
			}

			gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
			uint64_t rc;

			if (spmc_attrs.major_version == 1 &&
			    spmc_attrs.minor_version == 0) {
				ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
						       spmc_attrs.minor_version);
				spmc_nwd_ffa_version = (uint32_t)ret;
				SMC_RET8(handle, (uint32_t)ret,
					 FFA_TARGET_INFO_MBZ,
					 FFA_TARGET_INFO_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
				break;
			}
			/* Save the non-secure system registers context. */
#if SPMD_SPM_AT_SEL2
			cm_el2_sysregs_context_save(NON_SECURE);
#else
			cm_el1_sysregs_context_save(NON_SECURE);
#endif

			/*
			 * The incoming request has FFA_VERSION as the X0
			 * smc_fid and the requested version in X1. Prepare a
			 * direct request from the SPMD to the SPMC with the
			 * FFA_VERSION framework function identifier in X2 and
			 * the requested version in X3.
			 */
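			/*
			 * Register layout of the resulting SPMC entry, as set
			 * up by spmd_build_spmc_message():
			 *   x0 = FFA_MSG_SEND_DIRECT_REQ_SMC32
			 *   x1 = (SPMD_DIRECT_MSG_ENDPOINT_ID <<
			 *         FFA_DIRECT_MSG_SOURCE_SHIFT) | SPMC ID
			 *   x2 = BIT(31) | SPMD_FWK_MSG_FFA_VERSION_REQ
			 *   x3 = input_version
			 *   x4-x7 = 0
			 */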
			spmd_build_spmc_message(gpregs,
						SPMD_FWK_MSG_FFA_VERSION_REQ,
						input_version);

			/*
			 * Ensure x8-x17 NS GP register values are untouched when returning
			 * from the SPMC.
			 */
			write_ctx_reg(gpregs, CTX_GPREG_X8, SMC_GET_GP(handle, CTX_GPREG_X8));
			write_ctx_reg(gpregs, CTX_GPREG_X9, SMC_GET_GP(handle, CTX_GPREG_X9));
			write_ctx_reg(gpregs, CTX_GPREG_X10, SMC_GET_GP(handle, CTX_GPREG_X10));
			write_ctx_reg(gpregs, CTX_GPREG_X11, SMC_GET_GP(handle, CTX_GPREG_X11));
			write_ctx_reg(gpregs, CTX_GPREG_X12, SMC_GET_GP(handle, CTX_GPREG_X12));
			write_ctx_reg(gpregs, CTX_GPREG_X13, SMC_GET_GP(handle, CTX_GPREG_X13));
			write_ctx_reg(gpregs, CTX_GPREG_X14, SMC_GET_GP(handle, CTX_GPREG_X14));
			write_ctx_reg(gpregs, CTX_GPREG_X15, SMC_GET_GP(handle, CTX_GPREG_X15));
			write_ctx_reg(gpregs, CTX_GPREG_X16, SMC_GET_GP(handle, CTX_GPREG_X16));
			write_ctx_reg(gpregs, CTX_GPREG_X17, SMC_GET_GP(handle, CTX_GPREG_X17));

			rc = spmd_spm_core_sync_entry(ctx);

			if ((rc != 0ULL) ||
			    (SMC_GET_GP(gpregs, CTX_GPREG_X0) !=
				FFA_MSG_SEND_DIRECT_RESP_SMC32) ||
			    (SMC_GET_GP(gpregs, CTX_GPREG_X2) !=
				(FFA_FWK_MSG_BIT |
				 SPMD_FWK_MSG_FFA_VERSION_RESP))) {
				ERROR("Failed to forward FFA_VERSION\n");
				ret = FFA_ERROR_NOT_SUPPORTED;
			} else {
				ret = SMC_GET_GP(gpregs, CTX_GPREG_X3);
				spmc_nwd_ffa_version = (uint32_t)ret;
			}

			/*
			 * x0-x4 are updated by spmd_smc_forward below.
			 * Zero out x5-x7 in the FFA_VERSION response.
			 */
			write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
			write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
			write_ctx_reg(gpregs, CTX_GPREG_X7, 0);

			/*
			 * Return here after the SPMC has handled FFA_VERSION.
			 * The returned SPMC version is held in X3.
			 * Forward this version in X0 to the non-secure caller.
			 */
			return spmd_smc_forward(ret, true, FFA_PARAM_MBZ,
						FFA_PARAM_MBZ, FFA_PARAM_MBZ,
						FFA_PARAM_MBZ, cookie, gpregs,
						flags, spmc_nwd_ffa_version);
		} else {
			ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
					       FFA_VERSION_MINOR);
		}

		SMC_RET8(handle, (uint32_t)ret, FFA_TARGET_INFO_MBZ,
			 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
		break; /* not reached */

	case FFA_FEATURES:
		/*
		 * This is an optional interface. Do the minimal checks and
		 * forward to the SPM Core, which will handle it if implemented.
		 */

		/* Forward the SMC from the Normal world to the SPM Core. */
		if (!secure_origin) {
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags, secure_ffa_version);
		}

		/*
		 * Return success if the call was from the secure world, i.e.
		 * all FFA functions are supported. This is essentially a
		 * nop.
		 */
		SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));

		break; /* not reached */

	case FFA_ID_GET:
		/*
		 * Returns the ID of the calling FFA component.
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}

		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_SECONDARY_EP_REGISTER_SMC64:
		if (secure_origin) {
			ret = spmd_pm_secondary_ep_register(x1);

			if (ret < 0) {
				SMC_RET8(handle, FFA_ERROR_SMC64,
					FFA_TARGET_INFO_MBZ, ret,
					FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					FFA_PARAM_MBZ);
			} else {
				SMC_RET8(handle, FFA_SUCCESS_SMC64,
					FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ,
					FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					FFA_PARAM_MBZ);
			}
		}

		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		break; /* not reached */

	case FFA_SPM_ID_GET:
		if (MAKE_FFA_VERSION(1, 1) > FFA_VERSION_COMPILED) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}
		/*
		 * Returns the ID of the SPMC or SPMD depending on the FF-A
		 * instance where this function is invoked.
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}
		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, SPMD_DIRECT_MSG_ENDPOINT_ID,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_MSG_SEND_DIRECT_REQ2_SMC64:
		if (get_common_ffa_version(secure_ffa_version) < MAKE_FFA_VERSION(U(1), U(2))) {
			/* Call not supported at this version. */
			return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		}
		/* fallthrough */
	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
		/*
		 * Regardless of secure_origin, SPMD logical partitions cannot
		 * handle direct messages. They can only initiate direct
		 * messages and consume direct responses or errors.
		 */
		if (is_spmd_lp_id(ffa_endpoint_source(x1)) ||
		    is_spmd_lp_id(ffa_endpoint_destination(x1))) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_INVALID_PARAMETER);
		}

		/*
		 * When there is an on-going SPMD logical partition direct
		 * request, there cannot be another direct request. Return an
		 * error in this case. Panicking is an option, but it does not
		 * give the caller the opportunity to abort based on error
		 * codes.
		 */
		if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
			assert(secure_origin);
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_DENIED);
		}

		if (!secure_origin) {
			/* Validate that the source endpoint is non-secure for a non-secure caller. */
			if (ffa_is_secure_world_id(ffa_endpoint_source(x1))) {
				return spmd_ffa_error_return(handle,
						FFA_ERROR_INVALID_PARAMETER);
			}
		}

		if (secure_origin && spmd_is_spmc_message(x1)) {
			return spmd_ffa_error_return(handle,
					FFA_ERROR_DENIED);
		} else {
			/* Forward the direct message to the other world. */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags, secure_ffa_version);
		}
		break; /* not reached */

	case FFA_MSG_SEND_DIRECT_RESP2_SMC64:
		if (get_common_ffa_version(secure_ffa_version) < MAKE_FFA_VERSION(U(1), U(2))) {
			/* Call not supported at this version. */
			return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		}
		/* fallthrough */
	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
		if (secure_origin && (spmd_is_spmc_message(x1) ||
		    is_spmd_logical_sp_dir_req_in_progress(ctx))) {
			spmd_spm_core_sync_exit(0ULL);
		} else {
			/* Forward the direct message to the other world. */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags, secure_ffa_version);
		}
		break; /* not reached */

	case FFA_RX_RELEASE:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_PARTITION_INFO_GET:
#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
	case FFA_NOTIFICATION_BITMAP_CREATE:
	case FFA_NOTIFICATION_BITMAP_DESTROY:
	case FFA_NOTIFICATION_BIND:
	case FFA_NOTIFICATION_UNBIND:
	case FFA_NOTIFICATION_SET:
	case FFA_NOTIFICATION_GET:
	case FFA_NOTIFICATION_INFO_GET:
	case FFA_NOTIFICATION_INFO_GET_SMC64:
	case FFA_MSG_SEND2:
	case FFA_RX_ACQUIRE:
	case FFA_NS_RES_INFO_GET_SMC64:
#endif
	case FFA_MSG_RUN:
		/*
		 * The above calls should be invoked only by the Normal world
		 * and must not be forwarded from the Secure world to the
		 * Normal world.
		 */
		if (secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Forward the call to the other world */
		/* fallthrough */
	case FFA_MSG_SEND:
	case FFA_MEM_DONATE_SMC32:
	case FFA_MEM_DONATE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_RECLAIM:
	case FFA_MEM_FRAG_TX:
	case FFA_MEM_FRAG_RX:
	case FFA_SUCCESS_SMC32:
	case FFA_SUCCESS_SMC64:
		/*
		 * If there is an ongoing direct request from an SPMD logical
		 * partition, return an error.
		 */
		if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
			assert(secure_origin);
			return spmd_ffa_error_return(handle,
					FFA_ERROR_DENIED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags, secure_ffa_version);
		break; /* not reached */

	case FFA_MSG_WAIT:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU from the Secure world. If so, then indicate that
		 * the SPM Core initialised successfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(0ULL);
		}

		/* Forward the call to the other world */
		/* fallthrough */
	case FFA_INTERRUPT:
	case FFA_MSG_YIELD:
		/* These interfaces must be invoked only by the Secure world */
		if (!secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
			assert(secure_origin);
			return spmd_ffa_error_return(handle,
					FFA_ERROR_DENIED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags, secure_ffa_version);
		break; /* not reached */

	case FFA_NORMAL_WORLD_RESUME:
		if (secure_origin && ctx->secure_interrupt_ongoing) {
			spmd_spm_core_sync_exit(0ULL);
		} else {
			return spmd_ffa_error_return(handle, FFA_ERROR_DENIED);
		}
		break; /* not reached */

#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
	case FFA_PARTITION_INFO_GET_REGS_SMC64:
		if (secure_origin) {
			return spmd_el3_populate_logical_partition_info(handle,
									x1, x2,
									x3);
		}

		/* Call only supported with SMCCC 1.2+ */
		if (MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION) < 0x10002) {
			return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags, secure_ffa_version);
		break; /* not reached */
#endif
	case FFA_CONSOLE_LOG_SMC32:
	case FFA_CONSOLE_LOG_SMC64:
		/* This interface must not be forwarded to other worlds. */
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		break; /* not reached */

	case FFA_EL3_INTR_HANDLE:
		if (secure_origin) {
			return spmd_handle_group0_intr_swd(handle);
		} else {
			return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		}

	case FFA_ABORT_SMC32:
	case FFA_ABORT_SMC64:
		/* This interface must be invoked only by the Secure world */
		if (!secure_origin) {
			return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		}

		ERROR("SPMC encountered a fatal error. Aborting now\n");
		panic();

		/* Not reached. */
		SMC_RET0(handle);

	default:
		WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}
}
1435