1 /*
2 * Copyright (c) 2020-2025, Arm Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7 #include <assert.h>
8 #include <errno.h>
9 #include <inttypes.h>
10 #include <stdint.h>
11 #include <string.h>
12
13 #include <arch_helpers.h>
14 #include <arch/aarch64/arch_features.h>
15 #include <bl31/bl31.h>
16 #include <bl31/interrupt_mgmt.h>
17 #include <common/debug.h>
18 #include <common/runtime_svc.h>
19 #include <common/tbbr/tbbr_img_def.h>
20 #include <lib/el3_runtime/context_mgmt.h>
21 #include <lib/fconf/fconf.h>
22 #include <lib/fconf/fconf_dyn_cfg_getter.h>
23 #include <lib/per_cpu/per_cpu.h>
24 #include <lib/smccc.h>
25 #include <lib/spinlock.h>
26 #include <lib/utils.h>
27 #include <lib/xlat_tables/xlat_tables_v2.h>
28 #include <plat/common/common_def.h>
29 #include <plat/common/platform.h>
30 #include <platform_def.h>
31 #include <services/el3_spmd_logical_sp.h>
32 #include <services/ffa_svc.h>
33 #include <services/spmc_svc.h>
34 #include <services/spmd_svc.h>
35 #include <smccc_helpers.h>
36 #include "spmd_private.h"
37 #if TRANSFER_LIST
38 #include <transfer_list.h>
39 #endif
40
41 /*******************************************************************************
42 * SPM Core context information.
43 ******************************************************************************/
44 static PER_CPU_DEFINE(spmd_spm_core_context_t, spm_core_context);
45
46 /*******************************************************************************
47 * SPM Core attribute information is read from its manifest if the SPMC is not
48  * at EL3. Otherwise, it is populated directly by the SPMC.
49 ******************************************************************************/
50 static spmc_manifest_attribute_t spmc_attrs;
51
52 /*******************************************************************************
53  * FFA version used by the non-secure endpoint.
54 ******************************************************************************/
55 static uint32_t nonsecure_ffa_version;
56
57 /*******************************************************************************
58  * Whether the normal world has finished negotiating its FF-A version.
59 ******************************************************************************/
60 static bool nonsecure_version_negotiated;
61
62 /*******************************************************************************
63 * FFA version used by SPMC, as seen by the normal world.
64 ******************************************************************************/
65 static uint32_t spmc_nwd_ffa_version;
66
67 /*******************************************************************************
68 * SPM Core entry point information. Discovered on the primary core and reused
69 * on secondary cores.
70 ******************************************************************************/
71 static entry_point_info_t *spmc_ep_info;
72
73 /*******************************************************************************
74  * Helper to get the SPM Core context on the current CPU.
75 ******************************************************************************/
76 spmd_spm_core_context_t *spmd_get_context(void)
77 {
78 return PER_CPU_CUR(spm_core_context);
79 }
80
81 /*******************************************************************************
82 * SPM Core ID getter.
83 ******************************************************************************/
84 uint16_t spmd_spmc_id_get(void)
85 {
86 return spmc_attrs.spmc_id;
87 }
88
89 /*******************************************************************************
90  * Static function declarations.
91 ******************************************************************************/
92 static int32_t spmd_init(void);
93 static int spmd_spmc_init(void *pm_addr);
94
95 static uint64_t spmd_smc_forward(uint32_t smc_fid,
96 bool secure_origin,
97 uint64_t x1,
98 uint64_t x2,
99 uint64_t x3,
100 uint64_t x4,
101 void *cookie,
102 void *handle,
103 uint64_t flags,
104 uint32_t secure_ffa_version);
105
106 /******************************************************************************
107 * Builds an SPMD to SPMC direct message request.
108 *****************************************************************************/
109 void spmd_build_spmc_message(gp_regs_t *gpregs, uint8_t target_func,
110 unsigned long long message)
111 {
112 write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
113 write_ctx_reg(gpregs, CTX_GPREG_X1,
114 (SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
115 spmd_spmc_id_get());
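	/* BIT(31) marks this request as a framework message (FFA_FWK_MSG_BIT). */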
116 write_ctx_reg(gpregs, CTX_GPREG_X2, BIT(31) | target_func);
117 write_ctx_reg(gpregs, CTX_GPREG_X3, message);
118
119 /* Zero out x4-x7 for the direct request emitted towards the SPMC. */
120 write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
121 write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
122 write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
123 write_ctx_reg(gpregs, CTX_GPREG_X7, 0);
124 }
125
126
127 /*******************************************************************************
128 * This function takes an SPMC context pointer and performs a synchronous
129 * SPMC entry.
130 ******************************************************************************/
131 uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
132 {
133 uint64_t rc;
134
135 assert(spmc_ctx != NULL);
136
137 cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);
138
139 /* Restore the context assigned above */
140 #if SPMD_SPM_AT_SEL2
141 cm_el2_sysregs_context_restore(SECURE);
142 cm_el2_sysregs_context_restore_gic(SECURE);
143 #else
144 cm_el1_sysregs_context_restore(SECURE);
145 #endif
146 cm_set_next_eret_context(SECURE);
147
148 /* Enter SPMC */
149 rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);
150
151 /* Save secure state */
152 #if SPMD_SPM_AT_SEL2
153 cm_el2_sysregs_context_save(SECURE);
154 cm_el2_sysregs_context_save_gic(SECURE);
155 #else
156 cm_el1_sysregs_context_save(SECURE);
157 #endif
158
159 return rc;
160 }
161
162 /*******************************************************************************
163 * This function returns to the place where spmd_spm_core_sync_entry() was
164 * called originally.
165 ******************************************************************************/
166 __dead2 void spmd_spm_core_sync_exit(uint64_t rc)
167 {
168 spmd_spm_core_context_t *ctx = spmd_get_context();
169
170 /* Get current CPU context from SPMC context */
171 assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));
172
173 /*
174 * The SPMD must have initiated the original request through a
175 	 * synchronous entry into the SPMC. Jump back to the original C runtime
176 	 * context with the value of rc in x0.
177 */
178 spmd_spm_core_exit(ctx->c_rt_ctx, rc);
179
180 panic();
181 }
182
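/*******************************************************************************
 * Initialise the SPM Core context on the given core: mark it OFF and build an
 * initial CPU context from the SPMC entry point information.
 ******************************************************************************/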
183 void spmd_setup_context(unsigned int core_id)
184 {
185 cpu_context_t *cpu_ctx;
186
187 PER_CPU_CUR(spm_core_context)->state = SPMC_STATE_OFF;
188
189 /* Setup an initial cpu context for the SPMC. */
190 cpu_ctx = &(PER_CPU_CUR(spm_core_context)->cpu_ctx);
191 cm_setup_context(cpu_ctx, spmc_ep_info);
192
193 /*
194 * Pass the core linear ID to the SPMC through x4.
195 	 * (TF-A implementation-defined behavior helping
196 	 * a legacy TOS migrate to FF-A).
197 */
198 write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4, core_id);
199 }
200
201 /*******************************************************************************
202 * Jump to the SPM Core for the first time.
203 ******************************************************************************/
204 static int32_t spmd_init(void)
205 {
206 spmd_spm_core_context_t *ctx = spmd_get_context();
207 uint64_t rc;
208
209 VERBOSE("SPM Core init start.\n");
210
211 /* Primary boot core enters the SPMC for initialization. */
212 ctx->state = SPMC_STATE_ON_PENDING;
213
214 rc = spmd_spm_core_sync_entry(ctx);
215 if (rc != 0ULL) {
216 ERROR("SPMC initialisation failed 0x%" PRIx64 "\n", rc);
217 return 0;
218 }
219
220 ctx->state = SPMC_STATE_ON;
221
222 VERBOSE("SPM Core init end.\n");
223
224 spmd_logical_sp_set_spmc_initialized();
225 rc = spmd_logical_sp_init();
226 if (rc != 0) {
227 WARN("SPMD Logical partitions failed init.\n");
228 }
229
230 return 1;
231 }
232
233 /*******************************************************************************
234 * spmd_secure_interrupt_handler
235 * Enter the SPMC for further handling of the secure interrupt by the SPMC
236 * itself or a Secure Partition.
237 ******************************************************************************/
238 static uint64_t spmd_secure_interrupt_handler(uint32_t id,
239 uint32_t flags,
240 void *handle,
241 void *cookie)
242 {
243 spmd_spm_core_context_t *ctx = spmd_get_context();
244 gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
245 int64_t rc;
246
247 /* Sanity check the security state when the exception was generated */
248 assert(get_interrupt_src_ss(flags) == NON_SECURE);
249
250 /* Sanity check the pointer to this cpu's context */
251 assert(handle == cm_get_context(NON_SECURE));
252
253 /* Save the non-secure context before entering SPMC */
254 #if SPMD_SPM_AT_SEL2
255 cm_el2_sysregs_context_save(NON_SECURE);
256 cm_el2_sysregs_context_save_gic(NON_SECURE);
257 #else
258 cm_el1_sysregs_context_save(NON_SECURE);
259
260 #if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
261 /*
262 * The hint bit denoting absence of SVE live state is effectively false
263 * in this scenario where execution was trapped to EL3 due to FIQ.
264 */
265 simd_ctx_save(NON_SECURE, false);
266 simd_ctx_restore(SECURE);
267 #endif
268 #endif
269
270 /* Convey the event to the SPMC through the FFA_INTERRUPT interface. */
271 write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_INTERRUPT);
272 write_ctx_reg(gpregs, CTX_GPREG_X1, 0);
273 write_ctx_reg(gpregs, CTX_GPREG_X2, 0);
274 write_ctx_reg(gpregs, CTX_GPREG_X3, 0);
275 write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
276 write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
277 write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
278 write_ctx_reg(gpregs, CTX_GPREG_X7, 0);
279
280 /* Mark current core as handling a secure interrupt. */
281 ctx->secure_interrupt_ongoing = true;
282
283 rc = spmd_spm_core_sync_entry(ctx);
284
285 if (rc != 0ULL) {
286 ERROR("%s failed (%" PRId64 ") on CPU%u\n", __func__, rc, plat_my_core_pos());
287 }
288
289 ctx->secure_interrupt_ongoing = false;
290
291 #if SPMD_SPM_AT_SEL2
292 cm_el2_sysregs_context_restore(NON_SECURE);
293 cm_el2_sysregs_context_restore_gic(NON_SECURE);
294 #else
295 cm_el1_sysregs_context_restore(NON_SECURE);
296
297 #if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
298 simd_ctx_save(SECURE, false);
299 simd_ctx_restore(NON_SECURE);
300 #endif
301 #endif
302 cm_set_next_eret_context(NON_SECURE);
303
304 SMC_RET0(&ctx->cpu_ctx);
305 }
306
307 #if (EL3_EXCEPTION_HANDLING == 0)
308 /*******************************************************************************
309 * spmd_group0_interrupt_handler_nwd
310 *
311  * Group0 secure interrupts triggered in the normal world are trapped to EL3.
312  * Delegate the handling of the interrupt to the platform handler, and return
313  * only upon successfully handling the Group0 interrupt.
314 *
315 * NOTE: the generic handle_interrupt_exception entry calls
316 * plat_ic_get_pending_interrupt_type to perform a first triage and route to
317 * the corresponding interrupt handler based on the interrupt type.
318  * A registered handler must not assume that the HPPI has not changed between
319  * the top level handler and this handler. The first thing a handler must do
320  * is attempt to acknowledge the interrupt and process it if it yields a valid
321  * INTID. Meanwhile, the interrupt might have been acknowledged by another
322  * PE, another high priority interrupt might have been asserted, or the HPPI
323  * might have changed for any other valid reason. The reasoning is the same
324  * for an interrupt delegated by a lower EL through the FFA_EL3_INTR_HANDLE
325  * interface.
326  * For a Group0 interrupt triggered while the secure world runs, the first
327  * triage is done by the lower EL (e.g. S-EL2), which routes it to EL3 for
328  * handling. Once there, the HPPI might have changed, so the same rules apply.
329 ******************************************************************************/
330 static uint64_t spmd_group0_interrupt_handler_nwd(uint32_t id,
331 uint32_t flags,
332 void *handle,
333 void *cookie)
334 {
335 uint32_t intid, intr_raw;
336
337 /* Sanity check the security state when the exception was generated. */
338 assert(get_interrupt_src_ss(flags) == NON_SECURE);
339
340 /* Sanity check the pointer to this cpu's context. */
341 assert(handle == cm_get_context(NON_SECURE));
342
343 assert(id == INTR_ID_UNAVAILABLE);
344
345 intr_raw = plat_ic_acknowledge_interrupt();
346 intid = plat_ic_get_interrupt_id(intr_raw);
347
348 if (intid == INTR_ID_UNAVAILABLE) {
349 return 0U;
350 }
351
352 if (plat_spmd_handle_group0_interrupt(intid) < 0) {
353 ERROR("Group0 interrupt %u not handled\n", intid);
354 panic();
355 }
356
357 /* Deactivate the corresponding Group0 interrupt. */
358 plat_ic_end_of_interrupt(intid);
359
360 return 0U;
361 }
362 #endif
363
364 /*******************************************************************************
365 * spmd_handle_group0_intr_swd
366 *
367  * The SPMC delegates handling of a Group0 secure interrupt to EL3 firmware
368  * via the FFA_EL3_INTR_HANDLE SMC call. The SPMD in turn delegates handling
369  * of the interrupt to the platform handler and returns only upon successfully
370  * handling the Group0 interrupt.
371 *
372 * NOTE: see spmd_group0_interrupt_handler_nwd note section.
373 ******************************************************************************/
374 static uint64_t spmd_handle_group0_intr_swd(void *handle)
375 {
376 uint32_t intid, intr_raw;
377
378 /* Sanity check the pointer to this cpu's context */
379 assert(handle == cm_get_context(SECURE));
380
381 intr_raw = plat_ic_acknowledge_interrupt();
382 intid = plat_ic_get_interrupt_id(intr_raw);
383
384 if (intid == INTR_ID_UNAVAILABLE) {
385 return 0U;
386 }
387
388 /*
389 	 * TODO: Currently, due to a limitation in the SPMD implementation, the
390 	 * platform handler is expected to not delegate handling to the NWd while
391 	 * processing a Group0 secure interrupt.
392 */
393 if (plat_spmd_handle_group0_interrupt(intid) < 0) {
394 /* Group0 interrupt was not handled by the platform. */
395 ERROR("Group0 interrupt %u not handled\n", intid);
396 panic();
397 }
398
399 /* Deactivate the corresponding Group0 interrupt. */
400 plat_ic_end_of_interrupt(intid);
401
402 /* Return success. */
403 SMC_RET8(handle, FFA_SUCCESS_SMC32, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
404 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
405 FFA_PARAM_MBZ);
406 }
407
408 #if SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
409 static int spmd_dynamic_map_mem(uintptr_t base_addr, size_t size,
410 unsigned int attr, uintptr_t *align_addr,
411 size_t *align_size)
412 {
413 uintptr_t base_addr_align;
414 size_t mapped_size_align;
415 int rc;
416
417 	/* Page-align the address and size if necessary. */
418 base_addr_align = page_align(base_addr, DOWN);
419 mapped_size_align = page_align(size, UP);
420
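	/*
	 * If the base address was rounded down but the size was already page
	 * aligned, grow the mapping by one page so the end of the original
	 * region remains covered.
	 */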
421 if ((base_addr != base_addr_align) &&
422 (size == mapped_size_align)) {
423 mapped_size_align += PAGE_SIZE;
424 }
425
426 /*
427 	 * Dynamically map the given region using its aligned base address
428 	 * and size.
429 */
430 rc = mmap_add_dynamic_region((unsigned long long)base_addr_align,
431 base_addr_align,
432 mapped_size_align,
433 attr);
434 if (rc == 0) {
435 *align_addr = base_addr_align;
436 *align_size = mapped_size_align;
437 }
438
439 return rc;
440 }
441
442 static void spmd_do_sec_cpy(uintptr_t root_base_addr, uintptr_t sec_base_addr,
443 size_t size)
444 {
445 uintptr_t root_base_addr_align, sec_base_addr_align;
446 size_t root_mapped_size_align, sec_mapped_size_align;
447 int rc;
448
449 assert(root_base_addr != 0UL);
450 assert(sec_base_addr != 0UL);
451 assert(size != 0UL);
452
453 /* Map the memory with required attributes */
454 rc = spmd_dynamic_map_mem(root_base_addr, size, MT_RO_DATA | MT_ROOT,
455 &root_base_addr_align,
456 &root_mapped_size_align);
457 if (rc != 0) {
458 ERROR("%s %s %lu (%d)\n", "Error while mapping", "root region",
459 root_base_addr, rc);
460 panic();
461 }
462
463 rc = spmd_dynamic_map_mem(sec_base_addr, size, MT_RW_DATA | MT_SECURE,
464 &sec_base_addr_align, &sec_mapped_size_align);
465 if (rc != 0) {
466 ERROR("%s %s %lu (%d)\n", "Error while mapping",
467 "secure region", sec_base_addr, rc);
468 panic();
469 }
470
471 /* Do copy operation */
472 (void)memcpy((void *)sec_base_addr, (void *)root_base_addr, size);
473
474 /* Unmap root memory region */
475 rc = mmap_remove_dynamic_region(root_base_addr_align,
476 root_mapped_size_align);
477 if (rc != 0) {
478 ERROR("%s %s %lu (%d)\n", "Error while unmapping",
479 "root region", root_base_addr_align, rc);
480 panic();
481 }
482
483 /* Unmap secure memory region */
484 rc = mmap_remove_dynamic_region(sec_base_addr_align,
485 sec_mapped_size_align);
486 if (rc != 0) {
487 ERROR("%s %s %lu (%d)\n", "Error while unmapping",
488 "secure region", sec_base_addr_align, rc);
489 panic();
490 }
491 }
492 #endif /* SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */
493
494 /*******************************************************************************
495  * Loads the SPMC manifest and initialises the SPMC.
496 ******************************************************************************/
497 static int spmd_spmc_init(void *pm_addr)
498 {
499 uint32_t ep_attr, flags;
500 int rc;
501 const struct dyn_cfg_dtb_info_t *image_info __unused;
502
503 /* Load the SPM Core manifest */
504 rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
505 if (rc != 0) {
506 WARN("No or invalid SPM Core manifest image provided by BL2\n");
507 return rc;
508 }
509
510 /*
511 * Ensure that the SPM Core version is compatible with the SPM
512 * Dispatcher version.
513 */
514 if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
515 (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
516 WARN("Unsupported FFA version (%u.%u)\n",
517 spmc_attrs.major_version, spmc_attrs.minor_version);
518 return -EINVAL;
519 }
520
521 VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
522 spmc_attrs.minor_version);
523
524 VERBOSE("SPM Core run time EL%x.\n",
525 SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);
526
527 	/* Validate the SPMC ID; ensure the high bit is set. */
528 if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
529 SPMC_SECURE_ID_MASK) == 0U) {
530 WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
531 return -EINVAL;
532 }
533
534 /* Validate the SPM Core execution state */
535 if ((spmc_attrs.exec_state != MODE_RW_64) &&
536 (spmc_attrs.exec_state != MODE_RW_32)) {
537 WARN("Unsupported %s%x.\n", "SPM Core execution state 0x",
538 spmc_attrs.exec_state);
539 return -EINVAL;
540 }
541
542 VERBOSE("%s%x.\n", "SPM Core execution state 0x",
543 spmc_attrs.exec_state);
544
545 #if SPMD_SPM_AT_SEL2
546 /* Ensure manifest has not requested AArch32 state in S-EL2 */
547 if (spmc_attrs.exec_state == MODE_RW_32) {
548 WARN("AArch32 state at S-EL2 is not supported.\n");
549 return -EINVAL;
550 }
551
552 /*
553 	 * Check that S-EL2 is supported on this system, since S-EL2
554 	 * is required for the SPM.
555 */
556 if (!is_feat_sel2_supported()) {
557 WARN("SPM Core run time S-EL2 is not supported.\n");
558 return -EINVAL;
559 }
560 #endif /* SPMD_SPM_AT_SEL2 */
561
562 /* Initialise an entrypoint to set up the CPU context */
563 ep_attr = SECURE | EP_ST_ENABLE;
564 if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
565 ep_attr |= EP_EE_BIG;
566 }
567
568 SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);
569
570 /*
571 * Populate SPSR for SPM Core based upon validated parameters from the
572 * manifest.
573 */
574 if (spmc_attrs.exec_state == MODE_RW_32) {
575 spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
576 SPSR_E_LITTLE,
577 DAIF_FIQ_BIT |
578 DAIF_IRQ_BIT |
579 DAIF_ABT_BIT);
580 } else {
581
582 #if SPMD_SPM_AT_SEL2
583 static const uint32_t runtime_el = MODE_EL2;
584 #else
585 static const uint32_t runtime_el = MODE_EL1;
586 #endif
587 spmc_ep_info->spsr = SPSR_64(runtime_el,
588 MODE_SP_ELX,
589 DISABLE_ALL_EXCEPTIONS);
590 }
591
592 #if SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
593 if (is_feat_rme_supported()) {
594 image_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, TOS_FW_CONFIG_ID);
595 assert(image_info != NULL);
596
597 if ((image_info->config_addr == 0UL) ||
598 (image_info->secondary_config_addr == 0UL) ||
599 (image_info->config_max_size == 0UL)) {
600 return -EINVAL;
601 }
602
603 /* Copy manifest from root->secure region */
604 spmd_do_sec_cpy(image_info->config_addr,
605 image_info->secondary_config_addr,
606 image_info->config_max_size);
607
608 /* Update ep info of BL32 */
609 assert(spmc_ep_info != NULL);
610 spmc_ep_info->args.arg0 = image_info->secondary_config_addr;
611 }
612 #endif /* SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */
613
614 spmd_setup_context(plat_my_core_pos());
615
616 /* Register power management hooks with PSCI */
617 psci_register_spd_pm_hook(&spmd_pm);
618
619 /* Register init function for deferred init. */
620 bl31_register_bl32_init(&spmd_init);
621
622 INFO("SPM Core setup done.\n");
623
624 /*
625 * Register an interrupt handler routing secure interrupts to SPMD
626 * while the NWd is running.
627 */
628 flags = 0;
629 set_interrupt_rm_flag(flags, NON_SECURE);
630 rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
631 spmd_secure_interrupt_handler,
632 flags);
633 if (rc != 0) {
634 panic();
635 }
636
637 /*
638 * Permit configurations where the SPM resides at S-EL1/2 and upon a
639 * Group0 interrupt triggering while the normal world runs, the
640 * interrupt is routed either through the EHF or directly to the SPMD:
641 *
642 * EL3_EXCEPTION_HANDLING=0: the Group0 interrupt is routed to the SPMD
643 * for handling by spmd_group0_interrupt_handler_nwd.
644 *
645 * EL3_EXCEPTION_HANDLING=1: the Group0 interrupt is routed to the EHF.
646 *
647 */
648 #if (EL3_EXCEPTION_HANDLING == 0)
649 /*
650 * If EL3 interrupts are supported by the platform, register an
651 * interrupt handler routing Group0 interrupts to SPMD while the NWd is
652 * running.
653 */
654 if (plat_ic_has_interrupt_type(INTR_TYPE_EL3)) {
655 rc = register_interrupt_type_handler(INTR_TYPE_EL3,
656 spmd_group0_interrupt_handler_nwd,
657 flags);
658 if (rc != 0) {
659 panic();
660 }
661 }
662 #endif
663
664 return 0;
665 }
666
667 /*******************************************************************************
668 * Initialize context of SPM Core.
669 ******************************************************************************/
670 int spmd_setup(void)
671 {
672 int rc;
673 void *spmc_manifest;
674 struct transfer_list_header *tl __maybe_unused;
675 struct transfer_list_entry *te __maybe_unused;
676
677 /*
678 * If the SPMC is at EL3, then just initialise it directly. The
679 * shenanigans of when it is at a lower EL are not needed.
680 */
681 if (is_spmc_at_el3()) {
682 /* Allow the SPMC to populate its attributes directly. */
683 spmc_populate_attrs(&spmc_attrs);
684
685 rc = spmc_setup();
686 if (rc != 0) {
687 WARN("SPMC initialisation failed 0x%x.\n", rc);
688 }
689 return 0;
690 }
691
692 spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
693 if (spmc_ep_info == NULL) {
694 WARN("No SPM Core image provided by BL2 boot loader.\n");
695 return 0;
696 }
697
698 /* Under no circumstances will this parameter be 0 */
699 assert(spmc_ep_info->pc != 0ULL);
700
701
702 #if TRANSFER_LIST && !RESET_TO_BL31
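	/*
	 * With a transfer list handoff, BL2 passes the transfer list address in
	 * arg3. Locate the SPMC manifest DT entry in the list and hand its
	 * address on to the SPMC.
	 */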
703 tl = (struct transfer_list_header *)spmc_ep_info->args.arg3;
704 te = transfer_list_find(tl, TL_TAG_DT_SPMC_MANIFEST);
705 if (te == NULL) {
706 WARN("SPM Core manifest absent in TRANSFER_LIST.\n");
707 return -ENOENT;
708 }
709
710 spmc_manifest = (void *)transfer_list_entry_data(te);
711
712 /* Change the DT in the handoff */
713 if (sizeof(spmc_ep_info->args.arg0) == sizeof(uint64_t)) {
714 spmc_ep_info->args.arg0 = (uintptr_t)spmc_manifest;
715 } else {
716 spmc_ep_info->args.arg3 = (uintptr_t)spmc_manifest;
717 }
718 #else
719 /*
720 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
721 * be used as a manifest for the SPM Core at the next lower EL/mode.
722 */
723 spmc_manifest = (void *)spmc_ep_info->args.arg0;
724 #endif
725
726 if (spmc_manifest == NULL) {
727 WARN("Invalid or absent SPM Core manifest.\n");
728 return 0;
729 }
730
731 /* Load manifest, init SPMC */
732 rc = spmd_spmc_init(spmc_manifest);
733 if (rc != 0) {
734 WARN("Booting device without SPM initialization.\n");
735 }
736
737 return 0;
738 }
739
740 /*******************************************************************************
741 * Forward FF-A SMCs to the other security state.
742 ******************************************************************************/
743 uint64_t spmd_smc_switch_state(uint32_t smc_fid,
744 bool secure_origin,
745 uint64_t x1,
746 uint64_t x2,
747 uint64_t x3,
748 uint64_t x4,
749 void *handle,
750 uint64_t flags,
751 uint32_t secure_ffa_version)
752 {
753 unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
754 unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;
755 uint32_t version_in = (secure_origin) ? secure_ffa_version : nonsecure_ffa_version;
756 uint32_t version_out = (!secure_origin) ? secure_ffa_version : nonsecure_ffa_version;
757 void *ctx_out;
758
759 #if SPMD_SPM_AT_SEL2
760 if ((secure_state_out == SECURE) && (is_sve_hint_set(flags) == true)) {
761 /*
762 * Set the SVE hint bit in x0 and pass to the lower secure EL,
763 * if it was set by the caller.
764 */
765 smc_fid |= (FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT);
766 }
767 #endif
768
769 /* Save incoming security state */
770 #if SPMD_SPM_AT_SEL2
771 cm_el2_sysregs_context_save(secure_state_in);
772 cm_el2_sysregs_context_save_gic(secure_state_in);
773 #else
774 cm_el1_sysregs_context_save(secure_state_in);
775 #if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
776 /* Forward the hint bit denoting the absence of SVE live state. */
777 simd_ctx_save(secure_state_in, (!secure_origin && (is_sve_hint_set(flags) == true)));
778 #endif
779 #endif
780
781 /* Restore outgoing security state */
782 #if SPMD_SPM_AT_SEL2
783 cm_el2_sysregs_context_restore(secure_state_out);
784 cm_el2_sysregs_context_restore_gic(secure_state_out);
785 #else
786 cm_el1_sysregs_context_restore(secure_state_out);
787 #if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
788 simd_ctx_restore(secure_state_out);
789 #endif
790 #endif
791 cm_set_next_eret_context(secure_state_out);
792
793 ctx_out = cm_get_context(secure_state_out);
794 if (smc_fid == FFA_NORMAL_WORLD_RESUME) {
795 SMC_RET0(ctx_out);
796 }
797
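	/*
	 * From FF-A v1.2 onwards, SMC64 ABIs may carry arguments in x8-x17.
	 * Forward them only when both endpoints have negotiated v1.2 or later;
	 * otherwise zero them towards the destination.
	 */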
798 if ((GET_SMC_CC(smc_fid) == SMC_64) && (version_out >= MAKE_FFA_VERSION(U(1), U(2)))) {
799 if (version_in < MAKE_FFA_VERSION(U(1), U(2))) {
800 /* FFA version mismatch, with dest >= 1.2 - set outgoing x8-x17 to zero */
801 SMC_RET18(ctx_out, smc_fid, x1, x2, x3, x4,
802 SMC_GET_GP(handle, CTX_GPREG_X5),
803 SMC_GET_GP(handle, CTX_GPREG_X6),
804 SMC_GET_GP(handle, CTX_GPREG_X7),
805 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
806 } else {
807 /* Both FFA versions >= 1.2 - pass incoming x8-x17 to dest */
808 SMC_RET18(ctx_out, smc_fid, x1, x2, x3, x4,
809 SMC_GET_GP(handle, CTX_GPREG_X5),
810 SMC_GET_GP(handle, CTX_GPREG_X6),
811 SMC_GET_GP(handle, CTX_GPREG_X7),
812 SMC_GET_GP(handle, CTX_GPREG_X8),
813 SMC_GET_GP(handle, CTX_GPREG_X9),
814 SMC_GET_GP(handle, CTX_GPREG_X10),
815 SMC_GET_GP(handle, CTX_GPREG_X11),
816 SMC_GET_GP(handle, CTX_GPREG_X12),
817 SMC_GET_GP(handle, CTX_GPREG_X13),
818 SMC_GET_GP(handle, CTX_GPREG_X14),
819 SMC_GET_GP(handle, CTX_GPREG_X15),
820 SMC_GET_GP(handle, CTX_GPREG_X16),
821 SMC_GET_GP(handle, CTX_GPREG_X17)
822 );
823 }
824 } else {
825 /* 32 bit call or dest has FFA version < 1.2 or unknown */
826 SMC_RET8(ctx_out, smc_fid, x1, x2, x3, x4,
827 SMC_GET_GP(handle, CTX_GPREG_X5),
828 SMC_GET_GP(handle, CTX_GPREG_X6),
829 SMC_GET_GP(handle, CTX_GPREG_X7));
830 }
831 }
832
833 /*******************************************************************************
834 * Forward SMCs to the other security state.
835 ******************************************************************************/
836 static uint64_t spmd_smc_forward(uint32_t smc_fid,
837 bool secure_origin,
838 uint64_t x1,
839 uint64_t x2,
840 uint64_t x3,
841 uint64_t x4,
842 void *cookie,
843 void *handle,
844 uint64_t flags,
845 uint32_t secure_ffa_version)
846 {
847 if (is_spmc_at_el3() && !secure_origin) {
848 return spmc_smc_handler(smc_fid, secure_origin, x1, x2, x3, x4,
849 cookie, handle, flags);
850 }
851
852 return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2, x3, x4,
853 handle, flags, secure_ffa_version);
854
855 }
856
857 /*******************************************************************************
858 * Return FFA_ERROR with specified error code
859 ******************************************************************************/
860 uint64_t spmd_ffa_error_return(void *handle, int error_code)
861 {
862 SMC_RET8(handle, (uint32_t) FFA_ERROR,
863 FFA_TARGET_INFO_MBZ, (uint32_t)error_code,
864 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
865 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
866 }
867
868 /*******************************************************************************
869 * spmd_check_address_in_binary_image
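 * Returns true if the given address falls within the SPMC binary image,
 * based on the load address and binary size read from the SPMC manifest.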
870 ******************************************************************************/
871 bool spmd_check_address_in_binary_image(uint64_t address)
872 {
873 assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size));
874
875 return ((address >= spmc_attrs.load_address) &&
876 (address < (spmc_attrs.load_address + spmc_attrs.binary_size)));
877 }
878
879 /******************************************************************************
880 * spmd_is_spmc_message
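 * Returns true if a direct message with the given endpoint info originates
 * from the SPMC and targets the SPMD endpoint ID. Never true with an EL3 SPMC.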
881 *****************************************************************************/
882 static bool spmd_is_spmc_message(unsigned int ep)
883 {
884 if (is_spmc_at_el3()) {
885 return false;
886 }
887
888 return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID)
889 && (ffa_endpoint_source(ep) == spmc_attrs.spmc_id));
890 }
891
892 /*******************************************************************************
893 * This function forwards FF-A SMCs to either the main SPMD handler or the
894  * SPMC at EL3 (if enabled), depending on the origin security state.
895 ******************************************************************************/
896 uint64_t spmd_ffa_smc_handler(uint32_t smc_fid,
897 uint64_t x1,
898 uint64_t x2,
899 uint64_t x3,
900 uint64_t x4,
901 void *cookie,
902 void *handle,
903 uint64_t flags)
904 {
905 if (is_spmc_at_el3()) {
906 /*
907 * If we have an SPMC at EL3 allow handling of the SMC first.
908 * The SPMC will call back through to SPMD handler if required.
909 */
910 if (is_caller_secure(flags)) {
911 return spmc_smc_handler(smc_fid,
912 is_caller_secure(flags),
913 x1, x2, x3, x4, cookie,
914 handle, flags);
915 }
916 }
917 return spmd_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
918 handle, flags, spmc_nwd_ffa_version);
919 }
920
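/*******************************************************************************
 * Return the FF-A version common to both endpoints of a call, i.e. the lesser
 * of the secure endpoint's version and the non-secure endpoint's version.
 ******************************************************************************/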
921 static uint32_t get_common_ffa_version(uint32_t secure_ffa_version)
922 {
923 if (secure_ffa_version <= nonsecure_ffa_version) {
924 return secure_ffa_version;
925 } else {
926 return nonsecure_ffa_version;
927 }
928 }
929
930 /*******************************************************************************
931 * This function handles all SMCs in the range reserved for FFA. Each call is
932 * either forwarded to the other security state or handled by the SPM dispatcher
933 ******************************************************************************/
934 uint64_t spmd_smc_handler(uint32_t smc_fid,
935 uint64_t x1,
936 uint64_t x2,
937 uint64_t x3,
938 uint64_t x4,
939 void *cookie,
940 void *handle,
941 uint64_t flags,
942 uint32_t secure_ffa_version)
943 {
944 spmd_spm_core_context_t *ctx = spmd_get_context();
945 bool secure_origin;
946 int ret;
947 uint32_t input_version;
948
949 /* Determine which security state this SMC originated from */
950 secure_origin = is_caller_secure(flags);
951
952 VERBOSE("SPM(%u): 0x%x 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64
953 " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
954 plat_my_core_pos(), smc_fid, x1, x2, x3, x4,
955 SMC_GET_GP(handle, CTX_GPREG_X5),
956 SMC_GET_GP(handle, CTX_GPREG_X6),
957 SMC_GET_GP(handle, CTX_GPREG_X7));
958
959 /*
960 	 * If there is an on-going FFA_PARTITION_INFO_GET_REGS request from an
961 	 * EL3 SPMD logical partition, unconditionally return; no other FF-A
962 	 * ABI is expected to be invoked between those calls.
963 */
964 if (is_spmd_logical_sp_info_regs_req_in_progress(ctx)) {
965 assert(secure_origin);
966 spmd_spm_core_sync_exit(0ULL);
967 }
968
969 if ((!secure_origin) && (smc_fid != FFA_VERSION)) {
970 /*
971 * Once the caller invokes any FF-A ABI other than FFA_VERSION,
972 * the version negotiation phase is complete.
973 */
974 nonsecure_version_negotiated = true;
975 }
976
977 switch (smc_fid) {
978 case FFA_ERROR:
979 /*
980 * Check if this is the first invocation of this interface on
981 * this CPU. If so, then indicate that the SPM Core initialised
982 * unsuccessfully.
983 */
984 if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
985 spmd_spm_core_sync_exit(x2);
986 }
987
988 /*
989 * Perform a synchronous exit:
990 * 1. If there was an SPMD logical partition direct request on-going,
991 * return back to the SPMD logical partition so the error can be
992 * consumed.
993 * 2. SPMC sent FFA_ERROR in response to a power management
994 * operation sent through direct request.
995 */
996 if (is_spmd_logical_sp_dir_req_in_progress(ctx) ||
997 ctx->psci_operation_ongoing) {
998 assert(secure_origin);
999 spmd_spm_core_sync_exit(0ULL);
1000 }
1001
1002 return spmd_smc_forward(smc_fid, secure_origin,
1003 x1, x2, x3, x4, cookie,
1004 handle, flags, secure_ffa_version);
1005 break; /* not reached */
1006
1007 case FFA_VERSION:
1008 input_version = (uint32_t)(0xFFFFFFFF & x1);
1009 /*
1010 		 * If the caller is secure and the SPMC was initialized,
1011 		 * return the FFA_VERSION of the SPMD.
1012 		 * If the caller is non-secure and the SPMC was initialized,
1013 		 * forward to the EL3 SPMC if enabled, otherwise send a
1014 		 * framework message to the SPMC at the lower EL to
1015 		 * negotiate a version that is compatible between the
1016 		 * normal world and the SPMC.
1017 		 * Sanity check "input_version".
1018 		 * If the EL3 SPMC is enabled, ignore the SPMC state as
1019 		 * it is not used.
1020 */
1021 if ((input_version & FFA_VERSION_BIT31_MASK) ||
1022 (!is_spmc_at_el3() && (ctx->state == SPMC_STATE_RESET))) {
1023 ret = FFA_ERROR_NOT_SUPPORTED;
1024 } else if (!secure_origin) {
1025 if (!nonsecure_version_negotiated) {
1026 /*
1027 * Once an FF-A version has been negotiated
1028 * between a caller and a callee, the version
1029 * may not be changed for the lifetime of
1030 * the calling component.
1031 */
1032 nonsecure_ffa_version = input_version;
1033 }
1034
1035 if (is_spmc_at_el3()) {
1036 /*
1037 * Forward the call directly to the EL3 SPMC, if
1038 * enabled, as we don't need to wrap the call in
1039 * a direct request.
1040 */
1041 spmc_nwd_ffa_version =
1042 MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR);
1043 return spmc_smc_handler(smc_fid, secure_origin,
1044 x1, x2, x3, x4, cookie,
1045 handle, flags);
1046 }
1047
1048 gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
1049 uint64_t rc;
1050
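			/*
			 * A v1.0 SPMC does not understand the FFA_VERSION
			 * framework message; reply with the version from its
			 * manifest instead of forwarding the request.
			 */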
1051 if (spmc_attrs.major_version == 1 &&
1052 spmc_attrs.minor_version == 0) {
1053 ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
1054 spmc_attrs.minor_version);
1055 spmc_nwd_ffa_version = (uint32_t)ret;
1056 SMC_RET8(handle, (uint32_t)ret,
1057 FFA_TARGET_INFO_MBZ,
1058 FFA_TARGET_INFO_MBZ,
1059 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
1060 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
1061 FFA_PARAM_MBZ);
1062 break;
1063 }
1064 /* Save non-secure system registers context */
1065 #if SPMD_SPM_AT_SEL2
1066 cm_el2_sysregs_context_save(NON_SECURE);
1067 cm_el2_sysregs_context_save_gic(NON_SECURE);
1068 #else
1069 cm_el1_sysregs_context_save(NON_SECURE);
1070 #endif
1071
1072 /*
1073 * The incoming request has FFA_VERSION as X0 smc_fid
1074 * and requested version in x1. Prepare a direct request
1075 * from SPMD to SPMC with FFA_VERSION framework function
1076 * identifier in X2 and requested version in X3.
1077 */
1078 spmd_build_spmc_message(gpregs,
1079 SPMD_FWK_MSG_FFA_VERSION_REQ,
1080 input_version);
1081
1082 /*
1083 * Ensure x8-x17 NS GP register values are untouched when returning
1084 * from the SPMC.
1085 */
1086 write_ctx_reg(gpregs, CTX_GPREG_X8, SMC_GET_GP(handle, CTX_GPREG_X8));
1087 write_ctx_reg(gpregs, CTX_GPREG_X9, SMC_GET_GP(handle, CTX_GPREG_X9));
1088 write_ctx_reg(gpregs, CTX_GPREG_X10, SMC_GET_GP(handle, CTX_GPREG_X10));
1089 write_ctx_reg(gpregs, CTX_GPREG_X11, SMC_GET_GP(handle, CTX_GPREG_X11));
1090 write_ctx_reg(gpregs, CTX_GPREG_X12, SMC_GET_GP(handle, CTX_GPREG_X12));
1091 write_ctx_reg(gpregs, CTX_GPREG_X13, SMC_GET_GP(handle, CTX_GPREG_X13));
1092 write_ctx_reg(gpregs, CTX_GPREG_X14, SMC_GET_GP(handle, CTX_GPREG_X14));
1093 write_ctx_reg(gpregs, CTX_GPREG_X15, SMC_GET_GP(handle, CTX_GPREG_X15));
1094 write_ctx_reg(gpregs, CTX_GPREG_X16, SMC_GET_GP(handle, CTX_GPREG_X16));
1095 write_ctx_reg(gpregs, CTX_GPREG_X17, SMC_GET_GP(handle, CTX_GPREG_X17));
1096
1097 rc = spmd_spm_core_sync_entry(ctx);
1098
1099 if ((rc != 0ULL) ||
1100 (SMC_GET_GP(gpregs, CTX_GPREG_X0) !=
1101 FFA_MSG_SEND_DIRECT_RESP_SMC32) ||
1102 (SMC_GET_GP(gpregs, CTX_GPREG_X2) !=
1103 (FFA_FWK_MSG_BIT |
1104 SPMD_FWK_MSG_FFA_VERSION_RESP))) {
1105 ERROR("Failed to forward FFA_VERSION\n");
1106 ret = FFA_ERROR_NOT_SUPPORTED;
1107 } else {
1108 ret = SMC_GET_GP(gpregs, CTX_GPREG_X3);
1109 spmc_nwd_ffa_version = (uint32_t)ret;
1110 }
1111
1112 /*
1113 * x0-x4 are updated by spmd_smc_forward below.
1114 * Zero out x5-x7 in the FFA_VERSION response.
1115 */
1116 write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
1117 write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
1118 write_ctx_reg(gpregs, CTX_GPREG_X7, 0);
1119
1120 /*
1121 * Return here after SPMC has handled FFA_VERSION.
1122 * The returned SPMC version is held in X3.
1123 * Forward this version in X0 to the non-secure caller.
1124 */
1125 return spmd_smc_forward(ret, true, FFA_PARAM_MBZ,
1126 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
1127 FFA_PARAM_MBZ, cookie, gpregs,
1128 flags, spmc_nwd_ffa_version);
1129 } else {
1130 ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
1131 FFA_VERSION_MINOR);
1132 }
1133
1134 SMC_RET8(handle, (uint32_t)ret, FFA_TARGET_INFO_MBZ,
1135 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
1136 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
1137 break; /* not reached */
1138
1139 case FFA_FEATURES:
1140 /*
1141 * This is an optional interface. Do the minimal checks and
1142 * forward to SPM Core which will handle it if implemented.
1143 */
1144
1145 /* Forward SMC from Normal world to the SPM Core */
1146 if (!secure_origin) {
1147 return spmd_smc_forward(smc_fid, secure_origin,
1148 x1, x2, x3, x4, cookie,
1149 handle, flags, secure_ffa_version);
1150 }
1151
1152 /*
1153 * Return success if call was from secure world i.e. all
1154 * FFA functions are supported. This is essentially a
1155 * nop.
1156 */
1157 SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4,
1158 SMC_GET_GP(handle, CTX_GPREG_X5),
1159 SMC_GET_GP(handle, CTX_GPREG_X6),
1160 SMC_GET_GP(handle, CTX_GPREG_X7));
1161
1162 break; /* not reached */
1163
1164 case FFA_ID_GET:
1165 /*
1166 * Returns the ID of the calling FFA component.
1167 */
1168 if (!secure_origin) {
1169 SMC_RET8(handle, FFA_SUCCESS_SMC32,
1170 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID,
1171 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
1172 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
1173 FFA_PARAM_MBZ);
1174 }
1175
1176 SMC_RET8(handle, FFA_SUCCESS_SMC32,
1177 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
1178 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
1179 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
1180 FFA_PARAM_MBZ);
1181
1182 break; /* not reached */
1183
1184 case FFA_SECONDARY_EP_REGISTER_SMC64:
1185 if (secure_origin) {
1186 ret = spmd_pm_secondary_ep_register(x1);
1187
1188 if (ret < 0) {
1189 SMC_RET8(handle, FFA_ERROR_SMC64,
1190 FFA_TARGET_INFO_MBZ, ret,
1191 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
1192 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
1193 FFA_PARAM_MBZ);
1194 } else {
1195 SMC_RET8(handle, FFA_SUCCESS_SMC64,
1196 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ,
1197 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
1198 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
1199 FFA_PARAM_MBZ);
1200 }
1201 }
1202
1203 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1204 break; /* Not reached */
1205
1206 case FFA_SPM_ID_GET:
1207 if (MAKE_FFA_VERSION(1, 1) > FFA_VERSION_COMPILED) {
1208 return spmd_ffa_error_return(handle,
1209 FFA_ERROR_NOT_SUPPORTED);
1210 }
1211 /*
1212 * Returns the ID of the SPMC or SPMD depending on the FF-A
1213 * instance where this function is invoked
1214 */
1215 if (!secure_origin) {
1216 SMC_RET8(handle, FFA_SUCCESS_SMC32,
1217 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
1218 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
1219 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
1220 FFA_PARAM_MBZ);
1221 }
1222 SMC_RET8(handle, FFA_SUCCESS_SMC32,
1223 FFA_TARGET_INFO_MBZ, SPMD_DIRECT_MSG_ENDPOINT_ID,
1224 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
1225 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
1226 FFA_PARAM_MBZ);
1227
1228 break; /* not reached */
1229
1230 case FFA_MSG_SEND_DIRECT_REQ2_SMC64:
1231 if (get_common_ffa_version(secure_ffa_version) < MAKE_FFA_VERSION(U(1), U(2))) {
1232 /* Call not supported at this version */
1233 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1234 }
1235 /* fallthrough */
1236 case FFA_MSG_SEND_DIRECT_REQ_SMC32:
1237 case FFA_MSG_SEND_DIRECT_REQ_SMC64:
1238 /*
1239 * Regardless of secure_origin, SPMD logical partitions cannot
1240 * handle direct messages. They can only initiate direct
1241 * messages and consume direct responses or errors.
1242 */
1243 if (is_spmd_lp_id(ffa_endpoint_source(x1)) ||
1244 is_spmd_lp_id(ffa_endpoint_destination(x1))) {
1245 return spmd_ffa_error_return(handle,
1246 FFA_ERROR_INVALID_PARAMETER
1247 );
1248 }
1249
1250 /*
1251 * When there is an ongoing SPMD logical partition direct
1252 		 * request, there cannot be another direct request. Return an
1253 		 * error in this case. Panicking is an option but that does
1254 		 * not give the caller the opportunity to abort based on
1255 		 * error codes.
1256 */
1257 if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
1258 assert(secure_origin);
1259 return spmd_ffa_error_return(handle,
1260 FFA_ERROR_DENIED);
1261 }
1262
1263 if (!secure_origin) {
1264 /* Validate source endpoint is non-secure for non-secure caller. */
1265 if (ffa_is_secure_world_id(ffa_endpoint_source(x1))) {
1266 return spmd_ffa_error_return(handle,
1267 FFA_ERROR_INVALID_PARAMETER);
1268 }
1269 }
1270 if (secure_origin && spmd_is_spmc_message(x1)) {
1271 return spmd_ffa_error_return(handle,
1272 FFA_ERROR_DENIED);
1273 } else {
1274 /* Forward direct message to the other world */
1275 return spmd_smc_forward(smc_fid, secure_origin,
1276 x1, x2, x3, x4, cookie,
1277 handle, flags, secure_ffa_version);
1278 }
1279 break; /* Not reached */
1280
1281 case FFA_MSG_SEND_DIRECT_RESP2_SMC64:
1282 if (get_common_ffa_version(secure_ffa_version) < MAKE_FFA_VERSION(U(1), U(2))) {
1283 /* Call not supported at this version */
1284 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1285 }
1286 /* fallthrough */
1287 case FFA_MSG_SEND_DIRECT_RESP_SMC32:
1288 case FFA_MSG_SEND_DIRECT_RESP_SMC64:
1289 if (secure_origin && (spmd_is_spmc_message(x1) ||
1290 is_spmd_logical_sp_dir_req_in_progress(ctx))) {
1291 spmd_spm_core_sync_exit(0ULL);
1292 } else {
1293 /* Forward direct message to the other world */
1294 return spmd_smc_forward(smc_fid, secure_origin,
1295 x1, x2, x3, x4, cookie,
1296 handle, flags, secure_ffa_version);
1297 }
1298 break; /* Not reached */
1299 case FFA_RX_RELEASE:
1300 case FFA_RXTX_MAP_SMC32:
1301 case FFA_RXTX_MAP_SMC64:
1302 case FFA_RXTX_UNMAP:
1303 case FFA_PARTITION_INFO_GET:
1304 #if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
1305 case FFA_NOTIFICATION_BITMAP_CREATE:
1306 case FFA_NOTIFICATION_BITMAP_DESTROY:
1307 case FFA_NOTIFICATION_BIND:
1308 case FFA_NOTIFICATION_UNBIND:
1309 case FFA_NOTIFICATION_SET:
1310 case FFA_NOTIFICATION_GET:
1311 case FFA_NOTIFICATION_INFO_GET:
1312 case FFA_NOTIFICATION_INFO_GET_SMC64:
1313 case FFA_MSG_SEND2:
1314 case FFA_RX_ACQUIRE:
1315 case FFA_NS_RES_INFO_GET_SMC64:
1316 #endif
1317 case FFA_MSG_RUN:
1318 /*
1319 		 * The above calls should be invoked only by the Normal world and
1320 		 * must not be forwarded from the Secure world to the Normal world.
1321 */
1322 if (secure_origin) {
1323 return spmd_ffa_error_return(handle,
1324 FFA_ERROR_NOT_SUPPORTED);
1325 }
1326
1327 /* Forward the call to the other world */
1328 /* fallthrough */
1329 case FFA_MSG_SEND:
1330 case FFA_MEM_DONATE_SMC32:
1331 case FFA_MEM_DONATE_SMC64:
1332 case FFA_MEM_LEND_SMC32:
1333 case FFA_MEM_LEND_SMC64:
1334 case FFA_MEM_SHARE_SMC32:
1335 case FFA_MEM_SHARE_SMC64:
1336 case FFA_MEM_RETRIEVE_REQ_SMC32:
1337 case FFA_MEM_RETRIEVE_REQ_SMC64:
1338 case FFA_MEM_RETRIEVE_RESP:
1339 case FFA_MEM_RELINQUISH:
1340 case FFA_MEM_RECLAIM:
1341 case FFA_MEM_FRAG_TX:
1342 case FFA_MEM_FRAG_RX:
1343 case FFA_SUCCESS_SMC32:
1344 case FFA_SUCCESS_SMC64:
1345 /*
1346 * If there is an ongoing direct request from an SPMD logical
1347 * partition, return an error.
1348 */
1349 if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
1350 assert(secure_origin);
1351 return spmd_ffa_error_return(handle,
1352 FFA_ERROR_DENIED);
1353 }
1354
1355 return spmd_smc_forward(smc_fid, secure_origin,
1356 x1, x2, x3, x4, cookie,
1357 handle, flags, secure_ffa_version);
1358 break; /* not reached */
1359
1360 case FFA_MSG_WAIT:
1361 /*
1362 * Check if this is the first invocation of this interface on
1363 * this CPU from the Secure world. If so, then indicate that the
1364 * SPM Core initialised successfully.
1365 */
1366 if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
1367 spmd_spm_core_sync_exit(0ULL);
1368 }
1369
1370 /* Forward the call to the other world */
1371 /* fallthrough */
1372 case FFA_INTERRUPT:
1373 case FFA_MSG_YIELD:
1374 /* This interface must be invoked only by the Secure world */
1375 if (!secure_origin) {
1376 return spmd_ffa_error_return(handle,
1377 FFA_ERROR_NOT_SUPPORTED);
1378 }
1379
1380 if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
1381 assert(secure_origin);
1382 return spmd_ffa_error_return(handle,
1383 FFA_ERROR_DENIED);
1384 }
1385
1386 return spmd_smc_forward(smc_fid, secure_origin,
1387 x1, x2, x3, x4, cookie,
1388 handle, flags, secure_ffa_version);
1389 break; /* not reached */
1390
1391 case FFA_NORMAL_WORLD_RESUME:
1392 if (secure_origin && ctx->secure_interrupt_ongoing) {
1393 spmd_spm_core_sync_exit(0ULL);
1394 } else {
1395 return spmd_ffa_error_return(handle, FFA_ERROR_DENIED);
1396 }
1397 break; /* Not reached */
1398 #if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
1399 case FFA_PARTITION_INFO_GET_REGS_SMC64:
1400 if (secure_origin) {
1401 return spmd_el3_populate_logical_partition_info(handle, x1,
1402 x2, x3);
1403 }
1404
1405 /* Call only supported with SMCCC 1.2+ */
1406 if (MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION) < 0x10002) {
1407 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1408 }
1409
1410 return spmd_smc_forward(smc_fid, secure_origin,
1411 x1, x2, x3, x4, cookie,
1412 handle, flags, secure_ffa_version);
1413 break; /* Not reached */
1414 #endif
1415 case FFA_CONSOLE_LOG_SMC32:
1416 case FFA_CONSOLE_LOG_SMC64:
1417 /* This interface must not be forwarded to other worlds. */
1418 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1419 break; /* not reached */
1420
1421 case FFA_EL3_INTR_HANDLE:
1422 if (secure_origin) {
1423 return spmd_handle_group0_intr_swd(handle);
1424 } else {
1425 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1426 }
1427 case FFA_ABORT_SMC32:
1428 case FFA_ABORT_SMC64:
1429 /* This interface must be invoked only by the Secure world */
1430 if (!secure_origin) {
1431 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1432 }
1433
1434 ERROR("SPMC encountered a fatal error. Aborting now\n");
1435 panic();
1436
1437 /* Not reached. */
1438 SMC_RET0(handle);
1439 default:
1440 WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
1441 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1442 }
1443 }
1444