1bdd2596dSAchin Gupta /* 20cea2ae0SManish V Badarkhe * Copyright (c) 2020-2023, Arm Limited and Contributors. All rights reserved. 3bdd2596dSAchin Gupta * 4bdd2596dSAchin Gupta * SPDX-License-Identifier: BSD-3-Clause 5bdd2596dSAchin Gupta */ 6bdd2596dSAchin Gupta 7bdd2596dSAchin Gupta #include <assert.h> 8bdd2596dSAchin Gupta #include <errno.h> 94ce3e99aSScott Branden #include <inttypes.h> 104ce3e99aSScott Branden #include <stdint.h> 11bdd2596dSAchin Gupta #include <string.h> 12bdd2596dSAchin Gupta 13bdd2596dSAchin Gupta #include <arch_helpers.h> 1452696946SOlivier Deprez #include <arch/aarch64/arch_features.h> 15bdd2596dSAchin Gupta #include <bl31/bl31.h> 168cb99c3fSOlivier Deprez #include <bl31/interrupt_mgmt.h> 17bdd2596dSAchin Gupta #include <common/debug.h> 18bdd2596dSAchin Gupta #include <common/runtime_svc.h> 190cea2ae0SManish V Badarkhe #include <common/tbbr/tbbr_img_def.h> 20bdd2596dSAchin Gupta #include <lib/el3_runtime/context_mgmt.h> 210cea2ae0SManish V Badarkhe #include <lib/fconf/fconf.h> 220cea2ae0SManish V Badarkhe #include <lib/fconf/fconf_dyn_cfg_getter.h> 23bdd2596dSAchin Gupta #include <lib/smccc.h> 24bdd2596dSAchin Gupta #include <lib/spinlock.h> 25bdd2596dSAchin Gupta #include <lib/utils.h> 260cea2ae0SManish V Badarkhe #include <lib/xlat_tables/xlat_tables_v2.h> 27bdd2596dSAchin Gupta #include <plat/common/common_def.h> 28bdd2596dSAchin Gupta #include <plat/common/platform.h> 29bdd2596dSAchin Gupta #include <platform_def.h> 30662af36dSJ-Alves #include <services/ffa_svc.h> 316da76075SMarc Bonnici #include <services/spmc_svc.h> 32bdd2596dSAchin Gupta #include <services/spmd_svc.h> 33bdd2596dSAchin Gupta #include <smccc_helpers.h> 34bdd2596dSAchin Gupta #include "spmd_private.h" 35bdd2596dSAchin Gupta 36bdd2596dSAchin Gupta /******************************************************************************* 37bdd2596dSAchin Gupta * SPM Core context information. 
 ******************************************************************************/
static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * SPM Core attribute information is read from its manifest if the SPMC is not
 * at EL3. Else, it is populated from the SPMC directly.
 ******************************************************************************/
static spmc_manifest_attribute_t spmc_attrs;

/*******************************************************************************
 * SPM Core entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *spmc_ep_info;

/*******************************************************************************
 * SPM Core context on CPU based on mpidr.
 * Maps an MPIDR to this platform's linear core index and returns the SPMC
 * context belonging to that core. Panics if the MPIDR is not valid for this
 * platform.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context_by_mpidr(uint64_t mpidr)
{
	int core_idx = plat_core_pos_by_mpidr(mpidr);

	/* A negative index means the MPIDR does not belong to this platform. */
	if (core_idx < 0) {
		ERROR("Invalid mpidr: %" PRIx64 ", returned ID: %d\n", mpidr, core_idx);
		panic();
	}

	return &spm_core_context[core_idx];
}

/*******************************************************************************
 * SPM Core context on current CPU get helper.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context(void)
{
	return spmd_get_context_by_mpidr(read_mpidr());
}

/*******************************************************************************
 * SPM Core ID getter.
 * Returns the SPMC endpoint ID as read from the manifest (or populated
 * directly by an EL3-resident SPMC).
 ******************************************************************************/
uint16_t spmd_spmc_id_get(void)
{
	return spmc_attrs.spmc_id;
}

/*******************************************************************************
 * Static function declaration.
 ******************************************************************************/
static int32_t spmd_init(void);
static int spmd_spmc_init(void *pm_addr);
static uint64_t spmd_ffa_error_return(void *handle,
				      int error_code);
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags);

/******************************************************************************
 * Builds an SPMD to SPMC direct message request.
 * Fills the SPMC's saved GP registers so that on next entry the SPMC observes
 * an FFA_MSG_SEND_DIRECT_REQ (SMC32): x1 carries source (SPMD endpoint ID)
 * and destination (SPMC ID), x2 carries 'target_func', x3 the payload.
 * NOTE(review): BIT(31) in x2 appears to mark a framework message — confirm
 * against the FF-A specification for direct message requests.
 *****************************************************************************/
void spmd_build_spmc_message(gp_regs_t *gpregs, uint8_t target_func,
			     unsigned long long message)
{
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
	write_ctx_reg(gpregs, CTX_GPREG_X1,
		      (SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
		      spmd_spmc_id_get());
	write_ctx_reg(gpregs, CTX_GPREG_X2, BIT(31) | target_func);
	write_ctx_reg(gpregs, CTX_GPREG_X3, message);
}


/*******************************************************************************
 * This function takes an SPMC context pointer and performs a synchronous
 * SPMC entry.
entry. 119bdd2596dSAchin Gupta ******************************************************************************/ 120bdd2596dSAchin Gupta uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx) 121bdd2596dSAchin Gupta { 122bdd2596dSAchin Gupta uint64_t rc; 123bdd2596dSAchin Gupta 124bdd2596dSAchin Gupta assert(spmc_ctx != NULL); 125bdd2596dSAchin Gupta 126bdd2596dSAchin Gupta cm_set_context(&(spmc_ctx->cpu_ctx), SECURE); 127bdd2596dSAchin Gupta 128bdd2596dSAchin Gupta /* Restore the context assigned above */ 129033039f8SMax Shvetsov #if SPMD_SPM_AT_SEL2 13028f39f02SMax Shvetsov cm_el2_sysregs_context_restore(SECURE); 131678ce223SOlivier Deprez #else 132678ce223SOlivier Deprez cm_el1_sysregs_context_restore(SECURE); 133033039f8SMax Shvetsov #endif 134bdd2596dSAchin Gupta cm_set_next_eret_context(SECURE); 135bdd2596dSAchin Gupta 136033039f8SMax Shvetsov /* Enter SPMC */ 137bdd2596dSAchin Gupta rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx); 138bdd2596dSAchin Gupta 139bdd2596dSAchin Gupta /* Save secure state */ 140033039f8SMax Shvetsov #if SPMD_SPM_AT_SEL2 14128f39f02SMax Shvetsov cm_el2_sysregs_context_save(SECURE); 142678ce223SOlivier Deprez #else 143678ce223SOlivier Deprez cm_el1_sysregs_context_save(SECURE); 144033039f8SMax Shvetsov #endif 145bdd2596dSAchin Gupta 146bdd2596dSAchin Gupta return rc; 147bdd2596dSAchin Gupta } 148bdd2596dSAchin Gupta 149bdd2596dSAchin Gupta /******************************************************************************* 15052696946SOlivier Deprez * This function returns to the place where spmd_spm_core_sync_entry() was 151bdd2596dSAchin Gupta * called originally. 
 ******************************************************************************/
__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();

	/* Get current CPU context from SPMC context */
	assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));

	/*
	 * The SPMD must have initiated the original request through a
	 * synchronous entry into SPMC. Jump back to the original C runtime
	 * context with the value of rc in x0;
	 */
	spmd_spm_core_exit(ctx->c_rt_ctx, rc);

	/* spmd_spm_core_exit() must not return; guard a broken exit path. */
	panic();
}

/*******************************************************************************
 * Jump to the SPM Core for the first time.
 * Runs as the deferred BL32 init hook on the primary boot core. Returns 1 on
 * success and 0 on failure (bl31_register_bl32_init() callback convention).
 ******************************************************************************/
static int32_t spmd_init(void)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	uint64_t rc;

	VERBOSE("SPM Core init start.\n");

	/* Primary boot core enters the SPMC for initialization. */
	ctx->state = SPMC_STATE_ON_PENDING;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("SPMC initialisation failed 0x%" PRIx64 "\n", rc);
		return 0;
	}

	ctx->state = SPMC_STATE_ON;

	VERBOSE("SPM Core init end.\n");

	return 1;
}

/*******************************************************************************
 * spmd_secure_interrupt_handler
 * Enter the SPMC for further handling of the secure interrupt by the SPMC
 * itself or a Secure Partition.
 ******************************************************************************/
static uint64_t spmd_secure_interrupt_handler(uint32_t id,
					      uint32_t flags,
					      void *handle,
					      void *cookie)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
	unsigned int linear_id = plat_my_core_pos();
	int64_t rc;

	/* Sanity check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering SPMC */
	cm_el1_sysregs_context_save(NON_SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(NON_SECURE);
#endif

	/* Convey the event to the SPMC through the FFA_INTERRUPT interface. */
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_INTERRUPT);
	write_ctx_reg(gpregs, CTX_GPREG_X1, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X2, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X3, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X7, 0);

	/* Mark current core as handling a secure interrupt. */
	ctx->secure_interrupt_ongoing = true;

	/* Synchronous entry; returns when the SPMC is done with the event. */
	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("%s failed (%" PRId64 ") on CPU%u\n", __func__, rc, linear_id);
	}

	ctx->secure_interrupt_ongoing = false;

	/* Restore the non-secure context and resume the interrupted world. */
	cm_el1_sysregs_context_restore(NON_SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(NON_SECURE);
#endif
	cm_set_next_eret_context(NON_SECURE);

	SMC_RET0(&ctx->cpu_ctx);
}

/*******************************************************************************
 * spmd_group0_interrupt_handler_nwd
 * Group0 secure interrupt in the normal world are trapped to EL3. Delegate the
 * handling of the interrupt to the platform handler, and return only upon
 * successfully handling the Group0 interrupt.
 ******************************************************************************/
static uint64_t spmd_group0_interrupt_handler_nwd(uint32_t id,
						  uint32_t flags,
						  void *handle,
						  void *cookie)
{
	uint32_t intid;

	/* Sanity check the security state when the exception was generated. */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context. */
	assert(handle == cm_get_context(NON_SECURE));

	assert(id == INTR_ID_UNAVAILABLE);

	assert(plat_ic_get_pending_interrupt_type() == INTR_TYPE_EL3);

	intid = plat_ic_get_pending_interrupt_id();

	/* Platform handler must fully deal with the interrupt or we panic. */
	if (plat_spmd_handle_group0_interrupt(intid) < 0) {
		ERROR("Group0 interrupt %u not handled\n", intid);
		panic();
	}

	return 0U;
}

/*******************************************************************************
 * spmd_handle_group0_intr_swd
 * SPMC delegates handling of Group0 secure interrupt to EL3 firmware using
 * FFA_EL3_INTR_HANDLE SMC call. Further, SPMD delegates the handling of the
 * interrupt to the platform handler, and returns only upon successfully
 * handling the Group0 interrupt.
291*6671b3d8SMadhukar Pappireddy ******************************************************************************/ 292*6671b3d8SMadhukar Pappireddy static uint64_t spmd_handle_group0_intr_swd(void *handle) 293*6671b3d8SMadhukar Pappireddy { 294*6671b3d8SMadhukar Pappireddy uint32_t intid; 295*6671b3d8SMadhukar Pappireddy 296*6671b3d8SMadhukar Pappireddy /* Sanity check the pointer to this cpu's context */ 297*6671b3d8SMadhukar Pappireddy assert(handle == cm_get_context(SECURE)); 298*6671b3d8SMadhukar Pappireddy 299*6671b3d8SMadhukar Pappireddy assert(plat_ic_get_pending_interrupt_type() == INTR_TYPE_EL3); 300*6671b3d8SMadhukar Pappireddy 301*6671b3d8SMadhukar Pappireddy intid = plat_ic_get_pending_interrupt_id(); 302*6671b3d8SMadhukar Pappireddy 303*6671b3d8SMadhukar Pappireddy /* 304*6671b3d8SMadhukar Pappireddy * TODO: Currently due to a limitation in SPMD implementation, the 305*6671b3d8SMadhukar Pappireddy * platform handler is expected to not delegate handling to NWd while 306*6671b3d8SMadhukar Pappireddy * processing Group0 secure interrupt. 307*6671b3d8SMadhukar Pappireddy */ 308*6671b3d8SMadhukar Pappireddy if (plat_spmd_handle_group0_interrupt(intid) < 0) { 309*6671b3d8SMadhukar Pappireddy /* Group0 interrupt was not handled by the platform. */ 310*6671b3d8SMadhukar Pappireddy ERROR("Group0 interrupt %u not handled\n", intid); 311*6671b3d8SMadhukar Pappireddy panic(); 312*6671b3d8SMadhukar Pappireddy } 313*6671b3d8SMadhukar Pappireddy 314*6671b3d8SMadhukar Pappireddy /* Return success. 
*/ 315*6671b3d8SMadhukar Pappireddy SMC_RET8(handle, FFA_SUCCESS_SMC32, FFA_PARAM_MBZ, FFA_PARAM_MBZ, 316*6671b3d8SMadhukar Pappireddy FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, 317*6671b3d8SMadhukar Pappireddy FFA_PARAM_MBZ); 318*6671b3d8SMadhukar Pappireddy } 319*6671b3d8SMadhukar Pappireddy 3200cea2ae0SManish V Badarkhe #if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 3210cea2ae0SManish V Badarkhe static int spmd_dynamic_map_mem(uintptr_t base_addr, size_t size, 3220cea2ae0SManish V Badarkhe unsigned int attr, uintptr_t *align_addr, 3230cea2ae0SManish V Badarkhe size_t *align_size) 3240cea2ae0SManish V Badarkhe { 3250cea2ae0SManish V Badarkhe uintptr_t base_addr_align; 3260cea2ae0SManish V Badarkhe size_t mapped_size_align; 3270cea2ae0SManish V Badarkhe int rc; 3280cea2ae0SManish V Badarkhe 3290cea2ae0SManish V Badarkhe /* Page aligned address and size if necessary */ 3300cea2ae0SManish V Badarkhe base_addr_align = page_align(base_addr, DOWN); 3310cea2ae0SManish V Badarkhe mapped_size_align = page_align(size, UP); 3320cea2ae0SManish V Badarkhe 3330cea2ae0SManish V Badarkhe if ((base_addr != base_addr_align) && 3340cea2ae0SManish V Badarkhe (size == mapped_size_align)) { 3350cea2ae0SManish V Badarkhe mapped_size_align += PAGE_SIZE; 3360cea2ae0SManish V Badarkhe } 3370cea2ae0SManish V Badarkhe 3380cea2ae0SManish V Badarkhe /* 3390cea2ae0SManish V Badarkhe * Map dynamically given region with its aligned base address and 3400cea2ae0SManish V Badarkhe * size 3410cea2ae0SManish V Badarkhe */ 3420cea2ae0SManish V Badarkhe rc = mmap_add_dynamic_region((unsigned long long)base_addr_align, 3430cea2ae0SManish V Badarkhe base_addr_align, 3440cea2ae0SManish V Badarkhe mapped_size_align, 3450cea2ae0SManish V Badarkhe attr); 3460cea2ae0SManish V Badarkhe if (rc == 0) { 3470cea2ae0SManish V Badarkhe *align_addr = base_addr_align; 3480cea2ae0SManish V Badarkhe *align_size = mapped_size_align; 3490cea2ae0SManish V Badarkhe } 3500cea2ae0SManish V Badarkhe 
3510cea2ae0SManish V Badarkhe return rc; 3520cea2ae0SManish V Badarkhe } 3530cea2ae0SManish V Badarkhe 3540cea2ae0SManish V Badarkhe static void spmd_do_sec_cpy(uintptr_t root_base_addr, uintptr_t sec_base_addr, 3550cea2ae0SManish V Badarkhe size_t size) 3560cea2ae0SManish V Badarkhe { 3570cea2ae0SManish V Badarkhe uintptr_t root_base_addr_align, sec_base_addr_align; 3580cea2ae0SManish V Badarkhe size_t root_mapped_size_align, sec_mapped_size_align; 3590cea2ae0SManish V Badarkhe int rc; 3600cea2ae0SManish V Badarkhe 3610cea2ae0SManish V Badarkhe assert(root_base_addr != 0UL); 3620cea2ae0SManish V Badarkhe assert(sec_base_addr != 0UL); 3630cea2ae0SManish V Badarkhe assert(size != 0UL); 3640cea2ae0SManish V Badarkhe 3650cea2ae0SManish V Badarkhe /* Map the memory with required attributes */ 3660cea2ae0SManish V Badarkhe rc = spmd_dynamic_map_mem(root_base_addr, size, MT_RO_DATA | MT_ROOT, 3670cea2ae0SManish V Badarkhe &root_base_addr_align, 3680cea2ae0SManish V Badarkhe &root_mapped_size_align); 3690cea2ae0SManish V Badarkhe if (rc != 0) { 3700cea2ae0SManish V Badarkhe ERROR("%s %s %lu (%d)\n", "Error while mapping", "root region", 3710cea2ae0SManish V Badarkhe root_base_addr, rc); 3720cea2ae0SManish V Badarkhe panic(); 3730cea2ae0SManish V Badarkhe } 3740cea2ae0SManish V Badarkhe 3750cea2ae0SManish V Badarkhe rc = spmd_dynamic_map_mem(sec_base_addr, size, MT_RW_DATA | MT_SECURE, 3760cea2ae0SManish V Badarkhe &sec_base_addr_align, &sec_mapped_size_align); 3770cea2ae0SManish V Badarkhe if (rc != 0) { 3780cea2ae0SManish V Badarkhe ERROR("%s %s %lu (%d)\n", "Error while mapping", 3790cea2ae0SManish V Badarkhe "secure region", sec_base_addr, rc); 3800cea2ae0SManish V Badarkhe panic(); 3810cea2ae0SManish V Badarkhe } 3820cea2ae0SManish V Badarkhe 3830cea2ae0SManish V Badarkhe /* Do copy operation */ 3840cea2ae0SManish V Badarkhe (void)memcpy((void *)sec_base_addr, (void *)root_base_addr, size); 3850cea2ae0SManish V Badarkhe 3860cea2ae0SManish V Badarkhe /* Unmap root 
memory region */ 3870cea2ae0SManish V Badarkhe rc = mmap_remove_dynamic_region(root_base_addr_align, 3880cea2ae0SManish V Badarkhe root_mapped_size_align); 3890cea2ae0SManish V Badarkhe if (rc != 0) { 3900cea2ae0SManish V Badarkhe ERROR("%s %s %lu (%d)\n", "Error while unmapping", 3910cea2ae0SManish V Badarkhe "root region", root_base_addr_align, rc); 3920cea2ae0SManish V Badarkhe panic(); 3930cea2ae0SManish V Badarkhe } 3940cea2ae0SManish V Badarkhe 3950cea2ae0SManish V Badarkhe /* Unmap secure memory region */ 3960cea2ae0SManish V Badarkhe rc = mmap_remove_dynamic_region(sec_base_addr_align, 3970cea2ae0SManish V Badarkhe sec_mapped_size_align); 3980cea2ae0SManish V Badarkhe if (rc != 0) { 3990cea2ae0SManish V Badarkhe ERROR("%s %s %lu (%d)\n", "Error while unmapping", 4000cea2ae0SManish V Badarkhe "secure region", sec_base_addr_align, rc); 4010cea2ae0SManish V Badarkhe panic(); 4020cea2ae0SManish V Badarkhe } 4030cea2ae0SManish V Badarkhe } 4040cea2ae0SManish V Badarkhe #endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */ 4050cea2ae0SManish V Badarkhe 4068cb99c3fSOlivier Deprez /******************************************************************************* 40752696946SOlivier Deprez * Loads SPMC manifest and inits SPMC. 
 ******************************************************************************/
/*
 * pm_addr: address of the SPMC manifest DTB passed by BL2 (tos_fw_config).
 * Returns 0 on success, the manifest-load error code or -EINVAL on any
 * validation failure; panics if interrupt handler registration fails.
 */
static int spmd_spmc_init(void *pm_addr)
{
	cpu_context_t *cpu_ctx;
	unsigned int core_id;
	uint32_t ep_attr, flags;
	int rc;
	const struct dyn_cfg_dtb_info_t *image_info __unused;

	/* Load the SPM Core manifest */
	rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
	if (rc != 0) {
		WARN("No or invalid SPM Core manifest image provided by BL2\n");
		return rc;
	}

	/*
	 * Ensure that the SPM Core version is compatible with the SPM
	 * Dispatcher version.
	 */
	if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
	    (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
		WARN("Unsupported FFA version (%u.%u)\n",
		     spmc_attrs.major_version, spmc_attrs.minor_version);
		return -EINVAL;
	}

	VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
		spmc_attrs.minor_version);

	VERBOSE("SPM Core run time EL%x.\n",
		SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);

	/* Validate the SPMC ID, Ensure high bit is set */
	if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
	     SPMC_SECURE_ID_MASK) == 0U) {
		WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
		return -EINVAL;
	}

	/* Validate the SPM Core execution state */
	if ((spmc_attrs.exec_state != MODE_RW_64) &&
	    (spmc_attrs.exec_state != MODE_RW_32)) {
		WARN("Unsupported %s%x.\n", "SPM Core execution state 0x",
		     spmc_attrs.exec_state);
		return -EINVAL;
	}

	VERBOSE("%s%x.\n", "SPM Core execution state 0x",
		spmc_attrs.exec_state);

#if SPMD_SPM_AT_SEL2
	/* Ensure manifest has not requested AArch32 state in S-EL2 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		WARN("AArch32 state at S-EL2 is not supported.\n");
		return -EINVAL;
	}

	/*
	 * Check if S-EL2 is supported on this system if S-EL2
	 * is required for SPM
	 */
	if (!is_feat_sel2_supported()) {
		WARN("SPM Core run time S-EL2 is not supported.\n");
		return -EINVAL;
	}
#endif /* SPMD_SPM_AT_SEL2 */

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = SECURE | EP_ST_ENABLE;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
		/* EL3 runs big-endian; the SPMC entry point inherits it. */
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);

	/*
	 * Populate SPSR for SPM Core based upon validated parameters from the
	 * manifest.
	 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
						 SPSR_E_LITTLE,
						 DAIF_FIQ_BIT |
						 DAIF_IRQ_BIT |
						 DAIF_ABT_BIT);
	} else {

#if SPMD_SPM_AT_SEL2
		static const uint32_t runtime_el = MODE_EL2;
#else
		static const uint32_t runtime_el = MODE_EL1;
#endif
		spmc_ep_info->spsr = SPSR_64(runtime_el,
					     MODE_SP_ELX,
					     DISABLE_ALL_EXCEPTIONS);
	}

#if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
	image_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, TOS_FW_CONFIG_ID);
	assert(image_info != NULL);

	if ((image_info->config_addr == 0UL) ||
	    (image_info->secondary_config_addr == 0UL) ||
	    (image_info->config_max_size == 0UL)) {
		return -EINVAL;
	}

	/* Copy manifest from root->secure region */
	spmd_do_sec_cpy(image_info->config_addr,
			image_info->secondary_config_addr,
			image_info->config_max_size);

	/* Update ep info of BL32 */
	assert(spmc_ep_info != NULL);
	spmc_ep_info->args.arg0 = image_info->secondary_config_addr;
#endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */

	/* Set an initial SPMC context state for all cores. */
	for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
		spm_core_context[core_id].state = SPMC_STATE_OFF;

		/* Setup an initial cpu context for the SPMC. */
		cpu_ctx = &spm_core_context[core_id].cpu_ctx;
		cm_setup_context(cpu_ctx, spmc_ep_info);

		/*
		 * Pass the core linear ID to the SPMC through x4.
		 * (TF-A implementation defined behavior helping
		 * a legacy TOS migration to adopt FF-A).
		 */
		write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4, core_id);
	}

	/* Register power management hooks with PSCI */
	psci_register_spd_pm_hook(&spmd_pm);

	/* Register init function for deferred init. */
	bl31_register_bl32_init(&spmd_init);

	INFO("SPM Core setup done.\n");

	/*
	 * Register an interrupt handler routing secure interrupts to SPMD
	 * while the NWd is running.
	 */
	flags = 0;
	set_interrupt_rm_flag(flags, NON_SECURE);
	rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
					     spmd_secure_interrupt_handler,
					     flags);
	if (rc != 0) {
		panic();
	}

	/*
	 * Register an interrupt handler routing Group0 interrupts to SPMD
	 * while the NWd is running.
	 * (Reuses the same NON_SECURE routing flags as above.)
	 */
	rc = register_interrupt_type_handler(INTR_TYPE_EL3,
					     spmd_group0_interrupt_handler_nwd,
					     flags);
	if (rc != 0) {
		panic();
	}
	return 0;
}

/*******************************************************************************
 * Initialize context of SPM Core.
 * Always returns 0 so that boot proceeds even when SPM initialization is
 * skipped or fails; failures are reported with WARN.
 ******************************************************************************/
int spmd_setup(void)
{
	int rc;
	void *spmc_manifest;

	/*
	 * If the SPMC is at EL3, then just initialise it directly. The
	 * shenanigans of when it is at a lower EL are not needed.
	 */
	if (is_spmc_at_el3()) {
		/* Allow the SPMC to populate its attributes directly. */
		spmc_populate_attrs(&spmc_attrs);

		rc = spmc_setup();
		if (rc != 0) {
			WARN("SPMC initialisation failed 0x%x.\n", rc);
		}
		return 0;
	}

	spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (spmc_ep_info == NULL) {
		WARN("No SPM Core image provided by BL2 boot loader.\n");
		return 0;
	}

	/* Under no circumstances will this parameter be 0 */
	assert(spmc_ep_info->pc != 0ULL);

	/*
	 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
	 * be used as a manifest for the SPM Core at the next lower EL/mode.
	 */
	spmc_manifest = (void *)spmc_ep_info->args.arg0;
	if (spmc_manifest == NULL) {
		WARN("Invalid or absent SPM Core manifest.\n");
		return 0;
	}

	/* Load manifest, init SPMC */
	rc = spmd_spmc_init(spmc_manifest);
	if (rc != 0) {
		WARN("Booting device without SPM initialization.\n");
	}

	return 0;
}

/*******************************************************************************
 * Forward FF-A SMCs to the other security state.
 ******************************************************************************/
uint64_t spmd_smc_switch_state(uint32_t smc_fid,
			       bool secure_origin,
			       uint64_t x1,
			       uint64_t x2,
			       uint64_t x3,
			       uint64_t x4,
			       void *handle)
{
	unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
	unsigned int secure_state_out = (!secure_origin) ?
SECURE : NON_SECURE; 64093ff138bSOlivier Deprez 6410f14d02fSMax Shvetsov /* Save incoming security state */ 642033039f8SMax Shvetsov #if SPMD_SPM_AT_SEL2 643678ce223SOlivier Deprez if (secure_state_in == NON_SECURE) { 644678ce223SOlivier Deprez cm_el1_sysregs_context_save(secure_state_in); 645678ce223SOlivier Deprez } 64693ff138bSOlivier Deprez cm_el2_sysregs_context_save(secure_state_in); 647678ce223SOlivier Deprez #else 648678ce223SOlivier Deprez cm_el1_sysregs_context_save(secure_state_in); 649033039f8SMax Shvetsov #endif 6500f14d02fSMax Shvetsov 6510f14d02fSMax Shvetsov /* Restore outgoing security state */ 652033039f8SMax Shvetsov #if SPMD_SPM_AT_SEL2 653678ce223SOlivier Deprez if (secure_state_out == NON_SECURE) { 654678ce223SOlivier Deprez cm_el1_sysregs_context_restore(secure_state_out); 655678ce223SOlivier Deprez } 65693ff138bSOlivier Deprez cm_el2_sysregs_context_restore(secure_state_out); 657678ce223SOlivier Deprez #else 658678ce223SOlivier Deprez cm_el1_sysregs_context_restore(secure_state_out); 659033039f8SMax Shvetsov #endif 66093ff138bSOlivier Deprez cm_set_next_eret_context(secure_state_out); 6610f14d02fSMax Shvetsov 662eaaf517cSRaghu Krishnamurthy #if SPMD_SPM_AT_SEL2 663eaaf517cSRaghu Krishnamurthy /* 664eaaf517cSRaghu Krishnamurthy * If SPMC is at SEL2, save additional registers x8-x17, which may 665eaaf517cSRaghu Krishnamurthy * be used in FF-A calls such as FFA_PARTITION_INFO_GET_REGS. 666eaaf517cSRaghu Krishnamurthy * Note that technically, all SPMCs can support this, but this code is 667eaaf517cSRaghu Krishnamurthy * under ifdef to minimize breakage in case other SPMCs do not save 668eaaf517cSRaghu Krishnamurthy * and restore x8-x17. 
669eaaf517cSRaghu Krishnamurthy * We also need to pass through these registers since not all FF-A ABIs 670eaaf517cSRaghu Krishnamurthy * modify x8-x17, in which case, SMCCC requires that these registers be 671eaaf517cSRaghu Krishnamurthy * preserved, so the SPMD passes through these registers and expects the 672eaaf517cSRaghu Krishnamurthy * SPMC to save and restore (potentially also modify) them. 673eaaf517cSRaghu Krishnamurthy */ 674eaaf517cSRaghu Krishnamurthy SMC_RET18(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4, 675eaaf517cSRaghu Krishnamurthy SMC_GET_GP(handle, CTX_GPREG_X5), 676eaaf517cSRaghu Krishnamurthy SMC_GET_GP(handle, CTX_GPREG_X6), 677eaaf517cSRaghu Krishnamurthy SMC_GET_GP(handle, CTX_GPREG_X7), 678eaaf517cSRaghu Krishnamurthy SMC_GET_GP(handle, CTX_GPREG_X8), 679eaaf517cSRaghu Krishnamurthy SMC_GET_GP(handle, CTX_GPREG_X9), 680eaaf517cSRaghu Krishnamurthy SMC_GET_GP(handle, CTX_GPREG_X10), 681eaaf517cSRaghu Krishnamurthy SMC_GET_GP(handle, CTX_GPREG_X11), 682eaaf517cSRaghu Krishnamurthy SMC_GET_GP(handle, CTX_GPREG_X12), 683eaaf517cSRaghu Krishnamurthy SMC_GET_GP(handle, CTX_GPREG_X13), 684eaaf517cSRaghu Krishnamurthy SMC_GET_GP(handle, CTX_GPREG_X14), 685eaaf517cSRaghu Krishnamurthy SMC_GET_GP(handle, CTX_GPREG_X15), 686eaaf517cSRaghu Krishnamurthy SMC_GET_GP(handle, CTX_GPREG_X16), 687eaaf517cSRaghu Krishnamurthy SMC_GET_GP(handle, CTX_GPREG_X17) 688eaaf517cSRaghu Krishnamurthy ); 689eaaf517cSRaghu Krishnamurthy 690eaaf517cSRaghu Krishnamurthy #else 69193ff138bSOlivier Deprez SMC_RET8(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4, 6920f14d02fSMax Shvetsov SMC_GET_GP(handle, CTX_GPREG_X5), 6930f14d02fSMax Shvetsov SMC_GET_GP(handle, CTX_GPREG_X6), 6940f14d02fSMax Shvetsov SMC_GET_GP(handle, CTX_GPREG_X7)); 695eaaf517cSRaghu Krishnamurthy #endif 6960f14d02fSMax Shvetsov } 6970f14d02fSMax Shvetsov 6980f14d02fSMax Shvetsov /******************************************************************************* 699bb01a673SMarc 
Bonnici * Forward SMCs to the other security state. 700bb01a673SMarc Bonnici ******************************************************************************/ 701bb01a673SMarc Bonnici static uint64_t spmd_smc_forward(uint32_t smc_fid, 702bb01a673SMarc Bonnici bool secure_origin, 703bb01a673SMarc Bonnici uint64_t x1, 704bb01a673SMarc Bonnici uint64_t x2, 705bb01a673SMarc Bonnici uint64_t x3, 706bb01a673SMarc Bonnici uint64_t x4, 707bb01a673SMarc Bonnici void *cookie, 708bb01a673SMarc Bonnici void *handle, 709bb01a673SMarc Bonnici uint64_t flags) 710bb01a673SMarc Bonnici { 711bb01a673SMarc Bonnici if (is_spmc_at_el3() && !secure_origin) { 712bb01a673SMarc Bonnici return spmc_smc_handler(smc_fid, secure_origin, x1, x2, x3, x4, 713bb01a673SMarc Bonnici cookie, handle, flags); 714bb01a673SMarc Bonnici } 715bb01a673SMarc Bonnici return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2, x3, x4, 716bb01a673SMarc Bonnici handle); 717bb01a673SMarc Bonnici 718bb01a673SMarc Bonnici } 719bb01a673SMarc Bonnici 720bb01a673SMarc Bonnici /******************************************************************************* 721662af36dSJ-Alves * Return FFA_ERROR with specified error code 7220f14d02fSMax Shvetsov ******************************************************************************/ 723662af36dSJ-Alves static uint64_t spmd_ffa_error_return(void *handle, int error_code) 7240f14d02fSMax Shvetsov { 725e46b2fd2SJ-Alves SMC_RET8(handle, (uint32_t) FFA_ERROR, 726e46b2fd2SJ-Alves FFA_TARGET_INFO_MBZ, (uint32_t)error_code, 727662af36dSJ-Alves FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, 728662af36dSJ-Alves FFA_PARAM_MBZ, FFA_PARAM_MBZ); 729bdd2596dSAchin Gupta } 730bdd2596dSAchin Gupta 731f0d743dbSOlivier Deprez /******************************************************************************* 732f0d743dbSOlivier Deprez * spmd_check_address_in_binary_image 733f0d743dbSOlivier Deprez ******************************************************************************/ 734f0d743dbSOlivier Deprez 
bool spmd_check_address_in_binary_image(uint64_t address) 735f0d743dbSOlivier Deprez { 736f0d743dbSOlivier Deprez assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size)); 737f0d743dbSOlivier Deprez 738f0d743dbSOlivier Deprez return ((address >= spmc_attrs.load_address) && 739f0d743dbSOlivier Deprez (address < (spmc_attrs.load_address + spmc_attrs.binary_size))); 740f0d743dbSOlivier Deprez } 741f0d743dbSOlivier Deprez 742c2901419SOlivier Deprez /****************************************************************************** 743c2901419SOlivier Deprez * spmd_is_spmc_message 744c2901419SOlivier Deprez *****************************************************************************/ 745c2901419SOlivier Deprez static bool spmd_is_spmc_message(unsigned int ep) 746c2901419SOlivier Deprez { 747bb01a673SMarc Bonnici if (is_spmc_at_el3()) { 748bb01a673SMarc Bonnici return false; 749bb01a673SMarc Bonnici } 750bb01a673SMarc Bonnici 751c2901419SOlivier Deprez return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID) 752c2901419SOlivier Deprez && (ffa_endpoint_source(ep) == spmc_attrs.spmc_id)); 753c2901419SOlivier Deprez } 754c2901419SOlivier Deprez 755f0d743dbSOlivier Deprez /****************************************************************************** 756f0d743dbSOlivier Deprez * spmd_handle_spmc_message 757f0d743dbSOlivier Deprez *****************************************************************************/ 758a92bc73bSOlivier Deprez static int spmd_handle_spmc_message(unsigned long long msg, 759a92bc73bSOlivier Deprez unsigned long long parm1, unsigned long long parm2, 760a92bc73bSOlivier Deprez unsigned long long parm3, unsigned long long parm4) 761f0d743dbSOlivier Deprez { 762f0d743dbSOlivier Deprez VERBOSE("%s %llx %llx %llx %llx %llx\n", __func__, 763f0d743dbSOlivier Deprez msg, parm1, parm2, parm3, parm4); 764f0d743dbSOlivier Deprez 765f0d743dbSOlivier Deprez return -EINVAL; 766f0d743dbSOlivier Deprez } 767f0d743dbSOlivier Deprez 
768bdd2596dSAchin Gupta /******************************************************************************* 769bb01a673SMarc Bonnici * This function forwards FF-A SMCs to either the main SPMD handler or the 770bb01a673SMarc Bonnici * SPMC at EL3, depending on the origin security state, if enabled. 771bb01a673SMarc Bonnici ******************************************************************************/ 772bb01a673SMarc Bonnici uint64_t spmd_ffa_smc_handler(uint32_t smc_fid, 773bb01a673SMarc Bonnici uint64_t x1, 774bb01a673SMarc Bonnici uint64_t x2, 775bb01a673SMarc Bonnici uint64_t x3, 776bb01a673SMarc Bonnici uint64_t x4, 777bb01a673SMarc Bonnici void *cookie, 778bb01a673SMarc Bonnici void *handle, 779bb01a673SMarc Bonnici uint64_t flags) 780bb01a673SMarc Bonnici { 781bb01a673SMarc Bonnici if (is_spmc_at_el3()) { 782bb01a673SMarc Bonnici /* 783bb01a673SMarc Bonnici * If we have an SPMC at EL3 allow handling of the SMC first. 784bb01a673SMarc Bonnici * The SPMC will call back through to SPMD handler if required. 785bb01a673SMarc Bonnici */ 786bb01a673SMarc Bonnici if (is_caller_secure(flags)) { 787bb01a673SMarc Bonnici return spmc_smc_handler(smc_fid, 788bb01a673SMarc Bonnici is_caller_secure(flags), 789bb01a673SMarc Bonnici x1, x2, x3, x4, cookie, 790bb01a673SMarc Bonnici handle, flags); 791bb01a673SMarc Bonnici } 792bb01a673SMarc Bonnici } 793bb01a673SMarc Bonnici return spmd_smc_handler(smc_fid, x1, x2, x3, x4, cookie, 794bb01a673SMarc Bonnici handle, flags); 795bb01a673SMarc Bonnici } 796bb01a673SMarc Bonnici 797bb01a673SMarc Bonnici /******************************************************************************* 798662af36dSJ-Alves * This function handles all SMCs in the range reserved for FFA. 
Each call is 799bdd2596dSAchin Gupta * either forwarded to the other security state or handled by the SPM dispatcher 800bdd2596dSAchin Gupta ******************************************************************************/ 80152696946SOlivier Deprez uint64_t spmd_smc_handler(uint32_t smc_fid, 80252696946SOlivier Deprez uint64_t x1, 80352696946SOlivier Deprez uint64_t x2, 80452696946SOlivier Deprez uint64_t x3, 80552696946SOlivier Deprez uint64_t x4, 80652696946SOlivier Deprez void *cookie, 80752696946SOlivier Deprez void *handle, 808bdd2596dSAchin Gupta uint64_t flags) 809bdd2596dSAchin Gupta { 810cdb49d47SOlivier Deprez unsigned int linear_id = plat_my_core_pos(); 81152696946SOlivier Deprez spmd_spm_core_context_t *ctx = spmd_get_context(); 81293ff138bSOlivier Deprez bool secure_origin; 81393ff138bSOlivier Deprez int32_t ret; 8144388f28fSJ-Alves uint32_t input_version; 815bdd2596dSAchin Gupta 816bdd2596dSAchin Gupta /* Determine which security state this SMC originated from */ 81793ff138bSOlivier Deprez secure_origin = is_caller_secure(flags); 818bdd2596dSAchin Gupta 8194ce3e99aSScott Branden VERBOSE("SPM(%u): 0x%x 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 8204ce3e99aSScott Branden " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n", 821cdb49d47SOlivier Deprez linear_id, smc_fid, x1, x2, x3, x4, 822cdb49d47SOlivier Deprez SMC_GET_GP(handle, CTX_GPREG_X5), 823bdd2596dSAchin Gupta SMC_GET_GP(handle, CTX_GPREG_X6), 824bdd2596dSAchin Gupta SMC_GET_GP(handle, CTX_GPREG_X7)); 825bdd2596dSAchin Gupta 826bdd2596dSAchin Gupta switch (smc_fid) { 827662af36dSJ-Alves case FFA_ERROR: 828bdd2596dSAchin Gupta /* 829bdd2596dSAchin Gupta * Check if this is the first invocation of this interface on 83052696946SOlivier Deprez * this CPU. If so, then indicate that the SPM Core initialised 831bdd2596dSAchin Gupta * unsuccessfully. 
832bdd2596dSAchin Gupta */ 8339dcf63ddSOlivier Deprez if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) { 834bdd2596dSAchin Gupta spmd_spm_core_sync_exit(x2); 8350f14d02fSMax Shvetsov } 836bdd2596dSAchin Gupta 83793ff138bSOlivier Deprez return spmd_smc_forward(smc_fid, secure_origin, 838bb01a673SMarc Bonnici x1, x2, x3, x4, cookie, 839bb01a673SMarc Bonnici handle, flags); 840bdd2596dSAchin Gupta break; /* not reached */ 841bdd2596dSAchin Gupta 842662af36dSJ-Alves case FFA_VERSION: 8434388f28fSJ-Alves input_version = (uint32_t)(0xFFFFFFFF & x1); 844bdd2596dSAchin Gupta /* 8454388f28fSJ-Alves * If caller is secure and SPMC was initialized, 8464388f28fSJ-Alves * return FFA_VERSION of SPMD. 8474388f28fSJ-Alves * If caller is non secure and SPMC was initialized, 8489576fa93SMarc Bonnici * forward to the EL3 SPMC if enabled, otherwise return 8499576fa93SMarc Bonnici * the SPMC version if implemented at a lower EL. 8504388f28fSJ-Alves * Sanity check to "input_version". 851bb01a673SMarc Bonnici * If the EL3 SPMC is enabled, ignore the SPMC state as 852bb01a673SMarc Bonnici * this is not used. 853bdd2596dSAchin Gupta */ 8544388f28fSJ-Alves if ((input_version & FFA_VERSION_BIT31_MASK) || 855bb01a673SMarc Bonnici (!is_spmc_at_el3() && (ctx->state == SPMC_STATE_RESET))) { 8564388f28fSJ-Alves ret = FFA_ERROR_NOT_SUPPORTED; 8574388f28fSJ-Alves } else if (!secure_origin) { 8589576fa93SMarc Bonnici if (is_spmc_at_el3()) { 8599576fa93SMarc Bonnici /* 8609576fa93SMarc Bonnici * Forward the call directly to the EL3 SPMC, if 8619576fa93SMarc Bonnici * enabled, as we don't need to wrap the call in 8629576fa93SMarc Bonnici * a direct request. 
8639576fa93SMarc Bonnici */ 8649576fa93SMarc Bonnici return spmd_smc_forward(smc_fid, secure_origin, 8659576fa93SMarc Bonnici x1, x2, x3, x4, cookie, 8669576fa93SMarc Bonnici handle, flags); 8679576fa93SMarc Bonnici } 8689576fa93SMarc Bonnici 8699944f557SDaniel Boulby gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx); 8709944f557SDaniel Boulby uint64_t rc; 8719944f557SDaniel Boulby 8729944f557SDaniel Boulby if (spmc_attrs.major_version == 1 && 8739944f557SDaniel Boulby spmc_attrs.minor_version == 0) { 874e46b2fd2SJ-Alves ret = MAKE_FFA_VERSION(spmc_attrs.major_version, 875e46b2fd2SJ-Alves spmc_attrs.minor_version); 8769944f557SDaniel Boulby SMC_RET8(handle, (uint32_t)ret, 8779944f557SDaniel Boulby FFA_TARGET_INFO_MBZ, 8789944f557SDaniel Boulby FFA_TARGET_INFO_MBZ, 8799944f557SDaniel Boulby FFA_PARAM_MBZ, FFA_PARAM_MBZ, 8809944f557SDaniel Boulby FFA_PARAM_MBZ, FFA_PARAM_MBZ, 8819944f557SDaniel Boulby FFA_PARAM_MBZ); 8829944f557SDaniel Boulby break; 8839944f557SDaniel Boulby } 8849944f557SDaniel Boulby /* Save non-secure system registers context */ 8859944f557SDaniel Boulby cm_el1_sysregs_context_save(NON_SECURE); 8869944f557SDaniel Boulby #if SPMD_SPM_AT_SEL2 8879944f557SDaniel Boulby cm_el2_sysregs_context_save(NON_SECURE); 8889944f557SDaniel Boulby #endif 8899944f557SDaniel Boulby 8909944f557SDaniel Boulby /* 8919944f557SDaniel Boulby * The incoming request has FFA_VERSION as X0 smc_fid 8929944f557SDaniel Boulby * and requested version in x1. Prepare a direct request 8939944f557SDaniel Boulby * from SPMD to SPMC with FFA_VERSION framework function 8949944f557SDaniel Boulby * identifier in X2 and requested version in X3. 
8959944f557SDaniel Boulby */ 8969944f557SDaniel Boulby spmd_build_spmc_message(gpregs, 8979944f557SDaniel Boulby SPMD_FWK_MSG_FFA_VERSION_REQ, 8989944f557SDaniel Boulby input_version); 8999944f557SDaniel Boulby 9009944f557SDaniel Boulby rc = spmd_spm_core_sync_entry(ctx); 9019944f557SDaniel Boulby 9029944f557SDaniel Boulby if ((rc != 0ULL) || 9039944f557SDaniel Boulby (SMC_GET_GP(gpregs, CTX_GPREG_X0) != 9049944f557SDaniel Boulby FFA_MSG_SEND_DIRECT_RESP_SMC32) || 9059944f557SDaniel Boulby (SMC_GET_GP(gpregs, CTX_GPREG_X2) != 90659bd2ad8SMarc Bonnici (FFA_FWK_MSG_BIT | 9079944f557SDaniel Boulby SPMD_FWK_MSG_FFA_VERSION_RESP))) { 9089944f557SDaniel Boulby ERROR("Failed to forward FFA_VERSION\n"); 9099944f557SDaniel Boulby ret = FFA_ERROR_NOT_SUPPORTED; 9109944f557SDaniel Boulby } else { 9119944f557SDaniel Boulby ret = SMC_GET_GP(gpregs, CTX_GPREG_X3); 9129944f557SDaniel Boulby } 9139944f557SDaniel Boulby 9149944f557SDaniel Boulby /* 9159944f557SDaniel Boulby * Return here after SPMC has handled FFA_VERSION. 9169944f557SDaniel Boulby * The returned SPMC version is held in X3. 9179944f557SDaniel Boulby * Forward this version in X0 to the non-secure caller. 
9189944f557SDaniel Boulby */ 9199944f557SDaniel Boulby return spmd_smc_forward(ret, true, FFA_PARAM_MBZ, 9209944f557SDaniel Boulby FFA_PARAM_MBZ, FFA_PARAM_MBZ, 921bb01a673SMarc Bonnici FFA_PARAM_MBZ, cookie, gpregs, 922bb01a673SMarc Bonnici flags); 9234388f28fSJ-Alves } else { 924e46b2fd2SJ-Alves ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR, 925e46b2fd2SJ-Alves FFA_VERSION_MINOR); 9264388f28fSJ-Alves } 9274388f28fSJ-Alves 928e46b2fd2SJ-Alves SMC_RET8(handle, (uint32_t)ret, FFA_TARGET_INFO_MBZ, 929e46b2fd2SJ-Alves FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, 930e46b2fd2SJ-Alves FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ); 931bdd2596dSAchin Gupta break; /* not reached */ 932bdd2596dSAchin Gupta 933662af36dSJ-Alves case FFA_FEATURES: 934bdd2596dSAchin Gupta /* 935bdd2596dSAchin Gupta * This is an optional interface. Do the minimal checks and 93652696946SOlivier Deprez * forward to SPM Core which will handle it if implemented. 937bdd2596dSAchin Gupta */ 938bdd2596dSAchin Gupta 93952696946SOlivier Deprez /* Forward SMC from Normal world to the SPM Core */ 94093ff138bSOlivier Deprez if (!secure_origin) { 94193ff138bSOlivier Deprez return spmd_smc_forward(smc_fid, secure_origin, 942bb01a673SMarc Bonnici x1, x2, x3, x4, cookie, 943bb01a673SMarc Bonnici handle, flags); 94452696946SOlivier Deprez } 94552696946SOlivier Deprez 946bdd2596dSAchin Gupta /* 947bdd2596dSAchin Gupta * Return success if call was from secure world i.e. all 948662af36dSJ-Alves * FFA functions are supported. This is essentially a 949bdd2596dSAchin Gupta * nop. 
950bdd2596dSAchin Gupta */ 951662af36dSJ-Alves SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4, 952bdd2596dSAchin Gupta SMC_GET_GP(handle, CTX_GPREG_X5), 953bdd2596dSAchin Gupta SMC_GET_GP(handle, CTX_GPREG_X6), 954bdd2596dSAchin Gupta SMC_GET_GP(handle, CTX_GPREG_X7)); 9550f14d02fSMax Shvetsov 956bdd2596dSAchin Gupta break; /* not reached */ 957bdd2596dSAchin Gupta 958662af36dSJ-Alves case FFA_ID_GET: 959ac03ac5eSMax Shvetsov /* 960662af36dSJ-Alves * Returns the ID of the calling FFA component. 961ac03ac5eSMax Shvetsov */ 962ac03ac5eSMax Shvetsov if (!secure_origin) { 963662af36dSJ-Alves SMC_RET8(handle, FFA_SUCCESS_SMC32, 964662af36dSJ-Alves FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID, 965662af36dSJ-Alves FFA_PARAM_MBZ, FFA_PARAM_MBZ, 966662af36dSJ-Alves FFA_PARAM_MBZ, FFA_PARAM_MBZ, 967662af36dSJ-Alves FFA_PARAM_MBZ); 96852696946SOlivier Deprez } 96952696946SOlivier Deprez 970662af36dSJ-Alves SMC_RET8(handle, FFA_SUCCESS_SMC32, 971662af36dSJ-Alves FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id, 972662af36dSJ-Alves FFA_PARAM_MBZ, FFA_PARAM_MBZ, 973662af36dSJ-Alves FFA_PARAM_MBZ, FFA_PARAM_MBZ, 974662af36dSJ-Alves FFA_PARAM_MBZ); 975ac03ac5eSMax Shvetsov 976ac03ac5eSMax Shvetsov break; /* not reached */ 977ac03ac5eSMax Shvetsov 978cdb49d47SOlivier Deprez case FFA_SECONDARY_EP_REGISTER_SMC64: 979cdb49d47SOlivier Deprez if (secure_origin) { 980cdb49d47SOlivier Deprez ret = spmd_pm_secondary_ep_register(x1); 981cdb49d47SOlivier Deprez 982cdb49d47SOlivier Deprez if (ret < 0) { 983cdb49d47SOlivier Deprez SMC_RET8(handle, FFA_ERROR_SMC64, 984cdb49d47SOlivier Deprez FFA_TARGET_INFO_MBZ, ret, 985cdb49d47SOlivier Deprez FFA_PARAM_MBZ, FFA_PARAM_MBZ, 986cdb49d47SOlivier Deprez FFA_PARAM_MBZ, FFA_PARAM_MBZ, 987cdb49d47SOlivier Deprez FFA_PARAM_MBZ); 988cdb49d47SOlivier Deprez } else { 989cdb49d47SOlivier Deprez SMC_RET8(handle, FFA_SUCCESS_SMC64, 990cdb49d47SOlivier Deprez FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, 991cdb49d47SOlivier Deprez FFA_PARAM_MBZ, FFA_PARAM_MBZ, 
992cdb49d47SOlivier Deprez FFA_PARAM_MBZ, FFA_PARAM_MBZ, 993cdb49d47SOlivier Deprez FFA_PARAM_MBZ); 994cdb49d47SOlivier Deprez } 995cdb49d47SOlivier Deprez } 996cdb49d47SOlivier Deprez 997cdb49d47SOlivier Deprez return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 998cdb49d47SOlivier Deprez break; /* Not reached */ 999cdb49d47SOlivier Deprez 100070c121a2SDaniel Boulby case FFA_SPM_ID_GET: 100170c121a2SDaniel Boulby if (MAKE_FFA_VERSION(1, 1) > FFA_VERSION_COMPILED) { 100270c121a2SDaniel Boulby return spmd_ffa_error_return(handle, 100370c121a2SDaniel Boulby FFA_ERROR_NOT_SUPPORTED); 100470c121a2SDaniel Boulby } 100570c121a2SDaniel Boulby /* 100670c121a2SDaniel Boulby * Returns the ID of the SPMC or SPMD depending on the FF-A 100770c121a2SDaniel Boulby * instance where this function is invoked 100870c121a2SDaniel Boulby */ 100970c121a2SDaniel Boulby if (!secure_origin) { 101070c121a2SDaniel Boulby SMC_RET8(handle, FFA_SUCCESS_SMC32, 101170c121a2SDaniel Boulby FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id, 101270c121a2SDaniel Boulby FFA_PARAM_MBZ, FFA_PARAM_MBZ, 101370c121a2SDaniel Boulby FFA_PARAM_MBZ, FFA_PARAM_MBZ, 101470c121a2SDaniel Boulby FFA_PARAM_MBZ); 101570c121a2SDaniel Boulby } 101670c121a2SDaniel Boulby SMC_RET8(handle, FFA_SUCCESS_SMC32, 101770c121a2SDaniel Boulby FFA_TARGET_INFO_MBZ, SPMD_DIRECT_MSG_ENDPOINT_ID, 101870c121a2SDaniel Boulby FFA_PARAM_MBZ, FFA_PARAM_MBZ, 101970c121a2SDaniel Boulby FFA_PARAM_MBZ, FFA_PARAM_MBZ, 102070c121a2SDaniel Boulby FFA_PARAM_MBZ); 102170c121a2SDaniel Boulby 102270c121a2SDaniel Boulby break; /* not reached */ 102370c121a2SDaniel Boulby 1024f0d743dbSOlivier Deprez case FFA_MSG_SEND_DIRECT_REQ_SMC32: 10255519f07cSShruti case FFA_MSG_SEND_DIRECT_REQ_SMC64: 10265519f07cSShruti if (!secure_origin) { 10275519f07cSShruti /* Validate source endpoint is non-secure for non-secure caller. 
*/ 10285519f07cSShruti if (ffa_is_secure_world_id(ffa_endpoint_source(x1))) { 10295519f07cSShruti return spmd_ffa_error_return(handle, 10305519f07cSShruti FFA_ERROR_INVALID_PARAMETER); 10315519f07cSShruti } 10325519f07cSShruti } 1033f0d743dbSOlivier Deprez if (secure_origin && spmd_is_spmc_message(x1)) { 1034f0d743dbSOlivier Deprez ret = spmd_handle_spmc_message(x3, x4, 1035f0d743dbSOlivier Deprez SMC_GET_GP(handle, CTX_GPREG_X5), 1036f0d743dbSOlivier Deprez SMC_GET_GP(handle, CTX_GPREG_X6), 1037f0d743dbSOlivier Deprez SMC_GET_GP(handle, CTX_GPREG_X7)); 1038f0d743dbSOlivier Deprez 1039f0d743dbSOlivier Deprez SMC_RET8(handle, FFA_SUCCESS_SMC32, 1040f0d743dbSOlivier Deprez FFA_TARGET_INFO_MBZ, ret, 1041f0d743dbSOlivier Deprez FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1042f0d743dbSOlivier Deprez FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1043f0d743dbSOlivier Deprez FFA_PARAM_MBZ); 1044f0d743dbSOlivier Deprez } else { 1045f0d743dbSOlivier Deprez /* Forward direct message to the other world */ 1046f0d743dbSOlivier Deprez return spmd_smc_forward(smc_fid, secure_origin, 1047bb01a673SMarc Bonnici x1, x2, x3, x4, cookie, 1048bb01a673SMarc Bonnici handle, flags); 1049f0d743dbSOlivier Deprez } 1050f0d743dbSOlivier Deprez break; /* Not reached */ 1051f0d743dbSOlivier Deprez 1052f0d743dbSOlivier Deprez case FFA_MSG_SEND_DIRECT_RESP_SMC32: 1053f0d743dbSOlivier Deprez if (secure_origin && spmd_is_spmc_message(x1)) { 10548cb99c3fSOlivier Deprez spmd_spm_core_sync_exit(0ULL); 1055f0d743dbSOlivier Deprez } else { 1056f0d743dbSOlivier Deprez /* Forward direct message to the other world */ 1057f0d743dbSOlivier Deprez return spmd_smc_forward(smc_fid, secure_origin, 1058bb01a673SMarc Bonnici x1, x2, x3, x4, cookie, 1059bb01a673SMarc Bonnici handle, flags); 1060f0d743dbSOlivier Deprez } 1061f0d743dbSOlivier Deprez break; /* Not reached */ 1062f0d743dbSOlivier Deprez 1063662af36dSJ-Alves case FFA_RX_RELEASE: 1064662af36dSJ-Alves case FFA_RXTX_MAP_SMC32: 1065662af36dSJ-Alves case FFA_RXTX_MAP_SMC64: 
1066662af36dSJ-Alves case FFA_RXTX_UNMAP: 1067545b8eb3SRuari Phipps case FFA_PARTITION_INFO_GET: 1068fc3f4800SJ-Alves #if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED 1069fc3f4800SJ-Alves case FFA_NOTIFICATION_BITMAP_CREATE: 1070fc3f4800SJ-Alves case FFA_NOTIFICATION_BITMAP_DESTROY: 1071fc3f4800SJ-Alves case FFA_NOTIFICATION_BIND: 1072fc3f4800SJ-Alves case FFA_NOTIFICATION_UNBIND: 1073fc3f4800SJ-Alves case FFA_NOTIFICATION_SET: 1074fc3f4800SJ-Alves case FFA_NOTIFICATION_GET: 1075fc3f4800SJ-Alves case FFA_NOTIFICATION_INFO_GET: 1076fc3f4800SJ-Alves case FFA_NOTIFICATION_INFO_GET_SMC64: 1077c2eba07cSFederico Recanati case FFA_MSG_SEND2: 1078d555233fSFederico Recanati case FFA_RX_ACQUIRE: 1079fc3f4800SJ-Alves #endif 1080662af36dSJ-Alves case FFA_MSG_RUN: 1081c2eba07cSFederico Recanati /* 1082c2eba07cSFederico Recanati * Above calls should be invoked only by the Normal world and 1083c2eba07cSFederico Recanati * must not be forwarded from Secure world to Normal world. 1084c2eba07cSFederico Recanati */ 108593ff138bSOlivier Deprez if (secure_origin) { 1086662af36dSJ-Alves return spmd_ffa_error_return(handle, 1087662af36dSJ-Alves FFA_ERROR_NOT_SUPPORTED); 1088bdd2596dSAchin Gupta } 1089bdd2596dSAchin Gupta 1090e138400dSBoyan Karatotev /* Forward the call to the other world */ 1091e138400dSBoyan Karatotev /* fallthrough */ 1092662af36dSJ-Alves case FFA_MSG_SEND: 1093662af36dSJ-Alves case FFA_MSG_SEND_DIRECT_RESP_SMC64: 1094662af36dSJ-Alves case FFA_MEM_DONATE_SMC32: 1095662af36dSJ-Alves case FFA_MEM_DONATE_SMC64: 1096662af36dSJ-Alves case FFA_MEM_LEND_SMC32: 1097662af36dSJ-Alves case FFA_MEM_LEND_SMC64: 1098662af36dSJ-Alves case FFA_MEM_SHARE_SMC32: 1099662af36dSJ-Alves case FFA_MEM_SHARE_SMC64: 1100662af36dSJ-Alves case FFA_MEM_RETRIEVE_REQ_SMC32: 1101662af36dSJ-Alves case FFA_MEM_RETRIEVE_REQ_SMC64: 1102662af36dSJ-Alves case FFA_MEM_RETRIEVE_RESP: 1103662af36dSJ-Alves case FFA_MEM_RELINQUISH: 1104662af36dSJ-Alves case FFA_MEM_RECLAIM: 1105642db984SMarc Bonnici case 
FFA_MEM_FRAG_TX: 1106642db984SMarc Bonnici case FFA_MEM_FRAG_RX: 1107662af36dSJ-Alves case FFA_SUCCESS_SMC32: 1108662af36dSJ-Alves case FFA_SUCCESS_SMC64: 1109bdd2596dSAchin Gupta /* 1110bdd2596dSAchin Gupta * TODO: Assume that no requests originate from EL3 at the 1111bdd2596dSAchin Gupta * moment. This will change if a SP service is required in 1112bdd2596dSAchin Gupta * response to secure interrupts targeted to EL3. Until then 1113bdd2596dSAchin Gupta * simply forward the call to the Normal world. 1114bdd2596dSAchin Gupta */ 1115bdd2596dSAchin Gupta 111693ff138bSOlivier Deprez return spmd_smc_forward(smc_fid, secure_origin, 1117bb01a673SMarc Bonnici x1, x2, x3, x4, cookie, 1118bb01a673SMarc Bonnici handle, flags); 1119bdd2596dSAchin Gupta break; /* not reached */ 1120bdd2596dSAchin Gupta 1121662af36dSJ-Alves case FFA_MSG_WAIT: 1122bdd2596dSAchin Gupta /* 1123bdd2596dSAchin Gupta * Check if this is the first invocation of this interface on 1124bdd2596dSAchin Gupta * this CPU from the Secure world. If so, then indicate that the 112552696946SOlivier Deprez * SPM Core initialised successfully. 
1126bdd2596dSAchin Gupta */ 11279dcf63ddSOlivier Deprez if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) { 11288cb99c3fSOlivier Deprez spmd_spm_core_sync_exit(0ULL); 1129bdd2596dSAchin Gupta } 1130bdd2596dSAchin Gupta 1131e138400dSBoyan Karatotev /* Forward the call to the other world */ 1132e138400dSBoyan Karatotev /* fallthrough */ 1133386dc365SOlivier Deprez case FFA_INTERRUPT: 1134662af36dSJ-Alves case FFA_MSG_YIELD: 1135bdd2596dSAchin Gupta /* This interface must be invoked only by the Secure world */ 113693ff138bSOlivier Deprez if (!secure_origin) { 1137662af36dSJ-Alves return spmd_ffa_error_return(handle, 1138662af36dSJ-Alves FFA_ERROR_NOT_SUPPORTED); 1139bdd2596dSAchin Gupta } 1140bdd2596dSAchin Gupta 114193ff138bSOlivier Deprez return spmd_smc_forward(smc_fid, secure_origin, 1142bb01a673SMarc Bonnici x1, x2, x3, x4, cookie, 1143bb01a673SMarc Bonnici handle, flags); 1144bdd2596dSAchin Gupta break; /* not reached */ 1145bdd2596dSAchin Gupta 11468cb99c3fSOlivier Deprez case FFA_NORMAL_WORLD_RESUME: 11478cb99c3fSOlivier Deprez if (secure_origin && ctx->secure_interrupt_ongoing) { 11488cb99c3fSOlivier Deprez spmd_spm_core_sync_exit(0ULL); 11498cb99c3fSOlivier Deprez } else { 11508cb99c3fSOlivier Deprez return spmd_ffa_error_return(handle, FFA_ERROR_DENIED); 11518cb99c3fSOlivier Deprez } 11528cb99c3fSOlivier Deprez break; /* Not reached */ 1153eaaf517cSRaghu Krishnamurthy #if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED 1154eaaf517cSRaghu Krishnamurthy case FFA_PARTITION_INFO_GET_REGS_SMC64: 1155eaaf517cSRaghu Krishnamurthy if (secure_origin) { 1156eaaf517cSRaghu Krishnamurthy /* TODO: Future patches to enable support for this */ 1157eaaf517cSRaghu Krishnamurthy return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 1158eaaf517cSRaghu Krishnamurthy } 11598cb99c3fSOlivier Deprez 1160eaaf517cSRaghu Krishnamurthy /* Call only supported with SMCCC 1.2+ */ 1161eaaf517cSRaghu Krishnamurthy if (MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, 
SMCCC_MINOR_VERSION) < 0x10002) { 1162eaaf517cSRaghu Krishnamurthy return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 1163eaaf517cSRaghu Krishnamurthy } 1164eaaf517cSRaghu Krishnamurthy 1165eaaf517cSRaghu Krishnamurthy return spmd_smc_forward(smc_fid, secure_origin, 1166eaaf517cSRaghu Krishnamurthy x1, x2, x3, x4, cookie, 1167eaaf517cSRaghu Krishnamurthy handle, flags); 1168eaaf517cSRaghu Krishnamurthy break; /* Not reached */ 1169eaaf517cSRaghu Krishnamurthy #endif 1170*6671b3d8SMadhukar Pappireddy case FFA_EL3_INTR_HANDLE: 1171*6671b3d8SMadhukar Pappireddy if (secure_origin) { 1172*6671b3d8SMadhukar Pappireddy return spmd_handle_group0_intr_swd(handle); 1173*6671b3d8SMadhukar Pappireddy } else { 1174*6671b3d8SMadhukar Pappireddy return spmd_ffa_error_return(handle, FFA_ERROR_DENIED); 1175*6671b3d8SMadhukar Pappireddy } 1176bdd2596dSAchin Gupta default: 1177bdd2596dSAchin Gupta WARN("SPM: Unsupported call 0x%08x\n", smc_fid); 1178662af36dSJ-Alves return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 1179bdd2596dSAchin Gupta } 1180bdd2596dSAchin Gupta } 1181