/*
 * Copyright (c) 2020-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>

#include <lib/el3_runtime/context_mgmt.h>
#include <lib/spinlock.h>
#include "spmd_private.h"

static struct {
	bool secondary_ep_locked;
	uintptr_t secondary_ep;
	spinlock_t lock;
} g_spmd_pm;

/*******************************************************************************
 * spmd_build_spmc_message
 *
 * Builds an SPMD to SPMC direct message request.
 ******************************************************************************/
static void spmd_build_spmc_message(gp_regs_t *gpregs, unsigned long long message)
{
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
	write_ctx_reg(gpregs, CTX_GPREG_X1,
		      (SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
		      spmd_spmc_id_get());
	write_ctx_reg(gpregs, CTX_GPREG_X2, FFA_PARAM_MBZ);
	write_ctx_reg(gpregs, CTX_GPREG_X3, message);
}

/*******************************************************************************
 * spmd_pm_secondary_ep_register
 ******************************************************************************/
int spmd_pm_secondary_ep_register(uintptr_t entry_point)
{
	int ret = FFA_ERROR_INVALID_PARAMETER;

	spin_lock(&g_spmd_pm.lock);

	if (g_spmd_pm.secondary_ep_locked == true) {
		goto out;
	}

	/*
	 * Check the entry_point address is a PA within
	 * load_address <= entry_point < load_address + binary_size.
	 */
	if (!spmd_check_address_in_binary_image(entry_point)) {
		ERROR("%s entry point is not within image boundaries\n",
		      __func__);
		goto out;
	}

	g_spmd_pm.secondary_ep = entry_point;
	g_spmd_pm.secondary_ep_locked = true;

	VERBOSE("%s %lx\n", __func__, entry_point);

	ret = 0;

out:
	spin_unlock(&g_spmd_pm.lock);

	return ret;
}

/*******************************************************************************
 * This CPU has been turned on. Enter the SPMC to initialize S-EL1 or S-EL2. As
 * part of its initialization path, the SPMC will initialize any SPs that it
 * manages. Entry into the SPMC is done after initializing minimal
 * architectural state that guarantees safe execution.
 ******************************************************************************/
static void spmd_cpu_on_finish_handler(u_register_t unused)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	unsigned int linear_id = plat_my_core_pos();
	el3_state_t *el3_state;
	uintptr_t entry_point;
	uint64_t rc;

	assert(ctx != NULL);
	assert(ctx->state != SPMC_STATE_ON);

	spin_lock(&g_spmd_pm.lock);

	/*
	 * Leave open the possibility that the SPMC does not call
	 * FFA_SECONDARY_EP_REGISTER, in which case the primary core
	 * entry address is re-used for booting secondary cores.
	 */
	if (g_spmd_pm.secondary_ep_locked == true) {
		/*
		 * The CPU context has already been initialized at boot time
		 * (in spmd_spmc_init by a call to cm_setup_context). Adjust
		 * the target core entry point below, based on the address
		 * passed via FFA_SECONDARY_EP_REGISTER.
		 */
		entry_point = g_spmd_pm.secondary_ep;
		el3_state = get_el3state_ctx(&ctx->cpu_ctx);
		write_ctx_reg(el3_state, CTX_ELR_EL3, entry_point);
	}

	spin_unlock(&g_spmd_pm.lock);
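	/*
	 * spmd_spm_core_sync_entry() below performs a synchronous world
	 * switch into the SPMC context configured above and only returns
	 * once the SPMC hands control back to the SPMD. The context state
	 * is moved to ON_PENDING first and promoted to ON on success.
	 */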
	/* Mark CPU as initiating ON operation. */
	ctx->state = SPMC_STATE_ON_PENDING;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("%s failed (%" PRIu64 ") on CPU%u\n", __func__, rc,
		      linear_id);
		ctx->state = SPMC_STATE_OFF;
		return;
	}

	ctx->state = SPMC_STATE_ON;

	VERBOSE("CPU %u on!\n", linear_id);
}

/*******************************************************************************
 * spmd_cpu_off_handler
 ******************************************************************************/
static int32_t spmd_cpu_off_handler(u_register_t unused)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	unsigned int linear_id = plat_my_core_pos();
	int64_t rc;

	assert(ctx != NULL);
	assert(ctx->state != SPMC_STATE_OFF);

	/* Build an SPMD to SPMC direct message request. */
	spmd_build_spmc_message(get_gpregs_ctx(&ctx->cpu_ctx), PSCI_CPU_OFF);

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0LL) {
		ERROR("%s failed (%" PRId64 ") on CPU%u\n", __func__, rc,
		      linear_id);
	}

	/* Expect a direct message response from the SPMC. */
	u_register_t ffa_resp_func = read_ctx_reg(get_gpregs_ctx(&ctx->cpu_ctx),
						  CTX_GPREG_X0);
	if (ffa_resp_func != FFA_MSG_SEND_DIRECT_RESP_SMC32) {
		ERROR("%s invalid SPMC response (%lx).\n",
		      __func__, ffa_resp_func);
		return -EINVAL;
	}

	ctx->state = SPMC_STATE_OFF;

	VERBOSE("CPU %u off!\n", linear_id);

	return 0;
}

/*******************************************************************************
 * Structure populated by the SPM Dispatcher to perform any bookkeeping before
 * PSCI executes a power mgmt. operation.
 ******************************************************************************/
const spd_pm_ops_t spmd_pm = {
	.svc_on_finish = spmd_cpu_on_finish_handler,
	.svc_off = spmd_cpu_off_handler
};
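/*
 * Usage sketch (illustrative, not part of this translation unit): the SPMD
 * hands these callbacks to the generic PSCI layer during boot so that CPU
 * power management operations are routed through the SPMC. Assuming the
 * standard TF-A registration hook from <lib/psci/psci.h>:
 *
 *	psci_register_spd_pm_hook(&spmd_pm);
 *
 * After registration, PSCI invokes spmd_cpu_off_handler() before a core is
 * powered down and spmd_cpu_on_finish_handler() once a core completes
 * power-on.
 */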