/*
 * Copyright (c) 2020-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <lib/el3_runtime/context_mgmt.h>
#include "spmd_private.h"

static struct {
	bool secondary_ep_locked;
	uintptr_t secondary_ep;
} g_spmd_pm;

/*******************************************************************************
 * spmd_build_spmc_message
 *
 * Builds an SPMD to SPMC direct message request.
 ******************************************************************************/
static void spmd_build_spmc_message(gp_regs_t *gpregs, unsigned long long message)
{
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
	write_ctx_reg(gpregs, CTX_GPREG_X1,
		      (SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
		       spmd_spmc_id_get());
	write_ctx_reg(gpregs, CTX_GPREG_X2, FFA_PARAM_MBZ);
	write_ctx_reg(gpregs, CTX_GPREG_X3, message);
}

/*******************************************************************************
 * spmd_pm_secondary_ep_register
 ******************************************************************************/
int spmd_pm_secondary_ep_register(uintptr_t entry_point)
{
	if (g_spmd_pm.secondary_ep_locked == true) {
		return FFA_ERROR_INVALID_PARAMETER;
	}

	/*
	 * Check that the entry_point address is a PA within the SPMC image:
	 * load_address <= entry_point < load_address + binary_size
	 */
	if (!spmd_check_address_in_binary_image(entry_point)) {
		ERROR("%s entry point is not within image boundaries\n",
		      __func__);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	g_spmd_pm.secondary_ep = entry_point;
	g_spmd_pm.secondary_ep_locked = true;

	VERBOSE("%s %lx\n", __func__, entry_point);

	return 0;
}

/*******************************************************************************
 * This CPU has been turned on. Enter the SPMC to initialise S-EL1 or S-EL2. As
 * part of its initialisation path, the SPMC will initialise any SPs that it
 * manages. Entry into the SPMC is done after initialising minimal
 * architectural state that guarantees safe execution.
 ******************************************************************************/
static void spmd_cpu_on_finish_handler(u_register_t unused)
{
	entry_point_info_t *spmc_ep_info = spmd_spmc_ep_info_get();
	spmd_spm_core_context_t *ctx = spmd_get_context();
	unsigned int linear_id = plat_my_core_pos();
	uint64_t rc;

	assert(ctx != NULL);
	assert(ctx->state != SPMC_STATE_ON);
	assert(spmc_ep_info != NULL);

	/*
	 * Leave the possibility that the SPMC does not call
	 * FFA_SECONDARY_EP_REGISTER, in which case the primary core entry
	 * point address is re-used for booting secondary cores.
	 */
	if (g_spmd_pm.secondary_ep_locked == true) {
		spmc_ep_info->pc = g_spmd_pm.secondary_ep;
	}

	cm_setup_context(&ctx->cpu_ctx, spmc_ep_info);

	/* Mark CPU as initiating ON operation. */
	ctx->state = SPMC_STATE_ON_PENDING;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("%s failed (%llu) on CPU%u\n", __func__, rc,
		      linear_id);
		ctx->state = SPMC_STATE_OFF;
		return;
	}

	ctx->state = SPMC_STATE_ON;

	VERBOSE("CPU %u on!\n", linear_id);
}

/*******************************************************************************
 * spmd_cpu_off_handler
 ******************************************************************************/
static int32_t spmd_cpu_off_handler(u_register_t unused)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	unsigned int linear_id = plat_my_core_pos();
	int64_t rc;

	assert(ctx != NULL);
	assert(ctx->state != SPMC_STATE_OFF);

	/* Build an SPMD to SPMC direct message request. */
	spmd_build_spmc_message(get_gpregs_ctx(&ctx->cpu_ctx), PSCI_CPU_OFF);

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0LL) {
		ERROR("%s failed (%lld) on CPU%u\n", __func__, rc, linear_id);
	}

	/* Expect a direct message response from the SPMC. */
	u_register_t ffa_resp_func = read_ctx_reg(get_gpregs_ctx(&ctx->cpu_ctx),
						  CTX_GPREG_X0);
	if (ffa_resp_func != FFA_MSG_SEND_DIRECT_RESP_SMC32) {
		ERROR("%s invalid SPMC response (%lx).\n",
		      __func__, ffa_resp_func);
		return -EINVAL;
	}

	ctx->state = SPMC_STATE_OFF;

	VERBOSE("CPU %u off!\n", linear_id);

	return 0;
}

/*******************************************************************************
 * Structure populated by the SPM Dispatcher to perform any bookkeeping before
 * PSCI executes a power mgmt. operation.
 ******************************************************************************/
const spd_pm_ops_t spmd_pm = {
	.svc_on_finish = spmd_cpu_on_finish_handler,
	.svc_off = spmd_cpu_off_handler
};