/*
 * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stddef.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <plat/common/platform.h>

#include "psci_private.h"

/*
 * Helper functions for the CPU level spinlocks
 */
static inline void psci_spin_lock_cpu(unsigned int idx)
{
	spin_lock(&psci_cpu_pd_nodes[idx].cpu_lock);
}

static inline void psci_spin_unlock_cpu(unsigned int idx)
{
	spin_unlock(&psci_cpu_pd_nodes[idx].cpu_lock);
}

/*******************************************************************************
 * This function checks whether a cpu which has been requested to be turned on
 * is OFF to begin with.
 ******************************************************************************/
static int cpu_on_validate_state(aff_info_state_t aff_state)
{
	if (aff_state == AFF_STATE_ON)
		return PSCI_E_ALREADY_ON;

	if (aff_state == AFF_STATE_ON_PENDING)
		return PSCI_E_ON_PENDING;

	assert(aff_state == AFF_STATE_OFF);
	return PSCI_E_SUCCESS;
}
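/*
 * Illustrative sketch (kept out of the build): why the per-CPU lock below
 * matters. If two CPUs race to turn ON the same target, the lock taken in
 * psci_cpu_on_start() serializes them; the loser then observes a state
 * other than OFF and cpu_on_validate_state() maps that to an error code.
 * `target_mpidr' and `ep' are placeholders for illustration only.
 */
#if 0
	/* First caller wins: the target moves OFF -> ON_PENDING. */
	rc = psci_cpu_on_start(target_mpidr, &ep);	/* PSCI_E_SUCCESS */

	/* A concurrent second caller fails validation under the same lock. */
	rc = psci_cpu_on_start(target_mpidr, &ep);	/* PSCI_E_ON_PENDING, or
							 * PSCI_E_ALREADY_ON once
							 * the target has booted */
#endif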
/*******************************************************************************
 * Generic handler which is called to physically power on a cpu identified by
 * its mpidr. It performs the generic, architectural, platform setup and state
 * management to power on the target cpu, e.g. it will ensure that
 * enough information is stashed for it to resume execution in the non-secure
 * security state.
 *
 * The state of all the relevant power domains is changed after calling the
 * platform handler, as it can return an error.
 ******************************************************************************/
int psci_cpu_on_start(u_register_t target_cpu,
		      const entry_point_info_t *ep)
{
	int rc;
	aff_info_state_t target_aff_state;
	int ret = plat_core_pos_by_mpidr(target_cpu);
	unsigned int target_idx;

	/* Calling function must supply valid input arguments */
	assert(ep != NULL);

	if ((ret < 0) || (ret >= (int)PLATFORM_CORE_COUNT)) {
		ERROR("Unexpected core index.\n");
		panic();
	}

	target_idx = (unsigned int)ret;

	/*
	 * This function must only be called on platforms where the
	 * CPU_ON platform hooks have been implemented.
	 */
	assert((psci_plat_pm_ops->pwr_domain_on != NULL) &&
	       (psci_plat_pm_ops->pwr_domain_on_finish != NULL));

	/* Protect against multiple CPUs trying to turn ON the same target CPU */
	psci_spin_lock_cpu(target_idx);

	/*
	 * Generic management: Ensure that the cpu is off before it is
	 * turned on.
	 * Perform cache maintenance ahead of reading the target CPU state to
	 * ensure that the data is not stale.
	 * There is a theoretical edge case where the cache may contain stale
	 * data for the target CPU - this can occur under the following
	 * conditions:
	 * - the target CPU is in a different cluster from the current one
	 * - the target CPU was the last CPU to shut down in its cluster
	 * - the cluster was removed from coherency as part of the CPU shutdown
	 *
	 * In this case the cache maintenance that was performed as part of the
	 * target CPU's shutdown was not seen by the current CPU's cluster, and
	 * so the cache may contain stale data for the target CPU.
	 */
	flush_cpu_data_by_index(target_idx,
				psci_svc_cpu_data.aff_info_state);
	rc = cpu_on_validate_state(psci_get_aff_info_state_by_idx(target_idx));
	if (rc != PSCI_E_SUCCESS)
		goto exit;

	/*
	 * Call the cpu on handler registered by the Secure Payload Dispatcher
	 * to let it do any bookkeeping. If the handler encounters an error,
	 * it's expected to assert within.
	 */
	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_on != NULL))
		psci_spd_pm->svc_on(target_cpu);

	/*
	 * Set the Affinity info state of the target cpu to ON_PENDING.
	 * Flush aff_info_state as it will be accessed with caches
	 * turned OFF.
	 */
	psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
	flush_cpu_data_by_index(target_idx,
				psci_svc_cpu_data.aff_info_state);

	/*
	 * The cache line invalidation by the target CPU after setting the
	 * state to OFF (see psci_do_cpu_off()), could cause the update to
	 * aff_info_state to be invalidated. Retry the update if the target
	 * CPU aff_info_state is not ON_PENDING.
	 */
	target_aff_state = psci_get_aff_info_state_by_idx(target_idx);
	if (target_aff_state != AFF_STATE_ON_PENDING) {
		assert(target_aff_state == AFF_STATE_OFF);
		psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
		flush_cpu_data_by_index(target_idx,
					psci_svc_cpu_data.aff_info_state);

		assert(psci_get_aff_info_state_by_idx(target_idx) ==
		       AFF_STATE_ON_PENDING);
	}

	/*
	 * Perform generic, architecture and platform specific handling.
	 */
	/*
	 * Plat. management: Give the platform the current state
	 * of the target cpu to allow it to perform the necessary
	 * steps to power on.
	 */
	rc = psci_plat_pm_ops->pwr_domain_on(target_cpu);
	assert((rc == PSCI_E_SUCCESS) || (rc == PSCI_E_INTERN_FAIL));

	if (rc == PSCI_E_SUCCESS) {
		/* Store the re-entry information for the non-secure world. */
		cm_init_context_by_index(target_idx, ep);
	} else {
		/* Restore the state on error. */
		psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_OFF);
		flush_cpu_data_by_index(target_idx,
					psci_svc_cpu_data.aff_info_state);
	}

exit:
	psci_spin_unlock_cpu(target_idx);
	return rc;
}
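/*
 * For context, a sketch of how psci_cpu_on_start() is reached, paraphrased
 * from the PSCI_CPU_ON SMC handler in psci_main.c. This is illustrative,
 * may differ between TF-A versions, and is not part of this file.
 */
#if 0
int psci_cpu_on(u_register_t target_cpu, uintptr_t entrypoint,
		u_register_t context_id)
{
	int rc;
	entry_point_info_t ep;

	/* Validate the entry point and populate the entry_point_info. */
	rc = psci_validate_entry_point(&ep, entrypoint, context_id);
	if (rc != PSCI_E_SUCCESS)
		return rc;

	/* Hand over to the generic handler above to power on the core. */
	return psci_cpu_on_start(target_cpu, &ep);
}
#endif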
/*******************************************************************************
 * The following function finishes an earlier power on request. It is called
 * by the common finisher routine in psci_common.c. The `state_info` is the
 * psci_power_state from which this CPU has woken up.
 ******************************************************************************/
void psci_cpu_on_finish(unsigned int cpu_idx, const psci_power_state_t *state_info)
{
	/*
	 * Plat. management: Perform the platform specific actions
	 * for this cpu e.g. enabling the gic or zeroing the mailbox
	 * register. The actual state of this cpu has already been
	 * changed.
	 */
	psci_plat_pm_ops->pwr_domain_on_finish(state_info);

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	/*
	 * Arch. management: Enable data cache and manage stack memory
	 */
	psci_do_pwrup_cache_maintenance();
#endif

	/*
	 * Plat. management: Perform any platform specific actions which
	 * can only be done with the cpu and the cluster guaranteed to
	 * be coherent.
	 */
	if (psci_plat_pm_ops->pwr_domain_on_finish_late != NULL)
		psci_plat_pm_ops->pwr_domain_on_finish_late(state_info);

	/*
	 * All the platform specific actions for turning this cpu
	 * on have completed. Perform enough arch. initialization
	 * to run in the non-secure address space.
	 */
	psci_arch_setup();

	/*
	 * Lock the CPU spin lock to make sure that the context initialization
	 * is done. Since the lock is only used in this function to create
	 * a synchronization point with cpu_on_start(), it can be released
	 * immediately.
	 */
	psci_spin_lock_cpu(cpu_idx);
	psci_spin_unlock_cpu(cpu_idx);

	/* Ensure we have been explicitly woken up by another cpu */
	assert(psci_get_aff_info_state() == AFF_STATE_ON_PENDING);

	/*
	 * Call the cpu on finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters
	 * an error, it's expected to assert within.
	 */
	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_on_finish != NULL))
		psci_spd_pm->svc_on_finish(0);

	PUBLISH_EVENT(psci_cpu_on_finish);

	/*
	 * Populate the mpidr field within the cpu node array.
	 * This needs to be done only once.
	 */
	psci_cpu_pd_nodes[cpu_idx].mpidr = read_mpidr() & MPIDR_AFFINITY_MASK;

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the cpu_on
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit_ns();
}
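/*
 * A minimal sketch, for reference, of the platform hooks this file relies on
 * (pwr_domain_on / pwr_domain_on_finish), assuming a hypothetical platform
 * whose power controller takes a warm boot entrypoint. hyp_pwrc_on() and
 * hyp_gic_cpuif_enable() are invented names, not TF-A APIs; only
 * plat_setup_psci_ops() and the plat_psci_ops_t layout follow the real
 * porting interface. Kept out of the build.
 */
#if 0
static uintptr_t hyp_sec_entrypoint;	/* warm boot entrypoint for BL31 */

static int hyp_pwr_domain_on(u_register_t mpidr)
{
	/*
	 * Program the entrypoint into the (hypothetical) power controller
	 * and release the target core from reset. Runs on the calling CPU.
	 */
	if (hyp_pwrc_on(mpidr, hyp_sec_entrypoint) != 0)
		return PSCI_E_INTERN_FAIL;

	return PSCI_E_SUCCESS;
}

static void hyp_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	/* Runs on the newly powered-on CPU, e.g. to enable its GIC interface. */
	hyp_gic_cpuif_enable();
}

static const plat_psci_ops_t hyp_psci_ops = {
	.pwr_domain_on		= hyp_pwr_domain_on,
	.pwr_domain_on_finish	= hyp_pwr_domain_on_finish,
};

/* The generic PSCI layer stashes these ops as psci_plat_pm_ops. */
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const plat_psci_ops_t **psci_ops)
{
	hyp_sec_entrypoint = sec_entrypoint;
	*psci_ops = &hyp_psci_ops;
	return 0;
}
#endif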