/*
 * Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stddef.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <drivers/arm/gic.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <plat/common/platform.h>

#include "psci_private.h"

/*
 * Helper functions for the CPU level spinlocks
 */
static inline void psci_spin_lock_cpu(unsigned int idx)
{
	spin_lock(&(PER_CPU_BY_INDEX(psci_cpu_pd_nodes, idx)->cpu_lock));
}

static inline void psci_spin_unlock_cpu(unsigned int idx)
{
	spin_unlock(&(PER_CPU_BY_INDEX(psci_cpu_pd_nodes, idx)->cpu_lock));
}

/*******************************************************************************
 * This function checks whether a cpu which has been requested to be turned on
 * is OFF to begin with.
 ******************************************************************************/
static int cpu_on_validate_state(aff_info_state_t aff_state)
{
	if (aff_state == AFF_STATE_ON) {
		return PSCI_E_ALREADY_ON;
	}

	if (aff_state == AFF_STATE_ON_PENDING) {
		return PSCI_E_ON_PENDING;
	}

	assert(aff_state == AFF_STATE_OFF);
	return PSCI_E_SUCCESS;
}
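
/*
 * For reference, the aff_info_state transitions driven by the CPU_ON flow
 * are:
 *
 *   OFF --(psci_cpu_on_start)--> ON_PENDING --(warm boot path)--> ON
 *
 * cpu_on_validate_state() above therefore only lets a CPU_ON request
 * proceed when the target cpu is still OFF.
 */
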
/*******************************************************************************
 * Generic handler which is called to physically power on a cpu identified by
 * its mpidr. It performs the generic, architectural, platform setup and state
 * management to power on the target cpu, e.g. it will ensure that
 * enough information is stashed for it to resume execution in the non-secure
 * security state.
 *
 * The affinity info state of the target cpu is updated before the platform
 * handler is invoked; since the handler can return an error, the update is
 * rolled back to OFF on failure.
 ******************************************************************************/
int psci_cpu_on_start(u_register_t target_cpu,
		      const entry_point_info_t *ep)
{
	int rc;
	aff_info_state_t target_aff_state;
	unsigned int target_idx = (unsigned int)plat_core_pos_by_mpidr(target_cpu);

	/*
	 * This function must only be called on platforms where the
	 * CPU_ON platform hooks have been implemented.
	 */
	assert((psci_plat_pm_ops->pwr_domain_on != NULL) &&
	       (psci_plat_pm_ops->pwr_domain_on_finish != NULL));

	/* Protect against multiple CPUs trying to turn ON the same target CPU */
	psci_spin_lock_cpu(target_idx);

	/*
	 * Generic management: Ensure that the cpu is OFF before it is
	 * turned on.
	 * Perform cache maintenance ahead of reading the target CPU state to
	 * ensure that the data is not stale.
	 * There is a theoretical edge case where the cache may contain stale
	 * data for the target CPU - this can occur under the following
	 * conditions:
	 * - the target CPU is in a different cluster from the current one
	 * - the target CPU was the last CPU to shut down in its cluster
	 * - the cluster was removed from coherency as part of the CPU shutdown
	 *
	 * In this case the cache maintenance that was performed as part of the
	 * target CPU's shutdown was not seen by the current CPU's cluster, and
	 * so the cache may contain stale data for the target CPU.
	 */
	flush_cpu_data_by_index(target_idx, psci_svc_cpu_data);
	rc = cpu_on_validate_state(psci_get_aff_info_state_by_idx(target_idx));
	if (rc != PSCI_E_SUCCESS) {
		goto on_exit;
	}

	/*
	 * Call the cpu on handler registered by the Secure Payload Dispatcher
	 * to let it do any bookkeeping. If the handler encounters an error,
	 * it is expected to assert within.
	 */
	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_on != NULL)) {
		psci_spd_pm->svc_on(target_cpu);
	}

	/*
	 * Set the Affinity info state of the target cpu to ON_PENDING.
	 * Flush aff_info_state as it will be accessed with caches
	 * turned OFF.
	 */
	psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
	flush_cpu_data_by_index(target_idx, psci_svc_cpu_data);

	/*
	 * The cache line invalidation by the target CPU after setting the
	 * state to OFF (see psci_do_cpu_off()) could cause the update to
	 * aff_info_state to be invalidated. Retry the update if the target
	 * CPU aff_info_state is not ON_PENDING.
	 */
	target_aff_state = psci_get_aff_info_state_by_idx(target_idx);
	if (target_aff_state != AFF_STATE_ON_PENDING) {
		assert(target_aff_state == AFF_STATE_OFF);
		psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
		flush_cpu_data_by_index(target_idx, psci_svc_cpu_data);

		assert(psci_get_aff_info_state_by_idx(target_idx) ==
		       AFF_STATE_ON_PENDING);
	}

	/*
	 * Perform generic, architecture and platform specific handling.
	 */
	/*
	 * Plat. management: Give the platform the current state
	 * of the target cpu to allow it to perform the necessary
	 * steps to power on.
	 */
	rc = psci_plat_pm_ops->pwr_domain_on(target_cpu);
	assert((rc == PSCI_E_SUCCESS) || (rc == PSCI_E_INTERN_FAIL));

	if (rc != PSCI_E_SUCCESS) {
		/* Restore the state on error. */
		psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_OFF);
		flush_cpu_data_by_index(target_idx, psci_svc_cpu_data);
	}

on_exit:
	psci_spin_unlock_cpu(target_idx);
	return rc;
}
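
/*
 * Illustrative sketch of the SPD hooks consumed above. This is a minimal,
 * hypothetical spd_pm_ops_t instance showing the shape of the svc_on and
 * svc_on_finish callbacks that psci_cpu_on_start() and psci_cpu_on_finish()
 * invoke; a real dispatcher would register its own instance via
 * psci_register_spd_pm_hook() and do real bookkeeping in the bodies. The
 * example_* names below are assumptions, not part of the PSCI library.
 */
static void example_spd_svc_on(u_register_t target_cpu)
{
	/* e.g. mark the Secure Payload context on target_cpu as uninitialised */
	(void)target_cpu;
}

static void example_spd_svc_on_finish(u_register_t unused)
{
	/* e.g. initialise and enter the Secure Payload on this cpu */
	(void)unused;
}

const spd_pm_ops_t example_spd_pm_ops = {
	.svc_on = example_spd_svc_on,
	.svc_on_finish = example_spd_svc_on_finish,
};
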
/*******************************************************************************
 * The following function finishes an earlier power on request. It is
 * called by the common finisher routine in psci_common.c. The `state_info`
 * is the psci_power_state from which this CPU has woken up.
 ******************************************************************************/
void psci_cpu_on_finish(unsigned int cpu_idx, const psci_power_state_t *state_info)
{
	/*
	 * Plat. management: Perform the platform specific actions
	 * for this cpu e.g. enabling the gic or zeroing the mailbox
	 * register. The actual state of this cpu has already been
	 * changed.
	 */
	psci_plat_pm_ops->pwr_domain_on_finish(state_info);

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	/*
	 * Arch. management: Enable data cache and manage stack memory
	 */
	psci_do_pwrup_cache_maintenance();
#endif

	/*
	 * Plat. management: Perform any platform specific actions which
	 * can only be done with the cpu and the cluster guaranteed to
	 * be coherent.
	 */
	if (psci_plat_pm_ops->pwr_domain_on_finish_late != NULL) {
		psci_plat_pm_ops->pwr_domain_on_finish_late(state_info);
	}

#if USE_GIC_DRIVER
	/* GIC init after platform has had a say with MMU on */
	gic_pcpu_init(cpu_idx);
	gic_cpuif_enable(cpu_idx);
#endif /* USE_GIC_DRIVER */

	/*
	 * All the platform specific actions for turning this cpu
	 * on have completed. Perform enough arch. initialization
	 * to run in the non-secure address space.
	 */
	psci_arch_setup();

	/*
	 * Lock the CPU spin lock to make sure that the context initialization
	 * is done. Since the lock is only used in this function to create
	 * a synchronization point with cpu_on_start(), it can be released
	 * immediately.
	 */
	psci_spin_lock_cpu(cpu_idx);
	psci_spin_unlock_cpu(cpu_idx);

	/* Ensure we have been explicitly woken up by another cpu */
	assert(psci_get_aff_info_state() == AFF_STATE_ON_PENDING);

	/*
	 * Call the cpu on finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters
	 * an error, it is expected to assert within.
	 */
	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_on_finish != NULL)) {
		psci_spd_pm->svc_on_finish(0);
	}

	PUBLISH_EVENT(psci_cpu_on_finish);

	/* Populate the mpidr field within the cpu node array */
	/* This needs to be done only once */
	PER_CPU_BY_INDEX(psci_cpu_pd_nodes, cpu_idx)->mpidr =
		read_mpidr() & MPIDR_AFFINITY_MASK;
}
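
/*
 * Illustrative sketch of the platform hook contract asserted in
 * psci_cpu_on_start(): pwr_domain_on must return either PSCI_E_SUCCESS or
 * PSCI_E_INTERN_FAIL. Everything below is hypothetical - the helper
 * plat_example_release_core() stands in for whatever power controller
 * programming a given platform needs, and no real platform is implied.
 */
static int plat_example_release_core(unsigned int core)
{
	/* Hypothetical: poke the power controller to release 'core' from reset. */
	(void)core;
	return 0;
}

int plat_example_pwr_domain_on(u_register_t mpidr);

int plat_example_pwr_domain_on(u_register_t mpidr)
{
	int core = plat_core_pos_by_mpidr(mpidr);

	/* An invalid mpidr maps to the only error code the PSCI layer accepts. */
	if (core < 0) {
		return PSCI_E_INTERN_FAIL;
	}

	if (plat_example_release_core((unsigned int)core) != 0) {
		return PSCI_E_INTERN_FAIL;
	}

	return PSCI_E_SUCCESS;
}

/*
 * A platform would publish such a hook through its plat_psci_ops_t, e.g.:
 *
 *   .pwr_domain_on = plat_example_pwr_domain_on,
 */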