1 /* 2 * Copyright (c) 2013-2026, Arm Limited and Contributors. All rights reserved. 3 * 4 * SPDX-License-Identifier: BSD-3-Clause 5 */ 6 7 #include <assert.h> 8 #include <stddef.h> 9 10 #include <arch.h> 11 #include <arch_helpers.h> 12 #include <common/bl_common.h> 13 #include <common/debug.h> 14 #include <drivers/arm/gic.h> 15 #include <lib/el3_runtime/context_mgmt.h> 16 #include <lib/el3_runtime/pubsub_events.h> 17 #include <plat/common/platform.h> 18 19 #include "psci_private.h" 20 21 /* 22 * Helper functions for the CPU level spinlocks 23 */ 24 static inline void psci_spin_lock_cpu(unsigned int idx) 25 { 26 spin_lock(&(PER_CPU_BY_INDEX(psci_cpu_pd_nodes, idx)->cpu_lock)); 27 } 28 29 static inline void psci_spin_unlock_cpu(unsigned int idx) 30 { 31 spin_unlock(&(PER_CPU_BY_INDEX(psci_cpu_pd_nodes, idx)->cpu_lock)); 32 } 33 34 /******************************************************************************* 35 * This function checks whether a cpu which has been requested to be turned on 36 * is OFF to begin with. 37 ******************************************************************************/ 38 static int cpu_on_validate_state(aff_info_state_t aff_state) 39 { 40 if (aff_state == AFF_STATE_ON) { 41 return PSCI_E_ALREADY_ON; 42 } 43 44 if (aff_state == AFF_STATE_ON_PENDING) { 45 return PSCI_E_ON_PENDING; 46 } 47 48 assert(aff_state == AFF_STATE_OFF); 49 return PSCI_E_SUCCESS; 50 } 51 52 /******************************************************************************* 53 * Generic handler which is called to physically power on a cpu identified by 54 * its mpidr. It performs the generic, architectural, platform setup and state 55 * management to power on the target cpu e.g. it will ensure that 56 * enough information is stashed for it to resume execution in the non-secure 57 * security state. 58 * 59 * The state of all the relevant power domains are changed after calling the 60 * platform handler as it can return error. 
 ******************************************************************************/
int psci_cpu_on_start(u_register_t target_cpu)
{
	int rc;
	aff_info_state_t target_aff_state;
	/*
	 * NOTE(review): plat_core_pos_by_mpidr() presumably returns a negative
	 * value for an invalid mpidr; the cast assumes target_cpu was already
	 * validated by the PSCI dispatch layer - TODO confirm against caller.
	 */
	unsigned int target_idx = (unsigned int)plat_core_pos_by_mpidr(target_cpu);

	/*
	 * This function must only be called on platforms where the
	 * CPU_ON platform hooks have been implemented.
	 */
	assert((psci_plat_pm_ops->pwr_domain_on != NULL) &&
	       (psci_plat_pm_ops->pwr_domain_on_finish != NULL));

	/* Protect against multiple CPUs trying to turn ON the same target CPU */
	psci_spin_lock_cpu(target_idx);

	/*
	 * Generic management: Ensure that the cpu is off to be
	 * turned on.
	 * Perform cache maintenance ahead of reading the target CPU state to
	 * ensure that the data is not stale.
	 * There is a theoretical edge case where the cache may contain stale
	 * data for the target CPU data - this can occur under the following
	 * conditions:
	 * - the target CPU is in another cluster from the current
	 * - the target CPU was the last CPU to shutdown on its cluster
	 * - the cluster was removed from coherency as part of the CPU shutdown
	 *
	 * In this case the cache maintenance that was performed as part of the
	 * target CPUs shutdown was not seen by the current CPU's cluster. And
	 * so the cache may contain stale data for the target CPU.
	 */
	flush_cpu_data_by_index(target_idx, psci_svc_cpu_data);
	rc = cpu_on_validate_state(psci_get_aff_info_state_by_idx(target_idx));
	if (rc != PSCI_E_SUCCESS) {
		goto on_exit;
	}

	/*
	 * Call the cpu on handler registered by the Secure Payload Dispatcher
	 * to let it do any bookkeeping. If the handler encounters an error,
	 * it's expected to assert within
	 */
	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_on != NULL)) {
		psci_spd_pm->svc_on(target_cpu);
	}

	/*
	 * Set the Affinity info state of the target cpu to ON_PENDING.
	 * Flush aff_info_state as it will be accessed with caches
	 * turned OFF.
	 */
	psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
	flush_cpu_data_by_index(target_idx, psci_svc_cpu_data);

	/*
	 * The cache line invalidation by the target CPU after setting the
	 * state to OFF (see psci_do_cpu_off()), could cause the update to
	 * aff_info_state to be invalidated. Retry the update if the target
	 * CPU aff_info_state is not ON_PENDING.
	 */
	target_aff_state = psci_get_aff_info_state_by_idx(target_idx);
	if (target_aff_state != AFF_STATE_ON_PENDING) {
		/* The only state the race can have left behind is OFF. */
		assert(target_aff_state == AFF_STATE_OFF);
		psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
		flush_cpu_data_by_index(target_idx, psci_svc_cpu_data);

		assert(psci_get_aff_info_state_by_idx(target_idx) ==
			AFF_STATE_ON_PENDING);
	}

	/*
	 * Perform generic, architecture and platform specific handling.
	 */
	/*
	 * Plat. management: Give the platform the current state
	 * of the target cpu to allow it to perform the necessary
	 * steps to power on.
	 */
	rc = psci_plat_pm_ops->pwr_domain_on(target_cpu);
	assert((rc == PSCI_E_SUCCESS) || (rc == PSCI_E_INTERN_FAIL));

	if (rc != PSCI_E_SUCCESS) {
		/* Restore the state on error. */
		psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_OFF);
		flush_cpu_data_by_index(target_idx, psci_svc_cpu_data);
	}

on_exit:
	psci_spin_unlock_cpu(target_idx);
	return rc;
}

/*******************************************************************************
 * The following function finishes an earlier power on request. It is
 * called by the common finisher routine in psci_common.c. The `state_info`
 * is the psci_power_state from which this CPU has woken up from.
 ******************************************************************************/
void psci_cpu_on_finish(unsigned int cpu_idx, const psci_power_state_t *state_info)
{
	/*
	 * Plat. management: Perform the platform specific actions
	 * for this cpu e.g. enabling the gic or zeroing the mailbox
	 * register. The actual state of this cpu has already been
	 * changed.
	 */
	psci_plat_pm_ops->pwr_domain_on_finish(state_info);

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	/*
	 * Arch. management: Enable data cache and manage stack memory
	 */
	psci_do_pwrup_cache_maintenance();
#endif

#if USE_GIC_DRIVER
	/*
	 * Set up this core's GIC interface, with caching on, before the late
	 * platform setup so that it has a chance to configure interrupts. The
	 * GIC provides a hook to set itself up early.
	 */
	gic_pcpu_init(cpu_idx);
	gic_cpuif_enable(cpu_idx);
#endif /* USE_GIC_DRIVER */

	/*
	 * Plat. management: Perform any platform specific actions which
	 * can only be done with the cpu and the cluster guaranteed to
	 * be coherent.
	 */
	if (psci_plat_pm_ops->pwr_domain_on_finish_late != NULL) {
		psci_plat_pm_ops->pwr_domain_on_finish_late(state_info);
	}

	/*
	 * All the platform specific actions for turning this cpu
	 * on have completed. Perform enough arch.initialization
	 * to run in the non-secure address space.
	 */
	psci_arch_setup();

	/*
	 * Lock the CPU spin lock to make sure that the context initialization
	 * is done. Since the lock is only used in this function to create
	 * a synchronization point with cpu_on_start(), it can be released
	 * immediately.
	 */
	psci_spin_lock_cpu(cpu_idx);
	psci_spin_unlock_cpu(cpu_idx);

	/* Ensure we have been explicitly woken up by another cpu */
	assert(psci_get_aff_info_state() == AFF_STATE_ON_PENDING);

	/*
	 * Call the cpu on finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters an
	 * error, it's expected to assert within
	 */
	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_on_finish != NULL)) {
		psci_spd_pm->svc_on_finish(0);
	}

	/* Notify pubsub subscribers that this CPU finished powering on. */
	PUBLISH_EVENT(psci_cpu_on_finish);

	/* Populate the mpidr field within the cpu node array */
	/* This needs to be done only once */
	PER_CPU_BY_INDEX(psci_cpu_pd_nodes, cpu_idx)->mpidr = read_mpidr() & MPIDR_AFFINITY_MASK;
}