1 /* 2 * Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved. 3 * 4 * SPDX-License-Identifier: BSD-3-Clause 5 */ 6 7 #include <assert.h> 8 #include <stddef.h> 9 10 #include <arch.h> 11 #include <arch_helpers.h> 12 #include <common/bl_common.h> 13 #include <common/debug.h> 14 #include <context.h> 15 #include <lib/el3_runtime/context_mgmt.h> 16 #include <lib/el3_runtime/cpu_data.h> 17 #include <lib/el3_runtime/pubsub_events.h> 18 #include <lib/pmf/pmf.h> 19 #include <lib/runtime_instr.h> 20 #include <plat/common/platform.h> 21 22 #include "psci_private.h" 23 24 /******************************************************************************* 25 * This function does generic and platform specific operations after a wake-up 26 * from standby/retention states at multiple power levels. 27 ******************************************************************************/ 28 static void psci_cpu_suspend_to_standby_finish(unsigned int end_pwrlvl, 29 psci_power_state_t *state_info) 30 { 31 /* 32 * Plat. management: Allow the platform to do operations 33 * on waking up from retention. 34 */ 35 psci_plat_pm_ops->pwr_domain_suspend_finish(state_info); 36 37 /* This loses its meaning when not suspending, reset so it's correct for OFF */ 38 psci_set_suspend_pwrlvl(PLAT_MAX_PWR_LVL); 39 } 40 41 /******************************************************************************* 42 * This function does generic and platform specific suspend to power down 43 * operations. 
 ******************************************************************************/
static void psci_suspend_to_pwrdown_start(unsigned int idx,
					  unsigned int end_pwrlvl,
					  unsigned int max_off_lvl,
					  const psci_power_state_t *state_info)
{
	/* Let subscribers (e.g. SPM, RAS) know this CPU is about to power down. */
	PUBLISH_EVENT_ARG(psci_suspend_pwrdown_start, &idx);

#if PSCI_OS_INIT_MODE
	/*
	 * In OS-initiated mode the terminal power level recorded for the
	 * finisher is fixed by the platform rather than taken from the
	 * caller's request.
	 */
#ifdef PLAT_MAX_CPU_SUSPEND_PWR_LVL
	end_pwrlvl = PLAT_MAX_CPU_SUSPEND_PWR_LVL;
#else
	end_pwrlvl = PLAT_MAX_PWR_LVL;
#endif
#endif

	/* Save PSCI target power level for the suspend finisher handler */
	psci_set_suspend_pwrlvl(end_pwrlvl);

	/*
	 * Flush the target power level as it might be accessed on power up with
	 * Data cache disabled.
	 */
	psci_flush_cpu_data(psci_svc_cpu_data.target_pwrlvl);

	/*
	 * Call the cpu suspend handler registered by the Secure Payload
	 * Dispatcher to let it do any book-keeping. If the handler encounters an
	 * error, it's expected to assert within
	 */
	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_suspend != NULL))
		psci_spd_pm->svc_suspend(max_off_lvl);

#if !HW_ASSISTED_COHERENCY
	/*
	 * Plat. management: Allow the platform to perform any early
	 * actions required to power down the CPU. This might be useful for
	 * HW_ASSISTED_COHERENCY = 0 platforms that can safely perform these
	 * actions with data caches enabled.
	 */
	if (psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early != NULL)
		psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early(state_info);
#endif
	/*
	 * Arch. management. Initiate power down sequence.
	 */
	psci_pwrdown_cpu_start(max_off_lvl);
}

/*******************************************************************************
 * Top level handler which is called when a cpu wants to suspend its execution.
 * It is assumed that along with suspending the cpu power domain, power domains
 * at higher levels until the target power level will be suspended as well.
It 97 * coordinates with the platform to negotiate the target state for each of 98 * the power domain level till the target power domain level. It then performs 99 * generic, architectural, platform setup and state management required to 100 * suspend that power domain level and power domain levels below it. 101 * e.g. For a cpu that's to be suspended, it could mean programming the 102 * power controller whereas for a cluster that's to be suspended, it will call 103 * the platform specific code which will disable coherency at the interconnect 104 * level if the cpu is the last in the cluster and also the program the power 105 * controller. 106 * 107 * All the required parameter checks are performed at the beginning and after 108 * the state transition has been done, no further error is expected and it is 109 * not possible to undo any of the actions taken beyond that point. 110 ******************************************************************************/ 111 int psci_cpu_suspend_start(unsigned int idx, 112 unsigned int end_pwrlvl, 113 psci_power_state_t *state_info, 114 unsigned int is_power_down_state) 115 { 116 int rc = PSCI_E_SUCCESS; 117 unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0}; 118 unsigned int max_off_lvl = 0; 119 120 /* 121 * This function must only be called on platforms where the 122 * CPU_SUSPEND platform hooks have been implemented. 123 */ 124 assert((psci_plat_pm_ops->pwr_domain_suspend != NULL) && 125 (psci_plat_pm_ops->pwr_domain_suspend_finish != NULL)); 126 127 /* Get the parent nodes */ 128 psci_get_parent_pwr_domain_nodes(idx, end_pwrlvl, parent_nodes); 129 130 /* 131 * This function acquires the lock corresponding to each power 132 * level so that by the time all locks are taken, the system topology 133 * is snapshot and state management can be done safely. 
134 */ 135 psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes); 136 137 /* 138 * We check if there are any pending interrupts after the delay 139 * introduced by lock contention to increase the chances of early 140 * detection that a wake-up interrupt has fired. 141 */ 142 if (read_isr_el1() != 0U) { 143 goto suspend_exit; 144 } 145 146 #if PSCI_OS_INIT_MODE 147 if (psci_suspend_mode == OS_INIT) { 148 /* 149 * This function validates the requested state info for 150 * OS-initiated mode. 151 */ 152 rc = psci_validate_state_coordination(idx, end_pwrlvl, state_info); 153 if (rc != PSCI_E_SUCCESS) { 154 goto suspend_exit; 155 } 156 } else { 157 #endif 158 /* 159 * This function is passed the requested state info and 160 * it returns the negotiated state info for each power level upto 161 * the end level specified. 162 */ 163 psci_do_state_coordination(idx, end_pwrlvl, state_info); 164 #if PSCI_OS_INIT_MODE 165 } 166 #endif 167 168 #if PSCI_OS_INIT_MODE 169 if (psci_plat_pm_ops->pwr_domain_validate_suspend != NULL) { 170 rc = psci_plat_pm_ops->pwr_domain_validate_suspend(state_info); 171 if (rc != PSCI_E_SUCCESS) { 172 goto suspend_exit; 173 } 174 } 175 #endif 176 177 /* Update the target state in the power domain nodes */ 178 psci_set_target_local_pwr_states(idx, end_pwrlvl, state_info); 179 180 #if ENABLE_PSCI_STAT 181 /* Update the last cpu for each level till end_pwrlvl */ 182 psci_stats_update_pwr_down(idx, end_pwrlvl, state_info); 183 #endif 184 185 if (is_power_down_state != 0U) { 186 /* 187 * WHen CTX_INCLUDE_EL2_REGS is usnet, we're probably runnig 188 * with some SPD that assumes the core is going off so it 189 * doesn't bother saving NS's context. Do that here until we 190 * figure out a way to make this coherent. 
191 */ 192 #if FEAT_PABANDON 193 #if !CTX_INCLUDE_EL2_REGS 194 cm_el1_sysregs_context_save(NON_SECURE); 195 #endif 196 #endif 197 max_off_lvl = psci_find_max_off_lvl(state_info); 198 psci_suspend_to_pwrdown_start(idx, end_pwrlvl, end_pwrlvl, state_info); 199 } 200 201 /* 202 * Plat. management: Allow the platform to perform the 203 * necessary actions to turn off this cpu e.g. set the 204 * platform defined mailbox with the psci entrypoint, 205 * program the power controller etc. 206 */ 207 208 psci_plat_pm_ops->pwr_domain_suspend(state_info); 209 210 #if ENABLE_PSCI_STAT 211 plat_psci_stat_accounting_start(state_info); 212 #endif 213 214 /* 215 * Release the locks corresponding to each power level in the 216 * reverse order to which they were acquired. 217 */ 218 psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes); 219 220 #if ENABLE_RUNTIME_INSTRUMENTATION 221 /* 222 * Update the timestamp with cache off. We assume this 223 * timestamp can only be read from the current CPU and the 224 * timestamp cache line will be flushed before return to 225 * normal world on wakeup. 226 */ 227 PMF_CAPTURE_TIMESTAMP(rt_instr_svc, 228 RT_INSTR_ENTER_HW_LOW_PWR, 229 PMF_NO_CACHE_MAINT); 230 #endif 231 232 if (is_power_down_state != 0U) { 233 if (psci_plat_pm_ops->pwr_domain_pwr_down != NULL) { 234 /* This function may not return */ 235 psci_plat_pm_ops->pwr_domain_pwr_down(state_info); 236 } 237 238 psci_pwrdown_cpu_end_wakeup(max_off_lvl); 239 } else { 240 /* 241 * We will reach here if only retention/standby states have been 242 * requested at multiple power levels. This means that the cpu 243 * context will be preserved. 244 */ 245 wfi(); 246 } 247 248 #if ENABLE_RUNTIME_INSTRUMENTATION 249 PMF_CAPTURE_TIMESTAMP(rt_instr_svc, 250 RT_INSTR_EXIT_HW_LOW_PWR, 251 PMF_NO_CACHE_MAINT); 252 #endif 253 254 psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes); 255 /* 256 * Find out which retention states this CPU has exited from until the 257 * 'end_pwrlvl'. 
The exit retention state could be deeper than the entry 258 * state as a result of state coordination amongst other CPUs post wfi. 259 */ 260 psci_get_target_local_pwr_states(idx, end_pwrlvl, state_info); 261 262 #if ENABLE_PSCI_STAT 263 plat_psci_stat_accounting_stop(state_info); 264 psci_stats_update_pwr_up(idx, end_pwrlvl, state_info); 265 #endif 266 267 /* 268 * Waking up means we've retained all context. Call the finishers to put 269 * the system back to a usable state. 270 */ 271 if (is_power_down_state != 0U) { 272 #if FEAT_PABANDON 273 psci_cpu_suspend_to_powerdown_finish(idx, max_off_lvl, state_info); 274 275 #if !CTX_INCLUDE_EL2_REGS 276 cm_el1_sysregs_context_restore(NON_SECURE); 277 #endif 278 #endif 279 } else { 280 psci_cpu_suspend_to_standby_finish(end_pwrlvl, state_info); 281 } 282 283 /* 284 * Set the requested and target state of this CPU and all the higher 285 * power domain levels for this CPU to run. 286 */ 287 psci_set_pwr_domains_to_run(idx, end_pwrlvl); 288 289 suspend_exit: 290 psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes); 291 292 return rc; 293 } 294 295 /******************************************************************************* 296 * The following functions finish an earlier suspend request. They 297 * are called by the common finisher routine in psci_common.c. The `state_info` 298 * is the psci_power_state from which this CPU has woken up from. 299 ******************************************************************************/ 300 void psci_cpu_suspend_to_powerdown_finish(unsigned int cpu_idx, unsigned int max_off_lvl, const psci_power_state_t *state_info) 301 { 302 unsigned int counter_freq; 303 304 /* Ensure we have been woken up from a suspended state */ 305 assert((psci_get_aff_info_state() == AFF_STATE_ON) && 306 (is_local_state_off( 307 state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]) != 0)); 308 309 /* 310 * Plat. management: Perform the platform specific actions 311 * before we change the state of the cpu e.g. 
enabling the
	 * gic or zeroing the mailbox register. If anything goes
	 * wrong then assert as there is no way to recover from this
	 * situation.
	 */
	psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	/* Arch. management: Enable the data cache, stack memory maintenance. */
	psci_do_pwrup_cache_maintenance();
#endif

	/* Re-init the cntfrq_el0 register */
	counter_freq = plat_get_syscnt_freq2();
	write_cntfrq_el0(counter_freq);

#if ENABLE_PAUTH
	/* Store APIAKey_EL1 key back into this CPU's per-cpu data after wake-up */
	set_cpu_data(apiakey[0], read_apiakeylo_el1());
	set_cpu_data(apiakey[1], read_apiakeyhi_el1());
#endif /* ENABLE_PAUTH */

	/*
	 * Call the cpu suspend finish handler registered by the Secure Payload
	 * Dispatcher to let it do any book-keeping. If the handler encounters an
	 * error, it's expected to assert within
	 */
	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_suspend_finish != NULL)) {
		psci_spd_pm->svc_suspend_finish(max_off_lvl);
	}

	/* This loses its meaning when not suspending, reset so it's correct for OFF */
	psci_set_suspend_pwrlvl(PLAT_MAX_PWR_LVL);

	/* Tell subscribers the power-down suspend sequence has fully completed. */
	PUBLISH_EVENT_ARG(psci_suspend_pwrdown_finish, &cpu_idx);
}