/*
 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <cpu_data.h>
#include <debug.h>
#include <platform.h>
#include <pmf.h>
#include <pubsub_events.h>
#include <runtime_instr.h>
#include <stddef.h>
#include "psci_private.h"

/*******************************************************************************
 * This function does generic and platform specific operations after a wake-up
 * from standby/retention states at multiple power levels.
 *
 * cpu_idx    - index of this CPU in the power domain topology.
 * end_pwrlvl - highest power level up to which locks are taken and exit
 *              states are queried.
 ******************************************************************************/
static void psci_suspend_to_standby_finisher(int cpu_idx,
					     unsigned int end_pwrlvl)
{
	psci_power_state_t state_info;

	psci_acquire_pwr_domain_locks(end_pwrlvl,
				      cpu_idx);

	/*
	 * Find out which retention states this CPU has exited from until the
	 * 'end_pwrlvl'. The exit retention state could be deeper than the entry
	 * state as a result of state coordination amongst other CPUs post wfi.
	 */
	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_stop(&state_info);
	psci_stats_update_pwr_up(end_pwrlvl, &state_info);
#endif

	/*
	 * Plat. management: Allow the platform to do operations
	 * on waking up from retention.
	 */
	psci_plat_pm_ops->pwr_domain_suspend_finish(&state_info);

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domain levels for this CPU to run.
	 */
	psci_set_pwr_domains_to_run(end_pwrlvl);

	psci_release_pwr_domain_locks(end_pwrlvl,
				      cpu_idx);
}

/*******************************************************************************
 * This function does generic and platform specific suspend to power down
 * operations. It is only called on the power-down path (i.e. when the local
 * CPU state is OFF), with the power-domain locks already held by the caller.
 *
 * end_pwrlvl - highest power level affected by this suspend; stashed so the
 *              finish handler knows how far to unwind on wake-up.
 * ep         - non-secure re-entry point saved into this CPU's context.
 * state_info - negotiated target power states for each level.
 ******************************************************************************/
static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
					  const entry_point_info_t *ep,
					  const psci_power_state_t *state_info)
{
	unsigned int max_off_lvl = psci_find_max_off_lvl(state_info);

	PUBLISH_EVENT(psci_suspend_pwrdown_start);

	/* Save PSCI target power level for the suspend finisher handler */
	psci_set_suspend_pwrlvl(end_pwrlvl);

	/*
	 * Flush the target power level as it might be accessed on power up with
	 * Data cache disabled.
	 */
	psci_flush_cpu_data(psci_svc_cpu_data.target_pwrlvl);

	/*
	 * Call the cpu suspend handler registered by the Secure Payload
	 * Dispatcher to let it do any book-keeping. If the handler encounters an
	 * error, it's expected to assert within
	 */
	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_suspend != NULL))
		psci_spd_pm->svc_suspend(max_off_lvl);

#if !HW_ASSISTED_COHERENCY
	/*
	 * Plat. management: Allow the platform to perform any early
	 * actions required to power down the CPU. This might be useful for
	 * HW_ASSISTED_COHERENCY = 0 platforms that can safely perform these
	 * actions with data caches enabled.
	 */
	if (psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early != NULL)
		psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early(state_info);
#endif

	/*
	 * Store the re-entry information for the non-secure world.
	 */
	cm_init_my_context(ep);

#if ENABLE_RUNTIME_INSTRUMENTATION

	/*
	 * Flush cache line so that even if CPU power down happens
	 * the timestamp update is reflected in memory.
	 */
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_ENTER_CFLUSH,
		PMF_CACHE_MAINT);
#endif

	/*
	 * Arch. management. Initiate power down sequence.
	 * TODO : Introduce a mechanism to query the cache level to flush
	 * and the cpu-ops power down to perform from the platform.
	 */
	psci_do_pwrdown_sequence(max_off_lvl);

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_EXIT_CFLUSH,
		PMF_NO_CACHE_MAINT);
#endif
}

/*******************************************************************************
 * Top level handler which is called when a cpu wants to suspend its execution.
 * It is assumed that along with suspending the cpu power domain, power domains
 * at higher levels until the target power level will be suspended as well. It
 * coordinates with the platform to negotiate the target state for each of
 * the power domain level till the target power domain level. It then performs
 * generic, architectural, platform setup and state management required to
 * suspend that power domain level and power domain levels below it.
 * e.g. For a cpu that's to be suspended, it could mean programming the
 * power controller whereas for a cluster that's to be suspended, it will call
 * the platform specific code which will disable coherency at the interconnect
 * level if the cpu is the last in the cluster and also program the power
 * controller.
 *
 * All the required parameter checks are performed at the beginning and after
 * the state transition has been done, no further error is expected and it is
 * not possible to undo any of the actions taken beyond that point.
 ******************************************************************************/
void psci_cpu_suspend_start(const entry_point_info_t *ep,
			    unsigned int end_pwrlvl,
			    psci_power_state_t *state_info,
			    unsigned int is_power_down_state)
{
	int skip_wfi = 0;
	int idx = (int) plat_my_core_pos();

	/*
	 * This function must only be called on platforms where the
	 * CPU_SUSPEND platform hooks have been implemented.
	 */
	assert((psci_plat_pm_ops->pwr_domain_suspend != NULL) &&
	       (psci_plat_pm_ops->pwr_domain_suspend_finish != NULL));

	/*
	 * This function acquires the lock corresponding to each power
	 * level so that by the time all locks are taken, the system topology
	 * is snapshot and state management can be done safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl,
				      idx);

	/*
	 * We check if there are any pending interrupts after the delay
	 * introduced by lock contention to increase the chances of early
	 * detection that a wake-up interrupt has fired.
	 */
	if (read_isr_el1() != 0U) {
		skip_wfi = 1;
		goto exit;
	}

	/*
	 * This function is passed the requested state info and
	 * it returns the negotiated state info for each power level upto
	 * the end level specified.
	 */
	psci_do_state_coordination(end_pwrlvl, state_info);

#if ENABLE_PSCI_STAT
	/* Update the last cpu for each level till end_pwrlvl */
	psci_stats_update_pwr_down(end_pwrlvl, state_info);
#endif

	if (is_power_down_state != 0U)
		psci_suspend_to_pwrdown_start(end_pwrlvl, ep, state_info);

	/*
	 * Plat. management: Allow the platform to perform the
	 * necessary actions to turn off this cpu e.g. set the
	 * platform defined mailbox with the psci entrypoint,
	 * program the power controller etc.
	 */
	psci_plat_pm_ops->pwr_domain_suspend(state_info);

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_start(state_info);
#endif

exit:
	/*
	 * Release the locks corresponding to each power level in the
	 * reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl,
				      idx);
	if (skip_wfi == 1)
		return;

	if (is_power_down_state != 0U) {
#if ENABLE_RUNTIME_INSTRUMENTATION

		/*
		 * Update the timestamp with cache off.  We assume this
		 * timestamp can only be read from the current CPU and the
		 * timestamp cache line will be flushed before return to
		 * normal world on wakeup.
		 */
		PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		    RT_INSTR_ENTER_HW_LOW_PWR,
		    PMF_NO_CACHE_MAINT);
#endif

		/* The function calls below must not return */
		if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi != NULL)
			psci_plat_pm_ops->pwr_domain_pwr_down_wfi(state_info);
		else
			psci_power_down_wfi();
	}

	/*
	 * NOTE: execution only reaches this point on the retention/standby
	 * path; the power-down branch above never returns.
	 */
#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
	    RT_INSTR_ENTER_HW_LOW_PWR,
	    PMF_NO_CACHE_MAINT);
#endif

	/*
	 * We will reach here if only retention/standby states have been
	 * requested at multiple power levels. This means that the cpu
	 * context will be preserved.
	 */
	wfi();

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
	    RT_INSTR_EXIT_HW_LOW_PWR,
	    PMF_NO_CACHE_MAINT);
#endif

	/*
	 * After we wake up from context retaining suspend, call the
	 * context retaining suspend finisher.
	 */
	psci_suspend_to_standby_finisher(idx, end_pwrlvl);
}

/*******************************************************************************
 * The following functions finish an earlier suspend request. They
 * are called by the common finisher routine in psci_common.c. The `state_info`
 * is the psci_power_state from which this CPU has woken up from.
 ******************************************************************************/
void psci_cpu_suspend_finish(int cpu_idx, const psci_power_state_t *state_info)
{
	unsigned int counter_freq;
	unsigned int max_off_lvl;

	/* Ensure we have been woken up from a suspended state */
	assert((psci_get_aff_info_state() == AFF_STATE_ON) &&
	       (is_local_state_off(
		state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]) != 0));

	/*
	 * Plat. management: Perform the platform specific actions
	 * before we change the state of the cpu e.g. enabling the
	 * gic or zeroing the mailbox register. If anything goes
	 * wrong then assert as there is no way to recover from this
	 * situation.
	 */
	psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	/* Arch. management: Enable the data cache, stack memory maintenance. */
	psci_do_pwrup_cache_maintenance();
#endif

	/* Re-init the cntfrq_el0 register */
	counter_freq = plat_get_syscnt_freq2();
	write_cntfrq_el0(counter_freq);

	/*
	 * Call the cpu suspend finish handler registered by the Secure Payload
	 * Dispatcher to let it do any book-keeping. If the handler encounters an
	 * error, it's expected to assert within
	 */
	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_suspend_finish != NULL)) {
		max_off_lvl = psci_find_max_off_lvl(state_info);
		assert(max_off_lvl != PSCI_INVALID_PWR_LVL);
		psci_spd_pm->svc_suspend_finish(max_off_lvl);
	}

	/* Invalidate the suspend level for the cpu */
	psci_set_suspend_pwrlvl(PSCI_INVALID_PWR_LVL);

	PUBLISH_EVENT(psci_suspend_pwrdown_finish);

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the suspend
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);
}