/*
 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stddef.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/pmf/pmf.h>
#include <lib/runtime_instr.h>
#include <plat/common/platform.h>

#include "psci_private.h"

/*******************************************************************************
 * This function does generic and platform specific operations after a wake-up
 * from standby/retention states at multiple power levels.
 *
 * cpu_idx    - index of the calling CPU, as supplied by the caller (the caller
 *              in this file passes plat_my_core_pos()).
 * end_pwrlvl - highest power domain level that took part in the suspend; locks
 *              and state updates cover every level up to and including it.
 ******************************************************************************/
static void psci_suspend_to_standby_finisher(int cpu_idx,
					unsigned int end_pwrlvl)
{
	psci_power_state_t state_info;

	/*
	 * Serialise against other CPUs operating on the shared power domain
	 * tree nodes before reading/updating the coordinated power states.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl,
				      cpu_idx);

	/*
	 * Find out which retention states this CPU has exited from until the
	 * 'end_pwrlvl'. The exit retention state could be deeper than the entry
	 * state as a result of state coordination amongst other CPUs post wfi.
	 */
	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);

#if ENABLE_PSCI_STAT
	/* Close the residency accounting window opened before the wfi. */
	plat_psci_stat_accounting_stop(&state_info);
	psci_stats_update_pwr_up(end_pwrlvl, &state_info);
#endif

	/*
	 * Plat. management: Allow the platform to do operations
	 * on waking up from retention.
	 */
	psci_plat_pm_ops->pwr_domain_suspend_finish(&state_info);

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domain levels for this CPU to run.
	 */
	psci_set_pwr_domains_to_run(end_pwrlvl);

	/* Locks are released in the reverse order to which they were taken. */
	psci_release_pwr_domain_locks(end_pwrlvl,
				      cpu_idx);
}

/*******************************************************************************
 * This function does generic and platform specific suspend to power down
 * operations.
 *
 * end_pwrlvl - highest power domain level that will be powered down.
 * ep         - non-secure world entry point information to restore on the
 *              subsequent warm boot.
 * state_info - coordinated local power states for each level up to
 *              'end_pwrlvl'.
 ******************************************************************************/
static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
					const entry_point_info_t *ep,
					const psci_power_state_t *state_info)
{
	/* Deepest power level that is in an OFF (power down) local state. */
	unsigned int max_off_lvl = psci_find_max_off_lvl(state_info);

	PUBLISH_EVENT(psci_suspend_pwrdown_start);

	/* Save PSCI target power level for the suspend finisher handler */
	psci_set_suspend_pwrlvl(end_pwrlvl);

	/*
	 * Flush the target power level as it might be accessed on power up with
	 * Data cache disabled.
	 */
	psci_flush_cpu_data(psci_svc_cpu_data.target_pwrlvl);

	/*
	 * Call the cpu suspend handler registered by the Secure Payload
	 * Dispatcher to let it do any book-keeping. If the handler encounters an
	 * error, it's expected to assert within
	 */
	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_suspend != NULL))
		psci_spd_pm->svc_suspend(max_off_lvl);

#if !HW_ASSISTED_COHERENCY
	/*
	 * Plat. management: Allow the platform to perform any early
	 * actions required to power down the CPU. This might be useful for
	 * HW_ASSISTED_COHERENCY = 0 platforms that can safely perform these
	 * actions with data caches enabled.
	 */
	if (psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early != NULL)
		psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early(state_info);
#endif

	/*
	 * Store the re-entry information for the non-secure world.
	 */
	cm_init_my_context(ep);

#if ENABLE_RUNTIME_INSTRUMENTATION

	/*
	 * Flush cache line so that even if CPU power down happens
	 * the timestamp update is reflected in memory.
	 */
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_ENTER_CFLUSH,
		PMF_CACHE_MAINT);
#endif

	/*
	 * Arch. management. Initiate power down sequence.
	 * TODO : Introduce a mechanism to query the cache level to flush
	 * and the cpu-ops power down to perform from the platform.
	 */
	psci_do_pwrdown_sequence(max_off_lvl);

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_EXIT_CFLUSH,
		PMF_NO_CACHE_MAINT);
#endif
}

/*******************************************************************************
 * Top level handler which is called when a cpu wants to suspend its execution.
 * It is assumed that along with suspending the cpu power domain, power domains
 * at higher levels until the target power level will be suspended as well. It
 * coordinates with the platform to negotiate the target state for each of
 * the power domain level till the target power domain level. It then performs
 * generic, architectural, platform setup and state management required to
 * suspend that power domain level and power domain levels below it.
 * e.g. For a cpu that's to be suspended, it could mean programming the
 * power controller whereas for a cluster that's to be suspended, it will call
 * the platform specific code which will disable coherency at the interconnect
 * level if the cpu is the last in the cluster and also the program the power
 * controller.
 *
 * All the required parameter checks are performed at the beginning and after
 * the state transition has been done, no further error is expected and it is
 * not possible to undo any of the actions taken beyond that point.
 ******************************************************************************/
void psci_cpu_suspend_start(const entry_point_info_t *ep,
			unsigned int end_pwrlvl,
			psci_power_state_t *state_info,
			unsigned int is_power_down_state)
{
	/* Set when a pending wake-up interrupt makes the suspend redundant. */
	int skip_wfi = 0;
	int idx = (int) plat_my_core_pos();

	/*
	 * This function must only be called on platforms where the
	 * CPU_SUSPEND platform hooks have been implemented.
	 */
	assert((psci_plat_pm_ops->pwr_domain_suspend != NULL) &&
	       (psci_plat_pm_ops->pwr_domain_suspend_finish != NULL));

	/*
	 * This function acquires the lock corresponding to each power
	 * level so that by the time all locks are taken, the system topology
	 * is snapshot and state management can be done safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl,
				      idx);

	/*
	 * We check if there are any pending interrupts after the delay
	 * introduced by lock contention to increase the chances of early
	 * detection that a wake-up interrupt has fired.
	 */
	if (read_isr_el1() != 0U) {
		skip_wfi = 1;
		goto exit;
	}

	/*
	 * This function is passed the requested state info and
	 * it returns the negotiated state info for each power level up to
	 * the end level specified.
	 */
	psci_do_state_coordination(end_pwrlvl, state_info);

#if ENABLE_PSCI_STAT
	/* Update the last cpu for each level till end_pwrlvl */
	psci_stats_update_pwr_down(end_pwrlvl, state_info);
#endif

	/* Power-down suspend needs context save/flush; retention does not. */
	if (is_power_down_state != 0U)
		psci_suspend_to_pwrdown_start(end_pwrlvl, ep, state_info);

	/*
	 * Plat. management: Allow the platform to perform the
	 * necessary actions to turn off this cpu e.g. set the
	 * platform defined mailbox with the psci entrypoint,
	 * program the power controller etc.
	 */
	psci_plat_pm_ops->pwr_domain_suspend(state_info);

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_start(state_info);
#endif

exit:
	/*
	 * Release the locks corresponding to each power level in the
	 * reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl,
				      idx);

	/* A wake-up interrupt was already pending; resume without the wfi. */
	if (skip_wfi == 1)
		return;

	if (is_power_down_state != 0U) {
#if ENABLE_RUNTIME_INSTRUMENTATION

		/*
		 * Update the timestamp with cache off. We assume this
		 * timestamp can only be read from the current CPU and the
		 * timestamp cache line will be flushed before return to
		 * normal world on wakeup.
		 */
		PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		    RT_INSTR_ENTER_HW_LOW_PWR,
		    PMF_NO_CACHE_MAINT);
#endif

		/* The function calls below must not return */
		if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi != NULL)
			psci_plat_pm_ops->pwr_domain_pwr_down_wfi(state_info);
		else
			psci_power_down_wfi();
	}

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
	    RT_INSTR_ENTER_HW_LOW_PWR,
	    PMF_NO_CACHE_MAINT);
#endif

	/*
	 * We will reach here if only retention/standby states have been
	 * requested at multiple power levels. This means that the cpu
	 * context will be preserved.
	 */
	wfi();

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
	    RT_INSTR_EXIT_HW_LOW_PWR,
	    PMF_NO_CACHE_MAINT);
#endif

	/*
	 * After we wake up from context retaining suspend, call the
	 * context retaining suspend finisher.
	 */
	psci_suspend_to_standby_finisher(idx, end_pwrlvl);
}

/*******************************************************************************
 * The following functions finish an earlier suspend request. They
 * are called by the common finisher routine in psci_common.c.
The `state_info`
 * is the psci_power_state from which this CPU has woken up.
 ******************************************************************************/
void psci_cpu_suspend_finish(int cpu_idx, const psci_power_state_t *state_info)
{
	unsigned int counter_freq;
	unsigned int max_off_lvl;

	/*
	 * NOTE: 'cpu_idx' is not referenced in this implementation; it is
	 * part of the common finisher callback signature.
	 */

	/* Ensure we have been woken up from a suspended state */
	assert((psci_get_aff_info_state() == AFF_STATE_ON) &&
	       (is_local_state_off(
		state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]) != 0));

	/*
	 * Plat. management: Perform the platform specific actions
	 * before we change the state of the cpu e.g. enabling the
	 * gic or zeroing the mailbox register. If anything goes
	 * wrong then assert as there is no way to recover from this
	 * situation.
	 */
	psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	/* Arch. management: Enable the data cache, stack memory maintenance. */
	psci_do_pwrup_cache_maintenance();
#endif

	/*
	 * Re-init the cntfrq_el0 register; presumably its contents are not
	 * preserved across the power down — TODO confirm this is required on
	 * all supported platforms.
	 */
	counter_freq = plat_get_syscnt_freq2();
	write_cntfrq_el0(counter_freq);

	/*
	 * Call the cpu suspend finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters an
	 * error, it's expected to assert within
	 */
	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_suspend_finish != NULL)) {
		max_off_lvl = psci_find_max_off_lvl(state_info);
		assert(max_off_lvl != PSCI_INVALID_PWR_LVL);
		psci_spd_pm->svc_suspend_finish(max_off_lvl);
	}

	/* Invalidate the suspend level for the cpu */
	psci_set_suspend_pwrlvl(PSCI_INVALID_PWR_LVL);

	PUBLISH_EVENT(psci_suspend_pwrdown_finish);

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the suspend
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);
}