/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <cpu_data.h>
#include <debug.h>
#include <platform.h>
#include <pmf.h>
#include <runtime_instr.h>
#include <stddef.h>
#include "psci_private.h"

/*******************************************************************************
 * This function does generic and platform specific operations after a wake-up
 * from standby/retention states at multiple power levels.
 ******************************************************************************/
static void psci_suspend_to_standby_finisher(unsigned int cpu_idx,
		unsigned int end_pwrlvl)
{
	psci_power_state_t state_info;

	psci_acquire_pwr_domain_locks(end_pwrlvl, cpu_idx);

	/*
	 * Find out which retention states this CPU has exited, up to the
	 * 'end_pwrlvl'. The exit retention state could be deeper than the
	 * entry state as a result of state coordination amongst other CPUs
	 * post wfi.
	 */
	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);

	/*
	 * Plat. management: Allow the platform to do operations
	 * on waking up from retention.
	 */
	psci_plat_pm_ops->pwr_domain_suspend_finish(&state_info);

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domain levels for this CPU to run.
	 */
	psci_set_pwr_domains_to_run(end_pwrlvl);

	psci_release_pwr_domain_locks(end_pwrlvl, cpu_idx);
}

/*******************************************************************************
 * This function does generic and platform specific suspend to power down
 * operations.
 ******************************************************************************/
static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
		entry_point_info_t *ep,
		psci_power_state_t *state_info)
{
	unsigned int max_off_lvl = psci_find_max_off_lvl(state_info);

	/* Save PSCI target power level for the suspend finisher handler */
	psci_set_suspend_pwrlvl(end_pwrlvl);

	/*
	 * Flush the target power level as it will be accessed on power up
	 * with the data cache disabled.
	 */
	flush_cpu_data(psci_svc_cpu_data.target_pwrlvl);

	/*
	 * Call the cpu suspend handler registered by the Secure Payload
	 * Dispatcher to let it do any book-keeping. If the handler encounters
	 * an error, it is expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_suspend)
		psci_spd_pm->svc_suspend(max_off_lvl);

	/*
	 * Store the re-entry information for the non-secure world.
	 */
	cm_init_my_context(ep);

#if ENABLE_RUNTIME_INSTRUMENTATION

	/*
	 * Flush the cache line so that the timestamp update is reflected in
	 * memory even if the CPU powers down.
	 */
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
	    RT_INSTR_ENTER_CFLUSH,
	    PMF_CACHE_MAINT);
#endif

	/*
	 * Arch. management. Perform the necessary steps to flush all
	 * cpu caches. Currently we assume that the power level corresponds
	 * to the cache level.
	 * TODO: Introduce a mechanism to query the cache level to flush
	 * and the cpu-ops power down to perform from the platform.
	 */
	psci_do_pwrdown_cache_maintenance(max_off_lvl);

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
	    RT_INSTR_EXIT_CFLUSH,
	    PMF_NO_CACHE_MAINT);
#endif
}

/*******************************************************************************
 * Top level handler which is called when a cpu wants to suspend its execution.
 * It is assumed that along with suspending the cpu power domain, power domains
 * at higher levels up to the target power level will be suspended as well. It
 * coordinates with the platform to negotiate the target state for each power
 * domain level up to the target power domain level. It then performs the
 * generic, architectural and platform-specific setup and state management
 * required to suspend that power domain level and the power domain levels
 * below it. For example, for a cpu that is to be suspended this could mean
 * programming the power controller, whereas for a cluster that is to be
 * suspended it will call the platform specific code which will disable
 * coherency at the interconnect level if the cpu is the last in the cluster
 * and also program the power controller.
 *
 * All the required parameter checks are performed at the beginning and, after
 * the state transition has been done, no further error is expected and it is
 * not possible to undo any of the actions taken beyond that point.
 ******************************************************************************/
void psci_cpu_suspend_start(entry_point_info_t *ep,
		unsigned int end_pwrlvl,
		psci_power_state_t *state_info,
		unsigned int is_power_down_state)
{
	int skip_wfi = 0;
	unsigned int idx = plat_my_core_pos();

	/*
	 * This function must only be called on platforms where the
	 * CPU_SUSPEND platform hooks have been implemented.
	 */
	assert(psci_plat_pm_ops->pwr_domain_suspend &&
	       psci_plat_pm_ops->pwr_domain_suspend_finish);

	/*
	 * This function acquires the lock corresponding to each power
	 * level so that, by the time all locks are taken, a snapshot of the
	 * system topology has been captured and state management can be done
	 * safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl, idx);

	/*
	 * Check for pending interrupts after the delay introduced by lock
	 * contention, to increase the chances of early detection that a
	 * wake-up interrupt has fired.
	 */
	if (read_isr_el1()) {
		skip_wfi = 1;
		goto exit;
	}

	/*
	 * This function is passed the requested state info and
	 * it returns the negotiated state info for each power level up to
	 * the end level specified.
	 */
	psci_do_state_coordination(end_pwrlvl, state_info);

#if ENABLE_PSCI_STAT
	/* Update the last cpu for each level up to end_pwrlvl */
	psci_stats_update_pwr_down(end_pwrlvl, state_info);
#endif

	if (is_power_down_state)
		psci_suspend_to_pwrdown_start(end_pwrlvl, ep, state_info);

	/*
	 * Plat. management: Allow the platform to perform the
	 * necessary actions to turn off this cpu e.g. set the
	 * platform defined mailbox with the psci entrypoint,
	 * program the power controller etc.
	 */
	psci_plat_pm_ops->pwr_domain_suspend(state_info);

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_start(state_info);
#endif

exit:
	/*
	 * Release the locks corresponding to each power level in the
	 * reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl, idx);
	if (skip_wfi)
		return;

	if (is_power_down_state) {
#if ENABLE_RUNTIME_INSTRUMENTATION

		/*
		 * Update the timestamp with cache off. We assume this
		 * timestamp can only be read from the current CPU and the
		 * timestamp cache line will be flushed before return to
		 * normal world on wakeup.
		 */
		PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		    RT_INSTR_ENTER_HW_LOW_PWR,
		    PMF_NO_CACHE_MAINT);
#endif

		/* The function calls below must not return */
		if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi)
			psci_plat_pm_ops->pwr_domain_pwr_down_wfi(state_info);
		else
			psci_power_down_wfi();
	}

#if ENABLE_RUNTIME_INSTRUMENTATION
	/* Mark entry into hardware low power for the retention/standby path */
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
	    RT_INSTR_ENTER_HW_LOW_PWR,
	    PMF_NO_CACHE_MAINT);
#endif

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_start(state_info);
#endif

	/*
	 * We will reach here if only retention/standby states have been
	 * requested at multiple power levels. This means that the cpu
	 * context will be preserved.
	 */
	wfi();

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_stop(state_info);
	psci_stats_update_pwr_up(end_pwrlvl, state_info);
#endif

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
	    RT_INSTR_EXIT_HW_LOW_PWR,
	    PMF_NO_CACHE_MAINT);
#endif

	/*
	 * After we wake up from context retaining suspend, call the
	 * context retaining suspend finisher.
	 */
	psci_suspend_to_standby_finisher(idx, end_pwrlvl);
}

/*******************************************************************************
 * The following functions finish an earlier suspend request. They
 * are called by the common finisher routine in psci_common.c. The `state_info`
 * is the psci_power_state from which this CPU has woken up.
 ******************************************************************************/
void psci_cpu_suspend_finish(unsigned int cpu_idx,
		psci_power_state_t *state_info)
{
	unsigned int counter_freq;
	unsigned int max_off_lvl;

	/* Ensure we have been woken up from a suspended state */
	assert(psci_get_aff_info_state() == AFF_STATE_ON && is_local_state_off(
		state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]));

	/*
	 * Plat. management: Perform the platform specific actions
	 * before we change the state of the cpu e.g. enabling the
	 * gic or zeroing the mailbox register. If anything goes
	 * wrong then assert as there is no way to recover from this
	 * situation.
	 */
	psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);

	/*
	 * Arch. management: Enable the data cache, manage stack memory and
	 * restore the stashed EL3 architectural context from the 'cpu_context'
	 * structure for this cpu.
	 */
	psci_do_pwrup_cache_maintenance();

	/* Re-init the cntfrq_el0 register */
	counter_freq = plat_get_syscnt_freq2();
	write_cntfrq_el0(counter_freq);

	/*
	 * Call the cpu suspend finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters
	 * an error, it is expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_suspend_finish) {
		max_off_lvl = psci_find_max_off_lvl(state_info);
		assert(max_off_lvl != PSCI_INVALID_PWR_LVL);
		psci_spd_pm->svc_suspend_finish(max_off_lvl);
	}

	/* Invalidate the suspend level for the cpu */
	psci_set_suspend_pwrlvl(PSCI_INVALID_PWR_LVL);

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the suspend
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);
}