/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <cpu_data.h>
#include <debug.h>
#include <platform.h>
#include <pmf.h>
#include <runtime_instr.h>
#include <stddef.h>
#include "psci_private.h"

/*******************************************************************************
 * This function does generic and platform specific operations after a wake-up
 * from standby/retention states at multiple power levels.
 ******************************************************************************/
static void psci_suspend_to_standby_finisher(unsigned int cpu_idx,
					     unsigned int end_pwrlvl)
{
	psci_power_state_t state_info;

	psci_acquire_pwr_domain_locks(end_pwrlvl, cpu_idx);

	/*
	 * Find out which retention states this CPU has exited from until the
	 * 'end_pwrlvl'. The exit retention state could be deeper than the entry
	 * state as a result of state coordination amongst other CPUs post wfi.
	 */
	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);

	/*
	 * Plat. management: Allow the platform to do operations
	 * on waking up from retention.
	 */
	psci_plat_pm_ops->pwr_domain_suspend_finish(&state_info);

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domain levels for this CPU to run.
	 */
	psci_set_pwr_domains_to_run(end_pwrlvl);

	psci_release_pwr_domain_locks(end_pwrlvl, cpu_idx);
}

/*******************************************************************************
 * This function does generic and platform specific suspend to power down
 * operations.
 ******************************************************************************/
static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
					  entry_point_info_t *ep,
					  psci_power_state_t *state_info)
{
	unsigned int max_off_lvl = psci_find_max_off_lvl(state_info);

	/* Save PSCI target power level for the suspend finisher handler */
	psci_set_suspend_pwrlvl(end_pwrlvl);

	/*
	 * Flush the target power level as it might be accessed on power up
	 * with the data cache disabled.
	 */
	psci_flush_cpu_data(psci_svc_cpu_data.target_pwrlvl);

	/*
	 * Call the cpu suspend handler registered by the Secure Payload
	 * Dispatcher to let it do any book-keeping. If the handler encounters
	 * an error, it is expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_suspend)
		psci_spd_pm->svc_suspend(max_off_lvl);

	/*
	 * Store the re-entry information for the non-secure world.
	 */
	cm_init_my_context(ep);

#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Flush the cache line so that the timestamp update is reflected in
	 * memory even if the CPU powers down.
	 */
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_ENTER_CFLUSH,
		PMF_CACHE_MAINT);
#endif

	/*
	 * Arch. management. Initiate the power down sequence.
	 * TODO: Introduce a mechanism to query the cache level to flush
	 * and the cpu-ops power down to perform from the platform.
	 */
	psci_do_pwrdown_sequence(max_off_lvl);

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_EXIT_CFLUSH,
		PMF_NO_CACHE_MAINT);
#endif
}

/*******************************************************************************
 * Top level handler which is called when a cpu wants to suspend its execution.
 * It is assumed that along with suspending the cpu power domain, power domains
 * at higher levels up to the target power level will be suspended as well. It
 * coordinates with the platform to negotiate the target state for each power
 * domain level up to the target power domain level. It then performs the
 * generic, architectural and platform setup and state management required to
 * suspend that power domain level and the power domain levels below it.
 * e.g. For a cpu that is to be suspended, this could mean programming the
 * power controller, whereas for a cluster that is to be suspended it will call
 * the platform specific code which will disable coherency at the interconnect
 * level if the cpu is the last in the cluster, and also program the power
 * controller.
 *
 * All the required parameter checks are performed at the beginning. Once the
 * state transition has been done, no further error is expected and it is not
 * possible to undo any of the actions taken beyond that point.
 ******************************************************************************/
void psci_cpu_suspend_start(entry_point_info_t *ep,
			    unsigned int end_pwrlvl,
			    psci_power_state_t *state_info,
			    unsigned int is_power_down_state)
{
	int skip_wfi = 0;
	unsigned int idx = plat_my_core_pos();

	/*
	 * This function must only be called on platforms where the
	 * CPU_SUSPEND platform hooks have been implemented.
	 */
	assert(psci_plat_pm_ops->pwr_domain_suspend &&
			psci_plat_pm_ops->pwr_domain_suspend_finish);

	/*
	 * This function acquires the lock corresponding to each power
	 * level so that by the time all locks are taken, a snapshot of the
	 * system topology has been captured and state management can be done
	 * safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl, idx);

	/*
	 * We check if there are any pending interrupts after the delay
	 * introduced by lock contention to increase the chances of early
	 * detection that a wake-up interrupt has fired.
	 */
	if (read_isr_el1()) {
		skip_wfi = 1;
		goto exit;
	}

	/*
	 * This function is passed the requested state info and
	 * it returns the negotiated state info for each power level up to
	 * the end level specified.
	 */
	psci_do_state_coordination(end_pwrlvl, state_info);

#if ENABLE_PSCI_STAT
	/* Update the last cpu for each level till end_pwrlvl */
	psci_stats_update_pwr_down(end_pwrlvl, state_info);
#endif

	if (is_power_down_state)
		psci_suspend_to_pwrdown_start(end_pwrlvl, ep, state_info);

	/*
	 * Plat. management: Allow the platform to perform the
	 * necessary actions to turn off this cpu e.g. set the
	 * platform defined mailbox with the psci entrypoint,
	 * program the power controller etc.
	 */
	psci_plat_pm_ops->pwr_domain_suspend(state_info);

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_start(state_info);
#endif

exit:
	/*
	 * Release the locks corresponding to each power level in the
	 * reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl, idx);
	if (skip_wfi)
		return;

	if (is_power_down_state) {
#if ENABLE_RUNTIME_INSTRUMENTATION
		/*
		 * Update the timestamp with cache off. We assume this
		 * timestamp can only be read from the current CPU and the
		 * timestamp cache line will be flushed before return to
		 * normal world on wakeup.
		 */
		PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
			RT_INSTR_ENTER_HW_LOW_PWR,
			PMF_NO_CACHE_MAINT);
#endif

		/* The function calls below must not return */
		if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi)
			psci_plat_pm_ops->pwr_domain_pwr_down_wfi(state_info);
		else
			psci_power_down_wfi();
	}

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_ENTER_HW_LOW_PWR,
		PMF_NO_CACHE_MAINT);
#endif

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_start(state_info);
#endif

	/*
	 * We will reach here if only retention/standby states have been
	 * requested at multiple power levels. This means that the cpu
	 * context will be preserved.
	 */
	wfi();

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_stop(state_info);
	psci_stats_update_pwr_up(end_pwrlvl, state_info);
#endif

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_EXIT_HW_LOW_PWR,
		PMF_NO_CACHE_MAINT);
#endif

	/*
	 * After we wake up from context-retaining suspend, call the
	 * context-retaining suspend finisher.
	 */
	psci_suspend_to_standby_finisher(idx, end_pwrlvl);
}

/*******************************************************************************
 * The following functions finish an earlier suspend request. They
 * are called by the common finisher routine in psci_common.c. The `state_info`
 * is the psci_power_state from which this CPU has woken up.
 ******************************************************************************/
void psci_cpu_suspend_finish(unsigned int cpu_idx,
			     psci_power_state_t *state_info)
{
	unsigned int counter_freq;
	unsigned int max_off_lvl;

	/* Ensure we have been woken up from a suspended state */
	assert(psci_get_aff_info_state() == AFF_STATE_ON && is_local_state_off(
			state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]));

	/*
	 * Plat. management: Perform the platform specific actions
	 * before we change the state of the cpu e.g. enabling the
	 * gic or zeroing the mailbox register. If anything goes
	 * wrong then assert as there is no way to recover from this
	 * situation.
	 */
	psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	/* Arch. management: Enable the data cache, stack memory maintenance. */
	psci_do_pwrup_cache_maintenance();
#endif

	/* Re-init the cntfrq_el0 register */
	counter_freq = plat_get_syscnt_freq2();
	write_cntfrq_el0(counter_freq);

	/*
	 * Call the cpu suspend finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters
	 * an error, it is expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_suspend_finish) {
		max_off_lvl = psci_find_max_off_lvl(state_info);
		assert(max_off_lvl != PSCI_INVALID_PWR_LVL);
		psci_spd_pm->svc_suspend_finish(max_off_lvl);
	}

	/* Invalidate the suspend level for the cpu */
	psci_set_suspend_pwrlvl(PSCI_INVALID_PWR_LVL);

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the suspend
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);
}
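
/*******************************************************************************
 * Illustrative sketch (not part of the PSCI library logic above): the suspend
 * handlers in this file depend on the platform exporting `pwr_domain_suspend`
 * and `pwr_domain_suspend_finish` hooks through `plat_setup_psci_ops()`. A
 * minimal platform wiring might look roughly as follows; the `my_*` names and
 * the hook bodies are hypothetical placeholders, not part of any real port.
 *
 *	static void my_pwr_domain_suspend(const psci_power_state_t *target_state)
 *	{
 *		// e.g. program the power controller for the negotiated states
 *	}
 *
 *	static void my_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
 *	{
 *		// e.g. re-enable the GIC CPU interface, clear the mailbox
 *	}
 *
 *	static const plat_psci_ops_t my_psci_ops = {
 *		.pwr_domain_suspend		= my_pwr_domain_suspend,
 *		.pwr_domain_suspend_finish	= my_pwr_domain_suspend_finish,
 *	};
 *
 *	int plat_setup_psci_ops(uintptr_t sec_entrypoint,
 *				const plat_psci_ops_t **psci_ops)
 *	{
 *		*psci_ops = &my_psci_ops;
 *		return 0;
 *	}
 ******************************************************************************/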