/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <cpu_data.h>
#include <debug.h>
#include <platform.h>
#include <pmf.h>
#include <runtime_instr.h>
#include <stddef.h>
#include "psci_private.h"
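
/*
 * This file implements the generic PSCI CPU_SUSPEND flow: entry into
 * power-down and standby/retention states, and the finishers that run when a
 * CPU wakes up from those states.
 */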

/*******************************************************************************
 * This function does generic and platform specific operations after a wake-up
 * from standby/retention states at multiple power levels.
 ******************************************************************************/
static void psci_suspend_to_standby_finisher(unsigned int cpu_idx,
					     unsigned int end_pwrlvl)
{
	psci_power_state_t state_info;

	psci_acquire_pwr_domain_locks(end_pwrlvl,
				      cpu_idx);

	/*
	 * Find out which retention states this CPU has exited, up to
	 * 'end_pwrlvl'. The exit retention state could be deeper than the
	 * entry state as a result of state coordination amongst other CPUs
	 * post wfi.
	 */
	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);

	/*
	 * Plat. management: Allow the platform to do operations
	 * on waking up from retention.
	 */
	psci_plat_pm_ops->pwr_domain_suspend_finish(&state_info);

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domain levels for this CPU to run.
	 */
	psci_set_pwr_domains_to_run(end_pwrlvl);

	psci_release_pwr_domain_locks(end_pwrlvl,
				      cpu_idx);
}

/*******************************************************************************
 * This function does generic and platform specific suspend to power down
 * operations.
 ******************************************************************************/
static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
					  entry_point_info_t *ep,
					  psci_power_state_t *state_info)
{
	unsigned int max_off_lvl = psci_find_max_off_lvl(state_info);

	/* Save PSCI target power level for the suspend finisher handler */
	psci_set_suspend_pwrlvl(end_pwrlvl);

	/*
	 * Flush the target power level as it might be accessed on power up
	 * with the data cache disabled.
	 */
	psci_flush_cpu_data(psci_svc_cpu_data.target_pwrlvl);

	/*
	 * Call the cpu suspend handler registered by the Secure Payload
	 * Dispatcher to let it do any book-keeping. If the handler encounters
	 * an error, it's expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_suspend)
		psci_spd_pm->svc_suspend(max_off_lvl);

	/*
	 * Store the re-entry information for the non-secure world.
	 */
	cm_init_my_context(ep);
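
	/*
	 * The non-secure context initialised above is consumed on the warm
	 * boot path by psci_cpu_suspend_finish() (see below), so that the CPU
	 * re-enters the normal world at the entry point supplied by the
	 * caller once the power-down state is exited.
	 */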

#if ENABLE_RUNTIME_INSTRUMENTATION

	/*
	 * Flush the cache line so that the timestamp update is reflected in
	 * memory even if the CPU powers down.
	 */
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_ENTER_CFLUSH,
		PMF_CACHE_MAINT);
#endif

	/*
	 * Arch. management. Perform the necessary steps to flush all
	 * cpu caches. Currently we assume that the power level corresponds to
	 * the cache level.
	 * TODO: Introduce a mechanism to query the cache level to flush
	 * and the cpu-ops power down to perform from the platform.
	 */
	psci_do_pwrdown_cache_maintenance(max_off_lvl);

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_EXIT_CFLUSH,
		PMF_NO_CACHE_MAINT);
#endif
}

/*******************************************************************************
 * Top level handler which is called when a cpu wants to suspend its execution.
 * It is assumed that along with suspending the cpu power domain, power domains
 * at higher levels up to the target power level will be suspended as well. It
 * coordinates with the platform to negotiate the target state for each power
 * domain level up to the target power domain level. It then performs the
 * generic, architectural and platform setup and state management required to
 * suspend that power domain level and the power domain levels below it.
 * e.g. For a cpu that is to be suspended, this could mean programming the
 * power controller, whereas for a cluster that is to be suspended, it will
 * call the platform specific code which will disable coherency at the
 * interconnect level if the cpu is the last in the cluster and also program
 * the power controller.
 *
 * All the required parameter checks are performed at the beginning and after
 * the state transition has been done, no further error is expected and it is
 * not possible to undo any of the actions taken beyond that point.
 ******************************************************************************/
void psci_cpu_suspend_start(entry_point_info_t *ep,
			    unsigned int end_pwrlvl,
			    psci_power_state_t *state_info,
			    unsigned int is_power_down_state)
{
	int skip_wfi = 0;
	unsigned int idx = plat_my_core_pos();

	/*
	 * This function must only be called on platforms where the
	 * CPU_SUSPEND platform hooks have been implemented.
	 */
	assert(psci_plat_pm_ops->pwr_domain_suspend &&
	       psci_plat_pm_ops->pwr_domain_suspend_finish);

	/*
	 * This function acquires the lock corresponding to each power
	 * level so that by the time all locks are taken, the system topology
	 * has been snapshotted and state management can be done safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl,
				      idx);

	/*
	 * We check if there are any pending interrupts after the delay
	 * introduced by lock contention to increase the chances of early
	 * detection that a wake-up interrupt has fired.
	 */
	if (read_isr_el1()) {
		skip_wfi = 1;
		goto exit;
	}

	/*
	 * This function is passed the requested state info and
	 * it returns the negotiated state info for each power level up to
	 * the end level specified.
	 */
	psci_do_state_coordination(end_pwrlvl, state_info);

#if ENABLE_PSCI_STAT
	/* Update the last cpu for each level till end_pwrlvl */
	psci_stats_update_pwr_down(end_pwrlvl, state_info);
#endif

	if (is_power_down_state)
		psci_suspend_to_pwrdown_start(end_pwrlvl, ep, state_info);

	/*
	 * Plat. management: Allow the platform to perform the
	 * necessary actions to turn off this cpu e.g. set the
	 * platform defined mailbox with the psci entrypoint,
	 * program the power controller etc.
	 */
	psci_plat_pm_ops->pwr_domain_suspend(state_info);

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_start(state_info);
#endif

exit:
	/*
	 * Release the locks corresponding to each power level in the
	 * reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl,
				      idx);
	if (skip_wfi)
		return;

	if (is_power_down_state) {
#if ENABLE_RUNTIME_INSTRUMENTATION

		/*
		 * Update the timestamp with cache off. We assume this
		 * timestamp can only be read from the current CPU and the
		 * timestamp cache line will be flushed before return to
		 * normal world on wakeup.
		 */
		PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
			RT_INSTR_ENTER_HW_LOW_PWR,
			PMF_NO_CACHE_MAINT);
#endif

		/* The function calls below must not return */
		if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi)
			psci_plat_pm_ops->pwr_domain_pwr_down_wfi(state_info);
		else
			psci_power_down_wfi();
	}
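
	/*
	 * Only the standby/retention path reaches this point: the power-down
	 * WFI calls above must not return. Capture the low power entry
	 * timestamp and, when PSCI stats are enabled, start accounting before
	 * executing wfi() so that the time spent in the retention state is
	 * measured.
	 */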
#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_ENTER_HW_LOW_PWR,
		PMF_NO_CACHE_MAINT);
#endif

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_start(state_info);
#endif

	/*
	 * We will reach here if only retention/standby states have been
	 * requested at multiple power levels. This means that the cpu
	 * context will be preserved.
	 */
	wfi();

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_stop(state_info);
	psci_stats_update_pwr_up(end_pwrlvl, state_info);
#endif

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_EXIT_HW_LOW_PWR,
		PMF_NO_CACHE_MAINT);
#endif

	/*
	 * After we wake up from context retaining suspend, call the
	 * context retaining suspend finisher.
	 */
	psci_suspend_to_standby_finisher(idx, end_pwrlvl);
}
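
/*
 * Note: psci_cpu_suspend_finish() below runs only on the wake-up path from a
 * power-down suspend, via the warm boot entry point and the common finisher
 * routine in psci_common.c. Wake-ups from retention/standby states resume
 * after the wfi() above and are handled by psci_suspend_to_standby_finisher()
 * instead.
 */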

/*******************************************************************************
 * The following function finishes an earlier suspend request. It is called
 * by the common finisher routine in psci_common.c. The `state_info` is the
 * psci_power_state from which this CPU has woken up.
 ******************************************************************************/
void psci_cpu_suspend_finish(unsigned int cpu_idx,
			     psci_power_state_t *state_info)
{
	unsigned int counter_freq;
	unsigned int max_off_lvl;

	/* Ensure we have been woken up from a suspended state */
	assert(psci_get_aff_info_state() == AFF_STATE_ON &&
	       is_local_state_off(
			state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]));

	/*
	 * Plat. management: Perform the platform specific actions
	 * before we change the state of the cpu e.g. enabling the
	 * GIC or zeroing the mailbox register. If anything goes
	 * wrong then assert as there is no way to recover from this
	 * situation.
	 */
	psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);

	/*
	 * Arch. management: Enable the data cache, manage stack memory and
	 * restore the stashed EL3 architectural context from the 'cpu_context'
	 * structure for this cpu.
	 */
	psci_do_pwrup_cache_maintenance();

	/* Re-init the cntfrq_el0 register */
	counter_freq = plat_get_syscnt_freq2();
	write_cntfrq_el0(counter_freq);

	/*
	 * Call the cpu suspend finish handler registered by the Secure
	 * Payload Dispatcher to let it do any book-keeping. If the handler
	 * encounters an error, it's expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_suspend_finish) {
		max_off_lvl = psci_find_max_off_lvl(state_info);
		assert(max_off_lvl != PSCI_INVALID_PWR_LVL);
		psci_spd_pm->svc_suspend_finish(max_off_lvl);
	}

	/* Invalidate the suspend level for the cpu */
	psci_set_suspend_pwrlvl(PSCI_INVALID_PWR_LVL);

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the suspend
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);
}