/*
 * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <platform.h>
#include <string.h>
#include "psci_private.h"

/******************************************************************************
 * Construct the psci_power_state to request power OFF at all power levels.
 ******************************************************************************/
static void psci_set_power_off_state(psci_power_state_t *state_info)
{
        int lvl;

        for (lvl = PSCI_CPU_PWR_LVL; lvl <= PLAT_MAX_PWR_LVL; lvl++)
                state_info->pwr_domain_state[lvl] = PLAT_MAX_OFF_STATE;
}
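/*
 * For example, on a platform where PLAT_MAX_PWR_LVL is 2 (cpu, cluster and
 * system levels) and PLAT_MAX_OFF_STATE is 2, the helper above yields
 * pwr_domain_state[] = { 2, 2, 2 }, i.e. the deepest OFF state is requested
 * at every level. The values are illustrative; both macros are
 * platform-defined.
 */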

/******************************************************************************
 * Top level handler which is called when a cpu wants to power itself down.
 * It's assumed that along with turning the cpu power domain off, power
 * domains at higher levels will be turned off as far as possible. It finds
 * the highest level where a domain has to be powered off by traversing the
 * node information and then performs the generic, architectural and platform
 * setup and state management required to turn OFF that power domain and the
 * domains below it. e.g. For a cpu that's to be powered OFF, it could mean
 * programming the power controller, whereas for a cluster that's to be
 * powered off, it will call the platform specific code which will disable
 * coherency at the interconnect level if the cpu is the last in the cluster
 * and also program the power controller.
 ******************************************************************************/
int psci_do_cpu_off(unsigned int end_pwrlvl)
{
        int rc = PSCI_E_SUCCESS, idx = plat_my_core_pos();
        psci_power_state_t state_info;

        /*
         * This function must only be called on platforms where the
         * CPU_OFF platform hooks have been implemented.
         */
        assert(psci_plat_pm_ops->pwr_domain_off);

        /*
         * This function acquires the lock corresponding to each power
         * level so that by the time all locks are taken, a consistent
         * snapshot of the system topology has been captured and state
         * management can be done safely.
         */
        psci_acquire_pwr_domain_locks(end_pwrlvl, idx);

        /*
         * Call the cpu off handler registered by the Secure Payload
         * Dispatcher to let it do any bookkeeping. Assume that the SPD
         * always reports an E_DENIED error if the SP refuses to power down.
         */
        if (psci_spd_pm && psci_spd_pm->svc_off) {
                rc = psci_spd_pm->svc_off(0);
                if (rc)
                        goto exit;
        }

        /* Construct the psci_power_state for CPU_OFF */
        psci_set_power_off_state(&state_info);

        /*
         * This function is passed the requested state info and it returns
         * the negotiated state info for each power level up to the end
         * level specified.
         */
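        /*
         * For example, if this cpu requests OFF at the cluster level but a
         * sibling cpu in the same cluster is still ON, coordination demotes
         * the cluster-level state to RUN and only the cpu-level domain is
         * actually turned off.
         */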
        psci_do_state_coordination(end_pwrlvl, &state_info);

#if ENABLE_PSCI_STAT
        /* Update the last cpu for each level up to end_pwrlvl */
        psci_stats_update_pwr_down(end_pwrlvl, &state_info);
#endif

        /*
         * Arch. management: Perform the necessary steps to flush all
         * cpu caches.
         */
        psci_do_pwrdown_cache_maintenance(psci_find_max_off_lvl(&state_info));

        /*
         * Plat. management: Perform platform specific actions to turn this
         * cpu off e.g. exit cpu coherency, program the power controller etc.
         */
        psci_plat_pm_ops->pwr_domain_off(&state_info);

#if ENABLE_PSCI_STAT
        /*
         * Capture the timestamp while entering the low power state.
         * No cache maintenance is needed because the caches are off
         * and writes go directly to main memory.
         */
        PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR,
                PMF_NO_CACHE_MAINT);
#endif

exit:
        /*
         * Release the locks corresponding to each power level in the
         * reverse order in which they were acquired.
         */
        psci_release_pwr_domain_locks(end_pwrlvl, idx);

        /*
         * Check if all actions needed to safely power down this cpu have
         * successfully completed.
         */
        if (rc == PSCI_E_SUCCESS) {
                /*
                 * Set the affinity info state to OFF. This writes directly
                 * to main memory as caches are disabled, so cache
                 * maintenance is required to ensure that later cached reads
                 * of aff_info_state return AFF_STATE_OFF. A dsbish()
                 * ensures ordering of the update to the affinity info state
                 * prior to cache line invalidation.
                 */
                flush_cpu_data(psci_svc_cpu_data.aff_info_state);
                psci_set_aff_info_state(AFF_STATE_OFF);
                dsbish();
                inv_cpu_data(psci_svc_cpu_data.aff_info_state);

                if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi) {
                        /* This function must not return */
                        psci_plat_pm_ops->pwr_domain_pwr_down_wfi(&state_info);
                } else {
                        /*
                         * Enter a wfi loop which will allow the power
                         * controller to physically power down this cpu.
                         */
                        psci_power_down_wfi();
                }
        }

        return rc;
}
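
/******************************************************************************
 * Illustrative sketch (compiled out via #if 0): one possible shape of the
 * pwr_domain_off hook that psci_do_cpu_off() invokes above, for a
 * hypothetical platform with a memory-mapped power controller. Only the
 * hook signature and the target_state layout come from the PSCI framework;
 * hyp_pwrc_cpu_off(), hyp_pwrc_cluster_off() and hyp_exit_coherency() are
 * made-up helpers, and treating level 1 as the cluster level is an
 * assumption of this sketch.
 ******************************************************************************/
#if 0
static void hyp_pwr_domain_off(const psci_power_state_t *target_state)
{
        /* This hook is only reached when the cpu level is going OFF */
        assert(target_state->pwr_domain_state[PSCI_CPU_PWR_LVL] ==
                                                PLAT_MAX_OFF_STATE);

        /*
         * If state coordination decided that the cluster must be powered
         * down too (i.e. this is the last cpu in the cluster), take this
         * cpu out of interconnect coherency first.
         */
        if (target_state->pwr_domain_state[1] == PLAT_MAX_OFF_STATE)
                hyp_exit_coherency();

        /* Program the hypothetical power controller to turn this cpu off */
        hyp_pwrc_cpu_off(read_mpidr_el1());

        /* And, if required by the coordinated state, the cluster as well */
        if (target_state->pwr_domain_state[1] == PLAT_MAX_OFF_STATE)
                hyp_pwrc_cluster_off(read_mpidr_el1());
}
#endif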