/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __PSCI_PRIVATE_H__
#define __PSCI_PRIVATE_H__

#include <arch.h>
#include <bakery_lock.h>
#include <bl_common.h>
#include <cpu_data.h>
#include <psci.h>
#include <spinlock.h>

#if HW_ASSISTED_COHERENCY
/*
 * On systems with hardware-assisted coherency, make PSCI cache operations NOP,
 * as PSCI participants are cache-coherent, and there's no need for explicit
 * cache maintenance operations or barriers to coordinate their state.
 */
#define psci_flush_dcache_range(addr, size)
#define psci_flush_cpu_data(member)
#define psci_inv_cpu_data(member)

#define psci_dsbish()
#else
/*
 * If not all PSCI participants are cache-coherent, perform cache maintenance
 * and issue barriers wherever required to coordinate state.
 */
#define psci_flush_dcache_range(addr, size)	flush_dcache_range(addr, size)
#define psci_flush_cpu_data(member)		flush_cpu_data(member)
#define psci_inv_cpu_data(member)		inv_cpu_data(member)

#define psci_dsbish()				dsbish()
#endif

/*
 * The following helper macros abstract the interface to the Bakery
 * Lock API.
 */
#define psci_lock_init(non_cpu_pd_node, idx)			\
	((non_cpu_pd_node)[(idx)].lock_index = (idx))
#define psci_lock_get(non_cpu_pd_node)				\
	bakery_lock_get(&psci_locks[(non_cpu_pd_node)->lock_index])
#define psci_lock_release(non_cpu_pd_node)			\
	bakery_lock_release(&psci_locks[(non_cpu_pd_node)->lock_index])
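
/*
 * Illustrative sketch only, not part of this interface: the lock and cache
 * helpers above are expected to bracket any update to a non-CPU power domain
 * node's state, roughly as below ('node' and 'new_state' are hypothetical
 * locals):
 *
 *	psci_lock_get(node);
 *	node->local_state = new_state;
 *	psci_flush_dcache_range((uintptr_t)node, sizeof(*node));
 *	psci_lock_release(node);
 *
 * On a HW_ASSISTED_COHERENCY build the flush expands to nothing, so the same
 * sequence works on both classes of system.
 */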

/*
 * The PSCI capabilities which are provided by the generic code and do not
 * depend on the platform or SPD capabilities.
 */
#define PSCI_GENERIC_CAP	\
	(define_psci_cap(PSCI_VERSION) |		\
	define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |	\
	define_psci_cap(PSCI_FEATURES))

/*
 * The PSCI capabilities mask for 64-bit functions.
 */
#define PSCI_CAP_64BIT_MASK	\
	(define_psci_cap(PSCI_CPU_SUSPEND_AARCH64) |	\
	define_psci_cap(PSCI_CPU_ON_AARCH64) |		\
	define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |	\
	define_psci_cap(PSCI_MIG_AARCH64) |		\
	define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64) |	\
	define_psci_cap(PSCI_NODE_HW_STATE_AARCH64) |	\
	define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64) |	\
	define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64) |	\
	define_psci_cap(PSCI_STAT_COUNT_AARCH64))

/*
 * Helper macros to get/set the fields of PSCI per-cpu data.
 */
#define psci_set_aff_info_state(aff_state) \
		set_cpu_data(psci_svc_cpu_data.aff_info_state, aff_state)
#define psci_get_aff_info_state() \
		get_cpu_data(psci_svc_cpu_data.aff_info_state)
#define psci_get_aff_info_state_by_idx(idx) \
		get_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state)
#define psci_set_aff_info_state_by_idx(idx, aff_state) \
		set_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state,\
					aff_state)
#define psci_get_suspend_pwrlvl() \
		get_cpu_data(psci_svc_cpu_data.target_pwrlvl)
#define psci_set_suspend_pwrlvl(target_lvl) \
		set_cpu_data(psci_svc_cpu_data.target_pwrlvl, target_lvl)
#define psci_set_cpu_local_state(state) \
		set_cpu_data(psci_svc_cpu_data.local_state, state)
#define psci_get_cpu_local_state() \
		get_cpu_data(psci_svc_cpu_data.local_state)
#define psci_get_cpu_local_state_by_idx(idx) \
		get_cpu_data_by_index(idx, psci_svc_cpu_data.local_state)

/*
 * Helper macros for the CPU-level spinlocks.
 */
#define psci_spin_lock_cpu(idx)		spin_lock(&psci_cpu_pd_nodes[idx].cpu_lock)
#define psci_spin_unlock_cpu(idx)	spin_unlock(&psci_cpu_pd_nodes[idx].cpu_lock)

/* Helper macro to identify a CPU standby request in a PSCI Suspend call */
#define is_cpu_standby_req(is_power_down_state, retn_lvl) \
		(((!(is_power_down_state)) && ((retn_lvl) == 0)) ? 1 : 0)

/*******************************************************************************
 * The following two data structures implement the power domain tree. The tree
 * is used to track the state of all the nodes i.e. power domain instances
 * described by the platform. The tree consists of nodes that describe CPU power
 * domains (i.e. leaf nodes) and all other power domains which are parents of a
 * CPU power domain (i.e. non-leaf nodes).
 ******************************************************************************/
typedef struct non_cpu_pwr_domain_node {
	/*
	 * Index of the first CPU power domain node level 0 which has this node
	 * as its parent.
	 */
	unsigned int cpu_start_idx;

	/*
	 * Number of CPU power domains which are siblings of the domain indexed
	 * by 'cpu_start_idx' i.e. all the domains in the range 'cpu_start_idx
	 * -> cpu_start_idx + ncpus' have this node as their parent.
	 */
	unsigned int ncpus;

	/*
	 * Index of the parent power domain node.
	 * TODO: Figure out whether using a pointer is more efficient.
	 */
	unsigned int parent_node;

	plat_local_state_t local_state;

	unsigned char level;

	/* For indexing the psci_lock array */
	unsigned char lock_index;
} non_cpu_pd_node_t;
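
/*
 * Illustrative sketch only: 'cpu_start_idx' and 'ncpus' describe the
 * contiguous range of CPU power domain nodes parented by a non-CPU node, so
 * a caller could scan the local states of that node's children along these
 * lines ('node', 'idx' and 'state' are hypothetical locals):
 *
 *	for (idx = node->cpu_start_idx;
 *	     idx < node->cpu_start_idx + node->ncpus; idx++)
 *		state = psci_get_cpu_local_state_by_idx(idx);
 *
 * The actual coordination logic lives in psci_common.c; this only shows how
 * the two fields are meant to be interpreted.
 */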

typedef struct cpu_pwr_domain_node {
	u_register_t mpidr;

	/*
	 * Index of the parent power domain node.
	 * TODO: Figure out whether using a pointer is more efficient.
	 */
	unsigned int parent_node;

	/*
	 * A CPU power domain does not require state coordination like its
	 * parent power domains. Hence this node does not include a bakery
	 * lock. A spinlock is required by the CPU_ON handler to prevent a race
	 * when multiple CPUs try to turn ON the same target CPU.
	 */
	spinlock_t cpu_lock;
} cpu_pd_node_t;

/*******************************************************************************
 * Data prototypes
 ******************************************************************************/
extern const plat_psci_ops_t *psci_plat_pm_ops;
extern non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS];
extern cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
extern unsigned int psci_caps;

/* One bakery lock is required for each non-CPU power domain. */
DECLARE_BAKERY_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);

/*******************************************************************************
 * SPD's power management hooks registered with PSCI
 ******************************************************************************/
extern const spd_pm_ops_t *psci_spd_pm;

/*******************************************************************************
 * Function prototypes
 ******************************************************************************/
/* Private exported functions from psci_common.c */
int psci_validate_power_state(unsigned int power_state,
			      psci_power_state_t *state_info);
void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info);
int psci_validate_mpidr(u_register_t mpidr);
void psci_init_req_local_pwr_states(void);
void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
				      psci_power_state_t *target_state);
int psci_validate_entry_point(entry_point_info_t *ep,
			      uintptr_t entrypoint, u_register_t context_id);
void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
				      unsigned int end_lvl,
				      unsigned int node_index[]);
void psci_do_state_coordination(unsigned int end_pwrlvl,
				psci_power_state_t *state_info);
void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
				   unsigned int cpu_idx);
void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
				   unsigned int cpu_idx);
int psci_validate_suspend_req(const psci_power_state_t *state_info,
			      unsigned int is_power_down_state_req);
unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info);
unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info);
void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl);
void psci_print_power_domain_map(void);
unsigned int psci_is_last_on_cpu(void);
int psci_spd_migrate_info(u_register_t *mpidr);

/* Private exported functions from psci_on.c */
int psci_cpu_on_start(u_register_t target_cpu,
		      entry_point_info_t *ep);

void psci_cpu_on_finish(unsigned int cpu_idx,
			psci_power_state_t *state_info);

/* Private exported functions from psci_off.c */
int psci_do_cpu_off(unsigned int end_pwrlvl);

/* Private exported functions from psci_suspend.c */
void psci_cpu_suspend_start(entry_point_info_t *ep,
			    unsigned int end_pwrlvl,
			    psci_power_state_t *state_info,
			    unsigned int is_power_down_state_req);

void psci_cpu_suspend_finish(unsigned int cpu_idx,
			     psci_power_state_t *state_info);
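
/*
 * Illustrative sketch only: the psci_common.c helpers declared above are
 * typically used together in a power-down path, in roughly this order
 * (locals hypothetical, intermediate steps elided):
 *
 *	psci_acquire_pwr_domain_locks(end_pwrlvl, cpu_idx);
 *	psci_do_state_coordination(end_pwrlvl, &state_info);
 *	...	platform power-down hooks run here
 *	psci_release_pwr_domain_locks(end_pwrlvl, cpu_idx);
 *
 * See psci_off.c and psci_suspend.c for the authoritative sequences.
 */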

/* Private exported functions from psci_helpers.S */
void psci_do_pwrdown_cache_maintenance(unsigned int pwr_level);
void psci_do_pwrup_cache_maintenance(void);

/* Private exported functions from psci_system_off.c */
void __dead2 psci_system_off(void);
void __dead2 psci_system_reset(void);

/* Private exported functions from psci_stat.c */
void psci_stats_update_pwr_down(unsigned int end_pwrlvl,
				const psci_power_state_t *state_info);
void psci_stats_update_pwr_up(unsigned int end_pwrlvl,
			      const psci_power_state_t *state_info);
u_register_t psci_stat_residency(u_register_t target_cpu,
				 unsigned int power_state);
u_register_t psci_stat_count(u_register_t target_cpu,
			     unsigned int power_state);

#endif /* __PSCI_PRIVATE_H__ */