xref: /rk3399_ARM-atf/lib/psci/psci_private.h (revision 510a9de79fe14460ec591bba4aa8790665c3f86a)
1 /*
2  * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * Redistributions of source code must retain the above copyright notice, this
8  * list of conditions and the following disclaimer.
9  *
10  * Redistributions in binary form must reproduce the above copyright notice,
11  * this list of conditions and the following disclaimer in the documentation
12  * and/or other materials provided with the distribution.
13  *
14  * Neither the name of ARM nor the names of its contributors may be used
15  * to endorse or promote products derived from this software without specific
16  * prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28  * POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #ifndef __PSCI_PRIVATE_H__
32 #define __PSCI_PRIVATE_H__
33 
34 #include <arch.h>
35 #include <bakery_lock.h>
36 #include <bl_common.h>
37 #include <cpu_data.h>
38 #include <psci.h>
39 #include <spinlock.h>
40 
#if HW_ASSISTED_COHERENCY

/*
 * On systems with hardware-assisted coherency, make PSCI cache operations NOP,
 * as PSCI participants are cache-coherent, and there's no need for explicit
 * cache maintenance operations or barriers to coordinate their state.
 */
#define psci_flush_dcache_range(addr, size)
#define psci_flush_cpu_data(member)
#define psci_inv_cpu_data(member)

#define psci_dsbish()

/*
 * On systems where participant CPUs are cache-coherent, we can use spinlocks
 * instead of bakery locks.
 */
#define DEFINE_PSCI_LOCK(_name)		spinlock_t _name
#define DECLARE_PSCI_LOCK(_name)	extern DEFINE_PSCI_LOCK(_name)

/*
 * Acquire/release the lock of a non-CPU power domain node. The argument is a
 * pointer to a non_cpu_pd_node_t; its lock_index selects the entry in the
 * global psci_locks array.
 */
#define psci_lock_get(non_cpu_pd_node)				\
	spin_lock(&psci_locks[(non_cpu_pd_node)->lock_index])
#define psci_lock_release(non_cpu_pd_node)			\
	spin_unlock(&psci_locks[(non_cpu_pd_node)->lock_index])

#else

/*
 * If not all PSCI participants are cache-coherent, perform cache maintenance
 * and issue barriers wherever required to coordinate state.
 */
#define psci_flush_dcache_range(addr, size)	flush_dcache_range(addr, size)
#define psci_flush_cpu_data(member)		flush_cpu_data(member)
#define psci_inv_cpu_data(member)		inv_cpu_data(member)

#define psci_dsbish()				dsbish()

/*
 * Use bakery locks for state coordination as not all PSCI participants are
 * cache coherent.
 */
#define DEFINE_PSCI_LOCK(_name)		DEFINE_BAKERY_LOCK(_name)
#define DECLARE_PSCI_LOCK(_name)	DECLARE_BAKERY_LOCK(_name)

/*
 * Acquire/release the lock of a non-CPU power domain node. Same interface as
 * the spinlock variant above, implemented with bakery locks instead.
 */
#define psci_lock_get(non_cpu_pd_node)				\
	bakery_lock_get(&psci_locks[(non_cpu_pd_node)->lock_index])
#define psci_lock_release(non_cpu_pd_node)			\
	bakery_lock_release(&psci_locks[(non_cpu_pd_node)->lock_index])

#endif
91 
/*
 * Initialise the lock of the node at index 'idx' in the non-CPU power domain
 * node array by recording that index as the node's lock_index, which the
 * psci_lock_get/release macros above use to select the psci_locks entry.
 */
#define psci_lock_init(non_cpu_pd_node, idx)			\
	((non_cpu_pd_node)[(idx)].lock_index = (idx))
94 
/*
 * The PSCI capabilities which are provided by the generic code and do not
 * depend on the platform or SPD capabilities.
 */
#define PSCI_GENERIC_CAP	\
			(define_psci_cap(PSCI_VERSION) |		\
			define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |	\
			define_psci_cap(PSCI_FEATURES))
103 
/*
 * The PSCI capabilities mask for the 64-bit (AARCH64) function IDs.
 */
#define PSCI_CAP_64BIT_MASK	\
			(define_psci_cap(PSCI_CPU_SUSPEND_AARCH64) |	\
			define_psci_cap(PSCI_CPU_ON_AARCH64) |		\
			define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |	\
			define_psci_cap(PSCI_MIG_AARCH64) |		\
			define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64) |	\
			define_psci_cap(PSCI_NODE_HW_STATE_AARCH64) |	\
			define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64) |	\
			define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64) |	\
			define_psci_cap(PSCI_STAT_COUNT_AARCH64))
117 
/*
 * Helper macros to get/set the fields of PSCI per-cpu data
 * (psci_svc_cpu_data, stored in the per-CPU data framework). The *_by_idx
 * variants address another CPU's data by its linear index; the others act on
 * the calling CPU's data.
 */
/* Affinity info state (aff_info_state field) of a CPU */
#define psci_set_aff_info_state(aff_state) \
		set_cpu_data(psci_svc_cpu_data.aff_info_state, aff_state)
#define psci_get_aff_info_state() \
		get_cpu_data(psci_svc_cpu_data.aff_info_state)
#define psci_get_aff_info_state_by_idx(idx) \
		get_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state)
#define psci_set_aff_info_state_by_idx(idx, aff_state) \
		set_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state,\
					aff_state)
/* Target power level (target_pwrlvl field) of a suspend request */
#define psci_get_suspend_pwrlvl() \
		get_cpu_data(psci_svc_cpu_data.target_pwrlvl)
#define psci_set_suspend_pwrlvl(target_lvl) \
		set_cpu_data(psci_svc_cpu_data.target_pwrlvl, target_lvl)
/* Local power state (local_state field) of the CPU power domain */
#define psci_set_cpu_local_state(state) \
		set_cpu_data(psci_svc_cpu_data.local_state, state)
#define psci_get_cpu_local_state() \
		get_cpu_data(psci_svc_cpu_data.local_state)
#define psci_get_cpu_local_state_by_idx(idx) \
		get_cpu_data_by_index(idx, psci_svc_cpu_data.local_state)
140 
/*
 * Helper macros for the CPU level spinlocks (the cpu_lock member of the CPU
 * power domain node at linear index 'idx').
 */
#define psci_spin_lock_cpu(idx)	spin_lock(&psci_cpu_pd_nodes[idx].cpu_lock)
#define psci_spin_unlock_cpu(idx) spin_unlock(&psci_cpu_pd_nodes[idx].cpu_lock)

/*
 * Helper macro to identify a CPU standby request in a PSCI Suspend call:
 * evaluates to 1 only when the requested state is a retention (i.e. not
 * power-down) state and the target retention level is the CPU level (0).
 */
#define is_cpu_standby_req(is_power_down_state, retn_lvl) \
		(((!(is_power_down_state)) && ((retn_lvl) == 0)) ? 1 : 0)
150 
/*******************************************************************************
 * The following two data structures implement the power domain tree. The tree
 * is used to track the state of all the nodes i.e. power domain instances
 * described by the platform. The tree consists of nodes that describe CPU power
 * domains i.e. leaf nodes and all other power domains which are parents of a
 * CPU power domain i.e. non-leaf nodes.
 ******************************************************************************/
typedef struct non_cpu_pwr_domain_node {
	/*
	 * Index of the first CPU power domain node level 0 which has this node
	 * as its parent.
	 */
	unsigned int cpu_start_idx;

	/*
	 * Number of CPU power domains which are siblings of the domain indexed
	 * by 'cpu_start_idx' i.e. all the domains in the range 'cpu_start_idx
	 * -> cpu_start_idx + ncpus' have this node as their parent.
	 */
	unsigned int ncpus;

	/*
	 * Index of the parent power domain node.
	 * TODO: Figure out whether using a pointer is more efficient.
	 */
	unsigned int parent_node;

	/* Current local power state of this power domain */
	plat_local_state_t local_state;

	/* Level of this node in the power domain tree */
	unsigned char level;

	/* For indexing the psci_lock array */
	unsigned char lock_index;
} non_cpu_pd_node_t;
185 
/* Leaf node of the power domain tree: a single CPU power domain */
typedef struct cpu_pwr_domain_node {
	/* MPIDR of the CPU this node describes */
	u_register_t mpidr;

	/*
	 * Index of the parent power domain node.
	 * TODO: Figure out whether using a pointer is more efficient.
	 */
	unsigned int parent_node;

	/*
	 * A CPU power domain does not require state coordination like its
	 * parent power domains. Hence this node does not include a bakery
	 * lock. A spinlock is required by the CPU_ON handler to prevent a race
	 * when multiple CPUs try to turn ON the same target CPU.
	 */
	spinlock_t cpu_lock;
} cpu_pd_node_t;
203 
/*******************************************************************************
 * Data prototypes
 ******************************************************************************/
/* Power management handlers registered by the platform port */
extern const plat_psci_ops_t *psci_plat_pm_ops;
/* The power domain tree: non-leaf nodes and CPU (leaf) nodes respectively */
extern non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS];
extern cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
/* Bitmask of supported PSCI functions, built from define_psci_cap() bits */
extern unsigned int psci_caps;

/* One lock is required per non-CPU power domain node */
DECLARE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
214 
/*******************************************************************************
 * SPD's power management hooks registered with PSCI
 ******************************************************************************/
extern const spd_pm_ops_t *psci_spd_pm;
219 
/*******************************************************************************
 * Function prototypes
 ******************************************************************************/
/* Private exported functions from psci_common.c */
/* Validate a CPU_SUSPEND power_state parameter; fills in state_info */
int psci_validate_power_state(unsigned int power_state,
			      psci_power_state_t *state_info);
/* Obtain the power state to use for SYSTEM_SUSPEND */
void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info);
/* Check that 'mpidr' corresponds to a CPU known to the platform */
int psci_validate_mpidr(u_register_t mpidr);
void psci_init_req_local_pwr_states(void);
void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
				      psci_power_state_t *target_state);
/* Validate and populate 'ep' from a caller-supplied entrypoint/context_id */
int psci_validate_entry_point(entry_point_info_t *ep,
			uintptr_t entrypoint, u_register_t context_id);
/*
 * Collect the indices of the ancestors of CPU 'cpu_idx' up to 'end_lvl' into
 * node_index[].
 */
void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
				      unsigned int end_lvl,
				      unsigned int node_index[]);
void psci_do_state_coordination(unsigned int end_pwrlvl,
				psci_power_state_t *state_info);
/*
 * Acquire/release the locks of all ancestor power domain nodes of 'cpu_idx'
 * up to 'end_pwrlvl'.
 */
void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
				   unsigned int cpu_idx);
void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
				   unsigned int cpu_idx);
int psci_validate_suspend_req(const psci_power_state_t *state_info,
			      unsigned int is_power_down_state_req);
unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info);
unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info);
void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl);
void psci_print_power_domain_map(void);
unsigned int psci_is_last_on_cpu(void);
int psci_spd_migrate_info(u_register_t *mpidr);
void psci_do_pwrdown_sequence(unsigned int power_level);

/*
 * CPU power down is directly called only when HW_ASSISTED_COHERENCY is
 * available. Otherwise, this needs post-call stack maintenance, which is
 * handled in assembly.
 */
void prepare_cpu_pwr_dwn(unsigned int power_level);
258 
/* Private exported functions from psci_on.c */
int psci_cpu_on_start(u_register_t target_cpu,
		      entry_point_info_t *ep);

/* Completes a CPU_ON on the freshly powered-on CPU itself */
void psci_cpu_on_finish(unsigned int cpu_idx,
			psci_power_state_t *state_info);

/* Private exported functions from psci_off.c */
int psci_do_cpu_off(unsigned int end_pwrlvl);

/* Private exported functions from psci_suspend.c */
void psci_cpu_suspend_start(entry_point_info_t *ep,
			unsigned int end_pwrlvl,
			psci_power_state_t *state_info,
			unsigned int is_power_down_state_req);

/* Completes a CPU_SUSPEND on the resuming CPU itself */
void psci_cpu_suspend_finish(unsigned int cpu_idx,
			psci_power_state_t *state_info);

/* Private exported functions from psci_helpers.S */
void psci_do_pwrdown_cache_maintenance(unsigned int pwr_level);
void psci_do_pwrup_cache_maintenance(void);

/* Private exported functions from psci_system_off.c (__dead2: never return) */
void __dead2 psci_system_off(void);
void __dead2 psci_system_reset(void);

/* Private exported functions from psci_stat.c */
void psci_stats_update_pwr_down(unsigned int end_pwrlvl,
			const psci_power_state_t *state_info);
void psci_stats_update_pwr_up(unsigned int end_pwrlvl,
			const psci_power_state_t *state_info);
u_register_t psci_stat_residency(u_register_t target_cpu,
			unsigned int power_state);
u_register_t psci_stat_count(u_register_t target_cpu,
			unsigned int power_state);

#endif /* __PSCI_PRIVATE_H__ */
297