1 /*
2 * Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7 #ifndef PSCI_PRIVATE_H
8 #define PSCI_PRIVATE_H
9
10 #include <stdbool.h>
11
12 #include <arch.h>
13 #include <arch_helpers.h>
14 #include <common/bl_common.h>
15 #include <lib/bakery_lock.h>
16 #include <lib/el3_runtime/cpu_data.h>
17 #include <lib/per_cpu/per_cpu.h>
18 #include <lib/psci/psci.h>
19 #include <lib/spinlock.h>
20
/*
 * The PSCI capabilities which are provided by the generic code and do not
 * depend on the platform or SPD capabilities.
 */
25 #define PSCI_GENERIC_CAP \
26 (define_psci_cap(PSCI_VERSION) | \
27 define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) | \
28 define_psci_cap(PSCI_FEATURES))
29
30 /*
31 * The PSCI capabilities mask for 64 bit functions.
32 */
33 #define PSCI_CAP_64BIT_MASK \
34 (define_psci_cap(PSCI_CPU_SUSPEND_AARCH64) | \
35 define_psci_cap(PSCI_CPU_ON_AARCH64) | \
36 define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) | \
37 define_psci_cap(PSCI_MIG_AARCH64) | \
38 define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64) | \
39 define_psci_cap(PSCI_NODE_HW_STATE_AARCH64) | \
40 define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64) | \
41 define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64) | \
42 define_psci_cap(PSCI_STAT_COUNT_AARCH64) | \
43 define_psci_cap(PSCI_SYSTEM_RESET2_AARCH64) | \
44 define_psci_cap(PSCI_MEM_CHK_RANGE_AARCH64))
45
/*
 * Internally PSCI uses a uint16_t for various CPU indexes, so define a
 * limit to the number of CPUs that can be initialised.
 */
49 #define PSCI_MAX_CPUS_INDEX 0xFFFFU
50
51 /* Invalid parent */
52 #define PSCI_PARENT_NODE_INVALID 0xFFFFFFFFU
53
54 /*
55 * Helper functions to get/set the fields of PSCI per-cpu data.
56 */
/* Record the affinity info state of the calling CPU in its per-cpu data. */
static inline void psci_set_aff_info_state(aff_info_state_t aff_state)
{
	set_cpu_data(psci_svc_cpu_data.aff_info_state, aff_state);
}
61
/* Return the affinity info state of the calling CPU from its per-cpu data. */
static inline aff_info_state_t psci_get_aff_info_state(void)
{
	return get_cpu_data(psci_svc_cpu_data.aff_info_state);
}
66
/* Return the affinity info state of the CPU identified by 'idx'. */
static inline aff_info_state_t psci_get_aff_info_state_by_idx(unsigned int idx)
{
	return get_cpu_data_by_index(idx,
			psci_svc_cpu_data.aff_info_state);
}
72
/* Set the affinity info state of the CPU identified by 'idx'. */
static inline void psci_set_aff_info_state_by_idx(unsigned int idx,
						  aff_info_state_t aff_state)
{
	set_cpu_data_by_index(idx,
			      psci_svc_cpu_data.aff_info_state, aff_state);
}
79
/* Return the target power level recorded for this CPU's suspend request. */
static inline unsigned int psci_get_suspend_pwrlvl(void)
{
	return get_cpu_data(psci_svc_cpu_data.target_pwrlvl);
}
84
/* Record the target power level for this CPU's suspend request. */
static inline void psci_set_suspend_pwrlvl(unsigned int target_lvl)
{
	set_cpu_data(psci_svc_cpu_data.target_pwrlvl, target_lvl);
}
89
/* Record the platform local power state of the calling CPU. */
static inline void psci_set_cpu_local_state(plat_local_state_t state)
{
	set_cpu_data(psci_svc_cpu_data.local_state, state);
}
94
/* Return the platform local power state of the calling CPU. */
static inline plat_local_state_t psci_get_cpu_local_state(void)
{
	return get_cpu_data(psci_svc_cpu_data.local_state);
}
99
/* Return the platform local power state of the CPU identified by 'idx'. */
static inline plat_local_state_t psci_get_cpu_local_state_by_idx(
		unsigned int idx)
{
	return get_cpu_data_by_index(idx,
			psci_svc_cpu_data.local_state);
}
106
/*
 * Helper function to identify a CPU standby request in a PSCI Suspend call.
 * A request is a standby request only when it neither asks for power down
 * nor targets a retention level above the CPU (level 0).
 */
static inline bool is_cpu_standby_req(unsigned int is_power_down_state,
				      unsigned int retn_lvl)
{
	if (is_power_down_state != 0U) {
		return false;
	}

	return (retn_lvl == 0U);
}
113
114 /*******************************************************************************
115 * The following two data structures implement the power domain tree. The tree
116 * is used to track the state of all the nodes i.e. power domain instances
117 * described by the platform. The tree consists of nodes that describe CPU power
118 * domains i.e. leaf nodes and all other power domains which are parents of a
119 * CPU power domain i.e. non-leaf nodes.
120 ******************************************************************************/
typedef struct non_cpu_pwr_domain_node {
	/*
	 * Index of the first CPU power domain node level 0 which has this node
	 * as its parent.
	 */
	unsigned int cpu_start_idx;

	/*
	 * Number of CPU power domains which are siblings of the domain indexed
	 * by 'cpu_start_idx' i.e. all the domains in the range 'cpu_start_idx
	 * -> cpu_start_idx + ncpus' have this node as their parent.
	 */
	unsigned int ncpus;

	/*
	 * Index of the parent power domain node.
	 * TODO: Figure out whether using a pointer is more efficient.
	 */
	unsigned int parent_node;

	/* Local power state of this power domain node */
	plat_local_state_t local_state;

	/* Level of this node in the power domain tree */
	unsigned char level;

	/* For indexing the psci_lock array */
	uint16_t lock_index;
} non_cpu_pd_node_t;
148
typedef struct cpu_pwr_domain_node {
	/* MPIDR of the CPU represented by this leaf node */
	u_register_t mpidr;

	/*
	 * Index of the parent power domain node.
	 * TODO: Figure out whether using a pointer is more efficient.
	 */
	unsigned int parent_node;

	/*
	 * A CPU power domain does not require state coordination like its
	 * parent power domains. Hence this node does not include a bakery
	 * lock. A spinlock is required by the CPU_ON handler to prevent a race
	 * when multiple CPUs try to turn ON the same target CPU.
	 */
	spinlock_t cpu_lock;
} cpu_pd_node_t;
166
#if PSCI_OS_INIT_MODE
/*******************************************************************************
 * The supported power state coordination modes that can be used in CPU_SUSPEND.
 ******************************************************************************/
typedef enum suspend_mode {
	PLAT_COORD = 0,		/* Platform-coordinated mode */
	OS_INIT = 1		/* OS-initiated mode */
} suspend_mode_t;
#endif
176
177 /*******************************************************************************
178 * The following are helpers and declarations of locks.
179 ******************************************************************************/
180 #if HW_ASSISTED_COHERENCY
181 /*
182 * On systems where participant CPUs are cache-coherent, we can use spinlocks
183 * instead of bakery locks.
184 */
185 #define DEFINE_PSCI_LOCK(_name) spinlock_t _name
186 #define DECLARE_PSCI_LOCK(_name) extern DEFINE_PSCI_LOCK(_name)
187
188 /* One lock is required per non-CPU power domain node */
189 DECLARE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
190
191 /*
192 * On systems with hardware-assisted coherency, make PSCI cache operations NOP,
193 * as PSCI participants are cache-coherent, and there's no need for explicit
194 * cache maintenance operations or barriers to coordinate their state.
195 */
/* No-op: cache maintenance is unnecessary when all participants are coherent. */
static inline void psci_flush_dcache_range(uintptr_t __unused addr,
					   size_t __unused size)
{
	/* Empty */
}
201
202 #define psci_flush_cpu_data(member)
203 #define psci_inv_cpu_data(member)
204
/* No-op: no barrier needed to coordinate state on coherent systems. */
static inline void psci_dsbish(void)
{
	/* Empty */
}
209
/* Acquire the spinlock protecting the given non-CPU power domain node. */
static inline void psci_lock_get(non_cpu_pd_node_t *non_cpu_pd_node)
{
	spinlock_t *lock = &psci_locks[non_cpu_pd_node->lock_index];

	spin_lock(lock);
}
214
/* Release the spinlock protecting the given non-CPU power domain node. */
static inline void psci_lock_release(non_cpu_pd_node_t *non_cpu_pd_node)
{
	spinlock_t *lock = &psci_locks[non_cpu_pd_node->lock_index];

	spin_unlock(lock);
}
219
220 #else /* if HW_ASSISTED_COHERENCY == 0 */
221 /*
222 * Use bakery locks for state coordination as not all PSCI participants are
223 * cache coherent.
224 */
225 #define DEFINE_PSCI_LOCK(_name) DEFINE_BAKERY_LOCK(_name)
226 #define DECLARE_PSCI_LOCK(_name) DECLARE_BAKERY_LOCK(_name)
227
228 /* One lock is required per non-CPU power domain node */
229 DECLARE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
230
231 /*
232 * If not all PSCI participants are cache-coherent, perform cache maintenance
233 * and issue barriers wherever required to coordinate state.
234 */
/* Flush the data cache for the given address range (see flush_dcache_range). */
static inline void psci_flush_dcache_range(uintptr_t addr, size_t size)
{
	flush_dcache_range(addr, size);
}
239
240 #define psci_flush_cpu_data(member) flush_cpu_data(member)
241 #define psci_inv_cpu_data(member) inv_cpu_data(member)
242
/* Issue a DSB ISH barrier to coordinate state across PSCI participants. */
static inline void psci_dsbish(void)
{
	dsbish();
}
247
/* Acquire the bakery lock protecting the given non-CPU power domain node. */
static inline void psci_lock_get(non_cpu_pd_node_t *non_cpu_pd_node)
{
	bakery_lock_get(&psci_locks[non_cpu_pd_node->lock_index]);
}
252
/* Release the bakery lock protecting the given non-CPU power domain node. */
static inline void psci_lock_release(non_cpu_pd_node_t *non_cpu_pd_node)
{
	bakery_lock_release(&psci_locks[non_cpu_pd_node->lock_index]);
}
257
258 #endif /* HW_ASSISTED_COHERENCY */
259
/*
 * Initialise the lock bookkeeping for the non-CPU power domain node at
 * position 'idx': the node records its own slot in the psci_locks array.
 */
static inline void psci_lock_init(non_cpu_pd_node_t *non_cpu_pd_node,
				  uint16_t idx)
{
	non_cpu_pd_node_t *node = &non_cpu_pd_node[idx];

	node->lock_index = idx;
}
265
266 /*******************************************************************************
267 * Data prototypes
268 ******************************************************************************/
269 extern const plat_psci_ops_t *psci_plat_pm_ops;
270 extern non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS];
271 PER_CPU_DECLARE(cpu_pd_node_t, psci_cpu_pd_nodes);
272 extern unsigned int psci_caps;
273 extern unsigned int psci_plat_core_count;
274 #if PSCI_OS_INIT_MODE
275 extern suspend_mode_t psci_suspend_mode;
276 #endif
277
278 /*******************************************************************************
279 * SPD's power management hooks registered with PSCI
280 ******************************************************************************/
281 extern const spd_pm_ops_t *psci_spd_pm;
282
283 /*******************************************************************************
284 * Function prototypes
285 ******************************************************************************/
286 /* Private exported functions from psci_common.c */
287 int psci_validate_power_state(unsigned int power_state,
288 psci_power_state_t *state_info);
289 void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info);
290 void psci_init_req_local_pwr_states(void);
291 #if PSCI_OS_INIT_MODE
292 void psci_update_req_local_pwr_states(unsigned int end_pwrlvl,
293 unsigned int cpu_idx,
294 psci_power_state_t *state_info,
295 plat_local_state_t *prev);
296 void psci_restore_req_local_pwr_states(unsigned int cpu_idx,
297 plat_local_state_t *prev);
298 #endif
299 void psci_get_target_local_pwr_states(unsigned int cpu_idx, unsigned int end_pwrlvl,
300 psci_power_state_t *target_state);
301 void psci_set_target_local_pwr_states(unsigned int cpu_idx, unsigned int end_pwrlvl,
302 const psci_power_state_t *target_state);
303 int psci_validate_entry_point(entry_point_info_t *ep,
304 uintptr_t entrypoint, u_register_t context_id);
305 void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
306 unsigned int end_lvl,
307 unsigned int *node_index);
308 void psci_do_state_coordination(unsigned int cpu_idx, unsigned int end_pwrlvl,
309 psci_power_state_t *state_info);
310 #if PSCI_OS_INIT_MODE
311 int psci_validate_state_coordination(unsigned int cpu_idx, unsigned int end_pwrlvl,
312 psci_power_state_t *state_info);
313 #endif
314 void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
315 const unsigned int *parent_nodes);
316 void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
317 const unsigned int *parent_nodes);
318 int psci_validate_suspend_req(const psci_power_state_t *state_info,
319 unsigned int is_power_down_state);
320 unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info);
321 unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info);
322 void psci_set_pwr_domains_to_run(unsigned int cpu_idx, unsigned int end_pwrlvl);
323 void psci_print_power_domain_map(void);
324 bool psci_is_last_on_cpu(unsigned int my_idx);
325 int psci_spd_migrate_info(u_register_t *mpidr);
326
327 /* This function applies various CPU errata during power down. */
328 void apply_cpu_pwr_dwn_errata(void);
329
330 /* Private exported functions from psci_on.c */
331 int psci_cpu_on_start(u_register_t target_cpu,
332 const entry_point_info_t *ep);
333
334 void psci_cpu_on_finish(unsigned int cpu_idx, const psci_power_state_t *state_info);
335
336 /* Private exported functions from psci_off.c */
337 int psci_do_cpu_off(unsigned int end_pwrlvl);
338
339 /* Private exported functions from psci_suspend.c */
340 int psci_cpu_suspend_start(unsigned int idx,
341 unsigned int end_pwrlvl,
342 psci_power_state_t *state_info,
343 unsigned int is_power_down_state);
344
345 void psci_cpu_suspend_to_powerdown_finish(unsigned int cpu_idx,
346 unsigned int max_off_lvl,
347 const psci_power_state_t *state_info,
348 bool abandon);
349
350 /* Private exported functions from psci_helpers.S */
351 void psci_do_pwrdown_cache_maintenance(void);
352 void psci_do_pwrup_cache_maintenance(void);
353
354 /* Private exported functions from psci_system_off.c */
355 void __dead2 psci_system_off(void);
356 void __dead2 psci_system_reset(void);
357 u_register_t psci_system_reset2(uint32_t reset_type, u_register_t cookie);
358
359 /* Private exported functions from psci_stat.c */
360 void psci_stats_update_pwr_down(unsigned int cpu_idx, unsigned int end_pwrlvl,
361 const psci_power_state_t *state_info);
362 void psci_stats_update_pwr_up(unsigned int cpu_idx, unsigned int end_pwrlvl,
363 const psci_power_state_t *state_info);
364 u_register_t psci_stat_residency(u_register_t target_cpu,
365 unsigned int power_state);
366 u_register_t psci_stat_count(u_register_t target_cpu,
367 unsigned int power_state);
368
369 /* Private exported functions from psci_mem_protect.c */
370 u_register_t psci_mem_protect(unsigned int enable);
371 u_register_t psci_mem_chk_range(uintptr_t base, u_register_t length);
372
373 #endif /* PSCI_PRIVATE_H */
374