xref: /rk3399_ARM-atf/lib/psci/psci_off.c (revision 06f3c7058c42a9f1a9f7df75ea2de71a000855e8)
/*
 * Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2023, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/arm/gic.h>
#include <lib/pmf/pmf.h>
#include <lib/runtime_instr.h>
#include <plat/common/platform.h>

#include "psci_private.h"

/******************************************************************************
 * Construct the psci_power_state to request power OFF at all power levels.
 ******************************************************************************/
static void psci_set_power_off_state(psci_power_state_t *state_info)
{
	unsigned int lvl;

	for (lvl = PSCI_CPU_PWR_LVL; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
		state_info->pwr_domain_state[lvl] = PLAT_MAX_OFF_STATE;
	}
}
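
/*
 * For illustration (values are platform defined and assumed here): on a
 * platform with PLAT_MAX_PWR_LVL == 2 and PLAT_MAX_OFF_STATE == 2, the loop
 * above yields pwr_domain_state[] = { 2, 2, 2 }, i.e. the deepest OFF state
 * is requested at every level, typically CPU, cluster and system.
 */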

/******************************************************************************
 * Top level handler which is called when a cpu wants to power itself down.
 * It is assumed that, along with turning the cpu power domain off, power
 * domains at higher levels will be turned off as far as possible. It finds
 * the highest level where a domain has to be powered off by traversing the
 * node information, and then performs the generic, architectural and
 * platform setup and state management required to turn OFF that power domain
 * and the domains below it. For example, for a cpu that is to be powered OFF
 * this could mean programming the power controller, whereas for a cluster
 * that is to be powered off it will call the platform specific code, which
 * will disable coherency at the interconnect level if the cpu is the last in
 * the cluster and also program the power controller.
 ******************************************************************************/
int psci_do_cpu_off(unsigned int end_pwrlvl)
{
	int rc = PSCI_E_SUCCESS;
	unsigned int idx = plat_my_core_pos();
	psci_power_state_t state_info;
	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};

	/*
	 * This function must only be called on platforms where the
	 * CPU_OFF platform hooks have been implemented.
	 */
	assert(psci_plat_pm_ops->pwr_domain_off != NULL);

	/* Construct the psci_power_state for CPU_OFF */
	psci_set_power_off_state(&state_info);

	/*
	 * Call the platform provided early CPU_OFF handler to allow
	 * platforms to perform any housekeeping activities before
	 * actually powering the CPU off. PSCI_E_DENIED indicates that
	 * the CPU off sequence should be aborted at this time.
	 */
	if (psci_plat_pm_ops->pwr_domain_off_early != NULL) {
		rc = psci_plat_pm_ops->pwr_domain_off_early(&state_info);
		if (rc == PSCI_E_DENIED) {
			return rc;
		}
	}
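
	/*
	 * A denial from the early hook is returned to the PSCI CPU_OFF caller
	 * as-is; an illustrative (hypothetical) sketch of such a hook appears
	 * at the end of this file.
	 */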

	/*
	 * Get the parent nodes here. It is important to do this before we
	 * initiate the power down sequence, because after that point the core
	 * may have exited coherency and its cache may be disabled; any access
	 * to shared memory after that (such as the parent node lookup in
	 * psci_cpu_pd_nodes) can cause coherency issues on some platforms.
	 */
	psci_get_parent_pwr_domain_nodes(idx, end_pwrlvl, parent_nodes);

	/*
	 * This function acquires the lock corresponding to each power
	 * level so that, by the time all locks are taken, a consistent
	 * snapshot of the system topology is held and state management can be
	 * done safely.
	 */
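	/*
	 * Note: the locks are expected to be taken bottom-up, from the CPU's
	 * lowest ancestor power domain up to end_pwrlvl, and are released in
	 * the reverse order later in this function.
	 */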
	psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);

	/*
	 * Call the cpu off handler registered by the Secure Payload Dispatcher
	 * to let it do any bookkeeping. It is assumed that the SPD always
	 * reports a PSCI_E_DENIED error if the SP refuses to power down.
	 */
	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_off != NULL)) {
		rc = psci_spd_pm->svc_off(0);
		if (rc != PSCI_E_SUCCESS) {
			goto off_exit;
		}
	}

	/*
	 * This function is passed the requested state info and it returns the
	 * negotiated state info for each power level up to the end level
	 * specified.
	 */
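	/*
	 * With the default coordination logic, the negotiated state at each
	 * level can be no deeper than the shallowest state requested by any
	 * CPU in that domain, so e.g. a cluster only turns off once its last
	 * running CPU goes down.
	 */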
	psci_do_state_coordination(idx, end_pwrlvl, &state_info);

	/* Update the target state in the power domain nodes */
	psci_set_target_local_pwr_states(idx, end_pwrlvl, &state_info);

#if ENABLE_PSCI_STAT
	/* Update the last cpu for each level up to end_pwrlvl */
	psci_stats_update_pwr_down(idx, end_pwrlvl, &state_info);
#endif

	/*
	 * Arch. management. Initiate power down sequence.
	 */
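	/*
	 * (Typically this runs the CPU driver's power down handlers and any
	 * cache maintenance needed for the deepest level being turned off.)
	 */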
	psci_pwrdown_cpu_start(psci_find_max_off_lvl(&state_info));

#if USE_GIC_DRIVER
	/* Disable the GIC CPU interface before we hand off to the platform. */
	gic_cpuif_disable(idx);
	/* We don't want any wakeups until this CPU is explicitly turned on. */
	gic_pcpu_off(idx);
#endif /* USE_GIC_DRIVER */

	/*
	 * Plat. management: Perform platform specific actions to turn this
	 * cpu off, e.g. exit cpu coherency, program the power controller etc.
	 */
	psci_plat_pm_ops->pwr_domain_off(&state_info);

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_start(&state_info);
#endif

off_exit:
	/*
	 * Release the locks corresponding to each power level in the
	 * reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);

	/*
	 * Check if all actions needed to safely power down this cpu have
	 * successfully completed.
	 */
	if (rc == PSCI_E_SUCCESS) {
		/*
		 * Set the affinity info state to OFF. When caches are disabled,
		 * this writes directly to main memory, so cache maintenance is
		 * required to ensure that later cached reads of aff_info_state
		 * return AFF_STATE_OFF. A dsbish() ensures ordering of the
		 * update to the affinity info state prior to cache line
		 * invalidation.
		 */
		psci_flush_cpu_data(psci_svc_cpu_data.aff_info_state);
		psci_set_aff_info_state(AFF_STATE_OFF);
		psci_dsbish();
		psci_inv_cpu_data(psci_svc_cpu_data.aff_info_state);

#if ENABLE_RUNTIME_INSTRUMENTATION
		/*
		 * Update the timestamp with cache off. We assume this
		 * timestamp can only be read from the current CPU and the
		 * timestamp cache line will be flushed before return to
		 * normal world on wakeup.
		 */
		PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		    RT_INSTR_ENTER_HW_LOW_PWR,
		    PMF_NO_CACHE_MAINT);
#endif
		if (psci_plat_pm_ops->pwr_domain_pwr_down != NULL) {
			/* This hook may not return; it may power the core down directly. */
			psci_plat_pm_ops->pwr_domain_pwr_down(&state_info);
		}

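		/*
		 * Terminal exit: by this point the core should be on its way
		 * down and this call is not expected to return.
		 */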
		psci_pwrdown_cpu_end_terminal();
	}

	return rc;
}
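
/*
 * Illustrative sketch only: the code below is not part of this file and the
 * plat_foo_* names are hypothetical. It merely shows the rough shape of the
 * CPU_OFF hooks that psci_do_cpu_off() invokes through psci_plat_pm_ops; a
 * real implementation lives in the platform port, not in generic PSCI code.
 */
#if 0
static int plat_foo_pwr_domain_off_early(const psci_power_state_t *target_state)
{
	/* A platform may veto CPU_OFF, e.g. while this core still owns work. */
	if (plat_foo_core_has_pending_work(plat_my_core_pos())) {
		return PSCI_E_DENIED;
	}

	return PSCI_E_SUCCESS;
}

static void plat_foo_pwr_domain_off(const psci_power_state_t *target_state)
{
	/* Exit coherency and program the power controller for this core. */
	plat_foo_exit_coherency();
	plat_foo_power_controller_cpu_off(plat_my_core_pos());
}
#endif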