xref: /rk3399_ARM-atf/lib/psci/psci_setup.c (revision 10bcd761574a5aaa208041382399e05275011603)
1 /*
2  * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * Redistributions of source code must retain the above copyright notice, this
8  * list of conditions and the following disclaimer.
9  *
10  * Redistributions in binary form must reproduce the above copyright notice,
11  * this list of conditions and the following disclaimer in the documentation
12  * and/or other materials provided with the distribution.
13  *
14  * Neither the name of ARM nor the names of its contributors may be used
15  * to endorse or promote products derived from this software without specific
16  * prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28  * POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <arch.h>
32 #include <arch_helpers.h>
33 #include <assert.h>
34 #include <bl_common.h>
35 #include <context.h>
36 #include <context_mgmt.h>
37 #include <errata_report.h>
38 #include <platform.h>
39 #include <stddef.h>
40 #include "psci_private.h"
41 
/*******************************************************************************
 * Per cpu non-secure contexts used to program the architectural state prior
 * return to the normal world. Indexed by core position; bound to each CPU's
 * context-management slot in psci_init_pwr_domain_node().
 * TODO: Use the memory allocator to set aside memory for the contexts instead
 * of relying on platform defined constants.
 ******************************************************************************/
static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT];

/******************************************************************************
 * Define the psci capability variable. Bitmask of the optional PSCI calls
 * this image supports; populated in psci_setup() from the handlers the
 * platform actually provides.
 *****************************************************************************/
unsigned int psci_caps;
54 
55 /*******************************************************************************
56  * Function which initializes the 'psci_non_cpu_pd_nodes' or the
57  * 'psci_cpu_pd_nodes' corresponding to the power level.
58  ******************************************************************************/
59 static void psci_init_pwr_domain_node(unsigned int node_idx,
60 					unsigned int parent_idx,
61 					unsigned int level)
62 {
63 	if (level > PSCI_CPU_PWR_LVL) {
64 		psci_non_cpu_pd_nodes[node_idx].level = level;
65 		psci_lock_init(psci_non_cpu_pd_nodes, node_idx);
66 		psci_non_cpu_pd_nodes[node_idx].parent_node = parent_idx;
67 		psci_non_cpu_pd_nodes[node_idx].local_state =
68 							 PLAT_MAX_OFF_STATE;
69 	} else {
70 		psci_cpu_data_t *svc_cpu_data;
71 
72 		psci_cpu_pd_nodes[node_idx].parent_node = parent_idx;
73 
74 		/* Initialize with an invalid mpidr */
75 		psci_cpu_pd_nodes[node_idx].mpidr = PSCI_INVALID_MPIDR;
76 
77 		svc_cpu_data =
78 			&(_cpu_data_by_index(node_idx)->psci_svc_cpu_data);
79 
80 		/* Set the Affinity Info for the cores as OFF */
81 		svc_cpu_data->aff_info_state = AFF_STATE_OFF;
82 
83 		/* Invalidate the suspend level for the cpu */
84 		svc_cpu_data->target_pwrlvl = PSCI_INVALID_PWR_LVL;
85 
86 		/* Set the power state to OFF state */
87 		svc_cpu_data->local_state = PLAT_MAX_OFF_STATE;
88 
89 		flush_dcache_range((uintptr_t)svc_cpu_data,
90 						 sizeof(*svc_cpu_data));
91 
92 		cm_set_context_by_index(node_idx,
93 					(void *) &psci_ns_context[node_idx],
94 					NON_SECURE);
95 	}
96 }
97 
98 /*******************************************************************************
99  * This functions updates cpu_start_idx and ncpus field for each of the node in
100  * psci_non_cpu_pd_nodes[]. It does so by comparing the parent nodes of each of
101  * the CPUs and check whether they match with the parent of the previous
102  * CPU. The basic assumption for this work is that children of the same parent
103  * are allocated adjacent indices. The platform should ensure this though proper
104  * mapping of the CPUs to indices via plat_core_pos_by_mpidr() and
105  * plat_my_core_pos() APIs.
106  *******************************************************************************/
107 static void psci_update_pwrlvl_limits(void)
108 {
109 	int j;
110 	unsigned int nodes_idx[PLAT_MAX_PWR_LVL] = {0};
111 	unsigned int temp_index[PLAT_MAX_PWR_LVL], cpu_idx;
112 
113 	for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
114 		psci_get_parent_pwr_domain_nodes(cpu_idx,
115 						 PLAT_MAX_PWR_LVL,
116 						 temp_index);
117 		for (j = PLAT_MAX_PWR_LVL - 1; j >= 0; j--) {
118 			if (temp_index[j] != nodes_idx[j]) {
119 				nodes_idx[j] = temp_index[j];
120 				psci_non_cpu_pd_nodes[nodes_idx[j]].cpu_start_idx
121 					= cpu_idx;
122 			}
123 			psci_non_cpu_pd_nodes[nodes_idx[j]].ncpus++;
124 		}
125 	}
126 }
127 
/*******************************************************************************
 * Core routine to populate the power domain tree. The tree descriptor passed by
 * the platform is populated breadth-first and the first entry in the map
 * informs the number of root power domains. The parent nodes of the root nodes
 * will point to an invalid entry(-1).
 *
 * 'topology' is the platform's flat breadth-first descriptor: entry i gives
 * the number of children of node i. Nodes above the CPU level land in
 * psci_non_cpu_pd_nodes[]; CPU-level nodes land in psci_cpu_pd_nodes[].
 ******************************************************************************/
static void populate_power_domain_tree(const unsigned char *topology)
{
	unsigned int i, j = 0, num_nodes_at_lvl = 1, num_nodes_at_next_lvl;
	unsigned int node_index = 0, parent_node_index = 0, num_children;
	int level = PLAT_MAX_PWR_LVL;

	/*
	 * For each level the inputs are:
	 * - number of nodes at this level in plat_array i.e. num_nodes_at_level
	 *   This is the sum of values of nodes at the parent level.
	 * - Index of first entry at this level in the plat_array i.e.
	 *   parent_node_index.
	 * - Index of first free entry in psci_non_cpu_pd_nodes[] or
	 *   psci_cpu_pd_nodes[] i.e. node_index depending upon the level.
	 */
	while (level >= PSCI_CPU_PWR_LVL) {
		num_nodes_at_next_lvl = 0;
		/*
		 * For each entry (parent node) at this level in the plat_array:
		 * - Find the number of children
		 * - Allocate a node in a power domain array for each child
		 * - Set the parent of the child to the parent_node_index - 1
		 * - Increment parent_node_index to point to the next parent
		 * - Accumulate the number of children at next level.
		 */
		for (i = 0; i < num_nodes_at_lvl; i++) {
			assert(parent_node_index <=
					PSCI_NUM_NON_CPU_PWR_DOMAINS);
			num_children = topology[parent_node_index];

			/*
			 * parent_node_index - 1 maps the descriptor position
			 * onto the node-array index; for the root nodes this
			 * yields the invalid parent value (-1), as stated in
			 * the header comment above.
			 */
			for (j = node_index;
				j < node_index + num_children; j++)
				psci_init_pwr_domain_node(j,
							  parent_node_index - 1,
							  level);

			node_index = j;
			num_nodes_at_next_lvl += num_children;
			parent_node_index++;
		}

		num_nodes_at_lvl = num_nodes_at_next_lvl;
		level--;

		/* Reset the index for the cpu power domain array */
		if (level == PSCI_CPU_PWR_LVL)
			node_index = 0;
	}

	/*
	 * Validate the sanity of array exported by the platform: after the
	 * CPU level has been processed, 'j' is one past the last CPU node
	 * allocated, so it must equal the platform core count.
	 */
	assert(j == PLATFORM_CORE_COUNT);
}
186 
/*******************************************************************************
 * This function does the architectural setup and takes the warm boot
 * entry-point `mailbox_ep` as an argument. The function also initializes the
 * power domain topology tree by querying the platform. The power domain nodes
 * higher than the CPU are populated in the array psci_non_cpu_pd_nodes[] and
 * the CPU power domains are populated in psci_cpu_pd_nodes[]. The platform
 * exports its static topology map through the
 * populate_power_domain_topology_tree() API. The algorithm populates the
 * psci_non_cpu_pd_nodes and psci_cpu_pd_nodes iteratively by using this
 * topology map.  On a platform that implements two clusters of 2 cpus each,
 * and supporting 3 domain levels, the populated psci_non_cpu_pd_nodes would
 * look like this:
 *
 * ---------------------------------------------------
 * | system node | cluster 0 node  | cluster 1 node  |
 * ---------------------------------------------------
 *
 * And populated psci_cpu_pd_nodes would look like this :
 * <-    cpus cluster0   -><-   cpus cluster1   ->
 * ------------------------------------------------
 * |   CPU 0   |   CPU 1   |   CPU 2   |   CPU 3  |
 * ------------------------------------------------
 *
 * Always returns 0. The steps below are order-dependent and must not be
 * rearranged.
 ******************************************************************************/
int psci_setup(const psci_lib_args_t *lib_args)
{
	const unsigned char *topology_tree;

	/* The library arguments must be a valid version-1 structure */
	assert(VERIFY_PSCI_LIB_ARGS_V1(lib_args));

	/* Do the Architectural initialization */
	psci_arch_setup();

	/* Query the topology map from the platform */
	topology_tree = plat_get_power_domain_tree_desc();

	/* Populate the power domain arrays using the platform topology map */
	populate_power_domain_tree(topology_tree);

	/* Update the CPU limits for each node in psci_non_cpu_pd_nodes */
	psci_update_pwrlvl_limits();

	/* Populate the mpidr field of cpu node for this CPU */
	psci_cpu_pd_nodes[plat_my_core_pos()].mpidr =
		read_mpidr() & MPIDR_AFFINITY_MASK;

	/* Initialize the requested local power state bookkeeping */
	psci_init_req_local_pwr_states();

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domain levels for this CPU to run.
	 */
	psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);

	/*
	 * Hand the warm boot entry point to the platform and obtain its
	 * power management handlers.
	 */
	plat_setup_psci_ops((uintptr_t)lib_args->mailbox_ep, &psci_plat_pm_ops);
	assert(psci_plat_pm_ops);

	/*
	 * Flush `psci_plat_pm_ops` as it will be accessed by secondary CPUs
	 * during warm boot before data cache is enabled.
	 */
	flush_dcache_range((uintptr_t)&psci_plat_pm_ops,
					sizeof(psci_plat_pm_ops));

	/*
	 * Initialize the psci capability: mandatory calls first, then
	 * advertise each optional PSCI function only if the platform
	 * supplied the corresponding handler(s).
	 */
	psci_caps = PSCI_GENERIC_CAP;

	if (psci_plat_pm_ops->pwr_domain_off)
		psci_caps |=  define_psci_cap(PSCI_CPU_OFF);
	if (psci_plat_pm_ops->pwr_domain_on &&
			psci_plat_pm_ops->pwr_domain_on_finish)
		psci_caps |=  define_psci_cap(PSCI_CPU_ON_AARCH64);
	if (psci_plat_pm_ops->pwr_domain_suspend &&
			psci_plat_pm_ops->pwr_domain_suspend_finish) {
		psci_caps |=  define_psci_cap(PSCI_CPU_SUSPEND_AARCH64);
		/* SYSTEM_SUSPEND additionally needs the system power state */
		if (psci_plat_pm_ops->get_sys_suspend_power_state)
			psci_caps |=  define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64);
	}
	if (psci_plat_pm_ops->system_off)
		psci_caps |=  define_psci_cap(PSCI_SYSTEM_OFF);
	if (psci_plat_pm_ops->system_reset)
		psci_caps |=  define_psci_cap(PSCI_SYSTEM_RESET);
	if (psci_plat_pm_ops->get_node_hw_state)
		psci_caps |= define_psci_cap(PSCI_NODE_HW_STATE_AARCH64);

#if ENABLE_PSCI_STAT
	/* PSCI statistics calls are available when built with stat support */
	psci_caps |=  define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64);
	psci_caps |=  define_psci_cap(PSCI_STAT_COUNT_AARCH64);
#endif

	return 0;
}
278 
/*******************************************************************************
 * This duplicates what the primary cpu did after a cold boot in BL1. The same
 * needs to be done when a cpu is hotplugged in. This function could also over-
 * ride any EL3 setup done by BL1 as this code resides in rw memory.
 *
 * Note the ordering: init_cpu_ops() must run before print_errata_status(),
 * since the errata report is driven by the cpu_ops pointer.
 ******************************************************************************/
void psci_arch_setup(void)
{
	/* Program the counter frequency */
	write_cntfrq_el0(plat_get_syscnt_freq2());

	/* Initialize the cpu_ops pointer. */
	init_cpu_ops();

	/* Having initialized cpu_ops, we can now print errata status */
	print_errata_status();
}
295 
/******************************************************************************
 * PSCI Library interface to initialize the cpu context for the next non
 * secure image during cold boot. The relevant registers in the cpu context
 * need to be retrieved and programmed on return from this interface.
 *
 * `next_image_info` must describe a NON_SECURE image (asserted below); the
 * context manager initializes this CPU's context from it and prepares the
 * EL3 exit state for the non-secure world.
 *****************************************************************************/
void psci_prepare_next_non_secure_ctx(entry_point_info_t *next_image_info)
{
	/* Only non-secure images may be prepared through this interface */
	assert(GET_SECURITY_STATE(next_image_info->h.attr) == NON_SECURE);
	cm_init_my_context(next_image_info);
	cm_prepare_el3_exit(NON_SECURE);
}
307