xref: /rk3399_ARM-atf/lib/psci/psci_setup.c (revision 048d802a087b2c52bd5e817cb8e0bc52ea6260ad)
1 /*
2  * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * Redistributions of source code must retain the above copyright notice, this
8  * list of conditions and the following disclaimer.
9  *
10  * Redistributions in binary form must reproduce the above copyright notice,
11  * this list of conditions and the following disclaimer in the documentation
12  * and/or other materials provided with the distribution.
13  *
14  * Neither the name of ARM nor the names of its contributors may be used
15  * to endorse or promote products derived from this software without specific
16  * prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28  * POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <arch.h>
32 #include <arch_helpers.h>
33 #include <assert.h>
34 #include <bl_common.h>
35 #include <context.h>
36 #include <context_mgmt.h>
37 #include <platform.h>
38 #include <stddef.h>
39 #include "psci_private.h"
40 
41 /*******************************************************************************
42  * Per cpu non-secure contexts used to program the architectural state prior
43  * return to the normal world.
44  * TODO: Use the memory allocator to set aside memory for the contexts instead
45  * of relying on platform defined constants.
46  ******************************************************************************/
47 static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT];
48 
49 /******************************************************************************
50  * Define the psci capability variable.
51  *****************************************************************************/
52 unsigned int psci_caps;
53 
54 /*******************************************************************************
55  * Function which initializes the 'psci_non_cpu_pd_nodes' or the
56  * 'psci_cpu_pd_nodes' corresponding to the power level.
57  ******************************************************************************/
58 static void psci_init_pwr_domain_node(unsigned int node_idx,
59 					unsigned int parent_idx,
60 					unsigned int level)
61 {
62 	if (level > PSCI_CPU_PWR_LVL) {
63 		psci_non_cpu_pd_nodes[node_idx].level = level;
64 		psci_lock_init(psci_non_cpu_pd_nodes, node_idx);
65 		psci_non_cpu_pd_nodes[node_idx].parent_node = parent_idx;
66 		psci_non_cpu_pd_nodes[node_idx].local_state =
67 							 PLAT_MAX_OFF_STATE;
68 	} else {
69 		psci_cpu_data_t *svc_cpu_data;
70 
71 		psci_cpu_pd_nodes[node_idx].parent_node = parent_idx;
72 
73 		/* Initialize with an invalid mpidr */
74 		psci_cpu_pd_nodes[node_idx].mpidr = PSCI_INVALID_MPIDR;
75 
76 		svc_cpu_data =
77 			&(_cpu_data_by_index(node_idx)->psci_svc_cpu_data);
78 
79 		/* Set the Affinity Info for the cores as OFF */
80 		svc_cpu_data->aff_info_state = AFF_STATE_OFF;
81 
82 		/* Invalidate the suspend level for the cpu */
83 		svc_cpu_data->target_pwrlvl = PSCI_INVALID_PWR_LVL;
84 
85 		/* Set the power state to OFF state */
86 		svc_cpu_data->local_state = PLAT_MAX_OFF_STATE;
87 
88 		flush_dcache_range((uintptr_t)svc_cpu_data,
89 						 sizeof(*svc_cpu_data));
90 
91 		cm_set_context_by_index(node_idx,
92 					(void *) &psci_ns_context[node_idx],
93 					NON_SECURE);
94 	}
95 }
96 
/*******************************************************************************
 * This function updates the cpu_start_idx and ncpus fields for each of the
 * nodes in psci_non_cpu_pd_nodes[]. It does so by comparing the parent nodes
 * of each of the CPUs and checking whether they match with the parent of the
 * previous CPU. The basic assumption for this work is that children of the
 * same parent are allocated adjacent indices. The platform should ensure this
 * through proper mapping of the CPUs to indices via plat_core_pos_by_mpidr()
 * and plat_my_core_pos() APIs.
 *******************************************************************************/
106 static void psci_update_pwrlvl_limits(void)
107 {
108 	int j;
109 	unsigned int nodes_idx[PLAT_MAX_PWR_LVL] = {0};
110 	unsigned int temp_index[PLAT_MAX_PWR_LVL], cpu_idx;
111 
112 	for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
113 		psci_get_parent_pwr_domain_nodes(cpu_idx,
114 						 PLAT_MAX_PWR_LVL,
115 						 temp_index);
116 		for (j = PLAT_MAX_PWR_LVL - 1; j >= 0; j--) {
117 			if (temp_index[j] != nodes_idx[j]) {
118 				nodes_idx[j] = temp_index[j];
119 				psci_non_cpu_pd_nodes[nodes_idx[j]].cpu_start_idx
120 					= cpu_idx;
121 			}
122 			psci_non_cpu_pd_nodes[nodes_idx[j]].ncpus++;
123 		}
124 	}
125 }
126 
127 /*******************************************************************************
128  * Core routine to populate the power domain tree. The tree descriptor passed by
129  * the platform is populated breadth-first and the first entry in the map
130  * informs the number of root power domains. The parent nodes of the root nodes
131  * will point to an invalid entry(-1).
132  ******************************************************************************/
static void populate_power_domain_tree(const unsigned char *topology)
{
	unsigned int i, j = 0, num_nodes_at_lvl = 1, num_nodes_at_next_lvl;
	unsigned int node_index = 0, parent_node_index = 0, num_children;
	int level = PLAT_MAX_PWR_LVL;

	/*
	 * For each level the inputs are:
	 * - number of nodes at this level in plat_array i.e. num_nodes_at_level
	 *   This is the sum of values of nodes at the parent level.
	 * - Index of first entry at this level in the plat_array i.e.
	 *   parent_node_index.
	 * - Index of first free entry in psci_non_cpu_pd_nodes[] or
	 *   psci_cpu_pd_nodes[] i.e. node_index depending upon the level.
	 */
	while (level >= PSCI_CPU_PWR_LVL) {
		num_nodes_at_next_lvl = 0;
		/*
		 * For each entry (parent node) at this level in the plat_array:
		 * - Find the number of children
		 * - Allocate a node in a power domain array for each child
		 * - Set the parent of the child to the parent_node_index - 1
		 * - Increment parent_node_index to point to the next parent
		 * - Accumulate the number of children at next level.
		 */
		for (i = 0; i < num_nodes_at_lvl; i++) {
			/* The topology descriptor must not be overrun */
			assert(parent_node_index <=
					PSCI_NUM_NON_CPU_PWR_DOMAINS);
			num_children = topology[parent_node_index];

			/*
			 * parent_node_index - 1 converts the plat_array
			 * position into the psci_non_cpu_pd_nodes[] index of
			 * the parent (entry 0 of plat_array is the number of
			 * root domains, so array indices are offset by one).
			 */
			for (j = node_index;
				j < node_index + num_children; j++)
				psci_init_pwr_domain_node(j,
							  parent_node_index - 1,
							  level);

			node_index = j;
			num_nodes_at_next_lvl += num_children;
			parent_node_index++;
		}

		num_nodes_at_lvl = num_nodes_at_next_lvl;
		level--;

		/* Reset the index for the cpu power domain array */
		if (level == PSCI_CPU_PWR_LVL)
			node_index = 0;
	}

	/*
	 * Validate the sanity of array exported by the platform: after the
	 * final (CPU-level) iteration, 'j' is one past the last CPU node
	 * allocated and must equal the platform core count.
	 */
	assert(j == PLATFORM_CORE_COUNT);
}
185 
186 /*******************************************************************************
187  * This function does the architectural setup and takes the warm boot
188  * entry-point `mailbox_ep` as an argument. The function also initializes the
189  * power domain topology tree by querying the platform. The power domain nodes
190  * higher than the CPU are populated in the array psci_non_cpu_pd_nodes[] and
191  * the CPU power domains are populated in psci_cpu_pd_nodes[]. The platform
192  * exports its static topology map through the
193  * populate_power_domain_topology_tree() API. The algorithm populates the
194  * psci_non_cpu_pd_nodes and psci_cpu_pd_nodes iteratively by using this
195  * topology map.  On a platform that implements two clusters of 2 cpus each,
196  * and supporting 3 domain levels, the populated psci_non_cpu_pd_nodes would
197  * look like this:
198  *
199  * ---------------------------------------------------
200  * | system node | cluster 0 node  | cluster 1 node  |
201  * ---------------------------------------------------
202  *
203  * And populated psci_cpu_pd_nodes would look like this :
204  * <-    cpus cluster0   -><-   cpus cluster1   ->
205  * ------------------------------------------------
206  * |   CPU 0   |   CPU 1   |   CPU 2   |   CPU 3  |
207  * ------------------------------------------------
208  ******************************************************************************/
/*
 * mailbox_ep: warm boot (reset) entry point handed to the platform; must be
 * non-zero. Returns 0 on success (the function asserts rather than returning
 * an error on invalid platform data).
 */
int psci_setup(uintptr_t mailbox_ep)
{
	const unsigned char *topology_tree;

	/* Do the Architectural initialization */
	psci_arch_setup();

	/* Query the topology map from the platform */
	topology_tree = plat_get_power_domain_tree_desc();

	/* Populate the power domain arrays using the platform topology map */
	populate_power_domain_tree(topology_tree);

	/* Update the CPU limits for each node in psci_non_cpu_pd_nodes */
	psci_update_pwrlvl_limits();

	/* Populate the mpidr field of cpu node for this CPU */
	psci_cpu_pd_nodes[plat_my_core_pos()].mpidr =
		read_mpidr() & MPIDR_AFFINITY_MASK;

	psci_init_req_local_pwr_states();

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domain levels for this CPU to run.
	 */
	psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);

	/* A zero warm boot entry point is invalid */
	assert(mailbox_ep);
	plat_setup_psci_ops(mailbox_ep, &psci_plat_pm_ops);
	/* The platform must export a pm ops structure */
	assert(psci_plat_pm_ops);

	/*
	 * Flush `psci_plat_pm_ops` as it will be accessed by secondary CPUs
	 * during warm boot before data cache is enabled.
	 */
	flush_dcache_range((uintptr_t)&psci_plat_pm_ops,
					sizeof(psci_plat_pm_ops));

	/*
	 * Initialize the psci capability: start from the mandatory calls and
	 * advertise each optional call only if the platform provides the
	 * corresponding handler(s).
	 */
	psci_caps = PSCI_GENERIC_CAP;

	if (psci_plat_pm_ops->pwr_domain_off)
		psci_caps |=  define_psci_cap(PSCI_CPU_OFF);
	if (psci_plat_pm_ops->pwr_domain_on &&
			psci_plat_pm_ops->pwr_domain_on_finish)
		psci_caps |=  define_psci_cap(PSCI_CPU_ON_AARCH64);
	if (psci_plat_pm_ops->pwr_domain_suspend &&
			psci_plat_pm_ops->pwr_domain_suspend_finish) {
		psci_caps |=  define_psci_cap(PSCI_CPU_SUSPEND_AARCH64);
		/* SYSTEM_SUSPEND additionally needs the system power state */
		if (psci_plat_pm_ops->get_sys_suspend_power_state)
			psci_caps |=  define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64);
	}
	if (psci_plat_pm_ops->system_off)
		psci_caps |=  define_psci_cap(PSCI_SYSTEM_OFF);
	if (psci_plat_pm_ops->system_reset)
		psci_caps |=  define_psci_cap(PSCI_SYSTEM_RESET);

#if ENABLE_PSCI_STAT
	psci_caps |=  define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64);
	psci_caps |=  define_psci_cap(PSCI_STAT_COUNT_AARCH64);
#endif

	return 0;
}
274 
275 /*******************************************************************************
276  * This duplicates what the primary cpu did after a cold boot in BL1. The same
277  * needs to be done when a cpu is hotplugged in. This function could also over-
278  * ride any EL3 setup done by BL1 as this code resides in rw memory.
279  ******************************************************************************/
void psci_arch_setup(void)
{
	/* Program the counter frequency */
	write_cntfrq_el0(plat_get_syscnt_freq2());

	/* Initialize the cpu_ops pointer. */
	init_cpu_ops();
}
288 
289 /******************************************************************************
290  * PSCI Library interface to initialize the cpu context for the next non
291  * secure image during cold boot. The relevant registers in the cpu context
292  * need to be retrieved and programmed on return from this interface.
293  *****************************************************************************/
/*
 * next_image_info: entry point description of the next image; it must be
 * marked non-secure. The context is initialized for this CPU and EL3 system
 * registers are programmed for an exit to the non-secure world.
 */
void psci_prepare_next_non_secure_ctx(entry_point_info_t *next_image_info)
{
	/* Only a non-secure image may be prepared through this interface */
	assert(GET_SECURITY_STATE(next_image_info->h.attr) == NON_SECURE);
	cm_init_my_context(next_image_info);
	cm_prepare_el3_exit(NON_SECURE);
}
300