xref: /rk3399_ARM-atf/plat/xilinx/zynqmp/plat_psci.c (revision 6331a31a66cdcf53421d3dccd3067f072c6da175)
/*
 * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <errno.h>
#include <gicv2.h>
#include <mmio.h>
#include <plat_arm.h>
#include <platform.h>
#include <psci.h>
#include "pm_api_sys.h"
#include "pm_client.h"
#include "zynqmp_private.h"

/*
 * Secure (warm boot) entry point for powered-on and resumed cores, handed
 * to this file by the PSCI framework through plat_setup_psci_ops().
 */
uintptr_t zynqmp_sec_entry;

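/*
 * Shallow standby for the calling core: the core stays powered and simply
 * waits for an interrupt. The dsb() ensures outstanding memory accesses
 * complete before the wfi().
 */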
void zynqmp_cpu_standby(plat_local_state_t cpu_state)
{
	VERBOSE("%s: cpu_state: 0x%x\n", __func__, cpu_state);

	dsb();
	wfi();
}

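/*
 * Power on a core without help from the PMU firmware. EL3 drives the APU
 * registers directly: the warm entry point is written to the core's RVBAR,
 * VINITHI and any pending power-down request are cleared, the core's power
 * island is brought up through the PMU_GLOBAL request registers, and the
 * core is finally released from reset.
 */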
static int zynqmp_nopmu_pwr_domain_on(u_register_t mpidr)
{
	uint32_t r;
	int cpu_id = plat_core_pos_by_mpidr(mpidr);

	VERBOSE("%s: mpidr: 0x%lx\n", __func__, mpidr);

	if (cpu_id < 0)
		return PSCI_E_INTERN_FAIL;

	/* program RVBAR */
	mmio_write_32(APU_RVBAR_L_0 + (cpu_id << 3), zynqmp_sec_entry);
	mmio_write_32(APU_RVBAR_H_0 + (cpu_id << 3), zynqmp_sec_entry >> 32);

	/* clear VINITHI */
	r = mmio_read_32(APU_CONFIG_0);
	r &= ~(1 << APU_CONFIG_0_VINITHI_SHIFT << cpu_id);
	mmio_write_32(APU_CONFIG_0, r);

	/* clear power down request */
	r = mmio_read_32(APU_PWRCTL);
	r &= ~(1 << cpu_id);
	mmio_write_32(APU_PWRCTL, r);

	/* power up island */
	mmio_write_32(PMU_GLOBAL_REQ_PWRUP_EN, 1 << cpu_id);
	mmio_write_32(PMU_GLOBAL_REQ_PWRUP_TRIG, 1 << cpu_id);
	/* FIXME: we should have a way to break out */
	while (mmio_read_32(PMU_GLOBAL_REQ_PWRUP_STATUS) & (1 << cpu_id))
		;

	/* release core reset */
	r = mmio_read_32(CRF_APB_RST_FPD_APU);
	r &= ~((CRF_APB_RST_FPD_APU_ACPU_PWRON_RESET |
			CRF_APB_RST_FPD_APU_ACPU_RESET) << cpu_id);
	mmio_write_32(CRF_APB_RST_FPD_APU, r);

	return PSCI_E_SUCCESS;
}

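/*
 * Power on a core through the PMU firmware. The request is sent over the
 * PM API; zynqmp_sec_entry is passed as the resume address and the non-zero
 * second argument to pm_req_wakeup() marks that address as valid.
 */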
static int zynqmp_pwr_domain_on(u_register_t mpidr)
{
	int cpu_id = plat_core_pos_by_mpidr(mpidr);
	const struct pm_proc *proc;

	VERBOSE("%s: mpidr: 0x%lx\n", __func__, mpidr);

	if (cpu_id < 0)
		return PSCI_E_INTERN_FAIL;

	proc = pm_get_proc(cpu_id);

	/* Send request to PMU to wake up selected APU CPU core */
	pm_req_wakeup(proc->node_id, 1, zynqmp_sec_entry, REQ_ACK_NO);

	return PSCI_E_SUCCESS;
}

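/*
 * Power off the calling core without PMU firmware: disable the GIC CPU
 * interface so no interrupt can wake the core spuriously, then flag a
 * power-down request for this core in APU_PWRCTL. The PSCI framework issues
 * the final wfi after this handler returns.
 */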
static void zynqmp_nopmu_pwr_domain_off(const psci_power_state_t *target_state)
{
	uint32_t r;
	unsigned int cpu_id = plat_my_core_pos();

	for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
		VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
			__func__, i, target_state->pwr_domain_state[i]);

	/* Prevent interrupts from spuriously waking up this cpu */
	gicv2_cpuif_disable();

	/* set power down request */
	r = mmio_read_32(APU_PWRCTL);
	r |= (1 << cpu_id);
	mmio_write_32(APU_PWRCTL, r);
}

static void zynqmp_pwr_domain_off(const psci_power_state_t *target_state)
{
	unsigned int cpu_id = plat_my_core_pos();
	const struct pm_proc *proc = pm_get_proc(cpu_id);

	for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
		VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
			__func__, i, target_state->pwr_domain_state[i]);

	/* Prevent interrupts from spuriously waking up this cpu */
	gicv2_cpuif_disable();

	/*
	 * Ask the PMU to power down this APU CPU core.
	 * Per the PSCI specification, CPU_OFF does not take a resume
	 * address; the core can only be brought back by CPU_ON, which
	 * supplies the entry point. Hence the resume address here is 0.
	 */
	pm_self_suspend(proc->node_id, MAX_LATENCY, 0, 0);
}

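/*
 * Suspend the calling core without PMU firmware. Unlike the off path, the
 * power-up-on-IRQ request for this core is enabled and RVBAR is programmed
 * with the warm entry point, so a wake-up interrupt restarts the core at
 * zynqmp_sec_entry.
 */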
static void zynqmp_nopmu_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	uint32_t r;
	unsigned int cpu_id = plat_my_core_pos();

	for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
		VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
			__func__, i, target_state->pwr_domain_state[i]);

	/* set power down request */
	r = mmio_read_32(APU_PWRCTL);
	r |= (1 << cpu_id);
	mmio_write_32(APU_PWRCTL, r);

	/* program RVBAR */
	mmio_write_32(APU_RVBAR_L_0 + (cpu_id << 3), zynqmp_sec_entry);
	mmio_write_32(APU_RVBAR_H_0 + (cpu_id << 3), zynqmp_sec_entry >> 32);

	/* clear VINITHI */
	r = mmio_read_32(APU_CONFIG_0);
	r &= ~(1 << APU_CONFIG_0_VINITHI_SHIFT << cpu_id);
	mmio_write_32(APU_CONFIG_0, r);

	/* enable power up on IRQ */
	mmio_write_32(PMU_GLOBAL_REQ_PWRUP_EN, 1 << cpu_id);
}

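/*
 * Suspend the calling core through the PMU firmware. pm_self_suspend() posts
 * the request with zynqmp_sec_entry as the resume address. If the parent APU
 * power level is to go deeper than retention, the L2 requirement is dropped,
 * OCM retention is requested, and the core leaves interconnect coherency
 * before the PSCI framework issues the final wfi.
 */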
static void zynqmp_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	unsigned int cpu_id = plat_my_core_pos();
	const struct pm_proc *proc = pm_get_proc(cpu_id);

	for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
		VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
			__func__, i, target_state->pwr_domain_state[i]);

	/* Send request to PMU to suspend this core */
	pm_self_suspend(proc->node_id, MAX_LATENCY, 0, zynqmp_sec_entry);

	/* APU is to be turned off */
	if (target_state->pwr_domain_state[1] > PLAT_MAX_RET_STATE) {
		/* Power down L2 cache */
		pm_set_requirement(NODE_L2, 0, 0, REQ_ACK_NO);
		/* Send request for OCM retention state */
		set_ocm_retention();
		/* disable coherency */
		plat_arm_interconnect_exit_coherency();
	}
}

static void zynqmp_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
		VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
			__func__, i, target_state->pwr_domain_state[i]);

	/* Restore per-CPU GIC state now that the core is back online */
	gicv2_cpuif_enable();
	gicv2_pcpu_distif_init();
}

static void zynqmp_nopmu_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
{
	uint32_t r;
	unsigned int cpu_id = plat_my_core_pos();

	for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
		VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
			__func__, i, target_state->pwr_domain_state[i]);

	/* disable power up on IRQ */
	mmio_write_32(PMU_GLOBAL_REQ_PWRUP_DIS, 1 << cpu_id);

	/* clear powerdown bit */
	r = mmio_read_32(APU_PWRCTL);
	r &= ~(1 << cpu_id);
	mmio_write_32(APU_PWRCTL, r);
}

static void zynqmp_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
{
	unsigned int cpu_id = plat_my_core_pos();
	const struct pm_proc *proc = pm_get_proc(cpu_id);

	for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
		VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
			__func__, i, target_state->pwr_domain_state[i]);

	/* Clear the APU power control register for this cpu */
	pm_client_wakeup(proc);

	/* enable coherency */
	plat_arm_interconnect_enter_coherency();
}

/*******************************************************************************
 * ZynqMP handlers to shutdown/reboot the system
 ******************************************************************************/
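/*
 * With PMU firmware present the request goes through pm_system_shutdown();
 * without it, EL3 can only soft-reset the SoC via the CRL_APB registers and
 * cannot power the system off, which is why the no-PMU off handler panics.
 */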
static void __dead2 zynqmp_nopmu_system_off(void)
{
	ERROR("ZynqMP System Off: operation not handled.\n");

	/* disable coherency */
	plat_arm_interconnect_exit_coherency();

	panic();
}

static void __dead2 zynqmp_system_off(void)
{
	/* disable coherency */
	plat_arm_interconnect_exit_coherency();

	/* Send the power down request to the PMU (argument 0 = shutdown) */
	pm_system_shutdown(0);

	while (1)
		wfi();
}

static void __dead2 zynqmp_nopmu_system_reset(void)
{
	/*
	 * This triggers a full system reset: the whole SoC, including
	 * the RPUs, PMU and PL, is reset.
	 */

	/* disable coherency */
	plat_arm_interconnect_exit_coherency();

	/* bypass RPLL (needed on 1.0 silicon) */
	uint32_t reg = mmio_read_32(CRL_APB_RPLL_CTRL);
	reg |= CRL_APB_RPLL_CTRL_BYPASS;
	mmio_write_32(CRL_APB_RPLL_CTRL, reg);

	/* trigger system reset */
	mmio_write_32(CRL_APB_RESET_CTRL, CRL_APB_RESET_CTRL_SOFT_RESET);

	while (1)
		wfi();
}

static void __dead2 zynqmp_system_reset(void)
{
	/* disable coherency */
	plat_arm_interconnect_exit_coherency();

	/* Send the system reset request to the PMU (argument 1 = reset) */
	pm_system_shutdown(1);

	while (1)
		wfi();
}

int zynqmp_validate_power_state(unsigned int power_state,
				psci_power_state_t *req_state)
{
	VERBOSE("%s: power_state: 0x%x\n", __func__, power_state);

	/* FIXME: populate req_state */
	return PSCI_E_SUCCESS;
}
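
/*
 * Note: the stub above accepts every power_state without decoding it. A
 * minimal implementation (a sketch only, assuming the generic TF-A pstate
 * helpers psci_get_pstate_pwrlvl()/psci_get_pstate_type()) might look like:
 *
 *	unsigned int lvl = psci_get_pstate_pwrlvl(power_state);
 *
 *	if (lvl > PLAT_MAX_PWR_LVL)
 *		return PSCI_E_INVALID_PARAMS;
 *
 *	for (unsigned int i = 0; i <= lvl; i++)
 *		req_state->pwr_domain_state[i] =
 *			(psci_get_pstate_type(power_state) == PSTATE_TYPE_POWERDOWN) ?
 *			PLAT_MAX_OFF_STATE : PLAT_MAX_RET_STATE;
 */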

int zynqmp_validate_ns_entrypoint(unsigned long ns_entrypoint)
{
	VERBOSE("%s: ns_entrypoint: 0x%lx\n", __func__, ns_entrypoint);

	/* FIXME: Actually validate */
	return PSCI_E_SUCCESS;
}

void zynqmp_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	/* For SYSTEM_SUSPEND both the CPU and the APU power levels go off */
	req_state->pwr_domain_state[PSCI_CPU_PWR_LVL] = PLAT_MAX_OFF_STATE;
	req_state->pwr_domain_state[1] = PLAT_MAX_OFF_STATE;
}

/*******************************************************************************
 * Export the platform handlers to enable psci to invoke them
 ******************************************************************************/
static const struct plat_psci_ops zynqmp_psci_ops = {
	.cpu_standby			= zynqmp_cpu_standby,
	.pwr_domain_on			= zynqmp_pwr_domain_on,
	.pwr_domain_off			= zynqmp_pwr_domain_off,
	.pwr_domain_suspend		= zynqmp_pwr_domain_suspend,
	.pwr_domain_on_finish		= zynqmp_pwr_domain_on_finish,
	.pwr_domain_suspend_finish	= zynqmp_pwr_domain_suspend_finish,
	.system_off			= zynqmp_system_off,
	.system_reset			= zynqmp_system_reset,
	.validate_power_state		= zynqmp_validate_power_state,
	.validate_ns_entrypoint		= zynqmp_validate_ns_entrypoint,
	.get_sys_suspend_power_state	= zynqmp_get_sys_suspend_power_state,
};

static const struct plat_psci_ops zynqmp_nopmu_psci_ops = {
	.cpu_standby			= zynqmp_cpu_standby,
	.pwr_domain_on			= zynqmp_nopmu_pwr_domain_on,
	.pwr_domain_off			= zynqmp_nopmu_pwr_domain_off,
	.pwr_domain_suspend		= zynqmp_nopmu_pwr_domain_suspend,
	.pwr_domain_on_finish		= zynqmp_pwr_domain_on_finish,
	.pwr_domain_suspend_finish	= zynqmp_nopmu_pwr_domain_suspend_finish,
	.system_off			= zynqmp_nopmu_system_off,
	.system_reset			= zynqmp_nopmu_system_reset,
	.validate_power_state		= zynqmp_validate_power_state,
	.validate_ns_entrypoint		= zynqmp_validate_ns_entrypoint,
	.get_sys_suspend_power_state	= zynqmp_get_sys_suspend_power_state,
};

/*******************************************************************************
 * Export the platform specific power ops.
 ******************************************************************************/
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const struct plat_psci_ops **psci_ops)
{
	zynqmp_sec_entry = sec_entrypoint;

	/*
	 * Use the PMU-backed handlers when the PMU firmware is running;
	 * otherwise fall back to programming the APU registers directly.
	 */
	if (zynqmp_is_pmu_up())
		*psci_ops = &zynqmp_psci_ops;
	else
		*psci_ops = &zynqmp_nopmu_psci_ops;

	return 0;
}