xref: /rk3399_ARM-atf/plat/hisilicon/hikey960/hikey960_pm.c (revision 61f72a34250d063da67f4fc2b0eb8c3fda3376be)
/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <cci.h>
#include <console.h>
#include <debug.h>
#include <delay_timer.h>
#include <gicv2.h>
#include <hi3660.h>
#include <hi3660_crg.h>
#include <mmio.h>
#include <psci.h>
#include "drivers/pwrc/hisi_pwrc.h"

#include "hikey960_def.h"
#include "hikey960_private.h"

#define CORE_PWR_STATE(state) \
	((state)->pwr_domain_state[MPIDR_AFFLVL0])
#define CLUSTER_PWR_STATE(state) \
	((state)->pwr_domain_state[MPIDR_AFFLVL1])
#define SYSTEM_PWR_STATE(state) \
	((state)->pwr_domain_state[PLAT_MAX_PWR_LVL])

#define DMAC_GLB_REG_SEC	0x694
#define AXI_CONF_BASE		0x820

static unsigned int uart_base;
static uintptr_t hikey960_sec_entrypoint;

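/*
 * Put the calling core into standby. Physical IRQ and FIQ are routed to
 * EL3 so that a pending interrupt can wake the core from WFI.
 */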
static void hikey960_pwr_domain_standby(plat_local_state_t cpu_state)
{
	unsigned long scr;

	scr = read_scr_el3();

	/* Enable physical IRQ and FIQ to wake the CPU */
	write_scr_el3(scr | SCR_IRQ_BIT | SCR_FIQ_BIT);

	/* Add barriers before the CPU enters the WFI state */
	isb();
	dsb();
	wfi();

	/*
	 * Restore SCR_EL3 to its original value. Synchronisation of
	 * scr_el3 is done by the eret in el3_exit to save some
	 * execution cycles.
	 */
	write_scr_el3(scr);
}

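/*
 * Power on a secondary core. The warm boot entrypoint is programmed into
 * the core's RVBAR register; if the target cluster is already powered on,
 * only the core is brought up, otherwise the whole cluster is powered up.
 */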
static int hikey960_pwr_domain_on(u_register_t mpidr)
{
	unsigned int core = mpidr & MPIDR_CPU_MASK;
	unsigned int cluster =
		(mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;
	int cluster_stat = cluster_is_powered_on(cluster);

	hisi_set_cpu_boot_flag(cluster, core);

	mmio_write_32(CRG_REG_BASE + CRG_RVBAR(cluster, core),
		      hikey960_sec_entrypoint >> 2);

	if (cluster_stat)
		hisi_powerup_core(cluster, core);
	else
		hisi_powerup_cluster(cluster, core);

	return PSCI_E_SUCCESS;
}

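/*
 * Late power-on hook: re-enable CCI snoops/DVM messages when the whole
 * cluster was off, then initialise the per-CPU GIC interface.
 */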
static void
hikey960_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	if (CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
		cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1()));

	gicv2_pcpu_distif_init();
	gicv2_cpuif_enable();
}

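/*
 * Power down the calling core. If it is the last core online in its
 * cluster, the cluster is powered down as well.
 */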
void hikey960_pwr_domain_off(const psci_power_state_t *target_state)
{
	unsigned long mpidr = read_mpidr_el1();
	unsigned int core = mpidr & MPIDR_CPU_MASK;
	unsigned int cluster =
		(mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;

	clr_ex();
	isb();
	dsbsy();

	gicv2_cpuif_disable();

	hisi_clear_cpu_boot_flag(cluster, core);
	hisi_powerdn_core(cluster, core);

	/* Power down the cluster as well if all of its cores are down */
	if (hisi_test_cpu_down(cluster, core)) {

		cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1()));

		isb();
		dsbsy();

		hisi_powerdn_cluster(cluster, core);
	}
}

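/*
 * Reset the SoC through the system controller; panic() is only reached if
 * the reset does not take effect.
 */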
static void __dead2 hikey960_system_reset(void)
{
	dsb();
	isb();
	mdelay(2000);
	mmio_write_32(SCTRL_SCPEREN1_REG,
		      SCPEREN1_WAIT_DDR_SELFREFRESH_DONE_BYPASS);
	mmio_write_32(SCTRL_SCSYSSTAT_REG, 0xdeadbeef);
	panic();
}

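/*
 * Validate a power_state parameter from PSCI CPU_SUSPEND and translate it
 * into the req_state array: retention is only allowed at the core level,
 * any deeper request maps every affected level to the OFF state.
 */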
int hikey960_validate_power_state(unsigned int power_state,
				  psci_power_state_t *req_state)
{
	unsigned int pstate = psci_get_pstate_type(power_state);
	unsigned int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
	int i;

	assert(req_state);

	if (pwr_lvl > PLAT_MAX_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	/* Sanity check the requested state */
	if (pstate == PSTATE_TYPE_STANDBY) {
		/*
		 * It's possible to enter standby only at power level 0.
		 * Ignore any other power level.
		 */
		if (pwr_lvl != MPIDR_AFFLVL0)
			return PSCI_E_INVALID_PARAMS;

		req_state->pwr_domain_state[MPIDR_AFFLVL0] =
					PLAT_MAX_RET_STATE;
	} else {
		for (i = MPIDR_AFFLVL0; i <= pwr_lvl; i++)
			req_state->pwr_domain_state[i] =
					PLAT_MAX_OFF_STATE;
	}

	/*
	 * We expect the 'state id' to be zero.
	 */
	if (psci_get_pstate_id(power_state))
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}

static int hikey960_validate_ns_entrypoint(uintptr_t entrypoint)
{
	/*
	 * Check if the non-secure entrypoint lies within the
	 * non-secure DRAM.
	 */
	if ((entrypoint > DDR_BASE) && (entrypoint < (DDR_BASE + DDR_SIZE)))
		return PSCI_E_SUCCESS;

	return PSCI_E_INVALID_ADDRESS;
}

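/*
 * Suspend the calling core. When the whole cluster is going down, the last
 * core also decides, based on the AP suspend flag, whether to enter cluster
 * idle or full system suspend.
 */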
static void hikey960_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	u_register_t mpidr = read_mpidr_el1();
	unsigned int core = mpidr & MPIDR_CPU_MASK;
	unsigned int cluster =
		(mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;

	if (CORE_PWR_STATE(target_state) != PLAT_MAX_OFF_STATE)
		return;

	if (CORE_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
		clr_ex();
		isb();
		dsbsy();

		gicv2_cpuif_disable();

		hisi_cpuidle_lock(cluster, core);
		hisi_set_cpuidle_flag(cluster, core);
		hisi_cpuidle_unlock(cluster, core);

		mmio_write_32(CRG_REG_BASE + CRG_RVBAR(cluster, core),
			      hikey960_sec_entrypoint >> 2);

		hisi_enter_core_idle(cluster, core);
	}

	/* Perform the common cluster-specific operations */
	if (CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
		hisi_cpuidle_lock(cluster, core);
		hisi_disable_pdc(cluster);

		/* Check if all cores in the cluster are powered down */
		if (hisi_test_pwrdn_allcores(cluster, core)) {

			cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(mpidr));

			isb();
			dsbsy();

			/*
			 * Mask the pdc wakeup irq, then enable the pdc to
			 * power down the core.
			 */
			hisi_pdc_mask_cluster_wakeirq(cluster);
			hisi_enable_pdc(cluster);

			hisi_cpuidle_unlock(cluster, core);

			/*
			 * Check the SR flag bit to determine whether to send
			 * CLUSTER_IDLE_IPC or AP_SR_IPC.
			 */
			if (hisi_test_ap_suspend_flag(cluster))
				hisi_enter_ap_suspend(cluster, core);
			else
				hisi_enter_cluster_idle(cluster, core);
		} else {
			/* Enable the pdc */
			hisi_enable_pdc(cluster);
			hisi_cpuidle_unlock(cluster, core);
		}
	}
}

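/*
 * Re-initialise the DMAC security configuration after resuming from
 * system suspend.
 */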
static void hikey960_sr_dma_reinit(void)
{
	unsigned int ctr = 0;

	mmio_write_32(DMAC_BASE + DMAC_GLB_REG_SEC, 0x3);

	/* Set channels 1~15 as non-secure */
	for (ctr = 1; ctr <= 15; ctr++)
		mmio_write_32(DMAC_BASE + AXI_CONF_BASE + ctr * (0x40),
			      (1 << 6) | (1 << 18));
}

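/*
 * Finish waking up from suspend: clear the core's idle flag and, if the
 * system was in full suspend, re-initialise the DMAC, GIC CPU interface
 * and console before running the common power-on-finish path.
 */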
static void
hikey960_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
{
	unsigned long mpidr = read_mpidr_el1();
	unsigned int core = mpidr & MPIDR_CPU_MASK;
	unsigned int cluster =
		(mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;

	/* Nothing to be done when waking up from CPU-level retention */
	if (CORE_PWR_STATE(target_state) != PLAT_MAX_OFF_STATE)
		return;

	hisi_cpuidle_lock(cluster, core);
	hisi_clear_cpuidle_flag(cluster, core);
	hisi_cpuidle_unlock(cluster, core);

	if (hisi_test_ap_suspend_flag(cluster)) {
		hikey960_sr_dma_reinit();
		gicv2_cpuif_enable();
		console_init(uart_base, PL011_UART_CLK_IN_HZ,
			     PL011_BAUDRATE);
	}

	hikey960_pwr_domain_on_finish(target_state);
}

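/*
 * For SYSTEM_SUSPEND, request the OFF state at every power level.
 */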
static void hikey960_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	int i;

	for (i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
		req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
}

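/* Platform PSCI handlers exported to the generic PSCI layer */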
static const plat_psci_ops_t hikey960_psci_ops = {
	.cpu_standby			= hikey960_pwr_domain_standby,
	.pwr_domain_on			= hikey960_pwr_domain_on,
	.pwr_domain_on_finish		= hikey960_pwr_domain_on_finish,
	.pwr_domain_off			= hikey960_pwr_domain_off,
	.pwr_domain_suspend		= hikey960_pwr_domain_suspend,
	.pwr_domain_suspend_finish	= hikey960_pwr_domain_suspend_finish,
	.system_off			= NULL,
	.system_reset			= hikey960_system_reset,
	.validate_power_state		= hikey960_validate_power_state,
	.validate_ns_entrypoint		= hikey960_validate_ns_entrypoint,
	.get_sys_suspend_power_state	= hikey960_get_sys_suspend_power_state,
};

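/*
 * Select the console UART from the board ID, record the warm boot
 * entrypoint and hand the platform PSCI handlers back to the PSCI layer.
 */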
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const plat_psci_ops_t **psci_ops)
{
	unsigned int id = 0;
	int ret;

	ret = hikey960_read_boardid(&id);
	if (ret == 0) {
		if (id == 5300U)
			uart_base = PL011_UART5_BASE;
		else
			uart_base = PL011_UART6_BASE;
	} else {
		uart_base = PL011_UART6_BASE;
	}

	hikey960_sec_entrypoint = sec_entrypoint;

	INFO("%s: sec_entrypoint=0x%lx\n", __func__,
	     (unsigned long)hikey960_sec_entrypoint);

	/*
	 * Initialize PSCI ops struct
	 */
	*psci_ops = &hikey960_psci_ops;
	return 0;
}