xref: /rk3399_ARM-atf/plat/rockchip/rk3368/drivers/pmu/pmu.c (revision 9ff67fa6f25c5a0285eec27f3e86362ae535aac3)
1 /*
2  * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * Redistributions of source code must retain the above copyright notice, this
8  * list of conditions and the following disclaimer.
9  *
10  * Redistributions in binary form must reproduce the above copyright notice,
11  * this list of conditions and the following disclaimer in the documentation
12  * and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
18  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
24  * POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <arch_helpers.h>
28 #include <assert.h>
29 #include <debug.h>
30 #include <delay_timer.h>
31 #include <errno.h>
32 #include <mmio.h>
33 #include <platform.h>
34 #include <platform_def.h>
35 #include <plat_private.h>
36 #include <rk3368_def.h>
37 #include <pmu_sram.h>
38 #include <soc.h>
39 #include <pmu.h>
40 #include <ddr_rk3368.h>
41 #include <pmu_com.h>
42 
/*
 * Suspend/resume parameter block shared with the PMUSRAM warm-boot code,
 * placed at the fixed PSRAM_DT_BASE address so both sides agree on it.
 */
static struct psram_data_t *psram_sleep_cfg =
	(struct psram_data_t *)PSRAM_DT_BASE;
45 
46 void rk3368_flash_l2_b(void)
47 {
48 	uint32_t wait_cnt = 0;
49 
50 	regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_l2flsh_clst_b);
51 	dsb();
52 
53 	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST)
54 		& BIT(clst_b_l2_flsh_done))) {
55 		wait_cnt++;
56 		if (!(wait_cnt % MAX_WAIT_CONUT))
57 			WARN("%s:reg %x,wait\n", __func__,
58 			     mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
59 	}
60 
61 	regs_updata_bit_clr(PMU_BASE + PMU_SFT_CON, pmu_sft_l2flsh_clst_b);
62 }
63 
64 static inline int rk3368_pmu_bus_idle(uint32_t req, uint32_t idle)
65 {
66 	uint32_t mask = BIT(req);
67 	uint32_t idle_mask = 0;
68 	uint32_t idle_target = 0;
69 	uint32_t val;
70 	uint32_t wait_cnt = 0;
71 
72 	switch (req) {
73 	case bus_ide_req_clst_l:
74 		idle_mask = BIT(pmu_idle_ack_cluster_l);
75 		idle_target = (idle << pmu_idle_ack_cluster_l);
76 		break;
77 
78 	case bus_ide_req_clst_b:
79 		idle_mask = BIT(pmu_idle_ack_cluster_b);
80 		idle_target = (idle << pmu_idle_ack_cluster_b);
81 		break;
82 
83 	case bus_ide_req_cxcs:
84 		idle_mask = BIT(pmu_idle_ack_cxcs);
85 		idle_target = ((!idle) << pmu_idle_ack_cxcs);
86 		break;
87 
88 	case bus_ide_req_cci400:
89 		idle_mask = BIT(pmu_idle_ack_cci400);
90 		idle_target = ((!idle) << pmu_idle_ack_cci400);
91 		break;
92 
93 	case bus_ide_req_gpu:
94 		idle_mask = BIT(pmu_idle_ack_gpu) | BIT(pmu_idle_gpu);
95 		idle_target = (idle << pmu_idle_ack_gpu) |
96 			      (idle << pmu_idle_gpu);
97 		break;
98 
99 	case bus_ide_req_core:
100 		idle_mask = BIT(pmu_idle_ack_core) | BIT(pmu_idle_core);
101 		idle_target = (idle << pmu_idle_ack_core) |
102 			      (idle << pmu_idle_core);
103 		break;
104 
105 	case bus_ide_req_bus:
106 		idle_mask = BIT(pmu_idle_ack_bus) | BIT(pmu_idle_bus);
107 		idle_target = (idle << pmu_idle_ack_bus) |
108 			      (idle << pmu_idle_bus);
109 		break;
110 	case bus_ide_req_dma:
111 		idle_mask = BIT(pmu_idle_ack_dma) | BIT(pmu_idle_dma);
112 		idle_target = (idle << pmu_idle_ack_dma) |
113 			      (idle << pmu_idle_dma);
114 		break;
115 
116 	case bus_ide_req_peri:
117 		idle_mask = BIT(pmu_idle_ack_peri) | BIT(pmu_idle_peri);
118 		idle_target = (idle << pmu_idle_ack_peri) |
119 			      (idle << pmu_idle_peri);
120 		break;
121 
122 	case bus_ide_req_video:
123 		idle_mask = BIT(pmu_idle_ack_video) | BIT(pmu_idle_video);
124 		idle_target = (idle << pmu_idle_ack_video) |
125 			      (idle << pmu_idle_video);
126 		break;
127 
128 	case bus_ide_req_vio:
129 		idle_mask = BIT(pmu_idle_ack_vio) | BIT(pmu_idle_vio);
130 		idle_target = (pmu_idle_ack_vio) |
131 			      (idle << pmu_idle_vio);
132 		break;
133 
134 	case bus_ide_req_alive:
135 		idle_mask = BIT(pmu_idle_ack_alive) | BIT(pmu_idle_alive);
136 		idle_target = (idle << pmu_idle_ack_alive) |
137 			      (idle << pmu_idle_alive);
138 		break;
139 
140 	case bus_ide_req_pmu:
141 		idle_mask = BIT(pmu_idle_ack_pmu) | BIT(pmu_idle_pmu);
142 		idle_target = (idle << pmu_idle_ack_pmu) |
143 			      (idle << pmu_idle_pmu);
144 		break;
145 
146 	case bus_ide_req_msch:
147 		idle_mask = BIT(pmu_idle_ack_msch) | BIT(pmu_idle_msch);
148 		idle_target = (idle << pmu_idle_ack_msch) |
149 			      (idle << pmu_idle_msch);
150 		break;
151 
152 	case bus_ide_req_cci:
153 		idle_mask = BIT(pmu_idle_ack_cci) | BIT(pmu_idle_cci);
154 		idle_target = (idle << pmu_idle_ack_cci) |
155 			      (idle << pmu_idle_cci);
156 		break;
157 
158 	default:
159 		ERROR("%s: Unsupported the idle request\n", __func__);
160 		break;
161 	}
162 
163 	val = mmio_read_32(PMU_BASE + PMU_BUS_IDE_REQ);
164 	if (idle)
165 		val |=	mask;
166 	else
167 		val &= ~mask;
168 
169 	mmio_write_32(PMU_BASE + PMU_BUS_IDE_REQ, val);
170 
171 	while ((mmio_read_32(PMU_BASE +
172 	       PMU_BUS_IDE_ST) & idle_mask) != idle_target) {
173 		wait_cnt++;
174 		if (!(wait_cnt % MAX_WAIT_CONUT))
175 			WARN("%s:st=%x(%x)\n", __func__,
176 			     mmio_read_32(PMU_BASE + PMU_BUS_IDE_ST),
177 			     idle_mask);
178 	}
179 
180 	return 0;
181 }
182 
/*
 * Bring the big cluster's bus interface back up after pmu_scu_b_pwrdn():
 * deassert ACINACTM, then take the cluster out of bus-idle.
 */
void pmu_scu_b_pwrup(void)
{
	regs_updata_bit_clr(PMU_BASE + PMU_SFT_CON, pmu_sft_acinactm_clst_b);
	rk3368_pmu_bus_idle(bus_ide_req_clst_b, 0);
}
188 
/*
 * Power-down sequence for the big cluster's SCU/L2. Every big-cluster
 * core must already be powered down; then: flush L2, assert ACINACTM,
 * wait for the L2 WFI status, and finally idle the cluster's bus
 * interface. The statement order is a hardware requirement.
 */
static void pmu_scu_b_pwrdn(void)
{
	uint32_t wait_cnt = 0;

	/* Bail out unless all big-cluster cores report powered down. */
	if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) &
	     PM_PWRDM_CPUSB_MSK) != PM_PWRDM_CPUSB_MSK) {
		ERROR("%s: not all cpus is off\n", __func__);
		return;
	}

	rk3368_flash_l2_b();

	/* Assert ACINACTM on the big cluster. */
	regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_acinactm_clst_b);

	/* Poll for L2 WFI; complain periodically while waiting. */
	while (!(mmio_read_32(PMU_BASE +
	       PMU_CORE_PWR_ST) & BIT(clst_b_l2_wfi))) {
		wait_cnt++;
		if (!(wait_cnt % MAX_WAIT_CONUT))
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}
	rk3368_pmu_bus_idle(bus_ide_req_clst_b, 1);
}
212 
213 static void pmu_sleep_mode_config(void)
214 {
215 	uint32_t pwrmd_core, pwrmd_com;
216 
217 	pwrmd_core = BIT(pmu_mdcr_cpu0_pd) |
218 		     BIT(pmu_mdcr_scu_l_pd) |
219 		     BIT(pmu_mdcr_l2_flush) |
220 		     BIT(pmu_mdcr_l2_idle) |
221 		     BIT(pmu_mdcr_clr_clst_l) |
222 		     BIT(pmu_mdcr_clr_core) |
223 		     BIT(pmu_mdcr_clr_cci) |
224 		     BIT(pmu_mdcr_core_pd);
225 
226 	pwrmd_com = BIT(pmu_mode_en) |
227 		    BIT(pmu_mode_sref_enter) |
228 		    BIT(pmu_mode_pwr_off);
229 
230 	regs_updata_bit_set(PMU_BASE + PMU_WKUP_CFG2, pmu_cluster_l_wkup_en);
231 	regs_updata_bit_set(PMU_BASE + PMU_WKUP_CFG2, pmu_cluster_b_wkup_en);
232 	regs_updata_bit_clr(PMU_BASE + PMU_WKUP_CFG2, pmu_gpio_wkup_en);
233 
234 	mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_24M_CNT_MS(2));
235 	mmio_write_32(PMU_BASE + PMU_PLLRST_CNT, CYCL_24M_CNT_US(100));
236 	mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_24M_CNT_MS(2));
237 	mmio_write_32(PMU_BASE + PMU_PWRMD_CORE, pwrmd_core);
238 	mmio_write_32(PMU_BASE + PMU_PWRMD_COM, pwrmd_com);
239 	dsb();
240 }
241 
/*
 * Save the DDR controller state into the PMUSRAM data area
 * (psram_sleep_cfg->ddr_data) ahead of system suspend.
 */
static void ddr_suspend_save(void)
{
	ddr_reg_save(1, psram_sleep_cfg->ddr_data);
}
246 
/*
 * Arm the hardware for system sleep: save DDR state, program the PMU
 * power-mode/wakeup registers, apply SoC-level sleep configuration,
 * disable global interrupt delivery, power down the big-cluster SCU,
 * then redirect both clusters' boot address to PMUSRAM so they resume
 * through the warm-boot stub.
 */
static void pmu_set_sleep_mode(void)
{
	ddr_suspend_save();
	pmu_sleep_mode_config();
	soc_sleep_config();
	regs_updata_bit_set(PMU_BASE + PMU_PWRMD_CORE, pmu_mdcr_global_int_dis);
	regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_glbl_int_dis_b);
	pmu_scu_b_pwrdn();
	/* SOC_CON(1)/(2) hold the boot addresses of the two clusters. */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(2),
		      (PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
}
262 
263 void plat_rockchip_pmusram_prepare(void)
264 {
265 	uint32_t *sram_dst, *sram_src;
266 	size_t sram_size = 2;
267 	uint32_t code_size;
268 
269 	/* pmu sram code and data prepare */
270 	sram_dst = (uint32_t *)PMUSRAM_BASE;
271 	sram_src = (uint32_t *)&pmu_cpuson_entrypoint_start;
272 	sram_size = (uint32_t *)&pmu_cpuson_entrypoint_end -
273 		    (uint32_t *)sram_src;
274 	u32_align_cpy(sram_dst, sram_src, sram_size);
275 
276 	/* ddr code */
277 	sram_dst += sram_size;
278 	sram_src = ddr_get_resume_code_base();
279 	code_size = ddr_get_resume_code_size();
280 	u32_align_cpy(sram_dst, sram_src, code_size / 4);
281 	psram_sleep_cfg->ddr_func = (uint64_t)sram_dst;
282 
283 	/* ddr data */
284 	sram_dst += (code_size / 4);
285 	psram_sleep_cfg->ddr_data = (uint64_t)sram_dst;
286 
287 	assert((uint64_t)(sram_dst + ddr_get_resume_data_size() / 4)
288 						 < PSRAM_SP_BOTTOM);
289 	psram_sleep_cfg->sp = PSRAM_SP_TOP;
290 }
291 
292 static int cpus_id_power_domain(uint32_t cluster,
293 				uint32_t cpu,
294 				uint32_t pd_state,
295 				uint32_t wfie_msk)
296 {
297 	uint32_t pd;
298 	uint64_t mpidr;
299 
300 	if (cluster)
301 		pd = PD_CPUB0 + cpu;
302 	else
303 		pd = PD_CPUL0 + cpu;
304 
305 	if (pmu_power_domain_st(pd) == pd_state)
306 		return 0;
307 
308 	if (pd_state == pmu_pd_off) {
309 		mpidr = (cluster << MPIDR_AFF1_SHIFT) | cpu;
310 		if (check_cpu_wfie(mpidr, wfie_msk))
311 			return -EINVAL;
312 	}
313 
314 	return pmu_power_domain_ctr(pd, pd_state);
315 }
316 
317 static void nonboot_cpus_off(void)
318 {
319 	uint32_t boot_cpu, boot_cluster, cpu;
320 
321 	boot_cpu = MPIDR_AFFLVL0_VAL(read_mpidr_el1());
322 	boot_cluster = MPIDR_AFFLVL1_VAL(read_mpidr_el1());
323 
324 	/* turn off noboot cpus */
325 	for (cpu = 0; cpu < PLATFORM_CLUSTER0_CORE_COUNT; cpu++) {
326 		if (!boot_cluster && (cpu == boot_cpu))
327 			continue;
328 		cpus_id_power_domain(0, cpu, pmu_pd_off, CKECK_WFEI_MSK);
329 	}
330 
331 	for (cpu = 0; cpu < PLATFORM_CLUSTER1_CORE_COUNT; cpu++) {
332 		if (boot_cluster && (cpu == boot_cpu))
333 			continue;
334 		cpus_id_power_domain(1, cpu, pmu_pd_off, CKECK_WFEI_MSK);
335 	}
336 }
337 
/*
 * PSCI CPU_ON hook: power up the core identified by 'mpidr' and have
 * it resume through the PMUSRAM stub, which hands off to 'entrypoint'.
 * Always returns 0.
 */
static int cores_pwr_domain_on(unsigned long mpidr, uint64_t entrypoint)
{
	uint32_t cpu, cluster;
	uint32_t cpuon_id;

	cpu = MPIDR_AFFLVL0_VAL(mpidr);
	cluster = MPIDR_AFFLVL1_VAL(mpidr);

	/* Make sure the cpu is off,Before power up the cpu! */
	cpus_id_power_domain(cluster, cpu, pmu_pd_off, CKECK_WFEI_MSK);

	/* Linear core index; assumes cluster stride equals
	 * PLATFORM_CLUSTER0_CORE_COUNT — TODO confirm this matches the
	 * plat_my_core_pos() numbering used on the finish path. */
	cpuon_id = (cluster * PLATFORM_CLUSTER0_CORE_COUNT) + cpu;
	assert(cpuson_flags[cpuon_id] == 0);
	cpuson_flags[cpuon_id] = PMU_CPU_HOTPLUG;
	cpuson_entry_point[cpuon_id] = entrypoint;

	/* Switch boot addr to pmusram */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1 + cluster),
		      (PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	dsb();

	cpus_id_power_domain(cluster, cpu, pmu_pd_on, CKECK_WFEI_MSK);

	/* Restore the cold-boot address for subsequent resets. */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1 + cluster),
		      (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	return 0;
}
368 
/*
 * PSCI on-finish hook, running on the freshly powered-on core.
 * NOTE(review): cores_pwr_domain_on() sets cpuson_flags[] to
 * PMU_CPU_HOTPLUG, so the assert below only holds if the PMUSRAM
 * warm-boot path clears the flag before this runs — confirm against
 * the pmu_cpuson_entrypoint code.
 */
static int cores_pwr_domain_on_finish(void)
{
	uint32_t cpuon_id;

	cpuon_id = plat_my_core_pos();
	assert(cpuson_flags[cpuon_id] == 0);
	cpuson_flags[cpuon_id] = 0x00;

	return 0;
}
379 
/*
 * System resume hook: mark the PMUSRAM context as running again,
 * restore the PLLs, then bring the big-cluster SCU back up.
 */
static int sys_pwr_domain_resume(void)
{
	psram_sleep_cfg->sys_mode = PMU_SYS_ON_MODE;

	pm_plls_resume();
	pmu_scu_b_pwrup();

	return 0;
}
389 
/*
 * System suspend hook: power off all non-boot cores, arm the sleep
 * hardware, and flag the PMUSRAM context as sleeping so the warm-boot
 * stub takes the resume path.
 */
static int sys_pwr_domain_suspend(void)
{
	nonboot_cpus_off();
	pmu_set_sleep_mode();

	psram_sleep_cfg->sys_mode = PMU_SYS_SLP_MODE;
	psram_sleep_cfg->ddr_flag = 0;

	return 0;
}
400 
/* Platform PM callbacks registered with the common Rockchip power code. */
static struct rockchip_pm_ops_cb pm_ops = {
	.cores_pwr_dm_on = cores_pwr_domain_on,
	.cores_pwr_dm_on_finish = cores_pwr_domain_on_finish,
	.sys_pwr_dm_suspend = sys_pwr_domain_suspend,
	.sys_pwr_dm_resume = sys_pwr_domain_resume,
	.sys_gbl_soft_reset = soc_sys_global_soft_reset,
};
408 
409 void plat_rockchip_pmu_init(void)
410 {
411 	uint32_t cpu;
412 
413 	plat_setup_rockchip_pm_ops(&pm_ops);
414 
415 	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
416 		cpuson_flags[cpu] = 0;
417 
418 	psram_sleep_cfg->sys_mode = PMU_SYS_ON_MODE;
419 
420 	psram_sleep_cfg->boot_mpidr = read_mpidr_el1() & 0xffff;
421 
422 	nonboot_cpus_off();
423 	INFO("%s(%d): pd status %x\n", __func__, __LINE__,
424 	     mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
425 }
426