xref: /rk3399_ARM-atf/plat/rockchip/rk3368/drivers/pmu/pmu.c (revision 532ed6183868036e4a4f83cd7a71b93266a3bdb7)
/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <delay_timer.h>
#include <errno.h>
#include <mmio.h>
#include <platform.h>
#include <platform_def.h>
#include <plat_private.h>
#include <rk3368_def.h>
#include <pmu_sram.h>
#include <soc.h>
#include <pmu.h>
#include <ddr_rk3368.h>
#include <pmu_com.h>

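/*
 * psram_sleep_cfg lives at PSRAM_DT_BASE in PMUSRAM and carries the
 * DDR resume code/data pointers, stack pointer, boot MPIDR and the
 * ddr_flag handshake that the warm boot path picks up after a wakeup.
 */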
static struct psram_data_t *psram_sleep_cfg =
	(struct psram_data_t *)PSRAM_DT_BASE;

static uint32_t cpu_warm_boot_addr;

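/*
 * Flush the big cluster's L2 cache in hardware: assert the PMU L2-flush
 * request, poll PMU_CORE_PWR_ST until the flush-done bit is seen, then
 * release the request again.
 */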
void rk3368_flash_l2_b(void)
{
	uint32_t wait_cnt = 0;

	regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_l2flsh_clst_b);
	dsb();

	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST)
		& BIT(clst_b_l2_flsh_done))) {
		wait_cnt++;
		if (!(wait_cnt % MAX_WAIT_CONUT))
			WARN("%s: reg %x, still waiting\n", __func__,
			     mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}

	regs_updata_bit_clr(PMU_BASE + PMU_SFT_CON, pmu_sft_l2flsh_clst_b);
}

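/*
 * Request (idle == 1) or release (idle == 0) a bus idle state through
 * PMU_BUS_IDE_REQ and wait for PMU_BUS_IDE_ST to show the expected
 * acknowledge/idle bits. Note that the cxcs and cci400 acknowledge
 * bits have the opposite polarity from the rest.
 */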
static inline int rk3368_pmu_bus_idle(uint32_t req, uint32_t idle)
{
	uint32_t mask = BIT(req);
	uint32_t idle_mask = 0;
	uint32_t idle_target = 0;
	uint32_t val;
	uint32_t wait_cnt = 0;

	switch (req) {
	case bus_ide_req_clst_l:
		idle_mask = BIT(pmu_idle_ack_cluster_l);
		idle_target = (idle << pmu_idle_ack_cluster_l);
		break;

	case bus_ide_req_clst_b:
		idle_mask = BIT(pmu_idle_ack_cluster_b);
		idle_target = (idle << pmu_idle_ack_cluster_b);
		break;

	case bus_ide_req_cxcs:
		idle_mask = BIT(pmu_idle_ack_cxcs);
		idle_target = ((!idle) << pmu_idle_ack_cxcs);
		break;

	case bus_ide_req_cci400:
		idle_mask = BIT(pmu_idle_ack_cci400);
		idle_target = ((!idle) << pmu_idle_ack_cci400);
		break;

	case bus_ide_req_gpu:
		idle_mask = BIT(pmu_idle_ack_gpu) | BIT(pmu_idle_gpu);
		idle_target = (idle << pmu_idle_ack_gpu) |
			      (idle << pmu_idle_gpu);
		break;

	case bus_ide_req_core:
		idle_mask = BIT(pmu_idle_ack_core) | BIT(pmu_idle_core);
		idle_target = (idle << pmu_idle_ack_core) |
			      (idle << pmu_idle_core);
		break;

	case bus_ide_req_bus:
		idle_mask = BIT(pmu_idle_ack_bus) | BIT(pmu_idle_bus);
		idle_target = (idle << pmu_idle_ack_bus) |
			      (idle << pmu_idle_bus);
		break;

	case bus_ide_req_dma:
		idle_mask = BIT(pmu_idle_ack_dma) | BIT(pmu_idle_dma);
		idle_target = (idle << pmu_idle_ack_dma) |
			      (idle << pmu_idle_dma);
		break;

	case bus_ide_req_peri:
		idle_mask = BIT(pmu_idle_ack_peri) | BIT(pmu_idle_peri);
		idle_target = (idle << pmu_idle_ack_peri) |
			      (idle << pmu_idle_peri);
		break;

	case bus_ide_req_video:
		idle_mask = BIT(pmu_idle_ack_video) | BIT(pmu_idle_video);
		idle_target = (idle << pmu_idle_ack_video) |
			      (idle << pmu_idle_video);
		break;

	case bus_ide_req_vio:
		idle_mask = BIT(pmu_idle_ack_vio) | BIT(pmu_idle_vio);
		idle_target = (idle << pmu_idle_ack_vio) |
			      (idle << pmu_idle_vio);
		break;

	case bus_ide_req_alive:
		idle_mask = BIT(pmu_idle_ack_alive) | BIT(pmu_idle_alive);
		idle_target = (idle << pmu_idle_ack_alive) |
			      (idle << pmu_idle_alive);
		break;

	case bus_ide_req_pmu:
		idle_mask = BIT(pmu_idle_ack_pmu) | BIT(pmu_idle_pmu);
		idle_target = (idle << pmu_idle_ack_pmu) |
			      (idle << pmu_idle_pmu);
		break;

	case bus_ide_req_msch:
		idle_mask = BIT(pmu_idle_ack_msch) | BIT(pmu_idle_msch);
		idle_target = (idle << pmu_idle_ack_msch) |
			      (idle << pmu_idle_msch);
		break;

	case bus_ide_req_cci:
		idle_mask = BIT(pmu_idle_ack_cci) | BIT(pmu_idle_cci);
		idle_target = (idle << pmu_idle_ack_cci) |
			      (idle << pmu_idle_cci);
		break;

	default:
		ERROR("%s: unsupported bus idle request\n", __func__);
		return -EINVAL;
	}

	val = mmio_read_32(PMU_BASE + PMU_BUS_IDE_REQ);
	if (idle)
		val |= mask;
	else
		val &= ~mask;

	mmio_write_32(PMU_BASE + PMU_BUS_IDE_REQ, val);

	while ((mmio_read_32(PMU_BASE +
	       PMU_BUS_IDE_ST) & idle_mask) != idle_target) {
		wait_cnt++;
		if (!(wait_cnt % MAX_WAIT_CONUT))
			WARN("%s: st=%x (mask %x)\n", __func__,
			     mmio_read_32(PMU_BASE + PMU_BUS_IDE_ST),
			     idle_mask);
	}

	return 0;
}

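/*
 * Bring the big cluster's SCU interface back up: deassert ACINACTM and
 * take the cluster out of bus idle.
 */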
void pmu_scu_b_pwrup(void)
{
	regs_updata_bit_clr(PMU_BASE + PMU_SFT_CON, pmu_sft_acinactm_clst_b);
	rk3368_pmu_bus_idle(bus_ide_req_clst_b, 0);
}

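/*
 * Power down the big cluster's SCU. All big-cluster CPUs must already
 * be off: flush L2, assert ACINACTM, wait for the L2 WFI indication,
 * then idle the cluster's bus interface.
 */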
static void pmu_scu_b_pwrdn(void)
{
	uint32_t wait_cnt = 0;

	if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) &
	     PM_PWRDM_CPUSB_MSK) != PM_PWRDM_CPUSB_MSK) {
		ERROR("%s: not all big-cluster CPUs are off\n", __func__);
		return;
	}

	rk3368_flash_l2_b();

	regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_acinactm_clst_b);

	while (!(mmio_read_32(PMU_BASE +
	       PMU_CORE_PWR_ST) & BIT(clst_b_l2_wfi))) {
		wait_cnt++;
		if (!(wait_cnt % MAX_WAIT_CONUT))
			ERROR("%s: waiting for cluster-b L2 (%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}
	rk3368_pmu_bus_idle(bus_ide_req_clst_b, 1);
}

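/*
 * Program the PMU's automatic power-mode sequence for system suspend:
 * power down the CPUs and the little cluster's SCU, flush and idle L2,
 * idle the cluster/core/CCI interfaces, and in the common mode enter
 * DDR self-refresh and switch power off. Both clusters are enabled as
 * wakeup sources, GPIO wakeup is disabled, and the PLL lock/reset and
 * stabilisation delays are counted against the 24 MHz oscillator.
 */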
static void pmu_sleep_mode_config(void)
{
	uint32_t pwrmd_core, pwrmd_com;

	pwrmd_core = BIT(pmu_mdcr_cpu0_pd) |
		     BIT(pmu_mdcr_scu_l_pd) |
		     BIT(pmu_mdcr_l2_flush) |
		     BIT(pmu_mdcr_l2_idle) |
		     BIT(pmu_mdcr_clr_clst_l) |
		     BIT(pmu_mdcr_clr_core) |
		     BIT(pmu_mdcr_clr_cci) |
		     BIT(pmu_mdcr_core_pd);

	pwrmd_com = BIT(pmu_mode_en) |
		    BIT(pmu_mode_sref_enter) |
		    BIT(pmu_mode_pwr_off);

	regs_updata_bit_set(PMU_BASE + PMU_WKUP_CFG2, pmu_cluster_l_wkup_en);
	regs_updata_bit_set(PMU_BASE + PMU_WKUP_CFG2, pmu_cluster_b_wkup_en);
	regs_updata_bit_clr(PMU_BASE + PMU_WKUP_CFG2, pmu_gpio_wkup_en);

	mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_24M_CNT_MS(2));
	mmio_write_32(PMU_BASE + PMU_PLLRST_CNT, CYCL_24M_CNT_US(100));
	mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_24M_CNT_MS(2));
	mmio_write_32(PMU_BASE + PMU_PWRMD_CORE, pwrmd_core);
	mmio_write_32(PMU_BASE + PMU_PWRMD_COM, pwrmd_com);
	dsb();
}

static void ddr_suspend_save(void)
{
	ddr_reg_save(1, psram_sleep_cfg->ddr_data);
}

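/*
 * Arrange a full system suspend: save the DDR controller state,
 * configure the PMU power mode and the rest of the SoC for sleep,
 * disable global interrupt delivery in the PMU, power down the big
 * cluster's SCU and redirect both clusters to boot from PMUSRAM on
 * wakeup.
 */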
static void pmu_set_sleep_mode(void)
{
	ddr_suspend_save();
	pmu_sleep_mode_config();
	soc_sleep_config();
	regs_updata_bit_set(PMU_BASE + PMU_PWRMD_CORE, pmu_mdcr_global_int_dis);
	regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_glbl_int_dis_b);
	pmu_scu_b_pwrdn();
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(2),
		      (PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
}

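/*
 * Lay out PMUSRAM for suspend/resume: the cpuson entry code goes
 * first, followed by the DDR resume code and its saved-register data;
 * the stack sits above that, growing down from PSRAM_SP_TOP. The
 * assert guards against the data area running into the stack.
 */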
void plat_rockchip_pmusram_prepare(void)
{
	uint32_t *sram_dst, *sram_src;
	size_t sram_size;
	uint32_t code_size;

	/* copy the cpuson entry code into PMUSRAM */
	sram_dst = (uint32_t *)PMUSRAM_BASE;
	sram_src = (uint32_t *)&pmu_cpuson_entrypoint_start;
	sram_size = (uint32_t *)&pmu_cpuson_entrypoint_end - sram_src;
	u32_align_cpy(sram_dst, sram_src, sram_size);

	/* DDR resume code */
	sram_dst += sram_size;
	sram_src = ddr_get_resume_code_base();
	code_size = ddr_get_resume_code_size();
	u32_align_cpy(sram_dst, sram_src, code_size / 4);
	psram_sleep_cfg->ddr_func = (uint64_t)sram_dst;

	/* DDR saved-register data */
	sram_dst += (code_size / 4);
	psram_sleep_cfg->ddr_data = (uint64_t)sram_dst;

	assert((uint64_t)(sram_dst + ddr_get_resume_data_size() / 4)
						 < PSRAM_SP_BOTTOM);
	psram_sleep_cfg->sp = PSRAM_SP_TOP;
}

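/*
 * Switch the power domain of a single CPU. For a power-off request the
 * target CPU must already have reached WFI/WFE, which check_cpu_wfie()
 * verifies before the domain is turned off.
 */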
static int cpus_id_power_domain(uint32_t cluster,
				uint32_t cpu,
				uint32_t pd_state,
				uint32_t wfie_msk)
{
	uint32_t pd;
	uint64_t mpidr;

	if (cluster)
		pd = PD_CPUB0 + cpu;
	else
		pd = PD_CPUL0 + cpu;

	if (pmu_power_domain_st(pd) == pd_state)
		return 0;

	if (pd_state == pmu_pd_off) {
		mpidr = (cluster << MPIDR_AFF1_SHIFT) | cpu;
		if (check_cpu_wfie(mpidr, wfie_msk))
			return -EINVAL;
	}

	return pmu_power_domain_ctr(pd, pd_state);
}

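/* Power off every CPU except the one executing this code. */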
static void nonboot_cpus_off(void)
{
	uint32_t boot_cpu, boot_cluster, cpu;

	boot_cpu = MPIDR_AFFLVL0_VAL(read_mpidr_el1());
	boot_cluster = MPIDR_AFFLVL1_VAL(read_mpidr_el1());

	/* turn off the non-boot CPUs */
	for (cpu = 0; cpu < PLATFORM_CLUSTER0_CORE_COUNT; cpu++) {
		if (!boot_cluster && (cpu == boot_cpu))
			continue;
		cpus_id_power_domain(0, cpu, pmu_pd_off, CKECK_WFEI_MSK);
	}

	for (cpu = 0; cpu < PLATFORM_CLUSTER1_CORE_COUNT; cpu++) {
		if (boot_cluster && (cpu == boot_cpu))
			continue;
		cpus_id_power_domain(1, cpu, pmu_pd_off, CKECK_WFEI_MSK);
	}
}

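/*
 * Bring a core online (the cores_pwr_dm_on callback): record the entry
 * point for the warm boot stub, temporarily point the cluster's boot
 * address at PMUSRAM, power the core up, then restore the cold boot
 * address.
 */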
static int cores_pwr_domain_on(unsigned long mpidr, uint64_t entrypoint)
{
	uint32_t cpu, cluster;
	uint32_t cpuon_id;

	cpu = MPIDR_AFFLVL0_VAL(mpidr);
	cluster = MPIDR_AFFLVL1_VAL(mpidr);

	/* Make sure the CPU is off before powering it up */
	cpus_id_power_domain(cluster, cpu, pmu_pd_off, CKECK_WFEI_MSK);

	cpuon_id = (cluster * PLATFORM_CLUSTER0_CORE_COUNT) + cpu;
	assert(cpuon_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpuon_id] == 0);
	cpuson_flags[cpuon_id] = PMU_CPU_HOTPLUG;
	cpuson_entry_point[cpuon_id] = entrypoint;

	/* Switch boot addr to pmusram */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1 + cluster),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	dsb();

	cpus_id_power_domain(cluster, cpu, pmu_pd_on, CKECK_WFEI_MSK);

	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1 + cluster),
		      (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	return 0;
}

static int cores_pwr_domain_on_finish(void)
{
	return 0;
}

static int sys_pwr_domain_resume(void)
{
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(2),
		      (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	pm_plls_resume();
	pmu_scu_b_pwrup();

	return 0;
}

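/*
 * System suspend entry: park the non-boot CPUs, arm the PMU sleep
 * sequence and clear the ddr_flag handed to the PMUSRAM resume code.
 */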
static int sys_pwr_domain_suspend(void)
{
	nonboot_cpus_off();
	pmu_set_sleep_mode();

	psram_sleep_cfg->ddr_flag = 0;

	return 0;
}

static struct rockchip_pm_ops_cb pm_ops = {
	.cores_pwr_dm_on = cores_pwr_domain_on,
	.cores_pwr_dm_on_finish = cores_pwr_domain_on_finish,
	.sys_pwr_dm_suspend = sys_pwr_domain_suspend,
	.sys_pwr_dm_resume = sys_pwr_domain_resume,
	.sys_gbl_soft_reset = soc_sys_global_soft_reset,
};

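/*
 * Platform PMU setup at boot: register the PM callbacks, record the
 * warm boot entry point, clear the hotplug flags, note the boot CPU's
 * MPIDR and switch every other CPU off.
 */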
void plat_rockchip_pmu_init(void)
{
	uint32_t cpu;

	plat_setup_rockchip_pm_ops(&pm_ops);

	/*
	 * The SGRF boot address registers take a 32-bit (shifted)
	 * address, so the warm boot entry point is kept in a 32-bit
	 * variable.
	 */
	cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;

	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
		cpuson_flags[cpu] = 0;

	psram_sleep_cfg->boot_mpidr = read_mpidr_el1() & 0xffff;

	nonboot_cpus_off();
	INFO("%s(%d): pd status %x\n", __func__, __LINE__,
	     mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}