/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <delay_timer.h>
#include <errno.h>
#include <mmio.h>
#include <platform.h>
#include <platform_def.h>
#include <plat_private.h>
#include <rk3368_def.h>
#include <pmu_sram.h>
#include <soc.h>
#include <pmu.h>
#include <ddr_rk3368.h>
#include <pmu_com.h>

DEFINE_BAKERY_LOCK(rockchip_pd_lock);

static struct psram_data_t *psram_sleep_cfg =
	(struct psram_data_t *)PSRAM_DT_BASE;

static uint32_t cpu_warm_boot_addr;

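/*
 * Flush the L2 cache of the big cluster: assert the PMU soft-control
 * flush request and poll the core power status register until the
 * hardware reports the flush as done.
 */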
void rk3368_flash_l2_b(void)
{
	uint32_t wait_cnt = 0;

	regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_l2flsh_clst_b);
	dsb();

	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST)
		& BIT(clst_b_l2_flsh_done))) {
		wait_cnt++;
		if (!(wait_cnt % MAX_WAIT_CONUT))
			WARN("%s:reg %x,wait\n", __func__,
			     mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}

	regs_updata_bit_clr(PMU_BASE + PMU_SFT_CON, pmu_sft_l2flsh_clst_b);
}

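/*
 * Request a bus interface to enter (idle == 1) or leave (idle == 0) the
 * idle state, then poll the PMU status register until the corresponding
 * acknowledge/idle bits reach the expected value. Note that the cxcs and
 * cci400 acknowledge bits are treated with inverted polarity here.
 */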
static inline int rk3368_pmu_bus_idle(uint32_t req, uint32_t idle)
{
	uint32_t mask = BIT(req);
	uint32_t idle_mask = 0;
	uint32_t idle_target = 0;
	uint32_t val;
	uint32_t wait_cnt = 0;

	switch (req) {
	case bus_ide_req_clst_l:
		idle_mask = BIT(pmu_idle_ack_cluster_l);
		idle_target = (idle << pmu_idle_ack_cluster_l);
		break;

	case bus_ide_req_clst_b:
		idle_mask = BIT(pmu_idle_ack_cluster_b);
		idle_target = (idle << pmu_idle_ack_cluster_b);
		break;

	case bus_ide_req_cxcs:
		idle_mask = BIT(pmu_idle_ack_cxcs);
		idle_target = ((!idle) << pmu_idle_ack_cxcs);
		break;

	case bus_ide_req_cci400:
		idle_mask = BIT(pmu_idle_ack_cci400);
		idle_target = ((!idle) << pmu_idle_ack_cci400);
		break;

	case bus_ide_req_gpu:
		idle_mask = BIT(pmu_idle_ack_gpu) | BIT(pmu_idle_gpu);
		idle_target = (idle << pmu_idle_ack_gpu) |
			      (idle << pmu_idle_gpu);
		break;

	case bus_ide_req_core:
		idle_mask = BIT(pmu_idle_ack_core) | BIT(pmu_idle_core);
		idle_target = (idle << pmu_idle_ack_core) |
			      (idle << pmu_idle_core);
		break;

	case bus_ide_req_bus:
		idle_mask = BIT(pmu_idle_ack_bus) | BIT(pmu_idle_bus);
		idle_target = (idle << pmu_idle_ack_bus) |
			      (idle << pmu_idle_bus);
		break;

	case bus_ide_req_dma:
		idle_mask = BIT(pmu_idle_ack_dma) | BIT(pmu_idle_dma);
		idle_target = (idle << pmu_idle_ack_dma) |
			      (idle << pmu_idle_dma);
		break;

	case bus_ide_req_peri:
		idle_mask = BIT(pmu_idle_ack_peri) | BIT(pmu_idle_peri);
		idle_target = (idle << pmu_idle_ack_peri) |
			      (idle << pmu_idle_peri);
		break;

	case bus_ide_req_video:
		idle_mask = BIT(pmu_idle_ack_video) | BIT(pmu_idle_video);
		idle_target = (idle << pmu_idle_ack_video) |
			      (idle << pmu_idle_video);
		break;

	case bus_ide_req_vio:
		idle_mask = BIT(pmu_idle_ack_vio) | BIT(pmu_idle_vio);
		idle_target = (idle << pmu_idle_ack_vio) |
			      (idle << pmu_idle_vio);
		break;

	case bus_ide_req_alive:
		idle_mask = BIT(pmu_idle_ack_alive) | BIT(pmu_idle_alive);
		idle_target = (idle << pmu_idle_ack_alive) |
			      (idle << pmu_idle_alive);
		break;

	case bus_ide_req_pmu:
		idle_mask = BIT(pmu_idle_ack_pmu) | BIT(pmu_idle_pmu);
		idle_target = (idle << pmu_idle_ack_pmu) |
			      (idle << pmu_idle_pmu);
		break;

	case bus_ide_req_msch:
		idle_mask = BIT(pmu_idle_ack_msch) | BIT(pmu_idle_msch);
		idle_target = (idle << pmu_idle_ack_msch) |
			      (idle << pmu_idle_msch);
		break;

	case bus_ide_req_cci:
		idle_mask = BIT(pmu_idle_ack_cci) | BIT(pmu_idle_cci);
		idle_target = (idle << pmu_idle_ack_cci) |
			      (idle << pmu_idle_cci);
		break;

	default:
		ERROR("%s: unsupported idle request %u\n", __func__, req);
		return -EINVAL;
	}

	val = mmio_read_32(PMU_BASE + PMU_BUS_IDE_REQ);
	if (idle)
		val |= mask;
	else
		val &= ~mask;

	mmio_write_32(PMU_BASE + PMU_BUS_IDE_REQ, val);

	while ((mmio_read_32(PMU_BASE +
	       PMU_BUS_IDE_ST) & idle_mask) != idle_target) {
		wait_cnt++;
		if (!(wait_cnt % MAX_WAIT_CONUT))
			WARN("%s:st=%x(%x)\n", __func__,
			     mmio_read_32(PMU_BASE + PMU_BUS_IDE_ST),
			     idle_mask);
	}

	return 0;
}

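/*
 * Power the big-cluster SCU interface back up: deassert ACINACTM and
 * take the cluster out of bus idle.
 */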
void pmu_scu_b_pwrup(void)
{
	regs_updata_bit_clr(PMU_BASE + PMU_SFT_CON, pmu_sft_acinactm_clst_b);
	rk3368_pmu_bus_idle(bus_ide_req_clst_b, 0);
}

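/*
 * Power down the big-cluster SCU interface. All big-cluster CPUs must
 * already be powered off; the L2 is then flushed, ACINACTM is asserted
 * and the cluster is put into bus idle once the L2 reports WFI.
 */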
static void pmu_scu_b_pwrdn(void)
{
	uint32_t wait_cnt = 0;

	if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) &
	     PM_PWRDM_CPUSB_MSK) != PM_PWRDM_CPUSB_MSK) {
		ERROR("%s: not all big-cluster CPUs are off\n", __func__);
		return;
	}

	rk3368_flash_l2_b();

	regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_acinactm_clst_b);

	while (!(mmio_read_32(PMU_BASE +
	       PMU_CORE_PWR_ST) & BIT(clst_b_l2_wfi))) {
		wait_cnt++;
		if (!(wait_cnt % MAX_WAIT_CONUT))
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}
	rk3368_pmu_bus_idle(bus_ide_req_clst_b, 1);
}

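/*
 * Program the PMU power-mode registers for system suspend: power down
 * CPU0, the little-cluster SCU and the core logic, flush and idle the
 * L2, let the DDR enter self-refresh and power off, keep both cluster
 * wakeup sources enabled (GPIO wakeup disabled), and set the PLL
 * lock/reset and stabilisation counters.
 */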
static void pmu_sleep_mode_config(void)
{
	uint32_t pwrmd_core, pwrmd_com;

	pwrmd_core = BIT(pmu_mdcr_cpu0_pd) |
		     BIT(pmu_mdcr_scu_l_pd) |
		     BIT(pmu_mdcr_l2_flush) |
		     BIT(pmu_mdcr_l2_idle) |
		     BIT(pmu_mdcr_clr_clst_l) |
		     BIT(pmu_mdcr_clr_core) |
		     BIT(pmu_mdcr_clr_cci) |
		     BIT(pmu_mdcr_core_pd);

	pwrmd_com = BIT(pmu_mode_en) |
		    BIT(pmu_mode_sref_enter) |
		    BIT(pmu_mode_pwr_off);

	regs_updata_bit_set(PMU_BASE + PMU_WKUP_CFG2, pmu_cluster_l_wkup_en);
	regs_updata_bit_set(PMU_BASE + PMU_WKUP_CFG2, pmu_cluster_b_wkup_en);
	regs_updata_bit_clr(PMU_BASE + PMU_WKUP_CFG2, pmu_gpio_wkup_en);

	mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_24M_CNT_MS(2));
	mmio_write_32(PMU_BASE + PMU_PLLRST_CNT, CYCL_24M_CNT_US(100));
	mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_24M_CNT_MS(2));
	mmio_write_32(PMU_BASE + PMU_PWRMD_CORE, pwrmd_core);
	mmio_write_32(PMU_BASE + PMU_PWRMD_COM, pwrmd_com);
	dsb();
}

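/* Save the DDR registers into the PMUSRAM save area for the resume code. */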
static void ddr_suspend_save(void)
{
	ddr_reg_save(1, psram_sleep_cfg->ddr_data);
}

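/*
 * Prepare the SoC for deep sleep: save the DDR state, configure the PMU
 * power mode, quiesce the SoC, mask global interrupts at the PMU, power
 * down the big-cluster SCU and point both clusters' boot addresses at
 * PMUSRAM so they resume through the power-on code.
 */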
static void pmu_set_sleep_mode(void)
{
	ddr_suspend_save();
	pmu_sleep_mode_config();
	soc_sleep_config();
	regs_updata_bit_set(PMU_BASE + PMU_PWRMD_CORE, pmu_mdcr_global_int_dis);
	regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_glbl_int_dis_b);
	pmu_scu_b_pwrdn();
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(2),
		      (PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
}

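/*
 * Populate PMUSRAM: copy in the CPU power-on entry code and the DDR
 * resume code, reserve the DDR data save area behind them and check
 * that it does not run into the PMUSRAM stack.
 */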
void plat_rockchip_pmusram_prepare(void)
{
	uint32_t *sram_dst, *sram_src;
	size_t sram_size;
	uint32_t code_size;

	/* prepare the pmusram code and data */
	sram_dst = (uint32_t *)PMUSRAM_BASE;
	sram_src = (uint32_t *)&pmu_cpuson_entrypoint_start;
	sram_size = (uint32_t *)&pmu_cpuson_entrypoint_end - sram_src;
	u32_align_cpy(sram_dst, sram_src, sram_size);

	/* ddr code */
	sram_dst += sram_size;
	sram_src = ddr_get_resume_code_base();
	code_size = ddr_get_resume_code_size();
	u32_align_cpy(sram_dst, sram_src, code_size / 4);
	psram_sleep_cfg->ddr_func = (uint64_t)sram_dst;

	/* ddr data */
	sram_dst += (code_size / 4);
	psram_sleep_cfg->ddr_data = (uint64_t)sram_dst;

	assert((uint64_t)(sram_dst + ddr_get_resume_data_size() / 4) <
	       PSRAM_SP_BOTTOM);
	psram_sleep_cfg->sp = PSRAM_SP_TOP;
}

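/*
 * Switch the power domain of a single CPU on or off. Before powering a
 * CPU off, verify that it has actually reached WFI/WFE.
 */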
static int cpus_id_power_domain(uint32_t cluster,
				uint32_t cpu,
				uint32_t pd_state,
				uint32_t wfie_msk)
{
	uint32_t pd;
	uint64_t mpidr;

	if (cluster)
		pd = PD_CPUB0 + cpu;
	else
		pd = PD_CPUL0 + cpu;

	if (pmu_power_domain_st(pd) == pd_state)
		return 0;

	if (pd_state == pmu_pd_off) {
		mpidr = (cluster << MPIDR_AFF1_SHIFT) | cpu;
		if (check_cpu_wfie(mpidr, wfie_msk))
			return -EINVAL;
	}

	return pmu_power_domain_ctr(pd, pd_state);
}

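/* Power off every CPU in both clusters except the boot CPU. */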
static void nonboot_cpus_off(void)
{
	uint32_t boot_cpu, boot_cluster, cpu;

	boot_cpu = MPIDR_AFFLVL0_VAL(read_mpidr_el1());
	boot_cluster = MPIDR_AFFLVL1_VAL(read_mpidr_el1());

	/* turn off the non-boot CPUs */
	for (cpu = 0; cpu < PLATFORM_CLUSTER0_CORE_COUNT; cpu++) {
		if (!boot_cluster && (cpu == boot_cpu))
			continue;
		cpus_id_power_domain(0, cpu, pmu_pd_off, CKECK_WFEI_MSK);
	}

	for (cpu = 0; cpu < PLATFORM_CLUSTER1_CORE_COUNT; cpu++) {
		if (boot_cluster && (cpu == boot_cpu))
			continue;
		cpus_id_power_domain(1, cpu, pmu_pd_off, CKECK_WFEI_MSK);
	}
}

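/*
 * Power on a secondary CPU: record its entry point for the PMUSRAM
 * power-on code, temporarily point the cluster's boot address at
 * PMUSRAM, power the CPU up and then restore the cold-boot address.
 */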
static int cores_pwr_domain_on(unsigned long mpidr, uint64_t entrypoint)
{
	uint32_t cpu, cluster;
	uint32_t cpuon_id;

	cpu = MPIDR_AFFLVL0_VAL(mpidr);
	cluster = MPIDR_AFFLVL1_VAL(mpidr);

	/* make sure the CPU is off before powering it up */
	cpus_id_power_domain(cluster, cpu, pmu_pd_off, CKECK_WFEI_MSK);

	cpuon_id = (cluster * PLATFORM_CLUSTER0_CORE_COUNT) + cpu;
	assert(cpuon_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpuon_id] == 0);
	cpuson_flags[cpuon_id] = PMU_CPU_HOTPLUG;
	cpuson_entry_point[cpuon_id] = entrypoint;

	/* switch the boot address to pmusram */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1 + cluster),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	dsb();

	cpus_id_power_domain(cluster, cpu, pmu_pd_on, CKECK_WFEI_MSK);

	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1 + cluster),
		      (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	return 0;
}

static int cores_pwr_domain_on_finish(void)
{
	return 0;
}

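/*
 * Undo the suspend configuration: restore the cold-boot addresses for
 * both clusters, bring the PLLs back up and repower the big-cluster SCU
 * interface.
 */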
static int sys_pwr_domain_resume(void)
{
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(2),
		      (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	pm_plls_resume();
	pmu_scu_b_pwrup();

	return 0;
}

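/* Enter system suspend: park the non-boot CPUs and arm the sleep mode. */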
static int sys_pwr_domain_suspend(void)
{
	nonboot_cpus_off();
	pmu_set_sleep_mode();

	psram_sleep_cfg->ddr_flag = 0;

	return 0;
}

static struct rockchip_pm_ops_cb pm_ops = {
	.cores_pwr_dm_on = cores_pwr_domain_on,
	.cores_pwr_dm_on_finish = cores_pwr_domain_on_finish,
	.sys_pwr_dm_suspend = sys_pwr_domain_suspend,
	.sys_pwr_dm_resume = sys_pwr_domain_resume,
	.sys_gbl_soft_reset = soc_sys_global_soft_reset,
};

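/*
 * Register the platform power-management callbacks, record the warm-boot
 * entry point for the boot-address registers, clear the power-on flags
 * and switch off the non-boot CPUs.
 */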
void plat_rockchip_pmu_init(void)
{
	uint32_t cpu;

	plat_setup_rockchip_pm_ops(&pm_ops);

	/*
	 * The boot-address register only holds 32 bits, so store the
	 * warm-boot entry point as a 32-bit value.
	 */
	cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;

	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
		cpuson_flags[cpu] = 0;

	psram_sleep_cfg->boot_mpidr = read_mpidr_el1() & 0xffff;

	nonboot_cpus_off();
	INFO("%s(%d): pd status %x\n", __func__, __LINE__,
	     mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}