/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <debug.h>
#include <assert.h>
#include <bakery_lock.h>
#include <bl31.h>
#include <console.h>
#include <delay_timer.h>
#include <errno.h>
#include <mmio.h>
#include <platform.h>
#include <platform_def.h>
#include <plat_private.h>
#include <pmu_sram.h>
#include <pmu.h>
#include <rk3328_def.h>
#include <pmu_com.h>

DEFINE_BAKERY_LOCK(rockchip_pd_lock);

static struct psram_data_t *psram_sleep_cfg =
		(struct psram_data_t *)PSRAM_DT_BASE;

static struct rk3328_sleep_ddr_data ddr_data;
static __sramdata struct rk3328_sleep_sram_data sram_data;

static uint32_t cpu_warm_boot_addr;

#pragma weak rk3328_pmic_suspend
#pragma weak rk3328_pmic_resume

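/*
 * Copy the CPU warm-boot entry code (pmu_cpuson_entrypoint) into the PMU
 * SRAM and record the stack pointer that the warm-boot path uses on resume.
 */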
void plat_rockchip_pmusram_prepare(void)
{
	uint32_t *sram_dst, *sram_src;
	size_t sram_size = 2;
	/*
	 * prepare the pmu sram code and data
	 */
	sram_dst = (uint32_t *)PMUSRAM_BASE;
	sram_src = (uint32_t *)&pmu_cpuson_entrypoint_start;
	sram_size = (uint32_t *)&pmu_cpuson_entrypoint_end -
		    (uint32_t *)sram_src;
	u32_align_cpy(sram_dst, sram_src, sram_size);

	psram_sleep_cfg->sp = PSRAM_DT_BASE;
}

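/*
 * Work out how a core was configured to power down: either its power
 * domain was switched off directly (PMU_PWRDN_CON), or it is using the
 * automatic power management path (core_pm_en in PMU_CPUAPM_CON).
 */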
static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id)
{
	uint32_t pd_reg, apm_reg;

	pd_reg = mmio_read_32(PMU_BASE + PMU_PWRDN_CON) & BIT(cpu_id);
	apm_reg = mmio_read_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id)) &
			       BIT(core_pm_en);

	if (pd_reg && !apm_reg)
		return core_pwr_pd;
	else if (!pd_reg && apm_reg)
		return core_pwr_wfi;

	ERROR("%s: 0x%x, 0x%x\n", __func__, pd_reg, apm_reg);
	while (1)
	;
}

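/*
 * Power a core back on. For the core_pwr_pd case the power domain is
 * toggled directly through the PMU; otherwise a software wakeup is
 * requested via the core's automatic power management register.
 */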
static int cpus_power_domain_on(uint32_t cpu_id)
{
	uint32_t cpu_pd, cfg_info;

	cpu_pd = PD_CPU0 + cpu_id;
	cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id);

	if (cfg_info == core_pwr_pd) {
		/* disable apm cfg */
		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
			      CORES_PM_DISABLE);

		/* if the core is already on, power it off first */
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
				      CORES_PM_DISABLE);
			pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
		}
		pmu_power_domain_ctr(cpu_pd, pmu_pd_on);
	} else {
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			WARN("%s: cpu%d is not off!\n", __func__, cpu_id);
			return -EINVAL;
		}

		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
			      BIT(core_pm_sft_wakeup_en));
	}

	return 0;
}

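/*
 * Power a core down. core_pwr_pd switches the power domain off once the
 * core is seen in wfi; the wfi-based modes only program PMU_CPUAPM_CON so
 * the PMU powers the core down automatically when it executes wfi.
 */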
static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg)
{
	uint32_t cpu_pd, core_pm_value;

	cpu_pd = PD_CPU0 + cpu_id;
	if (pmu_power_domain_st(cpu_pd) == pmu_pd_off)
		return 0;

	if (pd_cfg == core_pwr_pd) {
		if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK))
			return -EINVAL;
		/* disable apm cfg */
		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
			      CORES_PM_DISABLE);
		pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
	} else {
		core_pm_value = BIT(core_pm_en) | BIT(core_pm_dis_int);
		if (pd_cfg == core_pwr_wfi_int)
			core_pm_value |= BIT(core_pm_int_wakeup_en);

		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
			      core_pm_value);
	}

	return 0;
}

static void nonboot_cpus_off(void)
{
	uint32_t boot_cpu, cpu;

	/* turn off the non-boot cpus */
	boot_cpu = plat_my_core_pos();
	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) {
		if (cpu == boot_cpu)
			continue;
		cpus_power_domain_off(cpu, core_pwr_pd);
	}
}

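/*
 * PSCI core power hooks. The warm-boot entry point is stashed in
 * cpuson_entry_point[] (consumed by the code copied into PMU SRAM) before
 * the target core's power domain is switched on.
 */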
int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint)
{
	uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr);

	assert(cpu_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG;
	cpuson_entry_point[cpu_id] = entrypoint;
	dsb();

	cpus_power_domain_on(cpu_id);

	return 0;
}

int rockchip_soc_cores_pwr_dm_off(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	cpus_power_domain_off(cpu_id, core_pwr_wfi);

	return 0;
}

int rockchip_soc_cores_pwr_dm_suspend(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	assert(cpu_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN;
	cpuson_entry_point[cpu_id] = (uintptr_t)plat_get_sec_entrypoint();
	dsb();

	cpus_power_domain_off(cpu_id, core_pwr_wfi_int);

	return 0;
}

int rockchip_soc_cores_pwr_dm_on_finish(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), CORES_PM_DISABLE);

	return 0;
}

int rockchip_soc_cores_pwr_dm_resume(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), CORES_PM_DISABLE);

	return 0;
}

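/*
 * Global software reset. The PLLs are dropped into slow mode first,
 * presumably so the SoC is running from the 24 MHz oscillator when the
 * first global soft reset is triggered through the CRU.
 */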
void __dead2 rockchip_soc_soft_reset(void)
{
	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(CPLL_ID));
	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(GPLL_ID));
	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(NPLL_ID));
	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(APLL_ID));
	dsb();

	mmio_write_32(CRU_BASE + CRU_GLB_SRST_FST, CRU_GLB_SRST_FST_VALUE);
	dsb();
	/*
	 * The HW may need some time to reset the system,
	 * so we do not want the core to execute any further code.
	 */
	while (1)
		;
}

/*
 * On the RK805 PMIC, the sleep pin is connected to gpio2_d2 of the rk3328.
 * If the PMIC is configured to power off in response to the sleep pin,
 * driving the pin high powers the PMIC off.
 */
void __dead2 rockchip_soc_system_off(void)
{
	uint32_t val;

	/* gpio config */
	val = mmio_read_32(GRF_BASE + GRF_GPIO2D_IOMUX);
	val &= ~GPIO2_D2_GPIO_MODE;
	mmio_write_32(GRF_BASE + GRF_GPIO2D_IOMUX, val);

	/* config as output */
	val = mmio_read_32(GPIO2_BASE + SWPORTA_DDR);
	val |= GPIO2_D2;
	mmio_write_32(GPIO2_BASE + SWPORTA_DDR, val);

	/* drive the output high */
	val = mmio_read_32(GPIO2_BASE);
	val |= GPIO2_D2;
	mmio_write_32(GPIO2_BASE, val);
	dsb();

	while (1)
		;
}

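/*
 * Per-CRU_CLKGATE_CON masks of the clocks that must stay ungated across
 * system suspend; everything not set here is gated in clks_gating_suspend().
 * The individual bit meanings come from the rk3328 clock tree (TRM), not
 * from this file.
 */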
static uint32_t clk_ungt_msk[CRU_CLKGATE_NUMS] = {
	0x187f, 0x0000, 0x010c, 0x0000, 0x0200,
	0x0010, 0x0000, 0x0017, 0x001f, 0x0000,
	0x0000, 0x0000, 0x0000, 0x0003, 0x0000,
	0xf001, 0x27c0, 0x04d9, 0x03ff, 0x0000,
	0x0000, 0x0000, 0x0010, 0x0000, 0x0000,
	0x0000, 0x0000, 0x0003, 0x0008
};

static void clks_gating_suspend(uint32_t *ungt_msk)
{
	int i;

	for (i = 0; i < CRU_CLKGATE_NUMS; i++) {
		ddr_data.clk_ungt_save[i] =
			mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(i));
		mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(i),
			      ((~ungt_msk[i]) << 16) | 0xffff);
	}
}

static void clks_gating_resume(void)
{
	int i;

	for (i = 0; i < CRU_CLKGATE_NUMS; i++)
		mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(i),
			      ddr_data.clk_ungt_save[i] | 0xffff0000);
}

static inline void pm_pll_wait_lock(uint32_t pll_id)
{
	uint32_t delay = PLL_LOCKED_TIMEOUT;

	while (delay > 0) {
		if (mmio_read_32(CRU_BASE + PLL_CONS(pll_id, 1)) &
		    PLL_IS_LOCKED)
			break;
		delay--;
	}
	if (delay == 0)
		ERROR("lock-pll: %d\n", pll_id);
}

static inline void pll_pwr_dwn(uint32_t pll_id, uint32_t pd)
{
	mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
		      BITS_WITH_WMASK(1, 1, 15));
	if (pd)
		mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
			      BITS_WITH_WMASK(1, 1, 14));
	else
		mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
			      BITS_WITH_WMASK(0, 1, 14));
}

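/*
 * The DPLL feeds the DDR controller, so its suspend/resume handlers are
 * marked __sramfunc and run from SRAM while the DRAM is in self-refresh.
 */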
static __sramfunc void dpll_suspend(void)
{
	int i;

	/* slow mode */
	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(DPLL_ID));

	/* save pll con */
	for (i = 0; i < CRU_PLL_CON_NUMS; i++)
		sram_data.dpll_con_save[i] =
				mmio_read_32(CRU_BASE + PLL_CONS(DPLL_ID, i));
	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
		      BITS_WITH_WMASK(1, 1, 15));
	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
		      BITS_WITH_WMASK(1, 1, 14));
}

static __sramfunc void dpll_resume(void)
{
	uint32_t delay = PLL_LOCKED_TIMEOUT;

	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
		      BITS_WITH_WMASK(1, 1, 15));
	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
		      BITS_WITH_WMASK(0, 1, 14));
	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
		      sram_data.dpll_con_save[1] | 0xc0000000);

	dsb();

	while (delay > 0) {
		if (mmio_read_32(CRU_BASE + PLL_CONS(DPLL_ID, 1)) &
				 PLL_IS_LOCKED)
			break;
		delay--;
	}
	if (delay == 0)
		while (1)
			;

	mmio_write_32(CRU_BASE + CRU_CRU_MODE,
		      PLL_NORM_MODE(DPLL_ID));
}

static inline void pll_suspend(uint32_t pll_id)
{
	int i;

	/* slow mode */
	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(pll_id));

	/* save pll con */
	for (i = 0; i < CRU_PLL_CON_NUMS; i++)
		ddr_data.cru_plls_con_save[pll_id][i] =
				mmio_read_32(CRU_BASE + PLL_CONS(pll_id, i));

	/* powerdown pll */
	pll_pwr_dwn(pll_id, pmu_pd_off);
}

static inline void pll_resume(uint32_t pll_id)
{
	mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
		      ddr_data.cru_plls_con_save[pll_id][1] | 0xc0000000);

	pm_pll_wait_lock(pll_id);

	if (PLL_IS_NORM_MODE(ddr_data.cru_mode_save, pll_id))
		mmio_write_32(CRU_BASE + CRU_CRU_MODE,
			      PLL_NORM_MODE(pll_id));
}

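/*
 * Suspend path for the remaining PLLs: save the CRU mode and clock select
 * registers, switch the PLLs to slow mode and power them down, then
 * reparent the critical clocks (core, pclk_dbg, crypto, pwm0, uart2,
 * clk_rtc32k) to settings that remain usable with the PLLs off.
 */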
static void pm_plls_suspend(void)
{
	ddr_data.cru_mode_save = mmio_read_32(CRU_BASE + CRU_CRU_MODE);
	ddr_data.clk_sel0 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(0));
	ddr_data.clk_sel1 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(1));
	ddr_data.clk_sel18 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(18));
	ddr_data.clk_sel20 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(20));
	ddr_data.clk_sel24 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(24));
	ddr_data.clk_sel38 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(38));
	pll_suspend(NPLL_ID);
	pll_suspend(CPLL_ID);
	pll_suspend(GPLL_ID);
	pll_suspend(APLL_ID);

	/* core */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(0),
		      BITS_WITH_WMASK(0, 0x1f, 0));

	/* pclk_dbg */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(1),
		      BITS_WITH_WMASK(0, 0xf, 0));

	/* crypto */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(20),
		      BITS_WITH_WMASK(0, 0x1f, 0));

	/* pwm0 */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(24),
		      BITS_WITH_WMASK(0, 0x7f, 8));

	/* uart2 from 24M */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(18),
		      BITS_WITH_WMASK(2, 0x3, 8));

	/* clk_rtc32k */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(38),
		      BITS_WITH_WMASK(767, 0x3fff, 0) |
		      BITS_WITH_WMASK(2, 0x3, 14));
}

static void pm_plls_resume(void)
{
	/* clk_rtc32k */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(38),
		      ddr_data.clk_sel38 |
		      BITS_WMSK(0x3fff, 0) |
		      BITS_WMSK(0x3, 14));

	/* uart2 */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(18),
		      ddr_data.clk_sel18 | BITS_WMSK(0x3, 8));

	/* pwm0 */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(24),
		      ddr_data.clk_sel24 | BITS_WMSK(0x7f, 8));

	/* crypto */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(20),
		      ddr_data.clk_sel20 | BITS_WMSK(0x1f, 0));

	/* pclk_dbg */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(1),
		      ddr_data.clk_sel1 | BITS_WMSK(0xf, 0));

	/* core */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(0),
		      ddr_data.clk_sel0 | BITS_WMSK(0x1f, 0));

	pll_pwr_dwn(APLL_ID, pmu_pd_on);
	pll_pwr_dwn(GPLL_ID, pmu_pd_on);
	pll_pwr_dwn(CPLL_ID, pmu_pd_on);
	pll_pwr_dwn(NPLL_ID, pmu_pd_on);

	pll_resume(APLL_ID);
	pll_resume(GPLL_ID);
	pll_resume(CPLL_ID);
	pll_resume(NPLL_ID);
}

#define ARCH_TIMER_TICKS_PER_US (SYS_COUNTER_FREQ_IN_TICKS / 1000000)

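/*
 * Busy-wait microsecond delay that polls the generic counter (CNTPCT_EL0)
 * directly, so it can run from SRAM during suspend.
 */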
static __sramfunc void sram_udelay(uint32_t us)
{
	uint64_t pct_orig, pct_now;
	uint64_t to_wait = ARCH_TIMER_TICKS_PER_US * us;

	isb();
	pct_orig = read_cntpct_el0();

	do {
		isb();
		pct_now = read_cntpct_el0();
	} while ((pct_now - pct_orig) <= to_wait);
}

/*
 * On the RK805 PMIC, the sleep pin is connected to gpio2_d2 of the rk3328.
 * If the PMIC is configured to enter sleep mode in response to the sleep
 * pin, driving the pin high puts the PMIC into sleep mode.
 */
__sramfunc void rk3328_pmic_suspend(void)
{
	sram_data.pmic_sleep_save = mmio_read_32(GRF_BASE + PMIC_SLEEP_REG);
	sram_data.pmic_sleep_gpio_save[1] = mmio_read_32(GPIO2_BASE + 4);
	sram_data.pmic_sleep_gpio_save[0] = mmio_read_32(GPIO2_BASE);
	mmio_write_32(GRF_BASE + PMIC_SLEEP_REG, BITS_WITH_WMASK(0, 0x3, 4));
	mmio_write_32(GPIO2_BASE + 4,
		      sram_data.pmic_sleep_gpio_save[1] | BIT(26));
	mmio_write_32(GPIO2_BASE,
		      sram_data.pmic_sleep_gpio_save[0] | BIT(26));
}

__sramfunc void rk3328_pmic_resume(void)
{
	mmio_write_32(GPIO2_BASE, sram_data.pmic_sleep_gpio_save[0]);
	mmio_write_32(GPIO2_BASE + 4, sram_data.pmic_sleep_gpio_save[1]);
	mmio_write_32(GRF_BASE + PMIC_SLEEP_REG,
		      sram_data.pmic_sleep_save | BITS_WMSK(0xffff, 0));
	/* restoring the voltage needs some time */
	sram_udelay(100);
}

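/*
 * Switch the stack pointer to the PMU SRAM data area before running the
 * SRAM suspend code, since the stack in DRAM becomes unusable once the
 * DDR is put into self-refresh.
 */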
static inline void rockchip_set_sram_sp(uint64_t set_sp)
{
	__asm volatile("mov sp, %0\n"::"r" (set_sp) : "sp");
}

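/*
 * Request DDR self-refresh through PMU_SFT_CON, wait for the DDR GRF
 * status to report it, enable DDR retention, gate the DDR related clocks
 * and finally power down the DPLL. The whole sequence runs from SRAM
 * because the DRAM contents are unreachable while it is in self-refresh.
 */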
static __sramfunc void ddr_suspend(void)
{
	sram_data.pd_sr_idle_save = mmio_read_32(DDR_UPCTL_BASE +
						 DDR_PCTL2_PWRCTL);
	sram_data.pd_sr_idle_save &= SELFREF_EN;

	mmio_clrbits_32(DDR_UPCTL_BASE + DDR_PCTL2_PWRCTL, SELFREF_EN);
	sram_data.ddr_grf_con0 = mmio_read_32(DDR_GRF_BASE +
					      DDRGRF_SOC_CON(0));
	mmio_write_32(DDR_GRF_BASE, BIT_WITH_WMSK(14) | WMSK_BIT(15));

	/*
	 * Override csysreq from the ddrc and send a valid csysreq signal
	 * to the PMU; csysreq is normally controlled by the ddrc only.
	 */

	/* enter self-refresh */
	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(0));
	while ((mmio_read_32(DDR_GRF_BASE + DDRGRF_SOC_STATUS(1)) &
	       (0x03 << 12)) != (0x02 << 12))
		;
	/* ddr retention */
	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(2));

	/* ddr gating */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(0),
		      BITS_WITH_WMASK(0x7, 0x7, 4));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(7),
		      BITS_WITH_WMASK(1, 1, 4));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(18),
		      BITS_WITH_WMASK(0x1ff, 0x1ff, 1));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(27),
		      BITS_WITH_WMASK(0x3, 0x3, 0));

	dpll_suspend();
}

static __sramfunc void ddr_resume(void)
{
	dpll_resume();

	/* ddr ungating */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(0),
		      BITS_WITH_WMASK(0, 0x7, 4));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(7),
		      BITS_WITH_WMASK(0, 1, 4));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(18),
		      BITS_WITH_WMASK(0, 0x1ff, 1));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(27),
		      BITS_WITH_WMASK(0, 0x3, 0));

	/* ddr de-retention */
	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(2));
	/* exit self-refresh */
	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(0));
	while ((mmio_read_32(DDR_GRF_BASE + DDRGRF_SOC_STATUS(1)) &
		(0x03 << 12)) != (0x00 << 12))
		;

	mmio_write_32(DDR_GRF_BASE, sram_data.ddr_grf_con0 | 0xc0000000);
	if (sram_data.pd_sr_idle_save)
		mmio_setbits_32(DDR_UPCTL_BASE + DDR_PCTL2_PWRCTL,
				SELFREF_EN);
}

static __sramfunc void sram_dbg_uart_suspend(void)
{
	sram_data.uart2_ier = mmio_read_32(UART2_BASE + UART_IER);
	mmio_write_32(UART2_BASE + UART_IER, UART_INT_DISABLE);
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(16), 0x20002000);
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(2), 0x00040004);
}

static __sramfunc void sram_dbg_uart_resume(void)
{
	/* restore uart clk and reset fifo */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(16), 0x20000000);
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(2), 0x00040000);
	mmio_write_32(UART2_BASE + UART_FCR, UART_FIFO_RESET);
	mmio_write_32(UART2_BASE + UART_IER, sram_data.uart2_ier);
}

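/*
 * Final step of the suspend path: arm the automatic power management for
 * the boot core and loop in wfi. The PMU is expected to power the SoC down
 * here; execution resumes through the warm-boot vector, so this function
 * never returns.
 */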
static __sramfunc void sram_soc_enter_lp(void)
{
	uint32_t apm_value;

	apm_value = BIT(core_pm_en) |
		    BIT(core_pm_dis_int) |
		    BIT(core_pm_int_wakeup_en);
	mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(PD_CPU0), apm_value);

	dsb();
	isb();
err_loop:
	wfi();
	/*
	 * The SoC enters low power mode here and
	 * should not return to this point.
	 */
	goto err_loop;
}

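/*
 * SRAM suspend entry: with the MMU and I-cache off, point the warm-boot
 * address at PMU SRAM, put the DDR into self-refresh, drive the PMIC sleep
 * pin and quiesce the debug UART before requesting the low power state.
 */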
__sramfunc void sram_suspend(void)
{
	/* disable mmu and icache */
	tlbialle3();
	disable_mmu_icache_el3();

	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	/* ddr self-refresh and gating phy */
	ddr_suspend();

	rk3328_pmic_suspend();

	sram_dbg_uart_suspend();

	sram_soc_enter_lp();
}

static __sramfunc void sys_resume_first(void)
{
	sram_dbg_uart_resume();

	rk3328_pmic_resume();

	/* exit ddr self-refresh */
	ddr_resume();

	/* disable apm cfg */
	mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(0), CORES_PM_DISABLE);

	/* restore the warm boot address of the cpus */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
}

void __dead2 rockchip_soc_sys_pd_pwr_dn_wfi(void)
{
	rockchip_set_sram_sp(PSRAM_DT_BASE);

	sram_suspend();

	/* should never reach here */
	psci_power_down_wfi();
}

int rockchip_soc_sys_pwr_dm_suspend(void)
{
	clks_gating_suspend(clk_ungt_msk);

	pm_plls_suspend();

	return 0;
}

int rockchip_soc_sys_pwr_dm_resume(void)
{
	pm_plls_resume();

	clks_gating_resume();

	plat_rockchip_gic_cpuif_enable();

	return 0;
}

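/*
 * Boot-time PMU setup: clear the cpuson flags, fill in the psram sleep
 * configuration consumed by the PMU SRAM code (resume hook, boot MPIDR),
 * program the warm-boot address and power off the non-boot cpus.
 */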
void plat_rockchip_pmu_init(void)
{
	uint32_t cpu;

	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
		cpuson_flags[cpu] = 0;

	cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;
	psram_sleep_cfg->ddr_func = (uint64_t)sys_resume_first;
	psram_sleep_cfg->ddr_data = 0x00;
	psram_sleep_cfg->ddr_flag = 0x01;
	psram_sleep_cfg->boot_mpidr = read_mpidr_el1() & 0xffff;

	/* the warm boot address of the cpus */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	nonboot_cpus_off();

	INFO("%s: pd status 0x%x\n",
	     __func__, mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}