xref: /rk3399_ARM-atf/plat/rockchip/rk3328/drivers/pmu/pmu.c (revision 0e14a7fbeb3014e719302c9b7f6a24c4030dfaf0)
1 /*
2  * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <arch_helpers.h>
8 #include <debug.h>
9 #include <assert.h>
10 #include <bakery_lock.h>
11 #include <bl31.h>
12 #include <console.h>
13 #include <delay_timer.h>
14 #include <errno.h>
15 #include <mmio.h>
16 #include <platform.h>
17 #include <platform_def.h>
18 #include <plat_private.h>
19 #include <pmu_sram.h>
20 #include <pmu.h>
21 #include <rk3328_def.h>
22 #include <pmu_com.h>
23 
/* Serializes power-domain register updates across CPUs. */
DEFINE_BAKERY_LOCK(rockchip_pd_lock);

/* Suspend/resume handshake data placed at a fixed address in PMU SRAM. */
static struct psram_data_t *psram_sleep_cfg =
		(struct psram_data_t *)PSRAM_DT_BASE;

/* State saved across suspend while DRAM is still usable. */
static struct rk3328_sleep_ddr_data ddr_data;
/* State needed while DRAM is in self-refresh; must live in SRAM. */
static __sramdata struct rk3328_sleep_sram_data sram_data;

/* Warm-boot entry address programmed into SGRF for resuming CPUs. */
static uint32_t cpu_warm_boot_addr;

/* Board code may override the PMIC sleep-pin handlers defined below. */
#pragma weak rk3328_pmic_suspend
#pragma weak rk3328_pmic_resume
36 
37 void plat_rockchip_pmusram_prepare(void)
38 {
39 	uint32_t *sram_dst, *sram_src;
40 	size_t sram_size = 2;
41 	/*
42 	 * pmu sram code and data prepare
43 	 */
44 	sram_dst = (uint32_t *)PMUSRAM_BASE;
45 	sram_src = (uint32_t *)&pmu_cpuson_entrypoint_start;
46 	sram_size = (uint32_t *)&pmu_cpuson_entrypoint_end -
47 		    (uint32_t *)sram_src;
48 	u32_align_cpy(sram_dst, sram_src, sram_size);
49 
50 	psram_sleep_cfg->sp = PSRAM_DT_BASE;
51 }
52 
53 static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id)
54 {
55 	uint32_t pd_reg, apm_reg;
56 
57 	pd_reg = mmio_read_32(PMU_BASE + PMU_PWRDN_CON) & BIT(cpu_id);
58 	apm_reg = mmio_read_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id)) &
59 			       BIT(core_pm_en);
60 
61 	if (pd_reg && !apm_reg)
62 		return core_pwr_pd;
63 	else if (!pd_reg && apm_reg)
64 		return core_pwr_wfi;
65 
66 	ERROR("%s: 0x%x, 0x%x\n", __func__, pd_reg, apm_reg);
67 	while (1)
68 	;
69 }
70 
/*
 * Power on a secondary core, using whichever mechanism matches its
 * current configuration (see get_cpus_pwr_domain_cfg_info()).
 * Returns 0 on success, -EINVAL if the core is unexpectedly on.
 */
static int cpus_power_domain_on(uint32_t cpu_id)
{
	uint32_t cpu_pd, cfg_info;

	cpu_pd = PD_CPU0 + cpu_id;
	cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id);

	if (cfg_info == core_pwr_pd) {
		/* disable apm cfg */
		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
			      CORES_PM_DISABLE);

		/* if the cores have be on, power off it firstly */
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			/*
			 * NOTE(review): this repeats the CORES_PM_DISABLE
			 * write just above; looks redundant but kept as-is.
			 */
			mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
				      CORES_PM_DISABLE);
			pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
		}
		pmu_power_domain_ctr(cpu_pd, pmu_pd_on);
	} else {
		/* APM path: the core must already be powered down. */
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			WARN("%s: cpu%d is not in off,!\n", __func__, cpu_id);
			return -EINVAL;
		}

		/* Software wakeup: hardware brings the core back up. */
		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
			      BIT(core_pm_sft_wakeup_en));
	}

	return 0;
}
102 
/*
 * Power down a core, either immediately (core_pwr_pd, requires the
 * core to be in wfi/wfe) or by arming the automatic power-management
 * hardware to act on the core's next wfi (core_pwr_wfi[_int]).
 * Returns 0 on success, -EINVAL if the core never reached wfi/wfe.
 */
static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg)
{
	uint32_t cpu_pd, core_pm_value;

	cpu_pd = PD_CPU0 + cpu_id;
	/* Nothing to do if the domain is already off. */
	if (pmu_power_domain_st(cpu_pd) == pmu_pd_off)
		return 0;

	if (pd_cfg == core_pwr_pd) {
		/* The core must be idle before we cut its power. */
		if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK))
			return -EINVAL;
		/* disable apm cfg */
		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
			      CORES_PM_DISABLE);
		pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
	} else {
		/* Arm APM: power down on wfi with interrupts masked off. */
		core_pm_value = BIT(core_pm_en) | BIT(core_pm_dis_int);
		if (pd_cfg == core_pwr_wfi_int)
			core_pm_value |= BIT(core_pm_int_wakeup_en);

		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
			      core_pm_value);
	}

	return 0;
}
129 
130 static void nonboot_cpus_off(void)
131 {
132 	uint32_t boot_cpu, cpu;
133 
134 	/* turn off noboot cpus */
135 	boot_cpu = plat_my_core_pos();
136 	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) {
137 		if (cpu == boot_cpu)
138 			continue;
139 		cpus_power_domain_off(cpu, core_pwr_pd);
140 	}
141 }
142 
/*
 * PSCI hook: power on the core identified by mpidr and have it enter
 * at entrypoint. Always returns 0.
 */
int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint)
{
	uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr);

	/* The target core must not have a power-on already pending. */
	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG;
	cpuson_entry_point[cpu_id] = entrypoint;
	/* Publish flag and entry point before releasing the core. */
	dsb();

	cpus_power_domain_on(cpu_id);

	return 0;
}
156 
/*
 * PSCI hook: arm the calling core to power down on its next wfi
 * (hotplug off path). Always returns 0.
 */
int rockchip_soc_cores_pwr_dm_off(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	cpus_power_domain_off(cpu_id, core_pwr_wfi);

	return 0;
}
165 
/*
 * PSCI hook: prepare the calling core for suspend — record the warm
 * resume entry point, then arm auto power-down with interrupt wakeup
 * enabled. Always returns 0.
 */
int rockchip_soc_cores_pwr_dm_suspend(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN;
	cpuson_entry_point[cpu_id] = (uintptr_t)plat_get_sec_entrypoint();
	/* Publish flag and entry point before arming power-down. */
	dsb();

	cpus_power_domain_off(cpu_id, core_pwr_wfi_int);

	return 0;
}
179 
/*
 * PSCI hook: the calling core has just powered on — disable its
 * automatic power-management config. Always returns 0.
 */
int rockchip_soc_cores_pwr_dm_on_finish(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), CORES_PM_DISABLE);

	return 0;
}
188 
/*
 * PSCI hook: the calling core has resumed from suspend — disable its
 * automatic power-management config. Always returns 0.
 */
int rockchip_soc_cores_pwr_dm_resume(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), CORES_PM_DISABLE);

	return 0;
}
197 
/*
 * Trigger a global first-stage soft reset of the SoC. The PLLs are
 * dropped to slow mode first so everything is clocked from the stable
 * 24 MHz oscillator when the reset fires. Never returns.
 */
void __dead2 rockchip_soc_soft_reset(void)
{
	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(CPLL_ID));
	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(GPLL_ID));
	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(NPLL_ID));
	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(APLL_ID));
	dsb();

	mmio_write_32(CRU_BASE + CRU_GLB_SRST_FST, CRU_GLB_SRST_FST_VALUE);
	dsb();
	/*
	 * Maybe the HW needs some times to reset the system,
	 * so we do not hope the core to excute valid codes.
	 */
	while (1)
		;
}
215 
216 /*
217  * For PMIC RK805, its sleep pin is connect with gpio2_d2 from rk3328.
218  * If the PMIC is configed for responding the sleep pin to power off it,
219  * once the pin is output high,  it will get the pmic power off.
220  */
221 void __dead2 rockchip_soc_system_off(void)
222 {
223 	uint32_t val;
224 
225 	/* gpio config */
226 	val = mmio_read_32(GRF_BASE + GRF_GPIO2D_IOMUX);
227 	val &= ~GPIO2_D2_GPIO_MODE;
228 	mmio_write_32(GRF_BASE + GRF_GPIO2D_IOMUX, val);
229 
230 	/* config output */
231 	val = mmio_read_32(GPIO2_BASE + SWPORTA_DDR);
232 	val |= GPIO2_D2;
233 	mmio_write_32(GPIO2_BASE + SWPORTA_DDR, val);
234 
235 	/* config output high level */
236 	val = mmio_read_32(GPIO2_BASE);
237 	val |= GPIO2_D2;
238 	mmio_write_32(GPIO2_BASE, val);
239 	dsb();
240 
241 	while (1)
242 		;
243 }
244 
/*
 * Per-CRU_CLKGATE_CON(i) bitmask of clocks that must remain ungated
 * during system suspend; every other gate is closed by
 * clks_gating_suspend().
 */
static uint32_t clk_ungt_msk[CRU_CLKGATE_NUMS] = {
	0x187f, 0x0000, 0x010c, 0x0000, 0x0200,
	0x0010, 0x0000, 0x0017, 0x001f, 0x0000,
	0x0000, 0x0000, 0x0000, 0x0003, 0x0000,
	0xf001, 0x27c0, 0x04D9, 0x03ff, 0x0000,
	0x0000, 0x0000, 0x0010, 0x0000, 0x0000,
	0x0000, 0x0000, 0x0003, 0x0008
};
253 
254 static void clks_gating_suspend(uint32_t *ungt_msk)
255 {
256 	int i;
257 
258 	for (i = 0; i < CRU_CLKGATE_NUMS; i++) {
259 		ddr_data.clk_ungt_save[i] =
260 			mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(i));
261 		mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(i),
262 			      ((~ungt_msk[i]) << 16) | 0xffff);
263 	}
264 }
265 
266 static void clks_gating_resume(void)
267 {
268 	int i;
269 
270 	for (i = 0; i < CRU_CLKGATE_NUMS; i++)
271 		mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(i),
272 			      ddr_data.clk_ungt_save[i] | 0xffff0000);
273 }
274 
275 static inline void pm_pll_wait_lock(uint32_t pll_id)
276 {
277 	uint32_t delay = PLL_LOCKED_TIMEOUT;
278 
279 	while (delay > 0) {
280 		if (mmio_read_32(CRU_BASE + PLL_CONS(pll_id, 1)) &
281 		    PLL_IS_LOCKED)
282 			break;
283 		delay--;
284 	}
285 	if (delay == 0)
286 		ERROR("lock-pll: %d\n", pll_id);
287 }
288 
289 static inline void pll_pwr_dwn(uint32_t pll_id, uint32_t pd)
290 {
291 	mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
292 		      BITS_WITH_WMASK(1, 1, 15));
293 	if (pd)
294 		mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
295 			      BITS_WITH_WMASK(1, 1, 14));
296 	else
297 		mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
298 			      BITS_WITH_WMASK(0, 1, 14));
299 }
300 
/*
 * Suspend the DDR PLL: drop it to slow mode, save its CON registers
 * to SRAM (DRAM is about to enter self-refresh), then power it down.
 * Runs from SRAM.
 */
static __sramfunc void dpll_suspend(void)
{
	int i;

	/* slow mode */
	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(DPLL_ID));

	/* save pll con */
	for (i = 0; i < CRU_PLL_CON_NUMS; i++)
		sram_data.dpll_con_save[i] =
				mmio_read_32(CRU_BASE + PLL_CONS(DPLL_ID, i));
	/* power the DPLL down (bits 15 then 14 via write-mask) */
	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
		      BITS_WITH_WMASK(1, 1, 15));
	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
		      BITS_WITH_WMASK(1, 1, 14));
}
317 
/*
 * Resume the DDR PLL from the state saved by dpll_suspend(): power it
 * back up, restore CON1, wait for lock, then switch to normal mode.
 * Runs from SRAM; hangs forever if the PLL never locks, since DRAM
 * cannot be used without it.
 */
static __sramfunc void dpll_resume(void)
{
	uint32_t delay = PLL_LOCKED_TIMEOUT;

	/* power up (bit 15 then bit 14 cleared via write-mask) */
	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
		      BITS_WITH_WMASK(1, 1, 15));
	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
		      BITS_WITH_WMASK(0, 1, 14));
	/* restore saved CON1 contents (0xc0000000 sets the write-mask) */
	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
		      sram_data.dpll_con_save[1] | 0xc0000000);

	dsb();

	/* bounded poll for PLL lock */
	while (delay > 0) {
		if (mmio_read_32(CRU_BASE + PLL_CONS(DPLL_ID, 1)) &
				 PLL_IS_LOCKED)
			break;
		delay--;
	}
	/* no DPLL means no DDR: do not continue */
	if (delay == 0)
		while (1)
			;

	mmio_write_32(CRU_BASE + CRU_CRU_MODE,
		      PLL_NORM_MODE(DPLL_ID));
}
344 
345 static inline void pll_suspend(uint32_t pll_id)
346 {
347 	int i;
348 
349 	/* slow mode */
350 	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(pll_id));
351 
352 	/* save pll con */
353 	for (i = 0; i < CRU_PLL_CON_NUMS; i++)
354 		ddr_data.cru_plls_con_save[pll_id][i] =
355 				mmio_read_32(CRU_BASE + PLL_CONS(pll_id, i));
356 
357 	/* powerdown pll */
358 	pll_pwr_dwn(pll_id, pmu_pd_off);
359 }
360 
/*
 * Resume one PLL: restore CON1, wait for lock, and re-enter normal
 * mode only if the PLL was in normal mode before suspend.
 */
static inline void pll_resume(uint32_t pll_id)
{
	/* 0xc0000000 sets the write-mask for the restored bits */
	mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
		      ddr_data.cru_plls_con_save[pll_id][1] | 0xc0000000);

	pm_pll_wait_lock(pll_id);

	if (PLL_IS_NORM_MODE(ddr_data.cru_mode_save, pll_id))
		mmio_write_32(CRU_BASE + CRU_CRU_MODE,
			      PLL_NORM_MODE(pll_id));
}
372 
/*
 * Suspend all general PLLs (DPLL is handled separately from SRAM):
 * save mode and clock-select registers, power the PLLs down, then
 * reparent/divide the clocks that must keep running so they are safe
 * at the slow 24 MHz rate.
 */
static void pm_plls_suspend(void)
{
	ddr_data.cru_mode_save = mmio_read_32(CRU_BASE + CRU_CRU_MODE);
	ddr_data.clk_sel0 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(0));
	ddr_data.clk_sel1 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(1));
	ddr_data.clk_sel18 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(18));
	ddr_data.clk_sel20 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(20));
	ddr_data.clk_sel24 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(24));
	ddr_data.clk_sel38 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(38));
	pll_suspend(NPLL_ID);
	pll_suspend(CPLL_ID);
	pll_suspend(GPLL_ID);
	/* APLL last: the CPU is still executing on it until now */
	pll_suspend(APLL_ID);

	/* core */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(0),
		      BITS_WITH_WMASK(0, 0x1f, 0));

	/* pclk_dbg */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(1),
		      BITS_WITH_WMASK(0, 0xf, 0));

	/* crypto */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(20),
		      BITS_WITH_WMASK(0, 0x1f, 0));

	/* pwm0 */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(24),
		      BITS_WITH_WMASK(0, 0x7f, 8));

	/* uart2 from 24M */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(18),
		      BITS_WITH_WMASK(2, 0x3, 8));

	/* clk_rtc32k */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(38),
		      BITS_WITH_WMASK(767, 0x3fff, 0) |
		      BITS_WITH_WMASK(2, 0x3, 14));
}
412 
/*
 * Undo pm_plls_suspend() in strict reverse order: restore the saved
 * clock selects, power the PLLs back up, then re-lock and re-enter
 * normal mode via pll_resume().
 */
static void pm_plls_resume(void)
{
	/* clk_rtc32k */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(38),
		      ddr_data.clk_sel38 |
		      BITS_WMSK(0x3fff, 0) |
		      BITS_WMSK(0x3, 14));

	/* uart2 */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(18),
		      ddr_data.clk_sel18 | BITS_WMSK(0x3, 8));

	/* pwm0 */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(24),
		      ddr_data.clk_sel24 | BITS_WMSK(0x7f, 8));

	/* crypto */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(20),
		      ddr_data.clk_sel20 | BITS_WMSK(0x1f, 0));

	/* pclk_dbg */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(1),
		      ddr_data.clk_sel1 | BITS_WMSK(0xf, 0));

	/* core */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(0),
		      ddr_data.clk_sel0 | BITS_WMSK(0x1f, 0));

	/* power all PLLs up before waiting for any of them to lock */
	pll_pwr_dwn(APLL_ID, pmu_pd_on);
	pll_pwr_dwn(GPLL_ID, pmu_pd_on);
	pll_pwr_dwn(CPLL_ID, pmu_pd_on);
	pll_pwr_dwn(NPLL_ID, pmu_pd_on);

	pll_resume(APLL_ID);
	pll_resume(GPLL_ID);
	pll_resume(CPLL_ID);
	pll_resume(NPLL_ID);
}
451 
452 #define ARCH_TIMER_TICKS_PER_US (SYS_COUNTER_FREQ_IN_TICKS / 1000000)
453 
454 static __sramfunc void sram_udelay(uint32_t us)
455 {
456 	uint64_t pct_orig, pct_now;
457 	uint64_t to_wait = ARCH_TIMER_TICKS_PER_US * us;
458 
459 	isb();
460 	pct_orig = read_cntpct_el0();
461 
462 	do {
463 		isb();
464 		pct_now = read_cntpct_el0();
465 	} while ((pct_now - pct_orig) <= to_wait);
466 }
467 
468 /*
469  * For PMIC RK805, its sleep pin is connect with gpio2_d2 from rk3328.
470  * If the PMIC is configed for responding the sleep pin
471  * to get it into sleep mode,
472  * once the pin is output high,  it will get the pmic into sleep mode.
473  */
474 __sramfunc void rk3328_pmic_suspend(void)
475 {
476 	sram_data.pmic_sleep_save = mmio_read_32(GRF_BASE + PMIC_SLEEP_REG);
477 	sram_data.pmic_sleep_gpio_save[1] = mmio_read_32(GPIO2_BASE + 4);
478 	sram_data.pmic_sleep_gpio_save[0] = mmio_read_32(GPIO2_BASE);
479 	mmio_write_32(GRF_BASE + PMIC_SLEEP_REG, BITS_WITH_WMASK(0, 0x3, 4));
480 	mmio_write_32(GPIO2_BASE + 4,
481 		      sram_data.pmic_sleep_gpio_save[1] | BIT(26));
482 	mmio_write_32(GPIO2_BASE,
483 		      sram_data.pmic_sleep_gpio_save[0] | BIT(26));
484 }
485 
/*
 * Restore the PMIC sleep-pin GPIO state saved by rk3328_pmic_suspend()
 * and give the PMIC time to ramp its rails back up. Weak symbol:
 * board code may override. Runs from SRAM.
 */
__sramfunc void  rk3328_pmic_resume(void)
{
	mmio_write_32(GPIO2_BASE, sram_data.pmic_sleep_gpio_save[0]);
	mmio_write_32(GPIO2_BASE + 4, sram_data.pmic_sleep_gpio_save[1]);
	mmio_write_32(GRF_BASE + PMIC_SLEEP_REG,
		      sram_data.pmic_sleep_save | BITS_WMSK(0xffff, 0));
	/* Resuming volt need a lot of time */
	sram_udelay(100);
}
495 
/*
 * Point the stack pointer at SRAM before executing SRAM-resident
 * suspend code (the DRAM stack becomes unusable in self-refresh).
 */
static inline void rockchip_set_sram_sp(uint64_t set_sp)
{
	__asm volatile("mov sp, %0\n"::"r" (set_sp) : "sp");
}
500 
/*
 * Put DDR into self-refresh and retention, then gate its clocks and
 * power down the DPLL. Must run from SRAM — DRAM is unusable once
 * self-refresh is entered.
 */
static __sramfunc void ddr_suspend(void)
{
	/* remember whether hardware-managed self-refresh was enabled */
	sram_data.pd_sr_idle_save = mmio_read_32(DDR_UPCTL_BASE +
						 DDR_PCTL2_PWRCTL);
	sram_data.pd_sr_idle_save &= SELFREF_EN;

	mmio_clrbits_32(DDR_UPCTL_BASE + DDR_PCTL2_PWRCTL, SELFREF_EN);
	sram_data.ddr_grf_con0 = mmio_read_32(DDR_GRF_BASE +
					      DDRGRF_SOC_CON(0));
	mmio_write_32(DDR_GRF_BASE, BIT_WITH_WMSK(14) | WMSK_BIT(15));

	/*
	 * Override csysreq from ddrc and
	 * send valid csysreq signal to PMU,
	 * csysreq is controlled by ddrc only
	 */

	/* in self-refresh: request entry and poll the status field */
	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(0));
	while ((mmio_read_32(DDR_GRF_BASE + DDRGRF_SOC_STATUS(1)) &
	       (0x03 << 12)) !=  (0x02 << 12))
		;
	/* ddr retention */
	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(2));

	/* ddr gating */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(0),
		      BITS_WITH_WMASK(0x7, 0x7, 4));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(7),
		      BITS_WITH_WMASK(1, 1, 4));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(18),
		      BITS_WITH_WMASK(0x1ff, 0x1ff, 1));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(27),
		      BITS_WITH_WMASK(0x3, 0x3, 0));

	dpll_suspend();
}
538 
/*
 * Reverse of ddr_suspend(): bring the DPLL back, ungate DDR clocks,
 * drop retention, exit self-refresh, and restore the controller
 * configuration. Must run from SRAM.
 */
static __sramfunc  void ddr_resume(void)
{
	dpll_resume();

	/* ddr gating: reopen the gates closed in ddr_suspend() */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(0),
		      BITS_WITH_WMASK(0, 0x7, 4));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(7),
		      BITS_WITH_WMASK(0, 1, 4));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(18),
		      BITS_WITH_WMASK(0, 0x1ff, 1));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(27),
		      BITS_WITH_WMASK(0, 0x3, 0));

	/* ddr de_retention */
	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(2));
	/* exit self-refresh and poll until the status field clears */
	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(0));
	while ((mmio_read_32(DDR_GRF_BASE + DDRGRF_SOC_STATUS(1)) &
		(0x03 << 12)) !=  (0x00 << 12))
		;

	/* restore DDR GRF con0 (0xc0000000 sets the write-mask) */
	mmio_write_32(DDR_GRF_BASE, sram_data.ddr_grf_con0 | 0xc0000000);
	/* re-enable hardware self-refresh only if it was on before */
	if (sram_data.pd_sr_idle_save)
		mmio_setbits_32(DDR_UPCTL_BASE + DDR_PCTL2_PWRCTL,
				SELFREF_EN);
}
566 
/*
 * Quiesce the debug UART (uart2): save and disable its interrupts,
 * then gate its clocks. Runs from SRAM.
 */
static __sramfunc void sram_dbg_uart_suspend(void)
{
	sram_data.uart2_ier = mmio_read_32(UART2_BASE + UART_IER);
	mmio_write_32(UART2_BASE + UART_IER, UART_INT_DISABLE);
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(16), 0x20002000);
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(2), 0x00040004);
}
574 
/*
 * Reverse of sram_dbg_uart_suspend(): ungate the uart2 clocks, reset
 * its FIFOs, and restore the saved interrupt enables. Runs from SRAM.
 */
static __sramfunc void sram_dbg_uart_resume(void)
{
	/* restore uart clk and reset fifo */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(16), 0x20000000);
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(2), 0x00040000);
	mmio_write_32(UART2_BASE + UART_FCR, UART_FIFO_RESET);
	mmio_write_32(UART2_BASE + UART_IER, sram_data.uart2_ier);
}
583 
584 static __sramfunc void sram_soc_enter_lp(void)
585 {
586 	uint32_t apm_value;
587 
588 	apm_value = BIT(core_pm_en) |
589 		    BIT(core_pm_dis_int) |
590 		    BIT(core_pm_int_wakeup_en);
591 	mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(PD_CPU0), apm_value);
592 
593 	dsb();
594 	isb();
595 err_loop:
596 	wfi();
597 	/*
598 	 *Soc will enter low power mode and
599 	 *do not return to here.
600 	 */
601 	goto err_loop;
602 }
603 
/*
 * SRAM-resident system-suspend sequence: switch the warm-boot vector
 * to PMU SRAM, put DDR into self-refresh, tell the PMIC to sleep,
 * quiesce the debug UART, and finally power the SoC down. Must be
 * entered with the stack already in SRAM (see
 * rockchip_soc_sys_pd_pwr_dn_wfi()); does not return through here.
 */
__sramfunc void sram_suspend(void)
{
	/* disable mmu and icache */
	tlbialle3();
	disable_mmu_icache_el3();

	/* warm boot must land in PMU SRAM while DRAM is unavailable */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	/* ddr self-refresh and gating phy */
	ddr_suspend();

	rk3328_pmic_suspend();

	sram_dbg_uart_suspend();

	sram_soc_enter_lp();
}
623 
/*
 * First code run on warm boot (installed as psram_sleep_cfg->ddr_func):
 * undo sram_suspend() in reverse — UART, PMIC, then DDR — and restore
 * the normal warm-boot vector. Runs from SRAM.
 */
static __sramfunc void sys_resume_first(void)
{
	sram_dbg_uart_resume();

	rk3328_pmic_resume();

	/* ddr self-refresh exit */
	ddr_resume();

	/* disable apm cfg */
	mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(0), CORES_PM_DISABLE);

	/* the warm booting address of cpus */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
}
641 
/*
 * PSCI hook: final step of system power-down. Moves the stack into
 * PMU SRAM (DRAM is about to enter self-refresh) and hands off to the
 * SRAM suspend sequence. Never returns.
 */
void __dead2 rockchip_soc_sys_pd_pwr_dn_wfi(void)
{
	rockchip_set_sram_sp(PSRAM_DT_BASE);

	sram_suspend();

	/* should never reach here */
	psci_power_down_wfi();
}
651 
/*
 * PSCI hook: DRAM-resident part of system suspend — gate all
 * non-essential clocks, then power down the general PLLs. Always
 * returns 0.
 */
int rockchip_soc_sys_pwr_dm_suspend(void)
{
	clks_gating_suspend(clk_ungt_msk);

	pm_plls_suspend();

	return 0;
}
660 
/*
 * PSCI hook: DRAM-resident part of system resume — restore PLLs, then
 * clock gates (reverse of rockchip_soc_sys_pwr_dm_suspend()), and
 * re-enable the GIC CPU interface. Always returns 0.
 */
int rockchip_soc_sys_pwr_dm_resume(void)
{
	pm_plls_resume();

	clks_gating_resume();

	plat_rockchip_gic_cpuif_enable();

	return 0;
}
671 
/*
 * One-time PMU setup at boot: clear per-CPU power-on flags, fill in
 * the PMU-SRAM handshake structure, program the warm-boot vector, and
 * power down all non-boot cores.
 */
void plat_rockchip_pmu_init(void)
{
	uint32_t cpu;

	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
		cpuson_flags[cpu] = 0;

	/*
	 * NOTE(review): the 64-bit function address is truncated to
	 * uint32_t here — presumably fine because the warmboot code is
	 * linked below 4 GiB; confirm against the linker script.
	 */
	cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;
	psram_sleep_cfg->ddr_func = (uint64_t)sys_resume_first;
	psram_sleep_cfg->ddr_data = 0x00;
	psram_sleep_cfg->ddr_flag = 0x01;
	psram_sleep_cfg->boot_mpidr = read_mpidr_el1() & 0xffff;

	/* the warm booting address of cpus */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	nonboot_cpus_off();

	INFO("%s: pd status 0x%x\n",
	     __func__, mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}
695