xref: /rk3399_ARM-atf/plat/rockchip/px30/drivers/pmu/pmu.c (revision 7af195e29a4213eefac0661d84e1c9c20476e166)
1 /*
2  * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <errno.h>
9 
10 #include <platform_def.h>
11 
12 #include <arch_helpers.h>
13 #include <bl31/bl31.h>
14 #include <common/debug.h>
15 #include <drivers/console.h>
16 #include <drivers/delay_timer.h>
17 #include <lib/bakery_lock.h>
18 #include <lib/mmio.h>
19 #include <plat/common/platform.h>
20 
21 #include <cpus_on_fixed_addr.h>
22 #include <plat_private.h>
23 #include <pmu.h>
24 #include <px30_def.h>
25 #include <soc.h>
26 
27 DEFINE_BAKERY_LOCK(rockchip_pd_lock);
28 #define rockchip_pd_lock_init()	bakery_lock_init(&rockchip_pd_lock)
29 #define rockchip_pd_lock_get()	bakery_lock_get(&rockchip_pd_lock)
30 #define rockchip_pd_lock_rls()	bakery_lock_release(&rockchip_pd_lock)
31 
32 static struct psram_data_t *psram_boot_cfg =
33 	(struct psram_data_t *)&sys_sleep_flag_sram;
34 
35 /*
36  * There are two ways to power a core on or off:
37  * 1) Switch its power domain on or off directly through the
38  *    PMU_PWRDN_CON register; this is core_pwr_pd mode.
39  * 2) Enable the core power-management logic in the PMU_CORE_PM_CON
40  *    register; the power domain is then powered off automatically
41  *    once the core enters wfi. This is core_pwr_wfi or core_pwr_wfi_int mode.
42  * cores_pd_cfg_info records which method is in use for each core.
43  */
44 
45 static uint32_t cores_pd_cfg_info[PLATFORM_CORE_COUNT]
46 #if USE_COHERENT_MEM
47 __attribute__ ((section("tzfw_coherent_mem")))
48 #endif
49 ;
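/*
 * Illustrative sketch only (not called by the driver): the two modes
 * described above differ in which PMU register gets programmed. The
 * register and bit names are the ones used elsewhere in this file.
 *
 *   core_pwr_pd:        gate the core's power domain directly, e.g.
 *                         mmio_write_32(PMU_BASE + PMU_PWRDN_CON,
 *                                       BITS_WITH_WMASK(pmu_pd_off, 0x1, pd));
 *
 *   core_pwr_wfi(_int): arm PMU_CPUAPM_CON(cpu) with core_pm_en (plus
 *                         core_pm_int_wakeup_en for the _int variant) and
 *                         let the PMU power the domain down once the core
 *                         executes wfi.
 */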
50 
51 struct px30_sleep_ddr_data {
52 	uint32_t clk_sel0;
53 	uint32_t cru_mode_save;
54 	uint32_t cru_pmu_mode_save;
55 	uint32_t ddrc_hwlpctl;
56 	uint32_t ddrc_pwrctrl;
57 	uint32_t ddrgrf_con0;
58 	uint32_t ddrgrf_con1;
59 	uint32_t ddrstdby_con0;
60 	uint32_t gpio0b_iomux;
61 	uint32_t gpio0c_iomux;
62 	uint32_t pmu_pwrmd_core_l;
63 	uint32_t pmu_pwrmd_core_h;
64 	uint32_t pmu_pwrmd_cmm_l;
65 	uint32_t pmu_pwrmd_cmm_h;
66 	uint32_t pmu_wkup_cfg2_l;
67 	uint32_t pmu_cru_clksel_con0;
68 	uint32_t pmugrf_soc_con0;
69 	uint32_t pmusgrf_soc_con0;
70 	uint32_t pmic_slp_iomux;
71 	uint32_t pgrf_pvtm_con[2];
72 	uint32_t cru_clk_gate[CRU_CLKGATES_CON_CNT];
73 	uint32_t cru_pmu_clk_gate[CRU_PMU_CLKGATE_CON_CNT];
74 	uint32_t cru_plls_con_save[END_PLL_ID][PLL_CON_CNT];
75 	uint32_t cpu_qos[CPU_AXI_QOS_NUM_REGS];
76 	uint32_t gpu_qos[CPU_AXI_QOS_NUM_REGS];
77 	uint32_t isp_128m_qos[CPU_AXI_QOS_NUM_REGS];
78 	uint32_t isp_rd_qos[CPU_AXI_QOS_NUM_REGS];
79 	uint32_t isp_wr_qos[CPU_AXI_QOS_NUM_REGS];
80 	uint32_t isp_m1_qos[CPU_AXI_QOS_NUM_REGS];
81 	uint32_t vip_qos[CPU_AXI_QOS_NUM_REGS];
82 	uint32_t rga_rd_qos[CPU_AXI_QOS_NUM_REGS];
83 	uint32_t rga_wr_qos[CPU_AXI_QOS_NUM_REGS];
84 	uint32_t vop_m0_qos[CPU_AXI_QOS_NUM_REGS];
85 	uint32_t vop_m1_qos[CPU_AXI_QOS_NUM_REGS];
86 	uint32_t vpu_qos[CPU_AXI_QOS_NUM_REGS];
87 	uint32_t vpu_r128_qos[CPU_AXI_QOS_NUM_REGS];
88 	uint32_t dcf_qos[CPU_AXI_QOS_NUM_REGS];
89 	uint32_t dmac_qos[CPU_AXI_QOS_NUM_REGS];
90 	uint32_t crypto_qos[CPU_AXI_QOS_NUM_REGS];
91 	uint32_t gmac_qos[CPU_AXI_QOS_NUM_REGS];
92 	uint32_t emmc_qos[CPU_AXI_QOS_NUM_REGS];
93 	uint32_t nand_qos[CPU_AXI_QOS_NUM_REGS];
94 	uint32_t sdio_qos[CPU_AXI_QOS_NUM_REGS];
95 	uint32_t sfc_qos[CPU_AXI_QOS_NUM_REGS];
96 	uint32_t sdmmc_qos[CPU_AXI_QOS_NUM_REGS];
97 	uint32_t usb_host_qos[CPU_AXI_QOS_NUM_REGS];
98 	uint32_t usb_otg_qos[CPU_AXI_QOS_NUM_REGS];
99 };
100 
101 static struct px30_sleep_ddr_data ddr_data
102 #if USE_COHERENT_MEM
103 __attribute__ ((section("tzfw_coherent_mem")))
104 #endif
105 ;
106 
107 static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id)
108 {
109 	assert(cpu_id < PLATFORM_CORE_COUNT);
110 	return cores_pd_cfg_info[cpu_id];
111 }
112 
113 static inline void set_cpus_pwr_domain_cfg_info(uint32_t cpu_id, uint32_t value)
114 {
115 	assert(cpu_id < PLATFORM_CORE_COUNT);
116 	cores_pd_cfg_info[cpu_id] = value;
117 #if !USE_COHERENT_MEM
118 	flush_dcache_range((uintptr_t)&cores_pd_cfg_info[cpu_id],
119 			   sizeof(uint32_t));
120 #endif
121 }
122 
123 static inline uint32_t pmu_power_domain_st(uint32_t pd)
124 {
125 	return mmio_read_32(PMU_BASE + PMU_PWRDN_ST) & BIT(pd) ?
126 	       pmu_pd_off :
127 	       pmu_pd_on;
128 }
129 
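/*
 * Note on the register-access idiom used throughout this driver: the
 * CRU/GRF/PMU registers take a 16-bit value in the low half and a
 * per-bit write-enable mask in the high half, so only bits whose mask
 * bit is set are modified. BITS_WITH_WMASK(val, msk, shift) updates just
 * the field selected by msk at shift, and WITH_16BITS_WMSK(val) writes
 * all 16 low bits at once.
 */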
130 static int pmu_power_domain_ctr(uint32_t pd, uint32_t pd_state)
131 {
132 	uint32_t loop = 0;
133 	int ret = 0;
134 
135 	rockchip_pd_lock_get();
136 
137 	mmio_write_32(PMU_BASE + PMU_PWRDN_CON,
138 		      BITS_WITH_WMASK(pd_state, 0x1, pd));
139 	dsb();
140 
141 	while ((pmu_power_domain_st(pd) != pd_state) && (loop < PD_CTR_LOOP)) {
142 		udelay(1);
143 		loop++;
144 	}
145 
146 	if (pmu_power_domain_st(pd) != pd_state) {
147 		WARN("%s: %d, %d, error!\n", __func__, pd, pd_state);
148 		ret = -EINVAL;
149 	}
150 
151 	rockchip_pd_lock_rls();
152 
153 	return ret;
154 }
155 
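/*
 * PMU_BUS_IDLE_ST carries two flags per bus: bit "bus" and bit
 * "bus + 16" (commonly the idle status and the idle acknowledge).
 * The helper below treats the bus as idle only when both are set.
 */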
156 static inline uint32_t pmu_bus_idle_st(uint32_t bus)
157 {
158 	return !!((mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) & BIT(bus)) &&
159 		  (mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) & BIT(bus + 16)));
160 }
161 
162 static void pmu_bus_idle_req(uint32_t bus, uint32_t state)
163 {
164 	uint32_t wait_cnt = 0;
165 
166 	mmio_write_32(PMU_BASE + PMU_BUS_IDLE_REQ,
167 		      BITS_WITH_WMASK(state, 0x1, bus));
168 
169 	while (pmu_bus_idle_st(bus) != state &&
170 	       wait_cnt < BUS_IDLE_LOOP) {
171 		udelay(1);
172 		wait_cnt++;
173 	}
174 
175 	if (pmu_bus_idle_st(bus) != state)
176 		WARN("%s:idle_st=0x%x, bus_id=%d\n",
177 		     __func__, mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST), bus);
178 }
179 
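/*
 * Only the QoS registers of power domains that are currently on are
 * saved and restored: the QoS/NIU registers of a domain are not
 * accessible while it is powered down, and their contents are lost
 * once the domain is switched off anyway.
 */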
180 static void qos_save(void)
181 {
182 	/* the SCU power domain will power off, so save the CPU QoS settings */
183 	SAVE_QOS(ddr_data.cpu_qos, CPU);
184 
185 	if (pmu_power_domain_st(PD_GPU) == pmu_pd_on)
186 		SAVE_QOS(ddr_data.gpu_qos, GPU);
187 	if (pmu_power_domain_st(PD_VI) == pmu_pd_on) {
188 		SAVE_QOS(ddr_data.isp_128m_qos, ISP_128M);
189 		SAVE_QOS(ddr_data.isp_rd_qos, ISP_RD);
190 		SAVE_QOS(ddr_data.isp_wr_qos, ISP_WR);
191 		SAVE_QOS(ddr_data.isp_m1_qos, ISP_M1);
192 		SAVE_QOS(ddr_data.vip_qos, VIP);
193 	}
194 	if (pmu_power_domain_st(PD_VO) == pmu_pd_on) {
195 		SAVE_QOS(ddr_data.rga_rd_qos, RGA_RD);
196 		SAVE_QOS(ddr_data.rga_wr_qos, RGA_WR);
197 		SAVE_QOS(ddr_data.vop_m0_qos, VOP_M0);
198 		SAVE_QOS(ddr_data.vop_m1_qos, VOP_M1);
199 	}
200 	if (pmu_power_domain_st(PD_VPU) == pmu_pd_on) {
201 		SAVE_QOS(ddr_data.vpu_qos, VPU);
202 		SAVE_QOS(ddr_data.vpu_r128_qos, VPU_R128);
203 	}
204 	if (pmu_power_domain_st(PD_MMC_NAND) == pmu_pd_on) {
205 		SAVE_QOS(ddr_data.emmc_qos, EMMC);
206 		SAVE_QOS(ddr_data.nand_qos, NAND);
207 		SAVE_QOS(ddr_data.sdio_qos, SDIO);
208 		SAVE_QOS(ddr_data.sfc_qos, SFC);
209 	}
210 	if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on)
211 		SAVE_QOS(ddr_data.gmac_qos, GMAC);
212 	if (pmu_power_domain_st(PD_CRYPTO) == pmu_pd_on)
213 		SAVE_QOS(ddr_data.crypto_qos, CRYPTO);
214 	if (pmu_power_domain_st(PD_SDCARD) == pmu_pd_on)
215 		SAVE_QOS(ddr_data.sdmmc_qos, SDMMC);
216 	if (pmu_power_domain_st(PD_USB) == pmu_pd_on) {
217 		SAVE_QOS(ddr_data.usb_host_qos, USB_HOST);
218 		SAVE_QOS(ddr_data.usb_otg_qos, USB_OTG);
219 	}
220 }
221 
222 static void qos_restore(void)
223 {
224 	RESTORE_QOS(ddr_data.cpu_qos, CPU);
225 
226 	if (pmu_power_domain_st(PD_GPU) == pmu_pd_on)
227 		RESTORE_QOS(ddr_data.gpu_qos, GPU);
228 	if (pmu_power_domain_st(PD_VI) == pmu_pd_on) {
229 		RESTORE_QOS(ddr_data.isp_128m_qos, ISP_128M);
230 		RESTORE_QOS(ddr_data.isp_rd_qos, ISP_RD);
231 		RESTORE_QOS(ddr_data.isp_wr_qos, ISP_WR);
232 		RESTORE_QOS(ddr_data.isp_m1_qos, ISP_M1);
233 		RESTORE_QOS(ddr_data.vip_qos, VIP);
234 	}
235 	if (pmu_power_domain_st(PD_VO) == pmu_pd_on) {
236 		RESTORE_QOS(ddr_data.rga_rd_qos, RGA_RD);
237 		RESTORE_QOS(ddr_data.rga_wr_qos, RGA_WR);
238 		RESTORE_QOS(ddr_data.vop_m0_qos, VOP_M0);
239 		RESTORE_QOS(ddr_data.vop_m1_qos, VOP_M1);
240 	}
241 	if (pmu_power_domain_st(PD_VPU) == pmu_pd_on) {
242 		RESTORE_QOS(ddr_data.vpu_qos, VPU);
243 		RESTORE_QOS(ddr_data.vpu_r128_qos, VPU_R128);
244 	}
245 	if (pmu_power_domain_st(PD_MMC_NAND) == pmu_pd_on) {
246 		RESTORE_QOS(ddr_data.emmc_qos, EMMC);
247 		RESTORE_QOS(ddr_data.nand_qos, NAND);
248 		RESTORE_QOS(ddr_data.sdio_qos, SDIO);
249 		RESTORE_QOS(ddr_data.sfc_qos, SFC);
250 	}
251 	if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on)
252 		RESTORE_QOS(ddr_data.gmac_qos, GMAC);
253 	if (pmu_power_domain_st(PD_CRYPTO) == pmu_pd_on)
254 		RESTORE_QOS(ddr_data.crypto_qos, CRYPTO);
255 	if (pmu_power_domain_st(PD_SDCARD) == pmu_pd_on)
256 		RESTORE_QOS(ddr_data.sdmmc_qos, SDMMC);
257 	if (pmu_power_domain_st(PD_USB) == pmu_pd_on) {
258 		RESTORE_QOS(ddr_data.usb_host_qos, USB_HOST);
259 		RESTORE_QOS(ddr_data.usb_otg_qos, USB_OTG);
260 	}
261 }
262 
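/*
 * Example (illustrative): callers switch whole domains with
 *
 *   pmu_set_power_domain(PD_GPU, pmu_pd_off);
 *   ...
 *   pmu_set_power_domain(PD_GPU, pmu_pd_on);
 *
 * Powering off idles the domain's bus interface before gating the power;
 * powering on restores the power first and then releases the bus idle
 * request.
 */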
263 static int pmu_set_power_domain(uint32_t pd_id, uint32_t pd_state)
264 {
265 	uint32_t state;
266 
267 	if (pmu_power_domain_st(pd_id) == pd_state)
268 		goto out;
269 
270 	if (pd_state == pmu_pd_on)
271 		pmu_power_domain_ctr(pd_id, pd_state);
272 
273 	state = (pd_state == pmu_pd_off) ? bus_idle : bus_active;
274 
275 	switch (pd_id) {
276 	case PD_GPU:
277 		pmu_bus_idle_req(BUS_ID_GPU, state);
278 		break;
279 	case PD_VI:
280 		pmu_bus_idle_req(BUS_ID_VI, state);
281 		break;
282 	case PD_VO:
283 		pmu_bus_idle_req(BUS_ID_VO, state);
284 		break;
285 	case PD_VPU:
286 		pmu_bus_idle_req(BUS_ID_VPU, state);
287 		break;
288 	case PD_MMC_NAND:
289 		pmu_bus_idle_req(BUS_ID_MMC, state);
290 		break;
291 	case PD_GMAC:
292 		pmu_bus_idle_req(BUS_ID_GMAC, state);
293 		break;
294 	case PD_CRYPTO:
295 		pmu_bus_idle_req(BUS_ID_CRYPTO, state);
296 		break;
297 	case PD_SDCARD:
298 		pmu_bus_idle_req(BUS_ID_SDCARD, state);
299 		break;
300 	case PD_USB:
301 		pmu_bus_idle_req(BUS_ID_USB, state);
302 		break;
303 	default:
304 		break;
305 	}
306 
307 	if (pd_state == pmu_pd_off)
308 		pmu_power_domain_ctr(pd_id, pd_state);
309 
310 out:
311 	return 0;
312 }
313 
314 static uint32_t pmu_powerdomain_state;
315 
316 static void pmu_power_domains_suspend(void)
317 {
318 	uint32_t clkgt_save[CRU_CLKGATES_CON_CNT + CRU_PMU_CLKGATE_CON_CNT];
319 
320 	clk_gate_con_save(clkgt_save);
321 	clk_gate_con_disable();
322 	qos_save();
323 
324 	pmu_powerdomain_state = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);
325 	pmu_set_power_domain(PD_GPU, pmu_pd_off);
326 	pmu_set_power_domain(PD_VI, pmu_pd_off);
327 	pmu_set_power_domain(PD_VO, pmu_pd_off);
328 	pmu_set_power_domain(PD_VPU, pmu_pd_off);
329 	pmu_set_power_domain(PD_MMC_NAND, pmu_pd_off);
330 	pmu_set_power_domain(PD_GMAC, pmu_pd_off);
331 	pmu_set_power_domain(PD_CRYPTO, pmu_pd_off);
332 	pmu_set_power_domain(PD_SDCARD, pmu_pd_off);
333 	pmu_set_power_domain(PD_USB, pmu_pd_off);
334 
335 	clk_gate_con_restore(clkgt_save);
336 }
337 
338 static void pmu_power_domains_resume(void)
339 {
340 	uint32_t clkgt_save[CRU_CLKGATES_CON_CNT + CRU_PMU_CLKGATE_CON_CNT];
341 
342 	clk_gate_con_save(clkgt_save);
343 	clk_gate_con_disable();
344 
345 	if (!(pmu_powerdomain_state & BIT(PD_USB)))
346 		pmu_set_power_domain(PD_USB, pmu_pd_on);
347 	if (!(pmu_powerdomain_state & BIT(PD_SDCARD)))
348 		pmu_set_power_domain(PD_SDCARD, pmu_pd_on);
349 	if (!(pmu_powerdomain_state & BIT(PD_CRYPTO)))
350 		pmu_set_power_domain(PD_CRYPTO, pmu_pd_on);
351 	if (!(pmu_powerdomain_state & BIT(PD_GMAC)))
352 		pmu_set_power_domain(PD_GMAC, pmu_pd_on);
353 	if (!(pmu_powerdomain_state & BIT(PD_MMC_NAND)))
354 		pmu_set_power_domain(PD_MMC_NAND, pmu_pd_on);
355 	if (!(pmu_powerdomain_state & BIT(PD_VPU)))
356 		pmu_set_power_domain(PD_VPU, pmu_pd_on);
357 	if (!(pmu_powerdomain_state & BIT(PD_VO)))
358 		pmu_set_power_domain(PD_VO, pmu_pd_on);
359 	if (!(pmu_powerdomain_state & BIT(PD_VI)))
360 		pmu_set_power_domain(PD_VI, pmu_pd_on);
361 	if (!(pmu_powerdomain_state & BIT(PD_GPU)))
362 		pmu_set_power_domain(PD_GPU, pmu_pd_on);
363 
364 	qos_restore();
365 	clk_gate_con_restore(clkgt_save);
366 }
367 
368 static int check_cpu_wfie(uint32_t cpu)
369 {
370 	uint32_t loop = 0, wfie_msk = CKECK_WFEI_MSK << cpu;
371 
372 	while (!(mmio_read_32(GRF_BASE + GRF_CPU_STATUS1) & wfie_msk) &&
373 	       (loop < WFEI_CHECK_LOOP)) {
374 		udelay(1);
375 		loop++;
376 	}
377 
378 	if ((mmio_read_32(GRF_BASE + GRF_CPU_STATUS1) & wfie_msk) == 0) {
379 		WARN("%s: %d, %d, error!\n", __func__, cpu, wfie_msk);
380 		return -EINVAL;
381 	}
382 
383 	return 0;
384 }
385 
386 static int cpus_power_domain_on(uint32_t cpu_id)
387 {
388 	uint32_t cpu_pd, apm_value, cfg_info, loop = 0;
389 
390 	cpu_pd = PD_CPU0 + cpu_id;
391 	cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id);
392 
393 	if (cfg_info == core_pwr_pd) {
394 		/* disable apm cfg */
395 		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
396 			      WITH_16BITS_WMSK(CORES_PM_DISABLE));
397 		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
398 			mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
399 				      WITH_16BITS_WMSK(CORES_PM_DISABLE));
400 			pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
401 		}
402 		pmu_power_domain_ctr(cpu_pd, pmu_pd_on);
403 	} else {
404 		/* wait for the cpu to power down */
405 		while (pmu_power_domain_st(cpu_pd) == pmu_pd_on && loop < 100) {
406 			udelay(2);
407 			loop++;
408 		}
409 
410 		/* return an error if the cpu did not power down in time */
411 		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
412 			WARN("%s: cpu did not power down\n", __func__);
413 			return -EINVAL;
414 		}
415 
416 		/* power up cpu in power down state */
417 		apm_value = BIT(core_pm_sft_wakeup_en);
418 		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
419 			      WITH_16BITS_WMSK(apm_value));
420 	}
421 
422 	return 0;
423 }
424 
425 static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg)
426 {
427 	uint32_t cpu_pd, apm_value;
428 
429 	cpu_pd = PD_CPU0 + cpu_id;
430 	if (pmu_power_domain_st(cpu_pd) == pmu_pd_off)
431 		return 0;
432 
433 	if (pd_cfg == core_pwr_pd) {
434 		if (check_cpu_wfie(cpu_id))
435 			return -EINVAL;
436 		/* disable apm cfg */
437 		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
438 			      WITH_16BITS_WMSK(CORES_PM_DISABLE));
439 		set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);
440 		pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
441 	} else {
442 		set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);
443 		apm_value = BIT(core_pm_en) | BIT(core_pm_dis_int);
444 		if (pd_cfg == core_pwr_wfi_int)
445 			apm_value |= BIT(core_pm_int_wakeup_en);
446 		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
447 			      WITH_16BITS_WMSK(apm_value));
448 	}
449 
450 	return 0;
451 }
452 
453 static void nonboot_cpus_off(void)
454 {
455 	uint32_t boot_cpu, cpu;
456 
457 	boot_cpu = plat_my_core_pos();
458 
459 	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) {
460 		if (cpu == boot_cpu)
461 			continue;
462 		cpus_power_domain_off(cpu, core_pwr_pd);
463 	}
464 }
465 
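/*
 * PSCI CPU_ON entry for this platform. cpuson_flags[] and
 * cpuson_entry_point[] live in the shared area described by
 * cpus_on_fixed_addr.h; the warm-boot code running on the woken core is
 * expected to check for PMU_CPU_HOTPLUG and jump to the recorded entry
 * point.
 */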
466 int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr,
467 				 uint64_t entrypoint)
468 {
469 	uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr);
470 
471 	assert(cpu_id < PLATFORM_CORE_COUNT);
472 	assert(cpuson_flags[cpu_id] == 0);
473 	cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG;
474 	cpuson_entry_point[cpu_id] = entrypoint;
475 	dsb();
476 
477 	cpus_power_domain_on(cpu_id);
478 
479 	return PSCI_E_SUCCESS;
480 }
481 
482 int rockchip_soc_cores_pwr_dm_on_finish(void)
483 {
484 	uint32_t cpu_id = plat_my_core_pos();
485 
486 	mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
487 		      WITH_16BITS_WMSK(CORES_PM_DISABLE));
488 	return PSCI_E_SUCCESS;
489 }
490 
491 int rockchip_soc_cores_pwr_dm_off(void)
492 {
493 	uint32_t cpu_id = plat_my_core_pos();
494 
495 	cpus_power_domain_off(cpu_id, core_pwr_wfi);
496 
497 	return PSCI_E_SUCCESS;
498 }
499 
500 int rockchip_soc_cores_pwr_dm_suspend(void)
501 {
502 	uint32_t cpu_id = plat_my_core_pos();
503 
504 	assert(cpu_id < PLATFORM_CORE_COUNT);
505 	assert(cpuson_flags[cpu_id] == 0);
506 	cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN;
507 	cpuson_entry_point[cpu_id] = plat_get_sec_entrypoint();
508 	dsb();
509 
510 	cpus_power_domain_off(cpu_id, core_pwr_wfi_int);
511 
512 	return PSCI_E_SUCCESS;
513 }
514 
515 int rockchip_soc_cores_pwr_dm_resume(void)
516 {
517 	uint32_t cpu_id = plat_my_core_pos();
518 
519 	/* Disable core_pm */
520 	mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
521 		      WITH_16BITS_WMSK(CORES_PM_DISABLE));
522 
523 	return PSCI_E_SUCCESS;
524 }
525 
526 #define CLK_MSK_GATING(msk, con) \
527 	mmio_write_32(CRU_BASE + (con), ((msk) << 16) | 0xffff)
528 #define CLK_MSK_UNGATING(msk, con) \
529 	mmio_write_32(CRU_BASE + (con), ((~(msk)) << 16) | 0xffff)
530 
531 static uint32_t clk_ungt_msk[CRU_CLKGATES_CON_CNT] = {
532 	0xe0ff, 0xffff, 0x0000, 0x0000,
533 	0x0000, 0x0380, 0x0000, 0x0000,
534 	0x07c0, 0x0000, 0x0000, 0x000f,
535 	0x0061, 0x1f02, 0x0440, 0x1801,
536 	0x004b, 0x0000
537 };
538 
539 static uint32_t clk_pmu_ungt_msk[CRU_PMU_CLKGATE_CON_CNT] = {
540 	0xf1ff, 0x0310
541 };
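/*
 * Bits set in clk_ungt_msk/clk_pmu_ungt_msk select the clocks left
 * running ("ungated") across suspend. clk_gate_suspend() saves the gate
 * registers and writes the inverted masks so every other clock is gated;
 * clk_gate_resume() puts the saved gate state back.
 */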
542 
543 void clk_gate_suspend(void)
544 {
545 	int i;
546 
547 	for (i = 0; i < CRU_CLKGATES_CON_CNT; i++) {
548 		ddr_data.cru_clk_gate[i] =
549 			mmio_read_32(CRU_BASE + CRU_CLKGATES_CON(i));
550 		mmio_write_32(CRU_BASE + CRU_CLKGATES_CON(i),
551 			      WITH_16BITS_WMSK(~clk_ungt_msk[i]));
552 	}
553 
554 	for (i = 0; i < CRU_PMU_CLKGATE_CON_CNT; i++) {
555 		ddr_data.cru_pmu_clk_gate[i] =
556 			mmio_read_32(PMUCRU_BASE + CRU_PMU_CLKGATES_CON(i));
557 		mmio_write_32(PMUCRU_BASE + CRU_PMU_CLKGATES_CON(i),
558 			      WITH_16BITS_WMSK(~clk_pmu_ungt_msk[i]));
559 	}
560 }
561 
562 void clk_gate_resume(void)
563 {
564 	int i;
565 
566 	for (i = 0; i < CRU_PMU_CLKGATE_CON_CNT; i++)
567 		mmio_write_32(PMUCRU_BASE + CRU_PMU_CLKGATES_CON(i),
568 			      WITH_16BITS_WMSK(ddr_data.cru_pmu_clk_gate[i]));
569 
570 	for (i = 0; i < CRU_CLKGATES_CON_CNT; i++)
571 		mmio_write_32(CRU_BASE + CRU_CLKGATES_CON(i),
572 			      WITH_16BITS_WMSK(ddr_data.cru_clk_gate[i]));
573 }
574 
575 static void pvtm_32k_config(void)
576 {
577 	uint32_t  pvtm_freq_khz, pvtm_div;
578 
579 	ddr_data.pmu_cru_clksel_con0 =
580 		mmio_read_32(PMUCRU_BASE + CRU_PMU_CLKSELS_CON(0));
581 
582 	ddr_data.pgrf_pvtm_con[0] =
583 		mmio_read_32(PMUGRF_BASE + PMUGRF_PVTM_CON0);
584 	ddr_data.pgrf_pvtm_con[1] =
585 		mmio_read_32(PMUGRF_BASE + PMUGRF_PVTM_CON1);
586 
587 	mmio_write_32(PMUGRF_BASE + PMUGRF_PVTM_CON0,
588 		      BITS_WITH_WMASK(0, 0x3, pgrf_pvtm_st));
589 	dsb();
590 	mmio_write_32(PMUGRF_BASE + PMUGRF_PVTM_CON0,
591 		      BITS_WITH_WMASK(1, 0x1, pgrf_pvtm_en));
592 	dsb();
593 	mmio_write_32(PMUGRF_BASE + PMUGRF_PVTM_CON1, PVTM_CALC_CNT);
594 	dsb();
595 
596 	mmio_write_32(PMUGRF_BASE + PMUGRF_PVTM_CON0,
597 		      BITS_WITH_WMASK(1, 0x1, pgrf_pvtm_st));
598 
599 	/* pmugrf_pvtm_st0 is cleared once the PVTM has started,
600 	 * which takes at least about 6 PVTM cycles.
601 	 * Wait 30 PVTM cycles to be on the safe side.
602 	 */
603 	while (mmio_read_32(PMUGRF_BASE + PMUGRF_PVTM_ST1) < 30)
604 		;
605 
606 	dsb();
607 	while (!(mmio_read_32(PMUGRF_BASE + PMUGRF_PVTM_ST0) & 0x1))
608 		;
609 
610 	pvtm_freq_khz =
611 		(mmio_read_32(PMUGRF_BASE + PMUGRF_PVTM_ST1) * 24000 +
612 		PVTM_CALC_CNT / 2) / PVTM_CALC_CNT;
613 	pvtm_div = (pvtm_freq_khz + 16) / 32;
614 
615 	/* pvtm_div = (div_factor << 2) + 1,
616 	 * so div_factor = (pvtm_div - 1) >> 2.
617 	 * Since ">> 2" already discards the low bits of pvtm_div,
618 	 * the "- 1" can be omitted without affecting the result.
619 	 */
620 	pvtm_div = pvtm_div >> 2;
621 	if (pvtm_div > 0x3f)
622 		pvtm_div = 0x3f;
623 
624 	mmio_write_32(PMUGRF_BASE + PMUGRF_PVTM_CON0,
625 		      BITS_WITH_WMASK(pvtm_div, 0x3f, pgrf_pvtm_div));
626 
627 	/* select pvtm as 32k source */
628 	mmio_write_32(PMUCRU_BASE + CRU_PMU_CLKSELS_CON(0),
629 		      BITS_WITH_WMASK(1, 0x3U, 14));
630 }
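/*
 * Worked example (illustrative numbers): if the PVTM measures at about
 * 2000 kHz, pvtm_div = (2000 + 16) / 32 = 63, the programmed divider
 * field is 63 >> 2 = 15, and the effective divider is (15 << 2) + 1 = 61,
 * giving roughly 2000 kHz / 61 = 32.8 kHz as the pseudo-32k source.
 */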
631 
632 static void pvtm_32k_config_restore(void)
633 {
634 	mmio_write_32(PMUCRU_BASE + CRU_PMU_CLKSELS_CON(0),
635 		      ddr_data.pmu_cru_clksel_con0 | BITS_WMSK(0x3U, 14));
636 
637 	mmio_write_32(PMUGRF_BASE + PMUGRF_PVTM_CON0,
638 		      WITH_16BITS_WMSK(ddr_data.pgrf_pvtm_con[0]));
639 	mmio_write_32(PMUGRF_BASE + PMUGRF_PVTM_CON1,
640 		      ddr_data.pgrf_pvtm_con[1]);
641 }
642 
643 static void ddr_sleep_config(void)
644 {
645 	/* disable ddr pd, sr */
646 	ddr_data.ddrc_pwrctrl = mmio_read_32(DDR_UPCTL_BASE + 0x30);
647 	mmio_write_32(DDR_UPCTL_BASE + 0x30, BITS_WITH_WMASK(0x0, 0x3, 0));
648 
649 	/* disable ddr auto gt */
650 	ddr_data.ddrgrf_con1 = mmio_read_32(DDRGRF_BASE + 0x4);
651 	mmio_write_32(DDRGRF_BASE + 0x4, BITS_WITH_WMASK(0x0, 0x1f, 0));
652 
653 	/* disable ddr standby */
654 	ddr_data.ddrstdby_con0 = mmio_read_32(DDR_STDBY_BASE + 0x0);
655 	mmio_write_32(DDR_STDBY_BASE + 0x0, BITS_WITH_WMASK(0x0, 0x1, 0));
656 	while ((mmio_read_32(DDR_UPCTL_BASE + 0x4) & 0x7) != 1)
657 		;
658 
659 	/* ddr pmu ctrl */
660 	ddr_data.ddrgrf_con0 = mmio_read_32(DDRGRF_BASE + 0x0);
661 	mmio_write_32(DDRGRF_BASE + 0x0, BITS_WITH_WMASK(0x0, 0x1, 5));
662 	dsb();
663 	mmio_write_32(DDRGRF_BASE + 0x0, BITS_WITH_WMASK(0x1, 0x1, 4));
664 
665 	/* ddr ret sel */
666 	ddr_data.pmugrf_soc_con0 =
667 		mmio_read_32(PMUGRF_BASE + PMUGRF_SOC_CON(0));
668 	mmio_write_32(PMUGRF_BASE + PMUGRF_SOC_CON(0),
669 		      BITS_WITH_WMASK(0x0, 0x1, 12));
670 }
671 
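/*
 * Undo ddr_sleep_config() in reverse order: retention select, DDR PMU
 * control, standby, automatic clock gating and finally the controller's
 * power-down/self-refresh enables.
 */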
672 static void ddr_sleep_config_restore(void)
673 {
674 	/* restore ddr ret sel */
675 	mmio_write_32(PMUGRF_BASE + PMUGRF_SOC_CON(0),
676 		      ddr_data.pmugrf_soc_con0 | BITS_WMSK(0x1, 12));
677 
678 	/* restore ddr pmu ctrl */
679 	mmio_write_32(DDRGRF_BASE + 0x0,
680 		      ddr_data.ddrgrf_con0 | BITS_WMSK(0x1, 4));
681 	dsb();
682 	mmio_write_32(DDRGRF_BASE + 0x0,
683 		      ddr_data.ddrgrf_con0 | BITS_WMSK(0x1, 5));
684 
685 	/* restore ddr standby */
686 	mmio_write_32(DDR_STDBY_BASE + 0x0,
687 		      ddr_data.ddrstdby_con0 | BITS_WMSK(0x1, 0));
688 
689 	/* restore ddr auto gt */
690 	mmio_write_32(DDRGRF_BASE + 0x4,
691 		      ddr_data.ddrgrf_con1 | BITS_WMSK(0x1f, 0));
692 
693 	/* restore ddr pd, sr */
694 	mmio_write_32(DDR_UPCTL_BASE + 0x30,
695 		      ddr_data.ddrc_pwrctrl | BITS_WMSK(0x3, 0));
696 }
697 
698 static void pmu_sleep_config(void)
699 {
700 	uint32_t pwrmd_core_lo, pwrmd_core_hi, pwrmd_com_lo, pwrmd_com_hi;
701 	uint32_t pmu_wkup_cfg2_lo;
702 	uint32_t clk_freq_khz;
703 
704 	/* save pmic_sleep iomux gpio0_a4 */
705 	ddr_data.pmic_slp_iomux = mmio_read_32(PMUGRF_BASE + GPIO0A_IOMUX);
706 
707 	ddr_data.pmu_pwrmd_core_l =
708 			mmio_read_32(PMU_BASE + PMU_PWRMODE_CORE_LO);
709 	ddr_data.pmu_pwrmd_core_h =
710 			mmio_read_32(PMU_BASE + PMU_PWRMODE_CORE_HI);
711 	ddr_data.pmu_pwrmd_cmm_l =
712 			mmio_read_32(PMU_BASE + PMU_PWRMODE_COMMON_CON_LO);
713 	ddr_data.pmu_pwrmd_cmm_h =
714 			mmio_read_32(PMU_BASE + PMU_PWRMODE_COMMON_CON_HI);
715 	ddr_data.pmu_wkup_cfg2_l = mmio_read_32(PMU_BASE + PMU_WKUP_CFG2_LO);
716 
717 	pwrmd_core_lo = BIT(pmu_global_int_dis) |
718 			BIT(pmu_core_src_gt) |
719 			BIT(pmu_cpu0_pd) |
720 			BIT(pmu_clr_core) |
721 			BIT(pmu_scu_pd) |
722 			BIT(pmu_l2_idle) |
723 			BIT(pmu_l2_flush) |
724 			BIT(pmu_clr_bus2main) |
725 			BIT(pmu_clr_peri2msch);
726 
727 	pwrmd_core_hi = BIT(pmu_dpll_pd_en) |
728 			BIT(pmu_apll_pd_en) |
729 			BIT(pmu_cpll_pd_en) |
730 			BIT(pmu_gpll_pd_en) |
731 			BIT(pmu_npll_pd_en);
732 
733 	pwrmd_com_lo = BIT(pmu_mode_en) |
734 		       BIT(pmu_pll_pd) |
735 		       BIT(pmu_pmu_use_if) |
736 		       BIT(pmu_alive_use_if) |
737 		       BIT(pmu_osc_dis) |
738 		       BIT(pmu_sref_enter) |
739 		       BIT(pmu_ddrc_gt) |
740 		       BIT(pmu_clr_pmu) |
741 		       BIT(pmu_clr_peri_pmu);
742 
743 	pwrmd_com_hi = BIT(pmu_clr_bus) |
744 		       BIT(pmu_clr_msch) |
745 		       BIT(pmu_wakeup_begin_cfg);
746 
747 	pmu_wkup_cfg2_lo = BIT(pmu_cluster_wkup_en) |
748 			   BIT(pmu_gpio_wkup_en) |
749 			   BIT(pmu_timer_wkup_en);
750 
751 	/* set pmic_sleep iomux gpio0_a4 */
752 	mmio_write_32(PMUGRF_BASE + GPIO0A_IOMUX,
753 		      BITS_WITH_WMASK(1, 0x3, 8));
754 
755 	clk_freq_khz = 32;
756 
757 	mmio_write_32(PMU_BASE + PMU_OSC_CNT_LO,
758 		      WITH_16BITS_WMSK(clk_freq_khz * 32 & 0xffff));
759 	mmio_write_32(PMU_BASE + PMU_OSC_CNT_HI,
760 		      WITH_16BITS_WMSK(clk_freq_khz * 32 >> 16));
761 
762 	mmio_write_32(PMU_BASE + PMU_STABLE_CNT_LO,
763 		      WITH_16BITS_WMSK(clk_freq_khz * 32 & 0xffff));
764 	mmio_write_32(PMU_BASE + PMU_STABLE_CNT_HI,
765 		      WITH_16BITS_WMSK(clk_freq_khz * 32 >> 16));
766 
767 	mmio_write_32(PMU_BASE + PMU_WAKEUP_RST_CLR_LO,
768 		      WITH_16BITS_WMSK(clk_freq_khz * 2 & 0xffff));
769 	mmio_write_32(PMU_BASE + PMU_WAKEUP_RST_CLR_HI,
770 		      WITH_16BITS_WMSK(clk_freq_khz * 2 >> 16));
771 
772 	/* The PMU clock has been switched back to 24 MHz by the time the
773 	 * PMU FSM runs through the counters below, so these counters must
774 	 * be calculated against the 24 MHz clock.
775 	 */
776 	mmio_write_32(PMU_BASE + PMU_SCU_PWRDN_CNT_LO,
777 		      WITH_16BITS_WMSK(24000 * 2 & 0xffff));
778 	mmio_write_32(PMU_BASE + PMU_SCU_PWRDN_CNT_HI,
779 		      WITH_16BITS_WMSK(24000 * 2 >> 16));
780 
781 	mmio_write_32(PMU_BASE + PMU_SCU_PWRUP_CNT_LO,
782 		      WITH_16BITS_WMSK(24000 * 2 & 0xffff));
783 	mmio_write_32(PMU_BASE + PMU_SCU_PWRUP_CNT_HI,
784 		      WITH_16BITS_WMSK(24000 * 2 >> 16));
785 
786 	mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT_LO,
787 		      WITH_16BITS_WMSK(24000 * 5 & 0xffff));
788 	mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT_HI,
789 		      WITH_16BITS_WMSK(24000 * 5 >> 16));
790 
791 	mmio_write_32(PMU_BASE + PMU_PLLRST_CNT_LO,
792 		      WITH_16BITS_WMSK(24000 * 2 & 0xffff));
793 	mmio_write_32(PMU_BASE + PMU_PLLRST_CNT_HI,
794 		      WITH_16BITS_WMSK(24000 * 2 >> 16));
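	/*
	 * Rough timings implied by the counts above: with the 32 kHz source,
	 * 32 * 32 counts is about 32 ms (OSC/STABLE) and 32 * 2 about 2 ms
	 * (WAKEUP_RST_CLR); with the 24 MHz clock, 24000 * 2 counts is about
	 * 2 ms and 24000 * 5 about 5 ms.
	 */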
795 
796 	/* Config pmu power mode and pmu wakeup source */
797 	mmio_write_32(PMU_BASE + PMU_PWRMODE_CORE_LO,
798 		      WITH_16BITS_WMSK(pwrmd_core_lo));
799 	mmio_write_32(PMU_BASE + PMU_PWRMODE_CORE_HI,
800 		      WITH_16BITS_WMSK(pwrmd_core_hi));
801 
802 	mmio_write_32(PMU_BASE + PMU_PWRMODE_COMMON_CON_LO,
803 		      WITH_16BITS_WMSK(pwrmd_com_lo));
804 	mmio_write_32(PMU_BASE + PMU_PWRMODE_COMMON_CON_HI,
805 		      WITH_16BITS_WMSK(pwrmd_com_hi));
806 
807 	mmio_write_32(PMU_BASE + PMU_WKUP_CFG2_LO,
808 		      WITH_16BITS_WMSK(pmu_wkup_cfg2_lo));
809 }
810 
811 static void pmu_sleep_restore(void)
812 {
813 	mmio_write_32(PMU_BASE + PMU_PWRMODE_CORE_LO,
814 		      WITH_16BITS_WMSK(ddr_data.pmu_pwrmd_core_l));
815 	mmio_write_32(PMU_BASE + PMU_PWRMODE_CORE_HI,
816 		      WITH_16BITS_WMSK(ddr_data.pmu_pwrmd_core_h));
817 	mmio_write_32(PMU_BASE + PMU_PWRMODE_COMMON_CON_LO,
818 		      WITH_16BITS_WMSK(ddr_data.pmu_pwrmd_cmm_l));
819 	mmio_write_32(PMU_BASE + PMU_PWRMODE_COMMON_CON_HI,
820 		      WITH_16BITS_WMSK(ddr_data.pmu_pwrmd_cmm_h));
821 	mmio_write_32(PMU_BASE + PMU_WKUP_CFG2_LO,
822 		      WITH_16BITS_WMSK(ddr_data.pmu_wkup_cfg2_l));
823 
824 	/* restore pmic_sleep iomux */
825 	mmio_write_32(PMUGRF_BASE + GPIO0A_IOMUX,
826 		      WITH_16BITS_WMSK(ddr_data.pmic_slp_iomux));
827 }
828 
829 static void soc_sleep_config(void)
830 {
831 	ddr_data.gpio0c_iomux = mmio_read_32(PMUGRF_BASE + GPIO0C_IOMUX);
832 
833 	pmu_sleep_config();
834 
835 	ddr_sleep_config();
836 
837 	pvtm_32k_config();
838 }
839 
840 static void soc_sleep_restore(void)
841 {
842 	secure_timer_init();
843 
844 	pvtm_32k_config_restore();
845 
846 	ddr_sleep_config_restore();
847 
848 	pmu_sleep_restore();
849 
850 	mmio_write_32(PMUGRF_BASE + GPIO0C_IOMUX,
851 		      WITH_16BITS_WMSK(ddr_data.gpio0c_iomux));
852 }
853 
854 static inline void pm_pll_wait_lock(uint32_t pll_base, uint32_t pll_id)
855 {
856 	uint32_t delay = PLL_LOCKED_TIMEOUT;
857 
858 	while (delay > 0) {
859 		if (mmio_read_32(pll_base + PLL_CON(1)) &
860 		    PLL_LOCK_MSK)
861 			break;
862 		delay--;
863 	}
864 
865 	if (delay == 0)
866 		ERROR("pll %d did not lock\n", pll_id);
867 }
868 
869 static inline void pll_pwr_ctr(uint32_t pll_base, uint32_t pll_id, uint32_t pd)
870 {
871 	mmio_write_32(pll_base + PLL_CON(1),
872 		      BITS_WITH_WMASK(1, 1U, 15));
873 	if (pd)
874 		mmio_write_32(pll_base + PLL_CON(1),
875 			      BITS_WITH_WMASK(1, 1, 14));
876 	else
877 		mmio_write_32(pll_base + PLL_CON(1),
878 			      BITS_WITH_WMASK(0, 1, 14));
879 }
880 
881 static inline void pll_set_mode(uint32_t pll_id, uint32_t mode)
882 {
883 	uint32_t val = BITS_WITH_WMASK(mode, 0x3, PLL_MODE_SHIFT(pll_id));
884 
885 	if (pll_id != GPLL_ID)
886 		mmio_write_32(CRU_BASE + CRU_MODE, val);
887 	else
888 		mmio_write_32(PMUCRU_BASE + CRU_PMU_MODE,
889 			      BITS_WITH_WMASK(mode, 0x3, 0));
890 }
891 
892 static inline void pll_suspend(uint32_t pll_id)
893 {
894 	int i;
895 	uint32_t pll_base;
896 
897 	if (pll_id != GPLL_ID)
898 		pll_base = CRU_BASE + CRU_PLL_CONS(pll_id, 0);
899 	else
900 		pll_base = PMUCRU_BASE + CRU_PLL_CONS(0, 0);
901 
902 	/* save pll con */
903 	for (i = 0; i < PLL_CON_CNT; i++)
904 		ddr_data.cru_plls_con_save[pll_id][i] =
905 				mmio_read_32(pll_base + PLL_CON(i));
906 
907 	/* slow mode */
908 	pll_set_mode(pll_id, SLOW_MODE);
909 }
910 
911 static inline void pll_resume(uint32_t pll_id)
912 {
913 	uint32_t mode, pll_base;
914 
915 	if (pll_id != GPLL_ID) {
916 		pll_base = CRU_BASE + CRU_PLL_CONS(pll_id, 0);
917 		mode = (ddr_data.cru_mode_save >> PLL_MODE_SHIFT(pll_id)) & 0x3;
918 	} else {
919 		pll_base = PMUCRU_BASE + CRU_PLL_CONS(0, 0);
920 		mode = ddr_data.cru_pmu_mode_save & 0x3;
921 	}
922 
923 	/* a pll that was locked before suspend must relock; wait for it here */
924 	if (ddr_data.cru_plls_con_save[pll_id][1] & PLL_LOCK_MSK)
925 		pm_pll_wait_lock(pll_base, pll_id);
926 
927 	pll_set_mode(pll_id, mode);
928 }
929 
930 static void pm_plls_suspend(void)
931 {
932 	ddr_data.cru_mode_save = mmio_read_32(CRU_BASE + CRU_MODE);
933 	ddr_data.cru_pmu_mode_save = mmio_read_32(PMUCRU_BASE + CRU_PMU_MODE);
934 	ddr_data.clk_sel0 = mmio_read_32(CRU_BASE + CRU_CLKSELS_CON(0));
935 
936 	pll_suspend(GPLL_ID);
937 	pll_suspend(NPLL_ID);
938 	pll_suspend(CPLL_ID);
939 	pll_suspend(APLL_ID);
940 
941 	/* core */
942 	mmio_write_32(CRU_BASE + CRU_CLKSELS_CON(0),
943 		      BITS_WITH_WMASK(0, 0xf, 0));
944 
945 	/* pclk_dbg */
946 	mmio_write_32(CRU_BASE + CRU_CLKSELS_CON(0),
947 		      BITS_WITH_WMASK(0, 0xf, 8));
948 }
949 
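/*
 * Mirror of pm_plls_suspend(): the core and pclk_dbg dividers are put
 * back first, while the CPU is still running from the 24 MHz slow
 * clock, and only then are the PLLs switched back to their saved modes
 * (waiting for a re-lock where one was locked before suspend).
 */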
950 static void pm_plls_resume(void)
951 {
952 	/* pclk_dbg */
953 	mmio_write_32(CRU_BASE + CRU_CLKSELS_CON(0),
954 		      ddr_data.clk_sel0 | BITS_WMSK(0xf, 8));
955 
956 	/* core */
957 	mmio_write_32(CRU_BASE + CRU_CLKSELS_CON(0),
958 		      ddr_data.clk_sel0 | BITS_WMSK(0xf, 0));
959 
960 	pll_resume(APLL_ID);
961 	pll_resume(CPLL_ID);
962 	pll_resume(NPLL_ID);
963 	pll_resume(GPLL_ID);
964 }
965 
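/*
 * System suspend/resume entry points. psram_boot_cfg lives in the shared
 * SRAM block (sys_sleep_flag_sram) and is read by the boot code placed
 * there; PM_WARM_BOOT_BIT in pm_flag is cleared on the way into suspend
 * and set again on resume (and at cold-boot init below), presumably so
 * the fixed-address SRAM path can distinguish a warm resume from a cold
 * boot.
 */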
966 int rockchip_soc_sys_pwr_dm_suspend(void)
967 {
968 	pmu_power_domains_suspend();
969 
970 	clk_gate_suspend();
971 
972 	soc_sleep_config();
973 
974 	pm_plls_suspend();
975 
976 	psram_boot_cfg->pm_flag &= ~PM_WARM_BOOT_BIT;
977 
978 	return 0;
979 }
980 
981 int rockchip_soc_sys_pwr_dm_resume(void)
982 {
983 	psram_boot_cfg->pm_flag |= PM_WARM_BOOT_BIT;
984 
985 	pm_plls_resume();
986 
987 	soc_sleep_restore();
988 
989 	clk_gate_resume();
990 
991 	pmu_power_domains_resume();
992 
993 	plat_rockchip_gic_cpuif_enable();
994 
995 	return 0;
996 }
997 
998 void __dead2 rockchip_soc_soft_reset(void)
999 {
1000 	pll_set_mode(GPLL_ID, SLOW_MODE);
1001 	pll_set_mode(CPLL_ID, SLOW_MODE);
1002 	pll_set_mode(NPLL_ID, SLOW_MODE);
1003 	pll_set_mode(APLL_ID, SLOW_MODE);
1004 	dsb();
1005 
1006 	mmio_write_32(CRU_BASE + CRU_GLB_SRST_FST, CRU_GLB_SRST_FST_VALUE);
1007 	dsb();
1008 
1009 	/*
1010 	 * The HW may need some time to reset the system,
1011 	 * so do not let this core keep executing code in the meantime.
1012 	 */
1013 	psci_power_down_wfi();
1014 }
1015 
1016 void __dead2 rockchip_soc_system_off(void)
1017 {
1018 	uint32_t val;
1019 
1020 	/* set pmic_sleep pin(gpio0_a4) to gpio mode */
1021 	mmio_write_32(PMUGRF_BASE + GPIO0A_IOMUX, BITS_WITH_WMASK(0, 0x3, 8));
1022 
1023 	/* config output */
1024 	val = mmio_read_32(GPIO0_BASE + SWPORTA_DDR);
1025 	val |= BIT(4);
1026 	mmio_write_32(GPIO0_BASE + SWPORTA_DDR, val);
1027 
1028 	/* config output high level */
1029 	val = mmio_read_32(GPIO0_BASE);
1030 	val |= BIT(4);
1031 	mmio_write_32(GPIO0_BASE, val);
1032 	dsb();
1033 
1034 	/*
1035 	 * The HW may need some time to power the system off,
1036 	 * so do not let this core keep executing code in the meantime.
1037 	 */
1038 	psci_power_down_wfi();
1039 }
1040 
1041 void rockchip_plat_mmu_el3(void)
1042 {
1043 	/* TODO: support the EL3 MMU setup for px30 SoCs */
1044 }
1045 
1046 void plat_rockchip_pmu_init(void)
1047 {
1048 	uint32_t cpu;
1049 
1050 	rockchip_pd_lock_init();
1051 
1052 	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
1053 		cpuson_flags[cpu] = 0;
1054 
1055 	psram_boot_cfg->ddr_func = (uint64_t)0;
1056 	psram_boot_cfg->ddr_data = (uint64_t)0;
1057 	psram_boot_cfg->sp = PSRAM_SP_TOP;
1058 	psram_boot_cfg->ddr_flag = 0x0;
1059 	psram_boot_cfg->boot_mpidr = read_mpidr_el1() & 0xffff;
1060 	psram_boot_cfg->pm_flag = PM_WARM_BOOT_BIT;
1061 
1062 	nonboot_cpus_off();
1063 
1064 	/* Remap pmu_sram's base address to boot address */
1065 	mmio_write_32(PMUSGRF_BASE + PMUSGRF_SOC_CON(0),
1066 		      BITS_WITH_WMASK(1, 0x1, 13));
1067 
1068 	INFO("%s: pd status %x\n",
1069 	     __func__, mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
1070 }
1071