xref: /rk3399_ARM-atf/plat/rockchip/rk3399/drivers/pmu/pmu.c (revision 12ab697e8f91a67a439e6172621b905753d61f46)
1 /*
2  * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * Redistributions of source code must retain the above copyright notice, this
8  * list of conditions and the following disclaimer.
9  *
10  * Redistributions in binary form must reproduce the above copyright notice,
11  * this list of conditions and the following disclaimer in the documentation
12  * and/or other materials provided with the distribution.
13  *
14  * Neither the name of ARM nor the names of its contributors may be used
15  * to endorse or promote products derived from this software without specific
16  * prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28  * POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <arch_helpers.h>
32 #include <assert.h>
33 #include <bakery_lock.h>
34 #include <debug.h>
35 #include <delay_timer.h>
36 #include <errno.h>
37 #include <gpio.h>
38 #include <mmio.h>
39 #include <platform.h>
40 #include <platform_def.h>
41 #include <plat_params.h>
42 #include <plat_private.h>
43 #include <rk3399_def.h>
44 #include <pmu_sram.h>
45 #include <soc.h>
46 #include <pmu.h>
47 #include <pmu_com.h>
48 
/* Serialises accesses to the PMU power-domain registers across cores. */
DEFINE_BAKERY_LOCK(rockchip_pd_lock);

/* Suspend parameters handed to the PMUSRAM warm-boot/resume code. */
static struct psram_data_t *psram_sleep_cfg =
	(struct psram_data_t *)PSRAM_DT_BASE;

/* 32-bit warm-boot entry address programmed into SGRF for resuming CPUs. */
static uint32_t cpu_warm_boot_addr;

/*
 * There are two ways to power a core on or off:
 * 1) Switch its power domain on/off through the PMU_PWRDN_CON reg;
 *    this is the core_pwr_pd mode.
 * 2) Enable the core power management in the PMU_CORE_PM_CON reg;
 *    then, once the core enters wfi, its power domain is powered
 *    off automatically: core_pwr_wfi or core_pwr_wfi_int mode.
 * core_pm_cfg_info records which method is in use for each core.
 */
static uint32_t core_pm_cfg_info[PLATFORM_CORE_COUNT]
#if USE_COHERENT_MEM
__attribute__ ((section("tzfw_coherent_mem")))
#endif
;/* coherent */
71 
72 static void pmu_bus_idle_req(uint32_t bus, uint32_t state)
73 {
74 	uint32_t bus_id = BIT(bus);
75 	uint32_t bus_req;
76 	uint32_t wait_cnt = 0;
77 	uint32_t bus_state, bus_ack;
78 
79 	if (state)
80 		bus_req = BIT(bus);
81 	else
82 		bus_req = 0;
83 
84 	mmio_clrsetbits_32(PMU_BASE + PMU_BUS_IDLE_REQ, bus_id, bus_req);
85 
86 	do {
87 		bus_state = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) & bus_id;
88 		bus_ack = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK) & bus_id;
89 		wait_cnt++;
90 	} while ((bus_state != bus_req || bus_ack != bus_req) &&
91 		 (wait_cnt < MAX_WAIT_COUNT));
92 
93 	if (bus_state != bus_req || bus_ack != bus_req) {
94 		INFO("%s:st=%x(%x)\n", __func__,
95 		     mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST),
96 		     bus_state);
97 		INFO("%s:st=%x(%x)\n", __func__,
98 		     mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK),
99 		     bus_ack);
100 	}
101 
102 }
103 
/* Saved QoS priority settings of the NoC masters, held across suspend. */
struct pmu_slpdata_s pmu_slpdata;
105 
106 static void qos_save(void)
107 {
108 	if (pmu_power_domain_st(PD_GPU) == pmu_pd_on)
109 		RESTORE_QOS(pmu_slpdata.gpu_qos, GPU);
110 	if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) {
111 		RESTORE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0);
112 		RESTORE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1);
113 	}
114 	if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) {
115 		RESTORE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0);
116 		RESTORE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1);
117 	}
118 	if (pmu_power_domain_st(PD_VO) == pmu_pd_on) {
119 		RESTORE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R);
120 		RESTORE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W);
121 		RESTORE_QOS(pmu_slpdata.vop_little, VOP_LITTLE);
122 	}
123 	if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on)
124 		RESTORE_QOS(pmu_slpdata.hdcp_qos, HDCP);
125 	if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on)
126 		RESTORE_QOS(pmu_slpdata.gmac_qos, GMAC);
127 	if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) {
128 		RESTORE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0);
129 		RESTORE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1);
130 	}
131 	if (pmu_power_domain_st(PD_SD) == pmu_pd_on)
132 		RESTORE_QOS(pmu_slpdata.sdmmc_qos, SDMMC);
133 	if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on)
134 		RESTORE_QOS(pmu_slpdata.emmc_qos, EMMC);
135 	if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on)
136 		RESTORE_QOS(pmu_slpdata.sdio_qos, SDIO);
137 	if (pmu_power_domain_st(PD_GIC) == pmu_pd_on)
138 		RESTORE_QOS(pmu_slpdata.gic_qos, GIC);
139 	if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) {
140 		RESTORE_QOS(pmu_slpdata.rga_r_qos, RGA_R);
141 		RESTORE_QOS(pmu_slpdata.rga_w_qos, RGA_W);
142 	}
143 	if (pmu_power_domain_st(PD_IEP) == pmu_pd_on)
144 		RESTORE_QOS(pmu_slpdata.iep_qos, IEP);
145 	if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) {
146 		RESTORE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0);
147 		RESTORE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1);
148 	}
149 	if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) {
150 		RESTORE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0);
151 		RESTORE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1);
152 		RESTORE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP);
153 	}
154 	if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) {
155 		RESTORE_QOS(pmu_slpdata.dmac0_qos, DMAC0);
156 		RESTORE_QOS(pmu_slpdata.dmac1_qos, DMAC1);
157 		RESTORE_QOS(pmu_slpdata.dcf_qos, DCF);
158 		RESTORE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0);
159 		RESTORE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1);
160 		RESTORE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP);
161 		RESTORE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP);
162 		RESTORE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1);
163 	}
164 	if (pmu_power_domain_st(PD_VDU) == pmu_pd_on)
165 		RESTORE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0);
166 	if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) {
167 		RESTORE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R);
168 		RESTORE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W);
169 	}
170 }
171 
172 static void qos_restore(void)
173 {
174 	if (pmu_power_domain_st(PD_GPU) == pmu_pd_on)
175 		SAVE_QOS(pmu_slpdata.gpu_qos, GPU);
176 	if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) {
177 		SAVE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0);
178 		SAVE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1);
179 	}
180 	if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) {
181 		SAVE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0);
182 		SAVE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1);
183 	}
184 	if (pmu_power_domain_st(PD_VO) == pmu_pd_on) {
185 		SAVE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R);
186 		SAVE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W);
187 		SAVE_QOS(pmu_slpdata.vop_little, VOP_LITTLE);
188 	}
189 	if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on)
190 		SAVE_QOS(pmu_slpdata.hdcp_qos, HDCP);
191 	if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on)
192 		SAVE_QOS(pmu_slpdata.gmac_qos, GMAC);
193 	if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) {
194 		SAVE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0);
195 		SAVE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1);
196 	}
197 	if (pmu_power_domain_st(PD_SD) == pmu_pd_on)
198 		SAVE_QOS(pmu_slpdata.sdmmc_qos, SDMMC);
199 	if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on)
200 		SAVE_QOS(pmu_slpdata.emmc_qos, EMMC);
201 	if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on)
202 		SAVE_QOS(pmu_slpdata.sdio_qos, SDIO);
203 	if (pmu_power_domain_st(PD_GIC) == pmu_pd_on)
204 		SAVE_QOS(pmu_slpdata.gic_qos, GIC);
205 	if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) {
206 		SAVE_QOS(pmu_slpdata.rga_r_qos, RGA_R);
207 		SAVE_QOS(pmu_slpdata.rga_w_qos, RGA_W);
208 	}
209 	if (pmu_power_domain_st(PD_IEP) == pmu_pd_on)
210 		SAVE_QOS(pmu_slpdata.iep_qos, IEP);
211 	if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) {
212 		SAVE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0);
213 		SAVE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1);
214 	}
215 	if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) {
216 		SAVE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0);
217 		SAVE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1);
218 		SAVE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP);
219 	}
220 	if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) {
221 		SAVE_QOS(pmu_slpdata.dmac0_qos, DMAC0);
222 		SAVE_QOS(pmu_slpdata.dmac1_qos, DMAC1);
223 		SAVE_QOS(pmu_slpdata.dcf_qos, DCF);
224 		SAVE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0);
225 		SAVE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1);
226 		SAVE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP);
227 		SAVE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP);
228 		SAVE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1);
229 	}
230 	if (pmu_power_domain_st(PD_VDU) == pmu_pd_on)
231 		SAVE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0);
232 	if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) {
233 		SAVE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R);
234 		SAVE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W);
235 	}
236 }
237 
/*
 * Switch one power domain on or off, keeping its NoC bus interface
 * consistent with the new power state.
 *
 * Ordering matters: when powering ON, the domain rails are enabled first
 * and the bus is then taken out of idle; when powering OFF, the bus is
 * idled first and the rails are cut last. Domains without a bus interface
 * handled here (e.g. PD_TCPD0/1) only get the rail switch.
 *
 * Always returns 0; a domain already in the requested state is a no-op.
 */
static int pmu_set_power_domain(uint32_t pd_id, uint32_t pd_state)
{
	uint32_t state;

	if (pmu_power_domain_st(pd_id) == pd_state)
		goto out;

	/* Power up before releasing the bus from idle. */
	if (pd_state == pmu_pd_on)
		pmu_power_domain_ctr(pd_id, pd_state);

	state = (pd_state == pmu_pd_off) ? BUS_IDLE : BUS_ACTIVE;

	switch (pd_id) {
	case PD_GPU:
		pmu_bus_idle_req(BUS_ID_GPU, state);
		break;
	case PD_VIO:
		pmu_bus_idle_req(BUS_ID_VIO, state);
		break;
	case PD_ISP0:
		pmu_bus_idle_req(BUS_ID_ISP0, state);
		break;
	case PD_ISP1:
		pmu_bus_idle_req(BUS_ID_ISP1, state);
		break;
	case PD_VO:
		/* PD_VO feeds both VOP big and VOP little bus interfaces. */
		pmu_bus_idle_req(BUS_ID_VOPB, state);
		pmu_bus_idle_req(BUS_ID_VOPL, state);
		break;
	case PD_HDCP:
		pmu_bus_idle_req(BUS_ID_HDCP, state);
		break;
	case PD_TCPD0:
		break;
	case PD_TCPD1:
		break;
	case PD_GMAC:
		pmu_bus_idle_req(BUS_ID_GMAC, state);
		break;
	case PD_CCI:
		pmu_bus_idle_req(BUS_ID_CCIM0, state);
		pmu_bus_idle_req(BUS_ID_CCIM1, state);
		break;
	case PD_SD:
		pmu_bus_idle_req(BUS_ID_SD, state);
		break;
	case PD_EMMC:
		pmu_bus_idle_req(BUS_ID_EMMC, state);
		break;
	case PD_EDP:
		pmu_bus_idle_req(BUS_ID_EDP, state);
		break;
	case PD_SDIOAUDIO:
		pmu_bus_idle_req(BUS_ID_SDIOAUDIO, state);
		break;
	case PD_GIC:
		pmu_bus_idle_req(BUS_ID_GIC, state);
		break;
	case PD_RGA:
		pmu_bus_idle_req(BUS_ID_RGA, state);
		break;
	case PD_VCODEC:
		pmu_bus_idle_req(BUS_ID_VCODEC, state);
		break;
	case PD_VDU:
		pmu_bus_idle_req(BUS_ID_VDU, state);
		break;
	case PD_IEP:
		pmu_bus_idle_req(BUS_ID_IEP, state);
		break;
	case PD_USB3:
		pmu_bus_idle_req(BUS_ID_USB3, state);
		break;
	case PD_PERIHP:
		pmu_bus_idle_req(BUS_ID_PERIHP, state);
		break;
	default:
		break;
	}

	/* Bus is idle now; it is safe to cut the domain's power. */
	if (pd_state == pmu_pd_off)
		pmu_power_domain_ctr(pd_id, pd_state);

out:
	return 0;
}
324 
/* Snapshot of PMU_PWRDN_ST taken at suspend; consulted on resume. */
static uint32_t pmu_powerdomain_state;
326 
327 static void pmu_power_domains_suspend(void)
328 {
329 	clk_gate_con_save();
330 	clk_gate_con_disable();
331 	qos_save();
332 	pmu_powerdomain_state = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);
333 	pmu_set_power_domain(PD_GPU, pmu_pd_off);
334 	pmu_set_power_domain(PD_TCPD0, pmu_pd_off);
335 	pmu_set_power_domain(PD_TCPD1, pmu_pd_off);
336 	pmu_set_power_domain(PD_VO, pmu_pd_off);
337 	pmu_set_power_domain(PD_ISP0, pmu_pd_off);
338 	pmu_set_power_domain(PD_ISP1, pmu_pd_off);
339 	pmu_set_power_domain(PD_HDCP, pmu_pd_off);
340 	pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_off);
341 	pmu_set_power_domain(PD_GMAC, pmu_pd_off);
342 	pmu_set_power_domain(PD_EDP, pmu_pd_off);
343 	pmu_set_power_domain(PD_IEP, pmu_pd_off);
344 	pmu_set_power_domain(PD_RGA, pmu_pd_off);
345 	pmu_set_power_domain(PD_VCODEC, pmu_pd_off);
346 	pmu_set_power_domain(PD_VDU, pmu_pd_off);
347 	clk_gate_con_restore();
348 }
349 
350 static void pmu_power_domains_resume(void)
351 {
352 	clk_gate_con_save();
353 	clk_gate_con_disable();
354 	if (!(pmu_powerdomain_state & BIT(PD_VDU)))
355 		pmu_set_power_domain(PD_VDU, pmu_pd_on);
356 	if (!(pmu_powerdomain_state & BIT(PD_VCODEC)))
357 		pmu_set_power_domain(PD_VCODEC, pmu_pd_on);
358 	if (!(pmu_powerdomain_state & BIT(PD_RGA)))
359 		pmu_set_power_domain(PD_RGA, pmu_pd_on);
360 	if (!(pmu_powerdomain_state & BIT(PD_IEP)))
361 		pmu_set_power_domain(PD_IEP, pmu_pd_on);
362 	if (!(pmu_powerdomain_state & BIT(PD_EDP)))
363 		pmu_set_power_domain(PD_EDP, pmu_pd_on);
364 	if (!(pmu_powerdomain_state & BIT(PD_GMAC)))
365 		pmu_set_power_domain(PD_GMAC, pmu_pd_on);
366 	if (!(pmu_powerdomain_state & BIT(PD_SDIOAUDIO)))
367 		pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_on);
368 	if (!(pmu_powerdomain_state & BIT(PD_HDCP)))
369 		pmu_set_power_domain(PD_HDCP, pmu_pd_on);
370 	if (!(pmu_powerdomain_state & BIT(PD_ISP1)))
371 		pmu_set_power_domain(PD_ISP1, pmu_pd_on);
372 	if (!(pmu_powerdomain_state & BIT(PD_ISP0)))
373 		pmu_set_power_domain(PD_ISP0, pmu_pd_on);
374 	if (!(pmu_powerdomain_state & BIT(PD_VO)))
375 		pmu_set_power_domain(PD_VO, pmu_pd_on);
376 	if (!(pmu_powerdomain_state & BIT(PD_TCPD1)))
377 		pmu_set_power_domain(PD_TCPD1, pmu_pd_on);
378 	if (!(pmu_powerdomain_state & BIT(PD_TCPD0)))
379 		pmu_set_power_domain(PD_TCPD0, pmu_pd_on);
380 	if (!(pmu_powerdomain_state & BIT(PD_GPU)))
381 		pmu_set_power_domain(PD_GPU, pmu_pd_on);
382 	qos_restore();
383 	clk_gate_con_restore();
384 }
385 
/*
 * Ask the PMU to flush the cluster-B (A72) L2 cache and wait for the
 * flush-done status bit. On timeout a warning is logged every further
 * poll, but the wait continues until the hardware reports done.
 */
void rk3399_flash_l2_b(void)
{
	uint32_t wait_cnt = 0;

	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B));
	dsb();

	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
		 BIT(L2_FLUSHDONE_CLUSTER_B))) {
		wait_cnt++;
		if (wait_cnt >= MAX_WAIT_COUNT)
			WARN("%s:reg %x,wait\n", __func__,
			     mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}

	/* Drop the flush request once the hardware signals completion. */
	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B));
}
403 
/*
 * Prepare the cluster-B SCU for power-down: verify both A72 cores are
 * already off, flush the cluster-B L2, then assert ACINACTM and wait for
 * the cluster to report STANDBYWFIL2. Bails out early (with an error log)
 * if any A72 core is still powered.
 */
static void pmu_scu_b_pwrdn(void)
{
	uint32_t wait_cnt = 0;

	if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) &
	     (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) !=
	     (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) {
		ERROR("%s: not all cpus is off\n", __func__);
		return;
	}

	rk3399_flash_l2_b();

	/* Disconnect the cluster from the coherent interconnect. */
	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG));

	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
		 BIT(STANDBY_BY_WFIL2_CLUSTER_B))) {
		wait_cnt++;
		if (wait_cnt >= MAX_WAIT_COUNT)
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}
}
427 
/* Undo pmu_scu_b_pwrdn(): deassert ACINACTM so cluster B rejoins the
 * coherent interconnect. */
static void pmu_scu_b_pwrup(void)
{
	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG));
}
432 
433 void plat_rockchip_pmusram_prepare(void)
434 {
435 	uint32_t *sram_dst, *sram_src;
436 	size_t sram_size = 2;
437 
438 	/*
439 	 * pmu sram code and data prepare
440 	 */
441 	sram_dst = (uint32_t *)PMUSRAM_BASE;
442 	sram_src = (uint32_t *)&pmu_cpuson_entrypoint_start;
443 	sram_size = (uint32_t *)&pmu_cpuson_entrypoint_end -
444 		    (uint32_t *)sram_src;
445 
446 	u32_align_cpy(sram_dst, sram_src, sram_size);
447 
448 	psram_sleep_cfg->sp = PSRAM_DT_BASE;
449 }
450 
/* Return the power-down mode (core_pwr_pd / core_pwr_wfi / core_pwr_wfi_int)
 * last recorded for @cpu_id. */
static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id)
{
	assert(cpu_id < PLATFORM_CORE_COUNT);
	return core_pm_cfg_info[cpu_id];
}
456 
/* Record the power-down mode chosen for @cpu_id. Without coherent memory
 * the entry is flushed so a powering-up core sees the value with caches
 * still disabled. */
static inline void set_cpus_pwr_domain_cfg_info(uint32_t cpu_id, uint32_t value)
{
	assert(cpu_id < PLATFORM_CORE_COUNT);
	core_pm_cfg_info[cpu_id] = value;
#if !USE_COHERENT_MEM
	flush_dcache_range((uintptr_t)&core_pm_cfg_info[cpu_id],
			   sizeof(uint32_t));
#endif
}
466 
/*
 * Power on one CPU core, using whichever mechanism was recorded when it
 * was powered off. Returns 0 on success, -EINVAL if a wfi-managed core is
 * unexpectedly already on.
 */
static int cpus_power_domain_on(uint32_t cpu_id)
{
	uint32_t cfg_info;
	uint32_t cpu_pd = PD_CPUL0 + cpu_id;
	/*
	 * There are two ways to powering on or off on core.
	 * 1) Control it power domain into on or off in PMU_PWRDN_CON reg
	 * 2) Enable the core power manage in PMU_CORE_PM_CON reg,
	 *    then, if the core enter into wfi, it power domain will be
	 *    powered off automatically.
	 */

	cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id);

	if (cfg_info == core_pwr_pd) {
		/* disable core_pm cfg */
		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      CORES_PM_DISABLE);
		/* if the cores have be on, power off it firstly */
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 0);
			pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
		}

		pmu_power_domain_ctr(cpu_pd, pmu_pd_on);
	} else {
		/* wfi-managed core: it must still be off, then a soft
		 * wakeup request brings it back. */
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			WARN("%s: cpu%d is not in off,!\n", __func__, cpu_id);
			return -EINVAL;
		}

		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      BIT(core_pm_sft_wakeup_en));
		dsb();
	}

	return 0;
}
505 
/*
 * Power off one CPU core using the mode given in @pd_cfg:
 * - core_pwr_pd: force the domain off now (requires the core to be in
 *   wfi/wfe first);
 * - core_pwr_wfi / core_pwr_wfi_int: arm the PMU so the domain drops
 *   automatically when the core executes wfi (optionally with interrupt
 *   wakeup enabled).
 * Returns 0 on success, -EINVAL if the core never reached wfi/wfe.
 */
static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg)
{
	uint32_t cpu_pd;
	uint32_t core_pm_value;

	cpu_pd = PD_CPUL0 + cpu_id;
	/* Already off — nothing to do. */
	if (pmu_power_domain_st(cpu_pd) == pmu_pd_off)
		return 0;

	if (pd_cfg == core_pwr_pd) {
		if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK))
			return -EINVAL;

		/* disable core_pm cfg */
		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      CORES_PM_DISABLE);

		set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);
		pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
	} else {
		set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);

		core_pm_value = BIT(core_pm_en);
		if (pd_cfg == core_pwr_wfi_int)
			core_pm_value |= BIT(core_pm_int_wakeup_en);
		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      core_pm_value);
		dsb();
	}

	return 0;
}
538 
/*
 * Cluster-level suspend hook. If this core is the last one alive in its
 * cluster, drop the cluster PLL to slow mode and mark the cluster for
 * warm-boot PLL restore. The PMU power-down status is re-read after the
 * switch to catch the race where another core in the cluster came back up
 * meanwhile; in that case the PLL change is rolled back immediately.
 */
static inline void clst_pwr_domain_suspend(plat_local_state_t lvl_state)
{
	uint32_t cpu_id = plat_my_core_pos();
	uint32_t pll_id, clst_st_msk, clst_st_chk_msk, pmu_st;

	assert(cpu_id < PLATFORM_CORE_COUNT);

	if (lvl_state == PLAT_MAX_RET_STATE  ||
	    lvl_state == PLAT_MAX_OFF_STATE) {
		/* Pick the PLL and status mask for this core's cluster. */
		if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT) {
			pll_id = ALPLL_ID;
			clst_st_msk = CLST_L_CPUS_MSK;
		} else {
			pll_id = ABPLL_ID;
			clst_st_msk = CLST_B_CPUS_MSK <<
				       PLATFORM_CLUSTER0_CORE_COUNT;
		}

		/* All cluster siblings down, except (possibly) ourselves. */
		clst_st_chk_msk = clst_st_msk & ~(BIT(cpu_id));

		pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);

		pmu_st &= clst_st_msk;

		if (pmu_st == clst_st_chk_msk) {
			mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3),
				      PLL_SLOW_MODE);

			clst_warmboot_data[pll_id] = PMU_CLST_RET;

			/* Re-check: another core may have powered up while
			 * we switched the PLL. */
			pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);
			pmu_st &= clst_st_msk;
			if (pmu_st == clst_st_chk_msk)
				return;
			/*
			 * it is mean that others cpu is up again,
			 * we must resume the cfg at once.
			 */
			mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3),
				      PLL_NOMAL_MODE);
			clst_warmboot_data[pll_id] = 0;
		}
	}
}
583 
584 static int clst_pwr_domain_resume(plat_local_state_t lvl_state)
585 {
586 	uint32_t cpu_id = plat_my_core_pos();
587 	uint32_t pll_id, pll_st;
588 
589 	assert(cpu_id < PLATFORM_CORE_COUNT);
590 
591 	if (lvl_state == PLAT_MAX_RET_STATE ||
592 	    lvl_state == PLAT_MAX_OFF_STATE) {
593 		if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT)
594 			pll_id = ALPLL_ID;
595 		else
596 			pll_id = ABPLL_ID;
597 
598 		pll_st = mmio_read_32(CRU_BASE + CRU_PLL_CON(pll_id, 3)) >>
599 				 PLL_MODE_SHIFT;
600 
601 		if (pll_st != NORMAL_MODE) {
602 			WARN("%s: clst (%d) is in error mode (%d)\n",
603 			     __func__, pll_id, pll_st);
604 			return -1;
605 		}
606 	}
607 
608 	return 0;
609 }
610 
611 static void nonboot_cpus_off(void)
612 {
613 	uint32_t boot_cpu, cpu;
614 
615 	boot_cpu = plat_my_core_pos();
616 
617 	/* turn off noboot cpus */
618 	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) {
619 		if (cpu == boot_cpu)
620 			continue;
621 		cpus_power_domain_off(cpu, core_pwr_pd);
622 	}
623 }
624 
/*
 * PSCI CPU_ON hook: record the warm-boot entry point and hotplug flag for
 * the target core (flushed to memory via dsb before the core can start),
 * then power its domain on. Always returns 0.
 */
static int cores_pwr_domain_on(unsigned long mpidr, uint64_t entrypoint)
{
	uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr);

	assert(cpu_id < PLATFORM_CORE_COUNT);
	/* The slot must be free — the core must not already be mid power-on. */
	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG;
	cpuson_entry_point[cpu_id] = entrypoint;
	dsb();

	cpus_power_domain_on(cpu_id);

	return 0;
}
639 
/* PSCI CPU_OFF hook: arm the PMU so this core's domain powers down once
 * it executes wfi (no interrupt wakeup). Always returns 0. */
static int cores_pwr_domain_off(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	cpus_power_domain_off(cpu_id, core_pwr_wfi);

	return 0;
}
648 
649 static int hlvl_pwr_domain_off(uint32_t lvl, plat_local_state_t lvl_state)
650 {
651 	switch (lvl) {
652 	case MPIDR_AFFLVL1:
653 		clst_pwr_domain_suspend(lvl_state);
654 		break;
655 	default:
656 		break;
657 	}
658 
659 	return 0;
660 }
661 
/*
 * PSCI suspend hook for a core: record the secure warm-boot entry and the
 * auto-power-down flag (made visible via dsb), then arm the PMU to drop
 * the core's domain on wfi with interrupt wakeup enabled. Returns 0.
 */
static int cores_pwr_domain_suspend(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	assert(cpu_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN;
	cpuson_entry_point[cpu_id] = plat_get_sec_entrypoint();
	dsb();

	cpus_power_domain_off(cpu_id, core_pwr_wfi_int);

	return 0;
}
676 
677 static int hlvl_pwr_domain_suspend(uint32_t lvl, plat_local_state_t lvl_state)
678 {
679 	switch (lvl) {
680 	case MPIDR_AFFLVL1:
681 		clst_pwr_domain_suspend(lvl_state);
682 		break;
683 	default:
684 		break;
685 	}
686 
687 	return 0;
688 }
689 
/* Runs on a core after power-on: disable its automatic core_pm control so
 * a later wfi does not power it straight back down. Returns 0. */
static int cores_pwr_domain_on_finish(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
		      CORES_PM_DISABLE);
	return 0;
}
698 
699 static int hlvl_pwr_domain_on_finish(uint32_t lvl,
700 				     plat_local_state_t lvl_state)
701 {
702 	switch (lvl) {
703 	case MPIDR_AFFLVL1:
704 		clst_pwr_domain_resume(lvl_state);
705 		break;
706 	default:
707 		break;
708 	}
709 
710 	return 0;
711 }
712 
/* Runs on a core after suspend resume: disable its automatic core_pm
 * control (armed by cores_pwr_domain_suspend). Returns 0. */
static int cores_pwr_domain_resume(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	/* Disable core_pm */
	mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), CORES_PM_DISABLE);

	return 0;
}
722 
723 static int hlvl_pwr_domain_resume(uint32_t lvl, plat_local_state_t lvl_state)
724 {
725 	switch (lvl) {
726 	case MPIDR_AFFLVL1:
727 		clst_pwr_domain_resume(lvl_state);
728 	default:
729 		break;
730 	}
731 
732 	return 0;
733 }
734 
/*
 * Program the PMU for deep system sleep: hand CCI500 and the cluster-L
 * ADB400 bridges to hardware control, build the PMU_PWRMODE_CON request
 * (power off CPUs/SCU/CCI, DDR self-refresh + IO retention, PLL and
 * oscillator off, run the always-on logic from the 32 kHz clock), enable
 * the wakeup sources, and set all the power-sequencing delay counters.
 */
static void sys_slp_config(void)
{
	uint32_t slp_mode_cfg = 0;

	mmio_write_32(GRF_BASE + GRF_SOC_CON4, CCI_FORCE_WAKEUP);
	/* Let hardware manage the CCI500 P-/Q-channel requests in sleep. */
	mmio_write_32(PMU_BASE + PMU_CCI500_CON,
		      BIT_WITH_WMSK(PMU_CLR_PREQ_CCI500_HW) |
		      BIT_WITH_WMSK(PMU_CLR_QREQ_CCI500_HW) |
		      BIT_WITH_WMSK(PMU_QGATING_CCI500_CFG));

	/* Hardware-controlled ADB400 disconnect for the little cluster. */
	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      BIT_WITH_WMSK(PMU_CLR_CORE_L_HW) |
		      BIT_WITH_WMSK(PMU_CLR_CORE_L_2GIC_HW) |
		      BIT_WITH_WMSK(PMU_CLR_GIC2_CORE_L_HW));

	slp_mode_cfg = BIT(PMU_PWR_MODE_EN) |
		       BIT(PMU_POWER_OFF_REQ_CFG) |
		       BIT(PMU_CPU0_PD_EN) |
		       BIT(PMU_L2_FLUSH_EN) |
		       BIT(PMU_L2_IDLE_EN) |
		       BIT(PMU_SCU_PD_EN) |
		       BIT(PMU_CCI_PD_EN) |
		       BIT(PMU_CLK_CORE_SRC_GATE_EN) |
		       BIT(PMU_PERILP_PD_EN) |
		       BIT(PMU_CLK_PERILP_SRC_GATE_EN) |
		       BIT(PMU_ALIVE_USE_LF) |
		       BIT(PMU_SREF0_ENTER_EN) |
		       BIT(PMU_SREF1_ENTER_EN) |
		       BIT(PMU_DDRC0_GATING_EN) |
		       BIT(PMU_DDRC1_GATING_EN) |
		       BIT(PMU_DDRIO0_RET_EN) |
		       BIT(PMU_DDRIO1_RET_EN) |
		       BIT(PMU_DDRIO_RET_HW_DE_REQ) |
		       BIT(PMU_PLL_PD_EN) |
		       BIT(PMU_CLK_CENTER_SRC_GATE_EN) |
		       BIT(PMU_OSC_DIS) |
		       BIT(PMU_PMU_USE_LF);

	/* Wake on either cluster or on GPIO activity. */
	mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, BIT(PMU_CLUSTER_L_WKUP_EN));
	mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, BIT(PMU_CLUSTER_B_WKUP_EN));
	mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, BIT(PMU_GPIO_WKUP_EN));
	mmio_write_32(PMU_BASE + PMU_PWRMODE_CON, slp_mode_cfg);

	/* Power-sequencing delays: 3 ms on the relevant clock domain. */
	mmio_write_32(PMU_BASE + PMU_SCU_L_PWRDN_CNT, CYCL_24M_CNT_MS(3));
	mmio_write_32(PMU_BASE + PMU_SCU_L_PWRUP_CNT, CYCL_24M_CNT_MS(3));
	mmio_write_32(PMU_BASE + PMU_SCU_B_PWRDN_CNT, CYCL_24M_CNT_MS(3));
	mmio_write_32(PMU_BASE + PMU_SCU_B_PWRUP_CNT, CYCL_24M_CNT_MS(3));
	mmio_write_32(PMU_BASE + PMU_CENTER_PWRDN_CNT, CYCL_24M_CNT_MS(3));
	mmio_write_32(PMU_BASE + PMU_CENTER_PWRUP_CNT, CYCL_24M_CNT_MS(3));
	mmio_write_32(PMU_BASE + PMU_WAKEUP_RST_CLR_CNT, CYCL_24M_CNT_MS(3));
	mmio_write_32(PMU_BASE + PMU_OSC_CNT, CYCL_32K_CNT_MS(3));
	mmio_write_32(PMU_BASE + PMU_DDRIO_PWRON_CNT, CYCL_24M_CNT_MS(3));
	mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_24M_CNT_MS(3));
	mmio_write_32(PMU_BASE + PMU_PLLRST_CNT, CYCL_24M_CNT_MS(3));
	mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_24M_CNT_MS(3));
	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(PMU_24M_EN_CFG));

	/* PLLs powered down by hardware; clock from the external 32 kHz. */
	mmio_write_32(PMU_BASE + PMU_PLL_CON, PLL_PD_HW);
	mmio_write_32(PMUGRF_BASE + PMUGRF_SOC_CON0, EXTERNAL_32K);
	mmio_write_32(PMUGRF_BASE, IOMUX_CLK_32K); /*32k iomux*/
}
796 
/* Enable hardware-managed bus idling for the buses in @hw_idle. */
static void set_hw_idle(uint32_t hw_idle)
{
	mmio_setbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle);
}
801 
/* Disable hardware-managed bus idling for the buses in @hw_idle. */
static void clr_hw_idle(uint32_t hw_idle)
{
	mmio_clrbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle);
}
806 
/* PWM register and IOMUX snapshot preserved across system suspend. */
struct pwm_data_s pwm_data;
808 
809 /*
810  * Save the PWMs data.
811  */
812 static void save_pwms(void)
813 {
814 	uint32_t i;
815 
816 	pwm_data.iomux_bitmask = 0;
817 
818 	/* Save all IOMUXes */
819 	if (mmio_read_32(GRF_BASE + GRF_GPIO4C_IOMUX) & GPIO4C2_IOMUX_PWM)
820 		pwm_data.iomux_bitmask |= PWM0_IOMUX_PWM_EN;
821 	if (mmio_read_32(GRF_BASE + GRF_GPIO4C_IOMUX) & GPIO4C6_IOMUX_PWM)
822 		pwm_data.iomux_bitmask |= PWM1_IOMUX_PWM_EN;
823 	if (mmio_read_32(PMUGRF_BASE + PMUGRF_GPIO1C_IOMUX) &
824 			 GPIO1C3_IOMUX_PWM)
825 		pwm_data.iomux_bitmask |= PWM2_IOMUX_PWM_EN;
826 	if (mmio_read_32(PMUGRF_BASE + PMUGRF_GPIO0A_IOMUX) &
827 			 GPIO0A6_IOMUX_PWM)
828 		pwm_data.iomux_bitmask |= PWM3_IOMUX_PWM_EN;
829 
830 	for (i = 0; i < 4; i++) {
831 		/* Save cnt, period, duty and ctrl for PWM i */
832 		pwm_data.cnt[i] = mmio_read_32(PWM_BASE + PWM_CNT(i));
833 		pwm_data.duty[i] = mmio_read_32(PWM_BASE + PWM_PERIOD_HPR(i));
834 		pwm_data.period[i] = mmio_read_32(PWM_BASE + PWM_DUTY_LPR(i));
835 		pwm_data.ctrl[i] = mmio_read_32(PWM_BASE + PWM_CTRL(i));
836 	}
837 
838 	/* PWMs all IOMUXes switch to the gpio mode */
839 	mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX, GPIO4C2_IOMUX_GPIO);
840 	mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX, GPIO4C6_IOMUX_GPIO);
841 	mmio_write_32(PMUGRF_BASE  + PMUGRF_GPIO1C_IOMUX, GPIO1C3_IOMUX_GPIO);
842 	mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO0A_IOMUX, GPIO0A6_IOMUX_GPIO);
843 }
844 
845 /*
846  * Restore the PWMs data.
847  */
848 static void restore_pwms(void)
849 {
850 	uint32_t i;
851 
852 	/* Restore all IOMUXes */
853 	if (pwm_data.iomux_bitmask & PWM3_IOMUX_PWM_EN)
854 		mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO0A_IOMUX,
855 			      GPIO0A6_IOMUX_PWM);
856 	if (pwm_data.iomux_bitmask & PWM2_IOMUX_PWM_EN)
857 		mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO1C_IOMUX,
858 			      GPIO1C3_IOMUX_PWM);
859 	if (pwm_data.iomux_bitmask & PWM1_IOMUX_PWM_EN)
860 		mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX, GPIO4C6_IOMUX_PWM);
861 	if (pwm_data.iomux_bitmask & PWM0_IOMUX_PWM_EN)
862 		mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX, GPIO4C2_IOMUX_PWM);
863 
864 	for (i = 0; i < 4; i++) {
865 		/* Restore ctrl, duty, period and cnt for PWM i */
866 		mmio_write_32(PWM_BASE + PWM_CTRL(i), pwm_data.ctrl[i]);
867 		mmio_write_32(PWM_BASE + PWM_DUTY_LPR(i), pwm_data.period[i]);
868 		mmio_write_32(PWM_BASE + PWM_PERIOD_HPR(i), pwm_data.duty[i]);
869 		mmio_write_32(PWM_BASE + PWM_CNT(i), pwm_data.cnt[i]);
870 	}
871 }
872 
/*
 * System-level suspend: power down the peripheral domains, arm hardware
 * bus idling, program the sleep mode, point the warm-boot vector at
 * PMUSRAM, quiesce cluster B (L2 flush + ADB400 disconnect, polled until
 * acknowledged — panics on timeout), enable SCU-B power-down, and finally
 * park the PWM pins. Returns 0.
 */
static int sys_pwr_domain_suspend(void)
{
	uint32_t wait_cnt = 0;
	uint32_t status = 0;

	pmu_power_domains_suspend();
	set_hw_idle(BIT(PMU_CLR_CENTER1) |
		    BIT(PMU_CLR_ALIVE) |
		    BIT(PMU_CLR_MSCH0) |
		    BIT(PMU_CLR_MSCH1) |
		    BIT(PMU_CLR_CCIM0) |
		    BIT(PMU_CLR_CCIM1) |
		    BIT(PMU_CLR_CENTER) |
		    BIT(PMU_CLR_PERILP) |
		    BIT(PMU_CLR_PMU) |
		    BIT(PMU_CLR_PERILPM0) |
		    BIT(PMU_CLR_GIC));

	sys_slp_config();
	pmu_sgrf_rst_hld();

	/* Warm boot resumes through the code copied into PMUSRAM. */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON0_1(1),
		      (PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	pmu_scu_b_pwrdn();

	/* Disconnect cluster B's ADB400 bridges (software request)... */
	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) |
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_SW) |
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_GIC2_CORE_B_SW));
	dsb();
	/* ...and wait until all three bridges acknowledge. */
	status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) |
		BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) |
		BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST);
	while ((mmio_read_32(PMU_BASE +
	       PMU_ADB400_ST) & status) != status) {
		wait_cnt++;
		if (wait_cnt >= MAX_WAIT_COUNT) {
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_ADB400_ST));
			panic();
		}
	}
	mmio_setbits_32(PMU_BASE + PMU_PWRDN_CON, BIT(PMU_SCU_B_PWRDWN_EN));

	save_pwms();

	return 0;
}
923 
/*
 * System-level resume: undo sys_pwr_domain_suspend() in reverse order —
 * restore PWMs, restore the normal warm-boot vector, hand CCI500 back to
 * software, re-enable SCU-B, reconnect the ADB400 bridges (polled until
 * all power-down status bits clear — panics on timeout), bring cluster B
 * back into coherency, and power the peripheral domains back up. Returns 0.
 */
static int sys_pwr_domain_resume(void)
{
	uint32_t wait_cnt = 0;
	uint32_t status = 0;

	restore_pwms();

	pmu_sgrf_rst_hld();

	/* Subsequent warm boots go back to the normal entry point. */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON0_1(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	mmio_write_32(PMU_BASE + PMU_CCI500_CON,
		      WMSK_BIT(PMU_CLR_PREQ_CCI500_HW) |
		      WMSK_BIT(PMU_CLR_QREQ_CCI500_HW) |
		      WMSK_BIT(PMU_QGATING_CCI500_CFG));
	dsb();
	mmio_clrbits_32(PMU_BASE + PMU_PWRDN_CON,
			BIT(PMU_SCU_B_PWRDWN_EN));

	/* Reconnect cluster-B ADB400 bridges and return cluster-L bridges
	 * to software control. */
	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) |
		      WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_SW) |
		      WMSK_BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW) |
		      WMSK_BIT(PMU_CLR_CORE_L_HW) |
		      WMSK_BIT(PMU_CLR_CORE_L_2GIC_HW) |
		      WMSK_BIT(PMU_CLR_GIC2_CORE_L_HW));

	status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) |
		BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) |
		BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST);

	/* Wait for all three power-down status bits to clear. */
	while ((mmio_read_32(PMU_BASE +
	   PMU_ADB400_ST) & status)) {
		wait_cnt++;
		if (wait_cnt >= MAX_WAIT_COUNT) {
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_ADB400_ST));
			panic();
		}
	}

	pmu_scu_b_pwrup();

	pmu_power_domains_resume();
	clr_hw_idle(BIT(PMU_CLR_CENTER1) |
				BIT(PMU_CLR_ALIVE) |
				BIT(PMU_CLR_MSCH0) |
				BIT(PMU_CLR_MSCH1) |
				BIT(PMU_CLR_CCIM0) |
				BIT(PMU_CLR_CCIM1) |
				BIT(PMU_CLR_CENTER) |
				BIT(PMU_CLR_PERILP) |
				BIT(PMU_CLR_PMU) |
				BIT(PMU_CLR_GIC));
	return 0;
}
982 
983 void __dead2 soc_soft_reset(void)
984 {
985 	struct gpio_info *rst_gpio;
986 
987 	rst_gpio = (struct gpio_info *)plat_get_rockchip_gpio_reset();
988 
989 	if (rst_gpio) {
990 		gpio_set_direction(rst_gpio->index, GPIO_DIR_OUT);
991 		gpio_set_value(rst_gpio->index, rst_gpio->polarity);
992 	} else {
993 		soc_global_soft_reset();
994 	}
995 
996 	while (1)
997 		;
998 }
999 
1000 void __dead2 soc_system_off(void)
1001 {
1002 	struct gpio_info *poweroff_gpio;
1003 
1004 	poweroff_gpio = (struct gpio_info *)plat_get_rockchip_gpio_poweroff();
1005 
1006 	if (poweroff_gpio) {
1007 		/*
1008 		 * if use tsadc over temp pin(GPIO1A6) as shutdown gpio,
1009 		 * need to set this pin iomux back to gpio function
1010 		 */
1011 		if (poweroff_gpio->index == TSADC_INT_PIN) {
1012 			mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO1A_IOMUX,
1013 				      GPIO1A6_IOMUX);
1014 		}
1015 		gpio_set_direction(poweroff_gpio->index, GPIO_DIR_OUT);
1016 		gpio_set_value(poweroff_gpio->index, poweroff_gpio->polarity);
1017 	} else {
1018 		WARN("Do nothing when system off\n");
1019 	}
1020 
1021 	while (1)
1022 		;
1023 }
1024 
/* Platform power-management callbacks registered with the common Rockchip
 * PSCI layer in plat_rockchip_pmu_init(). */
static struct rockchip_pm_ops_cb pm_ops = {
	.cores_pwr_dm_on = cores_pwr_domain_on,
	.cores_pwr_dm_off = cores_pwr_domain_off,
	.cores_pwr_dm_on_finish = cores_pwr_domain_on_finish,
	.cores_pwr_dm_suspend = cores_pwr_domain_suspend,
	.cores_pwr_dm_resume = cores_pwr_domain_resume,
	.hlvl_pwr_dm_suspend = hlvl_pwr_domain_suspend,
	.hlvl_pwr_dm_resume = hlvl_pwr_domain_resume,
	.hlvl_pwr_dm_off = hlvl_pwr_domain_off,
	.hlvl_pwr_dm_on_finish = hlvl_pwr_domain_on_finish,
	.sys_pwr_dm_suspend = sys_pwr_domain_suspend,
	.sys_pwr_dm_resume = sys_pwr_domain_resume,
	.sys_gbl_soft_reset = soc_soft_reset,
	.system_off = soc_system_off,
};
1040 
/*
 * One-time PMU driver init: register the PM callbacks, clear the per-core
 * warm-boot bookkeeping, fill in the PMUSRAM sleep parameters, program the
 * warm-boot vector, enable NoC auto idling, and power off all non-boot
 * cores.
 */
void plat_rockchip_pmu_init(void)
{
	uint32_t cpu;

	rockchip_pd_lock_init();
	plat_setup_rockchip_pm_ops(&pm_ops);

	/* register requires 32bits mode, switch it to 32 bits */
	cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;

	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
		cpuson_flags[cpu] = 0;

	for (cpu = 0; cpu < PLATFORM_CLUSTER_COUNT; cpu++)
		clst_warmboot_data[cpu] = 0;

	psram_sleep_cfg->ddr_func = 0x00;
	psram_sleep_cfg->ddr_data = 0x00;
	psram_sleep_cfg->ddr_flag = 0x00;
	/* Aff0/Aff1 fields of this core's MPIDR identify the boot cpu. */
	psram_sleep_cfg->boot_mpidr = read_mpidr_el1() & 0xffff;

	/* config cpu's warm boot address */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON0_1(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	mmio_write_32(PMU_BASE + PMU_NOC_AUTO_ENA, NOC_AUTO_ENABLE);

	nonboot_cpus_off();

	INFO("%s(%d): pd status %x\n", __func__, __LINE__,
	     mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}
1073