1 /* 2 * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved. 3 * 4 * SPDX-License-Identifier: BSD-3-Clause 5 */ 6 7 #include <assert.h> 8 #include <errno.h> 9 #include <string.h> 10 11 #include <platform_def.h> 12 13 #include <arch_helpers.h> 14 #include <bl31/bl31.h> 15 #include <common/debug.h> 16 #include <drivers/arm/gicv3.h> 17 #include <drivers/delay_timer.h> 18 #include <drivers/gpio.h> 19 #include <lib/bakery_lock.h> 20 #include <lib/mmio.h> 21 #include <plat/common/platform.h> 22 23 #include <dfs.h> 24 #include <m0_ctl.h> 25 #include <plat_params.h> 26 #include <plat_private.h> 27 #include <pmu.h> 28 #include <pmu_com.h> 29 #include <pwm.h> 30 #include <rk3399_def.h> 31 #include <secure.h> 32 #include <soc.h> 33 #include <suspend.h> 34 35 DEFINE_BAKERY_LOCK(rockchip_pd_lock); 36 37 static uint32_t cpu_warm_boot_addr; 38 static char store_sram[SRAM_BIN_LIMIT + SRAM_TEXT_LIMIT + SRAM_DATA_LIMIT]; 39 static uint32_t store_cru[CRU_SDIO0_CON1 / 4 + 1]; 40 static uint32_t store_usbphy0[7]; 41 static uint32_t store_usbphy1[7]; 42 static uint32_t store_grf_io_vsel; 43 static uint32_t store_grf_soc_con0; 44 static uint32_t store_grf_soc_con1; 45 static uint32_t store_grf_soc_con2; 46 static uint32_t store_grf_soc_con3; 47 static uint32_t store_grf_soc_con4; 48 static uint32_t store_grf_soc_con7; 49 static uint32_t store_grf_ddrc_con[4]; 50 static uint32_t store_wdt0[2]; 51 static uint32_t store_wdt1[2]; 52 static gicv3_dist_ctx_t dist_ctx; 53 static gicv3_redist_ctx_t rdist_ctx; 54 55 /* 56 * There are two ways to powering on or off on core. 57 * 1) Control it power domain into on or off in PMU_PWRDN_CON reg, 58 * it is core_pwr_pd mode 59 * 2) Enable the core power manage in PMU_CORE_PM_CON reg, 60 * then, if the core enter into wfi, it power domain will be 61 * powered off automatically. it is core_pwr_wfi or core_pwr_wfi_int mode 62 * so we need core_pm_cfg_info to distinguish which method be used now. 
63 */ 64 65 static uint32_t core_pm_cfg_info[PLATFORM_CORE_COUNT] 66 #if USE_COHERENT_MEM 67 __attribute__ ((section(".tzfw_coherent_mem"))) 68 #endif 69 ;/* coheront */ 70 71 static void pmu_bus_idle_req(uint32_t bus, uint32_t state) 72 { 73 uint32_t bus_id = BIT(bus); 74 uint32_t bus_req; 75 uint32_t wait_cnt = 0; 76 uint32_t bus_state, bus_ack; 77 78 if (state) 79 bus_req = BIT(bus); 80 else 81 bus_req = 0; 82 83 mmio_clrsetbits_32(PMU_BASE + PMU_BUS_IDLE_REQ, bus_id, bus_req); 84 85 do { 86 bus_state = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) & bus_id; 87 bus_ack = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK) & bus_id; 88 if (bus_state == bus_req && bus_ack == bus_req) 89 break; 90 91 wait_cnt++; 92 udelay(1); 93 } while (wait_cnt < MAX_WAIT_COUNT); 94 95 if (bus_state != bus_req || bus_ack != bus_req) { 96 INFO("%s:st=%x(%x)\n", __func__, 97 mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST), 98 bus_state); 99 INFO("%s:st=%x(%x)\n", __func__, 100 mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK), 101 bus_ack); 102 } 103 } 104 105 struct pmu_slpdata_s pmu_slpdata; 106 107 static void qos_restore(void) 108 { 109 if (pmu_power_domain_st(PD_GPU) == pmu_pd_on) 110 RESTORE_QOS(pmu_slpdata.gpu_qos, GPU); 111 if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) { 112 RESTORE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0); 113 RESTORE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1); 114 } 115 if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) { 116 RESTORE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0); 117 RESTORE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1); 118 } 119 if (pmu_power_domain_st(PD_VO) == pmu_pd_on) { 120 RESTORE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R); 121 RESTORE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W); 122 RESTORE_QOS(pmu_slpdata.vop_little, VOP_LITTLE); 123 } 124 if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on) 125 RESTORE_QOS(pmu_slpdata.hdcp_qos, HDCP); 126 if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on) 127 RESTORE_QOS(pmu_slpdata.gmac_qos, GMAC); 128 if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) { 129 
RESTORE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0); 130 RESTORE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1); 131 } 132 if (pmu_power_domain_st(PD_SD) == pmu_pd_on) 133 RESTORE_QOS(pmu_slpdata.sdmmc_qos, SDMMC); 134 if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on) 135 RESTORE_QOS(pmu_slpdata.emmc_qos, EMMC); 136 if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on) 137 RESTORE_QOS(pmu_slpdata.sdio_qos, SDIO); 138 if (pmu_power_domain_st(PD_GIC) == pmu_pd_on) 139 RESTORE_QOS(pmu_slpdata.gic_qos, GIC); 140 if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) { 141 RESTORE_QOS(pmu_slpdata.rga_r_qos, RGA_R); 142 RESTORE_QOS(pmu_slpdata.rga_w_qos, RGA_W); 143 } 144 if (pmu_power_domain_st(PD_IEP) == pmu_pd_on) 145 RESTORE_QOS(pmu_slpdata.iep_qos, IEP); 146 if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) { 147 RESTORE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0); 148 RESTORE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1); 149 } 150 if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) { 151 RESTORE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0); 152 RESTORE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1); 153 RESTORE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP); 154 } 155 if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) { 156 RESTORE_QOS(pmu_slpdata.dmac0_qos, DMAC0); 157 RESTORE_QOS(pmu_slpdata.dmac1_qos, DMAC1); 158 RESTORE_QOS(pmu_slpdata.dcf_qos, DCF); 159 RESTORE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0); 160 RESTORE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1); 161 RESTORE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP); 162 RESTORE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP); 163 RESTORE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1); 164 } 165 if (pmu_power_domain_st(PD_VDU) == pmu_pd_on) 166 RESTORE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0); 167 if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) { 168 RESTORE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R); 169 RESTORE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W); 170 } 171 } 172 173 static void qos_save(void) 174 { 175 if (pmu_power_domain_st(PD_GPU) == pmu_pd_on) 176 
SAVE_QOS(pmu_slpdata.gpu_qos, GPU); 177 if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) { 178 SAVE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0); 179 SAVE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1); 180 } 181 if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) { 182 SAVE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0); 183 SAVE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1); 184 } 185 if (pmu_power_domain_st(PD_VO) == pmu_pd_on) { 186 SAVE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R); 187 SAVE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W); 188 SAVE_QOS(pmu_slpdata.vop_little, VOP_LITTLE); 189 } 190 if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on) 191 SAVE_QOS(pmu_slpdata.hdcp_qos, HDCP); 192 if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on) 193 SAVE_QOS(pmu_slpdata.gmac_qos, GMAC); 194 if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) { 195 SAVE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0); 196 SAVE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1); 197 } 198 if (pmu_power_domain_st(PD_SD) == pmu_pd_on) 199 SAVE_QOS(pmu_slpdata.sdmmc_qos, SDMMC); 200 if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on) 201 SAVE_QOS(pmu_slpdata.emmc_qos, EMMC); 202 if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on) 203 SAVE_QOS(pmu_slpdata.sdio_qos, SDIO); 204 if (pmu_power_domain_st(PD_GIC) == pmu_pd_on) 205 SAVE_QOS(pmu_slpdata.gic_qos, GIC); 206 if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) { 207 SAVE_QOS(pmu_slpdata.rga_r_qos, RGA_R); 208 SAVE_QOS(pmu_slpdata.rga_w_qos, RGA_W); 209 } 210 if (pmu_power_domain_st(PD_IEP) == pmu_pd_on) 211 SAVE_QOS(pmu_slpdata.iep_qos, IEP); 212 if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) { 213 SAVE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0); 214 SAVE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1); 215 } 216 if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) { 217 SAVE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0); 218 SAVE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1); 219 SAVE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP); 220 } 221 if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) { 222 SAVE_QOS(pmu_slpdata.dmac0_qos, DMAC0); 223 
SAVE_QOS(pmu_slpdata.dmac1_qos, DMAC1); 224 SAVE_QOS(pmu_slpdata.dcf_qos, DCF); 225 SAVE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0); 226 SAVE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1); 227 SAVE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP); 228 SAVE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP); 229 SAVE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1); 230 } 231 if (pmu_power_domain_st(PD_VDU) == pmu_pd_on) 232 SAVE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0); 233 if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) { 234 SAVE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R); 235 SAVE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W); 236 } 237 } 238 239 static int pmu_set_power_domain(uint32_t pd_id, uint32_t pd_state) 240 { 241 uint32_t state; 242 243 if (pmu_power_domain_st(pd_id) == pd_state) 244 goto out; 245 246 if (pd_state == pmu_pd_on) 247 pmu_power_domain_ctr(pd_id, pd_state); 248 249 state = (pd_state == pmu_pd_off) ? BUS_IDLE : BUS_ACTIVE; 250 251 switch (pd_id) { 252 case PD_GPU: 253 pmu_bus_idle_req(BUS_ID_GPU, state); 254 break; 255 case PD_VIO: 256 pmu_bus_idle_req(BUS_ID_VIO, state); 257 break; 258 case PD_ISP0: 259 pmu_bus_idle_req(BUS_ID_ISP0, state); 260 break; 261 case PD_ISP1: 262 pmu_bus_idle_req(BUS_ID_ISP1, state); 263 break; 264 case PD_VO: 265 pmu_bus_idle_req(BUS_ID_VOPB, state); 266 pmu_bus_idle_req(BUS_ID_VOPL, state); 267 break; 268 case PD_HDCP: 269 pmu_bus_idle_req(BUS_ID_HDCP, state); 270 break; 271 case PD_TCPD0: 272 break; 273 case PD_TCPD1: 274 break; 275 case PD_GMAC: 276 pmu_bus_idle_req(BUS_ID_GMAC, state); 277 break; 278 case PD_CCI: 279 pmu_bus_idle_req(BUS_ID_CCIM0, state); 280 pmu_bus_idle_req(BUS_ID_CCIM1, state); 281 break; 282 case PD_SD: 283 pmu_bus_idle_req(BUS_ID_SD, state); 284 break; 285 case PD_EMMC: 286 pmu_bus_idle_req(BUS_ID_EMMC, state); 287 break; 288 case PD_EDP: 289 pmu_bus_idle_req(BUS_ID_EDP, state); 290 break; 291 case PD_SDIOAUDIO: 292 pmu_bus_idle_req(BUS_ID_SDIOAUDIO, state); 293 break; 294 case PD_GIC: 295 
		pmu_bus_idle_req(BUS_ID_GIC, state);
		break;
	case PD_RGA:
		pmu_bus_idle_req(BUS_ID_RGA, state);
		break;
	case PD_VCODEC:
		pmu_bus_idle_req(BUS_ID_VCODEC, state);
		break;
	case PD_VDU:
		pmu_bus_idle_req(BUS_ID_VDU, state);
		break;
	case PD_IEP:
		pmu_bus_idle_req(BUS_ID_IEP, state);
		break;
	case PD_USB3:
		pmu_bus_idle_req(BUS_ID_USB3, state);
		break;
	case PD_PERIHP:
		pmu_bus_idle_req(BUS_ID_PERIHP, state);
		break;
	default:
		/* Domains without a bus-idle handshake: nothing to do */
		break;
	}

	/* For power-off, the domain is switched only after its bus is idle */
	if (pd_state == pmu_pd_off)
		pmu_power_domain_ctr(pd_id, pd_state);

out:
	return 0;
}

/* Snapshot of PMU_PWRDN_ST taken at suspend, consulted on resume. */
static uint32_t pmu_powerdomain_state;

/*
 * Power off all peripheral power domains before system suspend.
 * Clock gates are temporarily opened (saved first) so the bus-idle
 * handshakes can complete, and restored afterwards. The pre-suspend
 * domain states are recorded in pmu_powerdomain_state so resume only
 * re-enables domains that were on.
 */
static void pmu_power_domains_suspend(void)
{
	clk_gate_con_save();
	clk_gate_con_disable();
	qos_save();
	pmu_powerdomain_state = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);
	pmu_set_power_domain(PD_GPU, pmu_pd_off);
	pmu_set_power_domain(PD_TCPD0, pmu_pd_off);
	pmu_set_power_domain(PD_TCPD1, pmu_pd_off);
	pmu_set_power_domain(PD_VO, pmu_pd_off);
	pmu_set_power_domain(PD_ISP0, pmu_pd_off);
	pmu_set_power_domain(PD_ISP1, pmu_pd_off);
	pmu_set_power_domain(PD_HDCP, pmu_pd_off);
	pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_off);
	pmu_set_power_domain(PD_GMAC, pmu_pd_off);
	pmu_set_power_domain(PD_EDP, pmu_pd_off);
	pmu_set_power_domain(PD_IEP, pmu_pd_off);
	pmu_set_power_domain(PD_RGA, pmu_pd_off);
	pmu_set_power_domain(PD_VCODEC, pmu_pd_off);
	pmu_set_power_domain(PD_VDU, pmu_pd_off);
	pmu_set_power_domain(PD_USB3, pmu_pd_off);
	pmu_set_power_domain(PD_EMMC, pmu_pd_off);
	pmu_set_power_domain(PD_VIO, pmu_pd_off);
	pmu_set_power_domain(PD_SD, pmu_pd_off);
	pmu_set_power_domain(PD_PERIHP, pmu_pd_off);
	clk_gate_con_restore();
}

/*
 * Re-enable, in reverse order of suspend, every power domain that
 * pmu_powerdomain_state shows was on before suspend (a clear bit in
 * PMU_PWRDN_ST means the domain was powered), then restore QoS settings.
 */
static void pmu_power_domains_resume(void)
{
	clk_gate_con_save();
	clk_gate_con_disable();
	if (!(pmu_powerdomain_state & BIT(PD_VDU)))
		pmu_set_power_domain(PD_VDU, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_VCODEC)))
		pmu_set_power_domain(PD_VCODEC, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_RGA)))
		pmu_set_power_domain(PD_RGA, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_IEP)))
		pmu_set_power_domain(PD_IEP, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_EDP)))
		pmu_set_power_domain(PD_EDP, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_GMAC)))
		pmu_set_power_domain(PD_GMAC, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_SDIOAUDIO)))
		pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_HDCP)))
		pmu_set_power_domain(PD_HDCP, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_ISP1)))
		pmu_set_power_domain(PD_ISP1, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_ISP0)))
		pmu_set_power_domain(PD_ISP0, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_VO)))
		pmu_set_power_domain(PD_VO, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_TCPD1)))
		pmu_set_power_domain(PD_TCPD1, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_TCPD0)))
		pmu_set_power_domain(PD_TCPD0, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_GPU)))
		pmu_set_power_domain(PD_GPU, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_USB3)))
		pmu_set_power_domain(PD_USB3, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_EMMC)))
		pmu_set_power_domain(PD_EMMC, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_VIO)))
		pmu_set_power_domain(PD_VIO, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_SD)))
		pmu_set_power_domain(PD_SD, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_PERIHP)))
		pmu_set_power_domain(PD_PERIHP, pmu_pd_on);
	qos_restore();
	clk_gate_con_restore();
}

/* Unconditionally power on a fixed set of peripheral domains (boot path). */
void pmu_power_domains_on(void)
{
	clk_gate_con_disable();
	pmu_set_power_domain(PD_VDU, pmu_pd_on);
	pmu_set_power_domain(PD_VCODEC, pmu_pd_on);
	pmu_set_power_domain(PD_RGA, pmu_pd_on);
	pmu_set_power_domain(PD_IEP, pmu_pd_on);
	pmu_set_power_domain(PD_EDP, pmu_pd_on);
	pmu_set_power_domain(PD_GMAC, pmu_pd_on);
	pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_on);
	pmu_set_power_domain(PD_HDCP, pmu_pd_on);
	pmu_set_power_domain(PD_ISP1, pmu_pd_on);
	pmu_set_power_domain(PD_ISP0, pmu_pd_on);
	pmu_set_power_domain(PD_VO, pmu_pd_on);
	pmu_set_power_domain(PD_TCPD1, pmu_pd_on);
	pmu_set_power_domain(PD_TCPD0, pmu_pd_on);
	pmu_set_power_domain(PD_GPU, pmu_pd_on);
}

/*
 * Request an L2 cache flush of the big (A72) cluster via the PMU and wait
 * for the flush-done status bit, warning if it takes longer than 10 ms.
 */
void rk3399_flush_l2_b(void)
{
	uint32_t wait_cnt = 0;

	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B));
	dsb();

	/*
	 * The Big cluster flush L2 cache took ~4ms by default, give 10ms for
	 * the enough margin.
	 */
	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
		 BIT(L2_FLUSHDONE_CLUSTER_B))) {
		wait_cnt++;
		udelay(10);
		if (wait_cnt == 10000 / 10)
			WARN("L2 cache flush on suspend took longer than 10ms\n");
	}

	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B));
}

/*
 * Power down the big cluster's SCU: verify both A72 cores are already off,
 * flush the cluster's L2, assert ACINACTM and wait for the cluster to reach
 * the WFI-L2 standby state (logging on timeout).
 */
static void pmu_scu_b_pwrdn(void)
{
	uint32_t wait_cnt = 0;

	if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) &
	     (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) !=
	    (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) {
		ERROR("%s: not all cpus is off\n", __func__);
		return;
	}

	rk3399_flush_l2_b();

	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG));

	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
		 BIT(STANDBY_BY_WFIL2_CLUSTER_B))) {
		wait_cnt++;
		udelay(1);
		if (wait_cnt >= MAX_WAIT_COUNT)
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}
}

/* Undo pmu_scu_b_pwrdn(): deassert ACINACTM for the big cluster. */
static void pmu_scu_b_pwrup(void)
{
	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG));
}

static inline
uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id)
{
	/* Read back the power-down method recorded for this core. */
	assert(cpu_id < PLATFORM_CORE_COUNT);
	return core_pm_cfg_info[cpu_id];
}

static inline void set_cpus_pwr_domain_cfg_info(uint32_t cpu_id, uint32_t value)
{
	/* Record the power-down method for this core. */
	assert(cpu_id < PLATFORM_CORE_COUNT);
	core_pm_cfg_info[cpu_id] = value;
#if !USE_COHERENT_MEM
	/* Without coherent memory the entry must be cleaned to RAM so an
	 * incoming (cache-cold) core reads the up-to-date value. */
	flush_dcache_range((uintptr_t)&core_pm_cfg_info[cpu_id],
			   sizeof(uint32_t));
#endif
}

/*
 * Power on @cpu_id according to the method previously recorded for it.
 * Returns 0 on success, -EINVAL if the core is unexpectedly already on
 * in the core_pm (wfi-triggered) mode.
 */
static int cpus_power_domain_on(uint32_t cpu_id)
{
	uint32_t cfg_info;
	uint32_t cpu_pd = PD_CPUL0 + cpu_id;
	/*
	 * There are two ways to power a core on or off:
	 * 1) Switch its power domain directly via PMU_PWRDN_CON.
	 * 2) Enable core power management in PMU_CORE_PM_CON; the domain
	 *    is then powered off automatically when the core enters wfi.
	 */

	cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id);

	if (cfg_info == core_pwr_pd) {
		/* disable core_pm cfg */
		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      CORES_PM_DISABLE);
		/* if the core is already on, power it off first */
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 0);
			pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
		}

		pmu_power_domain_ctr(cpu_pd, pmu_pd_on);
	} else {
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			WARN("%s: cpu%d is not in off,!\n", __func__, cpu_id);
			return -EINVAL;
		}

		/* Software wakeup: the PMU brings the core up itself. */
		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      BIT(core_pm_sft_wakeup_en));
		dsb();
	}

	return 0;
}

/*
 * Power off @cpu_id using method @pd_cfg (core_pwr_pd for an immediate
 * domain switch, core_pwr_wfi / core_pwr_wfi_int for wfi-triggered
 * power-down). Returns 0 on success or if already off, -EINVAL when the
 * core has not reached wfi for the direct power-down path.
 */
static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg)
{
	uint32_t cpu_pd;
	uint32_t core_pm_value;

	cpu_pd = PD_CPUL0 + cpu_id;
	if (pmu_power_domain_st(cpu_pd) == pmu_pd_off)
		return 0;

	if (pd_cfg == core_pwr_pd) {
		/* The core must be in wfi/wfe before its domain is cut. */
		if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK))
			return -EINVAL;

		/* disable core_pm cfg */
		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      CORES_PM_DISABLE);

		set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);
		pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
	} else {
		set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);

		/* Arm auto power-down on wfi; optionally allow an interrupt
		 * to wake the core (core_pwr_wfi_int). */
		core_pm_value = BIT(core_pm_en);
		if (pd_cfg == core_pwr_wfi_int)
			core_pm_value |= BIT(core_pm_int_wakeup_en);
		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      core_pm_value);
		dsb();
	}

	return 0;
}

/*
 * Cluster-level suspend hook: when this is the last running core of its
 * cluster (per PMU_PWRDN_ST), drop the cluster PLL to slow mode and mark
 * the cluster for warm-boot PLL restore. Re-check afterwards in case a
 * sibling core raced back up.
 */
static inline void clst_pwr_domain_suspend(plat_local_state_t lvl_state)
{
	uint32_t cpu_id = plat_my_core_pos();
	uint32_t pll_id, clst_st_msk, clst_st_chk_msk, pmu_st;

	assert(cpu_id < PLATFORM_CORE_COUNT);

	if (lvl_state == PLAT_MAX_OFF_STATE) {
		if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT) {
			pll_id = ALPLL_ID;
			clst_st_msk = CLST_L_CPUS_MSK;
		} else {
			pll_id = ABPLL_ID;
			clst_st_msk = CLST_B_CPUS_MSK <<
				       PLATFORM_CLUSTER0_CORE_COUNT;
		}

		/* Expected state: every cluster sibling except us is down. */
		clst_st_chk_msk = clst_st_msk & ~(BIT(cpu_id));

		pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);

		pmu_st &= clst_st_msk;

		if (pmu_st == clst_st_chk_msk) {
			mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3),
				      PLL_SLOW_MODE);

			clst_warmboot_data[pll_id] = PMU_CLST_RET;

			pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);
			pmu_st &= clst_st_msk;
			if (pmu_st == clst_st_chk_msk)
				return;
			/*
			 * Another core in this cluster came back up in the
			 * meantime; revert the PLL config immediately.
598 */ 599 mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3), 600 PLL_NOMAL_MODE); 601 clst_warmboot_data[pll_id] = 0; 602 } 603 } 604 } 605 606 static int clst_pwr_domain_resume(plat_local_state_t lvl_state) 607 { 608 uint32_t cpu_id = plat_my_core_pos(); 609 uint32_t pll_id, pll_st; 610 611 assert(cpu_id < PLATFORM_CORE_COUNT); 612 613 if (lvl_state == PLAT_MAX_OFF_STATE) { 614 if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT) 615 pll_id = ALPLL_ID; 616 else 617 pll_id = ABPLL_ID; 618 619 pll_st = mmio_read_32(CRU_BASE + CRU_PLL_CON(pll_id, 3)) >> 620 PLL_MODE_SHIFT; 621 622 if (pll_st != NORMAL_MODE) { 623 WARN("%s: clst (%d) is in error mode (%d)\n", 624 __func__, pll_id, pll_st); 625 return -1; 626 } 627 } 628 629 return 0; 630 } 631 632 static void nonboot_cpus_off(void) 633 { 634 uint32_t boot_cpu, cpu; 635 636 boot_cpu = plat_my_core_pos(); 637 638 /* turn off noboot cpus */ 639 for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) { 640 if (cpu == boot_cpu) 641 continue; 642 cpus_power_domain_off(cpu, core_pwr_pd); 643 } 644 } 645 646 int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint) 647 { 648 uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr); 649 650 assert(cpu_id < PLATFORM_CORE_COUNT); 651 assert(cpuson_flags[cpu_id] == 0); 652 cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG; 653 cpuson_entry_point[cpu_id] = entrypoint; 654 dsb(); 655 656 cpus_power_domain_on(cpu_id); 657 658 return PSCI_E_SUCCESS; 659 } 660 661 int rockchip_soc_cores_pwr_dm_off(void) 662 { 663 uint32_t cpu_id = plat_my_core_pos(); 664 665 cpus_power_domain_off(cpu_id, core_pwr_wfi); 666 667 return PSCI_E_SUCCESS; 668 } 669 670 int rockchip_soc_hlvl_pwr_dm_off(uint32_t lvl, 671 plat_local_state_t lvl_state) 672 { 673 if (lvl == MPIDR_AFFLVL1) { 674 clst_pwr_domain_suspend(lvl_state); 675 } 676 677 return PSCI_E_SUCCESS; 678 } 679 680 int rockchip_soc_cores_pwr_dm_suspend(void) 681 { 682 uint32_t cpu_id = plat_my_core_pos(); 683 684 assert(cpu_id < PLATFORM_CORE_COUNT); 685 
assert(cpuson_flags[cpu_id] == 0); 686 cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN; 687 cpuson_entry_point[cpu_id] = plat_get_sec_entrypoint(); 688 dsb(); 689 690 cpus_power_domain_off(cpu_id, core_pwr_wfi_int); 691 692 return PSCI_E_SUCCESS; 693 } 694 695 int rockchip_soc_hlvl_pwr_dm_suspend(uint32_t lvl, plat_local_state_t lvl_state) 696 { 697 if (lvl == MPIDR_AFFLVL1) { 698 clst_pwr_domain_suspend(lvl_state); 699 } 700 701 return PSCI_E_SUCCESS; 702 } 703 704 int rockchip_soc_cores_pwr_dm_on_finish(void) 705 { 706 uint32_t cpu_id = plat_my_core_pos(); 707 708 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 709 CORES_PM_DISABLE); 710 return PSCI_E_SUCCESS; 711 } 712 713 int rockchip_soc_hlvl_pwr_dm_on_finish(uint32_t lvl, 714 plat_local_state_t lvl_state) 715 { 716 if (lvl == MPIDR_AFFLVL1) { 717 clst_pwr_domain_resume(lvl_state); 718 } 719 720 return PSCI_E_SUCCESS; 721 } 722 723 int rockchip_soc_cores_pwr_dm_resume(void) 724 { 725 uint32_t cpu_id = plat_my_core_pos(); 726 727 /* Disable core_pm */ 728 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), CORES_PM_DISABLE); 729 730 return PSCI_E_SUCCESS; 731 } 732 733 int rockchip_soc_hlvl_pwr_dm_resume(uint32_t lvl, plat_local_state_t lvl_state) 734 { 735 if (lvl == MPIDR_AFFLVL1) { 736 clst_pwr_domain_resume(lvl_state); 737 } 738 739 return PSCI_E_SUCCESS; 740 } 741 742 /** 743 * init_pmu_counts - Init timing counts in the PMU register area 744 * 745 * At various points when we power up or down parts of the system we need 746 * a delay to wait for power / clocks to become stable. The PMU has counters 747 * to help software do the delay properly. Basically, it works like this: 748 * - Software sets up counter values 749 * - When software turns on something in the PMU, the counter kicks off 750 * - The hardware sets a bit automatically when the counter has finished and 751 * software knows that the initialization is done. 752 * 753 * It's software's job to setup these counters. 
 * The hardware power on default
 * for these settings is conservative, setting everything to 0x5dc0
 * (750 ms in 32 kHz counts or 1 ms in 24 MHz counts).
 *
 * Note that some of these counters are only really used at suspend/resume
 * time (for instance, that's the only time we turn off/on the oscillator) and
 * others are used during normal runtime (like turning on/off a CPU or GPU) but
 * it doesn't hurt to init everything at boot.
 *
 * Also note that these counters can run off the 32 kHz clock or the 24 MHz
 * clock. While the 24 MHz clock can give us more precision, it's not always
 * available (like when we turn the oscillator off at sleep time). The
 * pmu_use_lf (lf: low freq) is available in power mode. Current understanding
 * is that counts work like this:
 *    IF (pmu_use_lf == 0) || (power_mode_en == 0)
 *      use the 24M OSC for counts
 *    ELSE
 *      use the 32K OSC for counts
 *
 * Notes:
 * - There is a separate bit for the PMU called PMU_24M_EN_CFG. At the moment
 *   we always keep that 0. This apparently choose between using the PLL as
 *   the source for the PMU vs. the 24M clock. If we ever set it to 1 we
 *   should consider how it affects these counts (if at all).
 * - The power_mode_en is documented to auto-clear automatically when we leave
 *   "power mode". That's why most clocks are on 24M. Only timings used when
 *   in "power mode" are 32k.
 * - In some cases the kernel may override these counts.
 *
 * The PMU_STABLE_CNT / PMU_OSC_CNT / PMU_PLLLOCK_CNT are important CNTs
 * in power mode, we need to ensure that they are available.
 */
static void init_pmu_counts(void)
{
	/* COUNTS FOR INSIDE POWER MODE */

	/*
	 * From limited testing, need PMU stable >= 2ms, but go overkill
	 * and choose 30 ms to match testing on past SoCs. Also let
	 * OSC have 30 ms for stabilization.
	 */
	mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_32K_CNT_MS(30));
	mmio_write_32(PMU_BASE + PMU_OSC_CNT, CYCL_32K_CNT_MS(30));

	/* Unclear what these should be; try 3 ms */
	mmio_write_32(PMU_BASE + PMU_WAKEUP_RST_CLR_CNT, CYCL_32K_CNT_MS(3));

	/* Unclear what this should be, but set the default explicitly */
	mmio_write_32(PMU_BASE + PMU_TIMEOUT_CNT, 0x5dc0);

	/* COUNTS FOR OUTSIDE POWER MODE */

	/* Put something sorta conservative here until we know better */
	mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_24M_CNT_MS(3));
	mmio_write_32(PMU_BASE + PMU_DDRIO_PWRON_CNT, CYCL_24M_CNT_MS(1));
	mmio_write_32(PMU_BASE + PMU_CENTER_PWRDN_CNT, CYCL_24M_CNT_MS(1));
	mmio_write_32(PMU_BASE + PMU_CENTER_PWRUP_CNT, CYCL_24M_CNT_MS(1));

	/*
	 * When we enable PMU_CLR_PERILP it will shut down the SRAM, but the
	 * M0 code runs in SRAM and we need it to check whether the CPU has
	 * entered FSM status. So we must wait for M0 to finish its code and
	 * enter WFI before shutting down SRAM. Per the FSM order:
	 *   ST_NORMAL->..->ST_SCU_L_PWRDN->..->ST_CENTER_PWRDN->ST_PERILP_PWRDN
	 * adding a delay on ST_SCU_L_PWRDN guarantees M0 sees the FSM status
	 * and enters WFI before PMU_CLR_PERILP takes effect.
	 */
	mmio_write_32(PMU_BASE + PMU_SCU_L_PWRDN_CNT, CYCL_24M_CNT_MS(5));
	mmio_write_32(PMU_BASE + PMU_SCU_L_PWRUP_CNT, CYCL_24M_CNT_US(1));

	/*
	 * Set CPU/GPU to 1 us.
	 *
	 * NOTE: Even though ATF doesn't configure the GPU we'll still setup
	 * counts here. After all ATF controls all these other bits and also
	 * chooses which clock these counters use.
	 */
	mmio_write_32(PMU_BASE + PMU_SCU_B_PWRDN_CNT, CYCL_24M_CNT_US(1));
	mmio_write_32(PMU_BASE + PMU_SCU_B_PWRUP_CNT, CYCL_24M_CNT_US(1));
	mmio_write_32(PMU_BASE + PMU_GPU_PWRDN_CNT, CYCL_24M_CNT_US(1));
	mmio_write_32(PMU_BASE + PMU_GPU_PWRUP_CNT, CYCL_24M_CNT_US(1));
}

/* Saved CRU_CLKGATE_CON(3) value, restored after suspend. */
static uint32_t clk_ddrc_save;

/*
 * Program the PMU for system sleep: keep the DDRC clock gate open, move
 * the DDR controller onto ABPLL, force the CCI awake for the handshake,
 * arm the ADB400 / CCI500 hardware idle requests, and finally write the
 * full sleep-mode configuration plus 32 kHz pad routing.
 */
static void sys_slp_config(void)
{
	uint32_t slp_mode_cfg = 0;

	/* keep enabling clk_ddrc_bpll_src_en gate for DDRC */
	clk_ddrc_save = mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(3));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(3), WMSK_BIT(1));

	prepare_abpll_for_ddrctrl();
	sram_func_set_ddrctl_pll(ABPLL_ID);

	mmio_write_32(GRF_BASE + GRF_SOC_CON4, CCI_FORCE_WAKEUP);
	mmio_write_32(PMU_BASE + PMU_CCI500_CON,
		      BIT_WITH_WMSK(PMU_CLR_PREQ_CCI500_HW) |
		      BIT_WITH_WMSK(PMU_CLR_QREQ_CCI500_HW) |
		      BIT_WITH_WMSK(PMU_QGATING_CCI500_CFG));

	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      BIT_WITH_WMSK(PMU_CLR_CORE_L_HW) |
		      BIT_WITH_WMSK(PMU_CLR_CORE_L_2GIC_HW) |
		      BIT_WITH_WMSK(PMU_CLR_GIC2_CORE_L_HW));

	slp_mode_cfg = BIT(PMU_PWR_MODE_EN) |
		       BIT(PMU_WKUP_RST_EN) |
		       BIT(PMU_INPUT_CLAMP_EN) |
		       BIT(PMU_POWER_OFF_REQ_CFG) |
		       BIT(PMU_CPU0_PD_EN) |
		       BIT(PMU_L2_FLUSH_EN) |
		       BIT(PMU_L2_IDLE_EN) |
		       BIT(PMU_SCU_PD_EN) |
		       BIT(PMU_CCI_PD_EN) |
		       BIT(PMU_CLK_CORE_SRC_GATE_EN) |
		       BIT(PMU_ALIVE_USE_LF) |
		       BIT(PMU_SREF0_ENTER_EN) |
		       BIT(PMU_SREF1_ENTER_EN) |
		       BIT(PMU_DDRC0_GATING_EN) |
		       BIT(PMU_DDRC1_GATING_EN) |
		       BIT(PMU_DDRIO0_RET_EN) |
		       BIT(PMU_DDRIO0_RET_DE_REQ) |
		       BIT(PMU_DDRIO1_RET_EN) |
		       BIT(PMU_DDRIO1_RET_DE_REQ) |
		       BIT(PMU_CENTER_PD_EN) |
		       BIT(PMU_PERILP_PD_EN) |
		       BIT(PMU_CLK_PERILP_SRC_GATE_EN) |
		       BIT(PMU_PLL_PD_EN) |
		       BIT(PMU_CLK_CENTER_SRC_GATE_EN) |
		       BIT(PMU_OSC_DIS) |
		       BIT(PMU_PMU_USE_LF);

	mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, BIT(PMU_GPIO_WKUP_EN));
	mmio_write_32(PMU_BASE + PMU_PWRMODE_CON,
		      slp_mode_cfg);

	mmio_write_32(PMU_BASE + PMU_PLL_CON, PLL_PD_HW);
	mmio_write_32(PMUGRF_BASE + PMUGRF_SOC_CON0, EXTERNAL_32K);
	mmio_write_32(PMUGRF_BASE, IOMUX_CLK_32K); /* 32k iomux */
}

/* Assert hardware-idle request bits in PMU_BUS_CLR. */
static void set_hw_idle(uint32_t hw_idle)
{
	mmio_setbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle);
}

/* Deassert hardware-idle request bits in PMU_BUS_CLR. */
static void clr_hw_idle(uint32_t hw_idle)
{
	mmio_clrbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle);
}

/* Saved GPIO2..GPIO4 state (iomux, pull, direction, clock gate) for
 * suspend_apio()/resume_apio(). */
static uint32_t iomux_status[12];
static uint32_t pull_mode_status[12];
static uint32_t gpio_direction[3];
static uint32_t gpio_2_4_clk_gate;

/*
 * Put the board-selected AP-IO groups into a low-power state for suspend:
 * save GPIO2..GPIO4 iomux/pull/direction and clock-gate state, then for
 * each enabled apio group switch the pins to GPIO function, pull-none,
 * input direction. resume_apio() reverses this.
 */
static void suspend_apio(void)
{
	struct bl_aux_rk_apio_info *suspend_apio;
	int i;

	suspend_apio = plat_get_rockchip_suspend_apio();

	if (!suspend_apio)
		return;

	/* save gpio2 ~ gpio4 iomux and pull mode */
	for (i = 0; i < 12; i++) {
		iomux_status[i] = mmio_read_32(GRF_BASE +
				GRF_GPIO2A_IOMUX + i * 4);
		pull_mode_status[i] = mmio_read_32(GRF_BASE +
				GRF_GPIO2A_P + i * 4);
	}

	/* store gpio2 ~ gpio4 clock gate state */
	gpio_2_4_clk_gate = (mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(31)) >>
				PCLK_GPIO2_GATE_SHIFT) & 0x07;

	/* enable gpio2 ~ gpio4 clock gate */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
		      BITS_WITH_WMASK(0, 0x07, PCLK_GPIO2_GATE_SHIFT));

	/* save gpio2 ~ gpio4 direction */
	gpio_direction[0] = mmio_read_32(GPIO2_BASE + 0x04);
	gpio_direction[1] = mmio_read_32(GPIO3_BASE + 0x04);
	gpio_direction[2] = mmio_read_32(GPIO4_BASE + 0x04);

	/* apio1 covers gpio3a0 ~ gpio3c7 */
	if (suspend_apio->apio1) {

		/* set gpio3a0 ~ gpio3c7 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO3A_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO3B_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO3C_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio3a0 ~ gpio3c7 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO3A_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO3B_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO3C_P, REG_SOC_WMSK | 0);

		/* set gpio3a0 ~ gpio3c7 to input */
		mmio_clrbits_32(GPIO3_BASE + 0x04, 0x00ffffff);
	}

	/* apio2 covers gpio2a0 ~ gpio2b4 */
	if (suspend_apio->apio2) {

		/* set gpio2a0 ~ gpio2b4 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO2B_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio2a0 ~ gpio2b4 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO2A_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO2B_P, REG_SOC_WMSK | 0);

		/* set gpio2a0 ~ gpio2b4 to input */
		mmio_clrbits_32(GPIO2_BASE + 0x04, 0x00001fff);
	}

	/* apio3 covers gpio2c0 ~ gpio2d4 */
	if (suspend_apio->apio3) {

		/* set gpio2c0 ~ gpio2d4 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO2C_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO2D_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio2c0 ~ gpio2d4 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO2C_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO2D_P, REG_SOC_WMSK | 0);

		/* set gpio2c0 ~ gpio2d4 to input */
		mmio_clrbits_32(GPIO2_BASE + 0x04, 0x1fff0000);
	}

	/* apio4 covers gpio4c0 ~ gpio4c7, gpio4d0 ~ gpio4d6 */
	if (suspend_apio->apio4) {

		/* set gpio4c0 ~ gpio4d6 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO4D_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio4c0 ~ gpio4d6 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO4C_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO4D_P, REG_SOC_WMSK | 0);

		/* set gpio4c0 ~ gpio4d6 to input */
		mmio_clrbits_32(GPIO4_BASE + 0x04, 0x7fff0000);
	}

	/* apio5 covers gpio3d0 ~ gpio3d7, gpio4a0 ~ gpio4a7 */
	if (suspend_apio->apio5) {
		/* set gpio3d0 ~ gpio4a7 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO3D_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO4A_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio3d0 ~ gpio4a7 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO3D_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO4A_P, REG_SOC_WMSK | 0);

		/* set gpio3d0 ~ gpio4a7 to input */
		mmio_clrbits_32(GPIO3_BASE + 0x04, 0xff000000);
		mmio_clrbits_32(GPIO4_BASE + 0x04, 0x000000ff);
	}
}

/*
 * Undo suspend_apio(): restore the saved pull modes, iomux settings,
 * pin directions and the GPIO2..GPIO4 clock-gate state.
 */
static void resume_apio(void)
{
	struct bl_aux_rk_apio_info *suspend_apio;
	int i;

	suspend_apio = plat_get_rockchip_suspend_apio();

	if (!suspend_apio)
		return;

	for (i = 0; i < 12; i++) {
		mmio_write_32(GRF_BASE + GRF_GPIO2A_P + i * 4,
			      REG_SOC_WMSK | pull_mode_status[i]);
		mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX + i * 4,
			      REG_SOC_WMSK | iomux_status[i]);
	}

	/* set gpio2 ~ gpio4 direction back to store value */
	mmio_write_32(GPIO2_BASE + 0x04, gpio_direction[0]);
	mmio_write_32(GPIO3_BASE + 0x04, gpio_direction[1]);
	mmio_write_32(GPIO4_BASE + 0x04, gpio_direction[2]);

	/* set gpio2 ~ gpio4 clock gate back to store value */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
		      BITS_WITH_WMASK(gpio_2_4_clk_gate, 0x07,
				      PCLK_GPIO2_GATE_SHIFT));
}

/*
 * Drive the board-defined suspend GPIOs to their suspend polarity,
 * in declaration order, with a 1 us settle delay after each pin.
 */
static void suspend_gpio(void)
{
	struct bl_aux_gpio_info *suspend_gpio;
	uint32_t count;
	int i;

	suspend_gpio = plat_get_rockchip_suspend_gpio(&count);

	for (i = 0; i < count; i++) {
		gpio_set_value(suspend_gpio[i].index, suspend_gpio[i].polarity);
		gpio_set_direction(suspend_gpio[i].index, GPIO_DIR_OUT);
		udelay(1);
	}
}

/*
 * Revert suspend_gpio(): walk the list in the opposite order and drive
 * each pin to the inverse (!polarity) level.
 */
static void resume_gpio(void)
{
	struct bl_aux_gpio_info *suspend_gpio;
	uint32_t count;
	int i;

	suspend_gpio = plat_get_rockchip_suspend_gpio(&count);

	for (i = count - 1; i >= 0; i--) {
		gpio_set_value(suspend_gpio[i].index,
			       !suspend_gpio[i].polarity);
		gpio_set_direction(suspend_gpio[i].index, GPIO_DIR_OUT);
		udelay(1);
	}
}

/*
 * Copy the BL31 SRAM sections (text, data and the incbin payload) into
 * the store_sram buffer so they survive system suspend; layout inside
 * store_sram is text | data | incbin, back to back.
 */
void sram_save(void)
{
	size_t text_size = (char *)&__bl31_sram_text_real_end -
			   (char *)&__bl31_sram_text_start;
	size_t data_size = (char *)&__bl31_sram_data_real_end -
			   (char *)&__bl31_sram_data_start;
	size_t incbin_size = (char *)&__sram_incbin_real_end -
			     (char *)&__sram_incbin_start;

	memcpy(&store_sram[0], &__bl31_sram_text_start, text_size);
	memcpy(&store_sram[text_size], &__bl31_sram_data_start, data_size);
	memcpy(&store_sram[text_size + data_size], &__sram_incbin_start,
	       incbin_size);
}

/* Copy the sections saved by sram_save() back into SRAM (same layout). */
void sram_restore(void)
{
	size_t text_size = (char *)&__bl31_sram_text_real_end -
			   (char *)&__bl31_sram_text_start;
	size_t data_size = (char *)&__bl31_sram_data_real_end -
			   (char *)&__bl31_sram_data_start;
	size_t incbin_size = (char *)&__sram_incbin_real_end -
			     (char *)&__sram_incbin_start;

	memcpy(&__bl31_sram_text_start, &store_sram[0], text_size);
	memcpy(&__bl31_sram_data_start, &store_sram[text_size], data_size);
	memcpy(&__sram_incbin_start, &store_sram[text_size + data_size],
	       incbin_size);
}

/* Saved state of the 16550-style debug UART (divisor latch + control regs) */
struct uart_debug {
	uint32_t uart_dll;
	uint32_t uart_dlh;
	uint32_t uart_ier;
	uint32_t uart_fcr;
	uint32_t uart_mcr;
	uint32_t uart_lcr;
};

/*
 * Register offsets.  DLL/DLH overlay the registers at 0x00/0x04 and are
 * only visible while LCR.DLAB is set (hence UART_DLH == UART_IER).
 */
#define UART_DLL	0x00
#define UART_DLH	0x04
#define UART_IER	0x04
#define UART_FCR	0x08
#define UART_LCR	0x0c
#define UART_MCR	0x10
#define UARTSRR		0x88	/* software reset register */

#define UART_RESET	BIT(0)
#define UARTFCR_FIFOEN	BIT(0)
#define RCVR_FIFO_RESET	BIT(1)
#define XMIT_FIFO_RESET	BIT(2)
#define DIAGNOSTIC_MODE	BIT(4)	/* MCR loopback bit */
#define UARTLCR_DLAB	BIT(7)

static struct uart_debug uart_save;

/*
 * Save the debug UART state before suspend.  DLAB is set temporarily in
 * LCR to expose the divisor latch (DLL/DLH), then LCR is written back.
 */
void suspend_uart(void)
{
	uint32_t uart_base = rockchip_get_uart_base();

	if (uart_base == 0)
		return;

	uart_save.uart_lcr = mmio_read_32(uart_base + UART_LCR);
	uart_save.uart_ier = mmio_read_32(uart_base + UART_IER);
	uart_save.uart_mcr = mmio_read_32(uart_base + UART_MCR);
	mmio_write_32(uart_base + UART_LCR,
		      uart_save.uart_lcr | UARTLCR_DLAB);
	uart_save.uart_dll = mmio_read_32(uart_base + UART_DLL);
	uart_save.uart_dlh = mmio_read_32(uart_base + UART_DLH);
	mmio_write_32(uart_base + UART_LCR, uart_save.uart_lcr);
}

/*
 * Re-initialize the debug UART after resume: reset the controller and
 * both FIFOs via SRR, reprogram the divisor with DLAB set, then restore
 * LCR/IER/FCR/MCR from the state saved by suspend_uart().  MCR is put in
 * loopback (DIAGNOSTIC_MODE) while reprogramming -- presumably to keep
 * glitches off the line; NOTE(review): confirm.
 */
void resume_uart(void)
{
	uint32_t uart_base = rockchip_get_uart_base();
	uint32_t uart_lcr;

	if (uart_base == 0)
		return;

	mmio_write_32(uart_base + UARTSRR,
		      XMIT_FIFO_RESET | RCVR_FIFO_RESET | UART_RESET);

	uart_lcr = mmio_read_32(uart_base + UART_LCR);
	mmio_write_32(uart_base + UART_MCR, DIAGNOSTIC_MODE);
	mmio_write_32(uart_base + UART_LCR, uart_lcr | UARTLCR_DLAB);
	mmio_write_32(uart_base + UART_DLL, uart_save.uart_dll);
	mmio_write_32(uart_base + UART_DLH, uart_save.uart_dlh);
	mmio_write_32(uart_base + UART_LCR, uart_save.uart_lcr);
	mmio_write_32(uart_base + UART_IER, uart_save.uart_ier);
	mmio_write_32(uart_base + UART_FCR, UARTFCR_FIFOEN);
	mmio_write_32(uart_base + UART_MCR, uart_save.uart_mcr);
}

/*
 * Save the USB PHY0/PHY1 control registers in the GRF; the caller must
 * do this before the PERIHP power domain is shut down (see the suspend
 * path below).
 */
void save_usbphy(void)
{
	store_usbphy0[0] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL0);
	store_usbphy0[1] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL2);
	store_usbphy0[2] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL3);
	store_usbphy0[3] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL12);
	store_usbphy0[4] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL13);
	store_usbphy0[5] =
		mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL15);
	store_usbphy0[6] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL16);

	store_usbphy1[0] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL0);
	store_usbphy1[1] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL2);
	store_usbphy1[2] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL3);
	store_usbphy1[3] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL12);
	store_usbphy1[4] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL13);
	store_usbphy1[5] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL15);
	store_usbphy1[6] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL16);
}

/*
 * Write back the USB PHY state captured by save_usbphy().  The high
 * 16 bits carry the GRF write-enable mask (REG_SOC_WMSK) so every bit
 * of each register is updated.
 */
void restore_usbphy(void)
{
	mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL0,
		      REG_SOC_WMSK | store_usbphy0[0]);
	mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL2,
		      REG_SOC_WMSK | store_usbphy0[1]);
	mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL3,
		      REG_SOC_WMSK | store_usbphy0[2]);
	mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL12,
		      REG_SOC_WMSK | store_usbphy0[3]);
	mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL13,
		      REG_SOC_WMSK | store_usbphy0[4]);
	mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL15,
		      REG_SOC_WMSK | store_usbphy0[5]);
	mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL16,
		      REG_SOC_WMSK | store_usbphy0[6]);

	mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL0,
		      REG_SOC_WMSK | store_usbphy1[0]);
	mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL2,
		      REG_SOC_WMSK | store_usbphy1[1]);
	mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL3,
		      REG_SOC_WMSK | store_usbphy1[2]);
	mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL12,
		      REG_SOC_WMSK | store_usbphy1[3]);
	mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL13,
		      REG_SOC_WMSK | store_usbphy1[4]);
	mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL15,
		      REG_SOC_WMSK | store_usbphy1[5]);
	mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL16,
		      REG_SOC_WMSK | store_usbphy1[6]);
}

/*
 * Save the GRF registers that are lost across system suspend:
 * SOC_CON 0-4 and 7, the four DDRC0_CON registers and IO_VSEL.
 */
void grf_register_save(void)
{
	int i;

	store_grf_soc_con0 = mmio_read_32(GRF_BASE + GRF_SOC_CON(0));
	store_grf_soc_con1 = mmio_read_32(GRF_BASE + GRF_SOC_CON(1));
	store_grf_soc_con2 = mmio_read_32(GRF_BASE + GRF_SOC_CON(2));
	store_grf_soc_con3 = mmio_read_32(GRF_BASE + GRF_SOC_CON(3));
	store_grf_soc_con4 = mmio_read_32(GRF_BASE + GRF_SOC_CON(4));
	store_grf_soc_con7 = mmio_read_32(GRF_BASE + GRF_SOC_CON(7));

	for (i = 0; i < 4; i++)
		store_grf_ddrc_con[i] =
			mmio_read_32(GRF_BASE + GRF_DDRC0_CON0 + i * 4);

	store_grf_io_vsel = mmio_read_32(GRF_BASE + GRF_IO_VSEL);
}

/*
 * Restore the GRF registers saved by grf_register_save(), using the
 * write-enable mask in the upper half-word to update all bits.
 */
void grf_register_restore(void)
{
	int i;

	mmio_write_32(GRF_BASE + GRF_SOC_CON(0),
		      REG_SOC_WMSK | store_grf_soc_con0);
	mmio_write_32(GRF_BASE + GRF_SOC_CON(1),
		      REG_SOC_WMSK | store_grf_soc_con1);
	mmio_write_32(GRF_BASE + GRF_SOC_CON(2),
		      REG_SOC_WMSK | store_grf_soc_con2);
	mmio_write_32(GRF_BASE + GRF_SOC_CON(3),
		      REG_SOC_WMSK | store_grf_soc_con3);
	mmio_write_32(GRF_BASE + GRF_SOC_CON(4),
		      REG_SOC_WMSK | store_grf_soc_con4);
	mmio_write_32(GRF_BASE + GRF_SOC_CON(7),
		      REG_SOC_WMSK | store_grf_soc_con7);

	for (i = 0; i < 4; i++)
		mmio_write_32(GRF_BASE + GRF_DDRC0_CON0 + i * 4,
			      REG_SOC_WMSK | store_grf_ddrc_con[i]);

	mmio_write_32(GRF_BASE + GRF_IO_VSEL, REG_SOC_WMSK | store_grf_io_vsel);
}

/* Save the whole CRU register file from offset 0 up to CRU_SDIO0_CON1. */
void cru_register_save(void)
{
	int i;

	for (i = 0; i <= CRU_SDIO0_CON1; i = i + 4)
		store_cru[i / 4] = mmio_read_32(CRU_BASE + i);
}

/*
 * Restore the CRU registers saved by cru_register_save(), skipping the
 * ones that are managed elsewhere in the resume path.
 */
void cru_register_restore(void)
{
	int i;

	for (i = 0; i <= CRU_SDIO0_CON1; i = i + 4) {

		/*
		 * since DPLL and CRU_CLKSEL_CON6 have been restored in
		 * dmc_resume, and ABPLL will be restored later, skip them
		 */
		if ((i == CRU_CLKSEL_CON6) ||
		    (i >= CRU_PLL_CON(ABPLL_ID, 0) &&
		     i <= CRU_PLL_CON(DPLL_ID, 5)))
			continue;

		/* PLL_CON2 registers have no write mask: write them raw */
		if ((i == CRU_PLL_CON(ALPLL_ID, 2)) ||
		    (i == CRU_PLL_CON(CPLL_ID, 2)) ||
		    (i == CRU_PLL_CON(GPLL_ID, 2)) ||
		    (i == CRU_PLL_CON(NPLL_ID, 2)) ||
		    (i == CRU_PLL_CON(VPLL_ID, 2)))
			mmio_write_32(CRU_BASE + i, store_cru[i / 4]);
		/*
		 * CRU_GLB_CNT_TH and CRU_CLKSEL_CON97~CRU_CLKSEL_CON107
		 * do not need the high 16-bit write mask
		 */
		else if ((i > 0x27c && i < 0x2b0) || (i == 0x508))
			mmio_write_32(CRU_BASE + i, store_cru[i / 4]);
		else
			mmio_write_32(CRU_BASE + i,
				      REG_SOC_WMSK | store_cru[i / 4]);
	}
}

/*
 * Save the first two registers (control and timeout) of both watchdogs
 * and record in pmu_enable_watchdog0 whether WDT0 was enabled (bit 0 of
 * its control register).
 */
void wdt_register_save(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		store_wdt0[i] = mmio_read_32(WDT0_BASE + i * 4);
		store_wdt1[i] = mmio_read_32(WDT1_BASE + i * 4);
	}
	pmu_enable_watchdog0 = (uint8_t) store_wdt0[0] & 0x1;
}

/*
 * Restore both watchdogs from the state saved by wdt_register_save().
 * Registers are written in reverse order so the timeout register is
 * programmed before the control (enable) register.
 */
void wdt_register_restore(void)
{
	int i;

	for (i = 1; i >= 0; i--) {
		mmio_write_32(WDT0_BASE + i * 4, store_wdt0[i]);
		mmio_write_32(WDT1_BASE + i * 4, store_wdt1[i]);
	}

	/* write 0x76 to cnt_restart to keep watchdog alive */
	mmio_write_32(WDT0_BASE + 0x0c, 0x76);
	mmio_write_32(WDT1_BASE + 0x0c, 0x76);
}

/*
 * System-suspend entry point: save all hardware state that the SoC
 * loses in the suspend state (GIC, USB PHY, GRF, CRU, WDT, SRAM, GPIO,
 * UART), idle and power down the domains, and hand control of the
 * suspend sequence to the PMU M0 coprocessor.  Mirrored step by step by
 * rockchip_soc_sys_pwr_dm_resume().
 */
int rockchip_soc_sys_pwr_dm_suspend(void)
{
	uint32_t wait_cnt = 0;
	uint32_t status = 0;

	ddr_prepare_for_sys_suspend();
	dmc_suspend();
	pmu_scu_b_pwrdn();

	/* save GIC distributor/redistributor state for restore on resume */
	gicv3_rdistif_save(plat_my_core_pos(), &rdist_ctx);
	gicv3_distif_save(&dist_ctx);

	/* need to save usbphy before shutdown PERIHP PD */
	save_usbphy();

	pmu_power_domains_suspend();
	set_hw_idle(BIT(PMU_CLR_CENTER1) |
		    BIT(PMU_CLR_ALIVE) |
		    BIT(PMU_CLR_MSCH0) |
		    BIT(PMU_CLR_MSCH1) |
		    BIT(PMU_CLR_CCIM0) |
		    BIT(PMU_CLR_CCIM1) |
		    BIT(PMU_CLR_CENTER) |
		    BIT(PMU_CLR_PERILP) |
		    BIT(PMU_CLR_PERILPM0) |
		    BIT(PMU_CLR_GIC));
	set_pmu_rsthold();
	sys_slp_config();

	/* start the PMU M0 firmware that sequences the actual suspend */
	m0_configure_execute_addr(M0PMU_BINCODE_BASE);
	m0_start();

	pmu_sgrf_rst_hld();

	/* warm-boot through the low-power entry point while suspended */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      ((uintptr_t)&pmu_cpuson_entrypoint >>
		       CPU_BOOT_ADDR_ALIGN) | CPU_BOOT_ADDR_WMASK);

	/* request the ADB400 bridges between cluster-b and the GIC to idle */
	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) |
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_SW) |
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_GIC2_CORE_B_SW));
	dsb();
	status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) |
		BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) |
		BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST);
	/* poll (up to MAX_WAIT_COUNT us) until all three bridges report idle */
	while ((mmio_read_32(PMU_BASE +
	       PMU_ADB400_ST) & status) != status) {
		wait_cnt++;
		if (wait_cnt >= MAX_WAIT_COUNT) {
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_ADB400_ST));
			panic();
		}
		udelay(1);
	}
	mmio_setbits_32(PMU_BASE + PMU_PWRDN_CON, BIT(PMU_SCU_B_PWRDWN_EN));

	wdt_register_save();
	secure_watchdog_gate();

	/*
	 * Disabling PLLs/PWM/DVFS is approaching WFI which is
	 * the last steps in suspend.
	 */
	disable_dvfs_plls();
	disable_pwms();
	disable_nodvfs_plls();

	suspend_apio();
	suspend_gpio();
	suspend_uart();
	grf_register_save();
	cru_register_save();
	sram_save();
	plat_rockchip_save_gpio();

	return 0;
}

/*
 * System-resume entry point: undo rockchip_soc_sys_pwr_dm_suspend()
 * step by step -- restore register files, re-enable PLLs/PWMs, bring
 * the cluster-b/GIC bridges and power domains back up, and restore the
 * GIC and USB PHY state.
 */
int rockchip_soc_sys_pwr_dm_resume(void)
{
	uint32_t wait_cnt = 0;
	uint32_t status = 0;

	plat_rockchip_restore_gpio();
	cru_register_restore();
	grf_register_restore();
	wdt_register_restore();
	resume_uart();
	resume_apio();
	resume_gpio();
	enable_nodvfs_plls();
	enable_pwms();
	/* PWM regulators take time to come up; give 300us to be safe. */
	udelay(300);
	enable_dvfs_plls();

	secure_sgrf_init();
	secure_sgrf_ddr_rgn_init();

	/* restore clk_ddrc_bpll_src_en gate */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(3),
		      BITS_WITH_WMASK(clk_ddrc_save, 0xff, 0));

	/*
	 * The wakeup status is not cleared by itself, we need to clear it
	 * manually. Otherwise we will always query some interrupt next time.
	 *
	 * NOTE: If the kernel needs to query this, we might want to stash it
	 * somewhere.
	 */
	mmio_write_32(PMU_BASE + PMU_WAKEUP_STATUS, 0xffffffff);
	mmio_write_32(PMU_BASE + PMU_WKUP_CFG4, 0x00);

	/* switch the warm-boot address back to the normal entry point */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	mmio_write_32(PMU_BASE + PMU_CCI500_CON,
		      WMSK_BIT(PMU_CLR_PREQ_CCI500_HW) |
		      WMSK_BIT(PMU_CLR_QREQ_CCI500_HW) |
		      WMSK_BIT(PMU_QGATING_CCI500_CFG));
	dsb();
	mmio_clrbits_32(PMU_BASE + PMU_PWRDN_CON,
			BIT(PMU_SCU_B_PWRDWN_EN));

	/* release the ADB400 bridge idle requests made during suspend */
	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) |
		      WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_SW) |
		      WMSK_BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW) |
		      WMSK_BIT(PMU_CLR_CORE_L_HW) |
		      WMSK_BIT(PMU_CLR_CORE_L_2GIC_HW) |
		      WMSK_BIT(PMU_CLR_GIC2_CORE_L_HW));

	status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) |
		BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) |
		BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST);

	/* poll until none of the three bridges report idle any more */
	while ((mmio_read_32(PMU_BASE +
	       PMU_ADB400_ST) & status)) {
		wait_cnt++;
		if (wait_cnt >= MAX_WAIT_COUNT) {
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_ADB400_ST));
			panic();
		}
		udelay(1);
	}

	pmu_scu_b_pwrup();
	pmu_power_domains_resume();

	restore_abpll();
	clr_hw_idle(BIT(PMU_CLR_CENTER1) |
				BIT(PMU_CLR_ALIVE) |
				BIT(PMU_CLR_MSCH0) |
				BIT(PMU_CLR_MSCH1) |
				BIT(PMU_CLR_CCIM0) |
				BIT(PMU_CLR_CCIM1) |
				BIT(PMU_CLR_CENTER) |
				BIT(PMU_CLR_PERILP) |
				BIT(PMU_CLR_PERILPM0) |
				BIT(PMU_CLR_GIC));

	/* bring the GIC back from the context saved during suspend */
	gicv3_distif_init_restore(&dist_ctx);
	gicv3_rdistif_init_restore(plat_my_core_pos(), &rdist_ctx);
	plat_rockchip_gic_cpuif_enable();
	m0_stop();

	restore_usbphy();

	ddr_prepare_for_sys_resume();

	return 0;
}

/*
 * Soft-reset the SoC: assert the platform reset GPIO if one was
 * declared, otherwise fall back to the global software reset, then spin
 * until the reset takes effect.
 */
void __dead2 rockchip_soc_soft_reset(void)
{
	struct bl_aux_gpio_info *rst_gpio;

	rst_gpio = plat_get_rockchip_gpio_reset();

	if (rst_gpio) {
		gpio_set_direction(rst_gpio->index, GPIO_DIR_OUT);
		gpio_set_value(rst_gpio->index, rst_gpio->polarity);
	} else {
		soc_global_soft_reset();
	}

	while (1)
		;
}

/*
 * Power the system off via the platform poweroff GPIO; if none was
 * declared there is nothing BL31 can do, so just warn and spin.
 */
void __dead2 rockchip_soc_system_off(void)
{
	struct bl_aux_gpio_info *poweroff_gpio;

	poweroff_gpio = plat_get_rockchip_gpio_poweroff();

	if (poweroff_gpio) {
		/*
		 * if the tsadc over-temperature pin (GPIO1A6) is used as the
		 * shutdown gpio, its iomux must be set back to gpio function
		 */
		if (poweroff_gpio->index == TSADC_INT_PIN) {
			mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO1A_IOMUX,
				      GPIO1A6_IOMUX);
		}
		gpio_set_direction(poweroff_gpio->index, GPIO_DIR_OUT);
		gpio_set_value(poweroff_gpio->index, poweroff_gpio->polarity);
	} else {
		WARN("Do nothing when system off\n");
	}

	while (1)
		;
}

/*
 * Add the identity-mapped EL3 regions for the BL31 SRAM sections:
 * text read-only, data/stack read-write, incbin payload non-cacheable.
 */
void rockchip_plat_mmu_el3(void)
{
	size_t sram_size;

	/* sram.text size */
	sram_size = (char *)&__bl31_sram_text_end -
		    (char *)&__bl31_sram_text_start;
	mmap_add_region((unsigned long)&__bl31_sram_text_start,
			(unsigned long)&__bl31_sram_text_start,
			sram_size, MT_MEMORY | MT_RO | MT_SECURE);

	/* sram.data size */
	sram_size = (char *)&__bl31_sram_data_end -
		    (char *)&__bl31_sram_data_start;
	mmap_add_region((unsigned long)&__bl31_sram_data_start,
			(unsigned long)&__bl31_sram_data_start,
			sram_size, MT_MEMORY | MT_RW | MT_SECURE);

	/* sram.stack size */
	sram_size = (char *)&__bl31_sram_stack_end -
		    (char *)&__bl31_sram_stack_start;
	mmap_add_region((unsigned long)&__bl31_sram_stack_start,
			(unsigned long)&__bl31_sram_stack_start,
			sram_size, MT_MEMORY | MT_RW | MT_SECURE);

	/* embedded payload (incbin) region */
	sram_size = (char *)&__sram_incbin_end - (char *)&__sram_incbin_start;
	mmap_add_region((unsigned long)&__sram_incbin_start,
			(unsigned long)&__sram_incbin_start,
			sram_size, MT_NON_CACHEABLE | MT_RW | MT_SECURE);
}

/*
 * One-time PMU setup at boot: clear the per-cpu/per-cluster warm-boot
 * bookkeeping, program the warm-boot entry address, enable NoC auto
 * idling, and power off the non-boot CPUs.
 */
void plat_rockchip_pmu_init(void)
{
	uint32_t cpu;

	rockchip_pd_lock_init();

	/*
	 * register requires 32bits mode, switch it to 32 bits
	 * (the 64-bit entry point address is truncated to uint32_t)
	 */
	cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;

	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
		cpuson_flags[cpu] = 0;

	for (cpu = 0; cpu < PLATFORM_CLUSTER_COUNT; cpu++)
		clst_warmboot_data[cpu] = 0;

	/* config cpu's warm boot address */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	mmio_write_32(PMU_BASE + PMU_NOC_AUTO_ENA, NOC_AUTO_ENABLE);

	/*
	 * Enable Schmitt trigger for better 32 kHz input signal, which is
	 * important for suspend/resume reliability among other things.
	 */
	mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO0A_SMT, GPIO0A0_SMT_ENABLE);

	init_pmu_counts();

	nonboot_cpus_off();

	INFO("%s(%d): pd status %x\n", __func__, __LINE__,
	     mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}