1 /* 2 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. 3 * 4 * SPDX-License-Identifier: BSD-3-Clause 5 */ 6 7 #include <assert.h> 8 #include <errno.h> 9 #include <string.h> 10 11 #include <platform_def.h> 12 13 #include <arch_helpers.h> 14 #include <bl31/bl31.h> 15 #include <common/debug.h> 16 #include <drivers/arm/gicv3.h> 17 #include <drivers/delay_timer.h> 18 #include <drivers/gpio.h> 19 #include <lib/bakery_lock.h> 20 #include <lib/mmio.h> 21 #include <plat/common/platform.h> 22 23 #include <dfs.h> 24 #include <m0_ctl.h> 25 #include <plat_params.h> 26 #include <plat_private.h> 27 #include <pmu.h> 28 #include <pmu_com.h> 29 #include <pwm.h> 30 #include <rk3399_def.h> 31 #include <secure.h> 32 #include <soc.h> 33 #include <suspend.h> 34 35 DEFINE_BAKERY_LOCK(rockchip_pd_lock); 36 37 static uint32_t cpu_warm_boot_addr; 38 static char store_sram[SRAM_BIN_LIMIT + SRAM_TEXT_LIMIT + SRAM_DATA_LIMIT]; 39 static uint32_t store_cru[CRU_SDIO0_CON1 / 4 + 1]; 40 static uint32_t store_usbphy0[7]; 41 static uint32_t store_usbphy1[7]; 42 static uint32_t store_grf_io_vsel; 43 static uint32_t store_grf_soc_con0; 44 static uint32_t store_grf_soc_con1; 45 static uint32_t store_grf_soc_con2; 46 static uint32_t store_grf_soc_con3; 47 static uint32_t store_grf_soc_con4; 48 static uint32_t store_grf_soc_con7; 49 static uint32_t store_grf_ddrc_con[4]; 50 static uint32_t store_wdt0[2]; 51 static uint32_t store_wdt1[2]; 52 static gicv3_dist_ctx_t dist_ctx; 53 static gicv3_redist_ctx_t rdist_ctx; 54 55 /* 56 * There are two ways to powering on or off on core. 57 * 1) Control it power domain into on or off in PMU_PWRDN_CON reg, 58 * it is core_pwr_pd mode 59 * 2) Enable the core power manage in PMU_CORE_PM_CON reg, 60 * then, if the core enter into wfi, it power domain will be 61 * powered off automatically. it is core_pwr_wfi or core_pwr_wfi_int mode 62 * so we need core_pm_cfg_info to distinguish which method be used now. 
63 */ 64 65 static uint32_t core_pm_cfg_info[PLATFORM_CORE_COUNT] 66 #if USE_COHERENT_MEM 67 __attribute__ ((section("tzfw_coherent_mem"))) 68 #endif 69 ;/* coheront */ 70 71 static void pmu_bus_idle_req(uint32_t bus, uint32_t state) 72 { 73 uint32_t bus_id = BIT(bus); 74 uint32_t bus_req; 75 uint32_t wait_cnt = 0; 76 uint32_t bus_state, bus_ack; 77 78 if (state) 79 bus_req = BIT(bus); 80 else 81 bus_req = 0; 82 83 mmio_clrsetbits_32(PMU_BASE + PMU_BUS_IDLE_REQ, bus_id, bus_req); 84 85 do { 86 bus_state = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) & bus_id; 87 bus_ack = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK) & bus_id; 88 if (bus_state == bus_req && bus_ack == bus_req) 89 break; 90 91 wait_cnt++; 92 udelay(1); 93 } while (wait_cnt < MAX_WAIT_COUNT); 94 95 if (bus_state != bus_req || bus_ack != bus_req) { 96 INFO("%s:st=%x(%x)\n", __func__, 97 mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST), 98 bus_state); 99 INFO("%s:st=%x(%x)\n", __func__, 100 mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK), 101 bus_ack); 102 } 103 } 104 105 struct pmu_slpdata_s pmu_slpdata; 106 107 static void qos_restore(void) 108 { 109 if (pmu_power_domain_st(PD_GPU) == pmu_pd_on) 110 RESTORE_QOS(pmu_slpdata.gpu_qos, GPU); 111 if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) { 112 RESTORE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0); 113 RESTORE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1); 114 } 115 if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) { 116 RESTORE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0); 117 RESTORE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1); 118 } 119 if (pmu_power_domain_st(PD_VO) == pmu_pd_on) { 120 RESTORE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R); 121 RESTORE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W); 122 RESTORE_QOS(pmu_slpdata.vop_little, VOP_LITTLE); 123 } 124 if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on) 125 RESTORE_QOS(pmu_slpdata.hdcp_qos, HDCP); 126 if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on) 127 RESTORE_QOS(pmu_slpdata.gmac_qos, GMAC); 128 if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) { 129 
RESTORE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0); 130 RESTORE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1); 131 } 132 if (pmu_power_domain_st(PD_SD) == pmu_pd_on) 133 RESTORE_QOS(pmu_slpdata.sdmmc_qos, SDMMC); 134 if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on) 135 RESTORE_QOS(pmu_slpdata.emmc_qos, EMMC); 136 if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on) 137 RESTORE_QOS(pmu_slpdata.sdio_qos, SDIO); 138 if (pmu_power_domain_st(PD_GIC) == pmu_pd_on) 139 RESTORE_QOS(pmu_slpdata.gic_qos, GIC); 140 if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) { 141 RESTORE_QOS(pmu_slpdata.rga_r_qos, RGA_R); 142 RESTORE_QOS(pmu_slpdata.rga_w_qos, RGA_W); 143 } 144 if (pmu_power_domain_st(PD_IEP) == pmu_pd_on) 145 RESTORE_QOS(pmu_slpdata.iep_qos, IEP); 146 if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) { 147 RESTORE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0); 148 RESTORE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1); 149 } 150 if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) { 151 RESTORE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0); 152 RESTORE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1); 153 RESTORE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP); 154 } 155 if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) { 156 RESTORE_QOS(pmu_slpdata.dmac0_qos, DMAC0); 157 RESTORE_QOS(pmu_slpdata.dmac1_qos, DMAC1); 158 RESTORE_QOS(pmu_slpdata.dcf_qos, DCF); 159 RESTORE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0); 160 RESTORE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1); 161 RESTORE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP); 162 RESTORE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP); 163 RESTORE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1); 164 } 165 if (pmu_power_domain_st(PD_VDU) == pmu_pd_on) 166 RESTORE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0); 167 if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) { 168 RESTORE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R); 169 RESTORE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W); 170 } 171 } 172 173 static void qos_save(void) 174 { 175 if (pmu_power_domain_st(PD_GPU) == pmu_pd_on) 176 
SAVE_QOS(pmu_slpdata.gpu_qos, GPU); 177 if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) { 178 SAVE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0); 179 SAVE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1); 180 } 181 if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) { 182 SAVE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0); 183 SAVE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1); 184 } 185 if (pmu_power_domain_st(PD_VO) == pmu_pd_on) { 186 SAVE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R); 187 SAVE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W); 188 SAVE_QOS(pmu_slpdata.vop_little, VOP_LITTLE); 189 } 190 if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on) 191 SAVE_QOS(pmu_slpdata.hdcp_qos, HDCP); 192 if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on) 193 SAVE_QOS(pmu_slpdata.gmac_qos, GMAC); 194 if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) { 195 SAVE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0); 196 SAVE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1); 197 } 198 if (pmu_power_domain_st(PD_SD) == pmu_pd_on) 199 SAVE_QOS(pmu_slpdata.sdmmc_qos, SDMMC); 200 if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on) 201 SAVE_QOS(pmu_slpdata.emmc_qos, EMMC); 202 if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on) 203 SAVE_QOS(pmu_slpdata.sdio_qos, SDIO); 204 if (pmu_power_domain_st(PD_GIC) == pmu_pd_on) 205 SAVE_QOS(pmu_slpdata.gic_qos, GIC); 206 if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) { 207 SAVE_QOS(pmu_slpdata.rga_r_qos, RGA_R); 208 SAVE_QOS(pmu_slpdata.rga_w_qos, RGA_W); 209 } 210 if (pmu_power_domain_st(PD_IEP) == pmu_pd_on) 211 SAVE_QOS(pmu_slpdata.iep_qos, IEP); 212 if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) { 213 SAVE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0); 214 SAVE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1); 215 } 216 if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) { 217 SAVE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0); 218 SAVE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1); 219 SAVE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP); 220 } 221 if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) { 222 SAVE_QOS(pmu_slpdata.dmac0_qos, DMAC0); 223 
SAVE_QOS(pmu_slpdata.dmac1_qos, DMAC1); 224 SAVE_QOS(pmu_slpdata.dcf_qos, DCF); 225 SAVE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0); 226 SAVE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1); 227 SAVE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP); 228 SAVE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP); 229 SAVE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1); 230 } 231 if (pmu_power_domain_st(PD_VDU) == pmu_pd_on) 232 SAVE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0); 233 if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) { 234 SAVE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R); 235 SAVE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W); 236 } 237 } 238 239 static int pmu_set_power_domain(uint32_t pd_id, uint32_t pd_state) 240 { 241 uint32_t state; 242 243 if (pmu_power_domain_st(pd_id) == pd_state) 244 goto out; 245 246 if (pd_state == pmu_pd_on) 247 pmu_power_domain_ctr(pd_id, pd_state); 248 249 state = (pd_state == pmu_pd_off) ? BUS_IDLE : BUS_ACTIVE; 250 251 switch (pd_id) { 252 case PD_GPU: 253 pmu_bus_idle_req(BUS_ID_GPU, state); 254 break; 255 case PD_VIO: 256 pmu_bus_idle_req(BUS_ID_VIO, state); 257 break; 258 case PD_ISP0: 259 pmu_bus_idle_req(BUS_ID_ISP0, state); 260 break; 261 case PD_ISP1: 262 pmu_bus_idle_req(BUS_ID_ISP1, state); 263 break; 264 case PD_VO: 265 pmu_bus_idle_req(BUS_ID_VOPB, state); 266 pmu_bus_idle_req(BUS_ID_VOPL, state); 267 break; 268 case PD_HDCP: 269 pmu_bus_idle_req(BUS_ID_HDCP, state); 270 break; 271 case PD_TCPD0: 272 break; 273 case PD_TCPD1: 274 break; 275 case PD_GMAC: 276 pmu_bus_idle_req(BUS_ID_GMAC, state); 277 break; 278 case PD_CCI: 279 pmu_bus_idle_req(BUS_ID_CCIM0, state); 280 pmu_bus_idle_req(BUS_ID_CCIM1, state); 281 break; 282 case PD_SD: 283 pmu_bus_idle_req(BUS_ID_SD, state); 284 break; 285 case PD_EMMC: 286 pmu_bus_idle_req(BUS_ID_EMMC, state); 287 break; 288 case PD_EDP: 289 pmu_bus_idle_req(BUS_ID_EDP, state); 290 break; 291 case PD_SDIOAUDIO: 292 pmu_bus_idle_req(BUS_ID_SDIOAUDIO, state); 293 break; 294 case PD_GIC: 295 
pmu_bus_idle_req(BUS_ID_GIC, state); 296 break; 297 case PD_RGA: 298 pmu_bus_idle_req(BUS_ID_RGA, state); 299 break; 300 case PD_VCODEC: 301 pmu_bus_idle_req(BUS_ID_VCODEC, state); 302 break; 303 case PD_VDU: 304 pmu_bus_idle_req(BUS_ID_VDU, state); 305 break; 306 case PD_IEP: 307 pmu_bus_idle_req(BUS_ID_IEP, state); 308 break; 309 case PD_USB3: 310 pmu_bus_idle_req(BUS_ID_USB3, state); 311 break; 312 case PD_PERIHP: 313 pmu_bus_idle_req(BUS_ID_PERIHP, state); 314 break; 315 default: 316 /* Do nothing in default case */ 317 break; 318 } 319 320 if (pd_state == pmu_pd_off) 321 pmu_power_domain_ctr(pd_id, pd_state); 322 323 out: 324 return 0; 325 } 326 327 static uint32_t pmu_powerdomain_state; 328 329 static void pmu_power_domains_suspend(void) 330 { 331 clk_gate_con_save(); 332 clk_gate_con_disable(); 333 qos_save(); 334 pmu_powerdomain_state = mmio_read_32(PMU_BASE + PMU_PWRDN_ST); 335 pmu_set_power_domain(PD_GPU, pmu_pd_off); 336 pmu_set_power_domain(PD_TCPD0, pmu_pd_off); 337 pmu_set_power_domain(PD_TCPD1, pmu_pd_off); 338 pmu_set_power_domain(PD_VO, pmu_pd_off); 339 pmu_set_power_domain(PD_ISP0, pmu_pd_off); 340 pmu_set_power_domain(PD_ISP1, pmu_pd_off); 341 pmu_set_power_domain(PD_HDCP, pmu_pd_off); 342 pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_off); 343 pmu_set_power_domain(PD_GMAC, pmu_pd_off); 344 pmu_set_power_domain(PD_EDP, pmu_pd_off); 345 pmu_set_power_domain(PD_IEP, pmu_pd_off); 346 pmu_set_power_domain(PD_RGA, pmu_pd_off); 347 pmu_set_power_domain(PD_VCODEC, pmu_pd_off); 348 pmu_set_power_domain(PD_VDU, pmu_pd_off); 349 pmu_set_power_domain(PD_USB3, pmu_pd_off); 350 pmu_set_power_domain(PD_EMMC, pmu_pd_off); 351 pmu_set_power_domain(PD_VIO, pmu_pd_off); 352 pmu_set_power_domain(PD_SD, pmu_pd_off); 353 pmu_set_power_domain(PD_PERIHP, pmu_pd_off); 354 clk_gate_con_restore(); 355 } 356 357 static void pmu_power_domains_resume(void) 358 { 359 clk_gate_con_save(); 360 clk_gate_con_disable(); 361 if (!(pmu_powerdomain_state & BIT(PD_VDU))) 362 
pmu_set_power_domain(PD_VDU, pmu_pd_on); 363 if (!(pmu_powerdomain_state & BIT(PD_VCODEC))) 364 pmu_set_power_domain(PD_VCODEC, pmu_pd_on); 365 if (!(pmu_powerdomain_state & BIT(PD_RGA))) 366 pmu_set_power_domain(PD_RGA, pmu_pd_on); 367 if (!(pmu_powerdomain_state & BIT(PD_IEP))) 368 pmu_set_power_domain(PD_IEP, pmu_pd_on); 369 if (!(pmu_powerdomain_state & BIT(PD_EDP))) 370 pmu_set_power_domain(PD_EDP, pmu_pd_on); 371 if (!(pmu_powerdomain_state & BIT(PD_GMAC))) 372 pmu_set_power_domain(PD_GMAC, pmu_pd_on); 373 if (!(pmu_powerdomain_state & BIT(PD_SDIOAUDIO))) 374 pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_on); 375 if (!(pmu_powerdomain_state & BIT(PD_HDCP))) 376 pmu_set_power_domain(PD_HDCP, pmu_pd_on); 377 if (!(pmu_powerdomain_state & BIT(PD_ISP1))) 378 pmu_set_power_domain(PD_ISP1, pmu_pd_on); 379 if (!(pmu_powerdomain_state & BIT(PD_ISP0))) 380 pmu_set_power_domain(PD_ISP0, pmu_pd_on); 381 if (!(pmu_powerdomain_state & BIT(PD_VO))) 382 pmu_set_power_domain(PD_VO, pmu_pd_on); 383 if (!(pmu_powerdomain_state & BIT(PD_TCPD1))) 384 pmu_set_power_domain(PD_TCPD1, pmu_pd_on); 385 if (!(pmu_powerdomain_state & BIT(PD_TCPD0))) 386 pmu_set_power_domain(PD_TCPD0, pmu_pd_on); 387 if (!(pmu_powerdomain_state & BIT(PD_GPU))) 388 pmu_set_power_domain(PD_GPU, pmu_pd_on); 389 if (!(pmu_powerdomain_state & BIT(PD_USB3))) 390 pmu_set_power_domain(PD_USB3, pmu_pd_on); 391 if (!(pmu_powerdomain_state & BIT(PD_EMMC))) 392 pmu_set_power_domain(PD_EMMC, pmu_pd_on); 393 if (!(pmu_powerdomain_state & BIT(PD_VIO))) 394 pmu_set_power_domain(PD_VIO, pmu_pd_on); 395 if (!(pmu_powerdomain_state & BIT(PD_SD))) 396 pmu_set_power_domain(PD_SD, pmu_pd_on); 397 if (!(pmu_powerdomain_state & BIT(PD_PERIHP))) 398 pmu_set_power_domain(PD_PERIHP, pmu_pd_on); 399 qos_restore(); 400 clk_gate_con_restore(); 401 } 402 403 void rk3399_flush_l2_b(void) 404 { 405 uint32_t wait_cnt = 0; 406 407 mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B)); 408 dsb(); 409 410 /* 411 * The Big cluster 
flush L2 cache took ~4ms by default, give 10ms for 412 * the enough margin. 413 */ 414 while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) & 415 BIT(L2_FLUSHDONE_CLUSTER_B))) { 416 wait_cnt++; 417 udelay(10); 418 if (wait_cnt == 10000 / 10) 419 WARN("L2 cache flush on suspend took longer than 10ms\n"); 420 } 421 422 mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B)); 423 } 424 425 static void pmu_scu_b_pwrdn(void) 426 { 427 uint32_t wait_cnt = 0; 428 429 if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) & 430 (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) != 431 (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) { 432 ERROR("%s: not all cpus is off\n", __func__); 433 return; 434 } 435 436 rk3399_flush_l2_b(); 437 438 mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG)); 439 440 while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) & 441 BIT(STANDBY_BY_WFIL2_CLUSTER_B))) { 442 wait_cnt++; 443 udelay(1); 444 if (wait_cnt >= MAX_WAIT_COUNT) 445 ERROR("%s:wait cluster-b l2(%x)\n", __func__, 446 mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST)); 447 } 448 } 449 450 static void pmu_scu_b_pwrup(void) 451 { 452 mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG)); 453 } 454 455 static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id) 456 { 457 assert(cpu_id < PLATFORM_CORE_COUNT); 458 return core_pm_cfg_info[cpu_id]; 459 } 460 461 static inline void set_cpus_pwr_domain_cfg_info(uint32_t cpu_id, uint32_t value) 462 { 463 assert(cpu_id < PLATFORM_CORE_COUNT); 464 core_pm_cfg_info[cpu_id] = value; 465 #if !USE_COHERENT_MEM 466 flush_dcache_range((uintptr_t)&core_pm_cfg_info[cpu_id], 467 sizeof(uint32_t)); 468 #endif 469 } 470 471 static int cpus_power_domain_on(uint32_t cpu_id) 472 { 473 uint32_t cfg_info; 474 uint32_t cpu_pd = PD_CPUL0 + cpu_id; 475 /* 476 * There are two ways to powering on or off on core. 
477 * 1) Control it power domain into on or off in PMU_PWRDN_CON reg 478 * 2) Enable the core power manage in PMU_CORE_PM_CON reg, 479 * then, if the core enter into wfi, it power domain will be 480 * powered off automatically. 481 */ 482 483 cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id); 484 485 if (cfg_info == core_pwr_pd) { 486 /* disable core_pm cfg */ 487 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 488 CORES_PM_DISABLE); 489 /* if the cores have be on, power off it firstly */ 490 if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) { 491 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 0); 492 pmu_power_domain_ctr(cpu_pd, pmu_pd_off); 493 } 494 495 pmu_power_domain_ctr(cpu_pd, pmu_pd_on); 496 } else { 497 if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) { 498 WARN("%s: cpu%d is not in off,!\n", __func__, cpu_id); 499 return -EINVAL; 500 } 501 502 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 503 BIT(core_pm_sft_wakeup_en)); 504 dsb(); 505 } 506 507 return 0; 508 } 509 510 static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg) 511 { 512 uint32_t cpu_pd; 513 uint32_t core_pm_value; 514 515 cpu_pd = PD_CPUL0 + cpu_id; 516 if (pmu_power_domain_st(cpu_pd) == pmu_pd_off) 517 return 0; 518 519 if (pd_cfg == core_pwr_pd) { 520 if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK)) 521 return -EINVAL; 522 523 /* disable core_pm cfg */ 524 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 525 CORES_PM_DISABLE); 526 527 set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg); 528 pmu_power_domain_ctr(cpu_pd, pmu_pd_off); 529 } else { 530 set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg); 531 532 core_pm_value = BIT(core_pm_en); 533 if (pd_cfg == core_pwr_wfi_int) 534 core_pm_value |= BIT(core_pm_int_wakeup_en); 535 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 536 core_pm_value); 537 dsb(); 538 } 539 540 return 0; 541 } 542 543 static inline void clst_pwr_domain_suspend(plat_local_state_t lvl_state) 544 { 545 uint32_t cpu_id = plat_my_core_pos(); 546 uint32_t pll_id, 
clst_st_msk, clst_st_chk_msk, pmu_st; 547 548 assert(cpu_id < PLATFORM_CORE_COUNT); 549 550 if (lvl_state == PLAT_MAX_OFF_STATE) { 551 if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT) { 552 pll_id = ALPLL_ID; 553 clst_st_msk = CLST_L_CPUS_MSK; 554 } else { 555 pll_id = ABPLL_ID; 556 clst_st_msk = CLST_B_CPUS_MSK << 557 PLATFORM_CLUSTER0_CORE_COUNT; 558 } 559 560 clst_st_chk_msk = clst_st_msk & ~(BIT(cpu_id)); 561 562 pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST); 563 564 pmu_st &= clst_st_msk; 565 566 if (pmu_st == clst_st_chk_msk) { 567 mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3), 568 PLL_SLOW_MODE); 569 570 clst_warmboot_data[pll_id] = PMU_CLST_RET; 571 572 pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST); 573 pmu_st &= clst_st_msk; 574 if (pmu_st == clst_st_chk_msk) 575 return; 576 /* 577 * it is mean that others cpu is up again, 578 * we must resume the cfg at once. 579 */ 580 mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3), 581 PLL_NOMAL_MODE); 582 clst_warmboot_data[pll_id] = 0; 583 } 584 } 585 } 586 587 static int clst_pwr_domain_resume(plat_local_state_t lvl_state) 588 { 589 uint32_t cpu_id = plat_my_core_pos(); 590 uint32_t pll_id, pll_st; 591 592 assert(cpu_id < PLATFORM_CORE_COUNT); 593 594 if (lvl_state == PLAT_MAX_OFF_STATE) { 595 if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT) 596 pll_id = ALPLL_ID; 597 else 598 pll_id = ABPLL_ID; 599 600 pll_st = mmio_read_32(CRU_BASE + CRU_PLL_CON(pll_id, 3)) >> 601 PLL_MODE_SHIFT; 602 603 if (pll_st != NORMAL_MODE) { 604 WARN("%s: clst (%d) is in error mode (%d)\n", 605 __func__, pll_id, pll_st); 606 return -1; 607 } 608 } 609 610 return 0; 611 } 612 613 static void nonboot_cpus_off(void) 614 { 615 uint32_t boot_cpu, cpu; 616 617 boot_cpu = plat_my_core_pos(); 618 619 /* turn off noboot cpus */ 620 for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) { 621 if (cpu == boot_cpu) 622 continue; 623 cpus_power_domain_off(cpu, core_pwr_pd); 624 } 625 } 626 627 int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint) 
628 { 629 uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr); 630 631 assert(cpu_id < PLATFORM_CORE_COUNT); 632 assert(cpuson_flags[cpu_id] == 0); 633 cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG; 634 cpuson_entry_point[cpu_id] = entrypoint; 635 dsb(); 636 637 cpus_power_domain_on(cpu_id); 638 639 return PSCI_E_SUCCESS; 640 } 641 642 int rockchip_soc_cores_pwr_dm_off(void) 643 { 644 uint32_t cpu_id = plat_my_core_pos(); 645 646 cpus_power_domain_off(cpu_id, core_pwr_wfi); 647 648 return PSCI_E_SUCCESS; 649 } 650 651 int rockchip_soc_hlvl_pwr_dm_off(uint32_t lvl, 652 plat_local_state_t lvl_state) 653 { 654 if (lvl == MPIDR_AFFLVL1) { 655 clst_pwr_domain_suspend(lvl_state); 656 } 657 658 return PSCI_E_SUCCESS; 659 } 660 661 int rockchip_soc_cores_pwr_dm_suspend(void) 662 { 663 uint32_t cpu_id = plat_my_core_pos(); 664 665 assert(cpu_id < PLATFORM_CORE_COUNT); 666 assert(cpuson_flags[cpu_id] == 0); 667 cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN; 668 cpuson_entry_point[cpu_id] = plat_get_sec_entrypoint(); 669 dsb(); 670 671 cpus_power_domain_off(cpu_id, core_pwr_wfi_int); 672 673 return PSCI_E_SUCCESS; 674 } 675 676 int rockchip_soc_hlvl_pwr_dm_suspend(uint32_t lvl, plat_local_state_t lvl_state) 677 { 678 if (lvl == MPIDR_AFFLVL1) { 679 clst_pwr_domain_suspend(lvl_state); 680 } 681 682 return PSCI_E_SUCCESS; 683 } 684 685 int rockchip_soc_cores_pwr_dm_on_finish(void) 686 { 687 uint32_t cpu_id = plat_my_core_pos(); 688 689 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 690 CORES_PM_DISABLE); 691 return PSCI_E_SUCCESS; 692 } 693 694 int rockchip_soc_hlvl_pwr_dm_on_finish(uint32_t lvl, 695 plat_local_state_t lvl_state) 696 { 697 if (lvl == MPIDR_AFFLVL1) { 698 clst_pwr_domain_resume(lvl_state); 699 } 700 701 return PSCI_E_SUCCESS; 702 } 703 704 int rockchip_soc_cores_pwr_dm_resume(void) 705 { 706 uint32_t cpu_id = plat_my_core_pos(); 707 708 /* Disable core_pm */ 709 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), CORES_PM_DISABLE); 710 711 return PSCI_E_SUCCESS; 712 } 713 714 
int rockchip_soc_hlvl_pwr_dm_resume(uint32_t lvl, plat_local_state_t lvl_state) 715 { 716 if (lvl == MPIDR_AFFLVL1) { 717 clst_pwr_domain_resume(lvl_state); 718 } 719 720 return PSCI_E_SUCCESS; 721 } 722 723 /** 724 * init_pmu_counts - Init timing counts in the PMU register area 725 * 726 * At various points when we power up or down parts of the system we need 727 * a delay to wait for power / clocks to become stable. The PMU has counters 728 * to help software do the delay properly. Basically, it works like this: 729 * - Software sets up counter values 730 * - When software turns on something in the PMU, the counter kicks off 731 * - The hardware sets a bit automatically when the counter has finished and 732 * software knows that the initialization is done. 733 * 734 * It's software's job to setup these counters. The hardware power on default 735 * for these settings is conservative, setting everything to 0x5dc0 736 * (750 ms in 32 kHz counts or 1 ms in 24 MHz counts). 737 * 738 * Note that some of these counters are only really used at suspend/resume 739 * time (for instance, that's the only time we turn off/on the oscillator) and 740 * others are used during normal runtime (like turning on/off a CPU or GPU) but 741 * it doesn't hurt to init everything at boot. 742 * 743 * Also note that these counters can run off the 32 kHz clock or the 24 MHz 744 * clock. While the 24 MHz clock can give us more precision, it's not always 745 * available (like when we turn the oscillator off at sleep time). The 746 * pmu_use_lf (lf: low freq) is available in power mode. Current understanding 747 * is that counts work like this: 748 * IF (pmu_use_lf == 0) || (power_mode_en == 0) 749 * use the 24M OSC for counts 750 * ELSE 751 * use the 32K OSC for counts 752 * 753 * Notes: 754 * - There is a separate bit for the PMU called PMU_24M_EN_CFG. At the moment 755 * we always keep that 0. This apparently choose between using the PLL as 756 * the source for the PMU vs. the 24M clock. 
If we ever set it to 1 we 757 * should consider how it affects these counts (if at all). 758 * - The power_mode_en is documented to auto-clear automatically when we leave 759 * "power mode". That's why most clocks are on 24M. Only timings used when 760 * in "power mode" are 32k. 761 * - In some cases the kernel may override these counts. 762 * 763 * The PMU_STABLE_CNT / PMU_OSC_CNT / PMU_PLLLOCK_CNT are important CNTs 764 * in power mode, we need to ensure that they are available. 765 */ 766 static void init_pmu_counts(void) 767 { 768 /* COUNTS FOR INSIDE POWER MODE */ 769 770 /* 771 * From limited testing, need PMU stable >= 2ms, but go overkill 772 * and choose 30 ms to match testing on past SoCs. Also let 773 * OSC have 30 ms for stabilization. 774 */ 775 mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_32K_CNT_MS(30)); 776 mmio_write_32(PMU_BASE + PMU_OSC_CNT, CYCL_32K_CNT_MS(30)); 777 778 /* Unclear what these should be; try 3 ms */ 779 mmio_write_32(PMU_BASE + PMU_WAKEUP_RST_CLR_CNT, CYCL_32K_CNT_MS(3)); 780 781 /* Unclear what this should be, but set the default explicitly */ 782 mmio_write_32(PMU_BASE + PMU_TIMEOUT_CNT, 0x5dc0); 783 784 /* COUNTS FOR OUTSIDE POWER MODE */ 785 786 /* Put something sorta conservative here until we know better */ 787 mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_24M_CNT_MS(3)); 788 mmio_write_32(PMU_BASE + PMU_DDRIO_PWRON_CNT, CYCL_24M_CNT_MS(1)); 789 mmio_write_32(PMU_BASE + PMU_CENTER_PWRDN_CNT, CYCL_24M_CNT_MS(1)); 790 mmio_write_32(PMU_BASE + PMU_CENTER_PWRUP_CNT, CYCL_24M_CNT_MS(1)); 791 792 /* 793 * when we enable PMU_CLR_PERILP, it will shut down the SRAM, but 794 * M0 code run in SRAM, and we need it to check whether cpu enter 795 * FSM status, so we must wait M0 finish their code and enter WFI, 796 * then we can shutdown SRAM, according FSM order: 797 * ST_NORMAL->..->ST_SCU_L_PWRDN->..->ST_CENTER_PWRDN->ST_PERILP_PWRDN 798 * we can add delay when shutdown ST_SCU_L_PWRDN to guarantee M0 get 799 * the FSM status and 
enter WFI, then enable PMU_CLR_PERILP. 800 */ 801 mmio_write_32(PMU_BASE + PMU_SCU_L_PWRDN_CNT, CYCL_24M_CNT_MS(5)); 802 mmio_write_32(PMU_BASE + PMU_SCU_L_PWRUP_CNT, CYCL_24M_CNT_US(1)); 803 804 /* 805 * Set CPU/GPU to 1 us. 806 * 807 * NOTE: Even though ATF doesn't configure the GPU we'll still setup 808 * counts here. After all ATF controls all these other bits and also 809 * chooses which clock these counters use. 810 */ 811 mmio_write_32(PMU_BASE + PMU_SCU_B_PWRDN_CNT, CYCL_24M_CNT_US(1)); 812 mmio_write_32(PMU_BASE + PMU_SCU_B_PWRUP_CNT, CYCL_24M_CNT_US(1)); 813 mmio_write_32(PMU_BASE + PMU_GPU_PWRDN_CNT, CYCL_24M_CNT_US(1)); 814 mmio_write_32(PMU_BASE + PMU_GPU_PWRUP_CNT, CYCL_24M_CNT_US(1)); 815 } 816 817 static uint32_t clk_ddrc_save; 818 819 static void sys_slp_config(void) 820 { 821 uint32_t slp_mode_cfg = 0; 822 823 /* keep enabling clk_ddrc_bpll_src_en gate for DDRC */ 824 clk_ddrc_save = mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(3)); 825 mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(3), WMSK_BIT(1)); 826 827 prepare_abpll_for_ddrctrl(); 828 sram_func_set_ddrctl_pll(ABPLL_ID); 829 830 mmio_write_32(GRF_BASE + GRF_SOC_CON4, CCI_FORCE_WAKEUP); 831 mmio_write_32(PMU_BASE + PMU_CCI500_CON, 832 BIT_WITH_WMSK(PMU_CLR_PREQ_CCI500_HW) | 833 BIT_WITH_WMSK(PMU_CLR_QREQ_CCI500_HW) | 834 BIT_WITH_WMSK(PMU_QGATING_CCI500_CFG)); 835 836 mmio_write_32(PMU_BASE + PMU_ADB400_CON, 837 BIT_WITH_WMSK(PMU_CLR_CORE_L_HW) | 838 BIT_WITH_WMSK(PMU_CLR_CORE_L_2GIC_HW) | 839 BIT_WITH_WMSK(PMU_CLR_GIC2_CORE_L_HW)); 840 841 slp_mode_cfg = BIT(PMU_PWR_MODE_EN) | 842 BIT(PMU_WKUP_RST_EN) | 843 BIT(PMU_INPUT_CLAMP_EN) | 844 BIT(PMU_POWER_OFF_REQ_CFG) | 845 BIT(PMU_CPU0_PD_EN) | 846 BIT(PMU_L2_FLUSH_EN) | 847 BIT(PMU_L2_IDLE_EN) | 848 BIT(PMU_SCU_PD_EN) | 849 BIT(PMU_CCI_PD_EN) | 850 BIT(PMU_CLK_CORE_SRC_GATE_EN) | 851 BIT(PMU_ALIVE_USE_LF) | 852 BIT(PMU_SREF0_ENTER_EN) | 853 BIT(PMU_SREF1_ENTER_EN) | 854 BIT(PMU_DDRC0_GATING_EN) | 855 BIT(PMU_DDRC1_GATING_EN) | 856 BIT(PMU_DDRIO0_RET_EN) | 
857 BIT(PMU_DDRIO0_RET_DE_REQ) | 858 BIT(PMU_DDRIO1_RET_EN) | 859 BIT(PMU_DDRIO1_RET_DE_REQ) | 860 BIT(PMU_CENTER_PD_EN) | 861 BIT(PMU_PERILP_PD_EN) | 862 BIT(PMU_CLK_PERILP_SRC_GATE_EN) | 863 BIT(PMU_PLL_PD_EN) | 864 BIT(PMU_CLK_CENTER_SRC_GATE_EN) | 865 BIT(PMU_OSC_DIS) | 866 BIT(PMU_PMU_USE_LF); 867 868 mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, BIT(PMU_GPIO_WKUP_EN)); 869 mmio_write_32(PMU_BASE + PMU_PWRMODE_CON, slp_mode_cfg); 870 871 mmio_write_32(PMU_BASE + PMU_PLL_CON, PLL_PD_HW); 872 mmio_write_32(PMUGRF_BASE + PMUGRF_SOC_CON0, EXTERNAL_32K); 873 mmio_write_32(PMUGRF_BASE, IOMUX_CLK_32K); /* 32k iomux */ 874 } 875 876 static void set_hw_idle(uint32_t hw_idle) 877 { 878 mmio_setbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle); 879 } 880 881 static void clr_hw_idle(uint32_t hw_idle) 882 { 883 mmio_clrbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle); 884 } 885 886 static uint32_t iomux_status[12]; 887 static uint32_t pull_mode_status[12]; 888 static uint32_t gpio_direction[3]; 889 static uint32_t gpio_2_4_clk_gate; 890 891 static void suspend_apio(void) 892 { 893 struct apio_info *suspend_apio; 894 int i; 895 896 suspend_apio = plat_get_rockchip_suspend_apio(); 897 898 if (!suspend_apio) 899 return; 900 901 /* save gpio2 ~ gpio4 iomux and pull mode */ 902 for (i = 0; i < 12; i++) { 903 iomux_status[i] = mmio_read_32(GRF_BASE + 904 GRF_GPIO2A_IOMUX + i * 4); 905 pull_mode_status[i] = mmio_read_32(GRF_BASE + 906 GRF_GPIO2A_P + i * 4); 907 } 908 909 /* store gpio2 ~ gpio4 clock gate state */ 910 gpio_2_4_clk_gate = (mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(31)) >> 911 PCLK_GPIO2_GATE_SHIFT) & 0x07; 912 913 /* enable gpio2 ~ gpio4 clock gate */ 914 mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31), 915 BITS_WITH_WMASK(0, 0x07, PCLK_GPIO2_GATE_SHIFT)); 916 917 /* save gpio2 ~ gpio4 direction */ 918 gpio_direction[0] = mmio_read_32(GPIO2_BASE + 0x04); 919 gpio_direction[1] = mmio_read_32(GPIO3_BASE + 0x04); 920 gpio_direction[2] = mmio_read_32(GPIO4_BASE + 0x04); 921 922 /* apio1 charge 
gpio3a0 ~ gpio3c7 */ 923 if (suspend_apio->apio1) { 924 925 /* set gpio3a0 ~ gpio3c7 iomux to gpio */ 926 mmio_write_32(GRF_BASE + GRF_GPIO3A_IOMUX, 927 REG_SOC_WMSK | GRF_IOMUX_GPIO); 928 mmio_write_32(GRF_BASE + GRF_GPIO3B_IOMUX, 929 REG_SOC_WMSK | GRF_IOMUX_GPIO); 930 mmio_write_32(GRF_BASE + GRF_GPIO3C_IOMUX, 931 REG_SOC_WMSK | GRF_IOMUX_GPIO); 932 933 /* set gpio3a0 ~ gpio3c7 pull mode to pull none */ 934 mmio_write_32(GRF_BASE + GRF_GPIO3A_P, REG_SOC_WMSK | 0); 935 mmio_write_32(GRF_BASE + GRF_GPIO3B_P, REG_SOC_WMSK | 0); 936 mmio_write_32(GRF_BASE + GRF_GPIO3C_P, REG_SOC_WMSK | 0); 937 938 /* set gpio3a0 ~ gpio3c7 to input */ 939 mmio_clrbits_32(GPIO3_BASE + 0x04, 0x00ffffff); 940 } 941 942 /* apio2 charge gpio2a0 ~ gpio2b4 */ 943 if (suspend_apio->apio2) { 944 945 /* set gpio2a0 ~ gpio2b4 iomux to gpio */ 946 mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX, 947 REG_SOC_WMSK | GRF_IOMUX_GPIO); 948 mmio_write_32(GRF_BASE + GRF_GPIO2B_IOMUX, 949 REG_SOC_WMSK | GRF_IOMUX_GPIO); 950 951 /* set gpio2a0 ~ gpio2b4 pull mode to pull none */ 952 mmio_write_32(GRF_BASE + GRF_GPIO2A_P, REG_SOC_WMSK | 0); 953 mmio_write_32(GRF_BASE + GRF_GPIO2B_P, REG_SOC_WMSK | 0); 954 955 /* set gpio2a0 ~ gpio2b4 to input */ 956 mmio_clrbits_32(GPIO2_BASE + 0x04, 0x00001fff); 957 } 958 959 /* apio3 charge gpio2c0 ~ gpio2d4*/ 960 if (suspend_apio->apio3) { 961 962 /* set gpio2a0 ~ gpio2b4 iomux to gpio */ 963 mmio_write_32(GRF_BASE + GRF_GPIO2C_IOMUX, 964 REG_SOC_WMSK | GRF_IOMUX_GPIO); 965 mmio_write_32(GRF_BASE + GRF_GPIO2D_IOMUX, 966 REG_SOC_WMSK | GRF_IOMUX_GPIO); 967 968 /* set gpio2c0 ~ gpio2d4 pull mode to pull none */ 969 mmio_write_32(GRF_BASE + GRF_GPIO2C_P, REG_SOC_WMSK | 0); 970 mmio_write_32(GRF_BASE + GRF_GPIO2D_P, REG_SOC_WMSK | 0); 971 972 /* set gpio2c0 ~ gpio2d4 to input */ 973 mmio_clrbits_32(GPIO2_BASE + 0x04, 0x1fff0000); 974 } 975 976 /* apio4 charge gpio4c0 ~ gpio4c7, gpio4d0 ~ gpio4d6 */ 977 if (suspend_apio->apio4) { 978 979 /* set gpio4c0 ~ gpio4d6 iomux to gpio */ 
980 mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX, 981 REG_SOC_WMSK | GRF_IOMUX_GPIO); 982 mmio_write_32(GRF_BASE + GRF_GPIO4D_IOMUX, 983 REG_SOC_WMSK | GRF_IOMUX_GPIO); 984 985 /* set gpio4c0 ~ gpio4d6 pull mode to pull none */ 986 mmio_write_32(GRF_BASE + GRF_GPIO4C_P, REG_SOC_WMSK | 0); 987 mmio_write_32(GRF_BASE + GRF_GPIO4D_P, REG_SOC_WMSK | 0); 988 989 /* set gpio4c0 ~ gpio4d6 to input */ 990 mmio_clrbits_32(GPIO4_BASE + 0x04, 0x7fff0000); 991 } 992 993 /* apio5 charge gpio3d0 ~ gpio3d7, gpio4a0 ~ gpio4a7*/ 994 if (suspend_apio->apio5) { 995 /* set gpio3d0 ~ gpio4a7 iomux to gpio */ 996 mmio_write_32(GRF_BASE + GRF_GPIO3D_IOMUX, 997 REG_SOC_WMSK | GRF_IOMUX_GPIO); 998 mmio_write_32(GRF_BASE + GRF_GPIO4A_IOMUX, 999 REG_SOC_WMSK | GRF_IOMUX_GPIO); 1000 1001 /* set gpio3d0 ~ gpio4a7 pull mode to pull none */ 1002 mmio_write_32(GRF_BASE + GRF_GPIO3D_P, REG_SOC_WMSK | 0); 1003 mmio_write_32(GRF_BASE + GRF_GPIO4A_P, REG_SOC_WMSK | 0); 1004 1005 /* set gpio4c0 ~ gpio4d6 to input */ 1006 mmio_clrbits_32(GPIO3_BASE + 0x04, 0xff000000); 1007 mmio_clrbits_32(GPIO4_BASE + 0x04, 0x000000ff); 1008 } 1009 } 1010 1011 static void resume_apio(void) 1012 { 1013 struct apio_info *suspend_apio; 1014 int i; 1015 1016 suspend_apio = plat_get_rockchip_suspend_apio(); 1017 1018 if (!suspend_apio) 1019 return; 1020 1021 for (i = 0; i < 12; i++) { 1022 mmio_write_32(GRF_BASE + GRF_GPIO2A_P + i * 4, 1023 REG_SOC_WMSK | pull_mode_status[i]); 1024 mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX + i * 4, 1025 REG_SOC_WMSK | iomux_status[i]); 1026 } 1027 1028 /* set gpio2 ~ gpio4 direction back to store value */ 1029 mmio_write_32(GPIO2_BASE + 0x04, gpio_direction[0]); 1030 mmio_write_32(GPIO3_BASE + 0x04, gpio_direction[1]); 1031 mmio_write_32(GPIO4_BASE + 0x04, gpio_direction[2]); 1032 1033 /* set gpio2 ~ gpio4 clock gate back to store value */ 1034 mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31), 1035 BITS_WITH_WMASK(gpio_2_4_clk_gate, 0x07, 1036 PCLK_GPIO2_GATE_SHIFT)); 1037 } 1038 1039 static void 
suspend_gpio(void)
{
	/* NOTE(review): this local shadows the enclosing function's name */
	struct gpio_info *suspend_gpio;
	uint32_t count;
	int i;

	suspend_gpio = plat_get_rockchip_suspend_gpio(&count);

	/* assert each suspend gpio in array order, 1us apart */
	for (i = 0; i < count; i++) {
		gpio_set_value(suspend_gpio[i].index, suspend_gpio[i].polarity);
		gpio_set_direction(suspend_gpio[i].index, GPIO_DIR_OUT);
		udelay(1);
	}
}

/*
 * Undo suspend_gpio(): drive every suspend gpio to the opposite
 * polarity, walking the array in reverse order, 1us apart.
 */
static void resume_gpio(void)
{
	struct gpio_info *suspend_gpio;
	uint32_t count;
	int i;

	suspend_gpio = plat_get_rockchip_suspend_gpio(&count);

	for (i = count - 1; i >= 0; i--) {
		gpio_set_value(suspend_gpio[i].index,
			       !suspend_gpio[i].polarity);
		gpio_set_direction(suspend_gpio[i].index, GPIO_DIR_OUT);
		udelay(1);
	}
}

/*
 * Copy the BL31 SRAM sections (text, data, incbin payload) into the
 * store_sram buffer so sram_restore() can put them back on resume.
 * Layout in store_sram: [text | data | incbin], back to back.
 */
void sram_save(void)
{
	size_t text_size = (char *)&__bl31_sram_text_real_end -
			   (char *)&__bl31_sram_text_start;
	size_t data_size = (char *)&__bl31_sram_data_real_end -
			   (char *)&__bl31_sram_data_start;
	size_t incbin_size = (char *)&__sram_incbin_real_end -
			     (char *)&__sram_incbin_start;

	memcpy(&store_sram[0], &__bl31_sram_text_start, text_size);
	memcpy(&store_sram[text_size], &__bl31_sram_data_start, data_size);
	memcpy(&store_sram[text_size + data_size], &__sram_incbin_start,
	       incbin_size);
}

/* Inverse of sram_save(): copy the saved image back into SRAM. */
void sram_restore(void)
{
	size_t text_size = (char *)&__bl31_sram_text_real_end -
			   (char *)&__bl31_sram_text_start;
	size_t data_size = (char *)&__bl31_sram_data_real_end -
			   (char *)&__bl31_sram_data_start;
	size_t incbin_size = (char *)&__sram_incbin_real_end -
			     (char *)&__sram_incbin_start;

	memcpy(&__bl31_sram_text_start, &store_sram[0], text_size);
	memcpy(&__bl31_sram_data_start, &store_sram[text_size], data_size);
	memcpy(&__sram_incbin_start, &store_sram[text_size + data_size],
	       incbin_size);
}

/* Shadow copy of the debug-uart registers preserved across suspend. */
struct uart_debug {
	uint32_t uart_dll;	/* divisor latch low */
	uint32_t uart_dlh;	/* divisor latch high */
	uint32_t uart_ier;	/* interrupt enable */
	uint32_t uart_fcr;	/* FIFO control; never saved, see resume_uart() */
	uint32_t uart_mcr;	/* modem control */
	uint32_t uart_lcr;	/* line control */
};

/*
 * 8250/16550-style register offsets, 4 bytes apart. DLL/DLH share
 * offsets with the normal registers and are exposed via LCR.DLAB.
 */
#define UART_DLL	0x00
#define UART_DLH	0x04
#define UART_IER	0x04	/* same offset as DLH, selected by DLAB=0 */
#define UART_FCR	0x08
#define UART_LCR	0x0c
#define UART_MCR	0x10
#define UARTSRR		0x88	/* software reset register */

#define UART_RESET	BIT(0)
#define UARTFCR_FIFOEN	BIT(0)
#define RCVR_FIFO_RESET	BIT(1)
#define XMIT_FIFO_RESET	BIT(2)
#define DIAGNOSTIC_MODE	BIT(4)	/* MCR loopback bit on 16550-style uarts */
#define UARTLCR_DLAB	BIT(7)	/* divisor latch access bit */

static struct uart_debug uart_save;

/*
 * Save the debug-uart state: LCR, IER and MCR directly, then set
 * LCR.DLAB to expose the divisor latches, save DLL/DLH, and finally
 * put the original LCR back.
 */
void suspend_uart(void)
{
	uart_save.uart_lcr = mmio_read_32(PLAT_RK_UART_BASE + UART_LCR);
	uart_save.uart_ier = mmio_read_32(PLAT_RK_UART_BASE + UART_IER);
	uart_save.uart_mcr = mmio_read_32(PLAT_RK_UART_BASE + UART_MCR);
	mmio_write_32(PLAT_RK_UART_BASE + UART_LCR,
		      uart_save.uart_lcr | UARTLCR_DLAB);
	uart_save.uart_dll = mmio_read_32(PLAT_RK_UART_BASE + UART_DLL);
	uart_save.uart_dlh = mmio_read_32(PLAT_RK_UART_BASE + UART_DLH);
	mmio_write_32(PLAT_RK_UART_BASE + UART_LCR, uart_save.uart_lcr);
}

/*
 * Bring the debug uart back after suspend: reset the uart and both
 * FIFOs, park MCR in diagnostic (loopback) mode while the saved
 * divisor is reprogrammed through DLAB, then restore LCR, IER, the
 * FIFO enable and finally the saved MCR. FCR is write-only here and
 * is simply re-enabled rather than restored.
 */
void resume_uart(void)
{
	uint32_t uart_lcr;

	mmio_write_32(PLAT_RK_UART_BASE + UARTSRR,
		      XMIT_FIFO_RESET | RCVR_FIFO_RESET | UART_RESET);

	uart_lcr = mmio_read_32(PLAT_RK_UART_BASE + UART_LCR);
	mmio_write_32(PLAT_RK_UART_BASE + UART_MCR, DIAGNOSTIC_MODE);
	mmio_write_32(PLAT_RK_UART_BASE + UART_LCR, uart_lcr | UARTLCR_DLAB);
	mmio_write_32(PLAT_RK_UART_BASE + UART_DLL, uart_save.uart_dll);
	mmio_write_32(PLAT_RK_UART_BASE + UART_DLH, uart_save.uart_dlh);
	mmio_write_32(PLAT_RK_UART_BASE + UART_LCR, uart_save.uart_lcr);
	mmio_write_32(PLAT_RK_UART_BASE + UART_IER, uart_save.uart_ier);
	mmio_write_32(PLAT_RK_UART_BASE + UART_FCR, UARTFCR_FIFOEN);
	mmio_write_32(PLAT_RK_UART_BASE + UART_MCR, uart_save.uart_mcr);
}

/*
 * Snapshot the GRF USBPHY0/1 control registers; must run before the
 * PERIHP power domain is shut down (see rockchip_soc_sys_pwr_dm_suspend).
 */
void save_usbphy(void)
{
	store_usbphy0[0] = mmio_read_32(GRF_BASE +
					GRF_USBPHY0_CTRL0);
	store_usbphy0[1] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL2);
	store_usbphy0[2] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL3);
	store_usbphy0[3] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL12);
	store_usbphy0[4] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL13);
	store_usbphy0[5] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL15);
	store_usbphy0[6] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL16);

	store_usbphy1[0] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL0);
	store_usbphy1[1] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL2);
	store_usbphy1[2] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL3);
	store_usbphy1[3] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL12);
	store_usbphy1[4] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL13);
	store_usbphy1[5] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL15);
	store_usbphy1[6] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL16);
}

/*
 * Write the USBPHY0/1 control values captured by save_usbphy() back to
 * the GRF. REG_SOC_WMSK is OR-ed in so the register's write-mask half
 * accepts the whole value (Rockchip GRF write-enable convention -
 * presumably the high 16 bits; confirm against the TRM).
 */
void restore_usbphy(void)
{
	mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL0,
		      REG_SOC_WMSK | store_usbphy0[0]);
	mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL2,
		      REG_SOC_WMSK | store_usbphy0[1]);
	mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL3,
		      REG_SOC_WMSK | store_usbphy0[2]);
	mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL12,
		      REG_SOC_WMSK | store_usbphy0[3]);
	mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL13,
		      REG_SOC_WMSK | store_usbphy0[4]);
	mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL15,
		      REG_SOC_WMSK | store_usbphy0[5]);
	mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL16,
		      REG_SOC_WMSK | store_usbphy0[6]);

	mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL0,
		      REG_SOC_WMSK | store_usbphy1[0]);
	mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL2,
		      REG_SOC_WMSK | store_usbphy1[1]);
	mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL3,
		      REG_SOC_WMSK | store_usbphy1[2]);
	mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL12,
		      REG_SOC_WMSK | store_usbphy1[3]);
	mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL13,
		      REG_SOC_WMSK | store_usbphy1[4]);
	mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL15,
		      REG_SOC_WMSK | store_usbphy1[5]);
	mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL16,
		      REG_SOC_WMSK | store_usbphy1[6]);
}

/* Save GRF_SOC_CON 0-4 and 7, the four DDRC0_CON registers and IO_VSEL. */
void grf_register_save(void)
{
	int i;

	store_grf_soc_con0 = mmio_read_32(GRF_BASE + GRF_SOC_CON(0));
	store_grf_soc_con1 = mmio_read_32(GRF_BASE + GRF_SOC_CON(1));
	store_grf_soc_con2 = mmio_read_32(GRF_BASE + GRF_SOC_CON(2));
	store_grf_soc_con3 = mmio_read_32(GRF_BASE + GRF_SOC_CON(3));
	store_grf_soc_con4 = mmio_read_32(GRF_BASE + GRF_SOC_CON(4));
	store_grf_soc_con7 = mmio_read_32(GRF_BASE + GRF_SOC_CON(7));

	for (i = 0; i < 4; i++)
		store_grf_ddrc_con[i] =
			mmio_read_32(GRF_BASE + GRF_DDRC0_CON0 + i * 4);

	store_grf_io_vsel = mmio_read_32(GRF_BASE + GRF_IO_VSEL);
}

/*
 * Write back everything grf_register_save() captured, using the GRF
 * write-mask convention (REG_SOC_WMSK) so all bits take effect.
 */
void grf_register_restore(void)
{
	int i;

	mmio_write_32(GRF_BASE + GRF_SOC_CON(0),
		      REG_SOC_WMSK | store_grf_soc_con0);
	mmio_write_32(GRF_BASE + GRF_SOC_CON(1),
		      REG_SOC_WMSK | store_grf_soc_con1);
	mmio_write_32(GRF_BASE + GRF_SOC_CON(2),
		      REG_SOC_WMSK | store_grf_soc_con2);
	mmio_write_32(GRF_BASE + GRF_SOC_CON(3),
		      REG_SOC_WMSK | store_grf_soc_con3);
	mmio_write_32(GRF_BASE + GRF_SOC_CON(4),
		      REG_SOC_WMSK | store_grf_soc_con4);
	mmio_write_32(GRF_BASE + GRF_SOC_CON(7),
		      REG_SOC_WMSK | store_grf_soc_con7);

	for (i = 0; i < 4; i++)
		mmio_write_32(GRF_BASE + GRF_DDRC0_CON0 + i * 4,
			      REG_SOC_WMSK | store_grf_ddrc_con[i]);

	mmio_write_32(GRF_BASE + GRF_IO_VSEL, REG_SOC_WMSK | store_grf_io_vsel);
}

/* Save every CRU register from offset 0 up to CRU_SDIO0_CON1, inclusive. */
void cru_register_save(void)
{
	int i;

	for (i = 0; i <= CRU_SDIO0_CON1; i = i + 4)
		store_cru[i / 4] = mmio_read_32(CRU_BASE + i);
}

/*
 * Restore the CRU registers captured by cru_register_save(), skipping
 * or special-casing the registers called out below.
 */
void cru_register_restore(void)
{
	int i;

	for (i = 0; i <= CRU_SDIO0_CON1; i = i + 4) {

		/*
		 * since
		 * DPLL and CRU_CLKSEL_CON6 have been restored in
		 * dmc_resume, and ABPLL will be restored later, so
		 * skip them here
		 */
		if ((i == CRU_CLKSEL_CON6) ||
		    (i >= CRU_PLL_CON(ABPLL_ID, 0) &&
		     i <= CRU_PLL_CON(DPLL_ID, 5)))
			continue;

		/* the PLL CON2 registers are written without a write mask */
		if ((i == CRU_PLL_CON(ALPLL_ID, 2)) ||
		    (i == CRU_PLL_CON(CPLL_ID, 2)) ||
		    (i == CRU_PLL_CON(GPLL_ID, 2)) ||
		    (i == CRU_PLL_CON(NPLL_ID, 2)) ||
		    (i == CRU_PLL_CON(VPLL_ID, 2)))
			mmio_write_32(CRU_BASE + i, store_cru[i / 4]);
		/*
		 * CRU_GLB_CNT_TH and CRU_CLKSEL_CON97~CRU_CLKSEL_CON107
		 * do not need the high 16bit write mask either
		 */
		else if ((i > 0x27c && i < 0x2b0) || (i == 0x508))
			mmio_write_32(CRU_BASE + i, store_cru[i / 4]);
		else
			mmio_write_32(CRU_BASE + i,
				      REG_SOC_WMSK | store_cru[i / 4]);
	}
}

/* Save the first two registers (offsets 0x0 and 0x4) of both watchdogs. */
void wdt_register_save(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		store_wdt0[i] = mmio_read_32(WDT0_BASE + i * 4);
		store_wdt1[i] = mmio_read_32(WDT1_BASE + i * 4);
	}
}

/*
 * Restore both watchdogs, writing offset 0x4 before 0x0 (reverse of the
 * save order), then kick the counter-restart register so the watchdog
 * does not fire immediately after resume.
 */
void wdt_register_restore(void)
{
	int i;

	for (i = 1; i >= 0; i--) {
		mmio_write_32(WDT0_BASE + i * 4, store_wdt0[i]);
		mmio_write_32(WDT1_BASE + i * 4, store_wdt1[i]);
	}

	/*
	 * write 0x76 to cnt_restart to keep watchdog alive
	 * (0x76 is presumably the DW watchdog restart magic - confirm)
	 */
	mmio_write_32(WDT0_BASE + 0x0c, 0x76);
	mmio_write_32(WDT1_BASE + 0x0c, 0x76);
}

/*
 * System-suspend entry for the whole SoC: quiesce DDR/DMC and the
 * cluster-B SCU, save GIC and peripheral state, power down the PMU
 * power domains and idle the listed bus masters. Always returns 0.
 */
int rockchip_soc_sys_pwr_dm_suspend(void)
{
	uint32_t wait_cnt = 0;
	uint32_t status = 0;

	ddr_prepare_for_sys_suspend();
	dmc_suspend();
	pmu_scu_b_pwrdn();

	/* save GIC state; resume re-initializes the GIC from it */
	gicv3_rdistif_save(plat_my_core_pos(), &rdist_ctx);
	gicv3_distif_save(&dist_ctx);

	/* need to save usbphy before shutdown PERIHP PD */
	save_usbphy();

	pmu_power_domains_suspend();
	set_hw_idle(BIT(PMU_CLR_CENTER1) |
		    BIT(PMU_CLR_ALIVE) |
		    BIT(PMU_CLR_MSCH0) |
		    BIT(PMU_CLR_MSCH1) |
		    BIT(PMU_CLR_CCIM0) |
		    BIT(PMU_CLR_CCIM1) |
		    BIT(PMU_CLR_CENTER) |
		    BIT(PMU_CLR_PERILP) |
		    BIT(PMU_CLR_PERILPM0) |
		    BIT(PMU_CLR_GIC));
	set_pmu_rsthold();
	sys_slp_config();

	/* hand the sleep sequencing over to the PMU M0 firmware */
	m0_configure_execute_addr(M0PMU_BINCODE_BASE);
	m0_start();

	pmu_sgrf_rst_hld();

	/* warm boots while suspended enter through pmu_cpuson_entrypoint */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      ((uintptr_t)&pmu_cpuson_entrypoint >>
			CPU_BOOT_ADDR_ALIGN) | CPU_BOOT_ADDR_WMASK);

	/* request the three cluster-B <-> GIC ADB400 power-down handshakes */
	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) |
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_SW) |
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_GIC2_CORE_B_SW));
	dsb();
	status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) |
		BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) |
		BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST);
	/* poll up to MAX_WAIT_COUNT us until all three requests ack */
	while ((mmio_read_32(PMU_BASE +
	       PMU_ADB400_ST) & status) != status) {
		wait_cnt++;
		if (wait_cnt >= MAX_WAIT_COUNT) {
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_ADB400_ST));
			panic();
		}
		udelay(1);
	}
	mmio_setbits_32(PMU_BASE + PMU_PWRDN_CON, BIT(PMU_SCU_B_PWRDWN_EN));

	wdt_register_save();
	secure_watchdog_gate();

	/*
	 * Disabling PLLs/PWM/DVFS is approaching WFI which is
	 * the last step in suspend.
	 */
	disable_dvfs_plls();
	disable_pwms();
	disable_nodvfs_plls();

	suspend_apio();
	suspend_gpio();
	suspend_uart();
	grf_register_save();
	cru_register_save();
	sram_save();
	plat_rockchip_save_gpio();

	return 0;
}

/*
 * System-resume counterpart of rockchip_soc_sys_pwr_dm_suspend():
 * restores registers, clocks, power domains and the GIC in roughly the
 * reverse order of suspend. Always returns 0.
 */
int rockchip_soc_sys_pwr_dm_resume(void)
{
	uint32_t wait_cnt = 0;
	uint32_t status = 0;

	plat_rockchip_restore_gpio();
	cru_register_restore();
	grf_register_restore();
	wdt_register_restore();
	resume_uart();
	resume_apio();
	resume_gpio();
	enable_nodvfs_plls();
	enable_pwms();
	/* PWM regulators take time to come up; give 300us to be safe. */
	udelay(300);
	enable_dvfs_plls();

	secure_sgrf_init();
	secure_sgrf_ddr_rgn_init();

	/* restore clk_ddrc_bpll_src_en gate */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(3),
		      BITS_WITH_WMASK(clk_ddrc_save, 0xff, 0));

	/*
	 * The wakeup status is not cleared by itself, we need to clear it
	 * manually. Otherwise we will always query some interrupt next time.
	 *
	 * NOTE: If the kernel needs to query this, we might want to stash it
	 * somewhere.
	 */
	mmio_write_32(PMU_BASE + PMU_WAKEUP_STATUS, 0xffffffff);
	mmio_write_32(PMU_BASE + PMU_WKUP_CFG4, 0x00);

	/* point warm boots back at the normal warm-boot entry */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	mmio_write_32(PMU_BASE + PMU_CCI500_CON,
		      WMSK_BIT(PMU_CLR_PREQ_CCI500_HW) |
		      WMSK_BIT(PMU_CLR_QREQ_CCI500_HW) |
		      WMSK_BIT(PMU_QGATING_CCI500_CFG));
	dsb();
	mmio_clrbits_32(PMU_BASE + PMU_PWRDN_CON,
			BIT(PMU_SCU_B_PWRDWN_EN));

	/* release the ADB400 power-down requests asserted at suspend */
	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) |
		      WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_SW) |
		      WMSK_BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW) |
		      WMSK_BIT(PMU_CLR_CORE_L_HW) |
		      WMSK_BIT(PMU_CLR_CORE_L_2GIC_HW) |
		      WMSK_BIT(PMU_CLR_GIC2_CORE_L_HW));

	status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) |
		BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) |
		BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST);

	/* wait until every request status bit has deasserted */
	while ((mmio_read_32(PMU_BASE +
	       PMU_ADB400_ST) & status)) {
		wait_cnt++;
		if (wait_cnt >= MAX_WAIT_COUNT) {
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_ADB400_ST));
			panic();
		}
		udelay(1);
	}

	pmu_scu_b_pwrup();
	pmu_power_domains_resume();

	restore_abpll();
	clr_hw_idle(BIT(PMU_CLR_CENTER1) |
		    BIT(PMU_CLR_ALIVE) |
		    BIT(PMU_CLR_MSCH0) |
		    BIT(PMU_CLR_MSCH1) |
		    BIT(PMU_CLR_CCIM0) |
		    BIT(PMU_CLR_CCIM1) |
		    BIT(PMU_CLR_CENTER) |
		    BIT(PMU_CLR_PERILP) |
		    BIT(PMU_CLR_PERILPM0) |
		    BIT(PMU_CLR_GIC));

	/* re-initialize the GIC from the state saved at suspend */
	gicv3_distif_init_restore(&dist_ctx);
	gicv3_rdistif_init_restore(plat_my_core_pos(), &rdist_ctx);
	plat_rockchip_gic_cpuif_enable();
	m0_stop();

	restore_usbphy();

	ddr_prepare_for_sys_resume();

	return 0;
}

/*
 * Reset the board, preferably by asserting the platform reset gpio;
 * fall back to a SoC-level software reset when no gpio is registered.
 * Never returns - spins until the reset takes effect.
 */
void __dead2 rockchip_soc_soft_reset(void)
{
	struct gpio_info *rst_gpio;

	rst_gpio = plat_get_rockchip_gpio_reset();

	if (rst_gpio) {
		gpio_set_direction(rst_gpio->index, GPIO_DIR_OUT);
		gpio_set_value(rst_gpio->index, rst_gpio->polarity);
	} else {
		soc_global_soft_reset();
	}

	while (1)
		;
}

/*
 * Power the board off by asserting the platform poweroff gpio, if one
 * is registered; otherwise just warn. Never returns.
 */
void __dead2 rockchip_soc_system_off(void)
{
	struct gpio_info *poweroff_gpio;

	poweroff_gpio = plat_get_rockchip_gpio_poweroff();

	if (poweroff_gpio) {
		/*
		 * if the tsadc over-temperature pin (GPIO1A6) is used as
		 * the shutdown gpio, its iomux must first be switched
		 * back to the gpio function
		 */
		if (poweroff_gpio->index == TSADC_INT_PIN) {
			mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO1A_IOMUX,
				      GPIO1A6_IOMUX);
		}
		gpio_set_direction(poweroff_gpio->index, GPIO_DIR_OUT);
		gpio_set_value(poweroff_gpio->index, poweroff_gpio->polarity);
	} else {
		WARN("Do nothing when system off\n");
	}

	while (1)
		;
}

/*
 * Map the BL31 SRAM sections into the EL3 translation regime:
 * .text read-only, .data and .stack read-write, and the incbin
 * payload non-cacheable read-write. All are identity-mapped.
 */
void rockchip_plat_mmu_el3(void)
{
	size_t sram_size;

	/* sram.text size */
	sram_size = (char *)&__bl31_sram_text_end -
		    (char *)&__bl31_sram_text_start;
	mmap_add_region((unsigned long)&__bl31_sram_text_start,
			(unsigned long)&__bl31_sram_text_start,
			sram_size, MT_MEMORY | MT_RO | MT_SECURE);

	/* sram.data size */
	sram_size = (char *)&__bl31_sram_data_end -
		    (char *)&__bl31_sram_data_start;
	mmap_add_region((unsigned long)&__bl31_sram_data_start,
			(unsigned long)&__bl31_sram_data_start,
			sram_size, MT_MEMORY | MT_RW | MT_SECURE);

	/* sram.stack size */
	sram_size = (char *)&__bl31_sram_stack_end -
		    (char *)&__bl31_sram_stack_start;
	mmap_add_region((unsigned long)&__bl31_sram_stack_start,
			(unsigned long)&__bl31_sram_stack_start,
			sram_size, MT_MEMORY | MT_RW | MT_SECURE);

	/* sram incbin payload */
	sram_size = (char *)&__sram_incbin_end - (char *)&__sram_incbin_start;
	mmap_add_region((unsigned long)&__sram_incbin_start,
			(unsigned long)&__sram_incbin_start,
			sram_size, MT_NON_CACHEABLE | MT_RW | MT_SECURE);
}

/*
 * One-time PMU setup at BL31 boot: initialize the power-domain lock,
 * record the warm-boot entry point, clear the per-cpu and per-cluster
 * boot flags, program the warm-boot address register, enable NoC auto
 * gating and the 32 kHz Schmitt trigger, then power off all non-boot
 * cpus.
 */
void plat_rockchip_pmu_init(void)
{
	uint32_t cpu;

	rockchip_pd_lock_init();

	/*
	 * register requires 32bits mode, switch it to 32 bits.
	 * NOTE(review): the 64-bit address is truncated into a uint32_t
	 * here - assumes platform_cpu_warmboot sits below 4GB; confirm.
	 */
	cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;

	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
		cpuson_flags[cpu] = 0;

	for (cpu = 0; cpu < PLATFORM_CLUSTER_COUNT; cpu++)
		clst_warmboot_data[cpu] = 0;

	/* config cpu's warm boot address */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	mmio_write_32(PMU_BASE + PMU_NOC_AUTO_ENA, NOC_AUTO_ENABLE);

	/*
	 * Enable Schmitt trigger for better 32 kHz input signal, which is
	 * important for suspend/resume reliability among other things.
	 */
	mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO0A_SMT, GPIO0A0_SMT_ENABLE);

	init_pmu_counts();

	nonboot_cpus_off();

	INFO("%s(%d): pd status %x\n", __func__, __LINE__,
	     mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}