/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bakery_lock.h>
#include <debug.h>
#include <delay_timer.h>
#include <dfs.h>
#include <errno.h>
#include <gpio.h>
#include <mmio.h>
#include <m0_ctl.h>
#include <platform.h>
#include <platform_def.h>
#include <plat_params.h>
#include <plat_private.h>
#include <rk3399_def.h>
#include <pmu_sram.h>
#include <secure.h>
#include <soc.h>
#include <pmu.h>
#include <pmu_com.h>
#include <pwm.h>
#include <bl31.h>
#include <suspend.h>

DEFINE_BAKERY_LOCK(rockchip_pd_lock);

/* Layout of the shared sleep-config block living in PMU SRAM. */
static struct psram_data_t *psram_sleep_cfg =
	(struct psram_data_t *)PSRAM_DT_BASE;

static uint32_t cpu_warm_boot_addr;

/*
 * There are two ways of powering a core on or off:
 * 1) Control its power domain via the PMU_PWRDN_CON reg,
 *    this is the core_pwr_pd mode.
 * 2) Enable the core power management in the PMU_CORE_PM_CON reg;
 *    then, once the core enters wfi, its power domain is
 *    powered off automatically. This is the core_pwr_wfi or
 *    core_pwr_wfi_int mode.
 * We need core_pm_cfg_info to distinguish which method is in use.
46 */ 47 48 static uint32_t core_pm_cfg_info[PLATFORM_CORE_COUNT] 49 #if USE_COHERENT_MEM 50 __attribute__ ((section("tzfw_coherent_mem"))) 51 #endif 52 ;/* coheront */ 53 54 static void pmu_bus_idle_req(uint32_t bus, uint32_t state) 55 { 56 uint32_t bus_id = BIT(bus); 57 uint32_t bus_req; 58 uint32_t wait_cnt = 0; 59 uint32_t bus_state, bus_ack; 60 61 if (state) 62 bus_req = BIT(bus); 63 else 64 bus_req = 0; 65 66 mmio_clrsetbits_32(PMU_BASE + PMU_BUS_IDLE_REQ, bus_id, bus_req); 67 68 do { 69 bus_state = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) & bus_id; 70 bus_ack = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK) & bus_id; 71 wait_cnt++; 72 } while ((bus_state != bus_req || bus_ack != bus_req) && 73 (wait_cnt < MAX_WAIT_COUNT)); 74 75 if (bus_state != bus_req || bus_ack != bus_req) { 76 INFO("%s:st=%x(%x)\n", __func__, 77 mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST), 78 bus_state); 79 INFO("%s:st=%x(%x)\n", __func__, 80 mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK), 81 bus_ack); 82 } 83 } 84 85 struct pmu_slpdata_s pmu_slpdata; 86 87 static void qos_save(void) 88 { 89 if (pmu_power_domain_st(PD_GPU) == pmu_pd_on) 90 RESTORE_QOS(pmu_slpdata.gpu_qos, GPU); 91 if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) { 92 RESTORE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0); 93 RESTORE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1); 94 } 95 if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) { 96 RESTORE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0); 97 RESTORE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1); 98 } 99 if (pmu_power_domain_st(PD_VO) == pmu_pd_on) { 100 RESTORE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R); 101 RESTORE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W); 102 RESTORE_QOS(pmu_slpdata.vop_little, VOP_LITTLE); 103 } 104 if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on) 105 RESTORE_QOS(pmu_slpdata.hdcp_qos, HDCP); 106 if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on) 107 RESTORE_QOS(pmu_slpdata.gmac_qos, GMAC); 108 if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) { 109 RESTORE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0); 110 
RESTORE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1); 111 } 112 if (pmu_power_domain_st(PD_SD) == pmu_pd_on) 113 RESTORE_QOS(pmu_slpdata.sdmmc_qos, SDMMC); 114 if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on) 115 RESTORE_QOS(pmu_slpdata.emmc_qos, EMMC); 116 if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on) 117 RESTORE_QOS(pmu_slpdata.sdio_qos, SDIO); 118 if (pmu_power_domain_st(PD_GIC) == pmu_pd_on) 119 RESTORE_QOS(pmu_slpdata.gic_qos, GIC); 120 if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) { 121 RESTORE_QOS(pmu_slpdata.rga_r_qos, RGA_R); 122 RESTORE_QOS(pmu_slpdata.rga_w_qos, RGA_W); 123 } 124 if (pmu_power_domain_st(PD_IEP) == pmu_pd_on) 125 RESTORE_QOS(pmu_slpdata.iep_qos, IEP); 126 if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) { 127 RESTORE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0); 128 RESTORE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1); 129 } 130 if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) { 131 RESTORE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0); 132 RESTORE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1); 133 RESTORE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP); 134 } 135 if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) { 136 RESTORE_QOS(pmu_slpdata.dmac0_qos, DMAC0); 137 RESTORE_QOS(pmu_slpdata.dmac1_qos, DMAC1); 138 RESTORE_QOS(pmu_slpdata.dcf_qos, DCF); 139 RESTORE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0); 140 RESTORE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1); 141 RESTORE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP); 142 RESTORE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP); 143 RESTORE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1); 144 } 145 if (pmu_power_domain_st(PD_VDU) == pmu_pd_on) 146 RESTORE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0); 147 if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) { 148 RESTORE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R); 149 RESTORE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W); 150 } 151 } 152 153 static void qos_restore(void) 154 { 155 if (pmu_power_domain_st(PD_GPU) == pmu_pd_on) 156 SAVE_QOS(pmu_slpdata.gpu_qos, GPU); 157 if 
(pmu_power_domain_st(PD_ISP0) == pmu_pd_on) { 158 SAVE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0); 159 SAVE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1); 160 } 161 if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) { 162 SAVE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0); 163 SAVE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1); 164 } 165 if (pmu_power_domain_st(PD_VO) == pmu_pd_on) { 166 SAVE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R); 167 SAVE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W); 168 SAVE_QOS(pmu_slpdata.vop_little, VOP_LITTLE); 169 } 170 if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on) 171 SAVE_QOS(pmu_slpdata.hdcp_qos, HDCP); 172 if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on) 173 SAVE_QOS(pmu_slpdata.gmac_qos, GMAC); 174 if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) { 175 SAVE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0); 176 SAVE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1); 177 } 178 if (pmu_power_domain_st(PD_SD) == pmu_pd_on) 179 SAVE_QOS(pmu_slpdata.sdmmc_qos, SDMMC); 180 if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on) 181 SAVE_QOS(pmu_slpdata.emmc_qos, EMMC); 182 if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on) 183 SAVE_QOS(pmu_slpdata.sdio_qos, SDIO); 184 if (pmu_power_domain_st(PD_GIC) == pmu_pd_on) 185 SAVE_QOS(pmu_slpdata.gic_qos, GIC); 186 if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) { 187 SAVE_QOS(pmu_slpdata.rga_r_qos, RGA_R); 188 SAVE_QOS(pmu_slpdata.rga_w_qos, RGA_W); 189 } 190 if (pmu_power_domain_st(PD_IEP) == pmu_pd_on) 191 SAVE_QOS(pmu_slpdata.iep_qos, IEP); 192 if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) { 193 SAVE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0); 194 SAVE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1); 195 } 196 if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) { 197 SAVE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0); 198 SAVE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1); 199 SAVE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP); 200 } 201 if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) { 202 SAVE_QOS(pmu_slpdata.dmac0_qos, DMAC0); 203 SAVE_QOS(pmu_slpdata.dmac1_qos, DMAC1); 204 
SAVE_QOS(pmu_slpdata.dcf_qos, DCF); 205 SAVE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0); 206 SAVE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1); 207 SAVE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP); 208 SAVE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP); 209 SAVE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1); 210 } 211 if (pmu_power_domain_st(PD_VDU) == pmu_pd_on) 212 SAVE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0); 213 if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) { 214 SAVE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R); 215 SAVE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W); 216 } 217 } 218 219 static int pmu_set_power_domain(uint32_t pd_id, uint32_t pd_state) 220 { 221 uint32_t state; 222 223 if (pmu_power_domain_st(pd_id) == pd_state) 224 goto out; 225 226 if (pd_state == pmu_pd_on) 227 pmu_power_domain_ctr(pd_id, pd_state); 228 229 state = (pd_state == pmu_pd_off) ? BUS_IDLE : BUS_ACTIVE; 230 231 switch (pd_id) { 232 case PD_GPU: 233 pmu_bus_idle_req(BUS_ID_GPU, state); 234 break; 235 case PD_VIO: 236 pmu_bus_idle_req(BUS_ID_VIO, state); 237 break; 238 case PD_ISP0: 239 pmu_bus_idle_req(BUS_ID_ISP0, state); 240 break; 241 case PD_ISP1: 242 pmu_bus_idle_req(BUS_ID_ISP1, state); 243 break; 244 case PD_VO: 245 pmu_bus_idle_req(BUS_ID_VOPB, state); 246 pmu_bus_idle_req(BUS_ID_VOPL, state); 247 break; 248 case PD_HDCP: 249 pmu_bus_idle_req(BUS_ID_HDCP, state); 250 break; 251 case PD_TCPD0: 252 break; 253 case PD_TCPD1: 254 break; 255 case PD_GMAC: 256 pmu_bus_idle_req(BUS_ID_GMAC, state); 257 break; 258 case PD_CCI: 259 pmu_bus_idle_req(BUS_ID_CCIM0, state); 260 pmu_bus_idle_req(BUS_ID_CCIM1, state); 261 break; 262 case PD_SD: 263 pmu_bus_idle_req(BUS_ID_SD, state); 264 break; 265 case PD_EMMC: 266 pmu_bus_idle_req(BUS_ID_EMMC, state); 267 break; 268 case PD_EDP: 269 pmu_bus_idle_req(BUS_ID_EDP, state); 270 break; 271 case PD_SDIOAUDIO: 272 pmu_bus_idle_req(BUS_ID_SDIOAUDIO, state); 273 break; 274 case PD_GIC: 275 pmu_bus_idle_req(BUS_ID_GIC, state); 276 break; 277 case PD_RGA: 278 
pmu_bus_idle_req(BUS_ID_RGA, state); 279 break; 280 case PD_VCODEC: 281 pmu_bus_idle_req(BUS_ID_VCODEC, state); 282 break; 283 case PD_VDU: 284 pmu_bus_idle_req(BUS_ID_VDU, state); 285 break; 286 case PD_IEP: 287 pmu_bus_idle_req(BUS_ID_IEP, state); 288 break; 289 case PD_USB3: 290 pmu_bus_idle_req(BUS_ID_USB3, state); 291 break; 292 case PD_PERIHP: 293 pmu_bus_idle_req(BUS_ID_PERIHP, state); 294 break; 295 default: 296 break; 297 } 298 299 if (pd_state == pmu_pd_off) 300 pmu_power_domain_ctr(pd_id, pd_state); 301 302 out: 303 return 0; 304 } 305 306 static uint32_t pmu_powerdomain_state; 307 308 static void pmu_power_domains_suspend(void) 309 { 310 clk_gate_con_save(); 311 clk_gate_con_disable(); 312 qos_save(); 313 pmu_powerdomain_state = mmio_read_32(PMU_BASE + PMU_PWRDN_ST); 314 pmu_set_power_domain(PD_GPU, pmu_pd_off); 315 pmu_set_power_domain(PD_TCPD0, pmu_pd_off); 316 pmu_set_power_domain(PD_TCPD1, pmu_pd_off); 317 pmu_set_power_domain(PD_VO, pmu_pd_off); 318 pmu_set_power_domain(PD_ISP0, pmu_pd_off); 319 pmu_set_power_domain(PD_ISP1, pmu_pd_off); 320 pmu_set_power_domain(PD_HDCP, pmu_pd_off); 321 pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_off); 322 pmu_set_power_domain(PD_GMAC, pmu_pd_off); 323 pmu_set_power_domain(PD_EDP, pmu_pd_off); 324 pmu_set_power_domain(PD_IEP, pmu_pd_off); 325 pmu_set_power_domain(PD_RGA, pmu_pd_off); 326 pmu_set_power_domain(PD_VCODEC, pmu_pd_off); 327 pmu_set_power_domain(PD_VDU, pmu_pd_off); 328 clk_gate_con_restore(); 329 } 330 331 static void pmu_power_domains_resume(void) 332 { 333 clk_gate_con_save(); 334 clk_gate_con_disable(); 335 if (!(pmu_powerdomain_state & BIT(PD_VDU))) 336 pmu_set_power_domain(PD_VDU, pmu_pd_on); 337 if (!(pmu_powerdomain_state & BIT(PD_VCODEC))) 338 pmu_set_power_domain(PD_VCODEC, pmu_pd_on); 339 if (!(pmu_powerdomain_state & BIT(PD_RGA))) 340 pmu_set_power_domain(PD_RGA, pmu_pd_on); 341 if (!(pmu_powerdomain_state & BIT(PD_IEP))) 342 pmu_set_power_domain(PD_IEP, pmu_pd_on); 343 if 
(!(pmu_powerdomain_state & BIT(PD_EDP))) 344 pmu_set_power_domain(PD_EDP, pmu_pd_on); 345 if (!(pmu_powerdomain_state & BIT(PD_GMAC))) 346 pmu_set_power_domain(PD_GMAC, pmu_pd_on); 347 if (!(pmu_powerdomain_state & BIT(PD_SDIOAUDIO))) 348 pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_on); 349 if (!(pmu_powerdomain_state & BIT(PD_HDCP))) 350 pmu_set_power_domain(PD_HDCP, pmu_pd_on); 351 if (!(pmu_powerdomain_state & BIT(PD_ISP1))) 352 pmu_set_power_domain(PD_ISP1, pmu_pd_on); 353 if (!(pmu_powerdomain_state & BIT(PD_ISP0))) 354 pmu_set_power_domain(PD_ISP0, pmu_pd_on); 355 if (!(pmu_powerdomain_state & BIT(PD_VO))) 356 pmu_set_power_domain(PD_VO, pmu_pd_on); 357 if (!(pmu_powerdomain_state & BIT(PD_TCPD1))) 358 pmu_set_power_domain(PD_TCPD1, pmu_pd_on); 359 if (!(pmu_powerdomain_state & BIT(PD_TCPD0))) 360 pmu_set_power_domain(PD_TCPD0, pmu_pd_on); 361 if (!(pmu_powerdomain_state & BIT(PD_GPU))) 362 pmu_set_power_domain(PD_GPU, pmu_pd_on); 363 qos_restore(); 364 clk_gate_con_restore(); 365 } 366 367 void rk3399_flash_l2_b(void) 368 { 369 uint32_t wait_cnt = 0; 370 371 mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B)); 372 dsb(); 373 374 while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) & 375 BIT(L2_FLUSHDONE_CLUSTER_B))) { 376 wait_cnt++; 377 if (wait_cnt >= MAX_WAIT_COUNT) 378 WARN("%s:reg %x,wait\n", __func__, 379 mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST)); 380 } 381 382 mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B)); 383 } 384 385 static void pmu_scu_b_pwrdn(void) 386 { 387 uint32_t wait_cnt = 0; 388 389 if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) & 390 (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) != 391 (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) { 392 ERROR("%s: not all cpus is off\n", __func__); 393 return; 394 } 395 396 rk3399_flash_l2_b(); 397 398 mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG)); 399 400 while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) & 401 
BIT(STANDBY_BY_WFIL2_CLUSTER_B))) { 402 wait_cnt++; 403 if (wait_cnt >= MAX_WAIT_COUNT) 404 ERROR("%s:wait cluster-b l2(%x)\n", __func__, 405 mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST)); 406 } 407 } 408 409 static void pmu_scu_b_pwrup(void) 410 { 411 mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG)); 412 } 413 414 void plat_rockchip_pmusram_prepare(void) 415 { 416 uint32_t *sram_dst, *sram_src; 417 size_t sram_size; 418 419 /* 420 * pmu sram code and data prepare 421 */ 422 sram_dst = (uint32_t *)PMUSRAM_BASE; 423 sram_src = (uint32_t *)&pmu_cpuson_entrypoint_start; 424 sram_size = (uint32_t *)&pmu_cpuson_entrypoint_end - 425 (uint32_t *)sram_src; 426 427 u32_align_cpy(sram_dst, sram_src, sram_size); 428 429 psram_sleep_cfg->sp = PSRAM_DT_BASE; 430 } 431 432 static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id) 433 { 434 assert(cpu_id < PLATFORM_CORE_COUNT); 435 return core_pm_cfg_info[cpu_id]; 436 } 437 438 static inline void set_cpus_pwr_domain_cfg_info(uint32_t cpu_id, uint32_t value) 439 { 440 assert(cpu_id < PLATFORM_CORE_COUNT); 441 core_pm_cfg_info[cpu_id] = value; 442 #if !USE_COHERENT_MEM 443 flush_dcache_range((uintptr_t)&core_pm_cfg_info[cpu_id], 444 sizeof(uint32_t)); 445 #endif 446 } 447 448 static int cpus_power_domain_on(uint32_t cpu_id) 449 { 450 uint32_t cfg_info; 451 uint32_t cpu_pd = PD_CPUL0 + cpu_id; 452 /* 453 * There are two ways to powering on or off on core. 454 * 1) Control it power domain into on or off in PMU_PWRDN_CON reg 455 * 2) Enable the core power manage in PMU_CORE_PM_CON reg, 456 * then, if the core enter into wfi, it power domain will be 457 * powered off automatically. 
458 */ 459 460 cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id); 461 462 if (cfg_info == core_pwr_pd) { 463 /* disable core_pm cfg */ 464 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 465 CORES_PM_DISABLE); 466 /* if the cores have be on, power off it firstly */ 467 if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) { 468 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 0); 469 pmu_power_domain_ctr(cpu_pd, pmu_pd_off); 470 } 471 472 pmu_power_domain_ctr(cpu_pd, pmu_pd_on); 473 } else { 474 if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) { 475 WARN("%s: cpu%d is not in off,!\n", __func__, cpu_id); 476 return -EINVAL; 477 } 478 479 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 480 BIT(core_pm_sft_wakeup_en)); 481 dsb(); 482 } 483 484 return 0; 485 } 486 487 static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg) 488 { 489 uint32_t cpu_pd; 490 uint32_t core_pm_value; 491 492 cpu_pd = PD_CPUL0 + cpu_id; 493 if (pmu_power_domain_st(cpu_pd) == pmu_pd_off) 494 return 0; 495 496 if (pd_cfg == core_pwr_pd) { 497 if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK)) 498 return -EINVAL; 499 500 /* disable core_pm cfg */ 501 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 502 CORES_PM_DISABLE); 503 504 set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg); 505 pmu_power_domain_ctr(cpu_pd, pmu_pd_off); 506 } else { 507 set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg); 508 509 core_pm_value = BIT(core_pm_en); 510 if (pd_cfg == core_pwr_wfi_int) 511 core_pm_value |= BIT(core_pm_int_wakeup_en); 512 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 513 core_pm_value); 514 dsb(); 515 } 516 517 return 0; 518 } 519 520 static inline void clst_pwr_domain_suspend(plat_local_state_t lvl_state) 521 { 522 uint32_t cpu_id = plat_my_core_pos(); 523 uint32_t pll_id, clst_st_msk, clst_st_chk_msk, pmu_st; 524 525 assert(cpu_id < PLATFORM_CORE_COUNT); 526 527 if (lvl_state == PLAT_MAX_OFF_STATE) { 528 if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT) { 529 pll_id = ALPLL_ID; 530 clst_st_msk = CLST_L_CPUS_MSK; 
531 } else { 532 pll_id = ABPLL_ID; 533 clst_st_msk = CLST_B_CPUS_MSK << 534 PLATFORM_CLUSTER0_CORE_COUNT; 535 } 536 537 clst_st_chk_msk = clst_st_msk & ~(BIT(cpu_id)); 538 539 pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST); 540 541 pmu_st &= clst_st_msk; 542 543 if (pmu_st == clst_st_chk_msk) { 544 mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3), 545 PLL_SLOW_MODE); 546 547 clst_warmboot_data[pll_id] = PMU_CLST_RET; 548 549 pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST); 550 pmu_st &= clst_st_msk; 551 if (pmu_st == clst_st_chk_msk) 552 return; 553 /* 554 * it is mean that others cpu is up again, 555 * we must resume the cfg at once. 556 */ 557 mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3), 558 PLL_NOMAL_MODE); 559 clst_warmboot_data[pll_id] = 0; 560 } 561 } 562 } 563 564 static int clst_pwr_domain_resume(plat_local_state_t lvl_state) 565 { 566 uint32_t cpu_id = plat_my_core_pos(); 567 uint32_t pll_id, pll_st; 568 569 assert(cpu_id < PLATFORM_CORE_COUNT); 570 571 if (lvl_state == PLAT_MAX_OFF_STATE) { 572 if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT) 573 pll_id = ALPLL_ID; 574 else 575 pll_id = ABPLL_ID; 576 577 pll_st = mmio_read_32(CRU_BASE + CRU_PLL_CON(pll_id, 3)) >> 578 PLL_MODE_SHIFT; 579 580 if (pll_st != NORMAL_MODE) { 581 WARN("%s: clst (%d) is in error mode (%d)\n", 582 __func__, pll_id, pll_st); 583 return -1; 584 } 585 } 586 587 return 0; 588 } 589 590 static void nonboot_cpus_off(void) 591 { 592 uint32_t boot_cpu, cpu; 593 594 boot_cpu = plat_my_core_pos(); 595 596 /* turn off noboot cpus */ 597 for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) { 598 if (cpu == boot_cpu) 599 continue; 600 cpus_power_domain_off(cpu, core_pwr_pd); 601 } 602 } 603 604 int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint) 605 { 606 uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr); 607 608 assert(cpu_id < PLATFORM_CORE_COUNT); 609 assert(cpuson_flags[cpu_id] == 0); 610 cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG; 611 cpuson_entry_point[cpu_id] = entrypoint; 612 
dsb(); 613 614 cpus_power_domain_on(cpu_id); 615 616 return PSCI_E_SUCCESS; 617 } 618 619 int rockchip_soc_cores_pwr_dm_off(void) 620 { 621 uint32_t cpu_id = plat_my_core_pos(); 622 623 cpus_power_domain_off(cpu_id, core_pwr_wfi); 624 625 return PSCI_E_SUCCESS; 626 } 627 628 int rockchip_soc_hlvl_pwr_dm_off(uint32_t lvl, 629 plat_local_state_t lvl_state) 630 { 631 switch (lvl) { 632 case MPIDR_AFFLVL1: 633 clst_pwr_domain_suspend(lvl_state); 634 break; 635 default: 636 break; 637 } 638 639 return PSCI_E_SUCCESS; 640 } 641 642 int rockchip_soc_cores_pwr_dm_suspend(void) 643 { 644 uint32_t cpu_id = plat_my_core_pos(); 645 646 assert(cpu_id < PLATFORM_CORE_COUNT); 647 assert(cpuson_flags[cpu_id] == 0); 648 cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN; 649 cpuson_entry_point[cpu_id] = plat_get_sec_entrypoint(); 650 dsb(); 651 652 cpus_power_domain_off(cpu_id, core_pwr_wfi_int); 653 654 return PSCI_E_SUCCESS; 655 } 656 657 int rockchip_soc_hlvl_pwr_dm_suspend(uint32_t lvl, plat_local_state_t lvl_state) 658 { 659 switch (lvl) { 660 case MPIDR_AFFLVL1: 661 clst_pwr_domain_suspend(lvl_state); 662 break; 663 default: 664 break; 665 } 666 667 return PSCI_E_SUCCESS; 668 } 669 670 int rockchip_soc_cores_pwr_dm_on_finish(void) 671 { 672 uint32_t cpu_id = plat_my_core_pos(); 673 674 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 675 CORES_PM_DISABLE); 676 return PSCI_E_SUCCESS; 677 } 678 679 int rockchip_soc_hlvl_pwr_dm_on_finish(uint32_t lvl, 680 plat_local_state_t lvl_state) 681 { 682 switch (lvl) { 683 case MPIDR_AFFLVL1: 684 clst_pwr_domain_resume(lvl_state); 685 break; 686 default: 687 break; 688 } 689 690 return PSCI_E_SUCCESS; 691 } 692 693 int rockchip_soc_cores_pwr_dm_resume(void) 694 { 695 uint32_t cpu_id = plat_my_core_pos(); 696 697 /* Disable core_pm */ 698 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), CORES_PM_DISABLE); 699 700 return PSCI_E_SUCCESS; 701 } 702 703 int rockchip_soc_hlvl_pwr_dm_resume(uint32_t lvl, plat_local_state_t lvl_state) 704 { 705 switch 
(lvl) { 706 case MPIDR_AFFLVL1: 707 clst_pwr_domain_resume(lvl_state); 708 default: 709 break; 710 } 711 712 return PSCI_E_SUCCESS; 713 } 714 715 /** 716 * init_pmu_counts - Init timing counts in the PMU register area 717 * 718 * At various points when we power up or down parts of the system we need 719 * a delay to wait for power / clocks to become stable. The PMU has counters 720 * to help software do the delay properly. Basically, it works like this: 721 * - Software sets up counter values 722 * - When software turns on something in the PMU, the counter kicks off 723 * - The hardware sets a bit automatically when the counter has finished and 724 * software knows that the initialization is done. 725 * 726 * It's software's job to setup these counters. The hardware power on default 727 * for these settings is conservative, setting everything to 0x5dc0 728 * (750 ms in 32 kHz counts or 1 ms in 24 MHz counts). 729 * 730 * Note that some of these counters are only really used at suspend/resume 731 * time (for instance, that's the only time we turn off/on the oscillator) and 732 * others are used during normal runtime (like turning on/off a CPU or GPU) but 733 * it doesn't hurt to init everything at boot. 734 * 735 * Also note that these counters can run off the 32 kHz clock or the 24 MHz 736 * clock. While the 24 MHz clock can give us more precision, it's not always 737 * available (like when we turn the oscillator off at sleep time). The 738 * pmu_use_lf (lf: low freq) is available in power mode. Current understanding 739 * is that counts work like this: 740 * IF (pmu_use_lf == 0) || (power_mode_en == 0) 741 * use the 24M OSC for counts 742 * ELSE 743 * use the 32K OSC for counts 744 * 745 * Notes: 746 * - There is a separate bit for the PMU called PMU_24M_EN_CFG. At the moment 747 * we always keep that 0. This apparently choose between using the PLL as 748 * the source for the PMU vs. the 24M clock. 
If we ever set it to 1 we
 *   should consider how it affects these counts (if at all).
 * - The power_mode_en is documented to auto-clear automatically when we leave
 *   "power mode". That's why most clocks are on 24M. Only timings used when
 *   in "power mode" are 32k.
 * - In some cases the kernel may override these counts.
 *
 * The PMU_STABLE_CNT / PMU_OSC_CNT / PMU_PLLLOCK_CNT are important CNTs
 * in power mode, we need to ensure that they are available.
 */
static void init_pmu_counts(void)
{
	/* COUNTS FOR INSIDE POWER MODE */

	/*
	 * From limited testing, need PMU stable >= 2ms, but go overkill
	 * and choose 30 ms to match testing on past SoCs. Also let
	 * OSC have 30 ms for stabilization.
	 */
	mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_32K_CNT_MS(30));
	mmio_write_32(PMU_BASE + PMU_OSC_CNT, CYCL_32K_CNT_MS(30));

	/* Unclear what these should be; try 3 ms */
	mmio_write_32(PMU_BASE + PMU_WAKEUP_RST_CLR_CNT, CYCL_32K_CNT_MS(3));

	/* Unclear what this should be, but set the default explicitly */
	mmio_write_32(PMU_BASE + PMU_TIMEOUT_CNT, 0x5dc0);

	/* COUNTS FOR OUTSIDE POWER MODE */

	/* Put something sorta conservative here until we know better */
	mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_24M_CNT_MS(3));
	mmio_write_32(PMU_BASE + PMU_DDRIO_PWRON_CNT, CYCL_24M_CNT_MS(1));
	mmio_write_32(PMU_BASE + PMU_CENTER_PWRDN_CNT, CYCL_24M_CNT_MS(1));
	mmio_write_32(PMU_BASE + PMU_CENTER_PWRUP_CNT, CYCL_24M_CNT_MS(1));

	/*
	 * Set CPU/GPU to 1 us.
	 *
	 * NOTE: Even though ATF doesn't configure the GPU we'll still setup
	 * counts here. After all ATF controls all these other bits and also
	 * chooses which clock these counters use.
	 */
	mmio_write_32(PMU_BASE + PMU_SCU_L_PWRDN_CNT, CYCL_24M_CNT_US(1));
	mmio_write_32(PMU_BASE + PMU_SCU_L_PWRUP_CNT, CYCL_24M_CNT_US(1));
	mmio_write_32(PMU_BASE + PMU_SCU_B_PWRDN_CNT, CYCL_24M_CNT_US(1));
	mmio_write_32(PMU_BASE + PMU_SCU_B_PWRUP_CNT, CYCL_24M_CNT_US(1));
	mmio_write_32(PMU_BASE + PMU_GPU_PWRDN_CNT, CYCL_24M_CNT_US(1));
	mmio_write_32(PMU_BASE + PMU_GPU_PWRUP_CNT, CYCL_24M_CNT_US(1));
}

/* Saved CRU_CLKGATE_CON(3) value, restored after system resume. */
static uint32_t clk_ddrc_save;

/*
 * Program the PMU power-mode configuration used for system suspend:
 * keep the DDRC clock gate open, move the DDR controller onto ABPLL,
 * set up CCI/ADB handshakes, and select the full low-power feature set
 * (SCU/CCI/center power-down, DDR self-refresh + IO retention, PLL
 * power-down, oscillator off, 32 kHz operation).
 */
static void sys_slp_config(void)
{
	uint32_t slp_mode_cfg = 0;

	/* keep enabling clk_ddrc_bpll_src_en gate for DDRC */
	clk_ddrc_save = mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(3));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(3), WMSK_BIT(1));

	prepare_abpll_for_ddrctrl();
	sram_func_set_ddrctl_pll(ABPLL_ID);

	mmio_write_32(GRF_BASE + GRF_SOC_CON4, CCI_FORCE_WAKEUP);
	mmio_write_32(PMU_BASE + PMU_CCI500_CON,
		      BIT_WITH_WMSK(PMU_CLR_PREQ_CCI500_HW) |
		      BIT_WITH_WMSK(PMU_CLR_QREQ_CCI500_HW) |
		      BIT_WITH_WMSK(PMU_QGATING_CCI500_CFG));

	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      BIT_WITH_WMSK(PMU_CLR_CORE_L_HW) |
		      BIT_WITH_WMSK(PMU_CLR_CORE_L_2GIC_HW) |
		      BIT_WITH_WMSK(PMU_CLR_GIC2_CORE_L_HW));

	slp_mode_cfg = BIT(PMU_PWR_MODE_EN) |
		       BIT(PMU_POWER_OFF_REQ_CFG) |
		       BIT(PMU_CPU0_PD_EN) |
		       BIT(PMU_L2_FLUSH_EN) |
		       BIT(PMU_L2_IDLE_EN) |
		       BIT(PMU_SCU_PD_EN) |
		       BIT(PMU_CCI_PD_EN) |
		       BIT(PMU_CLK_CORE_SRC_GATE_EN) |
		       BIT(PMU_ALIVE_USE_LF) |
		       BIT(PMU_SREF0_ENTER_EN) |
		       BIT(PMU_SREF1_ENTER_EN) |
		       BIT(PMU_DDRC0_GATING_EN) |
		       BIT(PMU_DDRC1_GATING_EN) |
		       BIT(PMU_DDRIO0_RET_EN) |
		       BIT(PMU_DDRIO1_RET_EN) |
		       BIT(PMU_DDRIO_RET_HW_DE_REQ) |
		       BIT(PMU_CENTER_PD_EN) |
		       BIT(PMU_PLL_PD_EN) |
		       BIT(PMU_CLK_CENTER_SRC_GATE_EN) |
		       BIT(PMU_OSC_DIS) |
		       BIT(PMU_PMU_USE_LF);

	mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, BIT(PMU_GPIO_WKUP_EN));
	mmio_write_32(PMU_BASE + PMU_PWRMODE_CON, slp_mode_cfg);

	mmio_write_32(PMU_BASE + PMU_PLL_CON, PLL_PD_HW);
	mmio_write_32(PMUGRF_BASE + PMUGRF_SOC_CON0, EXTERNAL_32K);
	mmio_write_32(PMUGRF_BASE, IOMUX_CLK_32K); /* 32k iomux */
}

/* Set the given hardware auto-idle request bits in PMU_BUS_CLR. */
static void set_hw_idle(uint32_t hw_idle)
{
	mmio_setbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle);
}

/* Clear the given hardware auto-idle request bits in PMU_BUS_CLR. */
static void clr_hw_idle(uint32_t hw_idle)
{
	mmio_clrbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle);
}

/* GPIO2..GPIO4 pin state saved by suspend_apio(), restored by resume_apio(). */
static uint32_t iomux_status[12];
static uint32_t pull_mode_status[12];
static uint32_t gpio_direction[3];
static uint32_t gpio_2_4_clk_gate;

/*
 * Park the AP IO groups selected by the platform (apio1..apio5) for
 * suspend: save iomux/pull/direction/clock-gate state for gpio2..gpio4,
 * then switch the selected pins to GPIO function, pull-none, input.
 */
static void suspend_apio(void)
{
	struct apio_info *suspend_apio;
	int i;

	suspend_apio = plat_get_rockchip_suspend_apio();

	if (!suspend_apio)
		return;

	/* save gpio2 ~ gpio4 iomux and pull mode */
	for (i = 0; i < 12; i++) {
		iomux_status[i] = mmio_read_32(GRF_BASE +
				GRF_GPIO2A_IOMUX + i * 4);
		pull_mode_status[i] = mmio_read_32(GRF_BASE +
				GRF_GPIO2A_P + i * 4);
	}

	/* store gpio2 ~ gpio4 clock gate state */
	gpio_2_4_clk_gate = (mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(31)) >>
				PCLK_GPIO2_GATE_SHIFT) & 0x07;

	/* enable gpio2 ~ gpio4 clock gate */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
		      BITS_WITH_WMASK(0, 0x07, PCLK_GPIO2_GATE_SHIFT));

	/* save gpio2 ~ gpio4 direction */
	gpio_direction[0] = mmio_read_32(GPIO2_BASE + 0x04);
	gpio_direction[1] = mmio_read_32(GPIO3_BASE + 0x04);
	gpio_direction[2] = mmio_read_32(GPIO4_BASE + 0x04);

	/* apio1 charge gpio3a0 ~ gpio3c7 */
	if (suspend_apio->apio1) {

		/* set gpio3a0 ~ gpio3c7 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO3A_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO3B_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO3C_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio3a0 ~ gpio3c7 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO3A_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO3B_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO3C_P, REG_SOC_WMSK | 0);

		/* set gpio3a0 ~ gpio3c7 to input */
		mmio_clrbits_32(GPIO3_BASE + 0x04, 0x00ffffff);
	}

	/* apio2 charge gpio2a0 ~ gpio2b4 */
	if (suspend_apio->apio2) {

		/* set gpio2a0 ~ gpio2b4 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO2B_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio2a0 ~ gpio2b4 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO2A_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO2B_P, REG_SOC_WMSK | 0);

		/* set gpio2a0 ~ gpio2b4 to input */
		mmio_clrbits_32(GPIO2_BASE + 0x04, 0x00001fff);
	}

	/* apio3 charge gpio2c0 ~ gpio2d4*/
	if (suspend_apio->apio3) {

		/* set gpio2a0 ~ gpio2b4 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO2C_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO2D_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio2c0 ~ gpio2d4 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO2C_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO2D_P, REG_SOC_WMSK | 0);

		/* set gpio2c0 ~ gpio2d4 to input */
		mmio_clrbits_32(GPIO2_BASE + 0x04, 0x1fff0000);
	}

	/* apio4 charge gpio4c0 ~ gpio4c7, gpio4d0 ~ gpio4d6 */
	if (suspend_apio->apio4) {

		/* set gpio4c0 ~ gpio4d6 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO4D_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio4c0 ~ gpio4d6 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO4C_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO4D_P, REG_SOC_WMSK | 0);

		/* set gpio4c0 ~ gpio4d6 to input */
		mmio_clrbits_32(GPIO4_BASE + 0x04, 0x7fff0000);
	}

	/* apio5 charge gpio3d0 ~ gpio3d7, gpio4a0 ~ gpio4a7*/
	if (suspend_apio->apio5) {
		/* set gpio3d0 ~ gpio4a7 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO3D_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO4A_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio3d0 ~ gpio4a7 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO3D_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO4A_P, REG_SOC_WMSK | 0);

		/* set gpio4c0 ~ gpio4d6 to input */
		mmio_clrbits_32(GPIO3_BASE + 0x04, 0xff000000);
		mmio_clrbits_32(GPIO4_BASE + 0x04, 0x000000ff);
	}
}

/* Restore the gpio2..gpio4 state saved by suspend_apio(). */
static void resume_apio(void)
{
	struct apio_info *suspend_apio;
	int i;

	suspend_apio = plat_get_rockchip_suspend_apio();

	if (!suspend_apio)
		return;

	for (i = 0; i < 12; i++) {
		mmio_write_32(GRF_BASE + GRF_GPIO2A_P + i * 4,
			      REG_SOC_WMSK | pull_mode_status[i]);
		mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX + i * 4,
			      REG_SOC_WMSK | iomux_status[i]);
	}

	/* set gpio2 ~ gpio4 direction back to store value */
	mmio_write_32(GPIO2_BASE + 0x04, gpio_direction[0]);
	mmio_write_32(GPIO3_BASE + 0x04, gpio_direction[1]);
	mmio_write_32(GPIO4_BASE + 0x04, gpio_direction[2]);

	/* set gpio2 ~ gpio4 clock gate back to store value */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
		      BITS_WITH_WMASK(gpio_2_4_clk_gate, 0x07,
				      PCLK_GPIO2_GATE_SHIFT));
}

/* Drive the platform-specified suspend GPIOs to their sleep polarity. */
static void suspend_gpio(void)
{
	struct gpio_info *suspend_gpio;
	uint32_t count;
	int i;

	suspend_gpio = plat_get_rockchip_suspend_gpio(&count);

	for (i = 0; i < count; i++) {
		gpio_set_value(suspend_gpio[i].index, suspend_gpio[i].polarity);
		gpio_set_direction(suspend_gpio[i].index, GPIO_DIR_OUT);
		udelay(1);
	}
}

/* Drive the suspend GPIOs back to their wake polarity, in reverse order. */
static void resume_gpio(void)
{
	struct gpio_info *suspend_gpio;
	uint32_t count;
	int i;

	suspend_gpio = plat_get_rockchip_suspend_gpio(&count);

	for (i = count - 1; i >= 0; i--) {
		gpio_set_value(suspend_gpio[i].index,
			       !suspend_gpio[i].polarity);
		gpio_set_direction(suspend_gpio[i].index, GPIO_DIR_OUT);
		udelay(1);
	}
}

/* Tell the M0 firmware its next job is the suspend sequence. */
static void m0_configure_suspend(void)
{
	/* set PARAM to M0_FUNC_SUSPEND */
	mmio_write_32(M0_PARAM_ADDR + PARAM_M0_FUNC, M0_FUNC_SUSPEND);
}

/*
 * System-suspend entry: save DDR/QoS/power-domain state, configure the
 * PMU power mode, hand off to the M0, set the warm-boot address, and
 * park the big-cluster ADB400 bridges before the final WFI.
 * (Function continues past this chunk.)
 */
int rockchip_soc_sys_pwr_dm_suspend(void)
{
	uint32_t wait_cnt = 0;
	uint32_t status = 0;

	ddr_prepare_for_sys_suspend();
	dmc_save();
	pmu_scu_b_pwrdn();

	pmu_power_domains_suspend();
	set_hw_idle(BIT(PMU_CLR_CENTER1) |
		    BIT(PMU_CLR_ALIVE) |
		    BIT(PMU_CLR_MSCH0) |
		    BIT(PMU_CLR_MSCH1) |
		    BIT(PMU_CLR_CCIM0) |
		    BIT(PMU_CLR_CCIM1) |
		    BIT(PMU_CLR_CENTER) |
		    BIT(PMU_CLR_GIC));

	sys_slp_config();

	m0_configure_suspend();
	m0_start();

	pmu_sgrf_rst_hld();

	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) |
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_SW) |
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_GIC2_CORE_B_SW));
	dsb();
	status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) |
		BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) |
		BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST);
	while ((mmio_read_32(PMU_BASE +
	       PMU_ADB400_ST) & status) != status) {
		wait_cnt++;
		if (wait_cnt >= MAX_WAIT_COUNT) {
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_ADB400_ST));
			panic();
		}
	}
	mmio_setbits_32(PMU_BASE + PMU_PWRDN_CON, BIT(PMU_SCU_B_PWRDWN_EN));

	secure_watchdog_disable();

	/*
	 * Disabling PLLs/PWM/DVFS is approaching WFI which is
1106 * the last steps in suspend. 1107 */ 1108 disable_dvfs_plls(); 1109 disable_pwms(); 1110 disable_nodvfs_plls(); 1111 1112 suspend_apio(); 1113 suspend_gpio(); 1114 1115 return 0; 1116 } 1117 1118 int rockchip_soc_sys_pwr_dm_resume(void) 1119 { 1120 uint32_t wait_cnt = 0; 1121 uint32_t status = 0; 1122 1123 resume_apio(); 1124 resume_gpio(); 1125 enable_nodvfs_plls(); 1126 enable_pwms(); 1127 /* PWM regulators take time to come up; give 300us to be safe. */ 1128 udelay(300); 1129 enable_dvfs_plls(); 1130 1131 secure_watchdog_enable(); 1132 1133 /* restore clk_ddrc_bpll_src_en gate */ 1134 mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(3), 1135 BITS_WITH_WMASK(clk_ddrc_save, 0xff, 0)); 1136 1137 /* 1138 * The wakeup status is not cleared by itself, we need to clear it 1139 * manually. Otherwise we will alway query some interrupt next time. 1140 * 1141 * NOTE: If the kernel needs to query this, we might want to stash it 1142 * somewhere. 1143 */ 1144 mmio_write_32(PMU_BASE + PMU_WAKEUP_STATUS, 0xffffffff); 1145 mmio_write_32(PMU_BASE + PMU_WKUP_CFG4, 0x00); 1146 1147 mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1), 1148 (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) | 1149 CPU_BOOT_ADDR_WMASK); 1150 1151 mmio_write_32(PMU_BASE + PMU_CCI500_CON, 1152 WMSK_BIT(PMU_CLR_PREQ_CCI500_HW) | 1153 WMSK_BIT(PMU_CLR_QREQ_CCI500_HW) | 1154 WMSK_BIT(PMU_QGATING_CCI500_CFG)); 1155 dsb(); 1156 mmio_clrbits_32(PMU_BASE + PMU_PWRDN_CON, 1157 BIT(PMU_SCU_B_PWRDWN_EN)); 1158 1159 mmio_write_32(PMU_BASE + PMU_ADB400_CON, 1160 WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) | 1161 WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_SW) | 1162 WMSK_BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW) | 1163 WMSK_BIT(PMU_CLR_CORE_L_HW) | 1164 WMSK_BIT(PMU_CLR_CORE_L_2GIC_HW) | 1165 WMSK_BIT(PMU_CLR_GIC2_CORE_L_HW)); 1166 1167 status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) | 1168 BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) | 1169 BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST); 1170 1171 while ((mmio_read_32(PMU_BASE + 1172 PMU_ADB400_ST) & status)) { 1173 wait_cnt++; 
1174 if (wait_cnt >= MAX_WAIT_COUNT) { 1175 ERROR("%s:wait cluster-b l2(%x)\n", __func__, 1176 mmio_read_32(PMU_BASE + PMU_ADB400_ST)); 1177 panic(); 1178 } 1179 } 1180 1181 pmu_sgrf_rst_hld_release(); 1182 pmu_scu_b_pwrup(); 1183 pmu_power_domains_resume(); 1184 1185 restore_dpll(); 1186 sram_func_set_ddrctl_pll(DPLL_ID); 1187 restore_abpll(); 1188 1189 clr_hw_idle(BIT(PMU_CLR_CENTER1) | 1190 BIT(PMU_CLR_ALIVE) | 1191 BIT(PMU_CLR_MSCH0) | 1192 BIT(PMU_CLR_MSCH1) | 1193 BIT(PMU_CLR_CCIM0) | 1194 BIT(PMU_CLR_CCIM1) | 1195 BIT(PMU_CLR_CENTER) | 1196 BIT(PMU_CLR_GIC)); 1197 1198 plat_rockchip_gic_cpuif_enable(); 1199 m0_stop(); 1200 1201 ddr_prepare_for_sys_resume(); 1202 1203 return 0; 1204 } 1205 1206 void __dead2 rockchip_soc_soft_reset(void) 1207 { 1208 struct gpio_info *rst_gpio; 1209 1210 rst_gpio = plat_get_rockchip_gpio_reset(); 1211 1212 if (rst_gpio) { 1213 gpio_set_direction(rst_gpio->index, GPIO_DIR_OUT); 1214 gpio_set_value(rst_gpio->index, rst_gpio->polarity); 1215 } else { 1216 soc_global_soft_reset(); 1217 } 1218 1219 while (1) 1220 ; 1221 } 1222 1223 void __dead2 rockchip_soc_system_off(void) 1224 { 1225 struct gpio_info *poweroff_gpio; 1226 1227 poweroff_gpio = plat_get_rockchip_gpio_poweroff(); 1228 1229 if (poweroff_gpio) { 1230 /* 1231 * if use tsadc over temp pin(GPIO1A6) as shutdown gpio, 1232 * need to set this pin iomux back to gpio function 1233 */ 1234 if (poweroff_gpio->index == TSADC_INT_PIN) { 1235 mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO1A_IOMUX, 1236 GPIO1A6_IOMUX); 1237 } 1238 gpio_set_direction(poweroff_gpio->index, GPIO_DIR_OUT); 1239 gpio_set_value(poweroff_gpio->index, poweroff_gpio->polarity); 1240 } else { 1241 WARN("Do nothing when system off\n"); 1242 } 1243 1244 while (1) 1245 ; 1246 } 1247 1248 void plat_rockchip_pmu_init(void) 1249 { 1250 uint32_t cpu; 1251 1252 rockchip_pd_lock_init(); 1253 1254 /* register requires 32bits mode, switch it to 32 bits */ 1255 cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot; 1256 1257 for 
(cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) 1258 cpuson_flags[cpu] = 0; 1259 1260 for (cpu = 0; cpu < PLATFORM_CLUSTER_COUNT; cpu++) 1261 clst_warmboot_data[cpu] = 0; 1262 1263 psram_sleep_cfg->ddr_func = (uint64_t)dmc_restore; 1264 psram_sleep_cfg->ddr_data = (uint64_t)&sdram_config; 1265 psram_sleep_cfg->ddr_flag = 0x01; 1266 1267 psram_sleep_cfg->boot_mpidr = read_mpidr_el1() & 0xffff; 1268 1269 /* config cpu's warm boot address */ 1270 mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1), 1271 (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) | 1272 CPU_BOOT_ADDR_WMASK); 1273 mmio_write_32(PMU_BASE + PMU_NOC_AUTO_ENA, NOC_AUTO_ENABLE); 1274 1275 /* 1276 * Enable Schmitt trigger for better 32 kHz input signal, which is 1277 * important for suspend/resume reliability among other things. 1278 */ 1279 mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO0A_SMT, GPIO0A0_SMT_ENABLE); 1280 1281 init_pmu_counts(); 1282 1283 nonboot_cpus_off(); 1284 1285 INFO("%s(%d): pd status %x\n", __func__, __LINE__, 1286 mmio_read_32(PMU_BASE + PMU_PWRDN_ST)); 1287 } 1288