/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bakery_lock.h>
#include <bl31.h>
#include <debug.h>
#include <delay_timer.h>
#include <dfs.h>
#include <errno.h>
#include <gpio.h>
#include <m0_ctl.h>
#include <mmio.h>
#include <plat_params.h>
#include <plat_private.h>
#include <platform.h>
#include <platform_def.h>
#include <pmu.h>
#include <pmu_com.h>
#include <pwm.h>
#include <rk3399_def.h>
#include <secure.h>
#include <soc.h>
#include <string.h>
#include <suspend.h>

DEFINE_BAKERY_LOCK(rockchip_pd_lock);

static uint32_t cpu_warm_boot_addr;
static char store_sram[SRAM_BIN_LIMIT + SRAM_TEXT_LIMIT + SRAM_DATA_LIMIT];

/*
 * There are two ways to power a core on or off:
 * 1) Switch the core's power domain on or off via the PMU_PWRDN_CON
 *    register; this is the core_pwr_pd mode.
 * 2) Enable core power management via the PMU_CORE_PM_CON register; the
 *    core's power domain is then powered off automatically once the core
 *    enters WFI. This is the core_pwr_wfi or core_pwr_wfi_int mode.
 * We use core_pm_cfg_info to record which method is in use for each core.
 */

static uint32_t core_pm_cfg_info[PLATFORM_CORE_COUNT]
#if USE_COHERENT_MEM
__attribute__ ((section("tzfw_coherent_mem")))
#endif
;/* coherent */
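
/*
 * Illustrative summary of the modes, matching the callers later in this
 * file: nonboot_cpus_off() uses mode 1 via
 * cpus_power_domain_off(cpu, core_pwr_pd), PSCI CPU_OFF uses
 * cpus_power_domain_off(cpu, core_pwr_wfi), and CPU suspend arms mode 2
 * via cpus_power_domain_off(cpu, core_pwr_wfi_int) so that entering WFI
 * itself triggers the power-down.
 */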

static void pmu_bus_idle_req(uint32_t bus, uint32_t state)
{
	uint32_t bus_id = BIT(bus);
	uint32_t bus_req;
	uint32_t wait_cnt = 0;
	uint32_t bus_state, bus_ack;

	if (state)
		bus_req = BIT(bus);
	else
		bus_req = 0;

	mmio_clrsetbits_32(PMU_BASE + PMU_BUS_IDLE_REQ, bus_id, bus_req);

	do {
		bus_state = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) & bus_id;
		bus_ack = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK) & bus_id;
		wait_cnt++;
	} while ((bus_state != bus_req || bus_ack != bus_req) &&
		 (wait_cnt < MAX_WAIT_COUNT));

	if (bus_state != bus_req || bus_ack != bus_req) {
		INFO("%s:st=%x(%x)\n", __func__,
		     mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST),
		     bus_state);
		INFO("%s:ack=%x(%x)\n", __func__,
		     mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK),
		     bus_ack);
	}
}

struct pmu_slpdata_s pmu_slpdata;

static void qos_save(void)
{
	if (pmu_power_domain_st(PD_GPU) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.gpu_qos, GPU);
	if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0);
		SAVE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1);
	}
	if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0);
		SAVE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1);
	}
	if (pmu_power_domain_st(PD_VO) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R);
		SAVE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W);
		SAVE_QOS(pmu_slpdata.vop_little, VOP_LITTLE);
	}
	if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.hdcp_qos, HDCP);
	if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.gmac_qos, GMAC);
	if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0);
		SAVE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1);
	}
	if (pmu_power_domain_st(PD_SD) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.sdmmc_qos, SDMMC);
	if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.emmc_qos, EMMC);
	if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.sdio_qos, SDIO);
	if (pmu_power_domain_st(PD_GIC) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.gic_qos, GIC);
	if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.rga_r_qos, RGA_R);
		SAVE_QOS(pmu_slpdata.rga_w_qos, RGA_W);
	}
	if (pmu_power_domain_st(PD_IEP) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.iep_qos, IEP);
	if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0);
		SAVE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1);
	}
	if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0);
		SAVE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1);
		SAVE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP);
	}
	if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.dmac0_qos, DMAC0);
		SAVE_QOS(pmu_slpdata.dmac1_qos, DMAC1);
		SAVE_QOS(pmu_slpdata.dcf_qos, DCF);
		SAVE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0);
		SAVE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1);
		SAVE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP);
		SAVE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP);
		SAVE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1);
	}
	if (pmu_power_domain_st(PD_VDU) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0);
	if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R);
		SAVE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W);
	}
}

static void qos_restore(void)
{
	if (pmu_power_domain_st(PD_GPU) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.gpu_qos, GPU);
	if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0);
		RESTORE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1);
	}
	if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0);
		RESTORE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1);
	}
	if (pmu_power_domain_st(PD_VO) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R);
		RESTORE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W);
		RESTORE_QOS(pmu_slpdata.vop_little, VOP_LITTLE);
	}
	if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.hdcp_qos, HDCP);
	if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.gmac_qos, GMAC);
	if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0);
		RESTORE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1);
	}
	if (pmu_power_domain_st(PD_SD) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.sdmmc_qos, SDMMC);
	if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.emmc_qos, EMMC);
	if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.sdio_qos, SDIO);
	if (pmu_power_domain_st(PD_GIC) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.gic_qos, GIC);
	if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.rga_r_qos, RGA_R);
		RESTORE_QOS(pmu_slpdata.rga_w_qos, RGA_W);
	}
	if (pmu_power_domain_st(PD_IEP) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.iep_qos, IEP);
	if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0);
		RESTORE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1);
	}
	if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0);
		RESTORE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1);
		RESTORE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP);
	}
	if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.dmac0_qos, DMAC0);
		RESTORE_QOS(pmu_slpdata.dmac1_qos, DMAC1);
		RESTORE_QOS(pmu_slpdata.dcf_qos, DCF);
		RESTORE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0);
		RESTORE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1);
		RESTORE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP);
		RESTORE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP);
		RESTORE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1);
	}
	if (pmu_power_domain_st(PD_VDU) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0);
	if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R);
		RESTORE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W);
	}
}
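
/*
 * Switch a power domain on or off, sequencing the bus idle handshake
 * around the power switch: when powering on, the domain is switched on
 * first and its bus is then brought back to the active state; when
 * powering off, the bus is idled first and the domain is switched off
 * last.
 */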
static int pmu_set_power_domain(uint32_t pd_id, uint32_t pd_state)
{
	uint32_t state;

	if (pmu_power_domain_st(pd_id) == pd_state)
		goto out;

	if (pd_state == pmu_pd_on)
		pmu_power_domain_ctr(pd_id, pd_state);

	state = (pd_state == pmu_pd_off) ? BUS_IDLE : BUS_ACTIVE;

	switch (pd_id) {
	case PD_GPU:
		pmu_bus_idle_req(BUS_ID_GPU, state);
		break;
	case PD_VIO:
		pmu_bus_idle_req(BUS_ID_VIO, state);
		break;
	case PD_ISP0:
		pmu_bus_idle_req(BUS_ID_ISP0, state);
		break;
	case PD_ISP1:
		pmu_bus_idle_req(BUS_ID_ISP1, state);
		break;
	case PD_VO:
		pmu_bus_idle_req(BUS_ID_VOPB, state);
		pmu_bus_idle_req(BUS_ID_VOPL, state);
		break;
	case PD_HDCP:
		pmu_bus_idle_req(BUS_ID_HDCP, state);
		break;
	case PD_TCPD0:
		break;
	case PD_TCPD1:
		break;
	case PD_GMAC:
		pmu_bus_idle_req(BUS_ID_GMAC, state);
		break;
	case PD_CCI:
		pmu_bus_idle_req(BUS_ID_CCIM0, state);
		pmu_bus_idle_req(BUS_ID_CCIM1, state);
		break;
	case PD_SD:
		pmu_bus_idle_req(BUS_ID_SD, state);
		break;
	case PD_EMMC:
		pmu_bus_idle_req(BUS_ID_EMMC, state);
		break;
	case PD_EDP:
		pmu_bus_idle_req(BUS_ID_EDP, state);
		break;
	case PD_SDIOAUDIO:
		pmu_bus_idle_req(BUS_ID_SDIOAUDIO, state);
		break;
	case PD_GIC:
		pmu_bus_idle_req(BUS_ID_GIC, state);
		break;
	case PD_RGA:
		pmu_bus_idle_req(BUS_ID_RGA, state);
		break;
	case PD_VCODEC:
		pmu_bus_idle_req(BUS_ID_VCODEC, state);
		break;
	case PD_VDU:
		pmu_bus_idle_req(BUS_ID_VDU, state);
		break;
	case PD_IEP:
		pmu_bus_idle_req(BUS_ID_IEP, state);
		break;
	case PD_USB3:
		pmu_bus_idle_req(BUS_ID_USB3, state);
		break;
	case PD_PERIHP:
		pmu_bus_idle_req(BUS_ID_PERIHP, state);
		break;
	default:
		break;
	}

	if (pd_state == pmu_pd_off)
		pmu_power_domain_ctr(pd_id, pd_state);

out:
	return 0;
}

static uint32_t pmu_powerdomain_state;

static void pmu_power_domains_suspend(void)
{
	clk_gate_con_save();
	clk_gate_con_disable();
	qos_save();
	pmu_powerdomain_state = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);
	pmu_set_power_domain(PD_GPU, pmu_pd_off);
	pmu_set_power_domain(PD_TCPD0, pmu_pd_off);
	pmu_set_power_domain(PD_TCPD1, pmu_pd_off);
	pmu_set_power_domain(PD_VO, pmu_pd_off);
	pmu_set_power_domain(PD_ISP0, pmu_pd_off);
	pmu_set_power_domain(PD_ISP1, pmu_pd_off);
	pmu_set_power_domain(PD_HDCP, pmu_pd_off);
	pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_off);
	pmu_set_power_domain(PD_GMAC, pmu_pd_off);
	pmu_set_power_domain(PD_EDP, pmu_pd_off);
	pmu_set_power_domain(PD_IEP, pmu_pd_off);
	pmu_set_power_domain(PD_RGA, pmu_pd_off);
	pmu_set_power_domain(PD_VCODEC, pmu_pd_off);
	pmu_set_power_domain(PD_VDU, pmu_pd_off);
	clk_gate_con_restore();
}

static void pmu_power_domains_resume(void)
{
	clk_gate_con_save();
	clk_gate_con_disable();
	if (!(pmu_powerdomain_state & BIT(PD_VDU)))
		pmu_set_power_domain(PD_VDU, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_VCODEC)))
		pmu_set_power_domain(PD_VCODEC, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_RGA)))
		pmu_set_power_domain(PD_RGA, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_IEP)))
		pmu_set_power_domain(PD_IEP, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_EDP)))
		pmu_set_power_domain(PD_EDP, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_GMAC)))
		pmu_set_power_domain(PD_GMAC, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_SDIOAUDIO)))
		pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_HDCP)))
		pmu_set_power_domain(PD_HDCP, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_ISP1)))
		pmu_set_power_domain(PD_ISP1, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_ISP0)))
		pmu_set_power_domain(PD_ISP0, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_VO)))
		pmu_set_power_domain(PD_VO, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_TCPD1)))
		pmu_set_power_domain(PD_TCPD1, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_TCPD0)))
		pmu_set_power_domain(PD_TCPD0, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_GPU)))
		pmu_set_power_domain(PD_GPU, pmu_pd_on);
	qos_restore();
	clk_gate_con_restore();
}

void rk3399_flush_l2_b(void)
{
	uint32_t wait_cnt = 0;

	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B));
	dsb();

	/*
	 * The big cluster's L2 flush takes ~4 ms by default; allow up to
	 * 10 ms for enough margin.
	 */
	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
		 BIT(L2_FLUSHDONE_CLUSTER_B))) {
		wait_cnt++;
		udelay(10);
		if (wait_cnt == 10000 / 10)
			WARN("L2 cache flush on suspend took longer than 10ms\n");
	}

	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B));
}

static void pmu_scu_b_pwrdn(void)
{
	uint32_t wait_cnt = 0;

	if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) &
	     (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) !=
	    (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) {
		ERROR("%s: not all CPUs are off\n", __func__);
		return;
	}

	rk3399_flush_l2_b();

	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG));

	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
		 BIT(STANDBY_BY_WFIL2_CLUSTER_B))) {
		wait_cnt++;
		if (wait_cnt >= MAX_WAIT_COUNT)
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}
}

static void pmu_scu_b_pwrup(void)
{
	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG));
}

static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id)
{
	assert(cpu_id < PLATFORM_CORE_COUNT);
	return core_pm_cfg_info[cpu_id];
}

static inline void set_cpus_pwr_domain_cfg_info(uint32_t cpu_id, uint32_t value)
{
	assert(cpu_id < PLATFORM_CORE_COUNT);
	core_pm_cfg_info[cpu_id] = value;
#if !USE_COHERENT_MEM
	flush_dcache_range((uintptr_t)&core_pm_cfg_info[cpu_id],
			   sizeof(uint32_t));
#endif
}

static int cpus_power_domain_on(uint32_t cpu_id)
{
	uint32_t cfg_info;
	uint32_t cpu_pd = PD_CPUL0 + cpu_id;
	/*
	 * See the comment above core_pm_cfg_info for the two ways a core
	 * can be powered on or off.
	 */

	cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id);

	if (cfg_info == core_pwr_pd) {
		/* disable core_pm cfg */
		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      CORES_PM_DISABLE);
		/* if the core is already on, power it off first */
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 0);
			pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
		}

		pmu_power_domain_ctr(cpu_pd, pmu_pd_on);
	} else {
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			WARN("%s: cpu%d is not off!\n", __func__, cpu_id);
			return -EINVAL;
		}

		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      BIT(core_pm_sft_wakeup_en));
		dsb();
	}

	return 0;
}

static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg)
{
	uint32_t cpu_pd;
	uint32_t core_pm_value;

	cpu_pd = PD_CPUL0 + cpu_id;
	if (pmu_power_domain_st(cpu_pd) == pmu_pd_off)
		return 0;

	if (pd_cfg == core_pwr_pd) {
		if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK))
			return -EINVAL;

		/* disable core_pm cfg */
		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      CORES_PM_DISABLE);

		set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);
		pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
	} else {
		set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);

		core_pm_value = BIT(core_pm_en);
		if (pd_cfg == core_pwr_wfi_int)
			core_pm_value |= BIT(core_pm_int_wakeup_en);
		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      core_pm_value);
		dsb();
	}

	return 0;
}
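
/*
 * Cluster-level suspend: if every other cpu in this cluster is already
 * powered down, drop the cluster PLL into slow mode and record
 * PMU_CLST_RET in clst_warmboot_data. The PMU_PWRDN_ST re-read below
 * closes the race where another cpu powers back up while the PLL is
 * being switched; in that case the PLL is put back into normal mode
 * right away.
 */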
static inline void clst_pwr_domain_suspend(plat_local_state_t lvl_state)
{
	uint32_t cpu_id = plat_my_core_pos();
	uint32_t pll_id, clst_st_msk, clst_st_chk_msk, pmu_st;

	assert(cpu_id < PLATFORM_CORE_COUNT);

	if (lvl_state == PLAT_MAX_OFF_STATE) {
		if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT) {
			pll_id = ALPLL_ID;
			clst_st_msk = CLST_L_CPUS_MSK;
		} else {
			pll_id = ABPLL_ID;
			clst_st_msk = CLST_B_CPUS_MSK <<
				      PLATFORM_CLUSTER0_CORE_COUNT;
		}

		clst_st_chk_msk = clst_st_msk & ~(BIT(cpu_id));

		pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);

		pmu_st &= clst_st_msk;

		if (pmu_st == clst_st_chk_msk) {
			mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3),
				      PLL_SLOW_MODE);

			clst_warmboot_data[pll_id] = PMU_CLST_RET;

			pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);
			pmu_st &= clst_st_msk;
			if (pmu_st == clst_st_chk_msk)
				return;
			/*
			 * Another cpu in this cluster has come back up in
			 * the meantime; restore the config at once.
			 */
			mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3),
				      PLL_NOMAL_MODE);
			clst_warmboot_data[pll_id] = 0;
		}
	}
}

static int clst_pwr_domain_resume(plat_local_state_t lvl_state)
{
	uint32_t cpu_id = plat_my_core_pos();
	uint32_t pll_id, pll_st;

	assert(cpu_id < PLATFORM_CORE_COUNT);

	if (lvl_state == PLAT_MAX_OFF_STATE) {
		if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT)
			pll_id = ALPLL_ID;
		else
			pll_id = ABPLL_ID;

		pll_st = mmio_read_32(CRU_BASE + CRU_PLL_CON(pll_id, 3)) >>
			 PLL_MODE_SHIFT;

		if (pll_st != NORMAL_MODE) {
			WARN("%s: clst (%d) is in error mode (%d)\n",
			     __func__, pll_id, pll_st);
			return -1;
		}
	}

	return 0;
}

static void nonboot_cpus_off(void)
{
	uint32_t boot_cpu, cpu;

	boot_cpu = plat_my_core_pos();

	/* turn off non-boot cpus */
	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) {
		if (cpu == boot_cpu)
			continue;
		cpus_power_domain_off(cpu, core_pwr_pd);
	}
}

int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint)
{
	uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr);

	assert(cpu_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG;
	cpuson_entry_point[cpu_id] = entrypoint;
	dsb();

	cpus_power_domain_on(cpu_id);

	return PSCI_E_SUCCESS;
}

int rockchip_soc_cores_pwr_dm_off(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	cpus_power_domain_off(cpu_id, core_pwr_wfi);

	return PSCI_E_SUCCESS;
}

int rockchip_soc_hlvl_pwr_dm_off(uint32_t lvl,
				 plat_local_state_t lvl_state)
{
	switch (lvl) {
	case MPIDR_AFFLVL1:
		clst_pwr_domain_suspend(lvl_state);
		break;
	default:
		break;
	}

	return PSCI_E_SUCCESS;
}

int rockchip_soc_cores_pwr_dm_suspend(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	assert(cpu_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN;
	cpuson_entry_point[cpu_id] = plat_get_sec_entrypoint();
	dsb();

	cpus_power_domain_off(cpu_id, core_pwr_wfi_int);

	return PSCI_E_SUCCESS;
}

int rockchip_soc_hlvl_pwr_dm_suspend(uint32_t lvl, plat_local_state_t lvl_state)
{
	switch (lvl) {
	case MPIDR_AFFLVL1:
		clst_pwr_domain_suspend(lvl_state);
		break;
	default:
		break;
	}

	return PSCI_E_SUCCESS;
}

int rockchip_soc_cores_pwr_dm_on_finish(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
		      CORES_PM_DISABLE);
	return PSCI_E_SUCCESS;
}

int rockchip_soc_hlvl_pwr_dm_on_finish(uint32_t lvl,
				       plat_local_state_t lvl_state)
{
	switch (lvl) {
	case MPIDR_AFFLVL1:
		clst_pwr_domain_resume(lvl_state);
		break;
	default:
		break;
	}

	return PSCI_E_SUCCESS;
}

int rockchip_soc_cores_pwr_dm_resume(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	/* Disable core_pm */
	mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), CORES_PM_DISABLE);

	return PSCI_E_SUCCESS;
}

int rockchip_soc_hlvl_pwr_dm_resume(uint32_t lvl, plat_local_state_t lvl_state)
{
	switch (lvl) {
	case MPIDR_AFFLVL1:
		clst_pwr_domain_resume(lvl_state);
		break;
	default:
		break;
	}

	return PSCI_E_SUCCESS;
}

/**
 * init_pmu_counts - Init timing counts in the PMU register area
 *
 * At various points when we power up or down parts of the system we need
 * a delay to wait for power / clocks to become stable. The PMU has counters
 * to help software do the delay properly. Basically, it works like this:
 * - Software sets up counter values
 * - When software turns on something in the PMU, the counter kicks off
 * - The hardware sets a bit automatically when the counter has finished and
 *   software knows that the initialization is done.
 *
 * It's software's job to set up these counters. The hardware power-on
 * default for these settings is conservative, setting everything to 0x5dc0
 * (750 ms in 32 kHz counts or 1 ms in 24 MHz counts).
 *
 * Note that some of these counters are only really used at suspend/resume
 * time (for instance, that's the only time we turn off/on the oscillator)
 * and others are used during normal runtime (like turning on/off a CPU or
 * GPU), but it doesn't hurt to init everything at boot.
 *
 * Also note that these counters can run off the 32 kHz clock or the 24 MHz
 * clock. While the 24 MHz clock can give us more precision, it's not always
 * available (like when we turn the oscillator off at sleep time). The
 * pmu_use_lf bit (lf: low frequency) only takes effect in power mode.
 * Current understanding is that counts work like this:
 *    IF (pmu_use_lf == 0) || (power_mode_en == 0)
 *      use the 24M OSC for counts
 *    ELSE
 *      use the 32K OSC for counts
 *
 * Notes:
 * - There is a separate bit for the PMU called PMU_24M_EN_CFG. At the
 *   moment we always keep that 0. It apparently chooses between using the
 *   PLL as the source for the PMU vs. the 24M clock. If we ever set it to
 *   1 we should consider how it affects these counts (if at all).
 * - The power_mode_en bit is documented to clear automatically when we
 *   leave "power mode". That's why most counts are on 24M; only timings
 *   used while in "power mode" are on 32k.
 * - In some cases the kernel may override these counts.
 *
 * PMU_STABLE_CNT / PMU_OSC_CNT / PMU_PLLLOCK_CNT are the important counts
 * in power mode, so we need to ensure that they are set up correctly.
 */
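
/*
 * Worked example for the 0x5dc0 default mentioned above (assuming the
 * CYCL_* helpers in soc.h scale linearly, i.e. count = ms * ticks per
 * ms): 0x5dc0 == 24000 counts, and 24000 / 32 == 750 ms of 32 kHz
 * ticks, while 24000 / 24000 == 1 ms of 24 MHz ticks.
 */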

static void init_pmu_counts(void)
{
	/* COUNTS FOR INSIDE POWER MODE */

	/*
	 * From limited testing, we need the PMU stable count >= 2 ms, but
	 * go overkill and choose 30 ms to match testing on past SoCs. Also
	 * let the OSC have 30 ms for stabilization.
	 */
	mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_32K_CNT_MS(30));
	mmio_write_32(PMU_BASE + PMU_OSC_CNT, CYCL_32K_CNT_MS(30));

	/* Unclear what these should be; try 3 ms */
	mmio_write_32(PMU_BASE + PMU_WAKEUP_RST_CLR_CNT, CYCL_32K_CNT_MS(3));

	/* Unclear what this should be, but set the default explicitly */
	mmio_write_32(PMU_BASE + PMU_TIMEOUT_CNT, 0x5dc0);

	/* COUNTS FOR OUTSIDE POWER MODE */

	/* Put something sorta conservative here until we know better */
	mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_24M_CNT_MS(3));
	mmio_write_32(PMU_BASE + PMU_DDRIO_PWRON_CNT, CYCL_24M_CNT_MS(1));
	mmio_write_32(PMU_BASE + PMU_CENTER_PWRDN_CNT, CYCL_24M_CNT_MS(1));
	mmio_write_32(PMU_BASE + PMU_CENTER_PWRUP_CNT, CYCL_24M_CNT_MS(1));

	/*
	 * Enabling PMU_CLR_PERILP shuts down the SRAM, but the M0 code runs
	 * from SRAM and we need it to check whether the cpu has entered the
	 * FSM state. So we must wait for the M0 to finish its work and
	 * enter WFI before the SRAM is shut down. Per the FSM order:
	 * ST_NORMAL->..->ST_SCU_L_PWRDN->..->ST_CENTER_PWRDN->ST_PERILP_PWRDN
	 * adding a delay to the ST_SCU_L_PWRDN step guarantees that the M0
	 * sees the FSM state and enters WFI before PMU_CLR_PERILP takes
	 * effect.
	 */
	mmio_write_32(PMU_BASE + PMU_SCU_L_PWRDN_CNT, CYCL_24M_CNT_MS(5));
	mmio_write_32(PMU_BASE + PMU_SCU_L_PWRUP_CNT, CYCL_24M_CNT_US(1));

	/*
	 * Set CPU/GPU to 1 us.
	 *
	 * NOTE: Even though ATF doesn't configure the GPU we'll still set
	 * up the counts here. After all, ATF controls all these other bits
	 * and also chooses which clock these counters use.
	 */
	mmio_write_32(PMU_BASE + PMU_SCU_B_PWRDN_CNT, CYCL_24M_CNT_US(1));
	mmio_write_32(PMU_BASE + PMU_SCU_B_PWRUP_CNT, CYCL_24M_CNT_US(1));
	mmio_write_32(PMU_BASE + PMU_GPU_PWRDN_CNT, CYCL_24M_CNT_US(1));
	mmio_write_32(PMU_BASE + PMU_GPU_PWRUP_CNT, CYCL_24M_CNT_US(1));
}

static uint32_t clk_ddrc_save;
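
/*
 * Configure the PMU for the deepest system sleep: hand the DDR
 * controller clock over to ABPLL, let hardware idle the CCI and the
 * little-cluster ADB400 bridges, and program a PMU_PWRMODE_CON value
 * that powers down the CPU/SCU/CCI/center/perilp domains, puts the DDR
 * IOs into retention, disables the oscillator and runs the PMU itself
 * from the 32 kHz clock.
 */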
static void sys_slp_config(void)
{
	uint32_t slp_mode_cfg = 0;

	/* keep the clk_ddrc_bpll_src_en gate enabled for the DDRC */
	clk_ddrc_save = mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(3));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(3), WMSK_BIT(1));

	prepare_abpll_for_ddrctrl();
	sram_func_set_ddrctl_pll(ABPLL_ID);

	mmio_write_32(GRF_BASE + GRF_SOC_CON4, CCI_FORCE_WAKEUP);
	mmio_write_32(PMU_BASE + PMU_CCI500_CON,
		      BIT_WITH_WMSK(PMU_CLR_PREQ_CCI500_HW) |
		      BIT_WITH_WMSK(PMU_CLR_QREQ_CCI500_HW) |
		      BIT_WITH_WMSK(PMU_QGATING_CCI500_CFG));

	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      BIT_WITH_WMSK(PMU_CLR_CORE_L_HW) |
		      BIT_WITH_WMSK(PMU_CLR_CORE_L_2GIC_HW) |
		      BIT_WITH_WMSK(PMU_CLR_GIC2_CORE_L_HW));

	slp_mode_cfg = BIT(PMU_PWR_MODE_EN) |
		       BIT(PMU_POWER_OFF_REQ_CFG) |
		       BIT(PMU_CPU0_PD_EN) |
		       BIT(PMU_L2_FLUSH_EN) |
		       BIT(PMU_L2_IDLE_EN) |
		       BIT(PMU_SCU_PD_EN) |
		       BIT(PMU_CCI_PD_EN) |
		       BIT(PMU_CLK_CORE_SRC_GATE_EN) |
		       BIT(PMU_ALIVE_USE_LF) |
		       BIT(PMU_SREF0_ENTER_EN) |
		       BIT(PMU_SREF1_ENTER_EN) |
		       BIT(PMU_DDRC0_GATING_EN) |
		       BIT(PMU_DDRC1_GATING_EN) |
		       BIT(PMU_DDRIO0_RET_EN) |
		       BIT(PMU_DDRIO1_RET_EN) |
		       BIT(PMU_DDRIO_RET_HW_DE_REQ) |
		       BIT(PMU_CENTER_PD_EN) |
		       BIT(PMU_PERILP_PD_EN) |
		       BIT(PMU_CLK_PERILP_SRC_GATE_EN) |
		       BIT(PMU_PLL_PD_EN) |
		       BIT(PMU_CLK_CENTER_SRC_GATE_EN) |
		       BIT(PMU_OSC_DIS) |
		       BIT(PMU_PMU_USE_LF);

	mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, BIT(PMU_GPIO_WKUP_EN));
	mmio_write_32(PMU_BASE + PMU_PWRMODE_CON, slp_mode_cfg);

	mmio_write_32(PMU_BASE + PMU_PLL_CON, PLL_PD_HW);
	mmio_write_32(PMUGRF_BASE + PMUGRF_SOC_CON0, EXTERNAL_32K);
	mmio_write_32(PMUGRF_BASE, IOMUX_CLK_32K); /* 32k iomux */
}

static void set_hw_idle(uint32_t hw_idle)
{
	mmio_setbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle);
}

static void clr_hw_idle(uint32_t hw_idle)
{
	mmio_clrbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle);
}

static uint32_t iomux_status[12];
static uint32_t pull_mode_status[12];
static uint32_t gpio_direction[3];
static uint32_t gpio_2_4_clk_gate;
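
/*
 * The apio suspend path below saves the gpio2 ~ gpio4 iomux, pull-mode
 * and direction registers, then switches every bank the platform
 * requested (suspend_apio->apio1 ~ apio5) to plain gpio inputs with
 * pulls disabled; resume_apio() writes the saved state back.
 */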
static void suspend_apio(void)
{
	struct apio_info *suspend_apio;
	int i;

	suspend_apio = plat_get_rockchip_suspend_apio();

	if (!suspend_apio)
		return;

	/* save gpio2 ~ gpio4 iomux and pull mode */
	for (i = 0; i < 12; i++) {
		iomux_status[i] = mmio_read_32(GRF_BASE +
					       GRF_GPIO2A_IOMUX + i * 4);
		pull_mode_status[i] = mmio_read_32(GRF_BASE +
						   GRF_GPIO2A_P + i * 4);
	}

	/* store gpio2 ~ gpio4 clock gate state */
	gpio_2_4_clk_gate = (mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(31)) >>
			     PCLK_GPIO2_GATE_SHIFT) & 0x07;

	/* enable gpio2 ~ gpio4 clock gate */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
		      BITS_WITH_WMASK(0, 0x07, PCLK_GPIO2_GATE_SHIFT));

	/* save gpio2 ~ gpio4 direction */
	gpio_direction[0] = mmio_read_32(GPIO2_BASE + 0x04);
	gpio_direction[1] = mmio_read_32(GPIO3_BASE + 0x04);
	gpio_direction[2] = mmio_read_32(GPIO4_BASE + 0x04);

	/* apio1 covers gpio3a0 ~ gpio3c7 */
	if (suspend_apio->apio1) {

		/* set gpio3a0 ~ gpio3c7 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO3A_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO3B_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO3C_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio3a0 ~ gpio3c7 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO3A_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO3B_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO3C_P, REG_SOC_WMSK | 0);

		/* set gpio3a0 ~ gpio3c7 to input */
		mmio_clrbits_32(GPIO3_BASE + 0x04, 0x00ffffff);
	}

	/* apio2 covers gpio2a0 ~ gpio2b4 */
	if (suspend_apio->apio2) {

		/* set gpio2a0 ~ gpio2b4 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO2B_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio2a0 ~ gpio2b4 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO2A_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO2B_P, REG_SOC_WMSK | 0);

		/* set gpio2a0 ~ gpio2b4 to input */
		mmio_clrbits_32(GPIO2_BASE + 0x04, 0x00001fff);
	}

	/* apio3 covers gpio2c0 ~ gpio2d4 */
	if (suspend_apio->apio3) {

		/* set gpio2c0 ~ gpio2d4 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO2C_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO2D_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio2c0 ~ gpio2d4 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO2C_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO2D_P, REG_SOC_WMSK | 0);

		/* set gpio2c0 ~ gpio2d4 to input */
		mmio_clrbits_32(GPIO2_BASE + 0x04, 0x1fff0000);
	}

	/* apio4 covers gpio4c0 ~ gpio4c7, gpio4d0 ~ gpio4d6 */
	if (suspend_apio->apio4) {

		/* set gpio4c0 ~ gpio4d6 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO4D_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio4c0 ~ gpio4d6 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO4C_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO4D_P, REG_SOC_WMSK | 0);

		/* set gpio4c0 ~ gpio4d6 to input */
		mmio_clrbits_32(GPIO4_BASE + 0x04, 0x7fff0000);
	}

	/* apio5 covers gpio3d0 ~ gpio3d7, gpio4a0 ~ gpio4a7 */
	if (suspend_apio->apio5) {
		/* set gpio3d0 ~ gpio4a7 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO3D_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO4A_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio3d0 ~ gpio4a7 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO3D_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO4A_P, REG_SOC_WMSK | 0);

		/* set gpio3d0 ~ gpio4a7 to input */
		mmio_clrbits_32(GPIO3_BASE + 0x04, 0xff000000);
		mmio_clrbits_32(GPIO4_BASE + 0x04, 0x000000ff);
	}
}

static void resume_apio(void)
{
	struct apio_info *suspend_apio;
	int i;

	suspend_apio = plat_get_rockchip_suspend_apio();

	if (!suspend_apio)
		return;

	for (i = 0; i < 12; i++) {
		mmio_write_32(GRF_BASE + GRF_GPIO2A_P + i * 4,
			      REG_SOC_WMSK | pull_mode_status[i]);
		mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX + i * 4,
			      REG_SOC_WMSK | iomux_status[i]);
	}

	/* set gpio2 ~ gpio4 direction back to the stored value */
	mmio_write_32(GPIO2_BASE + 0x04, gpio_direction[0]);
	mmio_write_32(GPIO3_BASE + 0x04, gpio_direction[1]);
	mmio_write_32(GPIO4_BASE + 0x04, gpio_direction[2]);

	/* set gpio2 ~ gpio4 clock gate back to the stored value */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
		      BITS_WITH_WMASK(gpio_2_4_clk_gate, 0x07,
				      PCLK_GPIO2_GATE_SHIFT));
}

static void suspend_gpio(void)
{
	struct gpio_info *suspend_gpio;
	uint32_t count;
	int i;

	suspend_gpio = plat_get_rockchip_suspend_gpio(&count);

	for (i = 0; i < count; i++) {
		gpio_set_value(suspend_gpio[i].index, suspend_gpio[i].polarity);
		gpio_set_direction(suspend_gpio[i].index, GPIO_DIR_OUT);
		udelay(1);
	}
}

static void resume_gpio(void)
{
	struct gpio_info *suspend_gpio;
	uint32_t count;
	int i;

	suspend_gpio = plat_get_rockchip_suspend_gpio(&count);

	for (i = count - 1; i >= 0; i--) {
		gpio_set_value(suspend_gpio[i].index,
			       !suspend_gpio[i].polarity);
		gpio_set_direction(suspend_gpio[i].index, GPIO_DIR_OUT);
		udelay(1);
	}
}

static void m0_configure_suspend(void)
{
	/* set PARAM to M0_FUNC_SUSPEND */
	mmio_write_32(M0_PARAM_ADDR + PARAM_M0_FUNC, M0_FUNC_SUSPEND);
}
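
/*
 * store_sram packs the three SRAM regions back to back: sram.text at
 * offset 0, then sram.data, then the incbin blob. sram_restore() must
 * unpack them with the same offsets, so keep the two functions in sync.
 */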
void sram_save(void)
{
	size_t text_size = (char *)&__bl31_sram_text_real_end -
			   (char *)&__bl31_sram_text_start;
	size_t data_size = (char *)&__bl31_sram_data_real_end -
			   (char *)&__bl31_sram_data_start;
	size_t incbin_size = (char *)&__sram_incbin_real_end -
			     (char *)&__sram_incbin_start;

	memcpy(&store_sram[0], &__bl31_sram_text_start, text_size);
	memcpy(&store_sram[text_size], &__bl31_sram_data_start, data_size);
	memcpy(&store_sram[text_size + data_size], &__sram_incbin_start,
	       incbin_size);
}

void sram_restore(void)
{
	size_t text_size = (char *)&__bl31_sram_text_real_end -
			   (char *)&__bl31_sram_text_start;
	size_t data_size = (char *)&__bl31_sram_data_real_end -
			   (char *)&__bl31_sram_data_start;
	size_t incbin_size = (char *)&__sram_incbin_real_end -
			     (char *)&__sram_incbin_start;

	memcpy(&__bl31_sram_text_start, &store_sram[0], text_size);
	memcpy(&__bl31_sram_data_start, &store_sram[text_size], data_size);
	memcpy(&__sram_incbin_start, &store_sram[text_size + data_size],
	       incbin_size);
}

int rockchip_soc_sys_pwr_dm_suspend(void)
{
	uint32_t wait_cnt = 0;
	uint32_t status = 0;

	ddr_prepare_for_sys_suspend();
	dmc_save();
	pmu_scu_b_pwrdn();

	pmu_power_domains_suspend();
	set_hw_idle(BIT(PMU_CLR_CENTER1) |
		    BIT(PMU_CLR_ALIVE) |
		    BIT(PMU_CLR_MSCH0) |
		    BIT(PMU_CLR_MSCH1) |
		    BIT(PMU_CLR_CCIM0) |
		    BIT(PMU_CLR_CCIM1) |
		    BIT(PMU_CLR_CENTER) |
		    BIT(PMU_CLR_PERILP) |
		    BIT(PMU_CLR_PERILPM0) |
		    BIT(PMU_CLR_GIC));

	sys_slp_config();

	m0_configure_suspend();
	m0_start();

	pmu_sgrf_rst_hld();

	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      ((uintptr_t)&pmu_cpuson_entrypoint >>
		       CPU_BOOT_ADDR_ALIGN) | CPU_BOOT_ADDR_WMASK);

	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) |
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_SW) |
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_GIC2_CORE_B_SW));
	dsb();
	status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) |
		 BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) |
		 BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST);
	while ((mmio_read_32(PMU_BASE +
			     PMU_ADB400_ST) & status) != status) {
		wait_cnt++;
		if (wait_cnt >= MAX_WAIT_COUNT) {
			ERROR("%s: wait adb400 (%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_ADB400_ST));
			panic();
		}
	}
	mmio_setbits_32(PMU_BASE + PMU_PWRDN_CON, BIT(PMU_SCU_B_PWRDWN_EN));

	secure_watchdog_disable();

	/*
	 * Disabling the PLLs and PWMs is one of the last steps before
	 * entering WFI, which completes the suspend sequence.
	 */
	disable_dvfs_plls();
	disable_pwms();
	disable_nodvfs_plls();

	suspend_apio();
	suspend_gpio();

	sram_save();
	return 0;
}
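
/*
 * System resume undoes rockchip_soc_sys_pwr_dm_suspend() in roughly
 * reverse order: bring the IOs, PWMs and PLLs back first, then release
 * the ADB400 power-down requests and the big-cluster SCU power-down
 * before restoring the power domains and handing the DDR controller
 * clock back to DPLL.
 */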
int rockchip_soc_sys_pwr_dm_resume(void)
{
	uint32_t wait_cnt = 0;
	uint32_t status = 0;

	resume_apio();
	resume_gpio();
	enable_nodvfs_plls();
	enable_pwms();
	/* PWM regulators take time to come up; give 300us to be safe. */
	udelay(300);
	enable_dvfs_plls();

	secure_watchdog_enable();

	/* restore clk_ddrc_bpll_src_en gate */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(3),
		      BITS_WITH_WMASK(clk_ddrc_save, 0xff, 0));

	/*
	 * The wakeup status does not clear by itself; we need to clear it
	 * manually, otherwise we would always see a stale wakeup interrupt
	 * the next time around.
	 *
	 * NOTE: If the kernel needs to query this, we might want to stash
	 * it somewhere.
	 */
	mmio_write_32(PMU_BASE + PMU_WAKEUP_STATUS, 0xffffffff);
	mmio_write_32(PMU_BASE + PMU_WKUP_CFG4, 0x00);

	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	mmio_write_32(PMU_BASE + PMU_CCI500_CON,
		      WMSK_BIT(PMU_CLR_PREQ_CCI500_HW) |
		      WMSK_BIT(PMU_CLR_QREQ_CCI500_HW) |
		      WMSK_BIT(PMU_QGATING_CCI500_CFG));
	dsb();
	mmio_clrbits_32(PMU_BASE + PMU_PWRDN_CON,
			BIT(PMU_SCU_B_PWRDWN_EN));

	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) |
		      WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_SW) |
		      WMSK_BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW) |
		      WMSK_BIT(PMU_CLR_CORE_L_HW) |
		      WMSK_BIT(PMU_CLR_CORE_L_2GIC_HW) |
		      WMSK_BIT(PMU_CLR_GIC2_CORE_L_HW));

	status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) |
		 BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) |
		 BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST);

	while ((mmio_read_32(PMU_BASE +
			     PMU_ADB400_ST) & status)) {
		wait_cnt++;
		if (wait_cnt >= MAX_WAIT_COUNT) {
			ERROR("%s: wait adb400 (%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_ADB400_ST));
			panic();
		}
	}

	pmu_sgrf_rst_hld_release();
	pmu_scu_b_pwrup();
	pmu_power_domains_resume();

	restore_dpll();
	sram_func_set_ddrctl_pll(DPLL_ID);
	restore_abpll();

	clr_hw_idle(BIT(PMU_CLR_CENTER1) |
		    BIT(PMU_CLR_ALIVE) |
		    BIT(PMU_CLR_MSCH0) |
		    BIT(PMU_CLR_MSCH1) |
		    BIT(PMU_CLR_CCIM0) |
		    BIT(PMU_CLR_CCIM1) |
		    BIT(PMU_CLR_CENTER) |
		    BIT(PMU_CLR_PERILP) |
		    BIT(PMU_CLR_PERILPM0) |
		    BIT(PMU_CLR_GIC));

	plat_rockchip_gic_cpuif_enable();
	m0_stop();

	ddr_prepare_for_sys_resume();

	return 0;
}

void __dead2 rockchip_soc_soft_reset(void)
{
	struct gpio_info *rst_gpio;

	rst_gpio = plat_get_rockchip_gpio_reset();

	if (rst_gpio) {
		gpio_set_direction(rst_gpio->index, GPIO_DIR_OUT);
		gpio_set_value(rst_gpio->index, rst_gpio->polarity);
	} else {
		soc_global_soft_reset();
	}

	while (1)
		;
}

void __dead2 rockchip_soc_system_off(void)
{
	struct gpio_info *poweroff_gpio;

	poweroff_gpio = plat_get_rockchip_gpio_poweroff();

	if (poweroff_gpio) {
		/*
		 * If the tsadc over-temperature pin (GPIO1A6) is used as
		 * the shutdown gpio, its iomux needs to be set back to the
		 * gpio function first.
		 */
		if (poweroff_gpio->index == TSADC_INT_PIN) {
			mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO1A_IOMUX,
				      GPIO1A6_IOMUX);
		}
		gpio_set_direction(poweroff_gpio->index, GPIO_DIR_OUT);
		gpio_set_value(poweroff_gpio->index, poweroff_gpio->polarity);
	} else {
		WARN("Nothing to do for system off: no poweroff gpio\n");
	}

	while (1)
		;
}

void rockchip_plat_mmu_el3(void)
{
	size_t sram_size;

	/* sram.text size */
	sram_size = (char *)&__bl31_sram_text_end -
		    (char *)&__bl31_sram_text_start;
	mmap_add_region((unsigned long)&__bl31_sram_text_start,
			(unsigned long)&__bl31_sram_text_start,
			sram_size, MT_MEMORY | MT_RO | MT_SECURE);

	/* sram.data size */
	sram_size = (char *)&__bl31_sram_data_end -
		    (char *)&__bl31_sram_data_start;
	mmap_add_region((unsigned long)&__bl31_sram_data_start,
			(unsigned long)&__bl31_sram_data_start,
			sram_size, MT_MEMORY | MT_RW | MT_SECURE);

	sram_size = (char *)&__bl31_sram_stack_end -
		    (char *)&__bl31_sram_stack_start;
	mmap_add_region((unsigned long)&__bl31_sram_stack_start,
			(unsigned long)&__bl31_sram_stack_start,
			sram_size, MT_MEMORY | MT_RW | MT_SECURE);

	sram_size = (char *)&__sram_incbin_end - (char *)&__sram_incbin_start;
	mmap_add_region((unsigned long)&__sram_incbin_start,
			(unsigned long)&__sram_incbin_start,
			sram_size, MT_NON_CACHEABLE | MT_RW | MT_SECURE);
}

void plat_rockchip_pmu_init(void)
{
	uint32_t cpu;

	rockchip_pd_lock_init();

	/*
	 * The warm boot address register only holds a 32-bit value, so
	 * the entry point is stored as 32 bits.
	 */
	cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;

	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
		cpuson_flags[cpu] = 0;

	for (cpu = 0; cpu < PLATFORM_CLUSTER_COUNT; cpu++)
		clst_warmboot_data[cpu] = 0;

	/* config cpu's warm boot address */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	mmio_write_32(PMU_BASE + PMU_NOC_AUTO_ENA, NOC_AUTO_ENABLE);

	/*
	 * Enable the Schmitt trigger for a cleaner 32 kHz input signal,
	 * which is important for suspend/resume reliability among other
	 * things.
	 */
	mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO0A_SMT, GPIO0A0_SMT_ENABLE);

	init_pmu_counts();

	nonboot_cpus_off();

	INFO("%s(%d): pd status %x\n", __func__, __LINE__,
	     mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}