/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bakery_lock.h>
#include <debug.h>
#include <delay_timer.h>
#include <dfs.h>
#include <errno.h>
#include <gpio.h>
#include <mmio.h>
#include <m0_ctl.h>
#include <platform.h>
#include <platform_def.h>
#include <plat_params.h>
#include <plat_private.h>
#include <rk3399_def.h>
#include <secure.h>
#include <soc.h>
#include <string.h>
#include <pmu.h>
#include <pmu_com.h>
#include <pwm.h>
#include <bl31.h>
#include <suspend.h>

DEFINE_BAKERY_LOCK(rockchip_pd_lock);

static uint32_t cpu_warm_boot_addr;
static char store_sram[SRAM_BIN_LIMIT + SRAM_TEXT_LIMIT + SRAM_DATA_LIMIT];

/*
 * There are two ways to power a core on or off:
 * 1) Switch its power domain on or off via the PMU_PWRDN_CON register;
 *    this is the core_pwr_pd mode.
 * 2) Enable core power management in the PMU_CORE_PM_CON register; the
 *    core's power domain is then powered off automatically once the core
 *    enters WFI. This is the core_pwr_wfi or core_pwr_wfi_int mode.
 * core_pm_cfg_info records which method is currently in use for each core.
44 */ 45 46 static uint32_t core_pm_cfg_info[PLATFORM_CORE_COUNT] 47 #if USE_COHERENT_MEM 48 __attribute__ ((section("tzfw_coherent_mem"))) 49 #endif 50 ;/* coheront */ 51 52 static void pmu_bus_idle_req(uint32_t bus, uint32_t state) 53 { 54 uint32_t bus_id = BIT(bus); 55 uint32_t bus_req; 56 uint32_t wait_cnt = 0; 57 uint32_t bus_state, bus_ack; 58 59 if (state) 60 bus_req = BIT(bus); 61 else 62 bus_req = 0; 63 64 mmio_clrsetbits_32(PMU_BASE + PMU_BUS_IDLE_REQ, bus_id, bus_req); 65 66 do { 67 bus_state = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) & bus_id; 68 bus_ack = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK) & bus_id; 69 wait_cnt++; 70 } while ((bus_state != bus_req || bus_ack != bus_req) && 71 (wait_cnt < MAX_WAIT_COUNT)); 72 73 if (bus_state != bus_req || bus_ack != bus_req) { 74 INFO("%s:st=%x(%x)\n", __func__, 75 mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST), 76 bus_state); 77 INFO("%s:st=%x(%x)\n", __func__, 78 mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK), 79 bus_ack); 80 } 81 } 82 83 struct pmu_slpdata_s pmu_slpdata; 84 85 static void qos_save(void) 86 { 87 if (pmu_power_domain_st(PD_GPU) == pmu_pd_on) 88 RESTORE_QOS(pmu_slpdata.gpu_qos, GPU); 89 if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) { 90 RESTORE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0); 91 RESTORE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1); 92 } 93 if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) { 94 RESTORE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0); 95 RESTORE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1); 96 } 97 if (pmu_power_domain_st(PD_VO) == pmu_pd_on) { 98 RESTORE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R); 99 RESTORE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W); 100 RESTORE_QOS(pmu_slpdata.vop_little, VOP_LITTLE); 101 } 102 if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on) 103 RESTORE_QOS(pmu_slpdata.hdcp_qos, HDCP); 104 if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on) 105 RESTORE_QOS(pmu_slpdata.gmac_qos, GMAC); 106 if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) { 107 RESTORE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0); 108 
RESTORE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1); 109 } 110 if (pmu_power_domain_st(PD_SD) == pmu_pd_on) 111 RESTORE_QOS(pmu_slpdata.sdmmc_qos, SDMMC); 112 if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on) 113 RESTORE_QOS(pmu_slpdata.emmc_qos, EMMC); 114 if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on) 115 RESTORE_QOS(pmu_slpdata.sdio_qos, SDIO); 116 if (pmu_power_domain_st(PD_GIC) == pmu_pd_on) 117 RESTORE_QOS(pmu_slpdata.gic_qos, GIC); 118 if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) { 119 RESTORE_QOS(pmu_slpdata.rga_r_qos, RGA_R); 120 RESTORE_QOS(pmu_slpdata.rga_w_qos, RGA_W); 121 } 122 if (pmu_power_domain_st(PD_IEP) == pmu_pd_on) 123 RESTORE_QOS(pmu_slpdata.iep_qos, IEP); 124 if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) { 125 RESTORE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0); 126 RESTORE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1); 127 } 128 if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) { 129 RESTORE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0); 130 RESTORE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1); 131 RESTORE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP); 132 } 133 if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) { 134 RESTORE_QOS(pmu_slpdata.dmac0_qos, DMAC0); 135 RESTORE_QOS(pmu_slpdata.dmac1_qos, DMAC1); 136 RESTORE_QOS(pmu_slpdata.dcf_qos, DCF); 137 RESTORE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0); 138 RESTORE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1); 139 RESTORE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP); 140 RESTORE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP); 141 RESTORE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1); 142 } 143 if (pmu_power_domain_st(PD_VDU) == pmu_pd_on) 144 RESTORE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0); 145 if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) { 146 RESTORE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R); 147 RESTORE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W); 148 } 149 } 150 151 static void qos_restore(void) 152 { 153 if (pmu_power_domain_st(PD_GPU) == pmu_pd_on) 154 SAVE_QOS(pmu_slpdata.gpu_qos, GPU); 155 if 
(pmu_power_domain_st(PD_ISP0) == pmu_pd_on) { 156 SAVE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0); 157 SAVE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1); 158 } 159 if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) { 160 SAVE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0); 161 SAVE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1); 162 } 163 if (pmu_power_domain_st(PD_VO) == pmu_pd_on) { 164 SAVE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R); 165 SAVE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W); 166 SAVE_QOS(pmu_slpdata.vop_little, VOP_LITTLE); 167 } 168 if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on) 169 SAVE_QOS(pmu_slpdata.hdcp_qos, HDCP); 170 if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on) 171 SAVE_QOS(pmu_slpdata.gmac_qos, GMAC); 172 if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) { 173 SAVE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0); 174 SAVE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1); 175 } 176 if (pmu_power_domain_st(PD_SD) == pmu_pd_on) 177 SAVE_QOS(pmu_slpdata.sdmmc_qos, SDMMC); 178 if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on) 179 SAVE_QOS(pmu_slpdata.emmc_qos, EMMC); 180 if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on) 181 SAVE_QOS(pmu_slpdata.sdio_qos, SDIO); 182 if (pmu_power_domain_st(PD_GIC) == pmu_pd_on) 183 SAVE_QOS(pmu_slpdata.gic_qos, GIC); 184 if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) { 185 SAVE_QOS(pmu_slpdata.rga_r_qos, RGA_R); 186 SAVE_QOS(pmu_slpdata.rga_w_qos, RGA_W); 187 } 188 if (pmu_power_domain_st(PD_IEP) == pmu_pd_on) 189 SAVE_QOS(pmu_slpdata.iep_qos, IEP); 190 if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) { 191 SAVE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0); 192 SAVE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1); 193 } 194 if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) { 195 SAVE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0); 196 SAVE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1); 197 SAVE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP); 198 } 199 if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) { 200 SAVE_QOS(pmu_slpdata.dmac0_qos, DMAC0); 201 SAVE_QOS(pmu_slpdata.dmac1_qos, DMAC1); 202 
SAVE_QOS(pmu_slpdata.dcf_qos, DCF); 203 SAVE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0); 204 SAVE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1); 205 SAVE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP); 206 SAVE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP); 207 SAVE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1); 208 } 209 if (pmu_power_domain_st(PD_VDU) == pmu_pd_on) 210 SAVE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0); 211 if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) { 212 SAVE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R); 213 SAVE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W); 214 } 215 } 216 217 static int pmu_set_power_domain(uint32_t pd_id, uint32_t pd_state) 218 { 219 uint32_t state; 220 221 if (pmu_power_domain_st(pd_id) == pd_state) 222 goto out; 223 224 if (pd_state == pmu_pd_on) 225 pmu_power_domain_ctr(pd_id, pd_state); 226 227 state = (pd_state == pmu_pd_off) ? BUS_IDLE : BUS_ACTIVE; 228 229 switch (pd_id) { 230 case PD_GPU: 231 pmu_bus_idle_req(BUS_ID_GPU, state); 232 break; 233 case PD_VIO: 234 pmu_bus_idle_req(BUS_ID_VIO, state); 235 break; 236 case PD_ISP0: 237 pmu_bus_idle_req(BUS_ID_ISP0, state); 238 break; 239 case PD_ISP1: 240 pmu_bus_idle_req(BUS_ID_ISP1, state); 241 break; 242 case PD_VO: 243 pmu_bus_idle_req(BUS_ID_VOPB, state); 244 pmu_bus_idle_req(BUS_ID_VOPL, state); 245 break; 246 case PD_HDCP: 247 pmu_bus_idle_req(BUS_ID_HDCP, state); 248 break; 249 case PD_TCPD0: 250 break; 251 case PD_TCPD1: 252 break; 253 case PD_GMAC: 254 pmu_bus_idle_req(BUS_ID_GMAC, state); 255 break; 256 case PD_CCI: 257 pmu_bus_idle_req(BUS_ID_CCIM0, state); 258 pmu_bus_idle_req(BUS_ID_CCIM1, state); 259 break; 260 case PD_SD: 261 pmu_bus_idle_req(BUS_ID_SD, state); 262 break; 263 case PD_EMMC: 264 pmu_bus_idle_req(BUS_ID_EMMC, state); 265 break; 266 case PD_EDP: 267 pmu_bus_idle_req(BUS_ID_EDP, state); 268 break; 269 case PD_SDIOAUDIO: 270 pmu_bus_idle_req(BUS_ID_SDIOAUDIO, state); 271 break; 272 case PD_GIC: 273 pmu_bus_idle_req(BUS_ID_GIC, state); 274 break; 275 case PD_RGA: 276 
pmu_bus_idle_req(BUS_ID_RGA, state); 277 break; 278 case PD_VCODEC: 279 pmu_bus_idle_req(BUS_ID_VCODEC, state); 280 break; 281 case PD_VDU: 282 pmu_bus_idle_req(BUS_ID_VDU, state); 283 break; 284 case PD_IEP: 285 pmu_bus_idle_req(BUS_ID_IEP, state); 286 break; 287 case PD_USB3: 288 pmu_bus_idle_req(BUS_ID_USB3, state); 289 break; 290 case PD_PERIHP: 291 pmu_bus_idle_req(BUS_ID_PERIHP, state); 292 break; 293 default: 294 break; 295 } 296 297 if (pd_state == pmu_pd_off) 298 pmu_power_domain_ctr(pd_id, pd_state); 299 300 out: 301 return 0; 302 } 303 304 static uint32_t pmu_powerdomain_state; 305 306 static void pmu_power_domains_suspend(void) 307 { 308 clk_gate_con_save(); 309 clk_gate_con_disable(); 310 qos_save(); 311 pmu_powerdomain_state = mmio_read_32(PMU_BASE + PMU_PWRDN_ST); 312 pmu_set_power_domain(PD_GPU, pmu_pd_off); 313 pmu_set_power_domain(PD_TCPD0, pmu_pd_off); 314 pmu_set_power_domain(PD_TCPD1, pmu_pd_off); 315 pmu_set_power_domain(PD_VO, pmu_pd_off); 316 pmu_set_power_domain(PD_ISP0, pmu_pd_off); 317 pmu_set_power_domain(PD_ISP1, pmu_pd_off); 318 pmu_set_power_domain(PD_HDCP, pmu_pd_off); 319 pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_off); 320 pmu_set_power_domain(PD_GMAC, pmu_pd_off); 321 pmu_set_power_domain(PD_EDP, pmu_pd_off); 322 pmu_set_power_domain(PD_IEP, pmu_pd_off); 323 pmu_set_power_domain(PD_RGA, pmu_pd_off); 324 pmu_set_power_domain(PD_VCODEC, pmu_pd_off); 325 pmu_set_power_domain(PD_VDU, pmu_pd_off); 326 clk_gate_con_restore(); 327 } 328 329 static void pmu_power_domains_resume(void) 330 { 331 clk_gate_con_save(); 332 clk_gate_con_disable(); 333 if (!(pmu_powerdomain_state & BIT(PD_VDU))) 334 pmu_set_power_domain(PD_VDU, pmu_pd_on); 335 if (!(pmu_powerdomain_state & BIT(PD_VCODEC))) 336 pmu_set_power_domain(PD_VCODEC, pmu_pd_on); 337 if (!(pmu_powerdomain_state & BIT(PD_RGA))) 338 pmu_set_power_domain(PD_RGA, pmu_pd_on); 339 if (!(pmu_powerdomain_state & BIT(PD_IEP))) 340 pmu_set_power_domain(PD_IEP, pmu_pd_on); 341 if 
(!(pmu_powerdomain_state & BIT(PD_EDP))) 342 pmu_set_power_domain(PD_EDP, pmu_pd_on); 343 if (!(pmu_powerdomain_state & BIT(PD_GMAC))) 344 pmu_set_power_domain(PD_GMAC, pmu_pd_on); 345 if (!(pmu_powerdomain_state & BIT(PD_SDIOAUDIO))) 346 pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_on); 347 if (!(pmu_powerdomain_state & BIT(PD_HDCP))) 348 pmu_set_power_domain(PD_HDCP, pmu_pd_on); 349 if (!(pmu_powerdomain_state & BIT(PD_ISP1))) 350 pmu_set_power_domain(PD_ISP1, pmu_pd_on); 351 if (!(pmu_powerdomain_state & BIT(PD_ISP0))) 352 pmu_set_power_domain(PD_ISP0, pmu_pd_on); 353 if (!(pmu_powerdomain_state & BIT(PD_VO))) 354 pmu_set_power_domain(PD_VO, pmu_pd_on); 355 if (!(pmu_powerdomain_state & BIT(PD_TCPD1))) 356 pmu_set_power_domain(PD_TCPD1, pmu_pd_on); 357 if (!(pmu_powerdomain_state & BIT(PD_TCPD0))) 358 pmu_set_power_domain(PD_TCPD0, pmu_pd_on); 359 if (!(pmu_powerdomain_state & BIT(PD_GPU))) 360 pmu_set_power_domain(PD_GPU, pmu_pd_on); 361 qos_restore(); 362 clk_gate_con_restore(); 363 } 364 365 void rk3399_flash_l2_b(void) 366 { 367 uint32_t wait_cnt = 0; 368 369 mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B)); 370 dsb(); 371 372 while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) & 373 BIT(L2_FLUSHDONE_CLUSTER_B))) { 374 wait_cnt++; 375 if (wait_cnt >= MAX_WAIT_COUNT) 376 WARN("%s:reg %x,wait\n", __func__, 377 mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST)); 378 } 379 380 mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B)); 381 } 382 383 static void pmu_scu_b_pwrdn(void) 384 { 385 uint32_t wait_cnt = 0; 386 387 if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) & 388 (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) != 389 (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) { 390 ERROR("%s: not all cpus is off\n", __func__); 391 return; 392 } 393 394 rk3399_flash_l2_b(); 395 396 mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG)); 397 398 while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) & 399 
BIT(STANDBY_BY_WFIL2_CLUSTER_B))) { 400 wait_cnt++; 401 if (wait_cnt >= MAX_WAIT_COUNT) 402 ERROR("%s:wait cluster-b l2(%x)\n", __func__, 403 mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST)); 404 } 405 } 406 407 static void pmu_scu_b_pwrup(void) 408 { 409 mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG)); 410 } 411 412 static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id) 413 { 414 assert(cpu_id < PLATFORM_CORE_COUNT); 415 return core_pm_cfg_info[cpu_id]; 416 } 417 418 static inline void set_cpus_pwr_domain_cfg_info(uint32_t cpu_id, uint32_t value) 419 { 420 assert(cpu_id < PLATFORM_CORE_COUNT); 421 core_pm_cfg_info[cpu_id] = value; 422 #if !USE_COHERENT_MEM 423 flush_dcache_range((uintptr_t)&core_pm_cfg_info[cpu_id], 424 sizeof(uint32_t)); 425 #endif 426 } 427 428 static int cpus_power_domain_on(uint32_t cpu_id) 429 { 430 uint32_t cfg_info; 431 uint32_t cpu_pd = PD_CPUL0 + cpu_id; 432 /* 433 * There are two ways to powering on or off on core. 434 * 1) Control it power domain into on or off in PMU_PWRDN_CON reg 435 * 2) Enable the core power manage in PMU_CORE_PM_CON reg, 436 * then, if the core enter into wfi, it power domain will be 437 * powered off automatically. 
438 */ 439 440 cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id); 441 442 if (cfg_info == core_pwr_pd) { 443 /* disable core_pm cfg */ 444 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 445 CORES_PM_DISABLE); 446 /* if the cores have be on, power off it firstly */ 447 if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) { 448 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 0); 449 pmu_power_domain_ctr(cpu_pd, pmu_pd_off); 450 } 451 452 pmu_power_domain_ctr(cpu_pd, pmu_pd_on); 453 } else { 454 if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) { 455 WARN("%s: cpu%d is not in off,!\n", __func__, cpu_id); 456 return -EINVAL; 457 } 458 459 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 460 BIT(core_pm_sft_wakeup_en)); 461 dsb(); 462 } 463 464 return 0; 465 } 466 467 static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg) 468 { 469 uint32_t cpu_pd; 470 uint32_t core_pm_value; 471 472 cpu_pd = PD_CPUL0 + cpu_id; 473 if (pmu_power_domain_st(cpu_pd) == pmu_pd_off) 474 return 0; 475 476 if (pd_cfg == core_pwr_pd) { 477 if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK)) 478 return -EINVAL; 479 480 /* disable core_pm cfg */ 481 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 482 CORES_PM_DISABLE); 483 484 set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg); 485 pmu_power_domain_ctr(cpu_pd, pmu_pd_off); 486 } else { 487 set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg); 488 489 core_pm_value = BIT(core_pm_en); 490 if (pd_cfg == core_pwr_wfi_int) 491 core_pm_value |= BIT(core_pm_int_wakeup_en); 492 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 493 core_pm_value); 494 dsb(); 495 } 496 497 return 0; 498 } 499 500 static inline void clst_pwr_domain_suspend(plat_local_state_t lvl_state) 501 { 502 uint32_t cpu_id = plat_my_core_pos(); 503 uint32_t pll_id, clst_st_msk, clst_st_chk_msk, pmu_st; 504 505 assert(cpu_id < PLATFORM_CORE_COUNT); 506 507 if (lvl_state == PLAT_MAX_OFF_STATE) { 508 if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT) { 509 pll_id = ALPLL_ID; 510 clst_st_msk = CLST_L_CPUS_MSK; 
511 } else { 512 pll_id = ABPLL_ID; 513 clst_st_msk = CLST_B_CPUS_MSK << 514 PLATFORM_CLUSTER0_CORE_COUNT; 515 } 516 517 clst_st_chk_msk = clst_st_msk & ~(BIT(cpu_id)); 518 519 pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST); 520 521 pmu_st &= clst_st_msk; 522 523 if (pmu_st == clst_st_chk_msk) { 524 mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3), 525 PLL_SLOW_MODE); 526 527 clst_warmboot_data[pll_id] = PMU_CLST_RET; 528 529 pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST); 530 pmu_st &= clst_st_msk; 531 if (pmu_st == clst_st_chk_msk) 532 return; 533 /* 534 * it is mean that others cpu is up again, 535 * we must resume the cfg at once. 536 */ 537 mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3), 538 PLL_NOMAL_MODE); 539 clst_warmboot_data[pll_id] = 0; 540 } 541 } 542 } 543 544 static int clst_pwr_domain_resume(plat_local_state_t lvl_state) 545 { 546 uint32_t cpu_id = plat_my_core_pos(); 547 uint32_t pll_id, pll_st; 548 549 assert(cpu_id < PLATFORM_CORE_COUNT); 550 551 if (lvl_state == PLAT_MAX_OFF_STATE) { 552 if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT) 553 pll_id = ALPLL_ID; 554 else 555 pll_id = ABPLL_ID; 556 557 pll_st = mmio_read_32(CRU_BASE + CRU_PLL_CON(pll_id, 3)) >> 558 PLL_MODE_SHIFT; 559 560 if (pll_st != NORMAL_MODE) { 561 WARN("%s: clst (%d) is in error mode (%d)\n", 562 __func__, pll_id, pll_st); 563 return -1; 564 } 565 } 566 567 return 0; 568 } 569 570 static void nonboot_cpus_off(void) 571 { 572 uint32_t boot_cpu, cpu; 573 574 boot_cpu = plat_my_core_pos(); 575 576 /* turn off noboot cpus */ 577 for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) { 578 if (cpu == boot_cpu) 579 continue; 580 cpus_power_domain_off(cpu, core_pwr_pd); 581 } 582 } 583 584 int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint) 585 { 586 uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr); 587 588 assert(cpu_id < PLATFORM_CORE_COUNT); 589 assert(cpuson_flags[cpu_id] == 0); 590 cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG; 591 cpuson_entry_point[cpu_id] = entrypoint; 592 
dsb(); 593 594 cpus_power_domain_on(cpu_id); 595 596 return PSCI_E_SUCCESS; 597 } 598 599 int rockchip_soc_cores_pwr_dm_off(void) 600 { 601 uint32_t cpu_id = plat_my_core_pos(); 602 603 cpus_power_domain_off(cpu_id, core_pwr_wfi); 604 605 return PSCI_E_SUCCESS; 606 } 607 608 int rockchip_soc_hlvl_pwr_dm_off(uint32_t lvl, 609 plat_local_state_t lvl_state) 610 { 611 switch (lvl) { 612 case MPIDR_AFFLVL1: 613 clst_pwr_domain_suspend(lvl_state); 614 break; 615 default: 616 break; 617 } 618 619 return PSCI_E_SUCCESS; 620 } 621 622 int rockchip_soc_cores_pwr_dm_suspend(void) 623 { 624 uint32_t cpu_id = plat_my_core_pos(); 625 626 assert(cpu_id < PLATFORM_CORE_COUNT); 627 assert(cpuson_flags[cpu_id] == 0); 628 cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN; 629 cpuson_entry_point[cpu_id] = plat_get_sec_entrypoint(); 630 dsb(); 631 632 cpus_power_domain_off(cpu_id, core_pwr_wfi_int); 633 634 return PSCI_E_SUCCESS; 635 } 636 637 int rockchip_soc_hlvl_pwr_dm_suspend(uint32_t lvl, plat_local_state_t lvl_state) 638 { 639 switch (lvl) { 640 case MPIDR_AFFLVL1: 641 clst_pwr_domain_suspend(lvl_state); 642 break; 643 default: 644 break; 645 } 646 647 return PSCI_E_SUCCESS; 648 } 649 650 int rockchip_soc_cores_pwr_dm_on_finish(void) 651 { 652 uint32_t cpu_id = plat_my_core_pos(); 653 654 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 655 CORES_PM_DISABLE); 656 return PSCI_E_SUCCESS; 657 } 658 659 int rockchip_soc_hlvl_pwr_dm_on_finish(uint32_t lvl, 660 plat_local_state_t lvl_state) 661 { 662 switch (lvl) { 663 case MPIDR_AFFLVL1: 664 clst_pwr_domain_resume(lvl_state); 665 break; 666 default: 667 break; 668 } 669 670 return PSCI_E_SUCCESS; 671 } 672 673 int rockchip_soc_cores_pwr_dm_resume(void) 674 { 675 uint32_t cpu_id = plat_my_core_pos(); 676 677 /* Disable core_pm */ 678 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), CORES_PM_DISABLE); 679 680 return PSCI_E_SUCCESS; 681 } 682 683 int rockchip_soc_hlvl_pwr_dm_resume(uint32_t lvl, plat_local_state_t lvl_state) 684 { 685 switch 
(lvl) { 686 case MPIDR_AFFLVL1: 687 clst_pwr_domain_resume(lvl_state); 688 default: 689 break; 690 } 691 692 return PSCI_E_SUCCESS; 693 } 694 695 /** 696 * init_pmu_counts - Init timing counts in the PMU register area 697 * 698 * At various points when we power up or down parts of the system we need 699 * a delay to wait for power / clocks to become stable. The PMU has counters 700 * to help software do the delay properly. Basically, it works like this: 701 * - Software sets up counter values 702 * - When software turns on something in the PMU, the counter kicks off 703 * - The hardware sets a bit automatically when the counter has finished and 704 * software knows that the initialization is done. 705 * 706 * It's software's job to setup these counters. The hardware power on default 707 * for these settings is conservative, setting everything to 0x5dc0 708 * (750 ms in 32 kHz counts or 1 ms in 24 MHz counts). 709 * 710 * Note that some of these counters are only really used at suspend/resume 711 * time (for instance, that's the only time we turn off/on the oscillator) and 712 * others are used during normal runtime (like turning on/off a CPU or GPU) but 713 * it doesn't hurt to init everything at boot. 714 * 715 * Also note that these counters can run off the 32 kHz clock or the 24 MHz 716 * clock. While the 24 MHz clock can give us more precision, it's not always 717 * available (like when we turn the oscillator off at sleep time). The 718 * pmu_use_lf (lf: low freq) is available in power mode. Current understanding 719 * is that counts work like this: 720 * IF (pmu_use_lf == 0) || (power_mode_en == 0) 721 * use the 24M OSC for counts 722 * ELSE 723 * use the 32K OSC for counts 724 * 725 * Notes: 726 * - There is a separate bit for the PMU called PMU_24M_EN_CFG. At the moment 727 * we always keep that 0. This apparently choose between using the PLL as 728 * the source for the PMU vs. the 24M clock. 
If we ever set it to 1 we 729 * should consider how it affects these counts (if at all). 730 * - The power_mode_en is documented to auto-clear automatically when we leave 731 * "power mode". That's why most clocks are on 24M. Only timings used when 732 * in "power mode" are 32k. 733 * - In some cases the kernel may override these counts. 734 * 735 * The PMU_STABLE_CNT / PMU_OSC_CNT / PMU_PLLLOCK_CNT are important CNTs 736 * in power mode, we need to ensure that they are available. 737 */ 738 static void init_pmu_counts(void) 739 { 740 /* COUNTS FOR INSIDE POWER MODE */ 741 742 /* 743 * From limited testing, need PMU stable >= 2ms, but go overkill 744 * and choose 30 ms to match testing on past SoCs. Also let 745 * OSC have 30 ms for stabilization. 746 */ 747 mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_32K_CNT_MS(30)); 748 mmio_write_32(PMU_BASE + PMU_OSC_CNT, CYCL_32K_CNT_MS(30)); 749 750 /* Unclear what these should be; try 3 ms */ 751 mmio_write_32(PMU_BASE + PMU_WAKEUP_RST_CLR_CNT, CYCL_32K_CNT_MS(3)); 752 753 /* Unclear what this should be, but set the default explicitly */ 754 mmio_write_32(PMU_BASE + PMU_TIMEOUT_CNT, 0x5dc0); 755 756 /* COUNTS FOR OUTSIDE POWER MODE */ 757 758 /* Put something sorta conservative here until we know better */ 759 mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_24M_CNT_MS(3)); 760 mmio_write_32(PMU_BASE + PMU_DDRIO_PWRON_CNT, CYCL_24M_CNT_MS(1)); 761 mmio_write_32(PMU_BASE + PMU_CENTER_PWRDN_CNT, CYCL_24M_CNT_MS(1)); 762 mmio_write_32(PMU_BASE + PMU_CENTER_PWRUP_CNT, CYCL_24M_CNT_MS(1)); 763 764 /* 765 * when we enable PMU_CLR_PERILP, it will shut down the SRAM, but 766 * M0 code run in SRAM, and we need it to check whether cpu enter 767 * FSM status, so we must wait M0 finish their code and enter WFI, 768 * then we can shutdown SRAM, according FSM order: 769 * ST_NORMAL->..->ST_SCU_L_PWRDN->..->ST_CENTER_PWRDN->ST_PERILP_PWRDN 770 * we can add delay when shutdown ST_SCU_L_PWRDN to guarantee M0 get 771 * the FSM status and 
enter WFI, then enable PMU_CLR_PERILP. 772 */ 773 mmio_write_32(PMU_BASE + PMU_SCU_L_PWRDN_CNT, CYCL_24M_CNT_MS(5)); 774 mmio_write_32(PMU_BASE + PMU_SCU_L_PWRUP_CNT, CYCL_24M_CNT_US(1)); 775 776 /* 777 * Set CPU/GPU to 1 us. 778 * 779 * NOTE: Even though ATF doesn't configure the GPU we'll still setup 780 * counts here. After all ATF controls all these other bits and also 781 * chooses which clock these counters use. 782 */ 783 mmio_write_32(PMU_BASE + PMU_SCU_B_PWRDN_CNT, CYCL_24M_CNT_US(1)); 784 mmio_write_32(PMU_BASE + PMU_SCU_B_PWRUP_CNT, CYCL_24M_CNT_US(1)); 785 mmio_write_32(PMU_BASE + PMU_GPU_PWRDN_CNT, CYCL_24M_CNT_US(1)); 786 mmio_write_32(PMU_BASE + PMU_GPU_PWRUP_CNT, CYCL_24M_CNT_US(1)); 787 } 788 789 static uint32_t clk_ddrc_save; 790 791 static void sys_slp_config(void) 792 { 793 uint32_t slp_mode_cfg = 0; 794 795 /* keep enabling clk_ddrc_bpll_src_en gate for DDRC */ 796 clk_ddrc_save = mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(3)); 797 mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(3), WMSK_BIT(1)); 798 799 prepare_abpll_for_ddrctrl(); 800 sram_func_set_ddrctl_pll(ABPLL_ID); 801 802 mmio_write_32(GRF_BASE + GRF_SOC_CON4, CCI_FORCE_WAKEUP); 803 mmio_write_32(PMU_BASE + PMU_CCI500_CON, 804 BIT_WITH_WMSK(PMU_CLR_PREQ_CCI500_HW) | 805 BIT_WITH_WMSK(PMU_CLR_QREQ_CCI500_HW) | 806 BIT_WITH_WMSK(PMU_QGATING_CCI500_CFG)); 807 808 mmio_write_32(PMU_BASE + PMU_ADB400_CON, 809 BIT_WITH_WMSK(PMU_CLR_CORE_L_HW) | 810 BIT_WITH_WMSK(PMU_CLR_CORE_L_2GIC_HW) | 811 BIT_WITH_WMSK(PMU_CLR_GIC2_CORE_L_HW)); 812 813 slp_mode_cfg = BIT(PMU_PWR_MODE_EN) | 814 BIT(PMU_POWER_OFF_REQ_CFG) | 815 BIT(PMU_CPU0_PD_EN) | 816 BIT(PMU_L2_FLUSH_EN) | 817 BIT(PMU_L2_IDLE_EN) | 818 BIT(PMU_SCU_PD_EN) | 819 BIT(PMU_CCI_PD_EN) | 820 BIT(PMU_CLK_CORE_SRC_GATE_EN) | 821 BIT(PMU_ALIVE_USE_LF) | 822 BIT(PMU_SREF0_ENTER_EN) | 823 BIT(PMU_SREF1_ENTER_EN) | 824 BIT(PMU_DDRC0_GATING_EN) | 825 BIT(PMU_DDRC1_GATING_EN) | 826 BIT(PMU_DDRIO0_RET_EN) | 827 BIT(PMU_DDRIO1_RET_EN) | 828 
BIT(PMU_DDRIO_RET_HW_DE_REQ) | 829 BIT(PMU_CENTER_PD_EN) | 830 BIT(PMU_PERILP_PD_EN) | 831 BIT(PMU_CLK_PERILP_SRC_GATE_EN) | 832 BIT(PMU_PLL_PD_EN) | 833 BIT(PMU_CLK_CENTER_SRC_GATE_EN) | 834 BIT(PMU_OSC_DIS) | 835 BIT(PMU_PMU_USE_LF); 836 837 mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, BIT(PMU_GPIO_WKUP_EN)); 838 mmio_write_32(PMU_BASE + PMU_PWRMODE_CON, slp_mode_cfg); 839 840 mmio_write_32(PMU_BASE + PMU_PLL_CON, PLL_PD_HW); 841 mmio_write_32(PMUGRF_BASE + PMUGRF_SOC_CON0, EXTERNAL_32K); 842 mmio_write_32(PMUGRF_BASE, IOMUX_CLK_32K); /* 32k iomux */ 843 } 844 845 static void set_hw_idle(uint32_t hw_idle) 846 { 847 mmio_setbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle); 848 } 849 850 static void clr_hw_idle(uint32_t hw_idle) 851 { 852 mmio_clrbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle); 853 } 854 855 static uint32_t iomux_status[12]; 856 static uint32_t pull_mode_status[12]; 857 static uint32_t gpio_direction[3]; 858 static uint32_t gpio_2_4_clk_gate; 859 860 static void suspend_apio(void) 861 { 862 struct apio_info *suspend_apio; 863 int i; 864 865 suspend_apio = plat_get_rockchip_suspend_apio(); 866 867 if (!suspend_apio) 868 return; 869 870 /* save gpio2 ~ gpio4 iomux and pull mode */ 871 for (i = 0; i < 12; i++) { 872 iomux_status[i] = mmio_read_32(GRF_BASE + 873 GRF_GPIO2A_IOMUX + i * 4); 874 pull_mode_status[i] = mmio_read_32(GRF_BASE + 875 GRF_GPIO2A_P + i * 4); 876 } 877 878 /* store gpio2 ~ gpio4 clock gate state */ 879 gpio_2_4_clk_gate = (mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(31)) >> 880 PCLK_GPIO2_GATE_SHIFT) & 0x07; 881 882 /* enable gpio2 ~ gpio4 clock gate */ 883 mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31), 884 BITS_WITH_WMASK(0, 0x07, PCLK_GPIO2_GATE_SHIFT)); 885 886 /* save gpio2 ~ gpio4 direction */ 887 gpio_direction[0] = mmio_read_32(GPIO2_BASE + 0x04); 888 gpio_direction[1] = mmio_read_32(GPIO3_BASE + 0x04); 889 gpio_direction[2] = mmio_read_32(GPIO4_BASE + 0x04); 890 891 /* apio1 charge gpio3a0 ~ gpio3c7 */ 892 if (suspend_apio->apio1) { 893 894 /* 
set gpio3a0 ~ gpio3c7 iomux to gpio */ 895 mmio_write_32(GRF_BASE + GRF_GPIO3A_IOMUX, 896 REG_SOC_WMSK | GRF_IOMUX_GPIO); 897 mmio_write_32(GRF_BASE + GRF_GPIO3B_IOMUX, 898 REG_SOC_WMSK | GRF_IOMUX_GPIO); 899 mmio_write_32(GRF_BASE + GRF_GPIO3C_IOMUX, 900 REG_SOC_WMSK | GRF_IOMUX_GPIO); 901 902 /* set gpio3a0 ~ gpio3c7 pull mode to pull none */ 903 mmio_write_32(GRF_BASE + GRF_GPIO3A_P, REG_SOC_WMSK | 0); 904 mmio_write_32(GRF_BASE + GRF_GPIO3B_P, REG_SOC_WMSK | 0); 905 mmio_write_32(GRF_BASE + GRF_GPIO3C_P, REG_SOC_WMSK | 0); 906 907 /* set gpio3a0 ~ gpio3c7 to input */ 908 mmio_clrbits_32(GPIO3_BASE + 0x04, 0x00ffffff); 909 } 910 911 /* apio2 charge gpio2a0 ~ gpio2b4 */ 912 if (suspend_apio->apio2) { 913 914 /* set gpio2a0 ~ gpio2b4 iomux to gpio */ 915 mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX, 916 REG_SOC_WMSK | GRF_IOMUX_GPIO); 917 mmio_write_32(GRF_BASE + GRF_GPIO2B_IOMUX, 918 REG_SOC_WMSK | GRF_IOMUX_GPIO); 919 920 /* set gpio2a0 ~ gpio2b4 pull mode to pull none */ 921 mmio_write_32(GRF_BASE + GRF_GPIO2A_P, REG_SOC_WMSK | 0); 922 mmio_write_32(GRF_BASE + GRF_GPIO2B_P, REG_SOC_WMSK | 0); 923 924 /* set gpio2a0 ~ gpio2b4 to input */ 925 mmio_clrbits_32(GPIO2_BASE + 0x04, 0x00001fff); 926 } 927 928 /* apio3 charge gpio2c0 ~ gpio2d4*/ 929 if (suspend_apio->apio3) { 930 931 /* set gpio2a0 ~ gpio2b4 iomux to gpio */ 932 mmio_write_32(GRF_BASE + GRF_GPIO2C_IOMUX, 933 REG_SOC_WMSK | GRF_IOMUX_GPIO); 934 mmio_write_32(GRF_BASE + GRF_GPIO2D_IOMUX, 935 REG_SOC_WMSK | GRF_IOMUX_GPIO); 936 937 /* set gpio2c0 ~ gpio2d4 pull mode to pull none */ 938 mmio_write_32(GRF_BASE + GRF_GPIO2C_P, REG_SOC_WMSK | 0); 939 mmio_write_32(GRF_BASE + GRF_GPIO2D_P, REG_SOC_WMSK | 0); 940 941 /* set gpio2c0 ~ gpio2d4 to input */ 942 mmio_clrbits_32(GPIO2_BASE + 0x04, 0x1fff0000); 943 } 944 945 /* apio4 charge gpio4c0 ~ gpio4c7, gpio4d0 ~ gpio4d6 */ 946 if (suspend_apio->apio4) { 947 948 /* set gpio4c0 ~ gpio4d6 iomux to gpio */ 949 mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX, 950 REG_SOC_WMSK 
			      | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO4D_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio4c0 ~ gpio4d6 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO4C_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO4D_P, REG_SOC_WMSK | 0);

		/* set gpio4c0 ~ gpio4d6 to input */
		mmio_clrbits_32(GPIO4_BASE + 0x04, 0x7fff0000);
	}

	/* apio5 charge gpio3d0 ~ gpio3d7, gpio4a0 ~ gpio4a7 */
	if (suspend_apio->apio5) {
		/* set gpio3d0 ~ gpio4a7 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO3D_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO4A_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio3d0 ~ gpio4a7 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO3D_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO4A_P, REG_SOC_WMSK | 0);

		/*
		 * set gpio3d0 ~ gpio4a7 to input
		 * (comment fixed: the cleared bits are GPIO3 D-bank and
		 * GPIO4 A-bank, not gpio4c0 ~ gpio4d6)
		 */
		mmio_clrbits_32(GPIO3_BASE + 0x04, 0xff000000);
		mmio_clrbits_32(GPIO4_BASE + 0x04, 0x000000ff);
	}
}

/*
 * Undo suspend_apio(): write the saved iomux/pull settings for the twelve
 * GRF registers starting at GPIO2A back through the write-mask interface,
 * restore the GPIO2..GPIO4 direction registers and the GPIO2..4 pclk gates.
 * The save side (filling pull_mode_status[], iomux_status[], gpio_direction[]
 * and gpio_2_4_clk_gate) is not in this view — presumably done before the
 * apio suspend path runs; verify against the suspend-side code.
 */
static void resume_apio(void)
{
	struct apio_info *suspend_apio;
	int i;

	suspend_apio = plat_get_rockchip_suspend_apio();

	/* nothing to restore if the board declared no suspend apio set */
	if (!suspend_apio)
		return;

	for (i = 0; i < 12; i++) {
		mmio_write_32(GRF_BASE + GRF_GPIO2A_P + i * 4,
			      REG_SOC_WMSK | pull_mode_status[i]);
		mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX + i * 4,
			      REG_SOC_WMSK | iomux_status[i]);
	}

	/* set gpio2 ~ gpio4 direction back to store value */
	mmio_write_32(GPIO2_BASE + 0x04, gpio_direction[0]);
	mmio_write_32(GPIO3_BASE + 0x04, gpio_direction[1]);
	mmio_write_32(GPIO4_BASE + 0x04, gpio_direction[2]);

	/* set gpio2 ~ gpio4 clock gate back to store value */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
		      BITS_WITH_WMASK(gpio_2_4_clk_gate, 0x07,
				      PCLK_GPIO2_GATE_SHIFT));
}

/*
 * Drive every board-declared suspend GPIO to its suspend polarity and make
 * it an output. The 1us delay between pins gives each level time to settle
 * before the next pin is switched.
 */
static void suspend_gpio(void)
{
	struct gpio_info *suspend_gpio;
	uint32_t count;
	int i;

	suspend_gpio = plat_get_rockchip_suspend_gpio(&count);

	for (i = 0; i < count; i++) {
		gpio_set_value(suspend_gpio[i].index, suspend_gpio[i].polarity);
		gpio_set_direction(suspend_gpio[i].index, GPIO_DIR_OUT);
		udelay(1);
	}
}

/*
 * Inverse of suspend_gpio(): walk the same list in reverse order and drive
 * each pin to the opposite of its suspend polarity.
 */
static void resume_gpio(void)
{
	struct gpio_info *suspend_gpio;
	uint32_t count;
	int i;

	suspend_gpio = plat_get_rockchip_suspend_gpio(&count);

	for (i = count - 1; i >= 0; i--) {
		gpio_set_value(suspend_gpio[i].index,
			       !suspend_gpio[i].polarity);
		gpio_set_direction(suspend_gpio[i].index, GPIO_DIR_OUT);
		udelay(1);
	}
}

/* Tell the Cortex-M0 firmware which function to run: system suspend. */
static void m0_configure_suspend(void)
{
	/* set PARAM to M0_FUNC_SUSPEND */
	mmio_write_32(M0_PARAM_ADDR + PARAM_M0_FUNC, M0_FUNC_SUSPEND);
}

/*
 * Copy the BL31 SRAM image (text, data and the incbin payload, laid out
 * back-to-back) into the store_sram[] DRAM buffer so it survives while the
 * SRAM contents are lost across system suspend.
 */
void sram_save(void)
{
	size_t text_size = (char *)&__bl31_sram_text_real_end -
			   (char *)&__bl31_sram_text_start;
	size_t data_size = (char *)&__bl31_sram_data_real_end -
			   (char *)&__bl31_sram_data_start;
	size_t incbin_size = (char *)&__sram_incbin_real_end -
			     (char *)&__sram_incbin_start;

	memcpy(&store_sram[0], &__bl31_sram_text_start, text_size);
	memcpy(&store_sram[text_size], &__bl31_sram_data_start, data_size);
	memcpy(&store_sram[text_size + data_size], &__sram_incbin_start,
	       incbin_size);
}

/*
 * Inverse of sram_save(): copy text, data and incbin back from store_sram[]
 * into their SRAM link addresses after resume. Offsets must match the
 * layout written by sram_save().
 */
void sram_restore(void)
{
	size_t text_size = (char *)&__bl31_sram_text_real_end -
			   (char *)&__bl31_sram_text_start;
	size_t data_size = (char *)&__bl31_sram_data_real_end -
			   (char *)&__bl31_sram_data_start;
	size_t incbin_size = (char *)&__sram_incbin_real_end -
			     (char *)&__sram_incbin_start;

	memcpy(&__bl31_sram_text_start, &store_sram[0], text_size);
	memcpy(&__bl31_sram_data_start, &store_sram[text_size], data_size);
	memcpy(&__sram_incbin_start, &store_sram[text_size + data_size],
	       incbin_size);
}

int
rockchip_soc_sys_pwr_dm_suspend(void)
{
	uint32_t wait_cnt = 0;
	uint32_t status = 0;

	/*
	 * System-suspend entry. Order matters throughout: DDR/DMC state is
	 * saved first, then power domains go down, buses are idled, the M0
	 * takes over, and PLLs/PWMs are the last things stopped before WFI.
	 */
	ddr_prepare_for_sys_suspend();
	dmc_save();
	pmu_scu_b_pwrdn();

	pmu_power_domains_suspend();
	set_hw_idle(BIT(PMU_CLR_CENTER1) |
		    BIT(PMU_CLR_ALIVE) |
		    BIT(PMU_CLR_MSCH0) |
		    BIT(PMU_CLR_MSCH1) |
		    BIT(PMU_CLR_CCIM0) |
		    BIT(PMU_CLR_CCIM1) |
		    BIT(PMU_CLR_CENTER) |
		    BIT(PMU_CLR_PERILP) |
		    BIT(PMU_CLR_PERILPM0) |
		    BIT(PMU_CLR_GIC));

	sys_slp_config();

	/* hand the suspend sequence over to the Cortex-M0 */
	m0_configure_suspend();
	m0_start();

	pmu_sgrf_rst_hld();

	/* warm-boot through the pmu_cpuson_entrypoint while suspended */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      ((uintptr_t)&pmu_cpuson_entrypoint >>
			CPU_BOOT_ADDR_ALIGN) | CPU_BOOT_ADDR_WMASK);

	/*
	 * Request the ADB400 bridges between the big cluster and the GIC to
	 * power down, then poll PMU_ADB400_ST until all three acknowledge.
	 */
	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) |
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_SW) |
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_GIC2_CORE_B_SW));
	dsb();
	status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) |
		BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) |
		BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST);
	while ((mmio_read_32(PMU_BASE +
	       PMU_ADB400_ST) & status) != status) {
		wait_cnt++;
		if (wait_cnt >= MAX_WAIT_COUNT) {
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_ADB400_ST));
			panic();
		}
	}
	mmio_setbits_32(PMU_BASE + PMU_PWRDN_CON, BIT(PMU_SCU_B_PWRDWN_EN));

	secure_watchdog_disable();

	/*
	 * Disabling PLLs/PWM/DVFS is approaching WFI which is
	 * the last step in suspend.
	 */
	disable_dvfs_plls();
	disable_pwms();
	disable_nodvfs_plls();

	suspend_apio();
	suspend_gpio();

	/* SRAM contents are lost across suspend; stash them in DRAM */
	sram_save();
	return 0;
}

/*
 * System-resume path: mirrors rockchip_soc_sys_pwr_dm_suspend() in reverse.
 * GPIO/PLL/PWM supplies come up first, then the warm-boot address, CCI and
 * ADB400 bridges, power domains, and finally the DPLL/ABPLL and bus idles.
 */
int rockchip_soc_sys_pwr_dm_resume(void)
{
	uint32_t wait_cnt = 0;
	uint32_t status = 0;

	resume_apio();
	resume_gpio();
	enable_nodvfs_plls();
	enable_pwms();
	/* PWM regulators take time to come up; give 300us to be safe. */
	udelay(300);
	enable_dvfs_plls();

	secure_watchdog_enable();

	/* restore clk_ddrc_bpll_src_en gate */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(3),
		      BITS_WITH_WMASK(clk_ddrc_save, 0xff, 0));

	/*
	 * The wakeup status is not cleared by itself, we need to clear it
	 * manually. Otherwise we will always query some interrupt next time.
	 *
	 * NOTE: If the kernel needs to query this, we might want to stash it
	 * somewhere.
	 */
	mmio_write_32(PMU_BASE + PMU_WAKEUP_STATUS, 0xffffffff);
	mmio_write_32(PMU_BASE + PMU_WKUP_CFG4, 0x00);

	/* point warm boot back at the normal CPU warm-boot entry */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	mmio_write_32(PMU_BASE + PMU_CCI500_CON,
		      WMSK_BIT(PMU_CLR_PREQ_CCI500_HW) |
		      WMSK_BIT(PMU_CLR_QREQ_CCI500_HW) |
		      WMSK_BIT(PMU_QGATING_CCI500_CFG));
	dsb();
	mmio_clrbits_32(PMU_BASE + PMU_PWRDN_CON,
			BIT(PMU_SCU_B_PWRDWN_EN));

	/*
	 * Release the ADB400 power-down requests set on suspend and wait
	 * for the acknowledge bits to clear.
	 */
	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) |
		      WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_SW) |
		      WMSK_BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW) |
		      WMSK_BIT(PMU_CLR_CORE_L_HW) |
		      WMSK_BIT(PMU_CLR_CORE_L_2GIC_HW) |
		      WMSK_BIT(PMU_CLR_GIC2_CORE_L_HW));

	status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) |
		BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) |
		BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST);

	while ((mmio_read_32(PMU_BASE +
	       PMU_ADB400_ST) & status)) {
		wait_cnt++;
		if (wait_cnt >= MAX_WAIT_COUNT) {
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_ADB400_ST));
			panic();
		}
	}

	pmu_sgrf_rst_hld_release();
	pmu_scu_b_pwrup();
	pmu_power_domains_resume();

	restore_dpll();
	sram_func_set_ddrctl_pll(DPLL_ID);
	restore_abpll();

	clr_hw_idle(BIT(PMU_CLR_CENTER1) |
		    BIT(PMU_CLR_ALIVE) |
		    BIT(PMU_CLR_MSCH0) |
		    BIT(PMU_CLR_MSCH1) |
		    BIT(PMU_CLR_CCIM0) |
		    BIT(PMU_CLR_CCIM1) |
		    BIT(PMU_CLR_CENTER) |
		    BIT(PMU_CLR_PERILP) |
		    BIT(PMU_CLR_PERILPM0) |
		    BIT(PMU_CLR_GIC));

	plat_rockchip_gic_cpuif_enable();
	m0_stop();

	ddr_prepare_for_sys_resume();

	return 0;
}

/*
 * Reset the SoC: prefer the board-level reset GPIO if one was declared,
 * otherwise fall back to the global soft reset. Never returns; spins until
 * the reset takes effect.
 */
void __dead2 rockchip_soc_soft_reset(void)
{
	struct gpio_info *rst_gpio;

	rst_gpio = plat_get_rockchip_gpio_reset();

	if (rst_gpio) {
		gpio_set_direction(rst_gpio->index, GPIO_DIR_OUT);
		gpio_set_value(rst_gpio->index, rst_gpio->polarity);
	} else {
		soc_global_soft_reset();
	}

	while (1)
		;
}

/*
 * Power the system off via the board-declared poweroff GPIO; if none exists
 * there is no SoC-level fallback, so just warn and spin.
 */
void __dead2 rockchip_soc_system_off(void)
{
	struct gpio_info *poweroff_gpio;

	poweroff_gpio = plat_get_rockchip_gpio_poweroff();

	if (poweroff_gpio) {
		/*
		 * if use tsadc over temp pin(GPIO1A6) as shutdown gpio,
		 * need to set this pin iomux back to gpio function
		 */
		if (poweroff_gpio->index == TSADC_INT_PIN) {
			mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO1A_IOMUX,
				      GPIO1A6_IOMUX);
		}
		gpio_set_direction(poweroff_gpio->index, GPIO_DIR_OUT);
		gpio_set_value(poweroff_gpio->index, poweroff_gpio->polarity);
	} else {
		WARN("Do nothing when system off\n");
	}

	while (1)
		;
}

/*
 * Map the BL31 SRAM sections (identity-mapped: VA == PA) into the EL3
 * translation tables: text RO, data/stack RW, incbin payload RW uncached.
 */
void rockchip_plat_mmu_el3(void)
{
	size_t sram_size;

	/* sram.text size */
	sram_size = (char *)&__bl31_sram_text_end -
		    (char *)&__bl31_sram_text_start;
	mmap_add_region((unsigned long)&__bl31_sram_text_start,
			(unsigned long)&__bl31_sram_text_start,
			sram_size, MT_MEMORY | MT_RO | MT_SECURE);

	/* sram.data size */
	sram_size = (char *)&__bl31_sram_data_end -
		    (char *)&__bl31_sram_data_start;
	mmap_add_region((unsigned long)&__bl31_sram_data_start,
			(unsigned long)&__bl31_sram_data_start,
			sram_size, MT_MEMORY | MT_RW | MT_SECURE);

	/* sram.stack: read-write, normal memory */
	sram_size = (char *)&__bl31_sram_stack_end -
		    (char *)&__bl31_sram_stack_start;
	mmap_add_region((unsigned long)&__bl31_sram_stack_start,
			(unsigned long)&__bl31_sram_stack_start,
			sram_size, MT_MEMORY | MT_RW | MT_SECURE);

	/* incbin payload: non-cacheable so writes land in SRAM immediately */
	sram_size = (char *)&__sram_incbin_end - (char *)&__sram_incbin_start;
	mmap_add_region((unsigned long)&__sram_incbin_start,
			(unsigned long)&__sram_incbin_start,
			sram_size, MT_NON_CACHEABLE | MT_RW | MT_SECURE);
}

/*
 * One-time PMU setup at BL31 init: initialize the power-domain lock, record
 * the CPU warm-boot entry point, clear per-CPU/per-cluster warm-boot state,
 * program the warm-boot address into SGRF, enable NoC auto power management
 * and the 32 kHz input Schmitt trigger, then park all non-boot CPUs.
 */
void plat_rockchip_pmu_init(void)
{
	uint32_t cpu;

	rockchip_pd_lock_init();

	/*
	 * register requires 32bits mode, switch it to 32 bits
	 * (the 64-bit function address is deliberately truncated into the
	 * 32-bit cpu_warm_boot_addr; warm boot starts in AArch32)
	 */
	cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;

	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
		cpuson_flags[cpu] = 0;

	for (cpu = 0; cpu < PLATFORM_CLUSTER_COUNT; cpu++)
		clst_warmboot_data[cpu] = 0;

	/* config cpu's warm boot address */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	mmio_write_32(PMU_BASE + PMU_NOC_AUTO_ENA, NOC_AUTO_ENABLE);

	/*
	 * Enable Schmitt trigger for better 32 kHz input signal, which is
	 * important for suspend/resume reliability among other things.
	 */
	mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO0A_SMT, GPIO0A0_SMT_ENABLE);

	init_pmu_counts();

	nonboot_cpus_off();

	INFO("%s(%d): pd status %x\n", __func__, __LINE__,
	     mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}