/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bakery_lock.h>
#include <debug.h>
#include <delay_timer.h>
#include <errno.h>
#include <gpio.h>
#include <mmio.h>
#include <platform.h>
#include <platform_def.h>
#include <plat_params.h>
#include <plat_private.h>
#include <rk3399_def.h>
#include <pmu_sram.h>
#include <soc.h>
#include <pmu.h>
#include <pmu_com.h>
#include <pwm.h>
#include <bl31.h>
#include <rk3399m0.h>
#include <suspend.h>

DEFINE_BAKERY_LOCK(rockchip_pd_lock);

static struct psram_data_t *psram_sleep_cfg =
	(struct psram_data_t *)PSRAM_DT_BASE;

static uint32_t cpu_warm_boot_addr;

/*
 * There are two ways to power a core on or off:
 * 1) Switch its power domain on or off directly via the PMU_PWRDN_CON
 *    register; this is the core_pwr_pd mode.
 * 2) Enable core power management in the PMU_CORE_PM_CON register; the
 *    core's power domain is then powered off automatically once the core
 *    enters WFI. This is the core_pwr_wfi or core_pwr_wfi_int mode.
 * We use core_pm_cfg_info to record which method is in use for each core.
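 *
 * (A sketch of how this bookkeeping is used, based on the code below:
 * method 1 powers a core on/off synchronously through
 * pmu_power_domain_ctr(), while method 2 only arms PMU_CORE_PM_CON and
 * lets the hardware act when the core executes WFI; see
 * cpus_power_domain_on() and cpus_power_domain_off() further down.)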
 */

static uint32_t core_pm_cfg_info[PLATFORM_CORE_COUNT]
#if USE_COHERENT_MEM
__attribute__ ((section("tzfw_coherent_mem")))
#endif
; /* coherent */

static void pmu_bus_idle_req(uint32_t bus, uint32_t state)
{
	uint32_t bus_id = BIT(bus);
	uint32_t bus_req;
	uint32_t wait_cnt = 0;
	uint32_t bus_state, bus_ack;

	if (state)
		bus_req = BIT(bus);
	else
		bus_req = 0;

	mmio_clrsetbits_32(PMU_BASE + PMU_BUS_IDLE_REQ, bus_id, bus_req);

	do {
		bus_state = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) & bus_id;
		bus_ack = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK) & bus_id;
		wait_cnt++;
	} while ((bus_state != bus_req || bus_ack != bus_req) &&
		 (wait_cnt < MAX_WAIT_COUNT));

	if (bus_state != bus_req || bus_ack != bus_req) {
		INFO("%s:st=%x(%x)\n", __func__,
		     mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST),
		     bus_state);
		INFO("%s:ack=%x(%x)\n", __func__,
		     mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK),
		     bus_ack);
	}
}

struct pmu_slpdata_s pmu_slpdata;

static void qos_save(void)
{
	if (pmu_power_domain_st(PD_GPU) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.gpu_qos, GPU);
	if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0);
		SAVE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1);
	}
	if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0);
		SAVE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1);
	}
	if (pmu_power_domain_st(PD_VO) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R);
		SAVE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W);
		SAVE_QOS(pmu_slpdata.vop_little, VOP_LITTLE);
	}
	if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.hdcp_qos, HDCP);
	if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.gmac_qos, GMAC);
	if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0);
		SAVE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1);
	}
	if (pmu_power_domain_st(PD_SD) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.sdmmc_qos, SDMMC);
	if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.emmc_qos, EMMC);
	if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.sdio_qos, SDIO);
	if (pmu_power_domain_st(PD_GIC) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.gic_qos, GIC);
	if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.rga_r_qos, RGA_R);
		SAVE_QOS(pmu_slpdata.rga_w_qos, RGA_W);
	}
	if (pmu_power_domain_st(PD_IEP) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.iep_qos, IEP);
	if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0);
		SAVE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1);
	}
	if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0);
		SAVE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1);
		SAVE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP);
	}
	if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.dmac0_qos, DMAC0);
		SAVE_QOS(pmu_slpdata.dmac1_qos, DMAC1);
		SAVE_QOS(pmu_slpdata.dcf_qos, DCF);
		SAVE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0);
		SAVE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1);
		SAVE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP);
		SAVE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP);
		SAVE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1);
	}
	if (pmu_power_domain_st(PD_VDU) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0);
	if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R);
		SAVE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W);
	}
}

static void qos_restore(void)
{
	if (pmu_power_domain_st(PD_GPU) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.gpu_qos, GPU);
	if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0);
		RESTORE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1);
	}
	if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0);
		RESTORE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1);
	}
	if (pmu_power_domain_st(PD_VO) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R);
		RESTORE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W);
		RESTORE_QOS(pmu_slpdata.vop_little, VOP_LITTLE);
	}
	if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.hdcp_qos, HDCP);
	if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.gmac_qos, GMAC);
	if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0);
		RESTORE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1);
	}
	if (pmu_power_domain_st(PD_SD) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.sdmmc_qos, SDMMC);
	if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.emmc_qos, EMMC);
	if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.sdio_qos, SDIO);
	if (pmu_power_domain_st(PD_GIC) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.gic_qos, GIC);
	if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.rga_r_qos, RGA_R);
		RESTORE_QOS(pmu_slpdata.rga_w_qos, RGA_W);
	}
	if (pmu_power_domain_st(PD_IEP) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.iep_qos, IEP);
	if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0);
		RESTORE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1);
	}
	if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0);
		RESTORE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1);
		RESTORE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP);
	}
	if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.dmac0_qos, DMAC0);
		RESTORE_QOS(pmu_slpdata.dmac1_qos, DMAC1);
		RESTORE_QOS(pmu_slpdata.dcf_qos, DCF);
		RESTORE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0);
		RESTORE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1);
		RESTORE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP);
		RESTORE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP);
		RESTORE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1);
	}
	if (pmu_power_domain_st(PD_VDU) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0);
	if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R);
		RESTORE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W);
	}
}

static int pmu_set_power_domain(uint32_t pd_id, uint32_t pd_state)
{
	uint32_t state;

	if (pmu_power_domain_st(pd_id) == pd_state)
		goto out;

	if (pd_state == pmu_pd_on)
		pmu_power_domain_ctr(pd_id, pd_state);

	state = (pd_state == pmu_pd_off) ?
		BUS_IDLE : BUS_ACTIVE;

	switch (pd_id) {
	case PD_GPU:
		pmu_bus_idle_req(BUS_ID_GPU, state);
		break;
	case PD_VIO:
		pmu_bus_idle_req(BUS_ID_VIO, state);
		break;
	case PD_ISP0:
		pmu_bus_idle_req(BUS_ID_ISP0, state);
		break;
	case PD_ISP1:
		pmu_bus_idle_req(BUS_ID_ISP1, state);
		break;
	case PD_VO:
		pmu_bus_idle_req(BUS_ID_VOPB, state);
		pmu_bus_idle_req(BUS_ID_VOPL, state);
		break;
	case PD_HDCP:
		pmu_bus_idle_req(BUS_ID_HDCP, state);
		break;
	case PD_TCPD0:
		break;
	case PD_TCPD1:
		break;
	case PD_GMAC:
		pmu_bus_idle_req(BUS_ID_GMAC, state);
		break;
	case PD_CCI:
		pmu_bus_idle_req(BUS_ID_CCIM0, state);
		pmu_bus_idle_req(BUS_ID_CCIM1, state);
		break;
	case PD_SD:
		pmu_bus_idle_req(BUS_ID_SD, state);
		break;
	case PD_EMMC:
		pmu_bus_idle_req(BUS_ID_EMMC, state);
		break;
	case PD_EDP:
		pmu_bus_idle_req(BUS_ID_EDP, state);
		break;
	case PD_SDIOAUDIO:
		pmu_bus_idle_req(BUS_ID_SDIOAUDIO, state);
		break;
	case PD_GIC:
		pmu_bus_idle_req(BUS_ID_GIC, state);
		break;
	case PD_RGA:
		pmu_bus_idle_req(BUS_ID_RGA, state);
		break;
	case PD_VCODEC:
		pmu_bus_idle_req(BUS_ID_VCODEC, state);
		break;
	case PD_VDU:
		pmu_bus_idle_req(BUS_ID_VDU, state);
		break;
	case PD_IEP:
		pmu_bus_idle_req(BUS_ID_IEP, state);
		break;
	case PD_USB3:
		pmu_bus_idle_req(BUS_ID_USB3, state);
		break;
	case PD_PERIHP:
		pmu_bus_idle_req(BUS_ID_PERIHP, state);
		break;
	default:
		break;
	}

	if (pd_state == pmu_pd_off)
		pmu_power_domain_ctr(pd_id, pd_state);

out:
	return 0;
}

static uint32_t pmu_powerdomain_state;

static void pmu_power_domains_suspend(void)
{
	clk_gate_con_save();
	clk_gate_con_disable();
	qos_save();
	pmu_powerdomain_state = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);
	pmu_set_power_domain(PD_GPU, pmu_pd_off);
	pmu_set_power_domain(PD_TCPD0, pmu_pd_off);
	pmu_set_power_domain(PD_TCPD1, pmu_pd_off);
	pmu_set_power_domain(PD_VO, pmu_pd_off);
	pmu_set_power_domain(PD_ISP0, pmu_pd_off);
	pmu_set_power_domain(PD_ISP1, pmu_pd_off);
	pmu_set_power_domain(PD_HDCP, pmu_pd_off);
	pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_off);
	pmu_set_power_domain(PD_GMAC, pmu_pd_off);
	pmu_set_power_domain(PD_EDP, pmu_pd_off);
	pmu_set_power_domain(PD_IEP, pmu_pd_off);
	pmu_set_power_domain(PD_RGA, pmu_pd_off);
	pmu_set_power_domain(PD_VCODEC, pmu_pd_off);
	pmu_set_power_domain(PD_VDU, pmu_pd_off);
	clk_gate_con_restore();
}

static void pmu_power_domains_resume(void)
{
	clk_gate_con_save();
	clk_gate_con_disable();
	if (!(pmu_powerdomain_state & BIT(PD_VDU)))
		pmu_set_power_domain(PD_VDU, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_VCODEC)))
		pmu_set_power_domain(PD_VCODEC, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_RGA)))
		pmu_set_power_domain(PD_RGA, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_IEP)))
		pmu_set_power_domain(PD_IEP, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_EDP)))
		pmu_set_power_domain(PD_EDP, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_GMAC)))
		pmu_set_power_domain(PD_GMAC, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_SDIOAUDIO)))
		pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_HDCP)))
		pmu_set_power_domain(PD_HDCP, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_ISP1)))
		pmu_set_power_domain(PD_ISP1, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_ISP0)))
		pmu_set_power_domain(PD_ISP0, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_VO)))
		pmu_set_power_domain(PD_VO, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_TCPD1)))
		pmu_set_power_domain(PD_TCPD1, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_TCPD0)))
		pmu_set_power_domain(PD_TCPD0, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_GPU)))
		pmu_set_power_domain(PD_GPU, pmu_pd_on);
	qos_restore();
	clk_gate_con_restore();
}

void rk3399_flash_l2_b(void)
{
	uint32_t wait_cnt = 0;

	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B));
	dsb();

	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
		 BIT(L2_FLUSHDONE_CLUSTER_B))) {
		wait_cnt++;
		if (wait_cnt >= MAX_WAIT_COUNT)
			WARN("%s:reg %x,wait\n", __func__,
			     mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}

	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B));
}

static void pmu_scu_b_pwrdn(void)
{
	uint32_t wait_cnt = 0;

	if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) &
	     (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) !=
	    (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) {
		ERROR("%s: not all CPUs are off\n", __func__);
		return;
	}

	rk3399_flash_l2_b();

	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG));

	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
		 BIT(STANDBY_BY_WFIL2_CLUSTER_B))) {
		wait_cnt++;
		if (wait_cnt >= MAX_WAIT_COUNT)
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}
}

static void pmu_scu_b_pwrup(void)
{
	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG));
}

void plat_rockchip_pmusram_prepare(void)
{
	uint32_t *sram_dst, *sram_src;
	size_t sram_size;

	/*
	 * Copy the PMU SRAM code and data into place.
	 */
	sram_dst = (uint32_t *)PMUSRAM_BASE;
	sram_src = (uint32_t *)&pmu_cpuson_entrypoint_start;
	sram_size = (uint32_t *)&pmu_cpuson_entrypoint_end -
		    (uint32_t *)sram_src;

	u32_align_cpy(sram_dst, sram_src, sram_size);

	psram_sleep_cfg->sp = PSRAM_DT_BASE;
}

static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id)
{
	assert(cpu_id < PLATFORM_CORE_COUNT);
	return core_pm_cfg_info[cpu_id];
}

static inline void set_cpus_pwr_domain_cfg_info(uint32_t cpu_id, uint32_t value)
{
	assert(cpu_id < PLATFORM_CORE_COUNT);
	core_pm_cfg_info[cpu_id] = value;
#if !USE_COHERENT_MEM
	flush_dcache_range((uintptr_t)&core_pm_cfg_info[cpu_id],
			   sizeof(uint32_t));
#endif
}

static int cpus_power_domain_on(uint32_t cpu_id)
{
	uint32_t cfg_info;
	uint32_t cpu_pd = PD_CPUL0 + cpu_id;
	/*
	 * There are two ways to power a core on or off:
	 * 1) Switch its power domain on or off directly via the
	 *    PMU_PWRDN_CON register.
	 * 2) Enable core power management in the PMU_CORE_PM_CON register;
	 *    the core's power domain is then powered off automatically when
	 *    the core enters WFI.
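	 *
	 * For example, cfg_info == core_pwr_pd below selects method 1, while
	 * core_pwr_wfi / core_pwr_wfi_int (recorded on the power-down path)
	 * select method 2.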
	 */

	cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id);

	if (cfg_info == core_pwr_pd) {
		/* disable core_pm cfg */
		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      CORES_PM_DISABLE);
		/* if the core is already on, power it off first */
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 0);
			pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
		}

		pmu_power_domain_ctr(cpu_pd, pmu_pd_on);
	} else {
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			WARN("%s: cpu%d is not off!\n", __func__, cpu_id);
			return -EINVAL;
		}

		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      BIT(core_pm_sft_wakeup_en));
		dsb();
	}

	return 0;
}

static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg)
{
	uint32_t cpu_pd;
	uint32_t core_pm_value;

	cpu_pd = PD_CPUL0 + cpu_id;
	if (pmu_power_domain_st(cpu_pd) == pmu_pd_off)
		return 0;

	if (pd_cfg == core_pwr_pd) {
		if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK))
			return -EINVAL;

		/* disable core_pm cfg */
		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      CORES_PM_DISABLE);

		set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);
		pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
	} else {
		set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);

		core_pm_value = BIT(core_pm_en);
		if (pd_cfg == core_pwr_wfi_int)
			core_pm_value |= BIT(core_pm_int_wakeup_en);
		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      core_pm_value);
		dsb();
	}

	return 0;
}

static inline void clst_pwr_domain_suspend(plat_local_state_t lvl_state)
{
	uint32_t cpu_id = plat_my_core_pos();
	uint32_t pll_id, clst_st_msk, clst_st_chk_msk, pmu_st;

	assert(cpu_id < PLATFORM_CORE_COUNT);

	if (lvl_state == PLAT_MAX_OFF_STATE) {
		if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT) {
			pll_id = ALPLL_ID;
			clst_st_msk = CLST_L_CPUS_MSK;
		} else {
			pll_id = ABPLL_ID;
			clst_st_msk = CLST_B_CPUS_MSK <<
				      PLATFORM_CLUSTER0_CORE_COUNT;
		}

		clst_st_chk_msk = clst_st_msk & ~(BIT(cpu_id));

		pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);

		pmu_st &= clst_st_msk;

		if (pmu_st == clst_st_chk_msk) {
			mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3),
				      PLL_SLOW_MODE);

			clst_warmboot_data[pll_id] = PMU_CLST_RET;

			pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);
			pmu_st &= clst_st_msk;
			if (pmu_st == clst_st_chk_msk)
				return;
			/*
			 * This means another CPU has come up again; we must
			 * restore the configuration at once.
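			 * (The second PMU_PWRDN_ST read above exists to
			 * catch exactly this race: a sibling core may have
			 * powered up between the first read and the switch
			 * to PLL slow mode.)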
			 */
			mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3),
				      PLL_NOMAL_MODE);
			clst_warmboot_data[pll_id] = 0;
		}
	}
}

static int clst_pwr_domain_resume(plat_local_state_t lvl_state)
{
	uint32_t cpu_id = plat_my_core_pos();
	uint32_t pll_id, pll_st;

	assert(cpu_id < PLATFORM_CORE_COUNT);

	if (lvl_state == PLAT_MAX_OFF_STATE) {
		if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT)
			pll_id = ALPLL_ID;
		else
			pll_id = ABPLL_ID;

		pll_st = mmio_read_32(CRU_BASE + CRU_PLL_CON(pll_id, 3)) >>
			 PLL_MODE_SHIFT;

		if (pll_st != NORMAL_MODE) {
			WARN("%s: clst (%d) is in error mode (%d)\n",
			     __func__, pll_id, pll_st);
			return -1;
		}
	}

	return 0;
}

static void nonboot_cpus_off(void)
{
	uint32_t boot_cpu, cpu;

	boot_cpu = plat_my_core_pos();

	/* turn off the nonboot CPUs */
	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) {
		if (cpu == boot_cpu)
			continue;
		cpus_power_domain_off(cpu, core_pwr_pd);
	}
}

static int cores_pwr_domain_on(unsigned long mpidr, uint64_t entrypoint)
{
	uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr);

	assert(cpu_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG;
	cpuson_entry_point[cpu_id] = entrypoint;
	dsb();

	cpus_power_domain_on(cpu_id);

	return 0;
}

static int cores_pwr_domain_off(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	cpus_power_domain_off(cpu_id, core_pwr_wfi);

	return 0;
}

static int hlvl_pwr_domain_off(uint32_t lvl, plat_local_state_t lvl_state)
{
	switch (lvl) {
	case MPIDR_AFFLVL1:
		clst_pwr_domain_suspend(lvl_state);
		break;
	default:
		break;
	}

	return 0;
}

static int cores_pwr_domain_suspend(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	assert(cpu_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN;
	cpuson_entry_point[cpu_id] = plat_get_sec_entrypoint();
	dsb();

	cpus_power_domain_off(cpu_id, core_pwr_wfi_int);

	return 0;
}

static int hlvl_pwr_domain_suspend(uint32_t lvl, plat_local_state_t lvl_state)
{
	switch (lvl) {
	case MPIDR_AFFLVL1:
		clst_pwr_domain_suspend(lvl_state);
		break;
	default:
		break;
	}

	return 0;
}

static int cores_pwr_domain_on_finish(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
		      CORES_PM_DISABLE);
	return 0;
}

static int hlvl_pwr_domain_on_finish(uint32_t lvl,
				     plat_local_state_t lvl_state)
{
	switch (lvl) {
	case MPIDR_AFFLVL1:
		clst_pwr_domain_resume(lvl_state);
		break;
	default:
		break;
	}

	return 0;
}

static int cores_pwr_domain_resume(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	/* Disable core_pm */
	mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), CORES_PM_DISABLE);

	return 0;
}

static int hlvl_pwr_domain_resume(uint32_t lvl, plat_local_state_t lvl_state)
{
	switch (lvl) {
	case MPIDR_AFFLVL1:
		clst_pwr_domain_resume(lvl_state);
		break;
	default:
		break;
	}

	return 0;
}

/**
 * init_pmu_counts - Init timing counts in the PMU register area
 *
 * At various points when we power up or down parts of the system we need
 * a delay to wait for power /
 * clocks to become stable. The PMU has counters to help software do the
 * delay properly. Basically, it works like this:
 * - Software sets up counter values.
 * - When software turns on something in the PMU, the counter kicks off.
 * - The hardware sets a bit automatically when the counter has finished
 *   and software knows that the initialization is done.
 *
 * It's software's job to set up these counters. The hardware power-on
 * default for these settings is conservative, setting everything to 0x5dc0
 * (750 ms in 32 kHz counts or 1 ms in 24 MHz counts).
 *
 * Note that some of these counters are only really used at suspend/resume
 * time (for instance, that's the only time we turn off/on the oscillator)
 * and others are used during normal runtime (like turning on/off a CPU or
 * GPU), but it doesn't hurt to init everything at boot.
 *
 * Also note that these counters can run off the 32 kHz clock or the 24 MHz
 * clock. While the 24 MHz clock can give us more precision, it's not always
 * available (like when we turn the oscillator off at sleep time). The
 * pmu_use_lf bit (lf: low frequency) selects the clock used while in power
 * mode. Our current understanding is that the counts work like this:
 *   IF (pmu_use_lf == 0) || (power_mode_en == 0)
 *     use the 24M OSC for counts
 *   ELSE
 *     use the 32K OSC for counts
 *
 * Notes:
 * - There is a separate bit for the PMU called PMU_24M_EN_CFG. At the
 *   moment we always keep that 0. It apparently chooses between using the
 *   PLL as the source for the PMU vs. the 24M clock. If we ever set it to 1
 *   we should consider how it affects these counts (if at all).
 * - The power_mode_en bit is documented to auto-clear when we leave
 *   "power mode". That's why most clocks are on 24M. Only timings used
 *   while in "power mode" are on 32k.
 * - In some cases the kernel may override these counts.
 *
 * PMU_STABLE_CNT / PMU_OSC_CNT / PMU_PLLLOCK_CNT are the important counters
 * in power mode; we need to ensure that they are set up correctly.
 */
static void init_pmu_counts(void)
{
	/* COUNTS FOR INSIDE POWER MODE */

	/*
	 * From limited testing, need PMU stable >= 2ms, but go overkill
	 * and choose 30 ms to match testing on past SoCs. Also let
	 * OSC have 30 ms for stabilization.
	 */
	mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_32K_CNT_MS(30));
	mmio_write_32(PMU_BASE + PMU_OSC_CNT, CYCL_32K_CNT_MS(30));

	/* Unclear what these should be; try 3 ms */
	mmio_write_32(PMU_BASE + PMU_WAKEUP_RST_CLR_CNT, CYCL_32K_CNT_MS(3));

	/* Unclear what this should be, but set the default explicitly */
	mmio_write_32(PMU_BASE + PMU_TIMEOUT_CNT, 0x5dc0);

	/* COUNTS FOR OUTSIDE POWER MODE */

	/* Put something sorta conservative here until we know better */
	mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_24M_CNT_MS(3));
	mmio_write_32(PMU_BASE + PMU_DDRIO_PWRON_CNT, CYCL_24M_CNT_MS(1));
	mmio_write_32(PMU_BASE + PMU_CENTER_PWRDN_CNT, CYCL_24M_CNT_MS(1));
	mmio_write_32(PMU_BASE + PMU_CENTER_PWRUP_CNT, CYCL_24M_CNT_MS(1));

	/*
	 * Set CPU/GPU to 1 us.
	 *
	 * NOTE: Even though ATF doesn't configure the GPU we'll still setup
	 * counts here. After all ATF controls all these other bits and also
	 * chooses which clock these counters use.
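	 *
	 * (A hedged worked example: assuming CYCL_24M_CNT_US(us) expands to
	 * 24 * us, the 1 us values below are 24 cycles at 24 MHz. Likewise
	 * the 0x5dc0 default mentioned above is 24000 decimal: 1 ms at
	 * 24 MHz, or roughly the documented 750 ms at 32 kHz.)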
	 */
	mmio_write_32(PMU_BASE + PMU_SCU_L_PWRDN_CNT, CYCL_24M_CNT_US(1));
	mmio_write_32(PMU_BASE + PMU_SCU_L_PWRUP_CNT, CYCL_24M_CNT_US(1));
	mmio_write_32(PMU_BASE + PMU_SCU_B_PWRDN_CNT, CYCL_24M_CNT_US(1));
	mmio_write_32(PMU_BASE + PMU_SCU_B_PWRUP_CNT, CYCL_24M_CNT_US(1));
	mmio_write_32(PMU_BASE + PMU_GPU_PWRDN_CNT, CYCL_24M_CNT_US(1));
	mmio_write_32(PMU_BASE + PMU_GPU_PWRUP_CNT, CYCL_24M_CNT_US(1));
}

static uint32_t clk_ddrc_save;

static void sys_slp_config(void)
{
	uint32_t slp_mode_cfg = 0;

	/* keep the clk_ddrc_bpll_src_en gate enabled for the DDRC */
	clk_ddrc_save = mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(3));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(3), WMSK_BIT(1));

	prepare_abpll_for_ddrctrl();
	sram_func_set_ddrctl_pll(ABPLL_ID);

	mmio_write_32(GRF_BASE + GRF_SOC_CON4, CCI_FORCE_WAKEUP);
	mmio_write_32(PMU_BASE + PMU_CCI500_CON,
		      BIT_WITH_WMSK(PMU_CLR_PREQ_CCI500_HW) |
		      BIT_WITH_WMSK(PMU_CLR_QREQ_CCI500_HW) |
		      BIT_WITH_WMSK(PMU_QGATING_CCI500_CFG));

	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      BIT_WITH_WMSK(PMU_CLR_CORE_L_HW) |
		      BIT_WITH_WMSK(PMU_CLR_CORE_L_2GIC_HW) |
		      BIT_WITH_WMSK(PMU_CLR_GIC2_CORE_L_HW));

	slp_mode_cfg = BIT(PMU_PWR_MODE_EN) |
		       BIT(PMU_POWER_OFF_REQ_CFG) |
		       BIT(PMU_CPU0_PD_EN) |
		       BIT(PMU_L2_FLUSH_EN) |
		       BIT(PMU_L2_IDLE_EN) |
		       BIT(PMU_SCU_PD_EN) |
		       BIT(PMU_CCI_PD_EN) |
		       BIT(PMU_CLK_CORE_SRC_GATE_EN) |
		       BIT(PMU_ALIVE_USE_LF) |
		       BIT(PMU_SREF0_ENTER_EN) |
		       BIT(PMU_SREF1_ENTER_EN) |
		       BIT(PMU_DDRC0_GATING_EN) |
		       BIT(PMU_DDRC1_GATING_EN) |
		       BIT(PMU_DDRIO0_RET_EN) |
		       BIT(PMU_DDRIO1_RET_EN) |
		       BIT(PMU_DDRIO_RET_HW_DE_REQ) |
		       BIT(PMU_CENTER_PD_EN) |
		       BIT(PMU_PLL_PD_EN) |
		       BIT(PMU_CLK_CENTER_SRC_GATE_EN) |
		       BIT(PMU_OSC_DIS) |
		       BIT(PMU_PMU_USE_LF);

	mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, BIT(PMU_GPIO_WKUP_EN));
	mmio_write_32(PMU_BASE + PMU_PWRMODE_CON, slp_mode_cfg);

	mmio_write_32(PMU_BASE + PMU_PLL_CON, PLL_PD_HW);
	mmio_write_32(PMUGRF_BASE + PMUGRF_SOC_CON0, EXTERNAL_32K);
	mmio_write_32(PMUGRF_BASE, IOMUX_CLK_32K); /* 32k iomux */
}

static void set_hw_idle(uint32_t hw_idle)
{
	mmio_setbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle);
}

static void clr_hw_idle(uint32_t hw_idle)
{
	mmio_clrbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle);
}

static uint32_t iomux_status[12];
static uint32_t pull_mode_status[12];
static uint32_t gpio_direction[3];
static uint32_t gpio_2_4_clk_gate;

static void suspend_apio(void)
{
	struct apio_info *suspend_apio;
	int i;

	suspend_apio = plat_get_rockchip_suspend_apio();

	if (!suspend_apio)
		return;

	/* save gpio2 ~ gpio4 iomux and pull mode */
	for (i = 0; i < 12; i++) {
		iomux_status[i] = mmio_read_32(GRF_BASE +
				GRF_GPIO2A_IOMUX + i * 4);
		pull_mode_status[i] = mmio_read_32(GRF_BASE +
				GRF_GPIO2A_P + i * 4);
	}

	/* store gpio2 ~ gpio4 clock gate state */
	gpio_2_4_clk_gate = (mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(31)) >>
				PCLK_GPIO2_GATE_SHIFT) & 0x07;

	/* enable the gpio2 ~ gpio4 clocks (clear the clock gates) */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
		      BITS_WITH_WMASK(0, 0x07, PCLK_GPIO2_GATE_SHIFT));

	/* save gpio2 ~ gpio4 direction */
	gpio_direction[0] = mmio_read_32(GPIO2_BASE + 0x04);
	gpio_direction[1] = mmio_read_32(GPIO3_BASE + 0x04);
	gpio_direction[2] = mmio_read_32(GPIO4_BASE + 0x04);
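
	/*
	 * Each apioN flag below asks us to park one GPIO bank range while
	 * suspended: iomux switched to GPIO, pulls disabled, and direction
	 * set to input.
	 */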
	/* apio1 covers gpio3a0 ~ gpio3c7 */
	if (suspend_apio->apio1) {

		/* set gpio3a0 ~ gpio3c7 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO3A_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO3B_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO3C_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio3a0 ~ gpio3c7 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO3A_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO3B_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO3C_P, REG_SOC_WMSK | 0);

		/* set gpio3a0 ~ gpio3c7 to input */
		mmio_clrbits_32(GPIO3_BASE + 0x04, 0x00ffffff);
	}

	/* apio2 covers gpio2a0 ~ gpio2b4 */
	if (suspend_apio->apio2) {

		/* set gpio2a0 ~ gpio2b4 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO2B_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio2a0 ~ gpio2b4 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO2A_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO2B_P, REG_SOC_WMSK | 0);

		/* set gpio2a0 ~ gpio2b4 to input */
		mmio_clrbits_32(GPIO2_BASE + 0x04, 0x00001fff);
	}

	/* apio3 covers gpio2c0 ~ gpio2d4 */
	if (suspend_apio->apio3) {

		/* set gpio2c0 ~ gpio2d4 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO2C_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO2D_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio2c0 ~ gpio2d4 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO2C_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO2D_P, REG_SOC_WMSK | 0);

		/* set gpio2c0 ~ gpio2d4 to input */
		mmio_clrbits_32(GPIO2_BASE + 0x04, 0x1fff0000);
	}

	/* apio4 covers gpio4c0 ~ gpio4c7, gpio4d0 ~ gpio4d6 */
	if (suspend_apio->apio4) {

		/* set gpio4c0 ~ gpio4d6 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO4D_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio4c0 ~ gpio4d6 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO4C_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO4D_P, REG_SOC_WMSK | 0);

		/* set gpio4c0 ~ gpio4d6 to input */
		mmio_clrbits_32(GPIO4_BASE + 0x04, 0x7fff0000);
	}

	/* apio5 covers gpio3d0 ~ gpio3d7, gpio4a0 ~ gpio4a7 */
	if (suspend_apio->apio5) {
		/* set gpio3d0 ~ gpio4a7 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO3D_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO4A_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio3d0 ~ gpio4a7 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO3D_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO4A_P, REG_SOC_WMSK | 0);

		/* set gpio3d0 ~ gpio3d7 and gpio4a0 ~ gpio4a7 to input */
		mmio_clrbits_32(GPIO3_BASE + 0x04, 0xff000000);
		mmio_clrbits_32(GPIO4_BASE + 0x04, 0x000000ff);
	}
}

static void resume_apio(void)
{
	struct apio_info *suspend_apio;
	int i;

	suspend_apio = plat_get_rockchip_suspend_apio();

	if (!suspend_apio)
		return;

	for (i = 0; i < 12; i++) {
		mmio_write_32(GRF_BASE + GRF_GPIO2A_P + i * 4,
			      REG_SOC_WMSK | pull_mode_status[i]);
		mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX + i * 4,
			      REG_SOC_WMSK | iomux_status[i]);
	}

	/* set gpio2 ~ gpio4 direction back to the stored value */
	mmio_write_32(GPIO2_BASE + 0x04, gpio_direction[0]);
	mmio_write_32(GPIO3_BASE + 0x04, gpio_direction[1]);
	mmio_write_32(GPIO4_BASE + 0x04, gpio_direction[2]);

	/* set gpio2 ~ gpio4 clock gate back to the stored value */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
		      BITS_WITH_WMASK(gpio_2_4_clk_gate, 0x07,
				      PCLK_GPIO2_GATE_SHIFT));
}

static void suspend_gpio(void)
{
	struct gpio_info *suspend_gpio;
	uint32_t count;
	int i;

	suspend_gpio = plat_get_rockchip_suspend_gpio(&count);

	for (i = 0; i < count; i++) {
		gpio_set_value(suspend_gpio[i].index, suspend_gpio[i].polarity);
		gpio_set_direction(suspend_gpio[i].index, GPIO_DIR_OUT);
		udelay(1);
	}
}

static void resume_gpio(void)
{
	struct gpio_info *suspend_gpio;
	uint32_t count;
	int i;

	suspend_gpio = plat_get_rockchip_suspend_gpio(&count);

	for (i = count - 1; i >= 0; i--) {
		gpio_set_value(suspend_gpio[i].index,
			       !suspend_gpio[i].polarity);
		gpio_set_direction(suspend_gpio[i].index, GPIO_DIR_OUT);
		udelay(1);
	}
}

static void m0_clock_init(void)
{
	/* enable clocks for M0 */
	mmio_write_32(PMUCRU_BASE + PMUCRU_CLKGATE_CON2,
		      BITS_WITH_WMASK(0x0, 0x2f, 0));

	/* switch the parent to xin24M and div == 1 */
	mmio_write_32(PMUCRU_BASE + PMUCRU_CLKSEL_CON0,
		      BIT_WITH_WMSK(15) | BITS_WITH_WMASK(0x0, 0x1f, 8));

	/* start M0 */
	mmio_write_32(PMUCRU_BASE + PMUCRU_SOFTRST_CON0,
		      BITS_WITH_WMASK(0x0, 0x24, 0));

	/* disable gating for M0 */
	mmio_write_32(PMUCRU_BASE + PMUCRU_GATEDIS_CON0, BIT_WITH_WMSK(1));
}

static void m0_reset(void)
{
	/* stop M0 */
	mmio_write_32(PMUCRU_BASE + PMUCRU_SOFTRST_CON0,
		      BITS_WITH_WMASK(0x24, 0x24, 0));

	/* restore the gating bit for M0 */
	mmio_write_32(PMUCRU_BASE + PMUCRU_GATEDIS_CON0, WMSK_BIT(1));

	/* disable clocks for M0 */
	mmio_write_32(PMUCRU_BASE + PMUCRU_CLKGATE_CON2,
		      BITS_WITH_WMASK(0x2f, 0x2f, 0));
}

static int sys_pwr_domain_suspend(void)
{
	uint32_t wait_cnt = 0;
	uint32_t status = 0;

	dmc_save();
	pmu_scu_b_pwrdn();

	pmu_power_domains_suspend();
	set_hw_idle(BIT(PMU_CLR_CENTER1) |
		    BIT(PMU_CLR_ALIVE) |
		    BIT(PMU_CLR_MSCH0) |
		    BIT(PMU_CLR_MSCH1) |
		    BIT(PMU_CLR_CCIM0) |
		    BIT(PMU_CLR_CCIM1) |
		    BIT(PMU_CLR_CENTER) |
		    BIT(PMU_CLR_GIC));

	sys_slp_config();

	m0_clock_init();

	pmu_sgrf_rst_hld();

	mmio_write_32(SGRF_BASE + SGRF_SOC_CON0_1(1),
		      (PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) |
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_SW) |
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_GIC2_CORE_B_SW));
	dsb();
	status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) |
		 BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) |
		 BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST);
	while ((mmio_read_32(PMU_BASE +
			     PMU_ADB400_ST) & status) != status) {
		wait_cnt++;
		if (wait_cnt >= MAX_WAIT_COUNT) {
			ERROR("%s:wait cluster-b adb400(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_ADB400_ST));
			panic();
		}
	}
	mmio_setbits_32(PMU_BASE + PMU_PWRDN_CON, BIT(PMU_SCU_B_PWRDWN_EN));

	secure_watchdog_disable();

	/*
	 * Disabling the PLLs, PWMs, and DVFS is the last step before WFI
	 * on the suspend path.
	 */
	disable_dvfs_plls();
	disable_pwms();
	disable_nodvfs_plls();

	suspend_apio();
	suspend_gpio();

	return 0;
}

static int sys_pwr_domain_resume(void)
{
	uint32_t wait_cnt = 0;
	uint32_t status = 0;

	resume_apio();
	resume_gpio();
	enable_nodvfs_plls();
	enable_pwms();
	/* PWM regulators take time to come up; give 300us to be safe. */
	udelay(300);
	enable_dvfs_plls();

	secure_watchdog_restore();

	/* restore the clk_ddrc_bpll_src_en gate */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(3),
		      BITS_WITH_WMASK(clk_ddrc_save, 0xff, 0));

	/*
	 * The wakeup status does not clear by itself; we need to clear it
	 * manually. Otherwise we would always see a stale interrupt the
	 * next time we query it.
	 *
	 * NOTE: If the kernel needs to query this, we might want to stash
	 * it somewhere.
	 */
	mmio_write_32(PMU_BASE + PMU_WAKEUP_STATUS, 0xffffffff);
	mmio_write_32(PMU_BASE + PMU_WKUP_CFG4, 0x00);

	mmio_write_32(SGRF_BASE + SGRF_SOC_CON0_1(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	mmio_write_32(PMU_BASE + PMU_CCI500_CON,
		      WMSK_BIT(PMU_CLR_PREQ_CCI500_HW) |
		      WMSK_BIT(PMU_CLR_QREQ_CCI500_HW) |
		      WMSK_BIT(PMU_QGATING_CCI500_CFG));
	dsb();
	mmio_clrbits_32(PMU_BASE + PMU_PWRDN_CON,
			BIT(PMU_SCU_B_PWRDWN_EN));

	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) |
		      WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_SW) |
		      WMSK_BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW) |
		      WMSK_BIT(PMU_CLR_CORE_L_HW) |
		      WMSK_BIT(PMU_CLR_CORE_L_2GIC_HW) |
		      WMSK_BIT(PMU_CLR_GIC2_CORE_L_HW));

	status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) |
		 BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) |
		 BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST);

	while ((mmio_read_32(PMU_BASE +
			     PMU_ADB400_ST) & status)) {
		wait_cnt++;
		if (wait_cnt >= MAX_WAIT_COUNT) {
			ERROR("%s:wait cluster-b adb400(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_ADB400_ST));
			panic();
		}
	}

	pmu_sgrf_rst_hld_release();
	pmu_scu_b_pwrup();
	pmu_power_domains_resume();

	restore_dpll();
	sram_func_set_ddrctl_pll(DPLL_ID);
	restore_abpll();

	clr_hw_idle(BIT(PMU_CLR_CENTER1) |
		    BIT(PMU_CLR_ALIVE) |
		    BIT(PMU_CLR_MSCH0) |
		    BIT(PMU_CLR_MSCH1) |
		    BIT(PMU_CLR_CCIM0) |
		    BIT(PMU_CLR_CCIM1) |
		    BIT(PMU_CLR_CENTER) |
		    BIT(PMU_CLR_GIC));

	plat_rockchip_gic_cpuif_enable();

	m0_reset();

	return 0;
}

void __dead2 soc_soft_reset(void)
{
	struct gpio_info *rst_gpio;

	rst_gpio = plat_get_rockchip_gpio_reset();

	if (rst_gpio) {
		gpio_set_direction(rst_gpio->index, GPIO_DIR_OUT);
		gpio_set_value(rst_gpio->index, rst_gpio->polarity);
	} else {
		soc_global_soft_reset();
	}

	while (1)
		;
}

void __dead2 soc_system_off(void)
{
	struct gpio_info *poweroff_gpio;

	poweroff_gpio = plat_get_rockchip_gpio_poweroff();

	if (poweroff_gpio) {
		/*
		 * If the TSADC over-temperature pin (GPIO1A6) is used as
		 * the shutdown GPIO, its iomux must first be set back to
		 * the GPIO function.
		 */
		if (poweroff_gpio->index == TSADC_INT_PIN) {
			mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO1A_IOMUX,
				      GPIO1A6_IOMUX);
		}

		gpio_set_direction(poweroff_gpio->index, GPIO_DIR_OUT);
		gpio_set_value(poweroff_gpio->index, poweroff_gpio->polarity);
	} else {
		WARN("Nothing to do for system off\n");
	}

	while (1)
		;
}

static struct rockchip_pm_ops_cb pm_ops = {
	.cores_pwr_dm_on = cores_pwr_domain_on,
	.cores_pwr_dm_off = cores_pwr_domain_off,
	.cores_pwr_dm_on_finish = cores_pwr_domain_on_finish,
	.cores_pwr_dm_suspend = cores_pwr_domain_suspend,
	.cores_pwr_dm_resume = cores_pwr_domain_resume,
	.hlvl_pwr_dm_suspend = hlvl_pwr_domain_suspend,
	.hlvl_pwr_dm_resume = hlvl_pwr_domain_resume,
	.hlvl_pwr_dm_off = hlvl_pwr_domain_off,
	.hlvl_pwr_dm_on_finish = hlvl_pwr_domain_on_finish,
	.sys_pwr_dm_suspend = sys_pwr_domain_suspend,
	.sys_pwr_dm_resume = sys_pwr_domain_resume,
	.sys_gbl_soft_reset = soc_soft_reset,
	.system_off = soc_system_off,
};

void plat_rockchip_pmu_init(void)
{
	uint32_t cpu;

	rockchip_pd_lock_init();
	plat_setup_rockchip_pm_ops(&pm_ops);

	/* the boot address register only holds 32 bits, so truncate */
	cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;

	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
		cpuson_flags[cpu] = 0;

	for (cpu = 0; cpu < PLATFORM_CLUSTER_COUNT; cpu++)
		clst_warmboot_data[cpu] = 0;

	psram_sleep_cfg->ddr_func = (uint64_t)dmc_restore;
	psram_sleep_cfg->ddr_data = (uint64_t)&sdram_config;
	psram_sleep_cfg->ddr_flag = 0x01;

	psram_sleep_cfg->boot_mpidr = read_mpidr_el1() & 0xffff;

	/* config the warm boot address of the CPUs */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON0_1(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	mmio_write_32(PMU_BASE + PMU_NOC_AUTO_ENA, NOC_AUTO_ENABLE);

	/*
	 * Enable Schmitt trigger for better 32 kHz input signal, which is
	 * important for suspend/resume reliability among other things.
	 */
	mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO0A_SMT, GPIO0A0_SMT_ENABLE);

	init_pmu_counts();

	nonboot_cpus_off();

	INFO("%s(%d): pd status %x\n", __func__, __LINE__,
	     mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}