/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bakery_lock.h>
#include <bl31.h>
#include <debug.h>
#include <delay_timer.h>
#include <errno.h>
#include <gpio.h>
#include <mmio.h>
#include <plat_params.h>
#include <plat_private.h>
#include <platform.h>
#include <platform_def.h>
#include <pmu.h>
#include <pmu_com.h>
#include <pmu_sram.h>
#include <pwm.h>
#include <rk3399_def.h>
#include <rk3399m0.h>
#include <soc.h>

DEFINE_BAKERY_LOCK(rockchip_pd_lock);

static struct psram_data_t *psram_sleep_cfg =
        (struct psram_data_t *)PSRAM_DT_BASE;

static uint32_t cpu_warm_boot_addr;

/*
 * There are two ways to power a core on or off:
 * 1) Switch its power domain on or off directly via the PMU_PWRDN_CON
 *    register; this is the core_pwr_pd mode.
 * 2) Enable core power management in the PMU_CORE_PM_CON register; the
 *    power domain is then powered off automatically once the core enters
 *    wfi. This is the core_pwr_wfi or core_pwr_wfi_int mode.
 * core_pm_cfg_info records which method is currently in use for each core.
 */
static uint32_t core_pm_cfg_info[PLATFORM_CORE_COUNT]
#if USE_COHERENT_MEM
__attribute__ ((section("tzfw_coherent_mem")))
#endif
; /* coherent: shared across cores, see set_cpus_pwr_domain_cfg_info() */

static void pmu_bus_idle_req(uint32_t bus, uint32_t state)
{
        uint32_t bus_id = BIT(bus);
        uint32_t bus_req;
        uint32_t wait_cnt = 0;
        uint32_t bus_state, bus_ack;

        if (state)
                bus_req = BIT(bus);
        else
                bus_req = 0;

        mmio_clrsetbits_32(PMU_BASE + PMU_BUS_IDLE_REQ, bus_id, bus_req);

        do {
                bus_state = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) & bus_id;
                bus_ack = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK) & bus_id;
                wait_cnt++;
        } while ((bus_state != bus_req || bus_ack != bus_req) &&
                 (wait_cnt < MAX_WAIT_COUNT));

        if (bus_state != bus_req || bus_ack != bus_req) {
                INFO("%s:st=%x(%x)\n", __func__,
                     mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST),
                     bus_state);
                INFO("%s:ack=%x(%x)\n", __func__,
                     mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK),
                     bus_ack);
        }
}
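
/*
 * The helper above implements the usual Rockchip bus-idle handshake:
 * software raises (or clears) the per-bus request bit in PMU_BUS_IDLE_REQ
 * and then polls until both PMU_BUS_IDLE_ST and PMU_BUS_IDLE_ACK reflect
 * the requested value, giving up after MAX_WAIT_COUNT polls. For example,
 * idling and later reactivating the GPU bus looks like:
 *
 *      pmu_bus_idle_req(BUS_ID_GPU, BUS_IDLE);
 *      ...
 *      pmu_bus_idle_req(BUS_ID_GPU, BUS_ACTIVE);
 */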

struct pmu_slpdata_s pmu_slpdata;

static void qos_save(void)
{
        if (pmu_power_domain_st(PD_GPU) == pmu_pd_on)
                SAVE_QOS(pmu_slpdata.gpu_qos, GPU);
        if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) {
                SAVE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0);
                SAVE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1);
        }
        if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) {
                SAVE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0);
                SAVE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1);
        }
        if (pmu_power_domain_st(PD_VO) == pmu_pd_on) {
                SAVE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R);
                SAVE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W);
                SAVE_QOS(pmu_slpdata.vop_little, VOP_LITTLE);
        }
        if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on)
                SAVE_QOS(pmu_slpdata.hdcp_qos, HDCP);
        if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on)
                SAVE_QOS(pmu_slpdata.gmac_qos, GMAC);
        if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) {
                SAVE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0);
                SAVE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1);
        }
        if (pmu_power_domain_st(PD_SD) == pmu_pd_on)
                SAVE_QOS(pmu_slpdata.sdmmc_qos, SDMMC);
        if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on)
                SAVE_QOS(pmu_slpdata.emmc_qos, EMMC);
        if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on)
                SAVE_QOS(pmu_slpdata.sdio_qos, SDIO);
        if (pmu_power_domain_st(PD_GIC) == pmu_pd_on)
                SAVE_QOS(pmu_slpdata.gic_qos, GIC);
        if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) {
                SAVE_QOS(pmu_slpdata.rga_r_qos, RGA_R);
                SAVE_QOS(pmu_slpdata.rga_w_qos, RGA_W);
        }
        if (pmu_power_domain_st(PD_IEP) == pmu_pd_on)
                SAVE_QOS(pmu_slpdata.iep_qos, IEP);
        if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) {
                SAVE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0);
                SAVE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1);
        }
        if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) {
                SAVE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0);
                SAVE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1);
                SAVE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP);
        }
        if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) {
                SAVE_QOS(pmu_slpdata.dmac0_qos, DMAC0);
                SAVE_QOS(pmu_slpdata.dmac1_qos, DMAC1);
                SAVE_QOS(pmu_slpdata.dcf_qos, DCF);
                SAVE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0);
                SAVE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1);
                SAVE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP);
                SAVE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP);
                SAVE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1);
        }
        if (pmu_power_domain_st(PD_VDU) == pmu_pd_on)
                SAVE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0);
        if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) {
                SAVE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R);
                SAVE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W);
        }
}

static void qos_restore(void)
{
        if (pmu_power_domain_st(PD_GPU) == pmu_pd_on)
                RESTORE_QOS(pmu_slpdata.gpu_qos, GPU);
        if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) {
                RESTORE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0);
                RESTORE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1);
        }
        if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) {
                RESTORE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0);
                RESTORE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1);
        }
        if (pmu_power_domain_st(PD_VO) == pmu_pd_on) {
                RESTORE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R);
                RESTORE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W);
                RESTORE_QOS(pmu_slpdata.vop_little, VOP_LITTLE);
        }
        if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on)
                RESTORE_QOS(pmu_slpdata.hdcp_qos, HDCP);
        if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on)
                RESTORE_QOS(pmu_slpdata.gmac_qos, GMAC);
        if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) {
                RESTORE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0);
                RESTORE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1);
        }
        if (pmu_power_domain_st(PD_SD) == pmu_pd_on)
                RESTORE_QOS(pmu_slpdata.sdmmc_qos, SDMMC);
        if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on)
                RESTORE_QOS(pmu_slpdata.emmc_qos, EMMC);
        if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on)
                RESTORE_QOS(pmu_slpdata.sdio_qos, SDIO);
        if (pmu_power_domain_st(PD_GIC) == pmu_pd_on)
                RESTORE_QOS(pmu_slpdata.gic_qos, GIC);
        if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) {
                RESTORE_QOS(pmu_slpdata.rga_r_qos, RGA_R);
                RESTORE_QOS(pmu_slpdata.rga_w_qos, RGA_W);
        }
        if (pmu_power_domain_st(PD_IEP) == pmu_pd_on)
                RESTORE_QOS(pmu_slpdata.iep_qos, IEP);
        if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) {
                RESTORE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0);
                RESTORE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1);
        }
        if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) {
                RESTORE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0);
                RESTORE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1);
                RESTORE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP);
        }
        if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) {
                RESTORE_QOS(pmu_slpdata.dmac0_qos, DMAC0);
                RESTORE_QOS(pmu_slpdata.dmac1_qos, DMAC1);
                RESTORE_QOS(pmu_slpdata.dcf_qos, DCF);
                RESTORE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0);
                RESTORE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1);
                RESTORE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP);
                RESTORE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP);
                RESTORE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1);
        }
        if (pmu_power_domain_st(PD_VDU) == pmu_pd_on)
                RESTORE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0);
        if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) {
                RESTORE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R);
                RESTORE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W);
        }
}
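
/*
 * Both helpers above deliberately skip any domain that is currently
 * powered down, presumably because a domain's QoS registers are only
 * accessible (and only worth preserving) while the domain is on.
 */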

static int pmu_set_power_domain(uint32_t pd_id, uint32_t pd_state)
{
        uint32_t state;

        if (pmu_power_domain_st(pd_id) == pd_state)
                goto out;

        if (pd_state == pmu_pd_on)
                pmu_power_domain_ctr(pd_id, pd_state);

        state = (pd_state == pmu_pd_off) ? BUS_IDLE : BUS_ACTIVE;

        switch (pd_id) {
        case PD_GPU:
                pmu_bus_idle_req(BUS_ID_GPU, state);
                break;
        case PD_VIO:
                pmu_bus_idle_req(BUS_ID_VIO, state);
                break;
        case PD_ISP0:
                pmu_bus_idle_req(BUS_ID_ISP0, state);
                break;
        case PD_ISP1:
                pmu_bus_idle_req(BUS_ID_ISP1, state);
                break;
        case PD_VO:
                pmu_bus_idle_req(BUS_ID_VOPB, state);
                pmu_bus_idle_req(BUS_ID_VOPL, state);
                break;
        case PD_HDCP:
                pmu_bus_idle_req(BUS_ID_HDCP, state);
                break;
        case PD_TCPD0:
                break;
        case PD_TCPD1:
                break;
        case PD_GMAC:
                pmu_bus_idle_req(BUS_ID_GMAC, state);
                break;
        case PD_CCI:
                pmu_bus_idle_req(BUS_ID_CCIM0, state);
                pmu_bus_idle_req(BUS_ID_CCIM1, state);
                break;
        case PD_SD:
                pmu_bus_idle_req(BUS_ID_SD, state);
                break;
        case PD_EMMC:
                pmu_bus_idle_req(BUS_ID_EMMC, state);
                break;
        case PD_EDP:
                pmu_bus_idle_req(BUS_ID_EDP, state);
                break;
        case PD_SDIOAUDIO:
                pmu_bus_idle_req(BUS_ID_SDIOAUDIO, state);
                break;
        case PD_GIC:
                pmu_bus_idle_req(BUS_ID_GIC, state);
                break;
        case PD_RGA:
                pmu_bus_idle_req(BUS_ID_RGA, state);
                break;
        case PD_VCODEC:
                pmu_bus_idle_req(BUS_ID_VCODEC, state);
                break;
        case PD_VDU:
                pmu_bus_idle_req(BUS_ID_VDU, state);
                break;
        case PD_IEP:
                pmu_bus_idle_req(BUS_ID_IEP, state);
                break;
        case PD_USB3:
                pmu_bus_idle_req(BUS_ID_USB3, state);
                break;
        case PD_PERIHP:
                pmu_bus_idle_req(BUS_ID_PERIHP, state);
                break;
        default:
                break;
        }

        if (pd_state == pmu_pd_off)
                pmu_power_domain_ctr(pd_id, pd_state);

out:
        return 0;
}

static uint32_t pmu_powerdomain_state;

static void pmu_power_domains_suspend(void)
{
        clk_gate_con_save();
        clk_gate_con_disable();
        qos_save();
        pmu_powerdomain_state = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);
        pmu_set_power_domain(PD_GPU, pmu_pd_off);
        pmu_set_power_domain(PD_TCPD0, pmu_pd_off);
        pmu_set_power_domain(PD_TCPD1, pmu_pd_off);
        pmu_set_power_domain(PD_VO, pmu_pd_off);
        pmu_set_power_domain(PD_ISP0, pmu_pd_off);
        pmu_set_power_domain(PD_ISP1, pmu_pd_off);
        pmu_set_power_domain(PD_HDCP, pmu_pd_off);
        pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_off);
        pmu_set_power_domain(PD_GMAC, pmu_pd_off);
        pmu_set_power_domain(PD_EDP, pmu_pd_off);
        pmu_set_power_domain(PD_IEP, pmu_pd_off);
        pmu_set_power_domain(PD_RGA, pmu_pd_off);
        pmu_set_power_domain(PD_VCODEC, pmu_pd_off);
        pmu_set_power_domain(PD_VDU, pmu_pd_off);
        clk_gate_con_restore();
}
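
/*
 * Resume below is the mirror image of the suspend sequence above: domains
 * are brought back up in the reverse order in which they were shut down,
 * and only the ones that PMU_PWRDN_ST reported as powered (bit clear)
 * before the suspend are turned back on.
 */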

static void pmu_power_domains_resume(void)
{
        clk_gate_con_save();
        clk_gate_con_disable();
        if (!(pmu_powerdomain_state & BIT(PD_VDU)))
                pmu_set_power_domain(PD_VDU, pmu_pd_on);
        if (!(pmu_powerdomain_state & BIT(PD_VCODEC)))
                pmu_set_power_domain(PD_VCODEC, pmu_pd_on);
        if (!(pmu_powerdomain_state & BIT(PD_RGA)))
                pmu_set_power_domain(PD_RGA, pmu_pd_on);
        if (!(pmu_powerdomain_state & BIT(PD_IEP)))
                pmu_set_power_domain(PD_IEP, pmu_pd_on);
        if (!(pmu_powerdomain_state & BIT(PD_EDP)))
                pmu_set_power_domain(PD_EDP, pmu_pd_on);
        if (!(pmu_powerdomain_state & BIT(PD_GMAC)))
                pmu_set_power_domain(PD_GMAC, pmu_pd_on);
        if (!(pmu_powerdomain_state & BIT(PD_SDIOAUDIO)))
                pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_on);
        if (!(pmu_powerdomain_state & BIT(PD_HDCP)))
                pmu_set_power_domain(PD_HDCP, pmu_pd_on);
        if (!(pmu_powerdomain_state & BIT(PD_ISP1)))
                pmu_set_power_domain(PD_ISP1, pmu_pd_on);
        if (!(pmu_powerdomain_state & BIT(PD_ISP0)))
                pmu_set_power_domain(PD_ISP0, pmu_pd_on);
        if (!(pmu_powerdomain_state & BIT(PD_VO)))
                pmu_set_power_domain(PD_VO, pmu_pd_on);
        if (!(pmu_powerdomain_state & BIT(PD_TCPD1)))
                pmu_set_power_domain(PD_TCPD1, pmu_pd_on);
        if (!(pmu_powerdomain_state & BIT(PD_TCPD0)))
                pmu_set_power_domain(PD_TCPD0, pmu_pd_on);
        if (!(pmu_powerdomain_state & BIT(PD_GPU)))
                pmu_set_power_domain(PD_GPU, pmu_pd_on);
        qos_restore();
        clk_gate_con_restore();
}

void rk3399_flash_l2_b(void)
{
        uint32_t wait_cnt = 0;

        mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B));
        dsb();

        while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
                 BIT(L2_FLUSHDONE_CLUSTER_B))) {
                wait_cnt++;
                if (wait_cnt >= MAX_WAIT_COUNT)
                        WARN("%s:reg %x,wait\n", __func__,
                             mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
        }

        mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B));
}

static void pmu_scu_b_pwrdn(void)
{
        uint32_t wait_cnt = 0;

        if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) &
             (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) !=
            (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) {
                ERROR("%s: not all cpus are off\n", __func__);
                return;
        }

        rk3399_flash_l2_b();

        mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG));

        while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
                 BIT(STANDBY_BY_WFIL2_CLUSTER_B))) {
                wait_cnt++;
                if (wait_cnt >= MAX_WAIT_COUNT)
                        ERROR("%s:wait cluster-b l2(%x)\n", __func__,
                              mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
        }
}

static void pmu_scu_b_pwrup(void)
{
        mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG));
}

void plat_rockchip_pmusram_prepare(void)
{
        uint32_t *sram_dst, *sram_src;
        size_t sram_size;

        /* copy the pmu_cpuson_entrypoint code and data into PMUSRAM */
        sram_dst = (uint32_t *)PMUSRAM_BASE;
        sram_src = (uint32_t *)&pmu_cpuson_entrypoint_start;
        sram_size = (uint32_t *)&pmu_cpuson_entrypoint_end -
                    (uint32_t *)sram_src;

        u32_align_cpy(sram_dst, sram_src, sram_size);

        psram_sleep_cfg->sp = PSRAM_DT_BASE;
}

static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id)
{
        assert(cpu_id < PLATFORM_CORE_COUNT);
        return core_pm_cfg_info[cpu_id];
}

static inline void set_cpus_pwr_domain_cfg_info(uint32_t cpu_id, uint32_t value)
{
        assert(cpu_id < PLATFORM_CORE_COUNT);
        core_pm_cfg_info[cpu_id] = value;
#if !USE_COHERENT_MEM
        flush_dcache_range((uintptr_t)&core_pm_cfg_info[cpu_id],
                           sizeof(uint32_t));
#endif
}
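
/*
 * The explicit cache flush above matters in the !USE_COHERENT_MEM build:
 * core_pm_cfg_info is consulted on the power-up/power-down paths, where a
 * core may presumably read it before its caches and coherency are fully
 * enabled, so the update must be visible in main memory. The coherent-
 * memory build avoids the flush by placing the array in tzfw_coherent_mem
 * instead.
 */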

static int cpus_power_domain_on(uint32_t cpu_id)
{
        uint32_t cfg_info;
        uint32_t cpu_pd = PD_CPUL0 + cpu_id;

        /*
         * There are two ways to power a core on or off:
         * 1) Switch its power domain on or off directly in the
         *    PMU_PWRDN_CON register.
         * 2) Enable core power management in the PMU_CORE_PM_CON register;
         *    the power domain is then powered off automatically once the
         *    core enters wfi.
         */
        cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id);

        if (cfg_info == core_pwr_pd) {
                /* disable core_pm cfg */
                mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
                              CORES_PM_DISABLE);
                /* if the core is already on, power it off first */
                if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
                        mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 0);
                        pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
                }

                pmu_power_domain_ctr(cpu_pd, pmu_pd_on);
        } else {
                if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
                        WARN("%s: cpu%d is not off!\n", __func__, cpu_id);
                        return -EINVAL;
                }

                mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
                              BIT(core_pm_sft_wakeup_en));
                dsb();
        }

        return 0;
}

static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg)
{
        uint32_t cpu_pd;
        uint32_t core_pm_value;

        cpu_pd = PD_CPUL0 + cpu_id;
        if (pmu_power_domain_st(cpu_pd) == pmu_pd_off)
                return 0;

        if (pd_cfg == core_pwr_pd) {
                if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK))
                        return -EINVAL;

                /* disable core_pm cfg */
                mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
                              CORES_PM_DISABLE);

                set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);
                pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
        } else {
                set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);

                core_pm_value = BIT(core_pm_en);
                if (pd_cfg == core_pwr_wfi_int)
                        core_pm_value |= BIT(core_pm_int_wakeup_en);
                mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
                              core_pm_value);
                dsb();
        }

        return 0;
}

static inline void clst_pwr_domain_suspend(plat_local_state_t lvl_state)
{
        uint32_t cpu_id = plat_my_core_pos();
        uint32_t pll_id, clst_st_msk, clst_st_chk_msk, pmu_st;

        assert(cpu_id < PLATFORM_CORE_COUNT);

        if (lvl_state == PLAT_MAX_OFF_STATE) {
                if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT) {
                        pll_id = ALPLL_ID;
                        clst_st_msk = CLST_L_CPUS_MSK;
                } else {
                        pll_id = ABPLL_ID;
                        clst_st_msk = CLST_B_CPUS_MSK <<
                                      PLATFORM_CLUSTER0_CORE_COUNT;
                }

                clst_st_chk_msk = clst_st_msk & ~(BIT(cpu_id));

                pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);

                pmu_st &= clst_st_msk;

                if (pmu_st == clst_st_chk_msk) {
                        mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3),
                                      PLL_SLOW_MODE);

                        clst_warmboot_data[pll_id] = PMU_CLST_RET;

                        pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);
                        pmu_st &= clst_st_msk;
                        if (pmu_st == clst_st_chk_msk)
                                return;
                        /*
                         * Another cpu in this cluster has come back up in
                         * the meantime; undo the configuration at once.
                         */
                        mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3),
                                      PLL_NOMAL_MODE);
                        clst_warmboot_data[pll_id] = 0;
                }
        }
}
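
/*
 * The double read of PMU_PWRDN_ST above guards against a race: another
 * core in this cluster can power up between the first check and the
 * switch of the cluster PLL into slow mode. If the second read shows the
 * cluster is no longer otherwise idle, the PLL mode and the warm-boot
 * flag are rolled back immediately.
 */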

static int clst_pwr_domain_resume(plat_local_state_t lvl_state)
{
        uint32_t cpu_id = plat_my_core_pos();
        uint32_t pll_id, pll_st;

        assert(cpu_id < PLATFORM_CORE_COUNT);

        if (lvl_state == PLAT_MAX_OFF_STATE) {
                if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT)
                        pll_id = ALPLL_ID;
                else
                        pll_id = ABPLL_ID;

                pll_st = mmio_read_32(CRU_BASE + CRU_PLL_CON(pll_id, 3)) >>
                         PLL_MODE_SHIFT;

                if (pll_st != NORMAL_MODE) {
                        WARN("%s: clst (%d) is in error mode (%d)\n",
                             __func__, pll_id, pll_st);
                        return -1;
                }
        }

        return 0;
}

static void nonboot_cpus_off(void)
{
        uint32_t boot_cpu, cpu;

        boot_cpu = plat_my_core_pos();

        /* turn off the non-boot cpus */
        for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) {
                if (cpu == boot_cpu)
                        continue;
                cpus_power_domain_off(cpu, core_pwr_pd);
        }
}

static int cores_pwr_domain_on(unsigned long mpidr, uint64_t entrypoint)
{
        uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr);

        assert(cpu_id < PLATFORM_CORE_COUNT);
        assert(cpuson_flags[cpu_id] == 0);
        cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG;
        cpuson_entry_point[cpu_id] = entrypoint;
        dsb();

        cpus_power_domain_on(cpu_id);

        return 0;
}

static int cores_pwr_domain_off(void)
{
        uint32_t cpu_id = plat_my_core_pos();

        cpus_power_domain_off(cpu_id, core_pwr_wfi);

        return 0;
}

static int hlvl_pwr_domain_off(uint32_t lvl, plat_local_state_t lvl_state)
{
        switch (lvl) {
        case MPIDR_AFFLVL1:
                clst_pwr_domain_suspend(lvl_state);
                break;
        default:
                break;
        }

        return 0;
}

static int cores_pwr_domain_suspend(void)
{
        uint32_t cpu_id = plat_my_core_pos();

        assert(cpu_id < PLATFORM_CORE_COUNT);
        assert(cpuson_flags[cpu_id] == 0);
        cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN;
        cpuson_entry_point[cpu_id] = plat_get_sec_entrypoint();
        dsb();

        cpus_power_domain_off(cpu_id, core_pwr_wfi_int);

        return 0;
}

static int hlvl_pwr_domain_suspend(uint32_t lvl, plat_local_state_t lvl_state)
{
        switch (lvl) {
        case MPIDR_AFFLVL1:
                clst_pwr_domain_suspend(lvl_state);
                break;
        default:
                break;
        }

        return 0;
}

static int cores_pwr_domain_on_finish(void)
{
        uint32_t cpu_id = plat_my_core_pos();

        mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
                      CORES_PM_DISABLE);
        return 0;
}

static int hlvl_pwr_domain_on_finish(uint32_t lvl,
                                     plat_local_state_t lvl_state)
{
        switch (lvl) {
        case MPIDR_AFFLVL1:
                clst_pwr_domain_resume(lvl_state);
                break;
        default:
                break;
        }

        return 0;
}

static int cores_pwr_domain_resume(void)
{
        uint32_t cpu_id = plat_my_core_pos();

        /* Disable core_pm */
        mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), CORES_PM_DISABLE);

        return 0;
}

static int hlvl_pwr_domain_resume(uint32_t lvl, plat_local_state_t lvl_state)
{
        switch (lvl) {
        case MPIDR_AFFLVL1:
                clst_pwr_domain_resume(lvl_state);
                break;
        default:
                break;
        }

        return 0;
}
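
/*
 * Note that every hlvl_* hook above acts only at MPIDR_AFFLVL1, i.e. the
 * cluster level; any other affinity level falls through to the default
 * case and is left to the system-level suspend/resume path below.
 */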

/**
 * init_pmu_counts - Init timing counts in the PMU register area
 *
 * At various points when we power up or down parts of the system we need
 * a delay to wait for power / clocks to become stable. The PMU has
 * counters to help software do the delay properly. Basically, it works
 * like this:
 * - Software sets up counter values
 * - When software turns on something in the PMU, the counter kicks off
 * - The hardware sets a bit automatically when the counter has finished
 *   and software knows that the initialization is done.
 *
 * It's software's job to set up these counters. The hardware power-on
 * default for these settings is conservative, setting everything to
 * 0x5dc0 (750 ms in 32 kHz counts or 1 ms in 24 MHz counts).
 *
 * Note that some of these counters are only really used at suspend/resume
 * time (for instance, that's the only time we turn off/on the oscillator)
 * and others are used during normal runtime (like turning on/off a CPU or
 * GPU), but it doesn't hurt to init everything at boot.
 *
 * Also note that these counters can run off the 32 kHz clock or the
 * 24 MHz clock. While the 24 MHz clock can give us more precision, it's
 * not always available (like when we turn the oscillator off at sleep
 * time). The pmu_use_lf bit (lf: low frequency) works together with
 * power_mode_en; our current understanding is that the counts work like
 * this:
 *   IF (pmu_use_lf == 0) || (power_mode_en == 0)
 *     use the 24M OSC for counts
 *   ELSE
 *     use the 32K OSC for counts
 *
 * Notes:
 * - There is a separate bit for the PMU called PMU_24M_EN_CFG. At the
 *   moment we always keep that 0. It apparently chooses between using the
 *   PLL as the source for the PMU vs. the 24M clock. If we ever set it to
 *   1 we should consider how it affects these counts (if at all).
 * - power_mode_en is documented to clear automatically when we leave
 *   "power mode". That's why most counts are on 24M; only timings used
 *   while in "power mode" are on 32k.
 * - In some cases the kernel may override these counts.
 *
 * PMU_STABLE_CNT / PMU_OSC_CNT / PMU_PLLLOCK_CNT are the important counts
 * in power mode; we need to ensure that they are set up properly.
 */
static void init_pmu_counts(void)
{
        /* COUNTS FOR INSIDE POWER MODE */

        /*
         * From limited testing, need PMU stable >= 2ms, but go overkill
         * and choose 30 ms to match testing on past SoCs. Also let
         * OSC have 30 ms for stabilization.
         */
        mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_32K_CNT_MS(30));
        mmio_write_32(PMU_BASE + PMU_OSC_CNT, CYCL_32K_CNT_MS(30));

        /* Unclear what these should be; try 3 ms */
        mmio_write_32(PMU_BASE + PMU_WAKEUP_RST_CLR_CNT, CYCL_32K_CNT_MS(3));

        /* Unclear what this should be, but set the default explicitly */
        mmio_write_32(PMU_BASE + PMU_TIMEOUT_CNT, 0x5dc0);

        /* COUNTS FOR OUTSIDE POWER MODE */

        /* Put something sorta conservative here until we know better */
        mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_24M_CNT_MS(3));
        mmio_write_32(PMU_BASE + PMU_DDRIO_PWRON_CNT, CYCL_24M_CNT_MS(1));
        mmio_write_32(PMU_BASE + PMU_CENTER_PWRDN_CNT, CYCL_24M_CNT_MS(1));
        mmio_write_32(PMU_BASE + PMU_CENTER_PWRUP_CNT, CYCL_24M_CNT_MS(1));

        /*
         * Set CPU/GPU to 1 us.
         *
         * NOTE: Even though ATF doesn't configure the GPU we'll still set
         * up counts here. After all ATF controls all these other bits and
         * also chooses which clock these counters use.
         */
        mmio_write_32(PMU_BASE + PMU_SCU_L_PWRDN_CNT, CYCL_24M_CNT_US(1));
        mmio_write_32(PMU_BASE + PMU_SCU_L_PWRUP_CNT, CYCL_24M_CNT_US(1));
        mmio_write_32(PMU_BASE + PMU_SCU_B_PWRDN_CNT, CYCL_24M_CNT_US(1));
        mmio_write_32(PMU_BASE + PMU_SCU_B_PWRUP_CNT, CYCL_24M_CNT_US(1));
        mmio_write_32(PMU_BASE + PMU_GPU_PWRDN_CNT, CYCL_24M_CNT_US(1));
        mmio_write_32(PMU_BASE + PMU_GPU_PWRUP_CNT, CYCL_24M_CNT_US(1));
}
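
/*
 * Sanity check on the numbers above (assuming the usual definitions
 * CYCL_32K_CNT_MS(ms) == 32 * (ms) and CYCL_24M_CNT_US(us) == 24 * (us),
 * i.e. the 32 kHz clock is treated as 32 cycles/ms and the 24 MHz clock
 * as 24 cycles/us):
 *
 *   CYCL_32K_CNT_MS(30) == 960 cycles ~= 30 ms of 32 kHz
 *   CYCL_24M_CNT_US(1)  == 24 cycles  ==  1 us of 24 MHz
 *   0x5dc0 == 24000     == 1 ms at 24 MHz, or 750 ms at 32 cycles/ms
 *
 * which matches the power-on default described in the comment block.
 */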

static void sys_slp_config(void)
{
        uint32_t slp_mode_cfg = 0;

        mmio_write_32(GRF_BASE + GRF_SOC_CON4, CCI_FORCE_WAKEUP);
        mmio_write_32(PMU_BASE + PMU_CCI500_CON,
                      BIT_WITH_WMSK(PMU_CLR_PREQ_CCI500_HW) |
                      BIT_WITH_WMSK(PMU_CLR_QREQ_CCI500_HW) |
                      BIT_WITH_WMSK(PMU_QGATING_CCI500_CFG));

        mmio_write_32(PMU_BASE + PMU_ADB400_CON,
                      BIT_WITH_WMSK(PMU_CLR_CORE_L_HW) |
                      BIT_WITH_WMSK(PMU_CLR_CORE_L_2GIC_HW) |
                      BIT_WITH_WMSK(PMU_CLR_GIC2_CORE_L_HW));

        slp_mode_cfg = BIT(PMU_PWR_MODE_EN) |
                       BIT(PMU_POWER_OFF_REQ_CFG) |
                       BIT(PMU_CPU0_PD_EN) |
                       BIT(PMU_L2_FLUSH_EN) |
                       BIT(PMU_L2_IDLE_EN) |
                       BIT(PMU_SCU_PD_EN) |
                       BIT(PMU_CCI_PD_EN) |
                       BIT(PMU_CLK_CORE_SRC_GATE_EN) |
                       BIT(PMU_ALIVE_USE_LF) |
                       BIT(PMU_SREF0_ENTER_EN) |
                       BIT(PMU_SREF1_ENTER_EN) |
                       BIT(PMU_DDRC0_GATING_EN) |
                       BIT(PMU_DDRC1_GATING_EN) |
                       BIT(PMU_DDRIO0_RET_EN) |
                       BIT(PMU_DDRIO1_RET_EN) |
                       BIT(PMU_DDRIO_RET_HW_DE_REQ) |
                       BIT(PMU_PLL_PD_EN) |
                       BIT(PMU_CLK_CENTER_SRC_GATE_EN) |
                       BIT(PMU_OSC_DIS) |
                       BIT(PMU_PMU_USE_LF);

        mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, BIT(PMU_GPIO_WKUP_EN));
        mmio_write_32(PMU_BASE + PMU_PWRMODE_CON, slp_mode_cfg);

        mmio_write_32(PMU_BASE + PMU_PLL_CON, PLL_PD_HW);
        mmio_write_32(PMUGRF_BASE + PMUGRF_SOC_CON0, EXTERNAL_32K);
        mmio_write_32(PMUGRF_BASE, IOMUX_CLK_32K); /* 32k iomux */
}
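
/*
 * A note on the BIT_WITH_WMSK()/WMSK_BIT() writes used throughout this
 * file: Rockchip GRF/PMU registers commonly pair each config bit in the
 * low half-word with a write-enable bit 16 positions higher. Assuming the
 * usual macro definitions, BIT_WITH_WMSK(n) expands to roughly
 * (BIT(n) | BIT(n + 16)), i.e. "set bit n", while WMSK_BIT(n) is just
 * BIT(n + 16), i.e. "clear bit n"; in both cases no other bits are
 * disturbed, so no read-modify-write is needed.
 */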

static void set_hw_idle(uint32_t hw_idle)
{
        mmio_setbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle);
}

static void clr_hw_idle(uint32_t hw_idle)
{
        mmio_clrbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle);
}

static uint32_t iomux_status[12];
static uint32_t pull_mode_status[12];
static uint32_t gpio_direction[3];
static uint32_t gpio_2_4_clk_gate;

static void suspend_apio(void)
{
        struct apio_info *suspend_apio;
        int i;

        suspend_apio = plat_get_rockchip_suspend_apio();

        if (!suspend_apio)
                return;

        /* save gpio2 ~ gpio4 iomux and pull mode */
        for (i = 0; i < 12; i++) {
                iomux_status[i] = mmio_read_32(GRF_BASE +
                                               GRF_GPIO2A_IOMUX + i * 4);
                pull_mode_status[i] = mmio_read_32(GRF_BASE +
                                                   GRF_GPIO2A_P + i * 4);
        }

        /* store gpio2 ~ gpio4 clock gate state */
        gpio_2_4_clk_gate = (mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(31)) >>
                             PCLK_GPIO2_GATE_SHIFT) & 0x07;

        /* enable the gpio2 ~ gpio4 pclks (clear their clock gates) */
        mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
                      BITS_WITH_WMASK(0, 0x07, PCLK_GPIO2_GATE_SHIFT));

        /* save gpio2 ~ gpio4 direction */
        gpio_direction[0] = mmio_read_32(GPIO2_BASE + 0x04);
        gpio_direction[1] = mmio_read_32(GPIO3_BASE + 0x04);
        gpio_direction[2] = mmio_read_32(GPIO4_BASE + 0x04);

        /* apio1 covers gpio3a0 ~ gpio3c7 */
        if (suspend_apio->apio1) {
                /* set gpio3a0 ~ gpio3c7 iomux to gpio */
                mmio_write_32(GRF_BASE + GRF_GPIO3A_IOMUX,
                              REG_SOC_WMSK | GRF_IOMUX_GPIO);
                mmio_write_32(GRF_BASE + GRF_GPIO3B_IOMUX,
                              REG_SOC_WMSK | GRF_IOMUX_GPIO);
                mmio_write_32(GRF_BASE + GRF_GPIO3C_IOMUX,
                              REG_SOC_WMSK | GRF_IOMUX_GPIO);

                /* set gpio3a0 ~ gpio3c7 pull mode to pull none */
                mmio_write_32(GRF_BASE + GRF_GPIO3A_P, REG_SOC_WMSK | 0);
                mmio_write_32(GRF_BASE + GRF_GPIO3B_P, REG_SOC_WMSK | 0);
                mmio_write_32(GRF_BASE + GRF_GPIO3C_P, REG_SOC_WMSK | 0);

                /* set gpio3a0 ~ gpio3c7 to input */
                mmio_clrbits_32(GPIO3_BASE + 0x04, 0x00ffffff);
        }

        /* apio2 covers gpio2a0 ~ gpio2b4 */
        if (suspend_apio->apio2) {
                /* set gpio2a0 ~ gpio2b4 iomux to gpio */
                mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX,
                              REG_SOC_WMSK | GRF_IOMUX_GPIO);
                mmio_write_32(GRF_BASE + GRF_GPIO2B_IOMUX,
                              REG_SOC_WMSK | GRF_IOMUX_GPIO);

                /* set gpio2a0 ~ gpio2b4 pull mode to pull none */
                mmio_write_32(GRF_BASE + GRF_GPIO2A_P, REG_SOC_WMSK | 0);
                mmio_write_32(GRF_BASE + GRF_GPIO2B_P, REG_SOC_WMSK | 0);

                /* set gpio2a0 ~ gpio2b4 to input */
                mmio_clrbits_32(GPIO2_BASE + 0x04, 0x00001fff);
        }

        /* apio3 covers gpio2c0 ~ gpio2d4 */
        if (suspend_apio->apio3) {
                /* set gpio2c0 ~ gpio2d4 iomux to gpio */
                mmio_write_32(GRF_BASE + GRF_GPIO2C_IOMUX,
                              REG_SOC_WMSK | GRF_IOMUX_GPIO);
                mmio_write_32(GRF_BASE + GRF_GPIO2D_IOMUX,
                              REG_SOC_WMSK | GRF_IOMUX_GPIO);

                /* set gpio2c0 ~ gpio2d4 pull mode to pull none */
                mmio_write_32(GRF_BASE + GRF_GPIO2C_P, REG_SOC_WMSK | 0);
                mmio_write_32(GRF_BASE + GRF_GPIO2D_P, REG_SOC_WMSK | 0);

                /* set gpio2c0 ~ gpio2d4 to input */
                mmio_clrbits_32(GPIO2_BASE + 0x04, 0x1fff0000);
        }

        /* apio4 covers gpio4c0 ~ gpio4c7, gpio4d0 ~ gpio4d6 */
        if (suspend_apio->apio4) {
                /* set gpio4c0 ~ gpio4d6 iomux to gpio */
                mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX,
                              REG_SOC_WMSK | GRF_IOMUX_GPIO);
                mmio_write_32(GRF_BASE + GRF_GPIO4D_IOMUX,
                              REG_SOC_WMSK | GRF_IOMUX_GPIO);

                /* set gpio4c0 ~ gpio4d6 pull mode to pull none */
                mmio_write_32(GRF_BASE + GRF_GPIO4C_P, REG_SOC_WMSK | 0);
                mmio_write_32(GRF_BASE + GRF_GPIO4D_P, REG_SOC_WMSK | 0);

                /* set gpio4c0 ~ gpio4d6 to input */
                mmio_clrbits_32(GPIO4_BASE + 0x04, 0x7fff0000);
        }

        /* apio5 covers gpio3d0 ~ gpio3d7, gpio4a0 ~ gpio4a7 */
        if (suspend_apio->apio5) {
                /* set gpio3d0 ~ gpio4a7 iomux to gpio */
                mmio_write_32(GRF_BASE + GRF_GPIO3D_IOMUX,
                              REG_SOC_WMSK | GRF_IOMUX_GPIO);
                mmio_write_32(GRF_BASE + GRF_GPIO4A_IOMUX,
                              REG_SOC_WMSK | GRF_IOMUX_GPIO);

                /* set gpio3d0 ~ gpio4a7 pull mode to pull none */
                mmio_write_32(GRF_BASE + GRF_GPIO3D_P, REG_SOC_WMSK | 0);
                mmio_write_32(GRF_BASE + GRF_GPIO4A_P, REG_SOC_WMSK | 0);

                /* set gpio3d0 ~ gpio4a7 to input */
                mmio_clrbits_32(GPIO3_BASE + 0x04, 0xff000000);
                mmio_clrbits_32(GPIO4_BASE + 0x04, 0x000000ff);
        }
}

static void resume_apio(void)
{
        struct apio_info *suspend_apio;
        int i;

        suspend_apio = plat_get_rockchip_suspend_apio();

        if (!suspend_apio)
                return;

        for (i = 0; i < 12; i++) {
                mmio_write_32(GRF_BASE + GRF_GPIO2A_P + i * 4,
                              REG_SOC_WMSK | pull_mode_status[i]);
                mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX + i * 4,
                              REG_SOC_WMSK | iomux_status[i]);
        }

        /* set gpio2 ~ gpio4 direction back to the stored value */
        mmio_write_32(GPIO2_BASE + 0x04, gpio_direction[0]);
        mmio_write_32(GPIO3_BASE + 0x04, gpio_direction[1]);
        mmio_write_32(GPIO4_BASE + 0x04, gpio_direction[2]);

        /* set gpio2 ~ gpio4 clock gates back to the stored value */
        mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
                      BITS_WITH_WMASK(gpio_2_4_clk_gate, 0x07,
                                      PCLK_GPIO2_GATE_SHIFT));
}
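
/*
 * The restore order above is deliberate: pull mode and iomux (GRF
 * registers) come back first, then the pin directions, and the clock
 * gates are restored last, since the GPIO direction writes presumably
 * need the pclks that suspend_apio() left enabled.
 */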

static void suspend_gpio(void)
{
        struct gpio_info *suspend_gpio;
        uint32_t count;
        int i;

        suspend_gpio = plat_get_rockchip_suspend_gpio(&count);

        for (i = 0; i < count; i++) {
                gpio_set_value(suspend_gpio[i].index,
                               suspend_gpio[i].polarity);
                gpio_set_direction(suspend_gpio[i].index, GPIO_DIR_OUT);
                udelay(1);
        }
}

static void resume_gpio(void)
{
        struct gpio_info *suspend_gpio;
        uint32_t count;
        int i;

        suspend_gpio = plat_get_rockchip_suspend_gpio(&count);

        /* walk the list in reverse to undo suspend_gpio() */
        for (i = count - 1; i >= 0; i--) {
                gpio_set_value(suspend_gpio[i].index,
                               !suspend_gpio[i].polarity);
                gpio_set_direction(suspend_gpio[i].index, GPIO_DIR_OUT);
                udelay(1);
        }
}

static void m0_clock_init(void)
{
        /* enable clocks for M0 */
        mmio_write_32(PMUCRU_BASE + PMUCRU_CLKGATE_CON2,
                      BITS_WITH_WMASK(0x0, 0x2f, 0));

        /* switch the parent to xin24M and div == 1 */
        mmio_write_32(PMUCRU_BASE + PMUCRU_CLKSEL_CON0,
                      BIT_WITH_WMSK(15) | BITS_WITH_WMASK(0x0, 0x1f, 8));

        /* start M0 */
        mmio_write_32(PMUCRU_BASE + PMUCRU_SOFTRST_CON0,
                      BITS_WITH_WMASK(0x0, 0x24, 0));

        /* gating disable for M0 */
        mmio_write_32(PMUCRU_BASE + PMUCRU_GATEDIS_CON0, BIT_WITH_WMSK(1));
}

static void m0_reset(void)
{
        /* stop M0 */
        mmio_write_32(PMUCRU_BASE + PMUCRU_SOFTRST_CON0,
                      BITS_WITH_WMASK(0x24, 0x24, 0));

        /* recover gating bit for M0 */
        mmio_write_32(PMUCRU_BASE + PMUCRU_GATEDIS_CON0, WMSK_BIT(1));

        /* disable clocks for M0 */
        mmio_write_32(PMUCRU_BASE + PMUCRU_CLKGATE_CON2,
                      BITS_WITH_WMASK(0x2f, 0x2f, 0));
}
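
/*
 * The Cortex-M0 helpers above bracket system suspend: m0_clock_init()
 * clocks and releases the PMU M0 before suspend is entered and m0_reset()
 * stops it again on resume. The M0 runs its own small firmware (see
 * rk3399m0.h), which presumably carries out the parts of the suspend/
 * resume sequence that must happen while the application cores are
 * powered down.
 */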

static int sys_pwr_domain_suspend(void)
{
        uint32_t wait_cnt = 0;
        uint32_t status = 0;

        pmu_power_domains_suspend();
        set_hw_idle(BIT(PMU_CLR_CENTER1) |
                    BIT(PMU_CLR_ALIVE) |
                    BIT(PMU_CLR_MSCH0) |
                    BIT(PMU_CLR_MSCH1) |
                    BIT(PMU_CLR_CCIM0) |
                    BIT(PMU_CLR_CCIM1) |
                    BIT(PMU_CLR_CENTER) |
                    BIT(PMU_CLR_GIC));

        sys_slp_config();

        m0_clock_init();

        pmu_sgrf_rst_hld();

        mmio_write_32(SGRF_BASE + SGRF_SOC_CON0_1(1),
                      (PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
                      CPU_BOOT_ADDR_WMASK);

        pmu_scu_b_pwrdn();

        mmio_write_32(PMU_BASE + PMU_ADB400_CON,
                      BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) |
                      BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_SW) |
                      BIT_WITH_WMSK(PMU_PWRDWN_REQ_GIC2_CORE_B_SW));
        dsb();
        status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) |
                 BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) |
                 BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST);
        while ((mmio_read_32(PMU_BASE + PMU_ADB400_ST) & status) != status) {
                wait_cnt++;
                if (wait_cnt >= MAX_WAIT_COUNT) {
                        ERROR("%s:wait cluster-b l2(%x)\n", __func__,
                              mmio_read_32(PMU_BASE + PMU_ADB400_ST));
                        panic();
                }
        }
        mmio_setbits_32(PMU_BASE + PMU_PWRDN_CON, BIT(PMU_SCU_B_PWRDWN_EN));

        /*
         * Disabling the PLLs, PWMs and DVFS rails is the last step of
         * suspend before we approach WFI.
         */
        plls_suspend_prepare();
        disable_dvfs_plls();
        disable_pwms();
        disable_nodvfs_plls();

        suspend_apio();
        suspend_gpio();

        return 0;
}

static int sys_pwr_domain_resume(void)
{
        uint32_t wait_cnt = 0;
        uint32_t status = 0;

        resume_apio();
        resume_gpio();
        enable_nodvfs_plls();
        enable_pwms();
        /* PWM regulators take time to come up; give 300us to be safe. */
        udelay(300);
        enable_dvfs_plls();
        plls_resume_finish();

        /*
         * The wakeup status does not clear by itself, so clear it
         * manually; otherwise we will always see a stale wakeup interrupt
         * the next time we query it.
         *
         * NOTE: If the kernel needs to query this, we might want to stash
         * it somewhere.
         */
        mmio_write_32(PMU_BASE + PMU_WAKEUP_STATUS, 0xffffffff);
        mmio_write_32(PMU_BASE + PMU_WKUP_CFG4, 0x00);

        mmio_write_32(SGRF_BASE + SGRF_SOC_CON0_1(1),
                      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
                      CPU_BOOT_ADDR_WMASK);

        mmio_write_32(PMU_BASE + PMU_CCI500_CON,
                      WMSK_BIT(PMU_CLR_PREQ_CCI500_HW) |
                      WMSK_BIT(PMU_CLR_QREQ_CCI500_HW) |
                      WMSK_BIT(PMU_QGATING_CCI500_CFG));
        dsb();
        mmio_clrbits_32(PMU_BASE + PMU_PWRDN_CON,
                        BIT(PMU_SCU_B_PWRDWN_EN));

        mmio_write_32(PMU_BASE + PMU_ADB400_CON,
                      WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) |
                      WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_SW) |
                      WMSK_BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW) |
                      WMSK_BIT(PMU_CLR_CORE_L_HW) |
                      WMSK_BIT(PMU_CLR_CORE_L_2GIC_HW) |
                      WMSK_BIT(PMU_CLR_GIC2_CORE_L_HW));

        status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) |
                 BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) |
                 BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST);

        while ((mmio_read_32(PMU_BASE + PMU_ADB400_ST) & status)) {
                wait_cnt++;
                if (wait_cnt >= MAX_WAIT_COUNT) {
                        ERROR("%s:wait cluster-b l2(%x)\n", __func__,
                              mmio_read_32(PMU_BASE + PMU_ADB400_ST));
                        panic();
                }
        }

        pmu_sgrf_rst_hld_release();
        pmu_scu_b_pwrup();

        pmu_power_domains_resume();
        clr_hw_idle(BIT(PMU_CLR_CENTER1) |
                    BIT(PMU_CLR_ALIVE) |
                    BIT(PMU_CLR_MSCH0) |
                    BIT(PMU_CLR_MSCH1) |
                    BIT(PMU_CLR_CCIM0) |
                    BIT(PMU_CLR_CCIM1) |
                    BIT(PMU_CLR_CENTER) |
                    BIT(PMU_CLR_GIC));

        plat_rockchip_gic_cpuif_enable();

        m0_reset();

        return 0;
}

void __dead2 soc_soft_reset(void)
{
        struct gpio_info *rst_gpio;

        rst_gpio = plat_get_rockchip_gpio_reset();

        if (rst_gpio) {
                gpio_set_direction(rst_gpio->index, GPIO_DIR_OUT);
                gpio_set_value(rst_gpio->index, rst_gpio->polarity);
        } else {
                soc_global_soft_reset();
        }

        while (1)
                ;
}

void __dead2 soc_system_off(void)
{
        struct gpio_info *poweroff_gpio;

        poweroff_gpio = plat_get_rockchip_gpio_poweroff();

        if (poweroff_gpio) {
                /*
                 * If the tsadc over-temperature pin (GPIO1A6) is used as
                 * the shutdown gpio, its iomux needs to be switched back
                 * to the gpio function first.
                 */
                if (poweroff_gpio->index == TSADC_INT_PIN) {
                        mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO1A_IOMUX,
                                      GPIO1A6_IOMUX);
                }
                gpio_set_direction(poweroff_gpio->index, GPIO_DIR_OUT);
                gpio_set_value(poweroff_gpio->index,
                               poweroff_gpio->polarity);
        } else {
                WARN("Do nothing when system off\n");
        }

        while (1)
                ;
}
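
/*
 * The callback table below is handed to the common Rockchip PSCI glue via
 * plat_setup_rockchip_pm_ops() (called from plat_rockchip_pmu_init()),
 * which dispatches the standard PSCI hooks to these SoC-specific
 * implementations.
 */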

static struct rockchip_pm_ops_cb pm_ops = {
        .cores_pwr_dm_on = cores_pwr_domain_on,
        .cores_pwr_dm_off = cores_pwr_domain_off,
        .cores_pwr_dm_on_finish = cores_pwr_domain_on_finish,
        .cores_pwr_dm_suspend = cores_pwr_domain_suspend,
        .cores_pwr_dm_resume = cores_pwr_domain_resume,
        .hlvl_pwr_dm_suspend = hlvl_pwr_domain_suspend,
        .hlvl_pwr_dm_resume = hlvl_pwr_domain_resume,
        .hlvl_pwr_dm_off = hlvl_pwr_domain_off,
        .hlvl_pwr_dm_on_finish = hlvl_pwr_domain_on_finish,
        .sys_pwr_dm_suspend = sys_pwr_domain_suspend,
        .sys_pwr_dm_resume = sys_pwr_domain_resume,
        .sys_gbl_soft_reset = soc_soft_reset,
        .system_off = soc_system_off,
};

void plat_rockchip_pmu_init(void)
{
        uint32_t cpu;

        rockchip_pd_lock_init();
        plat_setup_rockchip_pm_ops(&pm_ops);

        /*
         * The warm-boot address register only holds a 32-bit address, so
         * the 64-bit entry pointer is deliberately narrowed here.
         */
        cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;

        for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
                cpuson_flags[cpu] = 0;

        for (cpu = 0; cpu < PLATFORM_CLUSTER_COUNT; cpu++)
                clst_warmboot_data[cpu] = 0;

        psram_sleep_cfg->ddr_func = 0x00;
        psram_sleep_cfg->ddr_data = 0x00;
        psram_sleep_cfg->ddr_flag = 0x00;
        psram_sleep_cfg->boot_mpidr = read_mpidr_el1() & 0xffff;

        /* configure the cpus' warm boot address */
        mmio_write_32(SGRF_BASE + SGRF_SOC_CON0_1(1),
                      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
                      CPU_BOOT_ADDR_WMASK);
        mmio_write_32(PMU_BASE + PMU_NOC_AUTO_ENA, NOC_AUTO_ENABLE);

        /*
         * Enable Schmitt trigger for better 32 kHz input signal, which is
         * important for suspend/resume reliability among other things.
         */
        mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO0A_SMT, GPIO0A0_SMT_ENABLE);

        init_pmu_counts();

        nonboot_cpus_off();

        INFO("%s(%d): pd status %x\n", __func__, __LINE__,
             mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}