/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bakery_lock.h>
#include <debug.h>
#include <delay_timer.h>
#include <errno.h>
#include <gpio.h>
#include <mmio.h>
#include <platform.h>
#include <platform_def.h>
#include <plat_params.h>
#include <plat_private.h>
#include <rk3399_def.h>
#include <pmu_sram.h>
#include <soc.h>
#include <pmu.h>
#include <pmu_com.h>
#include <pwm.h>
#include <bl31.h>

DEFINE_BAKERY_LOCK(rockchip_pd_lock);

static struct psram_data_t *psram_sleep_cfg =
	(struct psram_data_t *)PSRAM_DT_BASE;

static uint32_t cpu_warm_boot_addr;

/*
 * There are two ways to power a core on or off:
 * 1) Switch its power domain on or off directly via the PMU_PWRDN_CON
 *    register; this is the core_pwr_pd mode.
 * 2) Enable the core's power management in the PMU_CORE_PM_CON register;
 *    the power domain is then powered off automatically once the core
 *    enters wfi. This is the core_pwr_wfi or core_pwr_wfi_int mode.
 * We use core_pm_cfg_info to record which method is in use for each core.
 */
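
/*
 * For reference, a sketch of how those two modes are used later in this
 * file: nonboot_cpus_off() powers cores down with core_pwr_pd for hotplug,
 * cores_pwr_domain_off() uses core_pwr_wfi for a plain CPU_OFF, and
 * cores_pwr_domain_suspend() uses core_pwr_wfi_int so that a pending
 * interrupt can still wake the core.
 */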

static uint32_t core_pm_cfg_info[PLATFORM_CORE_COUNT]
#if USE_COHERENT_MEM
__attribute__ ((section("tzfw_coherent_mem")))
#endif
;/* coherent */

/*
 * Request (state != 0) or release (state == 0) the idle state of the given
 * bus, then poll PMU_BUS_IDLE_ST and PMU_BUS_IDLE_ACK until both match the
 * request or MAX_WAIT_COUNT is exceeded.
 */
static void pmu_bus_idle_req(uint32_t bus, uint32_t state)
{
	uint32_t bus_id = BIT(bus);
	uint32_t bus_req;
	uint32_t wait_cnt = 0;
	uint32_t bus_state, bus_ack;

	if (state)
		bus_req = BIT(bus);
	else
		bus_req = 0;

	mmio_clrsetbits_32(PMU_BASE + PMU_BUS_IDLE_REQ, bus_id, bus_req);

	do {
		bus_state = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) & bus_id;
		bus_ack = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK) & bus_id;
		wait_cnt++;
	} while ((bus_state != bus_req || bus_ack != bus_req) &&
		 (wait_cnt < MAX_WAIT_COUNT));

	if (bus_state != bus_req || bus_ack != bus_req) {
		INFO("%s:st=%x(%x)\n", __func__,
		     mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST),
		     bus_state);
		INFO("%s:ack=%x(%x)\n", __func__,
		     mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK),
		     bus_ack);
	}
}

struct pmu_slpdata_s pmu_slpdata;

static void qos_save(void)
{
	if (pmu_power_domain_st(PD_GPU) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.gpu_qos, GPU);
	if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0);
		SAVE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1);
	}
	if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0);
		SAVE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1);
	}
	if (pmu_power_domain_st(PD_VO) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R);
		SAVE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W);
		SAVE_QOS(pmu_slpdata.vop_little, VOP_LITTLE);
	}
	if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.hdcp_qos, HDCP);
	if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.gmac_qos, GMAC);
	if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0);
		SAVE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1);
	}
	if (pmu_power_domain_st(PD_SD) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.sdmmc_qos, SDMMC);
	if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.emmc_qos, EMMC);
	if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.sdio_qos, SDIO);
	if (pmu_power_domain_st(PD_GIC) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.gic_qos, GIC);
	if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.rga_r_qos, RGA_R);
		SAVE_QOS(pmu_slpdata.rga_w_qos, RGA_W);
	}
	if (pmu_power_domain_st(PD_IEP) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.iep_qos, IEP);
	if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0);
		SAVE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1);
	}
	if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0);
		SAVE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1);
		SAVE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP);
	}
	if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.dmac0_qos, DMAC0);
		SAVE_QOS(pmu_slpdata.dmac1_qos, DMAC1);
		SAVE_QOS(pmu_slpdata.dcf_qos, DCF);
		SAVE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0);
		SAVE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1);
		SAVE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP);
		SAVE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP);
		SAVE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1);
	}
	if (pmu_power_domain_st(PD_VDU) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0);
	if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R);
		SAVE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W);
	}
}

static void qos_restore(void)
{
	if (pmu_power_domain_st(PD_GPU) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.gpu_qos, GPU);
	if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0);
		RESTORE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1);
	}
	if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0);
		RESTORE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1);
	}
	if (pmu_power_domain_st(PD_VO) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R);
		RESTORE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W);
		RESTORE_QOS(pmu_slpdata.vop_little, VOP_LITTLE);
	}
	if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.hdcp_qos, HDCP);
	if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.gmac_qos, GMAC);
	if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0);
		RESTORE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1);
	}
	if (pmu_power_domain_st(PD_SD) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.sdmmc_qos, SDMMC);
	if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.emmc_qos, EMMC);
	if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.sdio_qos, SDIO);
	if (pmu_power_domain_st(PD_GIC) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.gic_qos, GIC);
	if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.rga_r_qos, RGA_R);
		RESTORE_QOS(pmu_slpdata.rga_w_qos, RGA_W);
	}
	if (pmu_power_domain_st(PD_IEP) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.iep_qos, IEP);
	if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0);
		RESTORE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1);
	}
	if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0);
		RESTORE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1);
		RESTORE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP);
	}
	if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.dmac0_qos, DMAC0);
		RESTORE_QOS(pmu_slpdata.dmac1_qos, DMAC1);
		RESTORE_QOS(pmu_slpdata.dcf_qos, DCF);
		RESTORE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0);
		RESTORE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1);
		RESTORE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP);
		RESTORE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP);
		RESTORE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1);
	}
	if (pmu_power_domain_st(PD_VDU) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0);
	if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R);
		RESTORE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W);
	}
}

static int pmu_set_power_domain(uint32_t pd_id, uint32_t pd_state)
{
	uint32_t state;

	if (pmu_power_domain_st(pd_id) == pd_state)
		goto out;

	if (pd_state == pmu_pd_on)
		pmu_power_domain_ctr(pd_id, pd_state);

	state = (pd_state == pmu_pd_off) ? BUS_IDLE : BUS_ACTIVE;

	switch (pd_id) {
	case PD_GPU:
		pmu_bus_idle_req(BUS_ID_GPU, state);
		break;
	case PD_VIO:
		pmu_bus_idle_req(BUS_ID_VIO, state);
		break;
	case PD_ISP0:
		pmu_bus_idle_req(BUS_ID_ISP0, state);
		break;
	case PD_ISP1:
		pmu_bus_idle_req(BUS_ID_ISP1, state);
		break;
	case PD_VO:
		pmu_bus_idle_req(BUS_ID_VOPB, state);
		pmu_bus_idle_req(BUS_ID_VOPL, state);
		break;
	case PD_HDCP:
		pmu_bus_idle_req(BUS_ID_HDCP, state);
		break;
	case PD_TCPD0:
		break;
	case PD_TCPD1:
		break;
	case PD_GMAC:
		pmu_bus_idle_req(BUS_ID_GMAC, state);
		break;
	case PD_CCI:
		pmu_bus_idle_req(BUS_ID_CCIM0, state);
		pmu_bus_idle_req(BUS_ID_CCIM1, state);
		break;
	case PD_SD:
		pmu_bus_idle_req(BUS_ID_SD, state);
		break;
	case PD_EMMC:
		pmu_bus_idle_req(BUS_ID_EMMC, state);
		break;
	case PD_EDP:
		pmu_bus_idle_req(BUS_ID_EDP, state);
		break;
	case PD_SDIOAUDIO:
		pmu_bus_idle_req(BUS_ID_SDIOAUDIO, state);
		break;
	case PD_GIC:
		pmu_bus_idle_req(BUS_ID_GIC, state);
		break;
	case PD_RGA:
		pmu_bus_idle_req(BUS_ID_RGA, state);
		break;
	case PD_VCODEC:
		pmu_bus_idle_req(BUS_ID_VCODEC, state);
		break;
	case PD_VDU:
		pmu_bus_idle_req(BUS_ID_VDU, state);
		break;
	case PD_IEP:
		pmu_bus_idle_req(BUS_ID_IEP, state);
		break;
	case PD_USB3:
		pmu_bus_idle_req(BUS_ID_USB3, state);
		break;
	case PD_PERIHP:
		pmu_bus_idle_req(BUS_ID_PERIHP, state);
		break;
	default:
		break;
	}

	if (pd_state == pmu_pd_off)
		pmu_power_domain_ctr(pd_id, pd_state);

out:
	return 0;
}

static uint32_t pmu_powerdomain_state;

static void pmu_power_domains_suspend(void)
{
	clk_gate_con_save();
	clk_gate_con_disable();
	qos_save();
	pmu_powerdomain_state = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);
	pmu_set_power_domain(PD_GPU, pmu_pd_off);
	pmu_set_power_domain(PD_TCPD0, pmu_pd_off);
	pmu_set_power_domain(PD_TCPD1, pmu_pd_off);
	pmu_set_power_domain(PD_VO, pmu_pd_off);
	pmu_set_power_domain(PD_ISP0, pmu_pd_off);
	pmu_set_power_domain(PD_ISP1, pmu_pd_off);
	pmu_set_power_domain(PD_HDCP, pmu_pd_off);
	pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_off);
	pmu_set_power_domain(PD_GMAC, pmu_pd_off);
	pmu_set_power_domain(PD_EDP, pmu_pd_off);
	pmu_set_power_domain(PD_IEP, pmu_pd_off);
	pmu_set_power_domain(PD_RGA, pmu_pd_off);
	pmu_set_power_domain(PD_VCODEC, pmu_pd_off);
	pmu_set_power_domain(PD_VDU, pmu_pd_off);
	clk_gate_con_restore();
}
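
/*
 * A worked example of the snapshot logic used below: pmu_powerdomain_state
 * holds PMU_PWRDN_ST as sampled in pmu_power_domains_suspend(), where a set
 * bit means the domain was already powered down. Resume therefore only
 * re-enables domains whose bit reads 0; e.g. if BIT(PD_GPU) was clear
 * before suspend (GPU on), resume powers PD_GPU back on.
 */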

static void pmu_power_domains_resume(void)
{
	clk_gate_con_save();
	clk_gate_con_disable();
	if (!(pmu_powerdomain_state & BIT(PD_VDU)))
		pmu_set_power_domain(PD_VDU, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_VCODEC)))
		pmu_set_power_domain(PD_VCODEC, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_RGA)))
		pmu_set_power_domain(PD_RGA, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_IEP)))
		pmu_set_power_domain(PD_IEP, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_EDP)))
		pmu_set_power_domain(PD_EDP, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_GMAC)))
		pmu_set_power_domain(PD_GMAC, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_SDIOAUDIO)))
		pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_HDCP)))
		pmu_set_power_domain(PD_HDCP, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_ISP1)))
		pmu_set_power_domain(PD_ISP1, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_ISP0)))
		pmu_set_power_domain(PD_ISP0, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_VO)))
		pmu_set_power_domain(PD_VO, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_TCPD1)))
		pmu_set_power_domain(PD_TCPD1, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_TCPD0)))
		pmu_set_power_domain(PD_TCPD0, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_GPU)))
		pmu_set_power_domain(PD_GPU, pmu_pd_on);
	qos_restore();
	clk_gate_con_restore();
}

void rk3399_flash_l2_b(void)
{
	uint32_t wait_cnt = 0;

	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B));
	dsb();

	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
		 BIT(L2_FLUSHDONE_CLUSTER_B))) {
		wait_cnt++;
		if (wait_cnt >= MAX_WAIT_COUNT)
			WARN("%s:reg %x,wait\n", __func__,
			     mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}

	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B));
}

static void pmu_scu_b_pwrdn(void)
{
	uint32_t wait_cnt = 0;

	if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) &
	     (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) !=
	    (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) {
		ERROR("%s: not all cpus are off\n", __func__);
		return;
	}

	rk3399_flash_l2_b();

	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG));

	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
		 BIT(STANDBY_BY_WFIL2_CLUSTER_B))) {
		wait_cnt++;
		if (wait_cnt >= MAX_WAIT_COUNT)
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}
}

static void pmu_scu_b_pwrup(void)
{
	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG));
}

void plat_rockchip_pmusram_prepare(void)
{
	uint32_t *sram_dst, *sram_src;
	size_t sram_size;

	/* Copy the PMU SRAM code and data into place. */
	sram_dst = (uint32_t *)PMUSRAM_BASE;
	sram_src = (uint32_t *)&pmu_cpuson_entrypoint_start;
	sram_size = (uint32_t *)&pmu_cpuson_entrypoint_end -
		    (uint32_t *)sram_src;

	u32_align_cpy(sram_dst, sram_src, sram_size);

	psram_sleep_cfg->sp = PSRAM_DT_BASE;
}

static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id)
{
	assert(cpu_id < PLATFORM_CORE_COUNT);
	return core_pm_cfg_info[cpu_id];
}

static inline void set_cpus_pwr_domain_cfg_info(uint32_t cpu_id, uint32_t value)
{
	assert(cpu_id < PLATFORM_CORE_COUNT);
	core_pm_cfg_info[cpu_id] = value;
#if !USE_COHERENT_MEM
	flush_dcache_range((uintptr_t)&core_pm_cfg_info[cpu_id],
			   sizeof(uint32_t));
#endif
}

static int cpus_power_domain_on(uint32_t cpu_id)
{
	uint32_t cfg_info;
	uint32_t cpu_pd = PD_CPUL0 + cpu_id;
	/*
	 * There are two methods to power a core on or off; see the comment
	 * above core_pm_cfg_info for details.
	 */

	cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id);

	if (cfg_info == core_pwr_pd) {
		/* disable core_pm cfg */
		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      CORES_PM_DISABLE);
		/* if the core is already on, power it off first */
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 0);
			pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
		}

		pmu_power_domain_ctr(cpu_pd, pmu_pd_on);
	} else {
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			WARN("%s: cpu%d is not off!\n", __func__, cpu_id);
			return -EINVAL;
		}

		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      BIT(core_pm_sft_wakeup_en));
		dsb();
	}

	return 0;
}

static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg)
{
	uint32_t cpu_pd;
	uint32_t core_pm_value;

	cpu_pd = PD_CPUL0 + cpu_id;
	if (pmu_power_domain_st(cpu_pd) == pmu_pd_off)
		return 0;

	if (pd_cfg == core_pwr_pd) {
		if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK))
			return -EINVAL;

		/* disable core_pm cfg */
		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      CORES_PM_DISABLE);

		set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);
		pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
	} else {
		set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);

		core_pm_value = BIT(core_pm_en);
		if (pd_cfg == core_pwr_wfi_int)
			core_pm_value |= BIT(core_pm_int_wakeup_en);
		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      core_pm_value);
		dsb();
	}

	return 0;
}
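
/*
 * To summarize the PMU_CORE_PM_CON values written by the two functions
 * above, one per mode:
 *   core_pwr_pd:      CORES_PM_DISABLE (core_pm logic off; the domain is
 *                     switched directly with pmu_power_domain_ctr())
 *   core_pwr_wfi:     BIT(core_pm_en)
 *   core_pwr_wfi_int: BIT(core_pm_en) | BIT(core_pm_int_wakeup_en)
 */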

static inline void clst_pwr_domain_suspend(plat_local_state_t lvl_state)
{
	uint32_t cpu_id = plat_my_core_pos();
	uint32_t pll_id, clst_st_msk, clst_st_chk_msk, pmu_st;

	assert(cpu_id < PLATFORM_CORE_COUNT);

	if (lvl_state == PLAT_MAX_OFF_STATE) {
		if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT) {
			pll_id = ALPLL_ID;
			clst_st_msk = CLST_L_CPUS_MSK;
		} else {
			pll_id = ABPLL_ID;
			clst_st_msk = CLST_B_CPUS_MSK <<
				      PLATFORM_CLUSTER0_CORE_COUNT;
		}

		clst_st_chk_msk = clst_st_msk & ~(BIT(cpu_id));

		pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);

		pmu_st &= clst_st_msk;

		if (pmu_st == clst_st_chk_msk) {
			mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3),
				      PLL_SLOW_MODE);

			clst_warmboot_data[pll_id] = PMU_CLST_RET;

			pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);
			pmu_st &= clst_st_msk;
			if (pmu_st == clst_st_chk_msk)
				return;
			/*
			 * This means another cpu in the cluster is up
			 * again; we must restore the config at once.
			 */
			mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3),
				      PLL_NOMAL_MODE);
			clst_warmboot_data[pll_id] = 0;
		}
	}
}

static int clst_pwr_domain_resume(plat_local_state_t lvl_state)
{
	uint32_t cpu_id = plat_my_core_pos();
	uint32_t pll_id, pll_st;

	assert(cpu_id < PLATFORM_CORE_COUNT);

	if (lvl_state == PLAT_MAX_OFF_STATE) {
		if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT)
			pll_id = ALPLL_ID;
		else
			pll_id = ABPLL_ID;

		pll_st = mmio_read_32(CRU_BASE + CRU_PLL_CON(pll_id, 3)) >>
			 PLL_MODE_SHIFT;

		if (pll_st != NORMAL_MODE) {
			WARN("%s: clst (%d) is in error mode (%d)\n",
			     __func__, pll_id, pll_st);
			return -1;
		}
	}

	return 0;
}

static void nonboot_cpus_off(void)
{
	uint32_t boot_cpu, cpu;

	boot_cpu = plat_my_core_pos();

	/* turn off nonboot cpus */
	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) {
		if (cpu == boot_cpu)
			continue;
		cpus_power_domain_off(cpu, core_pwr_pd);
	}
}

static int cores_pwr_domain_on(unsigned long mpidr, uint64_t entrypoint)
{
	uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr);

	assert(cpu_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG;
	cpuson_entry_point[cpu_id] = entrypoint;
	dsb();

	cpus_power_domain_on(cpu_id);

	return 0;
}

static int cores_pwr_domain_off(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	cpus_power_domain_off(cpu_id, core_pwr_wfi);

	return 0;
}

static int hlvl_pwr_domain_off(uint32_t lvl, plat_local_state_t lvl_state)
{
	switch (lvl) {
	case MPIDR_AFFLVL1:
		clst_pwr_domain_suspend(lvl_state);
		break;
	default:
		break;
	}

	return 0;
}

static int cores_pwr_domain_suspend(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	assert(cpu_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN;
	cpuson_entry_point[cpu_id] = plat_get_sec_entrypoint();
	dsb();

	cpus_power_domain_off(cpu_id, core_pwr_wfi_int);

	return 0;
}

static int hlvl_pwr_domain_suspend(uint32_t lvl, plat_local_state_t lvl_state)
{
	switch (lvl) {
	case MPIDR_AFFLVL1:
		clst_pwr_domain_suspend(lvl_state);
		break;
	default:
		break;
	}

	return 0;
}

static int cores_pwr_domain_on_finish(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
		      CORES_PM_DISABLE);
	return 0;
}

static int hlvl_pwr_domain_on_finish(uint32_t lvl,
				     plat_local_state_t lvl_state)
{
	switch (lvl) {
	case MPIDR_AFFLVL1:
		clst_pwr_domain_resume(lvl_state);
		break;
	default:
		break;
	}

	return 0;
}

static int cores_pwr_domain_resume(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	/* Disable core_pm */
	mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), CORES_PM_DISABLE);

	return 0;
}

static int hlvl_pwr_domain_resume(uint32_t lvl, plat_local_state_t lvl_state)
{
	switch (lvl) {
	case MPIDR_AFFLVL1:
		clst_pwr_domain_resume(lvl_state);
		break;
	default:
		break;
	}

	return 0;
}

/**
 * init_pmu_counts - Init timing counts in the PMU register area
 *
 * At various points when we power up or down parts of the system we need
 * a delay to wait for power / clocks to become stable. The PMU has counters
 * to help software do the delay properly. Basically, it works like this:
 * - Software sets up counter values
 * - When software turns on something in the PMU, the counter kicks off
 * - The hardware sets a bit automatically when the counter has finished
 *   and software knows that the initialization is done.
 *
 * It's software's job to set up these counters. The hardware power-on
 * default for these settings is conservative, setting everything to 0x5dc0
 * (750 ms in 32 kHz counts or 1 ms in 24 MHz counts).
 *
 * Note that some of these counters are only really used at suspend/resume
 * time (for instance, that's the only time we turn off/on the oscillator)
 * and others are used during normal runtime (like turning on/off a CPU or
 * GPU), but it doesn't hurt to init everything at boot.
 *
 * Also note that these counters can run off the 32 kHz clock or the 24 MHz
 * clock. While the 24 MHz clock can give us more precision, it's not always
 * available (like when we turn the oscillator off at sleep time). The
 * pmu_use_lf bit (lf: low frequency) selects the 32 kHz clock while in
 * power mode. Current understanding is that counts work like this:
 *   IF (pmu_use_lf == 0) || (power_mode_en == 0)
 *     use the 24M OSC for counts
 *   ELSE
 *     use the 32K OSC for counts
 *
 * Notes:
 * - There is a separate bit for the PMU called PMU_24M_EN_CFG. At the
 *   moment we always keep that 0. This apparently chooses between using
 *   the PLL as the source for the PMU vs. the 24M clock. If we ever set
 *   it to 1 we should consider how it affects these counts (if at all).
 * - The power_mode_en bit is documented to auto-clear when we leave
 *   "power mode". That's why most clocks are on 24M. Only timings used
 *   while in "power mode" are on 32k.
 * - In some cases the kernel may override these counts.
 *
 * PMU_STABLE_CNT / PMU_OSC_CNT / PMU_PLLLOCK_CNT are important counts in
 * power mode, so we need to ensure they are available.
 */
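
/*
 * A worked example of the cycle math used below, assuming the CYCL_*
 * helpers in the pmu header expand to plain multiplications (ms * 32 for
 * the 32 kHz counter; ms * 24000 and us * 24 for the 24 MHz counter):
 *   CYCL_32K_CNT_MS(30) -> 30 * 32   = 960 cycles   ~= 30 ms @ 32 kHz
 *   CYCL_24M_CNT_MS(3)  -> 3 * 24000 = 72000 cycles  = 3 ms @ 24 MHz
 *   CYCL_24M_CNT_US(1)  -> 1 * 24    = 24 cycles     = 1 us @ 24 MHz
 * The hardware default 0x5dc0 (24000) likewise reads as 750 ms at 32 kHz
 * or 1 ms at 24 MHz, matching the comment above.
 */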

static void init_pmu_counts(void)
{
	/* COUNTS FOR INSIDE POWER MODE */

	/*
	 * From limited testing, need PMU stable >= 2ms, but go overkill
	 * and choose 30 ms to match testing on past SoCs. Also let
	 * OSC have 30 ms for stabilization.
	 */
	mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_32K_CNT_MS(30));
	mmio_write_32(PMU_BASE + PMU_OSC_CNT, CYCL_32K_CNT_MS(30));

	/* Unclear what these should be; try 3 ms */
	mmio_write_32(PMU_BASE + PMU_WAKEUP_RST_CLR_CNT, CYCL_32K_CNT_MS(3));

	/* Unclear what this should be, but set the default explicitly */
	mmio_write_32(PMU_BASE + PMU_TIMEOUT_CNT, 0x5dc0);

	/* COUNTS FOR OUTSIDE POWER MODE */

	/* Put something sorta conservative here until we know better */
	mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_24M_CNT_MS(3));
	mmio_write_32(PMU_BASE + PMU_DDRIO_PWRON_CNT, CYCL_24M_CNT_MS(1));
	mmio_write_32(PMU_BASE + PMU_CENTER_PWRDN_CNT, CYCL_24M_CNT_MS(1));
	mmio_write_32(PMU_BASE + PMU_CENTER_PWRUP_CNT, CYCL_24M_CNT_MS(1));

	/*
	 * Set CPU/GPU to 1 us.
	 *
	 * NOTE: Even though ATF doesn't configure the GPU we'll still set
	 * up counts here. After all ATF controls all these other bits and
	 * also chooses which clock these counters use.
	 */
	mmio_write_32(PMU_BASE + PMU_SCU_L_PWRDN_CNT, CYCL_24M_CNT_US(1));
	mmio_write_32(PMU_BASE + PMU_SCU_L_PWRUP_CNT, CYCL_24M_CNT_US(1));
	mmio_write_32(PMU_BASE + PMU_SCU_B_PWRDN_CNT, CYCL_24M_CNT_US(1));
	mmio_write_32(PMU_BASE + PMU_SCU_B_PWRUP_CNT, CYCL_24M_CNT_US(1));
	mmio_write_32(PMU_BASE + PMU_GPU_PWRDN_CNT, CYCL_24M_CNT_US(1));
	mmio_write_32(PMU_BASE + PMU_GPU_PWRUP_CNT, CYCL_24M_CNT_US(1));
}

static void sys_slp_config(void)
{
	uint32_t slp_mode_cfg = 0;

	mmio_write_32(GRF_BASE + GRF_SOC_CON4, CCI_FORCE_WAKEUP);
	mmio_write_32(PMU_BASE + PMU_CCI500_CON,
		      BIT_WITH_WMSK(PMU_CLR_PREQ_CCI500_HW) |
		      BIT_WITH_WMSK(PMU_CLR_QREQ_CCI500_HW) |
		      BIT_WITH_WMSK(PMU_QGATING_CCI500_CFG));

	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      BIT_WITH_WMSK(PMU_CLR_CORE_L_HW) |
		      BIT_WITH_WMSK(PMU_CLR_CORE_L_2GIC_HW) |
		      BIT_WITH_WMSK(PMU_CLR_GIC2_CORE_L_HW));

	slp_mode_cfg = BIT(PMU_PWR_MODE_EN) |
		       BIT(PMU_POWER_OFF_REQ_CFG) |
		       BIT(PMU_CPU0_PD_EN) |
		       BIT(PMU_L2_FLUSH_EN) |
		       BIT(PMU_L2_IDLE_EN) |
		       BIT(PMU_SCU_PD_EN) |
		       BIT(PMU_CCI_PD_EN) |
		       BIT(PMU_CLK_CORE_SRC_GATE_EN) |
		       BIT(PMU_PERILP_PD_EN) |
		       BIT(PMU_CLK_PERILP_SRC_GATE_EN) |
		       BIT(PMU_ALIVE_USE_LF) |
		       BIT(PMU_SREF0_ENTER_EN) |
		       BIT(PMU_SREF1_ENTER_EN) |
		       BIT(PMU_DDRC0_GATING_EN) |
		       BIT(PMU_DDRC1_GATING_EN) |
		       BIT(PMU_DDRIO0_RET_EN) |
		       BIT(PMU_DDRIO1_RET_EN) |
		       BIT(PMU_DDRIO_RET_HW_DE_REQ) |
		       BIT(PMU_PLL_PD_EN) |
		       BIT(PMU_CLK_CENTER_SRC_GATE_EN) |
		       BIT(PMU_OSC_DIS) |
		       BIT(PMU_PMU_USE_LF);

	mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, BIT(PMU_GPIO_WKUP_EN));
	mmio_write_32(PMU_BASE + PMU_PWRMODE_CON, slp_mode_cfg);

	mmio_write_32(PMU_BASE + PMU_PLL_CON, PLL_PD_HW);
	mmio_write_32(PMUGRF_BASE + PMUGRF_SOC_CON0, EXTERNAL_32K);
	mmio_write_32(PMUGRF_BASE, IOMUX_CLK_32K); /* 32k iomux */
}
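
/*
 * Presumably the PMU_ALIVE_USE_LF and PMU_PMU_USE_LF bits set in
 * sys_slp_config() above are what switch the PMU onto the 32 kHz clock
 * while in power mode, i.e. the pmu_use_lf behavior described in the
 * init_pmu_counts() comment.
 */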

static void set_hw_idle(uint32_t hw_idle)
{
	mmio_setbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle);
}

static void clr_hw_idle(uint32_t hw_idle)
{
	mmio_clrbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle);
}

static int sys_pwr_domain_suspend(void)
{
	uint32_t wait_cnt = 0;
	uint32_t status = 0;

	pmu_power_domains_suspend();
	set_hw_idle(BIT(PMU_CLR_CENTER1) |
		    BIT(PMU_CLR_ALIVE) |
		    BIT(PMU_CLR_MSCH0) |
		    BIT(PMU_CLR_MSCH1) |
		    BIT(PMU_CLR_CCIM0) |
		    BIT(PMU_CLR_CCIM1) |
		    BIT(PMU_CLR_CENTER) |
		    BIT(PMU_CLR_PERILP) |
		    BIT(PMU_CLR_PMU) |
		    BIT(PMU_CLR_PERILPM0) |
		    BIT(PMU_CLR_GIC));

	sys_slp_config();
	pmu_sgrf_rst_hld();

	mmio_write_32(SGRF_BASE + SGRF_SOC_CON0_1(1),
		      (PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	pmu_scu_b_pwrdn();

	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) |
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_SW) |
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_GIC2_CORE_B_SW));
	dsb();
	status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) |
		 BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) |
		 BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST);
	while ((mmio_read_32(PMU_BASE +
			     PMU_ADB400_ST) & status) != status) {
		wait_cnt++;
		if (wait_cnt >= MAX_WAIT_COUNT) {
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_ADB400_ST));
			panic();
		}
	}
	mmio_setbits_32(PMU_BASE + PMU_PWRDN_CON, BIT(PMU_SCU_B_PWRDWN_EN));

	/*
	 * Disabling the PLLs/PWMs/DVFS is the last step before WFI
	 * in suspend.
	 */
	plls_suspend_prepare();
	disable_dvfs_plls();
	disable_pwms();
	disable_nodvfs_plls();

	return 0;
}

static int sys_pwr_domain_resume(void)
{
	uint32_t wait_cnt = 0;
	uint32_t status = 0;

	enable_nodvfs_plls();
	enable_pwms();
	/* PWM regulators take time to come up; give 300us to be safe. */
	udelay(300);
	enable_dvfs_plls();
	plls_resume_finish();

	/*
	 * The wakeup status is not cleared by itself, we need to clear it
	 * manually. Otherwise we will always see some wakeup event pending
	 * the next time we query it.
	 *
	 * NOTE: If the kernel needs to query this, we might want to stash
	 * it somewhere.
	 */
	mmio_write_32(PMU_BASE + PMU_WAKEUP_STATUS, 0xffffffff);

	mmio_write_32(PMU_BASE + PMU_WKUP_CFG4, 0x00);

	mmio_write_32(SGRF_BASE + SGRF_SOC_CON0_1(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	mmio_write_32(PMU_BASE + PMU_CCI500_CON,
		      WMSK_BIT(PMU_CLR_PREQ_CCI500_HW) |
		      WMSK_BIT(PMU_CLR_QREQ_CCI500_HW) |
		      WMSK_BIT(PMU_QGATING_CCI500_CFG));
	dsb();
	mmio_clrbits_32(PMU_BASE + PMU_PWRDN_CON,
			BIT(PMU_SCU_B_PWRDWN_EN));

	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) |
		      WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_SW) |
		      WMSK_BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW) |
		      WMSK_BIT(PMU_CLR_CORE_L_HW) |
		      WMSK_BIT(PMU_CLR_CORE_L_2GIC_HW) |
		      WMSK_BIT(PMU_CLR_GIC2_CORE_L_HW));

	status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) |
		 BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) |
		 BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST);

	while ((mmio_read_32(PMU_BASE +
			     PMU_ADB400_ST) & status)) {
		wait_cnt++;
		if (wait_cnt >= MAX_WAIT_COUNT) {
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_ADB400_ST));
			panic();
		}
	}

	pmu_sgrf_rst_hld_release();
	pmu_scu_b_pwrup();

	pmu_power_domains_resume();
	clr_hw_idle(BIT(PMU_CLR_CENTER1) |
		    BIT(PMU_CLR_ALIVE) |
		    BIT(PMU_CLR_MSCH0) |
		    BIT(PMU_CLR_MSCH1) |
		    BIT(PMU_CLR_CCIM0) |
		    BIT(PMU_CLR_CCIM1) |
		    BIT(PMU_CLR_CENTER) |
		    BIT(PMU_CLR_PERILP) |
		    BIT(PMU_CLR_PMU) |
		    BIT(PMU_CLR_GIC));
	return 0;
}

void __dead2 soc_soft_reset(void)
{
	struct gpio_info *rst_gpio;

	rst_gpio = (struct gpio_info *)plat_get_rockchip_gpio_reset();

	if (rst_gpio) {
		gpio_set_direction(rst_gpio->index, GPIO_DIR_OUT);
		gpio_set_value(rst_gpio->index, rst_gpio->polarity);
	} else {
		soc_global_soft_reset();
	}

	while (1)
		;
}

void __dead2 soc_system_off(void)
{
	struct gpio_info *poweroff_gpio;

	poweroff_gpio = (struct gpio_info *)plat_get_rockchip_gpio_poweroff();

	if (poweroff_gpio) {
		/*
		 * If the tsadc over-temp pin (GPIO1A6) is used as the
		 * shutdown gpio, its iomux needs to be set back to the
		 * gpio function first.
		 */
		if (poweroff_gpio->index == TSADC_INT_PIN) {
			mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO1A_IOMUX,
				      GPIO1A6_IOMUX);
		}
		gpio_set_direction(poweroff_gpio->index, GPIO_DIR_OUT);
		gpio_set_value(poweroff_gpio->index, poweroff_gpio->polarity);
	} else {
		WARN("Do nothing when system off\n");
	}

	while (1)
		;
}

static void __dead2 sys_pwr_down_wfi(const psci_power_state_t *target_state)
{
	uint32_t wakeup_status;

	/*
	 * Check wakeup status and abort suspend early if we see a wakeup
	 * event.
	 *
	 * NOTE: technically we're supposed to just execute a wfi here and
	 * we'll either execute a normal suspend/resume or the wfi will be
	 * treated as a no-op if a wake event was present and caused an
	 * abort of the suspend/resume. For some reason that's not happening
	 * and if we execute the wfi while a wake event is pending then the
	 * whole system wedges.
	 *
	 * Until the above is solved this extra check prevents system wedges
	 * in most cases, but there is still a small race condition between
	 * checking PMU_WAKEUP_STATUS and executing wfi. If a wake event
	 * happens in there then we will die.
	 */
	wakeup_status = mmio_read_32(PMU_BASE + PMU_WAKEUP_STATUS);
	if (wakeup_status) {
		WARN("early wake, will not enter power mode.\n");

		mmio_write_32(PMU_BASE + PMU_PWRMODE_CON, 0);

		disable_mmu_icache_el3();
		bl31_warm_entrypoint();

		while (1)
			;
	} else {
		/* Enter WFI */
		psci_power_down_wfi();
	}
}

static struct rockchip_pm_ops_cb pm_ops = {
	.cores_pwr_dm_on = cores_pwr_domain_on,
	.cores_pwr_dm_off = cores_pwr_domain_off,
	.cores_pwr_dm_on_finish = cores_pwr_domain_on_finish,
	.cores_pwr_dm_suspend = cores_pwr_domain_suspend,
	.cores_pwr_dm_resume = cores_pwr_domain_resume,
	.hlvl_pwr_dm_suspend = hlvl_pwr_domain_suspend,
	.hlvl_pwr_dm_resume = hlvl_pwr_domain_resume,
	.hlvl_pwr_dm_off = hlvl_pwr_domain_off,
	.hlvl_pwr_dm_on_finish = hlvl_pwr_domain_on_finish,
	.sys_pwr_dm_suspend = sys_pwr_domain_suspend,
	.sys_pwr_dm_resume = sys_pwr_domain_resume,
	.sys_gbl_soft_reset = soc_soft_reset,
	.system_off = soc_system_off,
	.sys_pwr_down_wfi = sys_pwr_down_wfi,
};

void plat_rockchip_pmu_init(void)
{
	uint32_t cpu;

	rockchip_pd_lock_init();
	plat_setup_rockchip_pm_ops(&pm_ops);

	/* the warm boot address register is 32 bits wide, so truncate */
	cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;

	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
		cpuson_flags[cpu] = 0;

	for (cpu = 0; cpu < PLATFORM_CLUSTER_COUNT; cpu++)
		clst_warmboot_data[cpu] = 0;

	psram_sleep_cfg->ddr_func = 0x00;
	psram_sleep_cfg->ddr_data = 0x00;
	psram_sleep_cfg->ddr_flag = 0x00;
	psram_sleep_cfg->boot_mpidr = read_mpidr_el1() & 0xffff;

	/* configure the cpus' warm boot address */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON0_1(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	mmio_write_32(PMU_BASE + PMU_NOC_AUTO_ENA, NOC_AUTO_ENABLE);

	/*
	 * Enable Schmitt trigger for better 32 kHz input signal, which is
	 * important for suspend/resume reliability among other things.
	 */
	mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO0A_SMT, GPIO0A0_SMT_ENABLE);

	init_pmu_counts();

	nonboot_cpus_off();

	INFO("%s(%d): pd status %x\n", __func__, __LINE__,
	     mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}