1 /* 2 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. 3 * 4 * SPDX-License-Identifier: BSD-3-Clause 5 */ 6 7 #include <arch_helpers.h> 8 #include <assert.h> 9 #include <bakery_lock.h> 10 #include <bl31.h> 11 #include <debug.h> 12 #include <delay_timer.h> 13 #include <dfs.h> 14 #include <errno.h> 15 #include <gpio.h> 16 #include <m0_ctl.h> 17 #include <mmio.h> 18 #include <plat_params.h> 19 #include <plat_private.h> 20 #include <platform.h> 21 #include <platform_def.h> 22 #include <pmu.h> 23 #include <pmu_com.h> 24 #include <pwm.h> 25 #include <rk3399_def.h> 26 #include <secure.h> 27 #include <soc.h> 28 #include <string.h> 29 #include <suspend.h> 30 31 DEFINE_BAKERY_LOCK(rockchip_pd_lock); 32 33 static uint32_t cpu_warm_boot_addr; 34 static char store_sram[SRAM_BIN_LIMIT + SRAM_TEXT_LIMIT + SRAM_DATA_LIMIT]; 35 static uint32_t store_cru[CRU_SDIO0_CON1 / 4]; 36 static uint32_t store_usbphy0[7]; 37 static uint32_t store_usbphy1[7]; 38 static uint32_t store_grf_io_vsel; 39 static uint32_t store_grf_soc_con0; 40 static uint32_t store_grf_soc_con1; 41 static uint32_t store_grf_soc_con2; 42 static uint32_t store_grf_soc_con3; 43 static uint32_t store_grf_soc_con4; 44 static uint32_t store_grf_soc_con7; 45 static uint32_t store_grf_ddrc_con[4]; 46 static uint32_t store_wdt0[2]; 47 static uint32_t store_wdt1[2]; 48 49 /* 50 * There are two ways to powering on or off on core. 51 * 1) Control it power domain into on or off in PMU_PWRDN_CON reg, 52 * it is core_pwr_pd mode 53 * 2) Enable the core power manage in PMU_CORE_PM_CON reg, 54 * then, if the core enter into wfi, it power domain will be 55 * powered off automatically. it is core_pwr_wfi or core_pwr_wfi_int mode 56 * so we need core_pm_cfg_info to distinguish which method be used now. 57 */ 58 59 static uint32_t core_pm_cfg_info[PLATFORM_CORE_COUNT] 60 #if USE_COHERENT_MEM 61 __attribute__ ((section("tzfw_coherent_mem"))) 62 #endif 63 ;/* coheront */ 64 65 static void pmu_bus_idle_req(uint32_t bus, uint32_t state) 66 { 67 uint32_t bus_id = BIT(bus); 68 uint32_t bus_req; 69 uint32_t wait_cnt = 0; 70 uint32_t bus_state, bus_ack; 71 72 if (state) 73 bus_req = BIT(bus); 74 else 75 bus_req = 0; 76 77 mmio_clrsetbits_32(PMU_BASE + PMU_BUS_IDLE_REQ, bus_id, bus_req); 78 79 do { 80 bus_state = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) & bus_id; 81 bus_ack = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK) & bus_id; 82 if (bus_state == bus_req && bus_ack == bus_req) 83 break; 84 85 wait_cnt++; 86 udelay(1); 87 } while (wait_cnt < MAX_WAIT_COUNT); 88 89 if (bus_state != bus_req || bus_ack != bus_req) { 90 INFO("%s:st=%x(%x)\n", __func__, 91 mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST), 92 bus_state); 93 INFO("%s:st=%x(%x)\n", __func__, 94 mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK), 95 bus_ack); 96 } 97 } 98 99 struct pmu_slpdata_s pmu_slpdata; 100 101 static void qos_restore(void) 102 { 103 if (pmu_power_domain_st(PD_GPU) == pmu_pd_on) 104 RESTORE_QOS(pmu_slpdata.gpu_qos, GPU); 105 if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) { 106 RESTORE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0); 107 RESTORE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1); 108 } 109 if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) { 110 RESTORE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0); 111 RESTORE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1); 112 } 113 if (pmu_power_domain_st(PD_VO) == pmu_pd_on) { 114 RESTORE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R); 115 RESTORE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W); 116 RESTORE_QOS(pmu_slpdata.vop_little, VOP_LITTLE); 117 } 118 if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on) 119 RESTORE_QOS(pmu_slpdata.hdcp_qos, HDCP); 120 if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on) 121 RESTORE_QOS(pmu_slpdata.gmac_qos, GMAC); 122 if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) { 123 RESTORE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0); 124 RESTORE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1); 125 } 126 if (pmu_power_domain_st(PD_SD) == pmu_pd_on) 127 RESTORE_QOS(pmu_slpdata.sdmmc_qos, SDMMC); 128 if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on) 129 RESTORE_QOS(pmu_slpdata.emmc_qos, EMMC); 130 if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on) 131 RESTORE_QOS(pmu_slpdata.sdio_qos, SDIO); 132 if (pmu_power_domain_st(PD_GIC) == pmu_pd_on) 133 RESTORE_QOS(pmu_slpdata.gic_qos, GIC); 134 if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) { 135 RESTORE_QOS(pmu_slpdata.rga_r_qos, RGA_R); 136 RESTORE_QOS(pmu_slpdata.rga_w_qos, RGA_W); 137 } 138 if (pmu_power_domain_st(PD_IEP) == pmu_pd_on) 139 RESTORE_QOS(pmu_slpdata.iep_qos, IEP); 140 if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) { 141 RESTORE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0); 142 RESTORE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1); 143 } 144 if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) { 145 RESTORE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0); 146 RESTORE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1); 147 RESTORE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP); 148 } 149 if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) { 150 RESTORE_QOS(pmu_slpdata.dmac0_qos, DMAC0); 151 RESTORE_QOS(pmu_slpdata.dmac1_qos, DMAC1); 152 RESTORE_QOS(pmu_slpdata.dcf_qos, DCF); 153 RESTORE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0); 154 RESTORE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1); 155 RESTORE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP); 156 RESTORE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP); 157 RESTORE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1); 158 } 159 if (pmu_power_domain_st(PD_VDU) == pmu_pd_on) 160 RESTORE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0); 161 if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) { 162 RESTORE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R); 163 RESTORE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W); 164 } 165 } 166 167 static void qos_save(void) 168 { 169 if (pmu_power_domain_st(PD_GPU) == pmu_pd_on) 170 SAVE_QOS(pmu_slpdata.gpu_qos, GPU); 171 if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) { 172 SAVE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0); 173 SAVE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1); 174 } 175 if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) { 176 SAVE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0); 177 SAVE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1); 178 } 179 if (pmu_power_domain_st(PD_VO) == pmu_pd_on) { 180 SAVE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R); 181 SAVE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W); 182 SAVE_QOS(pmu_slpdata.vop_little, VOP_LITTLE); 183 } 184 if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on) 185 SAVE_QOS(pmu_slpdata.hdcp_qos, HDCP); 186 if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on) 187 SAVE_QOS(pmu_slpdata.gmac_qos, GMAC); 188 if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) { 189 SAVE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0); 190 SAVE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1); 191 } 192 if (pmu_power_domain_st(PD_SD) == pmu_pd_on) 193 SAVE_QOS(pmu_slpdata.sdmmc_qos, SDMMC); 194 if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on) 195 SAVE_QOS(pmu_slpdata.emmc_qos, EMMC); 196 if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on) 197 SAVE_QOS(pmu_slpdata.sdio_qos, SDIO); 198 if (pmu_power_domain_st(PD_GIC) == pmu_pd_on) 199 SAVE_QOS(pmu_slpdata.gic_qos, GIC); 200 if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) { 201 SAVE_QOS(pmu_slpdata.rga_r_qos, RGA_R); 202 SAVE_QOS(pmu_slpdata.rga_w_qos, RGA_W); 203 } 204 if (pmu_power_domain_st(PD_IEP) == pmu_pd_on) 205 SAVE_QOS(pmu_slpdata.iep_qos, IEP); 206 if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) { 207 SAVE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0); 208 SAVE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1); 209 } 210 if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) { 211 SAVE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0); 212 SAVE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1); 213 SAVE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP); 214 } 215 if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) { 216 SAVE_QOS(pmu_slpdata.dmac0_qos, DMAC0); 217 SAVE_QOS(pmu_slpdata.dmac1_qos, DMAC1); 218 SAVE_QOS(pmu_slpdata.dcf_qos, DCF); 219 SAVE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0); 220 SAVE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1); 221 SAVE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP); 222 SAVE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP); 223 SAVE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1); 224 } 225 if (pmu_power_domain_st(PD_VDU) == pmu_pd_on) 226 SAVE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0); 227 if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) { 228 SAVE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R); 229 SAVE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W); 230 } 231 } 232 233 static int pmu_set_power_domain(uint32_t pd_id, uint32_t pd_state) 234 { 235 uint32_t state; 236 237 if (pmu_power_domain_st(pd_id) == pd_state) 238 goto out; 239 240 if (pd_state == pmu_pd_on) 241 pmu_power_domain_ctr(pd_id, pd_state); 242 243 state = (pd_state == pmu_pd_off) ? BUS_IDLE : BUS_ACTIVE; 244 245 switch (pd_id) { 246 case PD_GPU: 247 pmu_bus_idle_req(BUS_ID_GPU, state); 248 break; 249 case PD_VIO: 250 pmu_bus_idle_req(BUS_ID_VIO, state); 251 break; 252 case PD_ISP0: 253 pmu_bus_idle_req(BUS_ID_ISP0, state); 254 break; 255 case PD_ISP1: 256 pmu_bus_idle_req(BUS_ID_ISP1, state); 257 break; 258 case PD_VO: 259 pmu_bus_idle_req(BUS_ID_VOPB, state); 260 pmu_bus_idle_req(BUS_ID_VOPL, state); 261 break; 262 case PD_HDCP: 263 pmu_bus_idle_req(BUS_ID_HDCP, state); 264 break; 265 case PD_TCPD0: 266 break; 267 case PD_TCPD1: 268 break; 269 case PD_GMAC: 270 pmu_bus_idle_req(BUS_ID_GMAC, state); 271 break; 272 case PD_CCI: 273 pmu_bus_idle_req(BUS_ID_CCIM0, state); 274 pmu_bus_idle_req(BUS_ID_CCIM1, state); 275 break; 276 case PD_SD: 277 pmu_bus_idle_req(BUS_ID_SD, state); 278 break; 279 case PD_EMMC: 280 pmu_bus_idle_req(BUS_ID_EMMC, state); 281 break; 282 case PD_EDP: 283 pmu_bus_idle_req(BUS_ID_EDP, state); 284 break; 285 case PD_SDIOAUDIO: 286 pmu_bus_idle_req(BUS_ID_SDIOAUDIO, state); 287 break; 288 case PD_GIC: 289 pmu_bus_idle_req(BUS_ID_GIC, state); 290 break; 291 case PD_RGA: 292 pmu_bus_idle_req(BUS_ID_RGA, state); 293 break; 294 case PD_VCODEC: 295 pmu_bus_idle_req(BUS_ID_VCODEC, state); 296 break; 297 case PD_VDU: 298 pmu_bus_idle_req(BUS_ID_VDU, state); 299 break; 300 case PD_IEP: 301 pmu_bus_idle_req(BUS_ID_IEP, state); 302 break; 303 case PD_USB3: 304 pmu_bus_idle_req(BUS_ID_USB3, state); 305 break; 306 case PD_PERIHP: 307 pmu_bus_idle_req(BUS_ID_PERIHP, state); 308 break; 309 default: 310 break; 311 } 312 313 if (pd_state == pmu_pd_off) 314 pmu_power_domain_ctr(pd_id, pd_state); 315 316 out: 317 return 0; 318 } 319 320 static uint32_t pmu_powerdomain_state; 321 322 static void pmu_power_domains_suspend(void) 323 { 324 clk_gate_con_save(); 325 clk_gate_con_disable(); 326 qos_save(); 327 pmu_powerdomain_state = mmio_read_32(PMU_BASE + PMU_PWRDN_ST); 328 pmu_set_power_domain(PD_GPU, pmu_pd_off); 329 pmu_set_power_domain(PD_TCPD0, pmu_pd_off); 330 pmu_set_power_domain(PD_TCPD1, pmu_pd_off); 331 pmu_set_power_domain(PD_VO, pmu_pd_off); 332 pmu_set_power_domain(PD_ISP0, pmu_pd_off); 333 pmu_set_power_domain(PD_ISP1, pmu_pd_off); 334 pmu_set_power_domain(PD_HDCP, pmu_pd_off); 335 pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_off); 336 pmu_set_power_domain(PD_GMAC, pmu_pd_off); 337 pmu_set_power_domain(PD_EDP, pmu_pd_off); 338 pmu_set_power_domain(PD_IEP, pmu_pd_off); 339 pmu_set_power_domain(PD_RGA, pmu_pd_off); 340 pmu_set_power_domain(PD_VCODEC, pmu_pd_off); 341 pmu_set_power_domain(PD_VDU, pmu_pd_off); 342 pmu_set_power_domain(PD_USB3, pmu_pd_off); 343 pmu_set_power_domain(PD_EMMC, pmu_pd_off); 344 pmu_set_power_domain(PD_VIO, pmu_pd_off); 345 pmu_set_power_domain(PD_SD, pmu_pd_off); 346 pmu_set_power_domain(PD_PERIHP, pmu_pd_off); 347 clk_gate_con_restore(); 348 } 349 350 static void pmu_power_domains_resume(void) 351 { 352 clk_gate_con_save(); 353 clk_gate_con_disable(); 354 if (!(pmu_powerdomain_state & BIT(PD_VDU))) 355 pmu_set_power_domain(PD_VDU, pmu_pd_on); 356 if (!(pmu_powerdomain_state & BIT(PD_VCODEC))) 357 pmu_set_power_domain(PD_VCODEC, pmu_pd_on); 358 if (!(pmu_powerdomain_state & BIT(PD_RGA))) 359 pmu_set_power_domain(PD_RGA, pmu_pd_on); 360 if (!(pmu_powerdomain_state & BIT(PD_IEP))) 361 pmu_set_power_domain(PD_IEP, pmu_pd_on); 362 if (!(pmu_powerdomain_state & BIT(PD_EDP))) 363 pmu_set_power_domain(PD_EDP, pmu_pd_on); 364 if (!(pmu_powerdomain_state & BIT(PD_GMAC))) 365 pmu_set_power_domain(PD_GMAC, pmu_pd_on); 366 if (!(pmu_powerdomain_state & BIT(PD_SDIOAUDIO))) 367 pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_on); 368 if (!(pmu_powerdomain_state & BIT(PD_HDCP))) 369 pmu_set_power_domain(PD_HDCP, pmu_pd_on); 370 if (!(pmu_powerdomain_state & BIT(PD_ISP1))) 371 pmu_set_power_domain(PD_ISP1, pmu_pd_on); 372 if (!(pmu_powerdomain_state & BIT(PD_ISP0))) 373 pmu_set_power_domain(PD_ISP0, pmu_pd_on); 374 if (!(pmu_powerdomain_state & BIT(PD_VO))) 375 pmu_set_power_domain(PD_VO, pmu_pd_on); 376 if (!(pmu_powerdomain_state & BIT(PD_TCPD1))) 377 pmu_set_power_domain(PD_TCPD1, pmu_pd_on); 378 if (!(pmu_powerdomain_state & BIT(PD_TCPD0))) 379 pmu_set_power_domain(PD_TCPD0, pmu_pd_on); 380 if (!(pmu_powerdomain_state & BIT(PD_GPU))) 381 pmu_set_power_domain(PD_GPU, pmu_pd_on); 382 if (!(pmu_powerdomain_state & BIT(PD_USB3))) 383 pmu_set_power_domain(PD_USB3, pmu_pd_on); 384 if (!(pmu_powerdomain_state & BIT(PD_EMMC))) 385 pmu_set_power_domain(PD_EMMC, pmu_pd_on); 386 if (!(pmu_powerdomain_state & BIT(PD_VIO))) 387 pmu_set_power_domain(PD_VIO, pmu_pd_on); 388 if (!(pmu_powerdomain_state & BIT(PD_SD))) 389 pmu_set_power_domain(PD_SD, pmu_pd_on); 390 if (!(pmu_powerdomain_state & BIT(PD_PERIHP))) 391 pmu_set_power_domain(PD_PERIHP, pmu_pd_on); 392 qos_restore(); 393 clk_gate_con_restore(); 394 } 395 396 void rk3399_flush_l2_b(void) 397 { 398 uint32_t wait_cnt = 0; 399 400 mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B)); 401 dsb(); 402 403 /* 404 * The Big cluster flush L2 cache took ~4ms by default, give 10ms for 405 * the enough margin. 406 */ 407 while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) & 408 BIT(L2_FLUSHDONE_CLUSTER_B))) { 409 wait_cnt++; 410 udelay(10); 411 if (wait_cnt == 10000 / 10) 412 WARN("L2 cache flush on suspend took longer than 10ms\n"); 413 } 414 415 mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B)); 416 } 417 418 static void pmu_scu_b_pwrdn(void) 419 { 420 uint32_t wait_cnt = 0; 421 422 if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) & 423 (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) != 424 (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) { 425 ERROR("%s: not all cpus is off\n", __func__); 426 return; 427 } 428 429 rk3399_flush_l2_b(); 430 431 mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG)); 432 433 while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) & 434 BIT(STANDBY_BY_WFIL2_CLUSTER_B))) { 435 wait_cnt++; 436 udelay(1); 437 if (wait_cnt >= MAX_WAIT_COUNT) 438 ERROR("%s:wait cluster-b l2(%x)\n", __func__, 439 mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST)); 440 } 441 } 442 443 static void pmu_scu_b_pwrup(void) 444 { 445 mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG)); 446 } 447 448 static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id) 449 { 450 assert(cpu_id < PLATFORM_CORE_COUNT); 451 return core_pm_cfg_info[cpu_id]; 452 } 453 454 static inline void set_cpus_pwr_domain_cfg_info(uint32_t cpu_id, uint32_t value) 455 { 456 assert(cpu_id < PLATFORM_CORE_COUNT); 457 core_pm_cfg_info[cpu_id] = value; 458 #if !USE_COHERENT_MEM 459 flush_dcache_range((uintptr_t)&core_pm_cfg_info[cpu_id], 460 sizeof(uint32_t)); 461 #endif 462 } 463 464 static int cpus_power_domain_on(uint32_t cpu_id) 465 { 466 uint32_t cfg_info; 467 uint32_t cpu_pd = PD_CPUL0 + cpu_id; 468 /* 469 * There are two ways to powering on or off on core. 470 * 1) Control it power domain into on or off in PMU_PWRDN_CON reg 471 * 2) Enable the core power manage in PMU_CORE_PM_CON reg, 472 * then, if the core enter into wfi, it power domain will be 473 * powered off automatically. 474 */ 475 476 cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id); 477 478 if (cfg_info == core_pwr_pd) { 479 /* disable core_pm cfg */ 480 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 481 CORES_PM_DISABLE); 482 /* if the cores have be on, power off it firstly */ 483 if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) { 484 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 0); 485 pmu_power_domain_ctr(cpu_pd, pmu_pd_off); 486 } 487 488 pmu_power_domain_ctr(cpu_pd, pmu_pd_on); 489 } else { 490 if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) { 491 WARN("%s: cpu%d is not in off,!\n", __func__, cpu_id); 492 return -EINVAL; 493 } 494 495 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 496 BIT(core_pm_sft_wakeup_en)); 497 dsb(); 498 } 499 500 return 0; 501 } 502 503 static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg) 504 { 505 uint32_t cpu_pd; 506 uint32_t core_pm_value; 507 508 cpu_pd = PD_CPUL0 + cpu_id; 509 if (pmu_power_domain_st(cpu_pd) == pmu_pd_off) 510 return 0; 511 512 if (pd_cfg == core_pwr_pd) { 513 if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK)) 514 return -EINVAL; 515 516 /* disable core_pm cfg */ 517 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 518 CORES_PM_DISABLE); 519 520 set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg); 521 pmu_power_domain_ctr(cpu_pd, pmu_pd_off); 522 } else { 523 set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg); 524 525 core_pm_value = BIT(core_pm_en); 526 if (pd_cfg == core_pwr_wfi_int) 527 core_pm_value |= BIT(core_pm_int_wakeup_en); 528 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 529 core_pm_value); 530 dsb(); 531 } 532 533 return 0; 534 } 535 536 static inline void clst_pwr_domain_suspend(plat_local_state_t lvl_state) 537 { 538 uint32_t cpu_id = plat_my_core_pos(); 539 uint32_t pll_id, clst_st_msk, clst_st_chk_msk, pmu_st; 540 541 assert(cpu_id < PLATFORM_CORE_COUNT); 542 543 if (lvl_state == PLAT_MAX_OFF_STATE) { 544 if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT) { 545 pll_id = ALPLL_ID; 546 clst_st_msk = CLST_L_CPUS_MSK; 547 } else { 548 pll_id = ABPLL_ID; 549 clst_st_msk = CLST_B_CPUS_MSK << 550 PLATFORM_CLUSTER0_CORE_COUNT; 551 } 552 553 clst_st_chk_msk = clst_st_msk & ~(BIT(cpu_id)); 554 555 pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST); 556 557 pmu_st &= clst_st_msk; 558 559 if (pmu_st == clst_st_chk_msk) { 560 mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3), 561 PLL_SLOW_MODE); 562 563 clst_warmboot_data[pll_id] = PMU_CLST_RET; 564 565 pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST); 566 pmu_st &= clst_st_msk; 567 if (pmu_st == clst_st_chk_msk) 568 return; 569 /* 570 * it is mean that others cpu is up again, 571 * we must resume the cfg at once. 572 */ 573 mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3), 574 PLL_NOMAL_MODE); 575 clst_warmboot_data[pll_id] = 0; 576 } 577 } 578 } 579 580 static int clst_pwr_domain_resume(plat_local_state_t lvl_state) 581 { 582 uint32_t cpu_id = plat_my_core_pos(); 583 uint32_t pll_id, pll_st; 584 585 assert(cpu_id < PLATFORM_CORE_COUNT); 586 587 if (lvl_state == PLAT_MAX_OFF_STATE) { 588 if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT) 589 pll_id = ALPLL_ID; 590 else 591 pll_id = ABPLL_ID; 592 593 pll_st = mmio_read_32(CRU_BASE + CRU_PLL_CON(pll_id, 3)) >> 594 PLL_MODE_SHIFT; 595 596 if (pll_st != NORMAL_MODE) { 597 WARN("%s: clst (%d) is in error mode (%d)\n", 598 __func__, pll_id, pll_st); 599 return -1; 600 } 601 } 602 603 return 0; 604 } 605 606 static void nonboot_cpus_off(void) 607 { 608 uint32_t boot_cpu, cpu; 609 610 boot_cpu = plat_my_core_pos(); 611 612 /* turn off noboot cpus */ 613 for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) { 614 if (cpu == boot_cpu) 615 continue; 616 cpus_power_domain_off(cpu, core_pwr_pd); 617 } 618 } 619 620 int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint) 621 { 622 uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr); 623 624 assert(cpu_id < PLATFORM_CORE_COUNT); 625 assert(cpuson_flags[cpu_id] == 0); 626 cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG; 627 cpuson_entry_point[cpu_id] = entrypoint; 628 dsb(); 629 630 cpus_power_domain_on(cpu_id); 631 632 return PSCI_E_SUCCESS; 633 } 634 635 int rockchip_soc_cores_pwr_dm_off(void) 636 { 637 uint32_t cpu_id = plat_my_core_pos(); 638 639 cpus_power_domain_off(cpu_id, core_pwr_wfi); 640 641 return PSCI_E_SUCCESS; 642 } 643 644 int rockchip_soc_hlvl_pwr_dm_off(uint32_t lvl, 645 plat_local_state_t lvl_state) 646 { 647 switch (lvl) { 648 case MPIDR_AFFLVL1: 649 clst_pwr_domain_suspend(lvl_state); 650 break; 651 default: 652 break; 653 } 654 655 return PSCI_E_SUCCESS; 656 } 657 658 int rockchip_soc_cores_pwr_dm_suspend(void) 659 { 660 uint32_t cpu_id = plat_my_core_pos(); 661 662 assert(cpu_id < PLATFORM_CORE_COUNT); 663 assert(cpuson_flags[cpu_id] == 0); 664 cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN; 665 cpuson_entry_point[cpu_id] = plat_get_sec_entrypoint(); 666 dsb(); 667 668 cpus_power_domain_off(cpu_id, core_pwr_wfi_int); 669 670 return PSCI_E_SUCCESS; 671 } 672 673 int rockchip_soc_hlvl_pwr_dm_suspend(uint32_t lvl, plat_local_state_t lvl_state) 674 { 675 switch (lvl) { 676 case MPIDR_AFFLVL1: 677 clst_pwr_domain_suspend(lvl_state); 678 break; 679 default: 680 break; 681 } 682 683 return PSCI_E_SUCCESS; 684 } 685 686 int rockchip_soc_cores_pwr_dm_on_finish(void) 687 { 688 uint32_t cpu_id = plat_my_core_pos(); 689 690 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 691 CORES_PM_DISABLE); 692 return PSCI_E_SUCCESS; 693 } 694 695 int rockchip_soc_hlvl_pwr_dm_on_finish(uint32_t lvl, 696 plat_local_state_t lvl_state) 697 { 698 switch (lvl) { 699 case MPIDR_AFFLVL1: 700 clst_pwr_domain_resume(lvl_state); 701 break; 702 default: 703 break; 704 } 705 706 return PSCI_E_SUCCESS; 707 } 708 709 int rockchip_soc_cores_pwr_dm_resume(void) 710 { 711 uint32_t cpu_id = plat_my_core_pos(); 712 713 /* Disable core_pm */ 714 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), CORES_PM_DISABLE); 715 716 return PSCI_E_SUCCESS; 717 } 718 719 int rockchip_soc_hlvl_pwr_dm_resume(uint32_t lvl, plat_local_state_t lvl_state) 720 { 721 switch (lvl) { 722 case MPIDR_AFFLVL1: 723 clst_pwr_domain_resume(lvl_state); 724 default: 725 break; 726 } 727 728 return PSCI_E_SUCCESS; 729 } 730 731 /** 732 * init_pmu_counts - Init timing counts in the PMU register area 733 * 734 * At various points when we power up or down parts of the system we need 735 * a delay to wait for power / clocks to become stable. The PMU has counters 736 * to help software do the delay properly. Basically, it works like this: 737 * - Software sets up counter values 738 * - When software turns on something in the PMU, the counter kicks off 739 * - The hardware sets a bit automatically when the counter has finished and 740 * software knows that the initialization is done. 741 * 742 * It's software's job to setup these counters. The hardware power on default 743 * for these settings is conservative, setting everything to 0x5dc0 744 * (750 ms in 32 kHz counts or 1 ms in 24 MHz counts). 745 * 746 * Note that some of these counters are only really used at suspend/resume 747 * time (for instance, that's the only time we turn off/on the oscillator) and 748 * others are used during normal runtime (like turning on/off a CPU or GPU) but 749 * it doesn't hurt to init everything at boot. 750 * 751 * Also note that these counters can run off the 32 kHz clock or the 24 MHz 752 * clock. While the 24 MHz clock can give us more precision, it's not always 753 * available (like when we turn the oscillator off at sleep time). The 754 * pmu_use_lf (lf: low freq) is available in power mode. Current understanding 755 * is that counts work like this: 756 * IF (pmu_use_lf == 0) || (power_mode_en == 0) 757 * use the 24M OSC for counts 758 * ELSE 759 * use the 32K OSC for counts 760 * 761 * Notes: 762 * - There is a separate bit for the PMU called PMU_24M_EN_CFG. At the moment 763 * we always keep that 0. This apparently choose between using the PLL as 764 * the source for the PMU vs. the 24M clock. If we ever set it to 1 we 765 * should consider how it affects these counts (if at all). 766 * - The power_mode_en is documented to auto-clear automatically when we leave 767 * "power mode". That's why most clocks are on 24M. Only timings used when 768 * in "power mode" are 32k. 769 * - In some cases the kernel may override these counts. 770 * 771 * The PMU_STABLE_CNT / PMU_OSC_CNT / PMU_PLLLOCK_CNT are important CNTs 772 * in power mode, we need to ensure that they are available. 773 */ 774 static void init_pmu_counts(void) 775 { 776 /* COUNTS FOR INSIDE POWER MODE */ 777 778 /* 779 * From limited testing, need PMU stable >= 2ms, but go overkill 780 * and choose 30 ms to match testing on past SoCs. Also let 781 * OSC have 30 ms for stabilization. 782 */ 783 mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_32K_CNT_MS(30)); 784 mmio_write_32(PMU_BASE + PMU_OSC_CNT, CYCL_32K_CNT_MS(30)); 785 786 /* Unclear what these should be; try 3 ms */ 787 mmio_write_32(PMU_BASE + PMU_WAKEUP_RST_CLR_CNT, CYCL_32K_CNT_MS(3)); 788 789 /* Unclear what this should be, but set the default explicitly */ 790 mmio_write_32(PMU_BASE + PMU_TIMEOUT_CNT, 0x5dc0); 791 792 /* COUNTS FOR OUTSIDE POWER MODE */ 793 794 /* Put something sorta conservative here until we know better */ 795 mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_24M_CNT_MS(3)); 796 mmio_write_32(PMU_BASE + PMU_DDRIO_PWRON_CNT, CYCL_24M_CNT_MS(1)); 797 mmio_write_32(PMU_BASE + PMU_CENTER_PWRDN_CNT, CYCL_24M_CNT_MS(1)); 798 mmio_write_32(PMU_BASE + PMU_CENTER_PWRUP_CNT, CYCL_24M_CNT_MS(1)); 799 800 /* 801 * when we enable PMU_CLR_PERILP, it will shut down the SRAM, but 802 * M0 code run in SRAM, and we need it to check whether cpu enter 803 * FSM status, so we must wait M0 finish their code and enter WFI, 804 * then we can shutdown SRAM, according FSM order: 805 * ST_NORMAL->..->ST_SCU_L_PWRDN->..->ST_CENTER_PWRDN->ST_PERILP_PWRDN 806 * we can add delay when shutdown ST_SCU_L_PWRDN to guarantee M0 get 807 * the FSM status and enter WFI, then enable PMU_CLR_PERILP. 808 */ 809 mmio_write_32(PMU_BASE + PMU_SCU_L_PWRDN_CNT, CYCL_24M_CNT_MS(5)); 810 mmio_write_32(PMU_BASE + PMU_SCU_L_PWRUP_CNT, CYCL_24M_CNT_US(1)); 811 812 /* 813 * Set CPU/GPU to 1 us. 814 * 815 * NOTE: Even though ATF doesn't configure the GPU we'll still setup 816 * counts here. After all ATF controls all these other bits and also 817 * chooses which clock these counters use. 818 */ 819 mmio_write_32(PMU_BASE + PMU_SCU_B_PWRDN_CNT, CYCL_24M_CNT_US(1)); 820 mmio_write_32(PMU_BASE + PMU_SCU_B_PWRUP_CNT, CYCL_24M_CNT_US(1)); 821 mmio_write_32(PMU_BASE + PMU_GPU_PWRDN_CNT, CYCL_24M_CNT_US(1)); 822 mmio_write_32(PMU_BASE + PMU_GPU_PWRUP_CNT, CYCL_24M_CNT_US(1)); 823 } 824 825 static uint32_t clk_ddrc_save; 826 827 static void sys_slp_config(void) 828 { 829 uint32_t slp_mode_cfg = 0; 830 831 /* keep enabling clk_ddrc_bpll_src_en gate for DDRC */ 832 clk_ddrc_save = mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(3)); 833 mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(3), WMSK_BIT(1)); 834 835 prepare_abpll_for_ddrctrl(); 836 sram_func_set_ddrctl_pll(ABPLL_ID); 837 838 mmio_write_32(GRF_BASE + GRF_SOC_CON4, CCI_FORCE_WAKEUP); 839 mmio_write_32(PMU_BASE + PMU_CCI500_CON, 840 BIT_WITH_WMSK(PMU_CLR_PREQ_CCI500_HW) | 841 BIT_WITH_WMSK(PMU_CLR_QREQ_CCI500_HW) | 842 BIT_WITH_WMSK(PMU_QGATING_CCI500_CFG)); 843 844 mmio_write_32(PMU_BASE + PMU_ADB400_CON, 845 BIT_WITH_WMSK(PMU_CLR_CORE_L_HW) | 846 BIT_WITH_WMSK(PMU_CLR_CORE_L_2GIC_HW) | 847 BIT_WITH_WMSK(PMU_CLR_GIC2_CORE_L_HW)); 848 849 slp_mode_cfg = BIT(PMU_PWR_MODE_EN) | 850 BIT(PMU_INPUT_CLAMP_EN) | 851 BIT(PMU_POWER_OFF_REQ_CFG) | 852 BIT(PMU_CPU0_PD_EN) | 853 BIT(PMU_L2_FLUSH_EN) | 854 BIT(PMU_L2_IDLE_EN) | 855 BIT(PMU_SCU_PD_EN) | 856 BIT(PMU_CCI_PD_EN) | 857 BIT(PMU_CLK_CORE_SRC_GATE_EN) | 858 BIT(PMU_ALIVE_USE_LF) | 859 BIT(PMU_SREF0_ENTER_EN) | 860 BIT(PMU_SREF1_ENTER_EN) | 861 BIT(PMU_DDRC0_GATING_EN) | 862 BIT(PMU_DDRC1_GATING_EN) | 863 BIT(PMU_DDRIO0_RET_EN) | 864 BIT(PMU_DDRIO0_RET_DE_REQ) | 865 BIT(PMU_DDRIO1_RET_EN) | 866 BIT(PMU_DDRIO1_RET_DE_REQ) | 867 BIT(PMU_DDRIO_RET_HW_DE_REQ) | 868 BIT(PMU_CENTER_PD_EN) | 869 BIT(PMU_PERILP_PD_EN) | 870 BIT(PMU_CLK_PERILP_SRC_GATE_EN) | 871 BIT(PMU_PLL_PD_EN) | 872 BIT(PMU_CLK_CENTER_SRC_GATE_EN) | 873 BIT(PMU_OSC_DIS) | 874 BIT(PMU_PMU_USE_LF); 875 876 mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, BIT(PMU_GPIO_WKUP_EN)); 877 mmio_write_32(PMU_BASE + PMU_PWRMODE_CON, slp_mode_cfg); 878 879 mmio_write_32(PMU_BASE + PMU_PLL_CON, PLL_PD_HW); 880 mmio_write_32(PMUGRF_BASE + PMUGRF_SOC_CON0, EXTERNAL_32K); 881 mmio_write_32(PMUGRF_BASE, IOMUX_CLK_32K); /* 32k iomux */ 882 } 883 884 static void set_hw_idle(uint32_t hw_idle) 885 { 886 mmio_setbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle); 887 } 888 889 static void clr_hw_idle(uint32_t hw_idle) 890 { 891 mmio_clrbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle); 892 } 893 894 static uint32_t iomux_status[12]; 895 static uint32_t pull_mode_status[12]; 896 static uint32_t gpio_direction[3]; 897 static uint32_t gpio_2_4_clk_gate; 898 899 static void suspend_apio(void) 900 { 901 struct apio_info *suspend_apio; 902 int i; 903 904 suspend_apio = plat_get_rockchip_suspend_apio(); 905 906 if (!suspend_apio) 907 return; 908 909 /* save gpio2 ~ gpio4 iomux and pull mode */ 910 for (i = 0; i < 12; i++) { 911 iomux_status[i] = mmio_read_32(GRF_BASE + 912 GRF_GPIO2A_IOMUX + i * 4); 913 pull_mode_status[i] = mmio_read_32(GRF_BASE + 914 GRF_GPIO2A_P + i * 4); 915 } 916 917 /* store gpio2 ~ gpio4 clock gate state */ 918 gpio_2_4_clk_gate = (mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(31)) >> 919 PCLK_GPIO2_GATE_SHIFT) & 0x07; 920 921 /* enable gpio2 ~ gpio4 clock gate */ 922 mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31), 923 BITS_WITH_WMASK(0, 0x07, PCLK_GPIO2_GATE_SHIFT)); 924 925 /* save gpio2 ~ gpio4 direction */ 926 gpio_direction[0] = mmio_read_32(GPIO2_BASE + 0x04); 927 gpio_direction[1] = mmio_read_32(GPIO3_BASE + 0x04); 928 gpio_direction[2] = mmio_read_32(GPIO4_BASE + 0x04); 929 930 /* apio1 charge gpio3a0 ~ gpio3c7 */ 931 if (suspend_apio->apio1) { 932 933 /* set gpio3a0 ~ gpio3c7 iomux to gpio */ 934 mmio_write_32(GRF_BASE + GRF_GPIO3A_IOMUX, 935 REG_SOC_WMSK | GRF_IOMUX_GPIO); 936 mmio_write_32(GRF_BASE + GRF_GPIO3B_IOMUX, 937 REG_SOC_WMSK | GRF_IOMUX_GPIO); 938 mmio_write_32(GRF_BASE + GRF_GPIO3C_IOMUX, 939 REG_SOC_WMSK | GRF_IOMUX_GPIO); 940 941 /* set gpio3a0 ~ gpio3c7 pull mode to pull none */ 942 mmio_write_32(GRF_BASE + GRF_GPIO3A_P, REG_SOC_WMSK | 0); 943 mmio_write_32(GRF_BASE + GRF_GPIO3B_P, REG_SOC_WMSK | 0); 944 mmio_write_32(GRF_BASE + GRF_GPIO3C_P, REG_SOC_WMSK | 0); 945 946 /* set gpio3a0 ~ gpio3c7 to input */ 947 mmio_clrbits_32(GPIO3_BASE + 0x04, 0x00ffffff); 948 } 949 950 /* apio2 charge gpio2a0 ~ gpio2b4 */ 951 if (suspend_apio->apio2) { 952 953 /* set gpio2a0 ~ gpio2b4 iomux to gpio */ 954 mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX, 955 REG_SOC_WMSK | GRF_IOMUX_GPIO); 956 mmio_write_32(GRF_BASE + GRF_GPIO2B_IOMUX, 957 REG_SOC_WMSK | GRF_IOMUX_GPIO); 958 959 /* set gpio2a0 ~ gpio2b4 pull mode to pull none */ 960 mmio_write_32(GRF_BASE + GRF_GPIO2A_P, REG_SOC_WMSK | 0); 961 mmio_write_32(GRF_BASE + GRF_GPIO2B_P, REG_SOC_WMSK | 0); 962 963 /* set gpio2a0 ~ gpio2b4 to input */ 964 mmio_clrbits_32(GPIO2_BASE + 0x04, 0x00001fff); 965 } 966 967 /* apio3 charge gpio2c0 ~ gpio2d4*/ 968 if (suspend_apio->apio3) { 969 970 /* set gpio2a0 ~ gpio2b4 iomux to gpio */ 971 mmio_write_32(GRF_BASE + GRF_GPIO2C_IOMUX, 972 REG_SOC_WMSK | GRF_IOMUX_GPIO); 973 mmio_write_32(GRF_BASE + GRF_GPIO2D_IOMUX, 974 REG_SOC_WMSK | GRF_IOMUX_GPIO); 975 976 /* set gpio2c0 ~ gpio2d4 pull mode to pull none */ 977 mmio_write_32(GRF_BASE + GRF_GPIO2C_P, REG_SOC_WMSK | 0); 978 mmio_write_32(GRF_BASE + GRF_GPIO2D_P, REG_SOC_WMSK | 0); 979 980 /* set gpio2c0 ~ gpio2d4 to input */ 981 mmio_clrbits_32(GPIO2_BASE + 0x04, 0x1fff0000); 982 } 983 984 /* apio4 charge gpio4c0 ~ gpio4c7, gpio4d0 ~ gpio4d6 */ 985 if (suspend_apio->apio4) { 986 987 /* set gpio4c0 ~ gpio4d6 iomux to gpio */ 988 mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX, 989 REG_SOC_WMSK | GRF_IOMUX_GPIO); 990 mmio_write_32(GRF_BASE + GRF_GPIO4D_IOMUX, 991 REG_SOC_WMSK | GRF_IOMUX_GPIO); 992 993 /* set gpio4c0 ~ gpio4d6 pull mode to pull none */ 994 mmio_write_32(GRF_BASE + GRF_GPIO4C_P, REG_SOC_WMSK | 0); 995 mmio_write_32(GRF_BASE + GRF_GPIO4D_P, REG_SOC_WMSK | 0); 996 997 /* set gpio4c0 ~ gpio4d6 to input */ 998 mmio_clrbits_32(GPIO4_BASE + 0x04, 0x7fff0000); 999 } 1000 1001 /* apio5 charge gpio3d0 ~ gpio3d7, gpio4a0 ~ gpio4a7*/ 1002 if (suspend_apio->apio5) { 1003 /* set gpio3d0 ~ gpio4a7 iomux to gpio */ 1004 mmio_write_32(GRF_BASE + GRF_GPIO3D_IOMUX, 1005 REG_SOC_WMSK | GRF_IOMUX_GPIO); 1006 mmio_write_32(GRF_BASE + GRF_GPIO4A_IOMUX, 1007 REG_SOC_WMSK | GRF_IOMUX_GPIO); 1008 1009 /* set gpio3d0 ~ gpio4a7 pull mode to pull none */ 1010 mmio_write_32(GRF_BASE + GRF_GPIO3D_P, REG_SOC_WMSK | 0); 1011 mmio_write_32(GRF_BASE + GRF_GPIO4A_P, REG_SOC_WMSK | 0); 1012 1013 /* set gpio4c0 ~ gpio4d6 to input */ 1014 mmio_clrbits_32(GPIO3_BASE + 0x04, 0xff000000); 1015 mmio_clrbits_32(GPIO4_BASE + 0x04, 0x000000ff); 1016 } 1017 } 1018 1019 static void resume_apio(void) 1020 { 1021 struct apio_info *suspend_apio; 1022 int i; 1023 1024 suspend_apio = plat_get_rockchip_suspend_apio(); 1025 1026 if (!suspend_apio) 1027 return; 1028 1029 for (i = 0; i < 12; i++) { 1030 mmio_write_32(GRF_BASE + GRF_GPIO2A_P + i * 4, 1031 REG_SOC_WMSK | pull_mode_status[i]); 1032 mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX + i * 4, 1033 REG_SOC_WMSK | iomux_status[i]); 1034 } 1035 1036 /* set gpio2 ~ gpio4 direction back to store value */ 1037 mmio_write_32(GPIO2_BASE + 0x04, gpio_direction[0]); 1038 mmio_write_32(GPIO3_BASE + 0x04, gpio_direction[1]); 1039 mmio_write_32(GPIO4_BASE + 0x04, gpio_direction[2]); 1040 1041 /* set gpio2 ~ gpio4 clock gate back to store value */ 1042 mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31), 1043 BITS_WITH_WMASK(gpio_2_4_clk_gate, 0x07, 1044 PCLK_GPIO2_GATE_SHIFT)); 1045 } 1046 1047 static void suspend_gpio(void) 1048 { 1049 struct gpio_info *suspend_gpio; 1050 uint32_t count; 1051 int i; 1052 1053 suspend_gpio = plat_get_rockchip_suspend_gpio(&count); 1054 1055 for (i = 0; i < count; i++) { 1056 gpio_set_value(suspend_gpio[i].index, suspend_gpio[i].polarity); 1057 gpio_set_direction(suspend_gpio[i].index, GPIO_DIR_OUT); 1058 udelay(1); 1059 } 1060 } 1061 1062 static void resume_gpio(void) 1063 { 1064 struct gpio_info *suspend_gpio; 1065 uint32_t count; 1066 int i; 1067 1068 suspend_gpio = plat_get_rockchip_suspend_gpio(&count); 1069 1070 for (i = count - 1; i >= 0; i--) { 1071 gpio_set_value(suspend_gpio[i].index, 1072 !suspend_gpio[i].polarity); 1073 gpio_set_direction(suspend_gpio[i].index, GPIO_DIR_OUT); 1074 udelay(1); 1075 } 1076 } 1077 1078 static void m0_configure_suspend(void) 1079 { 1080 /* set PARAM to M0_FUNC_SUSPEND */ 1081 mmio_write_32(M0_PARAM_ADDR + PARAM_M0_FUNC, M0_FUNC_SUSPEND); 1082 } 1083 1084 void sram_save(void) 1085 { 1086 size_t text_size = (char *)&__bl31_sram_text_real_end - 1087 (char *)&__bl31_sram_text_start; 1088 size_t data_size = (char *)&__bl31_sram_data_real_end - 1089 (char *)&__bl31_sram_data_start; 1090 size_t incbin_size = (char *)&__sram_incbin_real_end - 1091 (char *)&__sram_incbin_start; 1092 1093 memcpy(&store_sram[0], &__bl31_sram_text_start, text_size); 1094 memcpy(&store_sram[text_size], &__bl31_sram_data_start, data_size); 1095 memcpy(&store_sram[text_size + data_size], &__sram_incbin_start, 1096 incbin_size); 1097 } 1098 1099 void sram_restore(void) 1100 { 1101 size_t text_size = (char *)&__bl31_sram_text_real_end - 1102 (char *)&__bl31_sram_text_start; 1103 size_t data_size = (char *)&__bl31_sram_data_real_end - 1104 (char *)&__bl31_sram_data_start; 1105 size_t incbin_size = (char *)&__sram_incbin_real_end - 1106 (char *)&__sram_incbin_start; 1107 1108 memcpy(&__bl31_sram_text_start, &store_sram[0], text_size); 1109 memcpy(&__bl31_sram_data_start, &store_sram[text_size], data_size); 1110 memcpy(&__sram_incbin_start, &store_sram[text_size + data_size], 1111 incbin_size); 1112 } 1113 1114 struct uart_debug { 1115 uint32_t uart_dll; 1116 uint32_t uart_dlh; 1117 uint32_t uart_ier; 1118 uint32_t uart_fcr; 1119 uint32_t uart_mcr; 1120 uint32_t uart_lcr; 1121 }; 1122 1123 #define UART_DLL 0x00 1124 #define UART_DLH 0x04 1125 #define UART_IER 0x04 1126 #define UART_FCR 0x08 1127 #define UART_LCR 0x0c 1128 #define UART_MCR 0x10 1129 #define UARTSRR 0x88 1130 1131 #define UART_RESET BIT(0) 1132 #define UARTFCR_FIFOEN BIT(0) 1133 #define RCVR_FIFO_RESET BIT(1) 1134 #define XMIT_FIFO_RESET BIT(2) 1135 #define DIAGNOSTIC_MODE BIT(4) 1136 #define UARTLCR_DLAB BIT(7) 1137 1138 static struct uart_debug uart_save; 1139 1140 void suspend_uart(void) 1141 { 1142 uart_save.uart_lcr = mmio_read_32(PLAT_RK_UART_BASE + UART_LCR); 1143 uart_save.uart_ier = mmio_read_32(PLAT_RK_UART_BASE + UART_IER); 1144 uart_save.uart_mcr = mmio_read_32(PLAT_RK_UART_BASE + UART_MCR); 1145 mmio_write_32(PLAT_RK_UART_BASE + UART_LCR, 1146 uart_save.uart_lcr | UARTLCR_DLAB); 1147 uart_save.uart_dll = mmio_read_32(PLAT_RK_UART_BASE + UART_DLL); 1148 uart_save.uart_dlh = mmio_read_32(PLAT_RK_UART_BASE + UART_DLH); 1149 mmio_write_32(PLAT_RK_UART_BASE + UART_LCR, uart_save.uart_lcr); 1150 } 1151 1152 void resume_uart(void) 1153 { 1154 uint32_t uart_lcr; 1155 1156 mmio_write_32(PLAT_RK_UART_BASE + UARTSRR, 1157 XMIT_FIFO_RESET | RCVR_FIFO_RESET | UART_RESET); 1158 1159 uart_lcr = mmio_read_32(PLAT_RK_UART_BASE + UART_LCR); 1160 mmio_write_32(PLAT_RK_UART_BASE + UART_MCR, DIAGNOSTIC_MODE); 1161 mmio_write_32(PLAT_RK_UART_BASE + UART_LCR, uart_lcr | UARTLCR_DLAB); 1162 mmio_write_32(PLAT_RK_UART_BASE + UART_DLL, uart_save.uart_dll); 1163 mmio_write_32(PLAT_RK_UART_BASE + UART_DLH, uart_save.uart_dlh); 1164 mmio_write_32(PLAT_RK_UART_BASE + UART_LCR, uart_save.uart_lcr); 1165 mmio_write_32(PLAT_RK_UART_BASE + UART_IER, uart_save.uart_ier); 1166 mmio_write_32(PLAT_RK_UART_BASE + UART_FCR, UARTFCR_FIFOEN); 1167 mmio_write_32(PLAT_RK_UART_BASE + UART_MCR, uart_save.uart_mcr); 1168 } 1169 1170 void save_usbphy(void) 1171 { 1172 store_usbphy0[0] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL0); 1173 store_usbphy0[1] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL2); 1174 store_usbphy0[2] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL3); 1175 store_usbphy0[3] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL12); 1176 store_usbphy0[4] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL13); 1177 store_usbphy0[5] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL15); 1178 store_usbphy0[6] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL16); 1179 1180 store_usbphy1[0] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL0); 1181 store_usbphy1[1] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL2); 1182 store_usbphy1[2] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL3); 1183 store_usbphy1[3] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL12); 1184 store_usbphy1[4] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL13); 1185 store_usbphy1[5] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL15); 1186 store_usbphy1[6] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL16); 1187 } 1188 1189 void restore_usbphy(void) 1190 { 1191 mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL0, 1192 REG_SOC_WMSK | store_usbphy0[0]); 1193 mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL2, 1194 REG_SOC_WMSK | store_usbphy0[1]); 1195 mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL3, 1196 REG_SOC_WMSK | store_usbphy0[2]); 1197 mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL12, 1198 REG_SOC_WMSK | store_usbphy0[3]); 1199 mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL13, 1200 REG_SOC_WMSK | store_usbphy0[4]); 1201 mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL15, 1202 REG_SOC_WMSK | store_usbphy0[5]); 1203 mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL16, 1204 REG_SOC_WMSK | store_usbphy0[6]); 1205 1206 mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL0, 1207 REG_SOC_WMSK | store_usbphy1[0]); 1208 mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL2, 1209 REG_SOC_WMSK | store_usbphy1[1]); 1210 mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL3, 1211 REG_SOC_WMSK | store_usbphy1[2]); 1212 mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL12, 1213 REG_SOC_WMSK | store_usbphy1[3]); 1214 mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL13, 1215 REG_SOC_WMSK | store_usbphy1[4]); 1216 mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL15, 1217 REG_SOC_WMSK | store_usbphy1[5]); 1218 mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL16, 1219 REG_SOC_WMSK | store_usbphy1[6]); 1220 } 1221 1222 void grf_register_save(void) 1223 { 1224 int i; 1225 1226 store_grf_soc_con0 = mmio_read_32(GRF_BASE + GRF_SOC_CON(0)); 1227 store_grf_soc_con1 = mmio_read_32(GRF_BASE + GRF_SOC_CON(1)); 1228 store_grf_soc_con2 = mmio_read_32(GRF_BASE + GRF_SOC_CON(2)); 1229 store_grf_soc_con3 = mmio_read_32(GRF_BASE + GRF_SOC_CON(3)); 1230 store_grf_soc_con4 = mmio_read_32(GRF_BASE + GRF_SOC_CON(4)); 1231 store_grf_soc_con7 = mmio_read_32(GRF_BASE + GRF_SOC_CON(7)); 1232 1233 for (i = 0; i < 4; i++) 1234 store_grf_ddrc_con[i] = 1235 mmio_read_32(GRF_BASE + GRF_DDRC0_CON0 + i * 4); 1236 1237 store_grf_io_vsel = mmio_read_32(GRF_BASE + GRF_IO_VSEL); 1238 } 1239 1240 void grf_register_restore(void) 1241 { 1242 int i; 1243 1244 mmio_write_32(GRF_BASE + GRF_SOC_CON(0), 1245 REG_SOC_WMSK | store_grf_soc_con0); 1246 mmio_write_32(GRF_BASE + GRF_SOC_CON(1), 1247 REG_SOC_WMSK | store_grf_soc_con1); 1248 mmio_write_32(GRF_BASE + GRF_SOC_CON(2), 1249 REG_SOC_WMSK | store_grf_soc_con2); 1250 mmio_write_32(GRF_BASE + GRF_SOC_CON(3), 1251 REG_SOC_WMSK | store_grf_soc_con3); 1252 mmio_write_32(GRF_BASE + GRF_SOC_CON(4), 1253 REG_SOC_WMSK | store_grf_soc_con4); 1254 mmio_write_32(GRF_BASE + GRF_SOC_CON(7), 1255 REG_SOC_WMSK | store_grf_soc_con7); 1256 1257 for (i = 0; i < 4; i++) 1258 mmio_write_32(GRF_BASE + GRF_DDRC0_CON0 + i * 4, 1259 REG_SOC_WMSK | store_grf_ddrc_con[i]); 1260 1261 mmio_write_32(GRF_BASE + GRF_IO_VSEL, REG_SOC_WMSK | store_grf_io_vsel); 1262 } 1263 1264 void cru_register_save(void) 1265 { 1266 int i; 1267 1268 for (i = 0; i <= CRU_SDIO0_CON1; i = i + 4) 1269 store_cru[i / 4] = mmio_read_32(CRU_BASE + i); 1270 } 1271 1272 void cru_register_restore(void) 1273 { 1274 int i; 1275 1276 for (i = 0; i <= CRU_SDIO0_CON1; i = i + 4) { 1277 1278 /* 1279 * since DPLL, CRU_CLKSEL_CON6 have been restore in 1280 * dmc_resume, ABPLL will resote later, so skip them 1281 */ 1282 if ((i == CRU_CLKSEL_CON6) || 1283 (i >= CRU_PLL_CON(ABPLL_ID, 0) && 1284 i <= CRU_PLL_CON(DPLL_ID, 5))) 1285 continue; 1286 1287 if ((i == CRU_PLL_CON(ALPLL_ID, 2)) || 1288 (i == CRU_PLL_CON(CPLL_ID, 2)) || 1289 (i == CRU_PLL_CON(GPLL_ID, 2)) || 1290 (i == CRU_PLL_CON(NPLL_ID, 2)) || 1291 (i == CRU_PLL_CON(VPLL_ID, 2))) 1292 mmio_write_32(CRU_BASE + i, store_cru[i / 4]); 1293 /* 1294 * CRU_GLB_CNT_TH and CRU_CLKSEL_CON97~CRU_CLKSEL_CON107 1295 * not need do high 16bit mask 1296 */ 1297 else if ((i > 0x27c && i < 0x2b0) || (i == 0x508)) 1298 mmio_write_32(CRU_BASE + i, store_cru[i / 4]); 1299 else 1300 mmio_write_32(CRU_BASE + i, 1301 REG_SOC_WMSK | store_cru[i / 4]); 1302 } 1303 } 1304 1305 void wdt_register_save(void) 1306 { 1307 int i; 1308 1309 for (i = 0; i < 2; i++) { 1310 store_wdt0[i] = mmio_read_32(WDT0_BASE + i * 4); 1311 store_wdt1[i] = mmio_read_32(WDT1_BASE + i * 4); 1312 } 1313 } 1314 1315 void wdt_register_restore(void) 1316 { 1317 int i; 1318 1319 for (i = 0; i < 2; i++) { 1320 mmio_write_32(WDT0_BASE + i * 4, store_wdt0[i]); 1321 mmio_write_32(WDT1_BASE + i * 4, store_wdt1[i]); 1322 } 1323 } 1324 1325 int rockchip_soc_sys_pwr_dm_suspend(void) 1326 { 1327 uint32_t wait_cnt = 0; 1328 uint32_t status = 0; 1329 1330 ddr_prepare_for_sys_suspend(); 1331 dmc_suspend(); 1332 pmu_scu_b_pwrdn(); 1333 1334 /* need to save usbphy before shutdown PERIHP PD */ 1335 save_usbphy(); 1336 1337 pmu_power_domains_suspend(); 1338 set_hw_idle(BIT(PMU_CLR_CENTER1) | 1339 BIT(PMU_CLR_ALIVE) | 1340 BIT(PMU_CLR_MSCH0) | 1341 BIT(PMU_CLR_MSCH1) | 1342 BIT(PMU_CLR_CCIM0) | 1343 BIT(PMU_CLR_CCIM1) | 1344 BIT(PMU_CLR_CENTER) | 1345 BIT(PMU_CLR_PERILP) | 1346 BIT(PMU_CLR_PERILPM0) | 1347 BIT(PMU_CLR_GIC)); 1348 set_pmu_rsthold(); 1349 sys_slp_config(); 1350 1351 m0_configure_suspend(); 1352 m0_start(); 1353 1354 pmu_sgrf_rst_hld(); 1355 1356 mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1), 1357 ((uintptr_t)&pmu_cpuson_entrypoint >> 1358 CPU_BOOT_ADDR_ALIGN) | CPU_BOOT_ADDR_WMASK); 1359 1360 mmio_write_32(PMU_BASE + PMU_ADB400_CON, 1361 BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) | 1362 BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_SW) | 1363 BIT_WITH_WMSK(PMU_PWRDWN_REQ_GIC2_CORE_B_SW)); 1364 dsb(); 1365 status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) | 1366 BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) | 1367 BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST); 1368 while ((mmio_read_32(PMU_BASE + 1369 PMU_ADB400_ST) & status) != status) { 1370 wait_cnt++; 1371 if (wait_cnt >= MAX_WAIT_COUNT) { 1372 ERROR("%s:wait cluster-b l2(%x)\n", __func__, 1373 mmio_read_32(PMU_BASE + PMU_ADB400_ST)); 1374 panic(); 1375 } 1376 udelay(1); 1377 } 1378 mmio_setbits_32(PMU_BASE + PMU_PWRDN_CON, BIT(PMU_SCU_B_PWRDWN_EN)); 1379 1380 secure_watchdog_disable(); 1381 1382 /* 1383 * Disabling PLLs/PWM/DVFS is approaching WFI which is 1384 * the last steps in suspend. 1385 */ 1386 disable_dvfs_plls(); 1387 disable_pwms(); 1388 disable_nodvfs_plls(); 1389 1390 suspend_apio(); 1391 suspend_gpio(); 1392 suspend_uart(); 1393 grf_register_save(); 1394 cru_register_save(); 1395 wdt_register_save(); 1396 sram_save(); 1397 plat_rockchip_save_gpio(); 1398 1399 return 0; 1400 } 1401 1402 int rockchip_soc_sys_pwr_dm_resume(void) 1403 { 1404 uint32_t wait_cnt = 0; 1405 uint32_t status = 0; 1406 1407 plat_rockchip_restore_gpio(); 1408 wdt_register_restore(); 1409 cru_register_restore(); 1410 grf_register_restore(); 1411 resume_uart(); 1412 resume_apio(); 1413 resume_gpio(); 1414 enable_nodvfs_plls(); 1415 enable_pwms(); 1416 /* PWM regulators take time to come up; give 300us to be safe. */ 1417 udelay(300); 1418 enable_dvfs_plls(); 1419 1420 secure_watchdog_enable(); 1421 secure_sgrf_init(); 1422 secure_sgrf_ddr_rgn_init(); 1423 1424 /* restore clk_ddrc_bpll_src_en gate */ 1425 mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(3), 1426 BITS_WITH_WMASK(clk_ddrc_save, 0xff, 0)); 1427 1428 /* 1429 * The wakeup status is not cleared by itself, we need to clear it 1430 * manually. Otherwise we will alway query some interrupt next time. 1431 * 1432 * NOTE: If the kernel needs to query this, we might want to stash it 1433 * somewhere. 1434 */ 1435 mmio_write_32(PMU_BASE + PMU_WAKEUP_STATUS, 0xffffffff); 1436 mmio_write_32(PMU_BASE + PMU_WKUP_CFG4, 0x00); 1437 1438 mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1), 1439 (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) | 1440 CPU_BOOT_ADDR_WMASK); 1441 1442 mmio_write_32(PMU_BASE + PMU_CCI500_CON, 1443 WMSK_BIT(PMU_CLR_PREQ_CCI500_HW) | 1444 WMSK_BIT(PMU_CLR_QREQ_CCI500_HW) | 1445 WMSK_BIT(PMU_QGATING_CCI500_CFG)); 1446 dsb(); 1447 mmio_clrbits_32(PMU_BASE + PMU_PWRDN_CON, 1448 BIT(PMU_SCU_B_PWRDWN_EN)); 1449 1450 mmio_write_32(PMU_BASE + PMU_ADB400_CON, 1451 WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) | 1452 WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_SW) | 1453 WMSK_BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW) | 1454 WMSK_BIT(PMU_CLR_CORE_L_HW) | 1455 WMSK_BIT(PMU_CLR_CORE_L_2GIC_HW) | 1456 WMSK_BIT(PMU_CLR_GIC2_CORE_L_HW)); 1457 1458 status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) | 1459 BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) | 1460 BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST); 1461 1462 while ((mmio_read_32(PMU_BASE + 1463 PMU_ADB400_ST) & status)) { 1464 wait_cnt++; 1465 if (wait_cnt >= MAX_WAIT_COUNT) { 1466 ERROR("%s:wait cluster-b l2(%x)\n", __func__, 1467 mmio_read_32(PMU_BASE + PMU_ADB400_ST)); 1468 panic(); 1469 } 1470 udelay(1); 1471 } 1472 1473 pmu_sgrf_rst_hld_release(); 1474 pmu_scu_b_pwrup(); 1475 pmu_power_domains_resume(); 1476 1477 restore_abpll(); 1478 restore_pmu_rsthold(); 1479 clr_hw_idle(BIT(PMU_CLR_CENTER1) | 1480 BIT(PMU_CLR_ALIVE) | 1481 BIT(PMU_CLR_MSCH0) | 1482 BIT(PMU_CLR_MSCH1) | 1483 BIT(PMU_CLR_CCIM0) | 1484 BIT(PMU_CLR_CCIM1) | 1485 BIT(PMU_CLR_CENTER) | 1486 BIT(PMU_CLR_PERILP) | 1487 BIT(PMU_CLR_PERILPM0) | 1488 BIT(PMU_CLR_GIC)); 1489 1490 plat_rockchip_gic_cpuif_enable(); 1491 m0_stop(); 1492 1493 restore_usbphy(); 1494 1495 ddr_prepare_for_sys_resume(); 1496 1497 return 0; 1498 } 1499 1500 void __dead2 rockchip_soc_soft_reset(void) 1501 { 1502 struct gpio_info *rst_gpio; 1503 1504 rst_gpio = plat_get_rockchip_gpio_reset(); 1505 1506 if (rst_gpio) { 1507 gpio_set_direction(rst_gpio->index, GPIO_DIR_OUT); 1508 gpio_set_value(rst_gpio->index, rst_gpio->polarity); 1509 } else { 1510 soc_global_soft_reset(); 1511 } 1512 1513 while (1) 1514 ; 1515 } 1516 1517 void __dead2 rockchip_soc_system_off(void) 1518 { 1519 struct gpio_info *poweroff_gpio; 1520 1521 poweroff_gpio = plat_get_rockchip_gpio_poweroff(); 1522 1523 if (poweroff_gpio) { 1524 /* 1525 * if use tsadc over temp pin(GPIO1A6) as shutdown gpio, 1526 * need to set this pin iomux back to gpio function 1527 */ 1528 if (poweroff_gpio->index == TSADC_INT_PIN) { 1529 mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO1A_IOMUX, 1530 GPIO1A6_IOMUX); 1531 } 1532 gpio_set_direction(poweroff_gpio->index, GPIO_DIR_OUT); 1533 gpio_set_value(poweroff_gpio->index, poweroff_gpio->polarity); 1534 } else { 1535 WARN("Do nothing when system off\n"); 1536 } 1537 1538 while (1) 1539 ; 1540 } 1541 1542 void rockchip_plat_mmu_el3(void) 1543 { 1544 size_t sram_size; 1545 1546 /* sram.text size */ 1547 sram_size = (char *)&__bl31_sram_text_end - 1548 (char *)&__bl31_sram_text_start; 1549 mmap_add_region((unsigned long)&__bl31_sram_text_start, 1550 (unsigned long)&__bl31_sram_text_start, 1551 sram_size, MT_MEMORY | MT_RO | MT_SECURE); 1552 1553 /* sram.data size */ 1554 sram_size = (char *)&__bl31_sram_data_end - 1555 (char *)&__bl31_sram_data_start; 1556 mmap_add_region((unsigned long)&__bl31_sram_data_start, 1557 (unsigned long)&__bl31_sram_data_start, 1558 sram_size, MT_MEMORY | MT_RW | MT_SECURE); 1559 1560 sram_size = (char *)&__bl31_sram_stack_end - 1561 (char *)&__bl31_sram_stack_start; 1562 mmap_add_region((unsigned long)&__bl31_sram_stack_start, 1563 (unsigned long)&__bl31_sram_stack_start, 1564 sram_size, MT_MEMORY | MT_RW | MT_SECURE); 1565 1566 sram_size = (char *)&__sram_incbin_end - (char *)&__sram_incbin_start; 1567 mmap_add_region((unsigned long)&__sram_incbin_start, 1568 (unsigned long)&__sram_incbin_start, 1569 sram_size, MT_NON_CACHEABLE | MT_RW | MT_SECURE); 1570 } 1571 1572 void plat_rockchip_pmu_init(void) 1573 { 1574 uint32_t cpu; 1575 1576 rockchip_pd_lock_init(); 1577 1578 /* register requires 32bits mode, switch it to 32 bits */ 1579 cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot; 1580 1581 for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) 1582 cpuson_flags[cpu] = 0; 1583 1584 for (cpu = 0; cpu < PLATFORM_CLUSTER_COUNT; cpu++) 1585 clst_warmboot_data[cpu] = 0; 1586 1587 /* config cpu's warm boot address */ 1588 mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1), 1589 (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) | 1590 CPU_BOOT_ADDR_WMASK); 1591 mmio_write_32(PMU_BASE + PMU_NOC_AUTO_ENA, NOC_AUTO_ENABLE); 1592 1593 /* 1594 * Enable Schmitt trigger for better 32 kHz input signal, which is 1595 * important for suspend/resume reliability among other things. 1596 */ 1597 mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO0A_SMT, GPIO0A0_SMT_ENABLE); 1598 1599 init_pmu_counts(); 1600 1601 nonboot_cpus_off(); 1602 1603 INFO("%s(%d): pd status %x\n", __func__, __LINE__, 1604 mmio_read_32(PMU_BASE + PMU_PWRDN_ST)); 1605 } 1606