1 /* 2 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. 3 * 4 * SPDX-License-Identifier: BSD-3-Clause 5 */ 6 7 #include <arch_helpers.h> 8 #include <assert.h> 9 #include <bakery_lock.h> 10 #include <bl31.h> 11 #include <debug.h> 12 #include <delay_timer.h> 13 #include <dfs.h> 14 #include <errno.h> 15 #include <gicv3.h> 16 #include <gpio.h> 17 #include <m0_ctl.h> 18 #include <mmio.h> 19 #include <plat_params.h> 20 #include <plat_private.h> 21 #include <platform.h> 22 #include <platform_def.h> 23 #include <pmu.h> 24 #include <pmu_com.h> 25 #include <pwm.h> 26 #include <rk3399_def.h> 27 #include <secure.h> 28 #include <soc.h> 29 #include <string.h> 30 #include <suspend.h> 31 32 DEFINE_BAKERY_LOCK(rockchip_pd_lock); 33 34 static uint32_t cpu_warm_boot_addr; 35 static char store_sram[SRAM_BIN_LIMIT + SRAM_TEXT_LIMIT + SRAM_DATA_LIMIT]; 36 static uint32_t store_cru[CRU_SDIO0_CON1 / 4 + 1]; 37 static uint32_t store_usbphy0[7]; 38 static uint32_t store_usbphy1[7]; 39 static uint32_t store_grf_io_vsel; 40 static uint32_t store_grf_soc_con0; 41 static uint32_t store_grf_soc_con1; 42 static uint32_t store_grf_soc_con2; 43 static uint32_t store_grf_soc_con3; 44 static uint32_t store_grf_soc_con4; 45 static uint32_t store_grf_soc_con7; 46 static uint32_t store_grf_ddrc_con[4]; 47 static uint32_t store_wdt0[2]; 48 static uint32_t store_wdt1[2]; 49 static gicv3_dist_ctx_t dist_ctx; 50 static gicv3_redist_ctx_t rdist_ctx; 51 52 /* 53 * There are two ways to powering on or off on core. 54 * 1) Control it power domain into on or off in PMU_PWRDN_CON reg, 55 * it is core_pwr_pd mode 56 * 2) Enable the core power manage in PMU_CORE_PM_CON reg, 57 * then, if the core enter into wfi, it power domain will be 58 * powered off automatically. it is core_pwr_wfi or core_pwr_wfi_int mode 59 * so we need core_pm_cfg_info to distinguish which method be used now. 60 */ 61 62 static uint32_t core_pm_cfg_info[PLATFORM_CORE_COUNT] 63 #if USE_COHERENT_MEM 64 __attribute__ ((section("tzfw_coherent_mem"))) 65 #endif 66 ;/* coheront */ 67 68 static void pmu_bus_idle_req(uint32_t bus, uint32_t state) 69 { 70 uint32_t bus_id = BIT(bus); 71 uint32_t bus_req; 72 uint32_t wait_cnt = 0; 73 uint32_t bus_state, bus_ack; 74 75 if (state) 76 bus_req = BIT(bus); 77 else 78 bus_req = 0; 79 80 mmio_clrsetbits_32(PMU_BASE + PMU_BUS_IDLE_REQ, bus_id, bus_req); 81 82 do { 83 bus_state = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) & bus_id; 84 bus_ack = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK) & bus_id; 85 if (bus_state == bus_req && bus_ack == bus_req) 86 break; 87 88 wait_cnt++; 89 udelay(1); 90 } while (wait_cnt < MAX_WAIT_COUNT); 91 92 if (bus_state != bus_req || bus_ack != bus_req) { 93 INFO("%s:st=%x(%x)\n", __func__, 94 mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST), 95 bus_state); 96 INFO("%s:st=%x(%x)\n", __func__, 97 mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK), 98 bus_ack); 99 } 100 } 101 102 struct pmu_slpdata_s pmu_slpdata; 103 104 static void qos_restore(void) 105 { 106 if (pmu_power_domain_st(PD_GPU) == pmu_pd_on) 107 RESTORE_QOS(pmu_slpdata.gpu_qos, GPU); 108 if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) { 109 RESTORE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0); 110 RESTORE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1); 111 } 112 if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) { 113 RESTORE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0); 114 RESTORE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1); 115 } 116 if (pmu_power_domain_st(PD_VO) == pmu_pd_on) { 117 RESTORE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R); 118 RESTORE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W); 119 RESTORE_QOS(pmu_slpdata.vop_little, VOP_LITTLE); 120 } 121 if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on) 122 RESTORE_QOS(pmu_slpdata.hdcp_qos, HDCP); 123 if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on) 124 RESTORE_QOS(pmu_slpdata.gmac_qos, GMAC); 125 if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) { 126 RESTORE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0); 127 RESTORE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1); 128 } 129 if (pmu_power_domain_st(PD_SD) == pmu_pd_on) 130 RESTORE_QOS(pmu_slpdata.sdmmc_qos, SDMMC); 131 if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on) 132 RESTORE_QOS(pmu_slpdata.emmc_qos, EMMC); 133 if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on) 134 RESTORE_QOS(pmu_slpdata.sdio_qos, SDIO); 135 if (pmu_power_domain_st(PD_GIC) == pmu_pd_on) 136 RESTORE_QOS(pmu_slpdata.gic_qos, GIC); 137 if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) { 138 RESTORE_QOS(pmu_slpdata.rga_r_qos, RGA_R); 139 RESTORE_QOS(pmu_slpdata.rga_w_qos, RGA_W); 140 } 141 if (pmu_power_domain_st(PD_IEP) == pmu_pd_on) 142 RESTORE_QOS(pmu_slpdata.iep_qos, IEP); 143 if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) { 144 RESTORE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0); 145 RESTORE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1); 146 } 147 if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) { 148 RESTORE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0); 149 RESTORE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1); 150 RESTORE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP); 151 } 152 if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) { 153 RESTORE_QOS(pmu_slpdata.dmac0_qos, DMAC0); 154 RESTORE_QOS(pmu_slpdata.dmac1_qos, DMAC1); 155 RESTORE_QOS(pmu_slpdata.dcf_qos, DCF); 156 RESTORE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0); 157 RESTORE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1); 158 RESTORE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP); 159 RESTORE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP); 160 RESTORE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1); 161 } 162 if (pmu_power_domain_st(PD_VDU) == pmu_pd_on) 163 RESTORE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0); 164 if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) { 165 RESTORE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R); 166 RESTORE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W); 167 } 168 } 169 170 static void qos_save(void) 171 { 172 if (pmu_power_domain_st(PD_GPU) == pmu_pd_on) 173 SAVE_QOS(pmu_slpdata.gpu_qos, GPU); 174 if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) { 175 SAVE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0); 176 SAVE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1); 177 } 178 if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) { 179 SAVE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0); 180 SAVE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1); 181 } 182 if (pmu_power_domain_st(PD_VO) == pmu_pd_on) { 183 SAVE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R); 184 SAVE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W); 185 SAVE_QOS(pmu_slpdata.vop_little, VOP_LITTLE); 186 } 187 if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on) 188 SAVE_QOS(pmu_slpdata.hdcp_qos, HDCP); 189 if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on) 190 SAVE_QOS(pmu_slpdata.gmac_qos, GMAC); 191 if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) { 192 SAVE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0); 193 SAVE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1); 194 } 195 if (pmu_power_domain_st(PD_SD) == pmu_pd_on) 196 SAVE_QOS(pmu_slpdata.sdmmc_qos, SDMMC); 197 if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on) 198 SAVE_QOS(pmu_slpdata.emmc_qos, EMMC); 199 if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on) 200 SAVE_QOS(pmu_slpdata.sdio_qos, SDIO); 201 if (pmu_power_domain_st(PD_GIC) == pmu_pd_on) 202 SAVE_QOS(pmu_slpdata.gic_qos, GIC); 203 if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) { 204 SAVE_QOS(pmu_slpdata.rga_r_qos, RGA_R); 205 SAVE_QOS(pmu_slpdata.rga_w_qos, RGA_W); 206 } 207 if (pmu_power_domain_st(PD_IEP) == pmu_pd_on) 208 SAVE_QOS(pmu_slpdata.iep_qos, IEP); 209 if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) { 210 SAVE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0); 211 SAVE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1); 212 } 213 if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) { 214 SAVE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0); 215 SAVE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1); 216 SAVE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP); 217 } 218 if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) { 219 SAVE_QOS(pmu_slpdata.dmac0_qos, DMAC0); 220 SAVE_QOS(pmu_slpdata.dmac1_qos, DMAC1); 221 SAVE_QOS(pmu_slpdata.dcf_qos, DCF); 222 SAVE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0); 223 SAVE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1); 224 SAVE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP); 225 SAVE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP); 226 SAVE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1); 227 } 228 if (pmu_power_domain_st(PD_VDU) == pmu_pd_on) 229 SAVE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0); 230 if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) { 231 SAVE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R); 232 SAVE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W); 233 } 234 } 235 236 static int pmu_set_power_domain(uint32_t pd_id, uint32_t pd_state) 237 { 238 uint32_t state; 239 240 if (pmu_power_domain_st(pd_id) == pd_state) 241 goto out; 242 243 if (pd_state == pmu_pd_on) 244 pmu_power_domain_ctr(pd_id, pd_state); 245 246 state = (pd_state == pmu_pd_off) ? BUS_IDLE : BUS_ACTIVE; 247 248 switch (pd_id) { 249 case PD_GPU: 250 pmu_bus_idle_req(BUS_ID_GPU, state); 251 break; 252 case PD_VIO: 253 pmu_bus_idle_req(BUS_ID_VIO, state); 254 break; 255 case PD_ISP0: 256 pmu_bus_idle_req(BUS_ID_ISP0, state); 257 break; 258 case PD_ISP1: 259 pmu_bus_idle_req(BUS_ID_ISP1, state); 260 break; 261 case PD_VO: 262 pmu_bus_idle_req(BUS_ID_VOPB, state); 263 pmu_bus_idle_req(BUS_ID_VOPL, state); 264 break; 265 case PD_HDCP: 266 pmu_bus_idle_req(BUS_ID_HDCP, state); 267 break; 268 case PD_TCPD0: 269 break; 270 case PD_TCPD1: 271 break; 272 case PD_GMAC: 273 pmu_bus_idle_req(BUS_ID_GMAC, state); 274 break; 275 case PD_CCI: 276 pmu_bus_idle_req(BUS_ID_CCIM0, state); 277 pmu_bus_idle_req(BUS_ID_CCIM1, state); 278 break; 279 case PD_SD: 280 pmu_bus_idle_req(BUS_ID_SD, state); 281 break; 282 case PD_EMMC: 283 pmu_bus_idle_req(BUS_ID_EMMC, state); 284 break; 285 case PD_EDP: 286 pmu_bus_idle_req(BUS_ID_EDP, state); 287 break; 288 case PD_SDIOAUDIO: 289 pmu_bus_idle_req(BUS_ID_SDIOAUDIO, state); 290 break; 291 case PD_GIC: 292 pmu_bus_idle_req(BUS_ID_GIC, state); 293 break; 294 case PD_RGA: 295 pmu_bus_idle_req(BUS_ID_RGA, state); 296 break; 297 case PD_VCODEC: 298 pmu_bus_idle_req(BUS_ID_VCODEC, state); 299 break; 300 case PD_VDU: 301 pmu_bus_idle_req(BUS_ID_VDU, state); 302 break; 303 case PD_IEP: 304 pmu_bus_idle_req(BUS_ID_IEP, state); 305 break; 306 case PD_USB3: 307 pmu_bus_idle_req(BUS_ID_USB3, state); 308 break; 309 case PD_PERIHP: 310 pmu_bus_idle_req(BUS_ID_PERIHP, state); 311 break; 312 default: 313 /* Do nothing in default case */ 314 break; 315 } 316 317 if (pd_state == pmu_pd_off) 318 pmu_power_domain_ctr(pd_id, pd_state); 319 320 out: 321 return 0; 322 } 323 324 static uint32_t pmu_powerdomain_state; 325 326 static void pmu_power_domains_suspend(void) 327 { 328 clk_gate_con_save(); 329 clk_gate_con_disable(); 330 qos_save(); 331 pmu_powerdomain_state = mmio_read_32(PMU_BASE + PMU_PWRDN_ST); 332 pmu_set_power_domain(PD_GPU, pmu_pd_off); 333 pmu_set_power_domain(PD_TCPD0, pmu_pd_off); 334 pmu_set_power_domain(PD_TCPD1, pmu_pd_off); 335 pmu_set_power_domain(PD_VO, pmu_pd_off); 336 pmu_set_power_domain(PD_ISP0, pmu_pd_off); 337 pmu_set_power_domain(PD_ISP1, pmu_pd_off); 338 pmu_set_power_domain(PD_HDCP, pmu_pd_off); 339 pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_off); 340 pmu_set_power_domain(PD_GMAC, pmu_pd_off); 341 pmu_set_power_domain(PD_EDP, pmu_pd_off); 342 pmu_set_power_domain(PD_IEP, pmu_pd_off); 343 pmu_set_power_domain(PD_RGA, pmu_pd_off); 344 pmu_set_power_domain(PD_VCODEC, pmu_pd_off); 345 pmu_set_power_domain(PD_VDU, pmu_pd_off); 346 pmu_set_power_domain(PD_USB3, pmu_pd_off); 347 pmu_set_power_domain(PD_EMMC, pmu_pd_off); 348 pmu_set_power_domain(PD_VIO, pmu_pd_off); 349 pmu_set_power_domain(PD_SD, pmu_pd_off); 350 pmu_set_power_domain(PD_PERIHP, pmu_pd_off); 351 clk_gate_con_restore(); 352 } 353 354 static void pmu_power_domains_resume(void) 355 { 356 clk_gate_con_save(); 357 clk_gate_con_disable(); 358 if (!(pmu_powerdomain_state & BIT(PD_VDU))) 359 pmu_set_power_domain(PD_VDU, pmu_pd_on); 360 if (!(pmu_powerdomain_state & BIT(PD_VCODEC))) 361 pmu_set_power_domain(PD_VCODEC, pmu_pd_on); 362 if (!(pmu_powerdomain_state & BIT(PD_RGA))) 363 pmu_set_power_domain(PD_RGA, pmu_pd_on); 364 if (!(pmu_powerdomain_state & BIT(PD_IEP))) 365 pmu_set_power_domain(PD_IEP, pmu_pd_on); 366 if (!(pmu_powerdomain_state & BIT(PD_EDP))) 367 pmu_set_power_domain(PD_EDP, pmu_pd_on); 368 if (!(pmu_powerdomain_state & BIT(PD_GMAC))) 369 pmu_set_power_domain(PD_GMAC, pmu_pd_on); 370 if (!(pmu_powerdomain_state & BIT(PD_SDIOAUDIO))) 371 pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_on); 372 if (!(pmu_powerdomain_state & BIT(PD_HDCP))) 373 pmu_set_power_domain(PD_HDCP, pmu_pd_on); 374 if (!(pmu_powerdomain_state & BIT(PD_ISP1))) 375 pmu_set_power_domain(PD_ISP1, pmu_pd_on); 376 if (!(pmu_powerdomain_state & BIT(PD_ISP0))) 377 pmu_set_power_domain(PD_ISP0, pmu_pd_on); 378 if (!(pmu_powerdomain_state & BIT(PD_VO))) 379 pmu_set_power_domain(PD_VO, pmu_pd_on); 380 if (!(pmu_powerdomain_state & BIT(PD_TCPD1))) 381 pmu_set_power_domain(PD_TCPD1, pmu_pd_on); 382 if (!(pmu_powerdomain_state & BIT(PD_TCPD0))) 383 pmu_set_power_domain(PD_TCPD0, pmu_pd_on); 384 if (!(pmu_powerdomain_state & BIT(PD_GPU))) 385 pmu_set_power_domain(PD_GPU, pmu_pd_on); 386 if (!(pmu_powerdomain_state & BIT(PD_USB3))) 387 pmu_set_power_domain(PD_USB3, pmu_pd_on); 388 if (!(pmu_powerdomain_state & BIT(PD_EMMC))) 389 pmu_set_power_domain(PD_EMMC, pmu_pd_on); 390 if (!(pmu_powerdomain_state & BIT(PD_VIO))) 391 pmu_set_power_domain(PD_VIO, pmu_pd_on); 392 if (!(pmu_powerdomain_state & BIT(PD_SD))) 393 pmu_set_power_domain(PD_SD, pmu_pd_on); 394 if (!(pmu_powerdomain_state & BIT(PD_PERIHP))) 395 pmu_set_power_domain(PD_PERIHP, pmu_pd_on); 396 qos_restore(); 397 clk_gate_con_restore(); 398 } 399 400 void rk3399_flush_l2_b(void) 401 { 402 uint32_t wait_cnt = 0; 403 404 mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B)); 405 dsb(); 406 407 /* 408 * The Big cluster flush L2 cache took ~4ms by default, give 10ms for 409 * the enough margin. 410 */ 411 while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) & 412 BIT(L2_FLUSHDONE_CLUSTER_B))) { 413 wait_cnt++; 414 udelay(10); 415 if (wait_cnt == 10000 / 10) 416 WARN("L2 cache flush on suspend took longer than 10ms\n"); 417 } 418 419 mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B)); 420 } 421 422 static void pmu_scu_b_pwrdn(void) 423 { 424 uint32_t wait_cnt = 0; 425 426 if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) & 427 (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) != 428 (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) { 429 ERROR("%s: not all cpus is off\n", __func__); 430 return; 431 } 432 433 rk3399_flush_l2_b(); 434 435 mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG)); 436 437 while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) & 438 BIT(STANDBY_BY_WFIL2_CLUSTER_B))) { 439 wait_cnt++; 440 udelay(1); 441 if (wait_cnt >= MAX_WAIT_COUNT) 442 ERROR("%s:wait cluster-b l2(%x)\n", __func__, 443 mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST)); 444 } 445 } 446 447 static void pmu_scu_b_pwrup(void) 448 { 449 mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG)); 450 } 451 452 static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id) 453 { 454 assert(cpu_id < PLATFORM_CORE_COUNT); 455 return core_pm_cfg_info[cpu_id]; 456 } 457 458 static inline void set_cpus_pwr_domain_cfg_info(uint32_t cpu_id, uint32_t value) 459 { 460 assert(cpu_id < PLATFORM_CORE_COUNT); 461 core_pm_cfg_info[cpu_id] = value; 462 #if !USE_COHERENT_MEM 463 flush_dcache_range((uintptr_t)&core_pm_cfg_info[cpu_id], 464 sizeof(uint32_t)); 465 #endif 466 } 467 468 static int cpus_power_domain_on(uint32_t cpu_id) 469 { 470 uint32_t cfg_info; 471 uint32_t cpu_pd = PD_CPUL0 + cpu_id; 472 /* 473 * There are two ways to powering on or off on core. 474 * 1) Control it power domain into on or off in PMU_PWRDN_CON reg 475 * 2) Enable the core power manage in PMU_CORE_PM_CON reg, 476 * then, if the core enter into wfi, it power domain will be 477 * powered off automatically. 478 */ 479 480 cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id); 481 482 if (cfg_info == core_pwr_pd) { 483 /* disable core_pm cfg */ 484 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 485 CORES_PM_DISABLE); 486 /* if the cores have be on, power off it firstly */ 487 if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) { 488 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 0); 489 pmu_power_domain_ctr(cpu_pd, pmu_pd_off); 490 } 491 492 pmu_power_domain_ctr(cpu_pd, pmu_pd_on); 493 } else { 494 if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) { 495 WARN("%s: cpu%d is not in off,!\n", __func__, cpu_id); 496 return -EINVAL; 497 } 498 499 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 500 BIT(core_pm_sft_wakeup_en)); 501 dsb(); 502 } 503 504 return 0; 505 } 506 507 static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg) 508 { 509 uint32_t cpu_pd; 510 uint32_t core_pm_value; 511 512 cpu_pd = PD_CPUL0 + cpu_id; 513 if (pmu_power_domain_st(cpu_pd) == pmu_pd_off) 514 return 0; 515 516 if (pd_cfg == core_pwr_pd) { 517 if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK)) 518 return -EINVAL; 519 520 /* disable core_pm cfg */ 521 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 522 CORES_PM_DISABLE); 523 524 set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg); 525 pmu_power_domain_ctr(cpu_pd, pmu_pd_off); 526 } else { 527 set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg); 528 529 core_pm_value = BIT(core_pm_en); 530 if (pd_cfg == core_pwr_wfi_int) 531 core_pm_value |= BIT(core_pm_int_wakeup_en); 532 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 533 core_pm_value); 534 dsb(); 535 } 536 537 return 0; 538 } 539 540 static inline void clst_pwr_domain_suspend(plat_local_state_t lvl_state) 541 { 542 uint32_t cpu_id = plat_my_core_pos(); 543 uint32_t pll_id, clst_st_msk, clst_st_chk_msk, pmu_st; 544 545 assert(cpu_id < PLATFORM_CORE_COUNT); 546 547 if (lvl_state == PLAT_MAX_OFF_STATE) { 548 if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT) { 549 pll_id = ALPLL_ID; 550 clst_st_msk = CLST_L_CPUS_MSK; 551 } else { 552 pll_id = ABPLL_ID; 553 clst_st_msk = CLST_B_CPUS_MSK << 554 PLATFORM_CLUSTER0_CORE_COUNT; 555 } 556 557 clst_st_chk_msk = clst_st_msk & ~(BIT(cpu_id)); 558 559 pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST); 560 561 pmu_st &= clst_st_msk; 562 563 if (pmu_st == clst_st_chk_msk) { 564 mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3), 565 PLL_SLOW_MODE); 566 567 clst_warmboot_data[pll_id] = PMU_CLST_RET; 568 569 pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST); 570 pmu_st &= clst_st_msk; 571 if (pmu_st == clst_st_chk_msk) 572 return; 573 /* 574 * it is mean that others cpu is up again, 575 * we must resume the cfg at once. 576 */ 577 mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3), 578 PLL_NOMAL_MODE); 579 clst_warmboot_data[pll_id] = 0; 580 } 581 } 582 } 583 584 static int clst_pwr_domain_resume(plat_local_state_t lvl_state) 585 { 586 uint32_t cpu_id = plat_my_core_pos(); 587 uint32_t pll_id, pll_st; 588 589 assert(cpu_id < PLATFORM_CORE_COUNT); 590 591 if (lvl_state == PLAT_MAX_OFF_STATE) { 592 if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT) 593 pll_id = ALPLL_ID; 594 else 595 pll_id = ABPLL_ID; 596 597 pll_st = mmio_read_32(CRU_BASE + CRU_PLL_CON(pll_id, 3)) >> 598 PLL_MODE_SHIFT; 599 600 if (pll_st != NORMAL_MODE) { 601 WARN("%s: clst (%d) is in error mode (%d)\n", 602 __func__, pll_id, pll_st); 603 return -1; 604 } 605 } 606 607 return 0; 608 } 609 610 static void nonboot_cpus_off(void) 611 { 612 uint32_t boot_cpu, cpu; 613 614 boot_cpu = plat_my_core_pos(); 615 616 /* turn off noboot cpus */ 617 for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) { 618 if (cpu == boot_cpu) 619 continue; 620 cpus_power_domain_off(cpu, core_pwr_pd); 621 } 622 } 623 624 int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint) 625 { 626 uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr); 627 628 assert(cpu_id < PLATFORM_CORE_COUNT); 629 assert(cpuson_flags[cpu_id] == 0); 630 cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG; 631 cpuson_entry_point[cpu_id] = entrypoint; 632 dsb(); 633 634 cpus_power_domain_on(cpu_id); 635 636 return PSCI_E_SUCCESS; 637 } 638 639 int rockchip_soc_cores_pwr_dm_off(void) 640 { 641 uint32_t cpu_id = plat_my_core_pos(); 642 643 cpus_power_domain_off(cpu_id, core_pwr_wfi); 644 645 return PSCI_E_SUCCESS; 646 } 647 648 int rockchip_soc_hlvl_pwr_dm_off(uint32_t lvl, 649 plat_local_state_t lvl_state) 650 { 651 if (lvl == MPIDR_AFFLVL1) { 652 clst_pwr_domain_suspend(lvl_state); 653 } 654 655 return PSCI_E_SUCCESS; 656 } 657 658 int rockchip_soc_cores_pwr_dm_suspend(void) 659 { 660 uint32_t cpu_id = plat_my_core_pos(); 661 662 assert(cpu_id < PLATFORM_CORE_COUNT); 663 assert(cpuson_flags[cpu_id] == 0); 664 cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN; 665 cpuson_entry_point[cpu_id] = plat_get_sec_entrypoint(); 666 dsb(); 667 668 cpus_power_domain_off(cpu_id, core_pwr_wfi_int); 669 670 return PSCI_E_SUCCESS; 671 } 672 673 int rockchip_soc_hlvl_pwr_dm_suspend(uint32_t lvl, plat_local_state_t lvl_state) 674 { 675 if (lvl == MPIDR_AFFLVL1) { 676 clst_pwr_domain_suspend(lvl_state); 677 } 678 679 return PSCI_E_SUCCESS; 680 } 681 682 int rockchip_soc_cores_pwr_dm_on_finish(void) 683 { 684 uint32_t cpu_id = plat_my_core_pos(); 685 686 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 687 CORES_PM_DISABLE); 688 return PSCI_E_SUCCESS; 689 } 690 691 int rockchip_soc_hlvl_pwr_dm_on_finish(uint32_t lvl, 692 plat_local_state_t lvl_state) 693 { 694 if (lvl == MPIDR_AFFLVL1) { 695 clst_pwr_domain_resume(lvl_state); 696 } 697 698 return PSCI_E_SUCCESS; 699 } 700 701 int rockchip_soc_cores_pwr_dm_resume(void) 702 { 703 uint32_t cpu_id = plat_my_core_pos(); 704 705 /* Disable core_pm */ 706 mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), CORES_PM_DISABLE); 707 708 return PSCI_E_SUCCESS; 709 } 710 711 int rockchip_soc_hlvl_pwr_dm_resume(uint32_t lvl, plat_local_state_t lvl_state) 712 { 713 if (lvl == MPIDR_AFFLVL1) { 714 clst_pwr_domain_resume(lvl_state); 715 } 716 717 return PSCI_E_SUCCESS; 718 } 719 720 /** 721 * init_pmu_counts - Init timing counts in the PMU register area 722 * 723 * At various points when we power up or down parts of the system we need 724 * a delay to wait for power / clocks to become stable. The PMU has counters 725 * to help software do the delay properly. Basically, it works like this: 726 * - Software sets up counter values 727 * - When software turns on something in the PMU, the counter kicks off 728 * - The hardware sets a bit automatically when the counter has finished and 729 * software knows that the initialization is done. 730 * 731 * It's software's job to setup these counters. The hardware power on default 732 * for these settings is conservative, setting everything to 0x5dc0 733 * (750 ms in 32 kHz counts or 1 ms in 24 MHz counts). 734 * 735 * Note that some of these counters are only really used at suspend/resume 736 * time (for instance, that's the only time we turn off/on the oscillator) and 737 * others are used during normal runtime (like turning on/off a CPU or GPU) but 738 * it doesn't hurt to init everything at boot. 739 * 740 * Also note that these counters can run off the 32 kHz clock or the 24 MHz 741 * clock. While the 24 MHz clock can give us more precision, it's not always 742 * available (like when we turn the oscillator off at sleep time). The 743 * pmu_use_lf (lf: low freq) is available in power mode. Current understanding 744 * is that counts work like this: 745 * IF (pmu_use_lf == 0) || (power_mode_en == 0) 746 * use the 24M OSC for counts 747 * ELSE 748 * use the 32K OSC for counts 749 * 750 * Notes: 751 * - There is a separate bit for the PMU called PMU_24M_EN_CFG. At the moment 752 * we always keep that 0. This apparently choose between using the PLL as 753 * the source for the PMU vs. the 24M clock. If we ever set it to 1 we 754 * should consider how it affects these counts (if at all). 755 * - The power_mode_en is documented to auto-clear automatically when we leave 756 * "power mode". That's why most clocks are on 24M. Only timings used when 757 * in "power mode" are 32k. 758 * - In some cases the kernel may override these counts. 759 * 760 * The PMU_STABLE_CNT / PMU_OSC_CNT / PMU_PLLLOCK_CNT are important CNTs 761 * in power mode, we need to ensure that they are available. 762 */ 763 static void init_pmu_counts(void) 764 { 765 /* COUNTS FOR INSIDE POWER MODE */ 766 767 /* 768 * From limited testing, need PMU stable >= 2ms, but go overkill 769 * and choose 30 ms to match testing on past SoCs. Also let 770 * OSC have 30 ms for stabilization. 771 */ 772 mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_32K_CNT_MS(30)); 773 mmio_write_32(PMU_BASE + PMU_OSC_CNT, CYCL_32K_CNT_MS(30)); 774 775 /* Unclear what these should be; try 3 ms */ 776 mmio_write_32(PMU_BASE + PMU_WAKEUP_RST_CLR_CNT, CYCL_32K_CNT_MS(3)); 777 778 /* Unclear what this should be, but set the default explicitly */ 779 mmio_write_32(PMU_BASE + PMU_TIMEOUT_CNT, 0x5dc0); 780 781 /* COUNTS FOR OUTSIDE POWER MODE */ 782 783 /* Put something sorta conservative here until we know better */ 784 mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_24M_CNT_MS(3)); 785 mmio_write_32(PMU_BASE + PMU_DDRIO_PWRON_CNT, CYCL_24M_CNT_MS(1)); 786 mmio_write_32(PMU_BASE + PMU_CENTER_PWRDN_CNT, CYCL_24M_CNT_MS(1)); 787 mmio_write_32(PMU_BASE + PMU_CENTER_PWRUP_CNT, CYCL_24M_CNT_MS(1)); 788 789 /* 790 * when we enable PMU_CLR_PERILP, it will shut down the SRAM, but 791 * M0 code run in SRAM, and we need it to check whether cpu enter 792 * FSM status, so we must wait M0 finish their code and enter WFI, 793 * then we can shutdown SRAM, according FSM order: 794 * ST_NORMAL->..->ST_SCU_L_PWRDN->..->ST_CENTER_PWRDN->ST_PERILP_PWRDN 795 * we can add delay when shutdown ST_SCU_L_PWRDN to guarantee M0 get 796 * the FSM status and enter WFI, then enable PMU_CLR_PERILP. 797 */ 798 mmio_write_32(PMU_BASE + PMU_SCU_L_PWRDN_CNT, CYCL_24M_CNT_MS(5)); 799 mmio_write_32(PMU_BASE + PMU_SCU_L_PWRUP_CNT, CYCL_24M_CNT_US(1)); 800 801 /* 802 * Set CPU/GPU to 1 us. 803 * 804 * NOTE: Even though ATF doesn't configure the GPU we'll still setup 805 * counts here. After all ATF controls all these other bits and also 806 * chooses which clock these counters use. 807 */ 808 mmio_write_32(PMU_BASE + PMU_SCU_B_PWRDN_CNT, CYCL_24M_CNT_US(1)); 809 mmio_write_32(PMU_BASE + PMU_SCU_B_PWRUP_CNT, CYCL_24M_CNT_US(1)); 810 mmio_write_32(PMU_BASE + PMU_GPU_PWRDN_CNT, CYCL_24M_CNT_US(1)); 811 mmio_write_32(PMU_BASE + PMU_GPU_PWRUP_CNT, CYCL_24M_CNT_US(1)); 812 } 813 814 static uint32_t clk_ddrc_save; 815 816 static void sys_slp_config(void) 817 { 818 uint32_t slp_mode_cfg = 0; 819 820 /* keep enabling clk_ddrc_bpll_src_en gate for DDRC */ 821 clk_ddrc_save = mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(3)); 822 mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(3), WMSK_BIT(1)); 823 824 prepare_abpll_for_ddrctrl(); 825 sram_func_set_ddrctl_pll(ABPLL_ID); 826 827 mmio_write_32(GRF_BASE + GRF_SOC_CON4, CCI_FORCE_WAKEUP); 828 mmio_write_32(PMU_BASE + PMU_CCI500_CON, 829 BIT_WITH_WMSK(PMU_CLR_PREQ_CCI500_HW) | 830 BIT_WITH_WMSK(PMU_CLR_QREQ_CCI500_HW) | 831 BIT_WITH_WMSK(PMU_QGATING_CCI500_CFG)); 832 833 mmio_write_32(PMU_BASE + PMU_ADB400_CON, 834 BIT_WITH_WMSK(PMU_CLR_CORE_L_HW) | 835 BIT_WITH_WMSK(PMU_CLR_CORE_L_2GIC_HW) | 836 BIT_WITH_WMSK(PMU_CLR_GIC2_CORE_L_HW)); 837 838 slp_mode_cfg = BIT(PMU_PWR_MODE_EN) | 839 BIT(PMU_WKUP_RST_EN) | 840 BIT(PMU_INPUT_CLAMP_EN) | 841 BIT(PMU_POWER_OFF_REQ_CFG) | 842 BIT(PMU_CPU0_PD_EN) | 843 BIT(PMU_L2_FLUSH_EN) | 844 BIT(PMU_L2_IDLE_EN) | 845 BIT(PMU_SCU_PD_EN) | 846 BIT(PMU_CCI_PD_EN) | 847 BIT(PMU_CLK_CORE_SRC_GATE_EN) | 848 BIT(PMU_ALIVE_USE_LF) | 849 BIT(PMU_SREF0_ENTER_EN) | 850 BIT(PMU_SREF1_ENTER_EN) | 851 BIT(PMU_DDRC0_GATING_EN) | 852 BIT(PMU_DDRC1_GATING_EN) | 853 BIT(PMU_DDRIO0_RET_EN) | 854 BIT(PMU_DDRIO0_RET_DE_REQ) | 855 BIT(PMU_DDRIO1_RET_EN) | 856 BIT(PMU_DDRIO1_RET_DE_REQ) | 857 BIT(PMU_CENTER_PD_EN) | 858 BIT(PMU_PERILP_PD_EN) | 859 BIT(PMU_CLK_PERILP_SRC_GATE_EN) | 860 BIT(PMU_PLL_PD_EN) | 861 BIT(PMU_CLK_CENTER_SRC_GATE_EN) | 862 BIT(PMU_OSC_DIS) | 863 BIT(PMU_PMU_USE_LF); 864 865 mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, BIT(PMU_GPIO_WKUP_EN)); 866 mmio_write_32(PMU_BASE + PMU_PWRMODE_CON, slp_mode_cfg); 867 868 mmio_write_32(PMU_BASE + PMU_PLL_CON, PLL_PD_HW); 869 mmio_write_32(PMUGRF_BASE + PMUGRF_SOC_CON0, EXTERNAL_32K); 870 mmio_write_32(PMUGRF_BASE, IOMUX_CLK_32K); /* 32k iomux */ 871 } 872 873 static void set_hw_idle(uint32_t hw_idle) 874 { 875 mmio_setbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle); 876 } 877 878 static void clr_hw_idle(uint32_t hw_idle) 879 { 880 mmio_clrbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle); 881 } 882 883 static uint32_t iomux_status[12]; 884 static uint32_t pull_mode_status[12]; 885 static uint32_t gpio_direction[3]; 886 static uint32_t gpio_2_4_clk_gate; 887 888 static void suspend_apio(void) 889 { 890 struct apio_info *suspend_apio; 891 int i; 892 893 suspend_apio = plat_get_rockchip_suspend_apio(); 894 895 if (!suspend_apio) 896 return; 897 898 /* save gpio2 ~ gpio4 iomux and pull mode */ 899 for (i = 0; i < 12; i++) { 900 iomux_status[i] = mmio_read_32(GRF_BASE + 901 GRF_GPIO2A_IOMUX + i * 4); 902 pull_mode_status[i] = mmio_read_32(GRF_BASE + 903 GRF_GPIO2A_P + i * 4); 904 } 905 906 /* store gpio2 ~ gpio4 clock gate state */ 907 gpio_2_4_clk_gate = (mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(31)) >> 908 PCLK_GPIO2_GATE_SHIFT) & 0x07; 909 910 /* enable gpio2 ~ gpio4 clock gate */ 911 mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31), 912 BITS_WITH_WMASK(0, 0x07, PCLK_GPIO2_GATE_SHIFT)); 913 914 /* save gpio2 ~ gpio4 direction */ 915 gpio_direction[0] = mmio_read_32(GPIO2_BASE + 0x04); 916 gpio_direction[1] = mmio_read_32(GPIO3_BASE + 0x04); 917 gpio_direction[2] = mmio_read_32(GPIO4_BASE + 0x04); 918 919 /* apio1 charge gpio3a0 ~ gpio3c7 */ 920 if (suspend_apio->apio1) { 921 922 /* set gpio3a0 ~ gpio3c7 iomux to gpio */ 923 mmio_write_32(GRF_BASE + GRF_GPIO3A_IOMUX, 924 REG_SOC_WMSK | GRF_IOMUX_GPIO); 925 mmio_write_32(GRF_BASE + GRF_GPIO3B_IOMUX, 926 REG_SOC_WMSK | GRF_IOMUX_GPIO); 927 mmio_write_32(GRF_BASE + GRF_GPIO3C_IOMUX, 928 REG_SOC_WMSK | GRF_IOMUX_GPIO); 929 930 /* set gpio3a0 ~ gpio3c7 pull mode to pull none */ 931 mmio_write_32(GRF_BASE + GRF_GPIO3A_P, REG_SOC_WMSK | 0); 932 mmio_write_32(GRF_BASE + GRF_GPIO3B_P, REG_SOC_WMSK | 0); 933 mmio_write_32(GRF_BASE + GRF_GPIO3C_P, REG_SOC_WMSK | 0); 934 935 /* set gpio3a0 ~ gpio3c7 to input */ 936 mmio_clrbits_32(GPIO3_BASE + 0x04, 0x00ffffff); 937 } 938 939 /* apio2 charge gpio2a0 ~ gpio2b4 */ 940 if (suspend_apio->apio2) { 941 942 /* set gpio2a0 ~ gpio2b4 iomux to gpio */ 943 mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX, 944 REG_SOC_WMSK | GRF_IOMUX_GPIO); 945 mmio_write_32(GRF_BASE + GRF_GPIO2B_IOMUX, 946 REG_SOC_WMSK | GRF_IOMUX_GPIO); 947 948 /* set gpio2a0 ~ gpio2b4 pull mode to pull none */ 949 mmio_write_32(GRF_BASE + GRF_GPIO2A_P, REG_SOC_WMSK | 0); 950 mmio_write_32(GRF_BASE + GRF_GPIO2B_P, REG_SOC_WMSK | 0); 951 952 /* set gpio2a0 ~ gpio2b4 to input */ 953 mmio_clrbits_32(GPIO2_BASE + 0x04, 0x00001fff); 954 } 955 956 /* apio3 charge gpio2c0 ~ gpio2d4*/ 957 if (suspend_apio->apio3) { 958 959 /* set gpio2a0 ~ gpio2b4 iomux to gpio */ 960 mmio_write_32(GRF_BASE + GRF_GPIO2C_IOMUX, 961 REG_SOC_WMSK | GRF_IOMUX_GPIO); 962 mmio_write_32(GRF_BASE + GRF_GPIO2D_IOMUX, 963 REG_SOC_WMSK | GRF_IOMUX_GPIO); 964 965 /* set gpio2c0 ~ gpio2d4 pull mode to pull none */ 966 mmio_write_32(GRF_BASE + GRF_GPIO2C_P, REG_SOC_WMSK | 0); 967 mmio_write_32(GRF_BASE + GRF_GPIO2D_P, REG_SOC_WMSK | 0); 968 969 /* set gpio2c0 ~ gpio2d4 to input */ 970 mmio_clrbits_32(GPIO2_BASE + 0x04, 0x1fff0000); 971 } 972 973 /* apio4 charge gpio4c0 ~ gpio4c7, gpio4d0 ~ gpio4d6 */ 974 if (suspend_apio->apio4) { 975 976 /* set gpio4c0 ~ gpio4d6 iomux to gpio */ 977 mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX, 978 REG_SOC_WMSK | GRF_IOMUX_GPIO); 979 mmio_write_32(GRF_BASE + GRF_GPIO4D_IOMUX, 980 REG_SOC_WMSK | GRF_IOMUX_GPIO); 981 982 /* set gpio4c0 ~ gpio4d6 pull mode to pull none */ 983 mmio_write_32(GRF_BASE + GRF_GPIO4C_P, REG_SOC_WMSK | 0); 984 mmio_write_32(GRF_BASE + GRF_GPIO4D_P, REG_SOC_WMSK | 0); 985 986 /* set gpio4c0 ~ gpio4d6 to input */ 987 mmio_clrbits_32(GPIO4_BASE + 0x04, 0x7fff0000); 988 } 989 990 /* apio5 charge gpio3d0 ~ gpio3d7, gpio4a0 ~ gpio4a7*/ 991 if (suspend_apio->apio5) { 992 /* set gpio3d0 ~ gpio4a7 iomux to gpio */ 993 mmio_write_32(GRF_BASE + GRF_GPIO3D_IOMUX, 994 REG_SOC_WMSK | GRF_IOMUX_GPIO); 995 mmio_write_32(GRF_BASE + GRF_GPIO4A_IOMUX, 996 REG_SOC_WMSK | GRF_IOMUX_GPIO); 997 998 /* set gpio3d0 ~ gpio4a7 pull mode to pull none */ 999 mmio_write_32(GRF_BASE + GRF_GPIO3D_P, REG_SOC_WMSK | 0); 1000 mmio_write_32(GRF_BASE + GRF_GPIO4A_P, REG_SOC_WMSK | 0); 1001 1002 /* set gpio4c0 ~ gpio4d6 to input */ 1003 mmio_clrbits_32(GPIO3_BASE + 0x04, 0xff000000); 1004 mmio_clrbits_32(GPIO4_BASE + 0x04, 0x000000ff); 1005 } 1006 } 1007 1008 static void resume_apio(void) 1009 { 1010 struct apio_info *suspend_apio; 1011 int i; 1012 1013 suspend_apio = plat_get_rockchip_suspend_apio(); 1014 1015 if (!suspend_apio) 1016 return; 1017 1018 for (i = 0; i < 12; i++) { 1019 mmio_write_32(GRF_BASE + GRF_GPIO2A_P + i * 4, 1020 REG_SOC_WMSK | pull_mode_status[i]); 1021 mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX + i * 4, 1022 REG_SOC_WMSK | iomux_status[i]); 1023 } 1024 1025 /* set gpio2 ~ gpio4 direction back to store value */ 1026 mmio_write_32(GPIO2_BASE + 0x04, gpio_direction[0]); 1027 mmio_write_32(GPIO3_BASE + 0x04, gpio_direction[1]); 1028 mmio_write_32(GPIO4_BASE + 0x04, gpio_direction[2]); 1029 1030 /* set gpio2 ~ gpio4 clock gate back to store value */ 1031 mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31), 1032 BITS_WITH_WMASK(gpio_2_4_clk_gate, 0x07, 1033 PCLK_GPIO2_GATE_SHIFT)); 1034 } 1035 1036 static void suspend_gpio(void) 1037 { 1038 struct gpio_info *suspend_gpio; 1039 uint32_t count; 1040 int i; 1041 1042 suspend_gpio = plat_get_rockchip_suspend_gpio(&count); 1043 1044 for (i = 0; i < count; i++) { 1045 gpio_set_value(suspend_gpio[i].index, suspend_gpio[i].polarity); 1046 gpio_set_direction(suspend_gpio[i].index, GPIO_DIR_OUT); 1047 udelay(1); 1048 } 1049 } 1050 1051 static void resume_gpio(void) 1052 { 1053 struct gpio_info *suspend_gpio; 1054 uint32_t count; 1055 int i; 1056 1057 suspend_gpio = plat_get_rockchip_suspend_gpio(&count); 1058 1059 for (i = count - 1; i >= 0; i--) { 1060 gpio_set_value(suspend_gpio[i].index, 1061 !suspend_gpio[i].polarity); 1062 gpio_set_direction(suspend_gpio[i].index, GPIO_DIR_OUT); 1063 udelay(1); 1064 } 1065 } 1066 1067 void sram_save(void) 1068 { 1069 size_t text_size = (char *)&__bl31_sram_text_real_end - 1070 (char *)&__bl31_sram_text_start; 1071 size_t data_size = (char *)&__bl31_sram_data_real_end - 1072 (char *)&__bl31_sram_data_start; 1073 size_t incbin_size = (char *)&__sram_incbin_real_end - 1074 (char *)&__sram_incbin_start; 1075 1076 memcpy(&store_sram[0], &__bl31_sram_text_start, text_size); 1077 memcpy(&store_sram[text_size], &__bl31_sram_data_start, data_size); 1078 memcpy(&store_sram[text_size + data_size], &__sram_incbin_start, 1079 incbin_size); 1080 } 1081 1082 void sram_restore(void) 1083 { 1084 size_t text_size = (char *)&__bl31_sram_text_real_end - 1085 (char *)&__bl31_sram_text_start; 1086 size_t data_size = (char *)&__bl31_sram_data_real_end - 1087 (char *)&__bl31_sram_data_start; 1088 size_t incbin_size = (char *)&__sram_incbin_real_end - 1089 (char *)&__sram_incbin_start; 1090 1091 memcpy(&__bl31_sram_text_start, &store_sram[0], text_size); 1092 memcpy(&__bl31_sram_data_start, &store_sram[text_size], data_size); 1093 memcpy(&__sram_incbin_start, &store_sram[text_size + data_size], 1094 incbin_size); 1095 } 1096 1097 struct uart_debug { 1098 uint32_t uart_dll; 1099 uint32_t uart_dlh; 1100 uint32_t uart_ier; 1101 uint32_t uart_fcr; 1102 uint32_t uart_mcr; 1103 uint32_t uart_lcr; 1104 }; 1105 1106 #define UART_DLL 0x00 1107 #define UART_DLH 0x04 1108 #define UART_IER 0x04 1109 #define UART_FCR 0x08 1110 #define UART_LCR 0x0c 1111 #define UART_MCR 0x10 1112 #define UARTSRR 0x88 1113 1114 #define UART_RESET BIT(0) 1115 #define UARTFCR_FIFOEN BIT(0) 1116 #define RCVR_FIFO_RESET BIT(1) 1117 #define XMIT_FIFO_RESET BIT(2) 1118 #define DIAGNOSTIC_MODE BIT(4) 1119 #define UARTLCR_DLAB BIT(7) 1120 1121 static struct uart_debug uart_save; 1122 1123 void suspend_uart(void) 1124 { 1125 uart_save.uart_lcr = mmio_read_32(PLAT_RK_UART_BASE + UART_LCR); 1126 uart_save.uart_ier = mmio_read_32(PLAT_RK_UART_BASE + UART_IER); 1127 uart_save.uart_mcr = mmio_read_32(PLAT_RK_UART_BASE + UART_MCR); 1128 mmio_write_32(PLAT_RK_UART_BASE + UART_LCR, 1129 uart_save.uart_lcr | UARTLCR_DLAB); 1130 uart_save.uart_dll = mmio_read_32(PLAT_RK_UART_BASE + UART_DLL); 1131 uart_save.uart_dlh = mmio_read_32(PLAT_RK_UART_BASE + UART_DLH); 1132 mmio_write_32(PLAT_RK_UART_BASE + UART_LCR, uart_save.uart_lcr); 1133 } 1134 1135 void resume_uart(void) 1136 { 1137 uint32_t uart_lcr; 1138 1139 mmio_write_32(PLAT_RK_UART_BASE + UARTSRR, 1140 XMIT_FIFO_RESET | RCVR_FIFO_RESET | UART_RESET); 1141 1142 uart_lcr = mmio_read_32(PLAT_RK_UART_BASE + UART_LCR); 1143 mmio_write_32(PLAT_RK_UART_BASE + UART_MCR, DIAGNOSTIC_MODE); 1144 mmio_write_32(PLAT_RK_UART_BASE + UART_LCR, uart_lcr | UARTLCR_DLAB); 1145 mmio_write_32(PLAT_RK_UART_BASE + UART_DLL, uart_save.uart_dll); 1146 mmio_write_32(PLAT_RK_UART_BASE + UART_DLH, uart_save.uart_dlh); 1147 mmio_write_32(PLAT_RK_UART_BASE + UART_LCR, uart_save.uart_lcr); 1148 mmio_write_32(PLAT_RK_UART_BASE + UART_IER, uart_save.uart_ier); 1149 mmio_write_32(PLAT_RK_UART_BASE + UART_FCR, UARTFCR_FIFOEN); 1150 mmio_write_32(PLAT_RK_UART_BASE + UART_MCR, uart_save.uart_mcr); 1151 } 1152 1153 void save_usbphy(void) 1154 { 1155 store_usbphy0[0] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL0); 1156 store_usbphy0[1] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL2); 1157 store_usbphy0[2] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL3); 1158 store_usbphy0[3] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL12); 1159 store_usbphy0[4] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL13); 1160 store_usbphy0[5] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL15); 1161 store_usbphy0[6] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL16); 1162 1163 store_usbphy1[0] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL0); 1164 store_usbphy1[1] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL2); 1165 store_usbphy1[2] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL3); 1166 store_usbphy1[3] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL12); 1167 store_usbphy1[4] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL13); 1168 store_usbphy1[5] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL15); 1169 store_usbphy1[6] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL16); 1170 } 1171 1172 void restore_usbphy(void) 1173 { 1174 mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL0, 1175 REG_SOC_WMSK | store_usbphy0[0]); 1176 mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL2, 1177 REG_SOC_WMSK | store_usbphy0[1]); 1178 mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL3, 1179 REG_SOC_WMSK | store_usbphy0[2]); 1180 mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL12, 1181 REG_SOC_WMSK | store_usbphy0[3]); 1182 mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL13, 1183 REG_SOC_WMSK | store_usbphy0[4]); 1184 mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL15, 1185 REG_SOC_WMSK | store_usbphy0[5]); 1186 mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL16, 1187 REG_SOC_WMSK | store_usbphy0[6]); 1188 1189 mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL0, 1190 REG_SOC_WMSK | store_usbphy1[0]); 1191 mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL2, 1192 REG_SOC_WMSK | store_usbphy1[1]); 1193 mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL3, 1194 REG_SOC_WMSK | store_usbphy1[2]); 1195 mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL12, 1196 REG_SOC_WMSK | store_usbphy1[3]); 1197 mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL13, 1198 REG_SOC_WMSK | store_usbphy1[4]); 1199 mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL15, 1200 REG_SOC_WMSK | store_usbphy1[5]); 1201 mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL16, 1202 REG_SOC_WMSK | store_usbphy1[6]); 1203 } 1204 1205 void grf_register_save(void) 1206 { 1207 int i; 1208 1209 store_grf_soc_con0 = mmio_read_32(GRF_BASE + GRF_SOC_CON(0)); 1210 store_grf_soc_con1 = mmio_read_32(GRF_BASE + GRF_SOC_CON(1)); 1211 store_grf_soc_con2 = mmio_read_32(GRF_BASE + GRF_SOC_CON(2)); 1212 store_grf_soc_con3 = mmio_read_32(GRF_BASE + GRF_SOC_CON(3)); 1213 store_grf_soc_con4 = mmio_read_32(GRF_BASE + GRF_SOC_CON(4)); 1214 store_grf_soc_con7 = mmio_read_32(GRF_BASE + GRF_SOC_CON(7)); 1215 1216 for (i = 0; i < 4; i++) 1217 store_grf_ddrc_con[i] = 1218 mmio_read_32(GRF_BASE + GRF_DDRC0_CON0 + i * 4); 1219 1220 store_grf_io_vsel = mmio_read_32(GRF_BASE + GRF_IO_VSEL); 1221 } 1222 1223 void grf_register_restore(void) 1224 { 1225 int i; 1226 1227 mmio_write_32(GRF_BASE + GRF_SOC_CON(0), 1228 REG_SOC_WMSK | store_grf_soc_con0); 1229 mmio_write_32(GRF_BASE + GRF_SOC_CON(1), 1230 REG_SOC_WMSK | store_grf_soc_con1); 1231 mmio_write_32(GRF_BASE + GRF_SOC_CON(2), 1232 REG_SOC_WMSK | store_grf_soc_con2); 1233 mmio_write_32(GRF_BASE + GRF_SOC_CON(3), 1234 REG_SOC_WMSK | store_grf_soc_con3); 1235 mmio_write_32(GRF_BASE + GRF_SOC_CON(4), 1236 REG_SOC_WMSK | store_grf_soc_con4); 1237 mmio_write_32(GRF_BASE + GRF_SOC_CON(7), 1238 REG_SOC_WMSK | store_grf_soc_con7); 1239 1240 for (i = 0; i < 4; i++) 1241 mmio_write_32(GRF_BASE + GRF_DDRC0_CON0 + i * 4, 1242 REG_SOC_WMSK | store_grf_ddrc_con[i]); 1243 1244 mmio_write_32(GRF_BASE + GRF_IO_VSEL, REG_SOC_WMSK | store_grf_io_vsel); 1245 } 1246 1247 void cru_register_save(void) 1248 { 1249 int i; 1250 1251 for (i = 0; i <= CRU_SDIO0_CON1; i = i + 4) 1252 store_cru[i / 4] = mmio_read_32(CRU_BASE + i); 1253 } 1254 1255 void cru_register_restore(void) 1256 { 1257 int i; 1258 1259 for (i = 0; i <= CRU_SDIO0_CON1; i = i + 4) { 1260 1261 /* 1262 * since DPLL, CRU_CLKSEL_CON6 have been restore in 1263 * dmc_resume, ABPLL will resote later, so skip them 1264 */ 1265 if ((i == CRU_CLKSEL_CON6) || 1266 (i >= CRU_PLL_CON(ABPLL_ID, 0) && 1267 i <= CRU_PLL_CON(DPLL_ID, 5))) 1268 continue; 1269 1270 if ((i == CRU_PLL_CON(ALPLL_ID, 2)) || 1271 (i == CRU_PLL_CON(CPLL_ID, 2)) || 1272 (i == CRU_PLL_CON(GPLL_ID, 2)) || 1273 (i == CRU_PLL_CON(NPLL_ID, 2)) || 1274 (i == CRU_PLL_CON(VPLL_ID, 2))) 1275 mmio_write_32(CRU_BASE + i, store_cru[i / 4]); 1276 /* 1277 * CRU_GLB_CNT_TH and CRU_CLKSEL_CON97~CRU_CLKSEL_CON107 1278 * not need do high 16bit mask 1279 */ 1280 else if ((i > 0x27c && i < 0x2b0) || (i == 0x508)) 1281 mmio_write_32(CRU_BASE + i, store_cru[i / 4]); 1282 else 1283 mmio_write_32(CRU_BASE + i, 1284 REG_SOC_WMSK | store_cru[i / 4]); 1285 } 1286 } 1287 1288 void wdt_register_save(void) 1289 { 1290 int i; 1291 1292 for (i = 0; i < 2; i++) { 1293 store_wdt0[i] = mmio_read_32(WDT0_BASE + i * 4); 1294 store_wdt1[i] = mmio_read_32(WDT1_BASE + i * 4); 1295 } 1296 } 1297 1298 void wdt_register_restore(void) 1299 { 1300 int i; 1301 1302 for (i = 1; i >= 0; i--) { 1303 mmio_write_32(WDT0_BASE + i * 4, store_wdt0[i]); 1304 mmio_write_32(WDT1_BASE + i * 4, store_wdt1[i]); 1305 } 1306 1307 /* write 0x76 to cnt_restart to keep watchdog alive */ 1308 mmio_write_32(WDT0_BASE + 0x0c, 0x76); 1309 mmio_write_32(WDT1_BASE + 0x0c, 0x76); 1310 } 1311 1312 int rockchip_soc_sys_pwr_dm_suspend(void) 1313 { 1314 uint32_t wait_cnt = 0; 1315 uint32_t status = 0; 1316 1317 ddr_prepare_for_sys_suspend(); 1318 dmc_suspend(); 1319 pmu_scu_b_pwrdn(); 1320 1321 gicv3_rdistif_save(plat_my_core_pos(), &rdist_ctx); 1322 gicv3_distif_save(&dist_ctx); 1323 1324 /* need to save usbphy before shutdown PERIHP PD */ 1325 save_usbphy(); 1326 1327 pmu_power_domains_suspend(); 1328 set_hw_idle(BIT(PMU_CLR_CENTER1) | 1329 BIT(PMU_CLR_ALIVE) | 1330 BIT(PMU_CLR_MSCH0) | 1331 BIT(PMU_CLR_MSCH1) | 1332 BIT(PMU_CLR_CCIM0) | 1333 BIT(PMU_CLR_CCIM1) | 1334 BIT(PMU_CLR_CENTER) | 1335 BIT(PMU_CLR_PERILP) | 1336 BIT(PMU_CLR_PERILPM0) | 1337 BIT(PMU_CLR_GIC)); 1338 set_pmu_rsthold(); 1339 sys_slp_config(); 1340 1341 m0_configure_execute_addr(M0PMU_BINCODE_BASE); 1342 m0_start(); 1343 1344 pmu_sgrf_rst_hld(); 1345 1346 mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1), 1347 ((uintptr_t)&pmu_cpuson_entrypoint >> 1348 CPU_BOOT_ADDR_ALIGN) | CPU_BOOT_ADDR_WMASK); 1349 1350 mmio_write_32(PMU_BASE + PMU_ADB400_CON, 1351 BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) | 1352 BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_SW) | 1353 BIT_WITH_WMSK(PMU_PWRDWN_REQ_GIC2_CORE_B_SW)); 1354 dsb(); 1355 status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) | 1356 BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) | 1357 BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST); 1358 while ((mmio_read_32(PMU_BASE + 1359 PMU_ADB400_ST) & status) != status) { 1360 wait_cnt++; 1361 if (wait_cnt >= MAX_WAIT_COUNT) { 1362 ERROR("%s:wait cluster-b l2(%x)\n", __func__, 1363 mmio_read_32(PMU_BASE + PMU_ADB400_ST)); 1364 panic(); 1365 } 1366 udelay(1); 1367 } 1368 mmio_setbits_32(PMU_BASE + PMU_PWRDN_CON, BIT(PMU_SCU_B_PWRDWN_EN)); 1369 1370 wdt_register_save(); 1371 secure_watchdog_gate(); 1372 1373 /* 1374 * Disabling PLLs/PWM/DVFS is approaching WFI which is 1375 * the last steps in suspend. 1376 */ 1377 disable_dvfs_plls(); 1378 disable_pwms(); 1379 disable_nodvfs_plls(); 1380 1381 suspend_apio(); 1382 suspend_gpio(); 1383 suspend_uart(); 1384 grf_register_save(); 1385 cru_register_save(); 1386 sram_save(); 1387 plat_rockchip_save_gpio(); 1388 1389 return 0; 1390 } 1391 1392 int rockchip_soc_sys_pwr_dm_resume(void) 1393 { 1394 uint32_t wait_cnt = 0; 1395 uint32_t status = 0; 1396 1397 plat_rockchip_restore_gpio(); 1398 cru_register_restore(); 1399 grf_register_restore(); 1400 wdt_register_restore(); 1401 resume_uart(); 1402 resume_apio(); 1403 resume_gpio(); 1404 enable_nodvfs_plls(); 1405 enable_pwms(); 1406 /* PWM regulators take time to come up; give 300us to be safe. */ 1407 udelay(300); 1408 enable_dvfs_plls(); 1409 1410 secure_sgrf_init(); 1411 secure_sgrf_ddr_rgn_init(); 1412 1413 /* restore clk_ddrc_bpll_src_en gate */ 1414 mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(3), 1415 BITS_WITH_WMASK(clk_ddrc_save, 0xff, 0)); 1416 1417 /* 1418 * The wakeup status is not cleared by itself, we need to clear it 1419 * manually. Otherwise we will alway query some interrupt next time. 1420 * 1421 * NOTE: If the kernel needs to query this, we might want to stash it 1422 * somewhere. 1423 */ 1424 mmio_write_32(PMU_BASE + PMU_WAKEUP_STATUS, 0xffffffff); 1425 mmio_write_32(PMU_BASE + PMU_WKUP_CFG4, 0x00); 1426 1427 mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1), 1428 (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) | 1429 CPU_BOOT_ADDR_WMASK); 1430 1431 mmio_write_32(PMU_BASE + PMU_CCI500_CON, 1432 WMSK_BIT(PMU_CLR_PREQ_CCI500_HW) | 1433 WMSK_BIT(PMU_CLR_QREQ_CCI500_HW) | 1434 WMSK_BIT(PMU_QGATING_CCI500_CFG)); 1435 dsb(); 1436 mmio_clrbits_32(PMU_BASE + PMU_PWRDN_CON, 1437 BIT(PMU_SCU_B_PWRDWN_EN)); 1438 1439 mmio_write_32(PMU_BASE + PMU_ADB400_CON, 1440 WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) | 1441 WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_SW) | 1442 WMSK_BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW) | 1443 WMSK_BIT(PMU_CLR_CORE_L_HW) | 1444 WMSK_BIT(PMU_CLR_CORE_L_2GIC_HW) | 1445 WMSK_BIT(PMU_CLR_GIC2_CORE_L_HW)); 1446 1447 status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) | 1448 BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) | 1449 BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST); 1450 1451 while ((mmio_read_32(PMU_BASE + 1452 PMU_ADB400_ST) & status)) { 1453 wait_cnt++; 1454 if (wait_cnt >= MAX_WAIT_COUNT) { 1455 ERROR("%s:wait cluster-b l2(%x)\n", __func__, 1456 mmio_read_32(PMU_BASE + PMU_ADB400_ST)); 1457 panic(); 1458 } 1459 udelay(1); 1460 } 1461 1462 pmu_scu_b_pwrup(); 1463 pmu_power_domains_resume(); 1464 1465 restore_abpll(); 1466 clr_hw_idle(BIT(PMU_CLR_CENTER1) | 1467 BIT(PMU_CLR_ALIVE) | 1468 BIT(PMU_CLR_MSCH0) | 1469 BIT(PMU_CLR_MSCH1) | 1470 BIT(PMU_CLR_CCIM0) | 1471 BIT(PMU_CLR_CCIM1) | 1472 BIT(PMU_CLR_CENTER) | 1473 BIT(PMU_CLR_PERILP) | 1474 BIT(PMU_CLR_PERILPM0) | 1475 BIT(PMU_CLR_GIC)); 1476 1477 gicv3_distif_init_restore(&dist_ctx); 1478 gicv3_rdistif_init_restore(plat_my_core_pos(), &rdist_ctx); 1479 plat_rockchip_gic_cpuif_enable(); 1480 m0_stop(); 1481 1482 restore_usbphy(); 1483 1484 ddr_prepare_for_sys_resume(); 1485 1486 return 0; 1487 } 1488 1489 void __dead2 rockchip_soc_soft_reset(void) 1490 { 1491 struct gpio_info *rst_gpio; 1492 1493 rst_gpio = plat_get_rockchip_gpio_reset(); 1494 1495 if (rst_gpio) { 1496 gpio_set_direction(rst_gpio->index, GPIO_DIR_OUT); 1497 gpio_set_value(rst_gpio->index, rst_gpio->polarity); 1498 } else { 1499 soc_global_soft_reset(); 1500 } 1501 1502 while (1) 1503 ; 1504 } 1505 1506 void __dead2 rockchip_soc_system_off(void) 1507 { 1508 struct gpio_info *poweroff_gpio; 1509 1510 poweroff_gpio = plat_get_rockchip_gpio_poweroff(); 1511 1512 if (poweroff_gpio) { 1513 /* 1514 * if use tsadc over temp pin(GPIO1A6) as shutdown gpio, 1515 * need to set this pin iomux back to gpio function 1516 */ 1517 if (poweroff_gpio->index == TSADC_INT_PIN) { 1518 mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO1A_IOMUX, 1519 GPIO1A6_IOMUX); 1520 } 1521 gpio_set_direction(poweroff_gpio->index, GPIO_DIR_OUT); 1522 gpio_set_value(poweroff_gpio->index, poweroff_gpio->polarity); 1523 } else { 1524 WARN("Do nothing when system off\n"); 1525 } 1526 1527 while (1) 1528 ; 1529 } 1530 1531 void rockchip_plat_mmu_el3(void) 1532 { 1533 size_t sram_size; 1534 1535 /* sram.text size */ 1536 sram_size = (char *)&__bl31_sram_text_end - 1537 (char *)&__bl31_sram_text_start; 1538 mmap_add_region((unsigned long)&__bl31_sram_text_start, 1539 (unsigned long)&__bl31_sram_text_start, 1540 sram_size, MT_MEMORY | MT_RO | MT_SECURE); 1541 1542 /* sram.data size */ 1543 sram_size = (char *)&__bl31_sram_data_end - 1544 (char *)&__bl31_sram_data_start; 1545 mmap_add_region((unsigned long)&__bl31_sram_data_start, 1546 (unsigned long)&__bl31_sram_data_start, 1547 sram_size, MT_MEMORY | MT_RW | MT_SECURE); 1548 1549 sram_size = (char *)&__bl31_sram_stack_end - 1550 (char *)&__bl31_sram_stack_start; 1551 mmap_add_region((unsigned long)&__bl31_sram_stack_start, 1552 (unsigned long)&__bl31_sram_stack_start, 1553 sram_size, MT_MEMORY | MT_RW | MT_SECURE); 1554 1555 sram_size = (char *)&__sram_incbin_end - (char *)&__sram_incbin_start; 1556 mmap_add_region((unsigned long)&__sram_incbin_start, 1557 (unsigned long)&__sram_incbin_start, 1558 sram_size, MT_NON_CACHEABLE | MT_RW | MT_SECURE); 1559 } 1560 1561 void plat_rockchip_pmu_init(void) 1562 { 1563 uint32_t cpu; 1564 1565 rockchip_pd_lock_init(); 1566 1567 /* register requires 32bits mode, switch it to 32 bits */ 1568 cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot; 1569 1570 for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) 1571 cpuson_flags[cpu] = 0; 1572 1573 for (cpu = 0; cpu < PLATFORM_CLUSTER_COUNT; cpu++) 1574 clst_warmboot_data[cpu] = 0; 1575 1576 /* config cpu's warm boot address */ 1577 mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1), 1578 (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) | 1579 CPU_BOOT_ADDR_WMASK); 1580 mmio_write_32(PMU_BASE + PMU_NOC_AUTO_ENA, NOC_AUTO_ENABLE); 1581 1582 /* 1583 * Enable Schmitt trigger for better 32 kHz input signal, which is 1584 * important for suspend/resume reliability among other things. 1585 */ 1586 mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO0A_SMT, GPIO0A0_SMT_ENABLE); 1587 1588 init_pmu_counts(); 1589 1590 nonboot_cpus_off(); 1591 1592 INFO("%s(%d): pd status %x\n", __func__, __LINE__, 1593 mmio_read_32(PMU_BASE + PMU_PWRDN_ST)); 1594 } 1595