/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bakery_lock.h>
#include <bl31.h>
#include <debug.h>
#include <delay_timer.h>
#include <dfs.h>
#include <errno.h>
#include <gpio.h>
#include <m0_ctl.h>
#include <mmio.h>
#include <plat_params.h>
#include <plat_private.h>
#include <platform.h>
#include <platform_def.h>
#include <pmu.h>
#include <pmu_com.h>
#include <pwm.h>
#include <rk3399_def.h>
#include <secure.h>
#include <soc.h>
#include <string.h>
#include <suspend.h>

DEFINE_BAKERY_LOCK(rockchip_pd_lock);

static uint32_t cpu_warm_boot_addr;
static char store_sram[SRAM_BIN_LIMIT + SRAM_TEXT_LIMIT + SRAM_DATA_LIMIT];
static uint32_t store_cru[CRU_SDIO0_CON1 / 4];
static uint32_t store_usbphy0[7];
static uint32_t store_usbphy1[7];
static uint32_t store_grf_io_vsel;
static uint32_t store_grf_soc_con0;
static uint32_t store_grf_soc_con1;
static uint32_t store_grf_soc_con2;
static uint32_t store_grf_soc_con3;
static uint32_t store_grf_soc_con4;
static uint32_t store_grf_soc_con7;
static uint32_t store_grf_ddrc_con[4];
static uint32_t store_wdt0[2];
static uint32_t store_wdt1[2];

/*
 * There are two ways to power a core on or off:
 * 1) Switch its power domain on or off via the PMU_PWRDN_CON register;
 *    this is the core_pwr_pd mode.
 * 2) Enable core power management via the PMU_CORE_PM_CON register;
 *    the power domain is then powered off automatically once the core
 *    enters WFI. This is the core_pwr_wfi or core_pwr_wfi_int mode.
 * core_pm_cfg_info records which method is currently in use per core.
 */
static uint32_t core_pm_cfg_info[PLATFORM_CORE_COUNT]
#if USE_COHERENT_MEM
__attribute__ ((section("tzfw_coherent_mem")))
#endif
;/* coherent */
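/*
 * Idle-request handshake used by pmu_bus_idle_req() below: writing a
 * request bit in PMU_BUS_IDLE_REQ asks the PMU to idle (or un-idle) one
 * bus interface; the PMU mirrors the resulting state in PMU_BUS_IDLE_ST
 * and acknowledges the request in PMU_BUS_IDLE_ACK, so the poll loop
 * waits for both to match the request before giving up after
 * MAX_WAIT_COUNT iterations.
 */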
static void pmu_bus_idle_req(uint32_t bus, uint32_t state)
{
	uint32_t bus_id = BIT(bus);
	uint32_t bus_req;
	uint32_t wait_cnt = 0;
	uint32_t bus_state, bus_ack;

	if (state)
		bus_req = BIT(bus);
	else
		bus_req = 0;

	mmio_clrsetbits_32(PMU_BASE + PMU_BUS_IDLE_REQ, bus_id, bus_req);

	do {
		bus_state = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST) & bus_id;
		bus_ack = mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK) & bus_id;
		wait_cnt++;
	} while ((bus_state != bus_req || bus_ack != bus_req) &&
		 (wait_cnt < MAX_WAIT_COUNT));

	if (bus_state != bus_req || bus_ack != bus_req) {
		INFO("%s:st=%x(%x)\n", __func__,
		     mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ST),
		     bus_state);
		INFO("%s:ack=%x(%x)\n", __func__,
		     mmio_read_32(PMU_BASE + PMU_BUS_IDLE_ACK),
		     bus_ack);
	}
}

struct pmu_slpdata_s pmu_slpdata;

static void qos_save(void)
{
	if (pmu_power_domain_st(PD_GPU) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.gpu_qos, GPU);
	if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0);
		SAVE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1);
	}
	if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0);
		SAVE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1);
	}
	if (pmu_power_domain_st(PD_VO) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R);
		SAVE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W);
		SAVE_QOS(pmu_slpdata.vop_little, VOP_LITTLE);
	}
	if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.hdcp_qos, HDCP);
	if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.gmac_qos, GMAC);
	if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0);
		SAVE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1);
	}
	if (pmu_power_domain_st(PD_SD) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.sdmmc_qos, SDMMC);
	if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.emmc_qos, EMMC);
	if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.sdio_qos, SDIO);
	if (pmu_power_domain_st(PD_GIC) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.gic_qos, GIC);
	if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.rga_r_qos, RGA_R);
		SAVE_QOS(pmu_slpdata.rga_w_qos, RGA_W);
	}
	if (pmu_power_domain_st(PD_IEP) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.iep_qos, IEP);
	if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0);
		SAVE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1);
	}
	if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0);
		SAVE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1);
		SAVE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP);
	}
	if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.dmac0_qos, DMAC0);
		SAVE_QOS(pmu_slpdata.dmac1_qos, DMAC1);
		SAVE_QOS(pmu_slpdata.dcf_qos, DCF);
		SAVE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0);
		SAVE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1);
		SAVE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP);
		SAVE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP);
		SAVE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1);
	}
	if (pmu_power_domain_st(PD_VDU) == pmu_pd_on)
		SAVE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0);
	if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) {
		SAVE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R);
		SAVE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W);
	}
}
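/*
 * qos_restore() below mirrors qos_save(): the NoC QoS registers sit
 * inside their owning power domains and lose their contents when those
 * domains go down, so they are captured before the domains are powered
 * off at suspend and written back on resume for every domain that is
 * powered on.
 */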
static void qos_restore(void)
{
	if (pmu_power_domain_st(PD_GPU) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.gpu_qos, GPU);
	if (pmu_power_domain_st(PD_ISP0) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.isp0_m0_qos, ISP0_M0);
		RESTORE_QOS(pmu_slpdata.isp0_m1_qos, ISP0_M1);
	}
	if (pmu_power_domain_st(PD_ISP1) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.isp1_m0_qos, ISP1_M0);
		RESTORE_QOS(pmu_slpdata.isp1_m1_qos, ISP1_M1);
	}
	if (pmu_power_domain_st(PD_VO) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.vop_big_r, VOP_BIG_R);
		RESTORE_QOS(pmu_slpdata.vop_big_w, VOP_BIG_W);
		RESTORE_QOS(pmu_slpdata.vop_little, VOP_LITTLE);
	}
	if (pmu_power_domain_st(PD_HDCP) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.hdcp_qos, HDCP);
	if (pmu_power_domain_st(PD_GMAC) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.gmac_qos, GMAC);
	if (pmu_power_domain_st(PD_CCI) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.cci_m0_qos, CCI_M0);
		RESTORE_QOS(pmu_slpdata.cci_m1_qos, CCI_M1);
	}
	if (pmu_power_domain_st(PD_SD) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.sdmmc_qos, SDMMC);
	if (pmu_power_domain_st(PD_EMMC) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.emmc_qos, EMMC);
	if (pmu_power_domain_st(PD_SDIOAUDIO) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.sdio_qos, SDIO);
	if (pmu_power_domain_st(PD_GIC) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.gic_qos, GIC);
	if (pmu_power_domain_st(PD_RGA) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.rga_r_qos, RGA_R);
		RESTORE_QOS(pmu_slpdata.rga_w_qos, RGA_W);
	}
	if (pmu_power_domain_st(PD_IEP) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.iep_qos, IEP);
	if (pmu_power_domain_st(PD_USB3) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.usb_otg0_qos, USB_OTG0);
		RESTORE_QOS(pmu_slpdata.usb_otg1_qos, USB_OTG1);
	}
	if (pmu_power_domain_st(PD_PERIHP) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.usb_host0_qos, USB_HOST0);
		RESTORE_QOS(pmu_slpdata.usb_host1_qos, USB_HOST1);
		RESTORE_QOS(pmu_slpdata.perihp_nsp_qos, PERIHP_NSP);
	}
	if (pmu_power_domain_st(PD_PERILP) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.dmac0_qos, DMAC0);
		RESTORE_QOS(pmu_slpdata.dmac1_qos, DMAC1);
		RESTORE_QOS(pmu_slpdata.dcf_qos, DCF);
		RESTORE_QOS(pmu_slpdata.crypto0_qos, CRYPTO0);
		RESTORE_QOS(pmu_slpdata.crypto1_qos, CRYPTO1);
		RESTORE_QOS(pmu_slpdata.perilp_nsp_qos, PERILP_NSP);
		RESTORE_QOS(pmu_slpdata.perilpslv_nsp_qos, PERILPSLV_NSP);
		RESTORE_QOS(pmu_slpdata.peri_cm1_qos, PERI_CM1);
	}
	if (pmu_power_domain_st(PD_VDU) == pmu_pd_on)
		RESTORE_QOS(pmu_slpdata.video_m0_qos, VIDEO_M0);
	if (pmu_power_domain_st(PD_VCODEC) == pmu_pd_on) {
		RESTORE_QOS(pmu_slpdata.video_m1_r_qos, VIDEO_M1_R);
		RESTORE_QOS(pmu_slpdata.video_m1_w_qos, VIDEO_M1_W);
	}
}

static int pmu_set_power_domain(uint32_t pd_id, uint32_t pd_state)
{
	uint32_t state;

	if (pmu_power_domain_st(pd_id) == pd_state)
		goto out;

	/* when powering a domain on, switch it on before un-idling its bus */
	if (pd_state == pmu_pd_on)
		pmu_power_domain_ctr(pd_id, pd_state);

	state = (pd_state == pmu_pd_off) ? BUS_IDLE : BUS_ACTIVE;

	switch (pd_id) {
	case PD_GPU:
		pmu_bus_idle_req(BUS_ID_GPU, state);
		break;
	case PD_VIO:
		pmu_bus_idle_req(BUS_ID_VIO, state);
		break;
	case PD_ISP0:
		pmu_bus_idle_req(BUS_ID_ISP0, state);
		break;
	case PD_ISP1:
		pmu_bus_idle_req(BUS_ID_ISP1, state);
		break;
	case PD_VO:
		pmu_bus_idle_req(BUS_ID_VOPB, state);
		pmu_bus_idle_req(BUS_ID_VOPL, state);
		break;
	case PD_HDCP:
		pmu_bus_idle_req(BUS_ID_HDCP, state);
		break;
	case PD_TCPD0:
		break;
	case PD_TCPD1:
		break;
	case PD_GMAC:
		pmu_bus_idle_req(BUS_ID_GMAC, state);
		break;
	case PD_CCI:
		pmu_bus_idle_req(BUS_ID_CCIM0, state);
		pmu_bus_idle_req(BUS_ID_CCIM1, state);
		break;
	case PD_SD:
		pmu_bus_idle_req(BUS_ID_SD, state);
		break;
	case PD_EMMC:
		pmu_bus_idle_req(BUS_ID_EMMC, state);
		break;
	case PD_EDP:
		pmu_bus_idle_req(BUS_ID_EDP, state);
		break;
	case PD_SDIOAUDIO:
		pmu_bus_idle_req(BUS_ID_SDIOAUDIO, state);
		break;
	case PD_GIC:
		pmu_bus_idle_req(BUS_ID_GIC, state);
		break;
	case PD_RGA:
		pmu_bus_idle_req(BUS_ID_RGA, state);
		break;
	case PD_VCODEC:
		pmu_bus_idle_req(BUS_ID_VCODEC, state);
		break;
	case PD_VDU:
		pmu_bus_idle_req(BUS_ID_VDU, state);
		break;
	case PD_IEP:
		pmu_bus_idle_req(BUS_ID_IEP, state);
		break;
	case PD_USB3:
		pmu_bus_idle_req(BUS_ID_USB3, state);
		break;
	case PD_PERIHP:
		pmu_bus_idle_req(BUS_ID_PERIHP, state);
		break;
	default:
		break;
	}

	/* when powering a domain off, idle its bus first, then switch it off */
	if (pd_state == pmu_pd_off)
		pmu_power_domain_ctr(pd_id, pd_state);

out:
	return 0;
}
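/*
 * Snapshot of PMU_PWRDN_ST taken at suspend time. A set bit means the
 * corresponding power domain was already down before suspend, so the
 * resume path only re-powers domains whose bit is clear, i.e. domains
 * that were actually on when the system went to sleep.
 */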
static uint32_t pmu_powerdomain_state;

static void pmu_power_domains_suspend(void)
{
	clk_gate_con_save();
	clk_gate_con_disable();
	qos_save();
	pmu_powerdomain_state = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);
	pmu_set_power_domain(PD_GPU, pmu_pd_off);
	pmu_set_power_domain(PD_TCPD0, pmu_pd_off);
	pmu_set_power_domain(PD_TCPD1, pmu_pd_off);
	pmu_set_power_domain(PD_VO, pmu_pd_off);
	pmu_set_power_domain(PD_ISP0, pmu_pd_off);
	pmu_set_power_domain(PD_ISP1, pmu_pd_off);
	pmu_set_power_domain(PD_HDCP, pmu_pd_off);
	pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_off);
	pmu_set_power_domain(PD_GMAC, pmu_pd_off);
	pmu_set_power_domain(PD_EDP, pmu_pd_off);
	pmu_set_power_domain(PD_IEP, pmu_pd_off);
	pmu_set_power_domain(PD_RGA, pmu_pd_off);
	pmu_set_power_domain(PD_VCODEC, pmu_pd_off);
	pmu_set_power_domain(PD_VDU, pmu_pd_off);
	pmu_set_power_domain(PD_USB3, pmu_pd_off);
	pmu_set_power_domain(PD_EMMC, pmu_pd_off);
	pmu_set_power_domain(PD_VIO, pmu_pd_off);
	pmu_set_power_domain(PD_SD, pmu_pd_off);
	pmu_set_power_domain(PD_PERIHP, pmu_pd_off);
	clk_gate_con_restore();
}
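/*
 * Resume walks the domains in roughly the reverse order of suspend and,
 * using the snapshot above, only powers up the domains that were on
 * before suspend.
 */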
static void pmu_power_domains_resume(void)
{
	clk_gate_con_save();
	clk_gate_con_disable();
	if (!(pmu_powerdomain_state & BIT(PD_VDU)))
		pmu_set_power_domain(PD_VDU, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_VCODEC)))
		pmu_set_power_domain(PD_VCODEC, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_RGA)))
		pmu_set_power_domain(PD_RGA, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_IEP)))
		pmu_set_power_domain(PD_IEP, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_EDP)))
		pmu_set_power_domain(PD_EDP, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_GMAC)))
		pmu_set_power_domain(PD_GMAC, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_SDIOAUDIO)))
		pmu_set_power_domain(PD_SDIOAUDIO, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_HDCP)))
		pmu_set_power_domain(PD_HDCP, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_ISP1)))
		pmu_set_power_domain(PD_ISP1, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_ISP0)))
		pmu_set_power_domain(PD_ISP0, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_VO)))
		pmu_set_power_domain(PD_VO, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_TCPD1)))
		pmu_set_power_domain(PD_TCPD1, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_TCPD0)))
		pmu_set_power_domain(PD_TCPD0, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_GPU)))
		pmu_set_power_domain(PD_GPU, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_USB3)))
		pmu_set_power_domain(PD_USB3, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_EMMC)))
		pmu_set_power_domain(PD_EMMC, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_VIO)))
		pmu_set_power_domain(PD_VIO, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_SD)))
		pmu_set_power_domain(PD_SD, pmu_pd_on);
	if (!(pmu_powerdomain_state & BIT(PD_PERIHP)))
		pmu_set_power_domain(PD_PERIHP, pmu_pd_on);
	qos_restore();
	clk_gate_con_restore();
}

void rk3399_flush_l2_b(void)
{
	uint32_t wait_cnt = 0;

	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B));
	dsb();

	/*
	 * Flushing the big cluster's L2 cache takes ~4 ms by default; allow
	 * 10 ms to leave enough margin.
	 */
	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
		 BIT(L2_FLUSHDONE_CLUSTER_B))) {
		wait_cnt++;
		udelay(10);
		if (wait_cnt == 10000 / 10)
			WARN("L2 cache flush on suspend took longer than 10ms\n");
	}

	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B));
}

static void pmu_scu_b_pwrdn(void)
{
	uint32_t wait_cnt = 0;

	if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) &
	     (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) !=
	    (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) {
		ERROR("%s: not all CPUs are off\n", __func__);
		return;
	}

	rk3399_flush_l2_b();

	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG));

	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
		 BIT(STANDBY_BY_WFIL2_CLUSTER_B))) {
		wait_cnt++;
		if (wait_cnt >= MAX_WAIT_COUNT)
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}
}

static void pmu_scu_b_pwrup(void)
{
	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG));
}

static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id)
{
	assert(cpu_id < PLATFORM_CORE_COUNT);
	return core_pm_cfg_info[cpu_id];
}

static inline void set_cpus_pwr_domain_cfg_info(uint32_t cpu_id, uint32_t value)
{
	assert(cpu_id < PLATFORM_CORE_COUNT);
	core_pm_cfg_info[cpu_id] = value;
#if !USE_COHERENT_MEM
	flush_dcache_range((uintptr_t)&core_pm_cfg_info[cpu_id],
			   sizeof(uint32_t));
#endif
}
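/*
 * The PMU_CORE_PM_CON bits used below, as inferred from this code (and
 * assumed to match the rk3399 TRM): core_pm_en arms the automatic
 * power-down of the core's domain on WFI, core_pm_int_wakeup_en
 * additionally allows an interrupt to wake the core back up, and
 * core_pm_sft_wakeup_en requests a software-initiated wake-up when
 * powering a WFI-managed core back on.
 */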
static int cpus_power_domain_on(uint32_t cpu_id)
{
	uint32_t cfg_info;
	uint32_t cpu_pd = PD_CPUL0 + cpu_id;
	/*
	 * There are two ways to power a core on or off:
	 * 1) Switch its power domain on or off via the PMU_PWRDN_CON reg.
	 * 2) Enable core power management via the PMU_CORE_PM_CON reg;
	 *    the power domain is then powered off automatically when the
	 *    core enters WFI.
	 */

	cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id);

	if (cfg_info == core_pwr_pd) {
		/* disable core_pm cfg */
		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      CORES_PM_DISABLE);
		/* if the core is already on, power it off first */
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 0);
			pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
		}

		pmu_power_domain_ctr(cpu_pd, pmu_pd_on);
	} else {
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			WARN("%s: cpu%d is not off\n", __func__, cpu_id);
			return -EINVAL;
		}

		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      BIT(core_pm_sft_wakeup_en));
		dsb();
	}

	return 0;
}

static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg)
{
	uint32_t cpu_pd;
	uint32_t core_pm_value;

	cpu_pd = PD_CPUL0 + cpu_id;
	if (pmu_power_domain_st(cpu_pd) == pmu_pd_off)
		return 0;

	if (pd_cfg == core_pwr_pd) {
		if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK))
			return -EINVAL;

		/* disable core_pm cfg */
		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      CORES_PM_DISABLE);

		set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);
		pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
	} else {
		set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);

		core_pm_value = BIT(core_pm_en);
		if (pd_cfg == core_pwr_wfi_int)
			core_pm_value |= BIT(core_pm_int_wakeup_en);
		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      core_pm_value);
		dsb();
	}

	return 0;
}
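/*
 * When the last running CPU of a cluster is suspended, the cluster PLL
 * is parked in slow mode and PMU_CLST_RET is flagged in
 * clst_warmboot_data so the warm-boot path knows to restore it.
 * PMU_PWRDN_ST is re-read after switching the PLL because another core
 * may have powered up concurrently; in that case the change is undone
 * immediately.
 */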
static inline void clst_pwr_domain_suspend(plat_local_state_t lvl_state)
{
	uint32_t cpu_id = plat_my_core_pos();
	uint32_t pll_id, clst_st_msk, clst_st_chk_msk, pmu_st;

	assert(cpu_id < PLATFORM_CORE_COUNT);

	if (lvl_state == PLAT_MAX_OFF_STATE) {
		if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT) {
			pll_id = ALPLL_ID;
			clst_st_msk = CLST_L_CPUS_MSK;
		} else {
			pll_id = ABPLL_ID;
			clst_st_msk = CLST_B_CPUS_MSK <<
				      PLATFORM_CLUSTER0_CORE_COUNT;
		}

		clst_st_chk_msk = clst_st_msk & ~(BIT(cpu_id));

		pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);

		pmu_st &= clst_st_msk;

		if (pmu_st == clst_st_chk_msk) {
			mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3),
				      PLL_SLOW_MODE);

			clst_warmboot_data[pll_id] = PMU_CLST_RET;

			pmu_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST);
			pmu_st &= clst_st_msk;
			if (pmu_st == clst_st_chk_msk)
				return;
			/*
			 * Another CPU in this cluster came up in the
			 * meantime; undo the configuration at once.
			 */
			mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3),
				      PLL_NOMAL_MODE);
			clst_warmboot_data[pll_id] = 0;
		}
	}
}

static int clst_pwr_domain_resume(plat_local_state_t lvl_state)
{
	uint32_t cpu_id = plat_my_core_pos();
	uint32_t pll_id, pll_st;

	assert(cpu_id < PLATFORM_CORE_COUNT);

	if (lvl_state == PLAT_MAX_OFF_STATE) {
		if (cpu_id < PLATFORM_CLUSTER0_CORE_COUNT)
			pll_id = ALPLL_ID;
		else
			pll_id = ABPLL_ID;

		pll_st = mmio_read_32(CRU_BASE + CRU_PLL_CON(pll_id, 3)) >>
			 PLL_MODE_SHIFT;

		if (pll_st != NORMAL_MODE) {
			WARN("%s: clst (%d) is in error mode (%d)\n",
			     __func__, pll_id, pll_st);
			return -1;
		}
	}

	return 0;
}

static void nonboot_cpus_off(void)
{
	uint32_t boot_cpu, cpu;

	boot_cpu = plat_my_core_pos();

	/* turn off the nonboot cpus */
	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) {
		if (cpu == boot_cpu)
			continue;
		cpus_power_domain_off(cpu, core_pwr_pd);
	}
}

int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint)
{
	uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr);

	assert(cpu_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG;
	cpuson_entry_point[cpu_id] = entrypoint;
	dsb();

	cpus_power_domain_on(cpu_id);

	return PSCI_E_SUCCESS;
}

int rockchip_soc_cores_pwr_dm_off(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	cpus_power_domain_off(cpu_id, core_pwr_wfi);

	return PSCI_E_SUCCESS;
}

int rockchip_soc_hlvl_pwr_dm_off(uint32_t lvl,
				 plat_local_state_t lvl_state)
{
	switch (lvl) {
	case MPIDR_AFFLVL1:
		clst_pwr_domain_suspend(lvl_state);
		break;
	default:
		break;
	}

	return PSCI_E_SUCCESS;
}

int rockchip_soc_cores_pwr_dm_suspend(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	assert(cpu_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN;
	cpuson_entry_point[cpu_id] = plat_get_sec_entrypoint();
	dsb();

	cpus_power_domain_off(cpu_id, core_pwr_wfi_int);

	return PSCI_E_SUCCESS;
}

int rockchip_soc_hlvl_pwr_dm_suspend(uint32_t lvl, plat_local_state_t lvl_state)
{
	switch (lvl) {
	case MPIDR_AFFLVL1:
		clst_pwr_domain_suspend(lvl_state);
		break;
	default:
		break;
	}

	return PSCI_E_SUCCESS;
}

int rockchip_soc_cores_pwr_dm_on_finish(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
		      CORES_PM_DISABLE);
	return PSCI_E_SUCCESS;
}

int rockchip_soc_hlvl_pwr_dm_on_finish(uint32_t lvl,
				       plat_local_state_t lvl_state)
{
	switch (lvl) {
	case MPIDR_AFFLVL1:
		clst_pwr_domain_resume(lvl_state);
		break;
	default:
		break;
	}

	return PSCI_E_SUCCESS;
}

int rockchip_soc_cores_pwr_dm_resume(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	/* Disable core_pm */
	mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), CORES_PM_DISABLE);

	return PSCI_E_SUCCESS;
}

int rockchip_soc_hlvl_pwr_dm_resume(uint32_t lvl, plat_local_state_t lvl_state)
{
	switch (lvl) {
	case MPIDR_AFFLVL1:
		clst_pwr_domain_resume(lvl_state);
		break;
	default:
		break;
	}

	return PSCI_E_SUCCESS;
}
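/*
 * The rockchip_soc_* hooks above are the rk3399 back ends of the common
 * Rockchip PSCI glue: the "on"/"off" variants implement CPU_ON/CPU_OFF
 * through the per-core power domains, while the "suspend"/"resume"
 * variants arm the automatic power-down on WFI and stash the secure
 * entry point in cpuson_entry_point[] for the warm-boot code.
 */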
/**
 * init_pmu_counts - Init timing counts in the PMU register area
 *
 * At various points when we power up or down parts of the system we need
 * a delay to wait for power / clocks to become stable. The PMU has
 * counters to help software do the delay properly. Basically, it works
 * like this:
 * - Software sets up counter values
 * - When software turns on something in the PMU, the counter kicks off
 * - The hardware sets a bit automatically when the counter has finished
 *   and software knows that the initialization is done.
 *
 * It's software's job to set up these counters. The hardware power-on
 * default for these settings is conservative, setting everything to
 * 0x5dc0 (750 ms in 32 kHz counts or 1 ms in 24 MHz counts).
 *
 * Note that some of these counters are only really used at suspend/resume
 * time (for instance, that's the only time we turn off/on the oscillator)
 * and others are used during normal runtime (like turning on/off a CPU
 * or GPU), but it doesn't hurt to init everything at boot.
 *
 * Also note that these counters can run off the 32 kHz clock or the
 * 24 MHz clock. While the 24 MHz clock can give us more precision, it's
 * not always available (like when we turn the oscillator off at sleep
 * time). The pmu_use_lf bit (lf: low frequency) only takes effect in
 * power mode. The current understanding is that counts work like this:
 *   IF (pmu_use_lf == 0) || (power_mode_en == 0)
 *     use the 24M OSC for counts
 *   ELSE
 *     use the 32K OSC for counts
 *
 * Notes:
 * - There is a separate bit for the PMU called PMU_24M_EN_CFG. At the
 *   moment we always keep that 0. This apparently chooses between using
 *   the PLL as the source for the PMU vs. the 24M clock. If we ever set
 *   it to 1 we should consider how it affects these counts (if at all).
 * - The power_mode_en bit is documented to clear automatically when we
 *   leave "power mode". That's why most clocks are on 24M. Only timings
 *   used while in "power mode" are 32k.
 * - In some cases the kernel may override these counts.
 *
 * PMU_STABLE_CNT / PMU_OSC_CNT / PMU_PLLLOCK_CNT are the important
 * counts in power mode; we need to ensure that they are available.
 */
static void init_pmu_counts(void)
{
	/* COUNTS FOR INSIDE POWER MODE */

	/*
	 * From limited testing, the PMU stable count needs to be >= 2 ms,
	 * but we go overkill and choose 30 ms to match testing on past
	 * SoCs. Also give the OSC 30 ms to stabilize.
	 */
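	/*
	 * For reference, assuming the usual macro definitions
	 * (CYCL_32K_CNT_MS(ms) == 32 * (ms) and CYCL_24M_CNT_MS(ms) ==
	 * 24000 * (ms)): 30 ms at 32 kHz is 960 counts, and the 0x5dc0
	 * power-on default is 24000 decimal, i.e. 750 ms at 32 kHz or
	 * 1 ms at 24 MHz, matching the doc comment above.
	 */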
	mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_32K_CNT_MS(30));
	mmio_write_32(PMU_BASE + PMU_OSC_CNT, CYCL_32K_CNT_MS(30));

	/* Unclear what these should be; try 3 ms */
	mmio_write_32(PMU_BASE + PMU_WAKEUP_RST_CLR_CNT, CYCL_32K_CNT_MS(3));

	/* Unclear what this should be, but set the default explicitly */
	mmio_write_32(PMU_BASE + PMU_TIMEOUT_CNT, 0x5dc0);

	/* COUNTS FOR OUTSIDE POWER MODE */

	/* Put something sorta conservative here until we know better */
	mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_24M_CNT_MS(3));
	mmio_write_32(PMU_BASE + PMU_DDRIO_PWRON_CNT, CYCL_24M_CNT_MS(1));
	mmio_write_32(PMU_BASE + PMU_CENTER_PWRDN_CNT, CYCL_24M_CNT_MS(1));
	mmio_write_32(PMU_BASE + PMU_CENTER_PWRUP_CNT, CYCL_24M_CNT_MS(1));

	/*
	 * Enabling PMU_CLR_PERILP shuts down the SRAM, but the M0 code runs
	 * from SRAM and is needed to check whether the CPU entered the FSM
	 * state, so we must wait for the M0 to finish its work and enter
	 * WFI before the SRAM is shut down. Per the FSM order:
	 * ST_NORMAL->..->ST_SCU_L_PWRDN->..->ST_CENTER_PWRDN->ST_PERILP_PWRDN
	 * adding a delay to the ST_SCU_L_PWRDN step guarantees the M0 sees
	 * the FSM status and enters WFI before PMU_CLR_PERILP takes effect.
	 */
	mmio_write_32(PMU_BASE + PMU_SCU_L_PWRDN_CNT, CYCL_24M_CNT_MS(5));
	mmio_write_32(PMU_BASE + PMU_SCU_L_PWRUP_CNT, CYCL_24M_CNT_US(1));

	/*
	 * Set CPU/GPU to 1 us.
	 *
	 * NOTE: Even though ATF doesn't configure the GPU we'll still set
	 * up counts here. After all, ATF controls all these other bits and
	 * also chooses which clock these counters use.
	 */
	mmio_write_32(PMU_BASE + PMU_SCU_B_PWRDN_CNT, CYCL_24M_CNT_US(1));
	mmio_write_32(PMU_BASE + PMU_SCU_B_PWRUP_CNT, CYCL_24M_CNT_US(1));
	mmio_write_32(PMU_BASE + PMU_GPU_PWRDN_CNT, CYCL_24M_CNT_US(1));
	mmio_write_32(PMU_BASE + PMU_GPU_PWRUP_CNT, CYCL_24M_CNT_US(1));
}

static uint32_t clk_ddrc_save;

static void sys_slp_config(void)
{
	uint32_t slp_mode_cfg = 0;

	/* keep the clk_ddrc_bpll_src_en gate enabled for the DDRC */
	clk_ddrc_save = mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(3));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(3), WMSK_BIT(1));

	prepare_abpll_for_ddrctrl();
	sram_func_set_ddrctl_pll(ABPLL_ID);

	mmio_write_32(GRF_BASE + GRF_SOC_CON4, CCI_FORCE_WAKEUP);
	mmio_write_32(PMU_BASE + PMU_CCI500_CON,
		      BIT_WITH_WMSK(PMU_CLR_PREQ_CCI500_HW) |
		      BIT_WITH_WMSK(PMU_CLR_QREQ_CCI500_HW) |
		      BIT_WITH_WMSK(PMU_QGATING_CCI500_CFG));

	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      BIT_WITH_WMSK(PMU_CLR_CORE_L_HW) |
		      BIT_WITH_WMSK(PMU_CLR_CORE_L_2GIC_HW) |
		      BIT_WITH_WMSK(PMU_CLR_GIC2_CORE_L_HW));

	slp_mode_cfg = BIT(PMU_PWR_MODE_EN) |
		       BIT(PMU_INPUT_CLAMP_EN) |
		       BIT(PMU_POWER_OFF_REQ_CFG) |
		       BIT(PMU_CPU0_PD_EN) |
		       BIT(PMU_L2_FLUSH_EN) |
		       BIT(PMU_L2_IDLE_EN) |
		       BIT(PMU_SCU_PD_EN) |
		       BIT(PMU_CCI_PD_EN) |
		       BIT(PMU_CLK_CORE_SRC_GATE_EN) |
		       BIT(PMU_ALIVE_USE_LF) |
		       BIT(PMU_SREF0_ENTER_EN) |
		       BIT(PMU_SREF1_ENTER_EN) |
		       BIT(PMU_DDRC0_GATING_EN) |
		       BIT(PMU_DDRC1_GATING_EN) |
		       BIT(PMU_DDRIO0_RET_EN) |
		       BIT(PMU_DDRIO0_RET_DE_REQ) |
		       BIT(PMU_DDRIO1_RET_EN) |
		       BIT(PMU_DDRIO1_RET_DE_REQ) |
		       BIT(PMU_DDRIO_RET_HW_DE_REQ) |
		       BIT(PMU_CENTER_PD_EN) |
		       BIT(PMU_PERILP_PD_EN) |
		       BIT(PMU_CLK_PERILP_SRC_GATE_EN) |
		       BIT(PMU_PLL_PD_EN) |
		       BIT(PMU_CLK_CENTER_SRC_GATE_EN) |
		       BIT(PMU_OSC_DIS) |
		       BIT(PMU_PMU_USE_LF);
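	/*
	 * The bits above describe what the PMU power-mode FSM is asked to
	 * do once the last core executes WFI with PMU_PWR_MODE_EN set:
	 * clamp the inputs, flush and idle L2/SCU/CCI, put the DDR IOs
	 * into retention with self-refresh enabled, power down
	 * CENTER/PERILP and the PLLs, and finally stop the 24 MHz
	 * oscillator and run the PMU from the 32 kHz clock.
	 */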
	mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, BIT(PMU_GPIO_WKUP_EN));
	mmio_write_32(PMU_BASE + PMU_PWRMODE_CON, slp_mode_cfg);

	mmio_write_32(PMU_BASE + PMU_PLL_CON, PLL_PD_HW);
	mmio_write_32(PMUGRF_BASE + PMUGRF_SOC_CON0, EXTERNAL_32K);
	mmio_write_32(PMUGRF_BASE, IOMUX_CLK_32K); /* 32k iomux */
}

static void set_hw_idle(uint32_t hw_idle)
{
	mmio_setbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle);
}

static void clr_hw_idle(uint32_t hw_idle)
{
	mmio_clrbits_32(PMU_BASE + PMU_BUS_CLR, hw_idle);
}
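/*
 * AP-IO suspend: for each apio group the board declares can be powered
 * down, the pins are switched to the GPIO function, set to pull-none and
 * turned into inputs, minimizing leakage while their supply is off. The
 * original iomux, pull and direction settings are saved first so
 * resume_apio() can put everything back.
 */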
static uint32_t iomux_status[12];
static uint32_t pull_mode_status[12];
static uint32_t gpio_direction[3];
static uint32_t gpio_2_4_clk_gate;

static void suspend_apio(void)
{
	struct apio_info *suspend_apio;
	int i;

	suspend_apio = plat_get_rockchip_suspend_apio();

	if (!suspend_apio)
		return;

	/* save gpio2 ~ gpio4 iomux and pull mode */
	for (i = 0; i < 12; i++) {
		iomux_status[i] = mmio_read_32(GRF_BASE +
				GRF_GPIO2A_IOMUX + i * 4);
		pull_mode_status[i] = mmio_read_32(GRF_BASE +
				GRF_GPIO2A_P + i * 4);
	}

	/* store gpio2 ~ gpio4 clock gate state */
	gpio_2_4_clk_gate = (mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(31)) >>
				PCLK_GPIO2_GATE_SHIFT) & 0x07;

	/* enable the gpio2 ~ gpio4 clocks */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
		      BITS_WITH_WMASK(0, 0x07, PCLK_GPIO2_GATE_SHIFT));

	/* save gpio2 ~ gpio4 direction */
	gpio_direction[0] = mmio_read_32(GPIO2_BASE + 0x04);
	gpio_direction[1] = mmio_read_32(GPIO3_BASE + 0x04);
	gpio_direction[2] = mmio_read_32(GPIO4_BASE + 0x04);

	/* apio1 controls gpio3a0 ~ gpio3c7 */
	if (suspend_apio->apio1) {

		/* set gpio3a0 ~ gpio3c7 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO3A_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO3B_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO3C_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio3a0 ~ gpio3c7 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO3A_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO3B_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO3C_P, REG_SOC_WMSK | 0);

		/* set gpio3a0 ~ gpio3c7 to input */
		mmio_clrbits_32(GPIO3_BASE + 0x04, 0x00ffffff);
	}

	/* apio2 controls gpio2a0 ~ gpio2b4 */
	if (suspend_apio->apio2) {

		/* set gpio2a0 ~ gpio2b4 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO2B_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio2a0 ~ gpio2b4 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO2A_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO2B_P, REG_SOC_WMSK | 0);

		/* set gpio2a0 ~ gpio2b4 to input */
		mmio_clrbits_32(GPIO2_BASE + 0x04, 0x00001fff);
	}

	/* apio3 controls gpio2c0 ~ gpio2d4 */
	if (suspend_apio->apio3) {

		/* set gpio2c0 ~ gpio2d4 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO2C_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO2D_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio2c0 ~ gpio2d4 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO2C_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO2D_P, REG_SOC_WMSK | 0);

		/* set gpio2c0 ~ gpio2d4 to input */
		mmio_clrbits_32(GPIO2_BASE + 0x04, 0x1fff0000);
	}

	/* apio4 controls gpio4c0 ~ gpio4c7, gpio4d0 ~ gpio4d6 */
	if (suspend_apio->apio4) {

		/* set gpio4c0 ~ gpio4d6 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO4C_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO4D_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio4c0 ~ gpio4d6 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO4C_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO4D_P, REG_SOC_WMSK | 0);

		/* set gpio4c0 ~ gpio4d6 to input */
		mmio_clrbits_32(GPIO4_BASE + 0x04, 0x7fff0000);
	}

	/* apio5 controls gpio3d0 ~ gpio3d7, gpio4a0 ~ gpio4a7 */
	if (suspend_apio->apio5) {
		/* set gpio3d0 ~ gpio4a7 iomux to gpio */
		mmio_write_32(GRF_BASE + GRF_GPIO3D_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);
		mmio_write_32(GRF_BASE + GRF_GPIO4A_IOMUX,
			      REG_SOC_WMSK | GRF_IOMUX_GPIO);

		/* set gpio3d0 ~ gpio4a7 pull mode to pull none */
		mmio_write_32(GRF_BASE + GRF_GPIO3D_P, REG_SOC_WMSK | 0);
		mmio_write_32(GRF_BASE + GRF_GPIO4A_P, REG_SOC_WMSK | 0);

		/* set gpio3d0 ~ gpio4a7 to input */
		mmio_clrbits_32(GPIO3_BASE + 0x04, 0xff000000);
		mmio_clrbits_32(GPIO4_BASE + 0x04, 0x000000ff);
	}
}

static void resume_apio(void)
{
	struct apio_info *suspend_apio;
	int i;

	suspend_apio = plat_get_rockchip_suspend_apio();

	if (!suspend_apio)
		return;

	for (i = 0; i < 12; i++) {
		mmio_write_32(GRF_BASE + GRF_GPIO2A_P + i * 4,
			      REG_SOC_WMSK | pull_mode_status[i]);
		mmio_write_32(GRF_BASE + GRF_GPIO2A_IOMUX + i * 4,
			      REG_SOC_WMSK | iomux_status[i]);
	}

	/* set gpio2 ~ gpio4 direction back to the stored value */
	mmio_write_32(GPIO2_BASE + 0x04, gpio_direction[0]);
	mmio_write_32(GPIO3_BASE + 0x04, gpio_direction[1]);
	mmio_write_32(GPIO4_BASE + 0x04, gpio_direction[2]);

	/* set the gpio2 ~ gpio4 clock gate back to the stored value */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(31),
		      BITS_WITH_WMASK(gpio_2_4_clk_gate, 0x07,
				      PCLK_GPIO2_GATE_SHIFT));
}

static void suspend_gpio(void)
{
	struct gpio_info *suspend_gpio;
	uint32_t count;
	int i;

	suspend_gpio = plat_get_rockchip_suspend_gpio(&count);

	for (i = 0; i < count; i++) {
		gpio_set_value(suspend_gpio[i].index, suspend_gpio[i].polarity);
		gpio_set_direction(suspend_gpio[i].index, GPIO_DIR_OUT);
		udelay(1);
	}
}

static void resume_gpio(void)
{
	struct gpio_info *suspend_gpio;
	uint32_t count;
	int i;

	suspend_gpio = plat_get_rockchip_suspend_gpio(&count);

	for (i = count - 1; i >= 0; i--) {
		gpio_set_value(suspend_gpio[i].index,
			       !suspend_gpio[i].polarity);
		gpio_set_direction(suspend_gpio[i].index, GPIO_DIR_OUT);
		udelay(1);
	}
}

static void m0_configure_suspend(void)
{
	/* set PARAM to M0_FUNC_SUSPEND */
	mmio_write_32(M0_PARAM_ADDR + PARAM_M0_FUNC, M0_FUNC_SUSPEND);
}
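/*
 * The sleep configuration powers down PERILP, which takes the SRAM down
 * with it, so everything BL31 keeps in SRAM (sram.text, sram.data and
 * the embedded incbin image) is copied out to the store_sram buffer in
 * DRAM before suspend and copied back on resume.
 */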
void sram_save(void)
{
	size_t text_size = (char *)&__bl31_sram_text_real_end -
			   (char *)&__bl31_sram_text_start;
	size_t data_size = (char *)&__bl31_sram_data_real_end -
			   (char *)&__bl31_sram_data_start;
	size_t incbin_size = (char *)&__sram_incbin_real_end -
			     (char *)&__sram_incbin_start;

	memcpy(&store_sram[0], &__bl31_sram_text_start, text_size);
	memcpy(&store_sram[text_size], &__bl31_sram_data_start, data_size);
	memcpy(&store_sram[text_size + data_size], &__sram_incbin_start,
	       incbin_size);
}

void sram_restore(void)
{
	size_t text_size = (char *)&__bl31_sram_text_real_end -
			   (char *)&__bl31_sram_text_start;
	size_t data_size = (char *)&__bl31_sram_data_real_end -
			   (char *)&__bl31_sram_data_start;
	size_t incbin_size = (char *)&__sram_incbin_real_end -
			     (char *)&__sram_incbin_start;

	memcpy(&__bl31_sram_text_start, &store_sram[0], text_size);
	memcpy(&__bl31_sram_data_start, &store_sram[text_size], data_size);
	memcpy(&__sram_incbin_start, &store_sram[text_size + data_size],
	       incbin_size);
}

struct uart_debug {
	uint32_t uart_dll;
	uint32_t uart_dlh;
	uint32_t uart_ier;
	uint32_t uart_fcr;
	uint32_t uart_mcr;
	uint32_t uart_lcr;
};

#define UART_DLL	0x00
#define UART_DLH	0x04
#define UART_IER	0x04
#define UART_FCR	0x08
#define UART_LCR	0x0c
#define UART_MCR	0x10
#define UARTSRR		0x88

#define UART_RESET	BIT(0)
#define UARTFCR_FIFOEN	BIT(0)
#define RCVR_FIFO_RESET	BIT(1)
#define XMIT_FIFO_RESET	BIT(2)
#define DIAGNOSTIC_MODE	BIT(4)
#define UARTLCR_DLAB	BIT(7)

static struct uart_debug uart_save;

void suspend_uart(void)
{
	uart_save.uart_lcr = mmio_read_32(PLAT_RK_UART_BASE + UART_LCR);
	uart_save.uart_ier = mmio_read_32(PLAT_RK_UART_BASE + UART_IER);
	uart_save.uart_mcr = mmio_read_32(PLAT_RK_UART_BASE + UART_MCR);
	/* the divisor latch registers are only visible while DLAB is set */
	mmio_write_32(PLAT_RK_UART_BASE + UART_LCR,
		      uart_save.uart_lcr | UARTLCR_DLAB);
	uart_save.uart_dll = mmio_read_32(PLAT_RK_UART_BASE + UART_DLL);
	uart_save.uart_dlh = mmio_read_32(PLAT_RK_UART_BASE + UART_DLH);
	mmio_write_32(PLAT_RK_UART_BASE + UART_LCR, uart_save.uart_lcr);
}

void resume_uart(void)
{
	uint32_t uart_lcr;

	mmio_write_32(PLAT_RK_UART_BASE + UARTSRR,
		      XMIT_FIFO_RESET | RCVR_FIFO_RESET | UART_RESET);

	uart_lcr = mmio_read_32(PLAT_RK_UART_BASE + UART_LCR);
	mmio_write_32(PLAT_RK_UART_BASE + UART_MCR, DIAGNOSTIC_MODE);
	mmio_write_32(PLAT_RK_UART_BASE + UART_LCR, uart_lcr | UARTLCR_DLAB);
	mmio_write_32(PLAT_RK_UART_BASE + UART_DLL, uart_save.uart_dll);
	mmio_write_32(PLAT_RK_UART_BASE + UART_DLH, uart_save.uart_dlh);
	mmio_write_32(PLAT_RK_UART_BASE + UART_LCR, uart_save.uart_lcr);
	mmio_write_32(PLAT_RK_UART_BASE + UART_IER, uart_save.uart_ier);
	mmio_write_32(PLAT_RK_UART_BASE + UART_FCR, UARTFCR_FIFOEN);
	mmio_write_32(PLAT_RK_UART_BASE + UART_MCR, uart_save.uart_mcr);
}

void save_usbphy(void)
{
	store_usbphy0[0] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL0);
	store_usbphy0[1] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL2);
	store_usbphy0[2] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL3);
	store_usbphy0[3] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL12);
	store_usbphy0[4] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL13);
	store_usbphy0[5] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL15);
	store_usbphy0[6] = mmio_read_32(GRF_BASE + GRF_USBPHY0_CTRL16);

	store_usbphy1[0] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL0);
	store_usbphy1[1] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL2);
	store_usbphy1[2] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL3);
	store_usbphy1[3] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL12);
	store_usbphy1[4] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL13);
	store_usbphy1[5] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL15);
	store_usbphy1[6] = mmio_read_32(GRF_BASE + GRF_USBPHY1_CTRL16);
}
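/*
 * GRF (and most CRU) registers on this SoC carry a write mask in the
 * upper 16 bits for the data in the lower 16 bits. OR-ing a saved value
 * with REG_SOC_WMSK sets every mask bit, so the restore functions below
 * rewrite the full low half-word of each register.
 */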
void restore_usbphy(void)
{
	mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL0,
		      REG_SOC_WMSK | store_usbphy0[0]);
	mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL2,
		      REG_SOC_WMSK | store_usbphy0[1]);
	mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL3,
		      REG_SOC_WMSK | store_usbphy0[2]);
	mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL12,
		      REG_SOC_WMSK | store_usbphy0[3]);
	mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL13,
		      REG_SOC_WMSK | store_usbphy0[4]);
	mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL15,
		      REG_SOC_WMSK | store_usbphy0[5]);
	mmio_write_32(GRF_BASE + GRF_USBPHY0_CTRL16,
		      REG_SOC_WMSK | store_usbphy0[6]);

	mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL0,
		      REG_SOC_WMSK | store_usbphy1[0]);
	mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL2,
		      REG_SOC_WMSK | store_usbphy1[1]);
	mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL3,
		      REG_SOC_WMSK | store_usbphy1[2]);
	mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL12,
		      REG_SOC_WMSK | store_usbphy1[3]);
	mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL13,
		      REG_SOC_WMSK | store_usbphy1[4]);
	mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL15,
		      REG_SOC_WMSK | store_usbphy1[5]);
	mmio_write_32(GRF_BASE + GRF_USBPHY1_CTRL16,
		      REG_SOC_WMSK | store_usbphy1[6]);
}

void grf_register_save(void)
{
	int i;

	store_grf_soc_con0 = mmio_read_32(GRF_BASE + GRF_SOC_CON(0));
	store_grf_soc_con1 = mmio_read_32(GRF_BASE + GRF_SOC_CON(1));
	store_grf_soc_con2 = mmio_read_32(GRF_BASE + GRF_SOC_CON(2));
	store_grf_soc_con3 = mmio_read_32(GRF_BASE + GRF_SOC_CON(3));
	store_grf_soc_con4 = mmio_read_32(GRF_BASE + GRF_SOC_CON(4));
	store_grf_soc_con7 = mmio_read_32(GRF_BASE + GRF_SOC_CON(7));

	for (i = 0; i < 4; i++)
		store_grf_ddrc_con[i] =
			mmio_read_32(GRF_BASE + GRF_DDRC0_CON0 + i * 4);

	store_grf_io_vsel = mmio_read_32(GRF_BASE + GRF_IO_VSEL);
}

void grf_register_restore(void)
{
	int i;

	mmio_write_32(GRF_BASE + GRF_SOC_CON(0),
		      REG_SOC_WMSK | store_grf_soc_con0);
	mmio_write_32(GRF_BASE + GRF_SOC_CON(1),
		      REG_SOC_WMSK | store_grf_soc_con1);
	mmio_write_32(GRF_BASE + GRF_SOC_CON(2),
		      REG_SOC_WMSK | store_grf_soc_con2);
	mmio_write_32(GRF_BASE + GRF_SOC_CON(3),
		      REG_SOC_WMSK | store_grf_soc_con3);
	mmio_write_32(GRF_BASE + GRF_SOC_CON(4),
		      REG_SOC_WMSK | store_grf_soc_con4);
	mmio_write_32(GRF_BASE + GRF_SOC_CON(7),
		      REG_SOC_WMSK | store_grf_soc_con7);

	for (i = 0; i < 4; i++)
		mmio_write_32(GRF_BASE + GRF_DDRC0_CON0 + i * 4,
			      REG_SOC_WMSK | store_grf_ddrc_con[i]);

	mmio_write_32(GRF_BASE + GRF_IO_VSEL, REG_SOC_WMSK | store_grf_io_vsel);
}

void cru_register_save(void)
{
	int i;

	for (i = 0; i <= CRU_SDIO0_CON1; i = i + 4)
		store_cru[i / 4] = mmio_read_32(CRU_BASE + i);
}

void cru_register_restore(void)
{
	int i;

	for (i = 0; i <= CRU_SDIO0_CON1; i = i + 4) {

		/*
		 * DPLL and CRU_CLKSEL_CON6 have already been restored in
		 * dmc_resume, and ABPLL will be restored later, so skip
		 * them here.
		 */
		if ((i == CRU_CLKSEL_CON6) ||
		    (i >= CRU_PLL_CON(ABPLL_ID, 0) &&
		     i <= CRU_PLL_CON(DPLL_ID, 5)))
			continue;

		if ((i == CRU_PLL_CON(ALPLL_ID, 2)) ||
		    (i == CRU_PLL_CON(CPLL_ID, 2)) ||
		    (i == CRU_PLL_CON(GPLL_ID, 2)) ||
		    (i == CRU_PLL_CON(NPLL_ID, 2)) ||
		    (i == CRU_PLL_CON(VPLL_ID, 2)))
			mmio_write_32(CRU_BASE + i, store_cru[i / 4]);
		/*
		 * CRU_GLB_CNT_TH and CRU_CLKSEL_CON97~CRU_CLKSEL_CON107
		 * do not need the high 16-bit write mask
		 */
		else if ((i > 0x27c && i < 0x2b0) || (i == 0x508))
			mmio_write_32(CRU_BASE + i, store_cru[i / 4]);
		else
			mmio_write_32(CRU_BASE + i,
				      REG_SOC_WMSK | store_cru[i / 4]);
	}
}

void wdt_register_save(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		store_wdt0[i] = mmio_read_32(WDT0_BASE + i * 4);
		store_wdt1[i] = mmio_read_32(WDT1_BASE + i * 4);
	}
}

void wdt_register_restore(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		mmio_write_32(WDT0_BASE + i * 4, store_wdt0[i]);
		mmio_write_32(WDT1_BASE + i * 4, store_wdt1[i]);
	}
}
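/*
 * System suspend entry point. Rough order of events: put the DDR into
 * self-refresh and move its controller onto ABPLL, power down the big
 * cluster's SCU, save the state that lives in soon-to-be-off domains
 * (USB PHY, GRF, CRU, WDT, SRAM), program the PMU sleep configuration,
 * start the M0, and point the warm-boot vector at pmu_cpuson_entrypoint
 * before the calling core finally executes WFI.
 */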
int rockchip_soc_sys_pwr_dm_suspend(void)
{
	uint32_t wait_cnt = 0;
	uint32_t status = 0;

	ddr_prepare_for_sys_suspend();
	dmc_suspend();
	pmu_scu_b_pwrdn();

	/* the usbphy must be saved before the PERIHP PD is shut down */
	save_usbphy();

	pmu_power_domains_suspend();
	set_hw_idle(BIT(PMU_CLR_CENTER1) |
		    BIT(PMU_CLR_ALIVE) |
		    BIT(PMU_CLR_MSCH0) |
		    BIT(PMU_CLR_MSCH1) |
		    BIT(PMU_CLR_CCIM0) |
		    BIT(PMU_CLR_CCIM1) |
		    BIT(PMU_CLR_CENTER) |
		    BIT(PMU_CLR_PERILP) |
		    BIT(PMU_CLR_PERILPM0) |
		    BIT(PMU_CLR_GIC));
	set_pmu_rsthold();
	sys_slp_config();

	m0_configure_suspend();
	m0_start();

	pmu_sgrf_rst_hld();

	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      ((uintptr_t)&pmu_cpuson_entrypoint >>
		       CPU_BOOT_ADDR_ALIGN) | CPU_BOOT_ADDR_WMASK);

	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) |
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_SW) |
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_GIC2_CORE_B_SW));
	dsb();
	status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) |
		 BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) |
		 BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST);
	while ((mmio_read_32(PMU_BASE +
			     PMU_ADB400_ST) & status) != status) {
		wait_cnt++;
		if (wait_cnt >= MAX_WAIT_COUNT) {
			ERROR("%s:wait adb400 (%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_ADB400_ST));
			panic();
		}
	}
	mmio_setbits_32(PMU_BASE + PMU_PWRDN_CON, BIT(PMU_SCU_B_PWRDWN_EN));

	secure_watchdog_disable();

	/*
	 * Disabling the PLLs/PWMs/DVFS is done just before WFI, the last
	 * step of suspend.
	 */
	disable_dvfs_plls();
	disable_pwms();
	disable_nodvfs_plls();

	suspend_apio();
	suspend_gpio();
	suspend_uart();
	grf_register_save();
	cru_register_save();
	wdt_register_save();
	sram_save();
	plat_rockchip_save_gpio();

	return 0;
}
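/*
 * System resume: undo the suspend sequence in roughly reverse order,
 * restoring saved register state first, re-enabling the PLLs and PWMs,
 * then the CCI/ADB400 links and the big-cluster SCU, and finally the
 * power domains and the GIC CPU interface.
 */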
int rockchip_soc_sys_pwr_dm_resume(void)
{
	uint32_t wait_cnt = 0;
	uint32_t status = 0;

	plat_rockchip_restore_gpio();
	wdt_register_restore();
	cru_register_restore();
	grf_register_restore();
	resume_uart();
	resume_apio();
	resume_gpio();
	enable_nodvfs_plls();
	enable_pwms();
	/* PWM regulators take time to come up; give 300 us to be safe */
	udelay(300);
	enable_dvfs_plls();

	secure_watchdog_enable();
	secure_sgrf_init();
	secure_sgrf_ddr_rgn_init();

	/* restore the clk_ddrc_bpll_src_en gate */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(3),
		      BITS_WITH_WMASK(clk_ddrc_save, 0xff, 0));

	/*
	 * The wakeup status does not clear itself, so clear it manually;
	 * otherwise we would always see a stale interrupt the next time
	 * we query it.
	 *
	 * NOTE: If the kernel needs to query this, we might want to stash
	 * it somewhere.
	 */
	mmio_write_32(PMU_BASE + PMU_WAKEUP_STATUS, 0xffffffff);
	mmio_write_32(PMU_BASE + PMU_WKUP_CFG4, 0x00);

	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	mmio_write_32(PMU_BASE + PMU_CCI500_CON,
		      WMSK_BIT(PMU_CLR_PREQ_CCI500_HW) |
		      WMSK_BIT(PMU_CLR_QREQ_CCI500_HW) |
		      WMSK_BIT(PMU_QGATING_CCI500_CFG));
	dsb();
	mmio_clrbits_32(PMU_BASE + PMU_PWRDN_CON,
			BIT(PMU_SCU_B_PWRDWN_EN));

	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) |
		      WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_SW) |
		      WMSK_BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW) |
		      WMSK_BIT(PMU_CLR_CORE_L_HW) |
		      WMSK_BIT(PMU_CLR_CORE_L_2GIC_HW) |
		      WMSK_BIT(PMU_CLR_GIC2_CORE_L_HW));

	status = BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW_ST) |
		 BIT(PMU_PWRDWN_REQ_CORE_B_SW_ST) |
		 BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW_ST);

	while ((mmio_read_32(PMU_BASE +
			     PMU_ADB400_ST) & status)) {
		wait_cnt++;
		if (wait_cnt >= MAX_WAIT_COUNT) {
			ERROR("%s:wait adb400 (%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_ADB400_ST));
			panic();
		}
	}

	pmu_sgrf_rst_hld_release();
	pmu_scu_b_pwrup();
	pmu_power_domains_resume();

	restore_abpll();
	restore_pmu_rsthold();
	clr_hw_idle(BIT(PMU_CLR_CENTER1) |
		    BIT(PMU_CLR_ALIVE) |
		    BIT(PMU_CLR_MSCH0) |
		    BIT(PMU_CLR_MSCH1) |
		    BIT(PMU_CLR_CCIM0) |
		    BIT(PMU_CLR_CCIM1) |
		    BIT(PMU_CLR_CENTER) |
		    BIT(PMU_CLR_PERILP) |
		    BIT(PMU_CLR_PERILPM0) |
		    BIT(PMU_CLR_GIC));

	plat_rockchip_gic_cpuif_enable();
	m0_stop();

	restore_usbphy();

	ddr_prepare_for_sys_resume();

	return 0;
}

void __dead2 rockchip_soc_soft_reset(void)
{
	struct gpio_info *rst_gpio;

	rst_gpio = plat_get_rockchip_gpio_reset();

	if (rst_gpio) {
		gpio_set_direction(rst_gpio->index, GPIO_DIR_OUT);
		gpio_set_value(rst_gpio->index, rst_gpio->polarity);
	} else {
		soc_global_soft_reset();
	}

	while (1)
		;
}

void __dead2 rockchip_soc_system_off(void)
{
	struct gpio_info *poweroff_gpio;

	poweroff_gpio = plat_get_rockchip_gpio_poweroff();

	if (poweroff_gpio) {
		/*
		 * if the tsadc over-temperature pin (GPIO1A6) is used as
		 * the shutdown gpio, its iomux must be set back to the
		 * gpio function first
		 */
		if (poweroff_gpio->index == TSADC_INT_PIN) {
			mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO1A_IOMUX,
				      GPIO1A6_IOMUX);
		}
		gpio_set_direction(poweroff_gpio->index, GPIO_DIR_OUT);
		gpio_set_value(poweroff_gpio->index, poweroff_gpio->polarity);
	} else {
		WARN("Do nothing when system off\n");
	}

	while (1)
		;
}
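/*
 * Map BL31's SRAM sections at EL3 so the suspend/resume code can run
 * from and copy into SRAM. All regions are identity-mapped; the incbin
 * region is mapped non-cacheable, presumably so that the embedded image
 * reaches SRAM without extra cache maintenance.
 */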
void rockchip_plat_mmu_el3(void)
{
	size_t sram_size;

	/* sram.text size */
	sram_size = (char *)&__bl31_sram_text_end -
		    (char *)&__bl31_sram_text_start;
	mmap_add_region((unsigned long)&__bl31_sram_text_start,
			(unsigned long)&__bl31_sram_text_start,
			sram_size, MT_MEMORY | MT_RO | MT_SECURE);

	/* sram.data size */
	sram_size = (char *)&__bl31_sram_data_end -
		    (char *)&__bl31_sram_data_start;
	mmap_add_region((unsigned long)&__bl31_sram_data_start,
			(unsigned long)&__bl31_sram_data_start,
			sram_size, MT_MEMORY | MT_RW | MT_SECURE);

	sram_size = (char *)&__bl31_sram_stack_end -
		    (char *)&__bl31_sram_stack_start;
	mmap_add_region((unsigned long)&__bl31_sram_stack_start,
			(unsigned long)&__bl31_sram_stack_start,
			sram_size, MT_MEMORY | MT_RW | MT_SECURE);

	sram_size = (char *)&__sram_incbin_end - (char *)&__sram_incbin_start;
	mmap_add_region((unsigned long)&__sram_incbin_start,
			(unsigned long)&__sram_incbin_start,
			sram_size, MT_NON_CACHEABLE | MT_RW | MT_SECURE);
}

void plat_rockchip_pmu_init(void)
{
	uint32_t cpu;

	rockchip_pd_lock_init();

	/* the boot address register only holds 32 bits, so truncate */
	cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;

	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
		cpuson_flags[cpu] = 0;

	for (cpu = 0; cpu < PLATFORM_CLUSTER_COUNT; cpu++)
		clst_warmboot_data[cpu] = 0;

	/* configure the cpus' warm boot address */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	mmio_write_32(PMU_BASE + PMU_NOC_AUTO_ENA, NOC_AUTO_ENABLE);

	/*
	 * Enable the Schmitt trigger for a cleaner 32 kHz input signal,
	 * which is important for suspend/resume reliability among other
	 * things.
	 */
	mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO0A_SMT, GPIO0A0_SMT_ENABLE);

	init_pmu_counts();

	nonboot_cpus_off();

	INFO("%s(%d): pd status %x\n", __func__, __LINE__,
	     mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}