/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bakery_lock.h>
#include <debug.h>
#include <delay_timer.h>
#include <errno.h>
#include <mmio.h>
#include <platform.h>
#include <platform_def.h>
#include <plat_private.h>
#include <rk3399_def.h>
#include <pmu_sram.h>
#include <soc.h>
#include <pmu.h>
#include <pmu_com.h>

static struct psram_data_t *psram_sleep_cfg =
	(struct psram_data_t *)PSRAM_DT_BASE;

static uint32_t cpu_warm_boot_addr;

/*
 * There are two ways to power a core on or off:
 * 1) Switch its power domain on or off directly through the PMU_PWRDN_CON
 *    register; this is the core_pwr_pd mode.
 * 2) Enable core power management in the PMU_CORE_PM_CON register; the
 *    power domain is then powered off automatically once the core enters
 *    wfi. This is the core_pwr_wfi or core_pwr_wfi_int mode.
 * core_pm_cfg_info records which of the two methods is in use for each core.
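 *
 * A core powered down with method 2) has its PMU_CORE_PM_CON bits cleared
 * again (CORES_PM_DISABLE) once it comes back up; see
 * cores_pwr_domain_on_finish() and cores_pwr_domain_resume() below.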
 */

static uint32_t core_pm_cfg_info[PLATFORM_CORE_COUNT]
#if USE_COHERENT_MEM
__attribute__ ((section("tzfw_coherent_mem")))
#endif
; /* coherent */

void rk3399_flash_l2_b(void)
{
	uint32_t wait_cnt = 0;

	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B));
	dsb();

	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
		 BIT(L2_FLUSHDONE_CLUSTER_B))) {
		wait_cnt++;
		if (!(wait_cnt % MAX_WAIT_CONUT))
			WARN("%s:reg %x,wait\n", __func__,
			     mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}

	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B));
}

static void pmu_scu_b_pwrdn(void)
{
	uint32_t wait_cnt = 0;

	if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) &
	     (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) !=
	    (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) {
		ERROR("%s: not all cpus are off\n", __func__);
		return;
	}

	rk3399_flash_l2_b();

	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG));

	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
		 BIT(STANDBY_BY_WFIL2_CLUSTER_B))) {
		wait_cnt++;
		if (!(wait_cnt % MAX_WAIT_CONUT))
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}
}

static void pmu_scu_b_pwrup(void)
{
	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG));
}

void plat_rockchip_pmusram_prepare(void)
{
	uint32_t *sram_dst, *sram_src;
	size_t sram_size;

	/*
	 * Copy the cpu power-up entrypoint code into PMUSRAM.
	 */
	sram_dst = (uint32_t *)PMUSRAM_BASE;
	sram_src = (uint32_t *)&pmu_cpuson_entrypoint_start;
	sram_size = (uint32_t *)&pmu_cpuson_entrypoint_end -
		    (uint32_t *)sram_src;

	u32_align_cpy(sram_dst, sram_src, sram_size);

	psram_sleep_cfg->sp = PSRAM_DT_BASE;
}

static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id)
{
	assert(cpu_id < PLATFORM_CORE_COUNT);
	return core_pm_cfg_info[cpu_id];
}

static inline void set_cpus_pwr_domain_cfg_info(uint32_t cpu_id, uint32_t value)
{
	assert(cpu_id < PLATFORM_CORE_COUNT);
	core_pm_cfg_info[cpu_id] = value;
#if !USE_COHERENT_MEM
	flush_dcache_range((uintptr_t)&core_pm_cfg_info[cpu_id],
			   sizeof(uint32_t));
#endif
}

static int cpus_power_domain_on(uint32_t cpu_id)
{
	uint32_t cfg_info;
	uint32_t cpu_pd = PD_CPUL0 + cpu_id;
	/*
	 * There are two ways to power a core on or off:
	 * 1) Switch its power domain on or off directly through the
	 *    PMU_PWRDN_CON register.
	 * 2) Enable core power management in the PMU_CORE_PM_CON register;
	 *    the power domain is then powered off automatically once the
	 *    core enters wfi.
	 */

	cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id);

	if (cfg_info == core_pwr_pd) {
		/* disable core_pm cfg */
		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      CORES_PM_DISABLE);
		/* if the core is already on, power it off first */
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 0);
			pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
		}

		pmu_power_domain_ctr(cpu_pd, pmu_pd_on);
	} else {
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			WARN("%s: cpu%d is not off!\n", __func__, cpu_id);
			return -EINVAL;
		}

		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      BIT(core_pm_sft_wakeup_en));
		dsb();
	}

	return 0;
}

static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg)
{
	uint32_t cpu_pd;
	uint32_t core_pm_value;

	cpu_pd = PD_CPUL0 + cpu_id;
	if (pmu_power_domain_st(cpu_pd) == pmu_pd_off)
		return 0;

	if (pd_cfg == core_pwr_pd) {
		if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK))
			return -EINVAL;

		/* disable core_pm cfg */
		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      CORES_PM_DISABLE);

		set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);
		pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
	} else {
		set_cpus_pwr_domain_cfg_info(cpu_id, pd_cfg);

		core_pm_value = BIT(core_pm_en);
		if (pd_cfg == core_pwr_wfi_int)
			core_pm_value |= BIT(core_pm_int_wakeup_en);
		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      core_pm_value);
		dsb();
	}

	return 0;
}

static void nonboot_cpus_off(void)
{
	uint32_t boot_cpu, cpu;

	boot_cpu = plat_my_core_pos();

	/* turn off the non-boot cpus */
	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) {
		if (cpu == boot_cpu)
			continue;
		cpus_power_domain_off(cpu, core_pwr_pd);
	}
}

static int cores_pwr_domain_on(unsigned long mpidr, uint64_t entrypoint)
{
	uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr);

	assert(cpu_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG;
	cpuson_entry_point[cpu_id] = entrypoint;
	dsb();

	cpus_power_domain_on(cpu_id);

	return 0;
}

static int cores_pwr_domain_off(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	cpus_power_domain_off(cpu_id, core_pwr_wfi);

	return 0;
}

static int cores_pwr_domain_suspend(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	assert(cpu_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN;
	cpuson_entry_point[cpu_id] = (uintptr_t)psci_entrypoint;
	dsb();

	cpus_power_domain_off(cpu_id, core_pwr_wfi_int);

	return 0;
}

static int cores_pwr_domain_on_finish(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	/* Disable core_pm */
	mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), CORES_PM_DISABLE);

	return 0;
}

static int cores_pwr_domain_resume(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	/* Disable core_pm */
	mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), CORES_PM_DISABLE);

	return 0;
}

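/*
 * Configure the PMU for system sleep: request hardware control of the
 * CCI-500 and ADB-400 low-power handshakes, select the ap_pwroff function
 * in the PMUGRF GPIO1A iomux, choose the actions taken on power-mode entry
 * (CPU0 power-down, L2 flush/idle, SCU power-down), pick the cluster
 * wakeup sources and program the 24 MHz stabilisation and SCU power
 * counters.
 */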
static void sys_slp_config(void)
{
	uint32_t slp_mode_cfg = 0;

	mmio_write_32(PMU_BASE + PMU_CCI500_CON,
		      BIT_WITH_WMSK(PMU_CLR_PREQ_CCI500_HW) |
		      BIT_WITH_WMSK(PMU_CLR_QREQ_CCI500_HW) |
		      BIT_WITH_WMSK(PMU_QGATING_CCI500_CFG));

	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      BIT_WITH_WMSK(PMU_CLR_CORE_L_HW) |
		      BIT_WITH_WMSK(PMU_CLR_CORE_L_2GIC_HW) |
		      BIT_WITH_WMSK(PMU_CLR_GIC2_CORE_L_HW));

	mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO1A_IOMUX,
		      BIT_WITH_WMSK(AP_PWROFF));

	slp_mode_cfg = BIT(PMU_PWR_MODE_EN) |
		       BIT(PMU_POWER_OFF_REQ_CFG) |
		       BIT(PMU_CPU0_PD_EN) |
		       BIT(PMU_L2_FLUSH_EN) |
		       BIT(PMU_L2_IDLE_EN) |
		       BIT(PMU_SCU_PD_EN);

	mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, PMU_CLUSTER_L_WKUP_EN);
	mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, PMU_CLUSTER_B_WKUP_EN);
	mmio_clrbits_32(PMU_BASE + PMU_WKUP_CFG4, PMU_GPIO_WKUP_EN);

	mmio_write_32(PMU_BASE + PMU_PWRMODE_CON, slp_mode_cfg);

	mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_24M_CNT_MS(5));
	mmio_write_32(PMU_BASE + PMU_SCU_L_PWRDN_CNT, CYCL_24M_CNT_MS(2));
	mmio_write_32(PMU_BASE + PMU_SCU_L_PWRUP_CNT, CYCL_24M_CNT_MS(2));
	mmio_write_32(PMU_BASE + PMU_SCU_B_PWRDN_CNT, CYCL_24M_CNT_MS(2));
	mmio_write_32(PMU_BASE + PMU_SCU_B_PWRUP_CNT, CYCL_24M_CNT_MS(2));
}

static int sys_pwr_domain_suspend(void)
{
	sys_slp_config();
	plls_suspend();
	pmu_sgrf_rst_hld();

	mmio_write_32(SGRF_BASE + SGRF_SOC_CON0_1(1),
		      (PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	pmu_scu_b_pwrdn();

	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) |
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_SW) |
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_GIC2_CORE_B_SW));
	dsb();
	mmio_setbits_32(PMU_BASE + PMU_PWRDN_CON, BIT(PMU_SCU_B_PWRDWN_EN));

	return 0;
}

static int sys_pwr_domain_resume(void)
{
	pmu_sgrf_rst_hld();

	mmio_write_32(SGRF_BASE + SGRF_SOC_CON0_1(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	plls_resume();

	mmio_write_32(PMU_BASE + PMU_CCI500_CON,
		      WMSK_BIT(PMU_CLR_PREQ_CCI500_HW) |
		      WMSK_BIT(PMU_CLR_QREQ_CCI500_HW) |
		      WMSK_BIT(PMU_QGATING_CCI500_CFG));

	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      WMSK_BIT(PMU_CLR_CORE_L_HW) |
		      WMSK_BIT(PMU_CLR_CORE_L_2GIC_HW) |
		      WMSK_BIT(PMU_CLR_GIC2_CORE_L_HW));

	mmio_clrbits_32(PMU_BASE + PMU_PWRDN_CON,
			BIT(PMU_SCU_B_PWRDWN_EN));

	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) |
		      WMSK_BIT(PMU_PWRDWN_REQ_CORE_B_SW) |
		      WMSK_BIT(PMU_PWRDWN_REQ_GIC2_CORE_B_SW));

	pmu_scu_b_pwrup();

	plat_rockchip_gic_cpuif_enable();
	return 0;
}

static struct rockchip_pm_ops_cb pm_ops = {
	.cores_pwr_dm_on = cores_pwr_domain_on,
	.cores_pwr_dm_off = cores_pwr_domain_off,
	.cores_pwr_dm_on_finish = cores_pwr_domain_on_finish,
	.cores_pwr_dm_suspend = cores_pwr_domain_suspend,
	.cores_pwr_dm_resume = cores_pwr_domain_resume,
	.sys_pwr_dm_suspend = sys_pwr_domain_suspend,
	.sys_pwr_dm_resume = sys_pwr_domain_resume,
	.sys_gbl_soft_reset = soc_global_soft_reset,
};

void plat_rockchip_pmu_init(void)
{
	uint32_t cpu;

	rockchip_pd_lock_init();
	plat_setup_rockchip_pm_ops(&pm_ops);

	/*
	 * The boot address register only holds a 32-bit address, so keep
	 * the warm boot entrypoint as a 32-bit value.
	 */
	cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;

	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
		cpuson_flags[cpu] = 0;

	psram_sleep_cfg->ddr_func = 0x00;
	psram_sleep_cfg->ddr_data = 0x00;
	psram_sleep_cfg->ddr_flag = 0x00;
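	/* remember the boot cpu (mpidr Aff1/Aff0) so the warm boot path can identify it */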
	psram_sleep_cfg->boot_mpidr = read_mpidr_el1() & 0xffff;

	/* cpu boot from pmusram */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON0_1(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	nonboot_cpus_off();

	INFO("%s(%d): pd status %x\n", __func__, __LINE__,
	     mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}