/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>

#include <platform_def.h>

#include <arch_helpers.h>
#include <bl31/bl31.h>
#include <common/debug.h>
#include <drivers/console.h>
#include <drivers/delay_timer.h>
#include <lib/bakery_lock.h>
#include <lib/mmio.h>
#include <plat/common/platform.h>

#include <plat_private.h>
#include <pmu.h>
#include <pmu_com.h>
#include <rk3328_def.h>

DEFINE_BAKERY_LOCK(rockchip_pd_lock);

static struct rk3328_sleep_ddr_data ddr_data;
static __sramdata struct rk3328_sleep_sram_data sram_data;

static uint32_t cpu_warm_boot_addr;

#pragma weak rk3328_pmic_suspend
#pragma weak rk3328_pmic_resume

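/*
 * Derive how a core is currently configured to be powered down: either by
 * switching off its power domain (core_pwr_pd) or automatically by the PMU
 * once the core enters WFI (core_pwr_wfi). Any other combination of the
 * PMU_PWRDN_CON and PMU_CPUAPM_CON bits is treated as a fatal error.
 */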
static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id)
{
	uint32_t pd_reg, apm_reg;

	pd_reg = mmio_read_32(PMU_BASE + PMU_PWRDN_CON) & BIT(cpu_id);
	apm_reg = mmio_read_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id)) &
		  BIT(core_pm_en);

	if (pd_reg && !apm_reg)
		return core_pwr_pd;
	else if (!pd_reg && apm_reg)
		return core_pwr_wfi;

	ERROR("%s: 0x%x, 0x%x\n", __func__, pd_reg, apm_reg);
	while (1)
		;
}

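/*
 * Power up a secondary core. If the core is configured for power-domain
 * based hotplug, cycle its power domain directly; otherwise arm the PMU
 * software wakeup so that the core leaves its automatic power-down state.
 */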
static int cpus_power_domain_on(uint32_t cpu_id)
{
	uint32_t cpu_pd, cfg_info;

	cpu_pd = PD_CPU0 + cpu_id;
	cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id);

	if (cfg_info == core_pwr_pd) {
		/* disable apm cfg */
		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
			      CORES_PM_DISABLE);

		/* if the core is already on, power it off first */
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
				      CORES_PM_DISABLE);
			pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
		}
		pmu_power_domain_ctr(cpu_pd, pmu_pd_on);
	} else {
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			WARN("%s: cpu%d is not off!\n", __func__, cpu_id);
			return -EINVAL;
		}

		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
			      BIT(core_pm_sft_wakeup_en));
	}

	return 0;
}

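/*
 * Power down a core, either by switching off its power domain directly
 * (core_pwr_pd, only once the core has reached WFI/WFE) or by programming
 * the PMU to power it down automatically on its next WFI. With
 * core_pwr_wfi_int the core can additionally be woken up by an interrupt.
 */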
static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg)
{
	uint32_t cpu_pd, core_pm_value;

	cpu_pd = PD_CPU0 + cpu_id;
	if (pmu_power_domain_st(cpu_pd) == pmu_pd_off)
		return 0;

	if (pd_cfg == core_pwr_pd) {
		if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK))
			return -EINVAL;
		/* disable apm cfg */
		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
			      CORES_PM_DISABLE);
		pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
	} else {
		core_pm_value = BIT(core_pm_en) | BIT(core_pm_dis_int);
		if (pd_cfg == core_pwr_wfi_int)
			core_pm_value |= BIT(core_pm_int_wakeup_en);

		mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
			      core_pm_value);
	}

	return 0;
}

static void nonboot_cpus_off(void)
{
	uint32_t boot_cpu, cpu;

	/* turn off the non-boot cpus */
	boot_cpu = plat_my_core_pos();
	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) {
		if (cpu == boot_cpu)
			continue;
		cpus_power_domain_off(cpu, core_pwr_pd);
	}
}

void sram_save(void)
{
	/* TODO: support the sram save for rk3328 SoCs */
}

void sram_restore(void)
{
	/* TODO: support the sram restore for rk3328 SoCs */
}

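/*
 * Hook on the PSCI CPU_ON path: record the warm-boot entry point and the
 * hotplug flag for the target core in the shared cpuson_* arrays, then
 * power the core up.
 */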
int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint)
{
	uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr);

	assert(cpu_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG;
	cpuson_entry_point[cpu_id] = entrypoint;
	dsb();

	cpus_power_domain_on(cpu_id);

	return 0;
}

int rockchip_soc_cores_pwr_dm_off(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	cpus_power_domain_off(cpu_id, core_pwr_wfi);

	return 0;
}

int rockchip_soc_cores_pwr_dm_suspend(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	assert(cpu_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN;
	cpuson_entry_point[cpu_id] = (uintptr_t)plat_get_sec_entrypoint();
	dsb();

	cpus_power_domain_off(cpu_id, core_pwr_wfi_int);

	return 0;
}

int rockchip_soc_cores_pwr_dm_on_finish(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), CORES_PM_DISABLE);

	return 0;
}

int rockchip_soc_cores_pwr_dm_resume(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), CORES_PM_DISABLE);

	return 0;
}

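/*
 * System soft reset: force all PLLs into slow (bypass) mode first so the
 * clock tree is in a safe state, then trigger the CRU first global software
 * reset and spin until the reset takes effect.
 */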
void __dead2 rockchip_soc_soft_reset(void)
{
	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(CPLL_ID));
	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(GPLL_ID));
	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(NPLL_ID));
	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(APLL_ID));
	dsb();

	mmio_write_32(CRU_BASE + CRU_GLB_SRST_FST, CRU_GLB_SRST_FST_VALUE);
	dsb();
	/*
	 * The HW may need some time to reset the system,
	 * so do not let the core execute any further code.
	 */
	while (1)
		;
}

/*
 * For the PMIC RK805, the sleep pin is connected to gpio2_d2 of the rk3328.
 * If the PMIC is configured to power off in response to the sleep pin,
 * driving the pin high powers the PMIC off.
 */
void __dead2 rockchip_soc_system_off(void)
{
	uint32_t val;

	/* gpio config */
	val = mmio_read_32(GRF_BASE + GRF_GPIO2D_IOMUX);
	val &= ~GPIO2_D2_GPIO_MODE;
	mmio_write_32(GRF_BASE + GRF_GPIO2D_IOMUX, val);

	/* config output */
	val = mmio_read_32(GPIO2_BASE + SWPORTA_DDR);
	val |= GPIO2_D2;
	mmio_write_32(GPIO2_BASE + SWPORTA_DDR, val);

	/* config output high level */
	val = mmio_read_32(GPIO2_BASE);
	val |= GPIO2_D2;
	mmio_write_32(GPIO2_BASE, val);
	dsb();

	while (1)
		;
}

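/*
 * Per CRU_CLKGATE_CON register masks of clock gate bits that are left
 * untouched during suspend; every other clock in these registers is gated
 * by clks_gating_suspend() and restored by clks_gating_resume().
 */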
static uint32_t clk_ungt_msk[CRU_CLKGATE_NUMS] = {
	0x187f, 0x0000, 0x010c, 0x0000, 0x0200,
	0x0010, 0x0000, 0x0017, 0x001f, 0x0000,
	0x0000, 0x0000, 0x0000, 0x0003, 0x0000,
	0xf001, 0x27c0, 0x04D9, 0x03ff, 0x0000,
	0x0000, 0x0000, 0x0010, 0x0000, 0x0000,
	0x0000, 0x0000, 0x0003, 0x0008
};

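/*
 * Gate every clock not covered by ungt_msk, saving the current gate state
 * so that clks_gating_resume() can restore it on the resume path.
 */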
static void clks_gating_suspend(uint32_t *ungt_msk)
{
	int i;

	for (i = 0; i < CRU_CLKGATE_NUMS; i++) {
		ddr_data.clk_ungt_save[i] =
			mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(i));
		mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(i),
			      ((~ungt_msk[i]) << 16) | 0xffff);
	}
}

static void clks_gating_resume(void)
{
	int i;

	for (i = 0; i < CRU_CLKGATE_NUMS; i++)
		mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(i),
			      ddr_data.clk_ungt_save[i] | 0xffff0000);
}

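/*
 * Poll the PLL lock bit until it is set or the PLL_LOCKED_TIMEOUT loop
 * count expires; a timeout is reported but deliberately not treated as
 * fatal.
 */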
static inline void pm_pll_wait_lock(uint32_t pll_id)
{
	uint32_t delay = PLL_LOCKED_TIMEOUT;

	while (delay > 0) {
		if (mmio_read_32(CRU_BASE + PLL_CONS(pll_id, 1)) &
		    PLL_IS_LOCKED)
			break;
		delay--;
	}
	if (delay == 0)
		ERROR("lock-pll: %d\n", pll_id);
}

static inline void pll_pwr_dwn(uint32_t pll_id, uint32_t pd)
{
	mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
		      BITS_WITH_WMASK(1U, 1U, 15));
	if (pd)
		mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
			      BITS_WITH_WMASK(1, 1, 14));
	else
		mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
			      BITS_WITH_WMASK(0, 1, 14));
}

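/*
 * The DPLL supplies the DDR clock, so it is handled by SRAM-resident code
 * while DRAM is in self-refresh: switch it to slow mode, save its
 * configuration and power it down on suspend; power it up, wait for lock
 * and return it to normal mode on resume.
 */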
static __sramfunc void dpll_suspend(void)
{
	int i;

	/* slow mode */
	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(DPLL_ID));

	/* save pll con */
	for (i = 0; i < CRU_PLL_CON_NUMS; i++)
		sram_data.dpll_con_save[i] =
			mmio_read_32(CRU_BASE + PLL_CONS(DPLL_ID, i));
	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
		      BITS_WITH_WMASK(1U, 1U, 15));
	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
		      BITS_WITH_WMASK(1, 1, 14));
}

static __sramfunc void dpll_resume(void)
{
	uint32_t delay = PLL_LOCKED_TIMEOUT;

	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
		      BITS_WITH_WMASK(1U, 1U, 15));
	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
		      BITS_WITH_WMASK(0, 1, 14));
	mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
		      sram_data.dpll_con_save[1] | 0xc0000000);

	dsb();

	while (delay > 0) {
		if (mmio_read_32(CRU_BASE + PLL_CONS(DPLL_ID, 1)) &
		    PLL_IS_LOCKED)
			break;
		delay--;
	}
	if (delay == 0)
		while (1)
			;

	mmio_write_32(CRU_BASE + CRU_CRU_MODE,
		      PLL_NORM_MODE(DPLL_ID));
}

static inline void pll_suspend(uint32_t pll_id)
{
	int i;

	/* slow mode */
	mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(pll_id));

	/* save pll con */
	for (i = 0; i < CRU_PLL_CON_NUMS; i++)
		ddr_data.cru_plls_con_save[pll_id][i] =
			mmio_read_32(CRU_BASE + PLL_CONS(pll_id, i));

	/* powerdown pll */
	pll_pwr_dwn(pll_id, pmu_pd_off);
}

static inline void pll_resume(uint32_t pll_id)
{
	mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
		      ddr_data.cru_plls_con_save[pll_id][1] | 0xc0000000);

	pm_pll_wait_lock(pll_id);

	if (PLL_IS_NORM_MODE(ddr_data.cru_mode_save, pll_id))
		mmio_write_32(CRU_BASE + CRU_CRU_MODE,
			      PLL_NORM_MODE(pll_id));
}

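/*
 * Suspend path for the remaining PLLs: save the CRU mode and the clock
 * select registers touched below, power down NPLL/CPLL/GPLL/APLL, and
 * switch the listed clocks to low dividers or the 24 MHz oscillator so
 * they keep running from the slow clock during suspend.
 */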
static void pm_plls_suspend(void)
{
	ddr_data.cru_mode_save = mmio_read_32(CRU_BASE + CRU_CRU_MODE);
	ddr_data.clk_sel0 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(0));
	ddr_data.clk_sel1 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(1));
	ddr_data.clk_sel18 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(18));
	ddr_data.clk_sel20 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(20));
	ddr_data.clk_sel24 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(24));
	ddr_data.clk_sel38 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(38));
	pll_suspend(NPLL_ID);
	pll_suspend(CPLL_ID);
	pll_suspend(GPLL_ID);
	pll_suspend(APLL_ID);

	/* core */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(0),
		      BITS_WITH_WMASK(0, 0x1f, 0));

	/* pclk_dbg */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(1),
		      BITS_WITH_WMASK(0, 0xf, 0));

	/* crypto */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(20),
		      BITS_WITH_WMASK(0, 0x1f, 0));

	/* pwm0 */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(24),
		      BITS_WITH_WMASK(0, 0x7f, 8));

	/* uart2 from 24M */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(18),
		      BITS_WITH_WMASK(2, 0x3, 8));

	/* clk_rtc32k */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(38),
		      BITS_WITH_WMASK(767, 0x3fff, 0) |
		      BITS_WITH_WMASK(2U, 0x3u, 14));
}

static void pm_plls_resume(void)
{
	/* clk_rtc32k */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(38),
		      ddr_data.clk_sel38 |
		      BITS_WMSK(0x3fff, 0) |
		      BITS_WMSK(0x3u, 14));

	/* uart2 */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(18),
		      ddr_data.clk_sel18 | BITS_WMSK(0x3, 8));

	/* pwm0 */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(24),
		      ddr_data.clk_sel24 | BITS_WMSK(0x7f, 8));

	/* crypto */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(20),
		      ddr_data.clk_sel20 | BITS_WMSK(0x1f, 0));

	/* pclk_dbg */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(1),
		      ddr_data.clk_sel1 | BITS_WMSK(0xf, 0));

	/* core */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(0),
		      ddr_data.clk_sel0 | BITS_WMSK(0x1f, 0));

	pll_pwr_dwn(APLL_ID, pmu_pd_on);
	pll_pwr_dwn(GPLL_ID, pmu_pd_on);
	pll_pwr_dwn(CPLL_ID, pmu_pd_on);
	pll_pwr_dwn(NPLL_ID, pmu_pd_on);

	pll_resume(APLL_ID);
	pll_resume(GPLL_ID);
	pll_resume(CPLL_ID);
	pll_resume(NPLL_ID);
}

#define ARCH_TIMER_TICKS_PER_US	(SYS_COUNTER_FREQ_IN_TICKS / 1000000)

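/*
 * Busy-wait for roughly the requested number of microseconds using the
 * generic timer counter. This runs from SRAM, so it cannot rely on the
 * regular delay-timer driver resident in DRAM.
 */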
static __sramfunc void sram_udelay(uint32_t us)
{
	uint64_t pct_orig, pct_now;
	uint64_t to_wait = ARCH_TIMER_TICKS_PER_US * us;

	isb();
	pct_orig = read_cntpct_el0();

	do {
		isb();
		pct_now = read_cntpct_el0();
	} while ((pct_now - pct_orig) <= to_wait);
}

/*
 * For the PMIC RK805, the sleep pin is connected to gpio2_d2 of the rk3328.
 * If the PMIC is configured to enter sleep mode in response to the sleep
 * pin, driving the pin high puts the PMIC into sleep mode.
 */
__sramfunc void rk3328_pmic_suspend(void)
{
	sram_data.pmic_sleep_save = mmio_read_32(GRF_BASE + PMIC_SLEEP_REG);
	sram_data.pmic_sleep_gpio_save[1] = mmio_read_32(GPIO2_BASE + 4);
	sram_data.pmic_sleep_gpio_save[0] = mmio_read_32(GPIO2_BASE);
	mmio_write_32(GRF_BASE + PMIC_SLEEP_REG, BITS_WITH_WMASK(0, 0x3, 4));
	mmio_write_32(GPIO2_BASE + 4,
		      sram_data.pmic_sleep_gpio_save[1] | BIT(26));
	mmio_write_32(GPIO2_BASE,
		      sram_data.pmic_sleep_gpio_save[0] | BIT(26));
}

__sramfunc void rk3328_pmic_resume(void)
{
	mmio_write_32(GPIO2_BASE, sram_data.pmic_sleep_gpio_save[0]);
	mmio_write_32(GPIO2_BASE + 4, sram_data.pmic_sleep_gpio_save[1]);
	mmio_write_32(GRF_BASE + PMIC_SLEEP_REG,
		      sram_data.pmic_sleep_save | BITS_WMSK(0xffffu, 0));
	/* Restoring the voltage takes a while */
	sram_udelay(100);
}

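/*
 * Suspend path for the DRAM controller: request self-refresh via
 * PMU_SFT_CON, wait for the DDR GRF status bits to confirm it, assert DDR
 * retention, gate the DDR related clocks and finally power down the DPLL.
 * This must run from SRAM since DRAM contents become inaccessible.
 */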
static __sramfunc void ddr_suspend(void)
{
	sram_data.pd_sr_idle_save = mmio_read_32(DDR_UPCTL_BASE +
						 DDR_PCTL2_PWRCTL);
	sram_data.pd_sr_idle_save &= SELFREF_EN;

	mmio_clrbits_32(DDR_UPCTL_BASE + DDR_PCTL2_PWRCTL, SELFREF_EN);
	sram_data.ddr_grf_con0 = mmio_read_32(DDR_GRF_BASE +
					      DDRGRF_SOC_CON(0));
	mmio_write_32(DDR_GRF_BASE, BIT_WITH_WMSK(14) | WMSK_BIT(15));

	/*
	 * Override csysreq from the ddrc and send a valid csysreq signal
	 * to the PMU; csysreq is normally controlled by the ddrc only.
	 */

	/* in self-refresh */
	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(0));
	while ((mmio_read_32(DDR_GRF_BASE + DDRGRF_SOC_STATUS(1)) &
		(0x03 << 12)) != (0x02 << 12))
		;
	/* ddr retention */
	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(2));

	/* ddr gating */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(0),
		      BITS_WITH_WMASK(0x7, 0x7, 4));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(7),
		      BITS_WITH_WMASK(1, 1, 4));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(18),
		      BITS_WITH_WMASK(0x1ff, 0x1ff, 1));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(27),
		      BITS_WITH_WMASK(0x3, 0x3, 0));

	dpll_suspend();
}

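/*
 * Counterpart of ddr_suspend(): bring the DPLL back up, ungate the DDR
 * clocks, release retention, take DRAM out of self-refresh and restore the
 * saved controller state.
 */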
__sramfunc void dmc_restore(void)
{
	dpll_resume();

	/* ddr gating */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(0),
		      BITS_WITH_WMASK(0, 0x7, 4));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(7),
		      BITS_WITH_WMASK(0, 1, 4));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(18),
		      BITS_WITH_WMASK(0, 0x1ff, 1));
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(27),
		      BITS_WITH_WMASK(0, 0x3, 0));

	/* ddr de_retention */
	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(2));
	/* exit self-refresh */
	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(0));
	while ((mmio_read_32(DDR_GRF_BASE + DDRGRF_SOC_STATUS(1)) &
		(0x03 << 12)) != (0x00 << 12))
		;

	mmio_write_32(DDR_GRF_BASE, sram_data.ddr_grf_con0 | 0xc0000000);
	if (sram_data.pd_sr_idle_save)
		mmio_setbits_32(DDR_UPCTL_BASE + DDR_PCTL2_PWRCTL,
				SELFREF_EN);
}

static __sramfunc void sram_dbg_uart_suspend(void)
{
	sram_data.uart2_ier = mmio_read_32(UART2_BASE + UART_IER);
	mmio_write_32(UART2_BASE + UART_IER, UART_INT_DISABLE);
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(16), 0x20002000);
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(2), 0x00040004);
}

__sramfunc void sram_dbg_uart_resume(void)
{
	/* restore uart clk and reset fifo */
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(16), 0x20000000);
	mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(2), 0x00040000);
	mmio_write_32(UART2_BASE + UART_FCR, UART_FIFO_RESET);
	mmio_write_32(UART2_BASE + UART_IER, sram_data.uart2_ier);
}

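/*
 * Final step of the suspend sequence: configure the PMU to power down the
 * boot core automatically on WFI with interrupt wakeup enabled, then
 * execute WFI. The SoC enters its low-power state at that point, so
 * execution should not return here; the loop guards against spurious
 * wakeups.
 */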
static __sramfunc void sram_soc_enter_lp(void)
{
	uint32_t apm_value;

	apm_value = BIT(core_pm_en) |
		    BIT(core_pm_dis_int) |
		    BIT(core_pm_int_wakeup_en);
	mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(PD_CPU0), apm_value);

	dsb();
	isb();
err_loop:
	wfi();
	/*
	 * The SoC will enter its low power mode here and
	 * should not return to this point.
	 */
	goto err_loop;
}

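/*
 * SRAM-resident tail of the system suspend sequence. Runs with the MMU and
 * I-cache disabled, redirects the warm-boot address to the SRAM entry
 * point, and takes DRAM, the PMIC and the debug UART down before entering
 * the low-power state.
 */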
__sramfunc void sram_suspend(void)
{
	/* disable mmu and icache */
	disable_mmu_icache_el3();
	tlbialle3();
	dsbsy();
	isb();

	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      ((uintptr_t)&pmu_cpuson_entrypoint >>
			CPU_BOOT_ADDR_ALIGN) | CPU_BOOT_ADDR_WMASK);

	/* ddr self-refresh and gating phy */
	ddr_suspend();

	rk3328_pmic_suspend();

	sram_dbg_uart_suspend();

	sram_soc_enter_lp();
}

void rockchip_soc_sys_pd_pwr_dn_wfi(void)
{
	sram_suspend();
}

int rockchip_soc_sys_pwr_dm_suspend(void)
{
	clks_gating_suspend(clk_ungt_msk);

	pm_plls_suspend();

	return 0;
}

int rockchip_soc_sys_pwr_dm_resume(void)
{
	pm_plls_resume();

	clks_gating_resume();

	plat_rockchip_gic_cpuif_enable();

	return 0;
}

void rockchip_plat_mmu_el3(void)
{
	/* TODO: support the EL3 MMU setup for rk3328 SoCs */
}

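/*
 * One-time PMU setup during BL31 boot: clear the per-core power-on flags,
 * program the warm-boot entry address into SGRF_SOC_CON(1) and power down
 * every core except the boot core.
 */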
void plat_rockchip_pmu_init(void)
{
	uint32_t cpu;

	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
		cpuson_flags[cpu] = 0;

	cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;

	/* the warm boot address of the cpus */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	nonboot_cpus_off();

	INFO("%s: pd status 0x%x\n",
	     __func__, mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}