/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <ddr_rk3368.h>
#include <debug.h>
#include <delay_timer.h>
#include <errno.h>
#include <mmio.h>
#include <plat_private.h>
#include <platform.h>
#include <platform_def.h>
#include <pmu.h>
#include <pmu_com.h>
#include <rk3368_def.h>
#include <soc.h>

DEFINE_BAKERY_LOCK(rockchip_pd_lock);

static uint32_t cpu_warm_boot_addr;

void rk3368_flash_l2_b(void)
{
	uint32_t wait_cnt = 0;

	regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_l2flsh_clst_b);
	dsb();

	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST)
		 & BIT(clst_b_l2_flsh_done))) {
		wait_cnt++;
		if (!(wait_cnt % MAX_WAIT_CONUT))
			WARN("%s:reg %x,wait\n", __func__,
			     mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}

	regs_updata_bit_clr(PMU_BASE + PMU_SFT_CON, pmu_sft_l2flsh_clst_b);
}

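/*
 * Request (idle == 1) or release (idle == 0) an idle handshake for one of
 * the PMU bus idle channels. The request bit is set or cleared in
 * PMU_BUS_IDE_REQ, then PMU_BUS_IDE_ST is polled until the acknowledge and
 * status bits reach the expected target. Note the cxcs and cci400 channels
 * appear to acknowledge with inverted polarity, hence their (!idle) targets.
 *
 * Usage sketch, as done by pmu_scu_b_pwrdn()/pmu_scu_b_pwrup() below:
 *
 *	rk3368_pmu_bus_idle(bus_ide_req_clst_b, 1);	// idle the bus
 *	...power the domain down and back up...
 *	rk3368_pmu_bus_idle(bus_ide_req_clst_b, 0);	// release it
 */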
static inline int rk3368_pmu_bus_idle(uint32_t req, uint32_t idle)
{
	uint32_t mask = BIT(req);
	uint32_t idle_mask = 0;
	uint32_t idle_target = 0;
	uint32_t val;
	uint32_t wait_cnt = 0;

	switch (req) {
	case bus_ide_req_clst_l:
		idle_mask = BIT(pmu_idle_ack_cluster_l);
		idle_target = (idle << pmu_idle_ack_cluster_l);
		break;

	case bus_ide_req_clst_b:
		idle_mask = BIT(pmu_idle_ack_cluster_b);
		idle_target = (idle << pmu_idle_ack_cluster_b);
		break;

	case bus_ide_req_cxcs:
		idle_mask = BIT(pmu_idle_ack_cxcs);
		idle_target = ((!idle) << pmu_idle_ack_cxcs);
		break;

	case bus_ide_req_cci400:
		idle_mask = BIT(pmu_idle_ack_cci400);
		idle_target = ((!idle) << pmu_idle_ack_cci400);
		break;

	case bus_ide_req_gpu:
		idle_mask = BIT(pmu_idle_ack_gpu) | BIT(pmu_idle_gpu);
		idle_target = (idle << pmu_idle_ack_gpu) |
			      (idle << pmu_idle_gpu);
		break;

	case bus_ide_req_core:
		idle_mask = BIT(pmu_idle_ack_core) | BIT(pmu_idle_core);
		idle_target = (idle << pmu_idle_ack_core) |
			      (idle << pmu_idle_core);
		break;

	case bus_ide_req_bus:
		idle_mask = BIT(pmu_idle_ack_bus) | BIT(pmu_idle_bus);
		idle_target = (idle << pmu_idle_ack_bus) |
			      (idle << pmu_idle_bus);
		break;

	case bus_ide_req_dma:
		idle_mask = BIT(pmu_idle_ack_dma) | BIT(pmu_idle_dma);
		idle_target = (idle << pmu_idle_ack_dma) |
			      (idle << pmu_idle_dma);
		break;

	case bus_ide_req_peri:
		idle_mask = BIT(pmu_idle_ack_peri) | BIT(pmu_idle_peri);
		idle_target = (idle << pmu_idle_ack_peri) |
			      (idle << pmu_idle_peri);
		break;

	case bus_ide_req_video:
		idle_mask = BIT(pmu_idle_ack_video) | BIT(pmu_idle_video);
		idle_target = (idle << pmu_idle_ack_video) |
			      (idle << pmu_idle_video);
		break;

	case bus_ide_req_vio:
		idle_mask = BIT(pmu_idle_ack_vio) | BIT(pmu_idle_vio);
		idle_target = (idle << pmu_idle_ack_vio) |
			      (idle << pmu_idle_vio);
		break;

	case bus_ide_req_alive:
		idle_mask = BIT(pmu_idle_ack_alive) | BIT(pmu_idle_alive);
		idle_target = (idle << pmu_idle_ack_alive) |
			      (idle << pmu_idle_alive);
		break;

	case bus_ide_req_pmu:
		idle_mask = BIT(pmu_idle_ack_pmu) | BIT(pmu_idle_pmu);
		idle_target = (idle << pmu_idle_ack_pmu) |
			      (idle << pmu_idle_pmu);
		break;

	case bus_ide_req_msch:
		idle_mask = BIT(pmu_idle_ack_msch) | BIT(pmu_idle_msch);
		idle_target = (idle << pmu_idle_ack_msch) |
			      (idle << pmu_idle_msch);
		break;

	case bus_ide_req_cci:
		idle_mask = BIT(pmu_idle_ack_cci) | BIT(pmu_idle_cci);
		idle_target = (idle << pmu_idle_ack_cci) |
			      (idle << pmu_idle_cci);
		break;

	default:
		ERROR("%s: unsupported bus idle request %d\n", __func__, req);
		return -EINVAL;
	}

	val = mmio_read_32(PMU_BASE + PMU_BUS_IDE_REQ);
	if (idle)
		val |= mask;
	else
		val &= ~mask;

	mmio_write_32(PMU_BASE + PMU_BUS_IDE_REQ, val);

	while ((mmio_read_32(PMU_BASE +
			     PMU_BUS_IDE_ST) & idle_mask) != idle_target) {
		wait_cnt++;
		if (!(wait_cnt % MAX_WAIT_CONUT))
			WARN("%s:st=%x(%x)\n", __func__,
			     mmio_read_32(PMU_BASE + PMU_BUS_IDE_ST),
			     idle_mask);
	}

	return 0;
}

void pmu_scu_b_pwrup(void)
{
	regs_updata_bit_clr(PMU_BASE + PMU_SFT_CON, pmu_sft_acinactm_clst_b);
	rk3368_pmu_bus_idle(bus_ide_req_clst_b, 0);
}

static void pmu_scu_b_pwrdn(void)
{
	uint32_t wait_cnt = 0;

	if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) &
	     PM_PWRDM_CPUSB_MSK) != PM_PWRDM_CPUSB_MSK) {
		ERROR("%s: not all big-cluster cpus are off\n", __func__);
		return;
	}

	rk3368_flash_l2_b();

	regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_acinactm_clst_b);

	while (!(mmio_read_32(PMU_BASE +
			      PMU_CORE_PWR_ST) & BIT(clst_b_l2_wfi))) {
		wait_cnt++;
		if (!(wait_cnt % MAX_WAIT_CONUT))
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}
	rk3368_pmu_bus_idle(bus_ide_req_clst_b, 1);
}

static void pmu_sleep_mode_config(void)
{
	uint32_t pwrmd_core, pwrmd_com;

	pwrmd_core = BIT(pmu_mdcr_cpu0_pd) |
		     BIT(pmu_mdcr_scu_l_pd) |
		     BIT(pmu_mdcr_l2_flush) |
		     BIT(pmu_mdcr_l2_idle) |
		     BIT(pmu_mdcr_clr_clst_l) |
		     BIT(pmu_mdcr_clr_core) |
		     BIT(pmu_mdcr_clr_cci) |
		     BIT(pmu_mdcr_core_pd);

	pwrmd_com = BIT(pmu_mode_en) |
		    BIT(pmu_mode_sref_enter) |
		    BIT(pmu_mode_pwr_off);

	regs_updata_bit_set(PMU_BASE + PMU_WKUP_CFG2, pmu_cluster_l_wkup_en);
	regs_updata_bit_set(PMU_BASE + PMU_WKUP_CFG2, pmu_cluster_b_wkup_en);
	regs_updata_bit_clr(PMU_BASE + PMU_WKUP_CFG2, pmu_gpio_wkup_en);

	mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_24M_CNT_MS(2));
	mmio_write_32(PMU_BASE + PMU_PLLRST_CNT, CYCL_24M_CNT_US(100));
	mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_24M_CNT_MS(2));
	mmio_write_32(PMU_BASE + PMU_PWRMD_CORE, pwrmd_core);
	mmio_write_32(PMU_BASE + PMU_PWRMD_COM, pwrmd_com);
	dsb();
}

static void pmu_set_sleep_mode(void)
{
	pmu_sleep_mode_config();
	soc_sleep_config();
	regs_updata_bit_set(PMU_BASE + PMU_PWRMD_CORE, pmu_mdcr_global_int_dis);
	regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_glbl_int_dis_b);
	pmu_scu_b_pwrdn();
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      ((uintptr_t)&pmu_cpuson_entrypoint >>
		       CPU_BOOT_ADDR_ALIGN) | CPU_BOOT_ADDR_WMASK);
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(2),
		      ((uintptr_t)&pmu_cpuson_entrypoint >>
		       CPU_BOOT_ADDR_ALIGN) | CPU_BOOT_ADDR_WMASK);
}

static int cpus_id_power_domain(uint32_t cluster,
				uint32_t cpu,
				uint32_t pd_state,
				uint32_t wfie_msk)
{
	uint32_t pd;
	uint64_t mpidr;

	if (cluster)
		pd = PD_CPUB0 + cpu;
	else
		pd = PD_CPUL0 + cpu;

	if (pmu_power_domain_st(pd) == pd_state)
		return 0;

	if (pd_state == pmu_pd_off) {
		mpidr = (cluster << MPIDR_AFF1_SHIFT) | cpu;
		if (check_cpu_wfie(mpidr, wfie_msk))
			return -EINVAL;
	}

	return pmu_power_domain_ctr(pd, pd_state);
}

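/*
 * Power off every core except the calling one. The boot cpu/cluster pair is
 * read back from MPIDR_EL1, so this works regardless of which core the boot
 * ROM started on.
 */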
static void nonboot_cpus_off(void)
{
	uint32_t boot_cpu, boot_cluster, cpu;

	boot_cpu = MPIDR_AFFLVL0_VAL(read_mpidr_el1());
	boot_cluster = MPIDR_AFFLVL1_VAL(read_mpidr_el1());

	/* turn off the non-boot cpus */
	for (cpu = 0; cpu < PLATFORM_CLUSTER0_CORE_COUNT; cpu++) {
		if (!boot_cluster && (cpu == boot_cpu))
			continue;
		cpus_id_power_domain(0, cpu, pmu_pd_off, CKECK_WFEI_MSK);
	}

	for (cpu = 0; cpu < PLATFORM_CLUSTER1_CORE_COUNT; cpu++) {
		if (boot_cluster && (cpu == boot_cpu))
			continue;
		cpus_id_power_domain(1, cpu, pmu_pd_off, CKECK_WFEI_MSK);
	}
}

void sram_save(void)
{
	/* TODO: support the sram save for rk3368 SoCs */
}

void sram_restore(void)
{
	/* TODO: support the sram restore for rk3368 SoCs */
}

int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint)
{
	uint32_t cpu, cluster;
	uint32_t cpuon_id;

	cpu = MPIDR_AFFLVL0_VAL(mpidr);
	cluster = MPIDR_AFFLVL1_VAL(mpidr);

	/* Make sure the cpu is off before powering it up */
	cpus_id_power_domain(cluster, cpu, pmu_pd_off, CKECK_WFEI_MSK);

	cpuon_id = (cluster * PLATFORM_CLUSTER0_CORE_COUNT) + cpu;
	assert(cpuon_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpuon_id] == 0);
	cpuson_flags[cpuon_id] = PMU_CPU_HOTPLUG;
	cpuson_entry_point[cpuon_id] = entrypoint;

	/* Switch the boot address to pmusram */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1 + cluster),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	dsb();

	cpus_id_power_domain(cluster, cpu, pmu_pd_on, CKECK_WFEI_MSK);

	/* Restore the cold boot address for subsequent resets */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1 + cluster),
		      (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	return 0;
}

int rockchip_soc_cores_pwr_dm_on_finish(void)
{
	return 0;
}

int rockchip_soc_sys_pwr_dm_resume(void)
{
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
		      (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(2),
		      (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	pm_plls_resume();
	pmu_scu_b_pwrup();

	return 0;
}

int rockchip_soc_sys_pwr_dm_suspend(void)
{
	nonboot_cpus_off();
	pmu_set_sleep_mode();

	return 0;
}

void rockchip_plat_mmu_el3(void)
{
	/* TODO: support the el3 mmu for rk3368 SoCs */
}

void plat_rockchip_pmu_init(void)
{
	uint32_t cpu;

	/*
	 * The SGRF boot address register only holds a 32-bit value, so the
	 * warm boot entry point is truncated to 32 bits here.
	 */
	cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;

	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
		cpuson_flags[cpu] = 0;

	nonboot_cpus_off();
	INFO("%s(%d): pd status %x\n", __func__, __LINE__,
	     mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}