/*
 * Copyright (c) 2021, MediaTek Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/* common headers */
#include <assert.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/psci/psci.h>

/* platform specific headers */
#include <mt_gic_v3.h>
#include <mtspmc.h>
#include <plat/common/platform.h>
#include <plat_mtk_lpm.h>
#include <plat_pm.h>

/*
 * Cluster state request:
 * [0] : The CPU requires cluster power down
 * [1] : The CPU requires cluster power on
 */
#define coordinate_cluster(onoff)	write_clusterpwrdn_el1(onoff)
#define coordinate_cluster_pwron()	coordinate_cluster(1)
#define coordinate_cluster_pwroff()	coordinate_cluster(0)

/* platform secure entry point */
static uintptr_t secure_entrypoint;
/* per-CPU power state */
static unsigned int plat_power_state[PLATFORM_CORE_COUNT];

/* platform CPU power domain - ops */
static const struct mt_lpm_tz *plat_mt_pm;

#define plat_mt_pm_invoke(_name, _cpu, _state) ({ \
	int ret = -1; \
	if (plat_mt_pm != NULL && plat_mt_pm->_name != NULL) { \
		ret = plat_mt_pm->_name(_cpu, _state); \
	} \
	ret; })

#define plat_mt_pm_invoke_no_check(_name, _cpu, _state) ({ \
	if (plat_mt_pm != NULL && plat_mt_pm->_name != NULL) { \
		(void) plat_mt_pm->_name(_cpu, _state); \
	} \
	})
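/*
 * Note: both macros above are statement expressions. plat_mt_pm_invoke()
 * evaluates to the hook's return value, or to -1 when no low-power driver
 * (or that particular hook) has been registered, while
 * plat_mt_pm_invoke_no_check() simply skips absent hooks and discards the
 * result.
 */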
/*
 * Common MTK_platform operations to power on/off a
 * CPU in response to a CPU_ON, CPU_OFF or CPU_SUSPEND request.
 */

static void plat_cpu_pwrdwn_common(unsigned int cpu,
		const psci_power_state_t *state, unsigned int req_pstate)
{
	assert(cpu == plat_my_core_pos());

	plat_mt_pm_invoke_no_check(pwr_cpu_dwn, cpu, state);

	if ((psci_get_pstate_pwrlvl(req_pstate) >= MTK_AFFLVL_CLUSTER) ||
	    (req_pstate == 0U)) { /* hotplug off */
		coordinate_cluster_pwroff();
	}

	/* Prevent interrupts from spuriously waking up this CPU */
	mt_gic_rdistif_save();
	gicv3_cpuif_disable(cpu);
	gicv3_rdistif_off(cpu);
}

static void plat_cpu_pwron_common(unsigned int cpu,
		const psci_power_state_t *state, unsigned int req_pstate)
{
	assert(cpu == plat_my_core_pos());

	plat_mt_pm_invoke_no_check(pwr_cpu_on, cpu, state);

	coordinate_cluster_pwron();

	/* Enable the GIC CPU interface */
	gicv3_rdistif_on(cpu);
	gicv3_cpuif_enable(cpu);
	mt_gic_rdistif_init();

	/*
	 * If mcusys was powered down before, restore all CPUs'
	 * GIC Redistributors; otherwise restore only this CPU's.
	 */
	if (IS_MCUSYS_OFF_STATE(state)) {
		mt_gic_rdistif_restore_all();
	} else {
		mt_gic_rdistif_restore();
	}
}

/*
 * Common MTK_platform operations to power on/off a
 * cluster in response to a CPU_ON, CPU_OFF or CPU_SUSPEND request.
 */

static void plat_cluster_pwrdwn_common(unsigned int cpu,
		const psci_power_state_t *state, unsigned int req_pstate)
{
	assert(cpu == plat_my_core_pos());

	if (plat_mt_pm_invoke(pwr_cluster_dwn, cpu, state) != 0) {
		coordinate_cluster_pwron();

		/*
		 * TODO: return on fail.
		 * Add a 'return' here before adding any code following
		 * the if-block.
		 */
	}
}

static void plat_cluster_pwron_common(unsigned int cpu,
		const psci_power_state_t *state, unsigned int req_pstate)
{
	assert(cpu == plat_my_core_pos());

	if (plat_mt_pm_invoke(pwr_cluster_on, cpu, state) != 0) {
		/*
		 * TODO: return on fail.
		 * Add a 'return' here before adding any code following
		 * the if-block.
		 */
	}
}

/*
 * Common MTK_platform operations to power on/off a
 * mcusys in response to a CPU_ON, CPU_OFF or CPU_SUSPEND request.
 */

static void plat_mcusys_pwrdwn_common(unsigned int cpu,
		const psci_power_state_t *state, unsigned int req_pstate)
{
	assert(cpu == plat_my_core_pos());

	if (plat_mt_pm_invoke(pwr_mcusys_dwn, cpu, state) != 0) {
		return;		/* return on fail */
	}

	mt_gic_distif_save();
	gic_sgi_save_all();
}

static void plat_mcusys_pwron_common(unsigned int cpu,
		const psci_power_state_t *state, unsigned int req_pstate)
{
	assert(cpu == plat_my_core_pos());

	if (plat_mt_pm_invoke(pwr_mcusys_on, cpu, state) != 0) {
		return;		/* return on fail */
	}

	mt_gic_init();
	mt_gic_distif_restore();
	gic_sgi_restore_all();

	plat_mt_pm_invoke_no_check(pwr_mcusys_on_finished, cpu, state);
}

/*
 * plat_psci_ops implementation
 */
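/*
 * Routing physical IRQ/FIQ to EL3 lets a pending interrupt wake the core
 * from wfi without being taken here (interrupts stay masked at EL3).
 * Once SCR_EL3 is restored, the interrupt is handled in the lower EL
 * after the standby call returns.
 */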
static void plat_cpu_standby(plat_local_state_t cpu_state)
{
	uint64_t scr;

	scr = read_scr_el3();
	write_scr_el3(scr | SCR_IRQ_BIT | SCR_FIQ_BIT);

	isb();
	dsb();
	wfi();

	write_scr_el3(scr);
}

static int plat_power_domain_on(u_register_t mpidr)
{
	unsigned int cpu = (unsigned int)plat_core_pos_by_mpidr(mpidr);
	unsigned int cluster = 0U;

	if (cpu >= PLATFORM_CORE_COUNT) {
		return PSCI_E_INVALID_PARAMS;
	}

	if (!spm_get_cluster_powerstate(cluster)) {
		spm_poweron_cluster(cluster);
	}

	/* init CPU reset arch as AARCH64 */
	mcucfg_init_archstate(cluster, cpu, true);
	mcucfg_set_bootaddr(cluster, cpu, secure_entrypoint);
	spm_poweron_cpu(cluster, cpu);

	return PSCI_E_SUCCESS;
}

static void plat_power_domain_on_finish(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();
	unsigned int cpu = (unsigned int)plat_core_pos_by_mpidr(mpidr);

	assert(cpu < PLATFORM_CORE_COUNT);

	/* Allow IRQs to wake up this core in the IDLE flow */
	mcucfg_enable_gic_wakeup(0U, cpu);

	if (IS_CLUSTER_OFF_STATE(state)) {
		plat_cluster_pwron_common(cpu, state, 0U);
	}

	plat_cpu_pwron_common(cpu, state, 0U);
}

static void plat_power_domain_off(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();
	unsigned int cpu = (unsigned int)plat_core_pos_by_mpidr(mpidr);

	assert(cpu < PLATFORM_CORE_COUNT);

	plat_cpu_pwrdwn_common(cpu, state, 0U);
	spm_poweroff_cpu(0U, cpu);

	/* prevent unintended IRQs from waking up the hot-unplugged core */
	mcucfg_disable_gic_wakeup(0U, cpu);

	if (IS_CLUSTER_OFF_STATE(state)) {
		plat_cluster_pwrdwn_common(cpu, state, 0U);
	}
}

static void plat_power_domain_suspend(const psci_power_state_t *state)
{
	unsigned int cpu = plat_my_core_pos();

	assert(cpu < PLATFORM_CORE_COUNT);

	plat_mt_pm_invoke_no_check(pwr_prompt, cpu, state);

	/* Perform the common CPU specific operations */
	plat_cpu_pwrdwn_common(cpu, state, plat_power_state[cpu]);

	if (IS_CLUSTER_OFF_STATE(state)) {
		/* Perform the common cluster specific operations */
		plat_cluster_pwrdwn_common(cpu, state, plat_power_state[cpu]);
	}

	if (IS_MCUSYS_OFF_STATE(state)) {
		/* Perform the common mcusys specific operations */
		plat_mcusys_pwrdwn_common(cpu, state, plat_power_state[cpu]);
	}
}

static void plat_power_domain_suspend_finish(const psci_power_state_t *state)
{
	unsigned int cpu = plat_my_core_pos();

	assert(cpu < PLATFORM_CORE_COUNT);

	if (IS_MCUSYS_OFF_STATE(state)) {
		/* Perform the common mcusys specific operations */
		plat_mcusys_pwron_common(cpu, state, plat_power_state[cpu]);
	}

	if (IS_CLUSTER_OFF_STATE(state)) {
		/* Perform the common cluster specific operations */
		plat_cluster_pwron_common(cpu, state, plat_power_state[cpu]);
	}

	/* Perform the common CPU specific operations */
	plat_cpu_pwron_common(cpu, state, plat_power_state[cpu]);

	plat_mt_pm_invoke_no_check(pwr_reflect, cpu, state);
}

static int plat_validate_power_state(unsigned int power_state,
				     psci_power_state_t *req_state)
{
	unsigned int pstate = psci_get_pstate_type(power_state);
	unsigned int aff_lvl = psci_get_pstate_pwrlvl(power_state);
	unsigned int cpu = plat_my_core_pos();

	if (aff_lvl > PLAT_MAX_PWR_LVL) {
		return PSCI_E_INVALID_PARAMS;
	}

	if (pstate == PSTATE_TYPE_STANDBY) {
		req_state->pwr_domain_state[0] = PLAT_MAX_RET_STATE;
	} else {
		unsigned int i;
		unsigned int pstate_id = psci_get_pstate_id(power_state);
		plat_local_state_t s = MTK_LOCAL_STATE_OFF;

		/* Use pstate_id as the power domain state */
		if (pstate_id > s) {
			s = (plat_local_state_t)pstate_id;
		}

		for (i = 0U; i <= aff_lvl; i++) {
			req_state->pwr_domain_state[i] = s;
		}
	}

	plat_power_state[cpu] = power_state;
	return PSCI_E_SUCCESS;
}

static void plat_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	unsigned int lv;
	unsigned int cpu = plat_my_core_pos();

	for (lv = PSCI_CPU_PWR_LVL; lv <= PLAT_MAX_PWR_LVL; lv++) {
		req_state->pwr_domain_state[lv] = PLAT_MAX_OFF_STATE;
	}

	plat_power_state[cpu] =
		psci_make_powerstate(
			MT_PLAT_PWR_STATE_SYSTEM_SUSPEND,
			PSTATE_TYPE_POWERDOWN, PLAT_MAX_PWR_LVL);

	flush_dcache_range((uintptr_t)&plat_power_state[cpu],
			   sizeof(plat_power_state[cpu]));
}

static const plat_psci_ops_t plat_psci_ops = {
	.cpu_standby = plat_cpu_standby,
	.pwr_domain_on = plat_power_domain_on,
	.pwr_domain_on_finish = plat_power_domain_on_finish,
	.pwr_domain_off = plat_power_domain_off,
	.pwr_domain_suspend = plat_power_domain_suspend,
	.pwr_domain_suspend_finish = plat_power_domain_suspend_finish,
	.validate_power_state = plat_validate_power_state,
	.get_sys_suspend_power_state = plat_get_sys_suspend_power_state
};

int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const plat_psci_ops_t **psci_ops)
{
	*psci_ops = &plat_psci_ops;
	secure_entrypoint = sec_entrypoint;

	/*
	 * init the warm reset config for boot CPU
	 * reset arch as AARCH64
	 * reset addr as function bl31_warm_entrypoint()
	 */
	mcucfg_init_archstate(0U, 0U, true);
	mcucfg_set_bootaddr(0U, 0U, secure_entrypoint);

	spmc_init();
	plat_mt_pm = mt_plat_cpu_pm_init();

	return 0;
}
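/*
 * Illustrative sketch only, kept under '#if 0' so it is never built: it shows
 * roughly how a low-power driver might populate the struct mt_lpm_tz hooks
 * consumed by plat_mt_pm_invoke*() above. The hook name and the
 * (unsigned int cpu, const psci_power_state_t *state) signature are taken
 * from how this file calls them; the example identifiers are hypothetical,
 * and the real table is whatever mt_plat_cpu_pm_init() returns.
 */
#if 0
static int example_pwr_cpu_dwn(unsigned int cpu,
			       const psci_power_state_t *state)
{
	/* platform-specific bookkeeping before the CPU is powered down */
	return 0;
}

static const struct mt_lpm_tz example_cpu_pm_ops = {
	.pwr_cpu_dwn = example_pwr_cpu_dwn,
	/* hooks left NULL are silently skipped by plat_mt_pm_invoke*() */
};
#endif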