/*
 * Copyright (c) 2020, MediaTek Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/* common headers */
#include <assert.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/gpio.h>
#include <lib/psci/psci.h>

/* platform specific headers */
#include <mt_gic_v3.h>
#include <mtspmc.h>
#include <plat/common/platform.h>
#include <plat_mtk_lpm.h>
#include <plat_params.h>
#include <plat_pm.h>

/*
 * Cluster state request:
 * [0] : The CPU requires cluster power down
 * [1] : The CPU requires cluster power on
 */
#define coordinate_cluster(onoff)	write_clusterpwrdn_el1(onoff)
#define coordinate_cluster_pwron()	coordinate_cluster(1)
#define coordinate_cluster_pwroff()	coordinate_cluster(0)

/* platform secure entry point */
static uintptr_t secure_entrypoint;
/* per-CPU power state */
static unsigned int plat_power_state[PLATFORM_CORE_COUNT];

/* platform CPU power domain - ops */
static const struct mt_lpm_tz *plat_mt_pm;

#define plat_mt_pm_invoke(_name, _cpu, _state) ({ \
	int ret = -1; \
	if (plat_mt_pm != NULL && plat_mt_pm->_name != NULL) { \
		ret = plat_mt_pm->_name(_cpu, _state); \
	} \
	ret; })

#define plat_mt_pm_invoke_no_check(_name, _cpu, _state) ({ \
	if (plat_mt_pm != NULL && plat_mt_pm->_name != NULL) { \
		(void) plat_mt_pm->_name(_cpu, _state); \
	} \
	})
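/*
 * Both helpers are statement expressions so an optional low-power
 * callback can be guarded in a single expression. plat_mt_pm_invoke()
 * evaluates to the callback's return value, or to -1 when no low-power
 * driver (or no such callback) has been registered, so callers can
 * bail out on failure, e.g.
 *
 *	if (plat_mt_pm_invoke(pwr_mcusys_dwn, cpu, state) != 0) {
 *		return;
 *	}
 *
 * plat_mt_pm_invoke_no_check() simply discards the return value.
 */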
/*
 * Common MTK platform operations to power on/off a
 * CPU in response to a CPU_ON, CPU_OFF or CPU_SUSPEND request.
 */

static void plat_cpu_pwrdwn_common(unsigned int cpu,
		const psci_power_state_t *state, unsigned int req_pstate)
{
	assert(cpu == plat_my_core_pos());

	plat_mt_pm_invoke_no_check(pwr_cpu_dwn, cpu, state);

	if ((psci_get_pstate_pwrlvl(req_pstate) >= MTK_AFFLVL_CLUSTER) ||
			(req_pstate == 0U)) { /* hotplug off */
		coordinate_cluster_pwroff();
	}

	/* Prevent interrupts from spuriously waking up this CPU */
	mt_gic_rdistif_save();
	gicv3_cpuif_disable(cpu);
	gicv3_rdistif_off(cpu);
}

static void plat_cpu_pwron_common(unsigned int cpu,
		const psci_power_state_t *state, unsigned int req_pstate)
{
	assert(cpu == plat_my_core_pos());

	plat_mt_pm_invoke_no_check(pwr_cpu_on, cpu, state);

	coordinate_cluster_pwron();

	/* Enable the GIC CPU interface */
	gicv3_rdistif_on(cpu);
	gicv3_cpuif_enable(cpu);
	mt_gic_rdistif_init();

	/*
	 * If the mcusys was powered down, restore the GIC Redistributors
	 * of all CPUs; otherwise restore only this CPU's.
	 */
	if (IS_MCUSYS_OFF_STATE(state)) {
		mt_gic_rdistif_restore_all();
	} else {
		mt_gic_rdistif_restore();
	}
}

/*
 * Common MTK platform operations to power on/off a
 * cluster in response to a CPU_ON, CPU_OFF or CPU_SUSPEND request.
 */

static void plat_cluster_pwrdwn_common(unsigned int cpu,
		const psci_power_state_t *state, unsigned int req_pstate)
{
	assert(cpu == plat_my_core_pos());

	if (plat_mt_pm_invoke(pwr_cluster_dwn, cpu, state) != 0) {
		coordinate_cluster_pwron();

		/* TODO: return on fail.
		 * Add a 'return' here before adding any code following
		 * the if-block.
		 */
	}
}

static void plat_cluster_pwron_common(unsigned int cpu,
		const psci_power_state_t *state, unsigned int req_pstate)
{
	assert(cpu == plat_my_core_pos());

	if (plat_mt_pm_invoke(pwr_cluster_on, cpu, state) != 0) {
		/* TODO: return on fail.
		 * Add a 'return' here before adding any code following
		 * the if-block.
		 */
	}
}

/*
 * Common MTK platform operations to power on/off the
 * mcusys in response to a CPU_ON, CPU_OFF or CPU_SUSPEND request.
 */

static void plat_mcusys_pwrdwn_common(unsigned int cpu,
		const psci_power_state_t *state, unsigned int req_pstate)
{
	assert(cpu == plat_my_core_pos());

	if (plat_mt_pm_invoke(pwr_mcusys_dwn, cpu, state) != 0) {
		return;		/* return on fail */
	}

	mt_gic_distif_save();
	gic_sgi_save_all();
}

static void plat_mcusys_pwron_common(unsigned int cpu,
		const psci_power_state_t *state, unsigned int req_pstate)
{
	assert(cpu == plat_my_core_pos());

	if (plat_mt_pm_invoke(pwr_mcusys_on, cpu, state) != 0) {
		return;		/* return on fail */
	}

	mt_gic_init();
	mt_gic_distif_restore();
	gic_sgi_restore_all();

	plat_mt_pm_invoke_no_check(pwr_mcusys_on_finished, cpu, state);
}

/*
 * plat_psci_ops implementation
 */

static void plat_cpu_standby(plat_local_state_t cpu_state)
{
	uint64_t scr;

	scr = read_scr_el3();
	write_scr_el3(scr | SCR_IRQ_BIT | SCR_FIQ_BIT);
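
	/*
	 * Routing physical IRQ/FIQ to EL3 above lets a pending interrupt
	 * act as a wake event for the wfi below; the saved SCR_EL3 value
	 * is restored once the core leaves the standby state.
	 */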

	isb();
	dsb();
	wfi();

	write_scr_el3(scr);
}

static int plat_power_domain_on(u_register_t mpidr)
{
	unsigned int cpu = (unsigned int)plat_core_pos_by_mpidr(mpidr);
	unsigned int cluster = 0U;

	if (cpu >= PLATFORM_CORE_COUNT) {
		return PSCI_E_INVALID_PARAMS;
	}

	if (!spm_get_cluster_powerstate(cluster)) {
		spm_poweron_cluster(cluster);
	}

	/* init CPU reset arch as AARCH64 */
	mcucfg_init_archstate(cluster, cpu, true);
	mcucfg_set_bootaddr(cluster, cpu, secure_entrypoint);
	spm_poweron_cpu(cluster, cpu);

	return PSCI_E_SUCCESS;
}

static void plat_power_domain_on_finish(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();
	unsigned int cpu = (unsigned int)plat_core_pos_by_mpidr(mpidr);

	assert(cpu < PLATFORM_CORE_COUNT);

	/* Allow IRQs to wake up this core in the IDLE flow */
	mcucfg_enable_gic_wakeup(0U, cpu);

	if (IS_CLUSTER_OFF_STATE(state)) {
		plat_cluster_pwron_common(cpu, state, 0U);
	}

	plat_cpu_pwron_common(cpu, state, 0U);
}

static void plat_power_domain_off(const psci_power_state_t *state)
{
	unsigned long mpidr = read_mpidr_el1();
	unsigned int cpu = (unsigned int)plat_core_pos_by_mpidr(mpidr);

	assert(cpu < PLATFORM_CORE_COUNT);

	plat_cpu_pwrdwn_common(cpu, state, 0U);
	spm_poweroff_cpu(0U, cpu);

	/* prevent unintended IRQs from waking up the hot-unplugged core */
	mcucfg_disable_gic_wakeup(0U, cpu);

	if (IS_CLUSTER_OFF_STATE(state)) {
		plat_cluster_pwrdwn_common(cpu, state, 0U);
	}
}

static void plat_power_domain_suspend(const psci_power_state_t *state)
{
	unsigned int cpu = plat_my_core_pos();

	assert(cpu < PLATFORM_CORE_COUNT);

	plat_mt_pm_invoke_no_check(pwr_prompt, cpu, state);

	/* Perform the common CPU specific operations */
	plat_cpu_pwrdwn_common(cpu, state, plat_power_state[cpu]);

	if (IS_CLUSTER_OFF_STATE(state)) {
		/* Perform the common cluster specific operations */
		plat_cluster_pwrdwn_common(cpu, state, plat_power_state[cpu]);
	}

	if (IS_MCUSYS_OFF_STATE(state)) {
		/* Perform the common mcusys specific operations */
		plat_mcusys_pwrdwn_common(cpu, state, plat_power_state[cpu]);
	}
}

static void plat_power_domain_suspend_finish(const psci_power_state_t *state)
{
	unsigned int cpu = plat_my_core_pos();

	assert(cpu < PLATFORM_CORE_COUNT);

	if (IS_MCUSYS_OFF_STATE(state)) {
		/* Perform the common mcusys specific operations */
		plat_mcusys_pwron_common(cpu, state, plat_power_state[cpu]);
	}

	if (IS_CLUSTER_OFF_STATE(state)) {
		/* Perform the common cluster specific operations */
		plat_cluster_pwron_common(cpu, state, plat_power_state[cpu]);
	}

	/* Perform the common CPU specific operations */
	plat_cpu_pwron_common(cpu, state, plat_power_state[cpu]);

	plat_mt_pm_invoke_no_check(pwr_reflect, cpu, state);
}

static int plat_validate_power_state(unsigned int power_state,
					psci_power_state_t *req_state)
{
	unsigned int pstate = psci_get_pstate_type(power_state);
	unsigned int aff_lvl = psci_get_pstate_pwrlvl(power_state);
	unsigned int cpu = plat_my_core_pos();

	if (aff_lvl > PLAT_MAX_PWR_LVL) {
		return PSCI_E_INVALID_PARAMS;
	}

	if (pstate == PSTATE_TYPE_STANDBY) {
		req_state->pwr_domain_state[0] = PLAT_MAX_RET_STATE;
	} else {
		unsigned int i;
		unsigned int pstate_id = psci_get_pstate_id(power_state);
		plat_local_state_t s = MTK_LOCAL_STATE_OFF;

		/* Use the pstate ID as the power-domain state */
		if (pstate_id > s) {
			s = (plat_local_state_t)pstate_id;
		}

		for (i = 0U; i <= aff_lvl; i++) {
			req_state->pwr_domain_state[i] = s;
		}
	}

	plat_power_state[cpu] = power_state;
	return PSCI_E_SUCCESS;
}

static void plat_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	unsigned int lv;
	unsigned int cpu = plat_my_core_pos();

	for (lv = PSCI_CPU_PWR_LVL; lv <= PLAT_MAX_PWR_LVL; lv++) {
		req_state->pwr_domain_state[lv] = PLAT_MAX_OFF_STATE;
	}

	plat_power_state[cpu] =
			psci_make_powerstate(
				MT_PLAT_PWR_STATE_SYSTEM_SUSPEND,
				PSTATE_TYPE_POWERDOWN, PLAT_MAX_PWR_LVL);

	flush_dcache_range((uintptr_t)&plat_power_state[cpu],
			   sizeof(plat_power_state[cpu]));
}

static void __dead2 plat_mtk_system_reset(void)
{
	struct bl_aux_gpio_info *gpio_reset = plat_get_mtk_gpio_reset();

	INFO("MTK System Reset\n");

	gpio_set_value(gpio_reset->index, gpio_reset->polarity);

	wfi();
	ERROR("MTK System Reset: operation not handled.\n");
	panic();
}

static const plat_psci_ops_t plat_psci_ops = {
	.system_reset			= plat_mtk_system_reset,
	.cpu_standby			= plat_cpu_standby,
	.pwr_domain_on			= plat_power_domain_on,
	.pwr_domain_on_finish		= plat_power_domain_on_finish,
	.pwr_domain_off			= plat_power_domain_off,
	.pwr_domain_suspend		= plat_power_domain_suspend,
	.pwr_domain_suspend_finish	= plat_power_domain_suspend_finish,
	.validate_power_state		= plat_validate_power_state,
	.get_sys_suspend_power_state	= plat_get_sys_suspend_power_state
};

int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const plat_psci_ops_t **psci_ops)
{
	*psci_ops = &plat_psci_ops;
	secure_entrypoint = sec_entrypoint;

	/*
	 * init the warm reset config for the boot CPU:
	 * reset arch as AARCH64
	 * reset addr as function bl31_warm_entrypoint()
	 */
	mcucfg_init_archstate(0U, 0U, true);
	mcucfg_set_bootaddr(0U, 0U, secure_entrypoint);

	spmc_init();
	plat_mt_pm = mt_plat_cpu_pm_init();

	return 0;
}