/*
 * Copyright (c) 2015-2022, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>

#include <platform_def.h>

#include <arch_helpers.h>
#include <bl31/interrupt_mgmt.h>
#include <common/debug.h>
#include <drivers/arm/css/css_scp.h>
#include <lib/cassert.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/arm/css/common/css_pm.h>

#include <plat/common/platform.h>

/* Allow CSS platforms to override `plat_arm_psci_pm_ops` */
#pragma weak plat_arm_psci_pm_ops

#if ARM_RECOM_STATE_ID_ENC
/*
 * The table storing the valid idle power states. Each entry uses the
 * recommended state-id encoding, one nibble of local power state per power
 * level (core, cluster, system). Ensure that the array entries are populated
 * in ascending order of state-id to enable us to use binary search during
 * power state validation. The table is terminated by a zero entry.
 */
const unsigned int arm_pm_idle_states[] = {
	/* State-id - 0x001 */
	arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_RUN,
		ARM_LOCAL_STATE_RET, ARM_PWR_LVL0, PSTATE_TYPE_STANDBY),
	/* State-id - 0x002 */
	arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_RUN,
		ARM_LOCAL_STATE_OFF, ARM_PWR_LVL0, PSTATE_TYPE_POWERDOWN),
	/* State-id - 0x022 */
	arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_OFF,
		ARM_LOCAL_STATE_OFF, ARM_PWR_LVL1, PSTATE_TYPE_POWERDOWN),
#if PLAT_MAX_PWR_LVL > ARM_PWR_LVL1
	/* State-id - 0x222 */
	arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_OFF, ARM_LOCAL_STATE_OFF,
		ARM_LOCAL_STATE_OFF, ARM_PWR_LVL2, PSTATE_TYPE_POWERDOWN),
#endif
	0,
};
#endif /* ARM_RECOM_STATE_ID_ENC */

/*
 * All the power management helpers in this file assume that at least the
 * cluster power level is supported.
 */
CASSERT(PLAT_MAX_PWR_LVL >= ARM_PWR_LVL1,
		assert_max_pwr_lvl_supported_mismatch);

/*
 * Ensure that PLAT_MAX_PWR_LVL is not greater than the CSS_SYSTEM_PWR_DMN_LVL
 * assumed by the CSS layer.
 */
CASSERT(PLAT_MAX_PWR_LVL <= CSS_SYSTEM_PWR_DMN_LVL,
		assert_max_pwr_lvl_higher_than_css_sys_lvl);

/*******************************************************************************
 * Handler called when a power domain is about to be turned on. The
 * level and mpidr determine the affinity instance.
 ******************************************************************************/
int css_pwr_domain_on(u_register_t mpidr)
{
	css_scp_on(mpidr);

	return PSCI_E_SUCCESS;
}

static void css_pwr_domain_on_finisher_common(
		const psci_power_state_t *target_state)
{
	assert(CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF);

	/*
	 * Perform the common cluster-specific operations, i.e. enable
	 * coherency if this cluster was off.
	 */
	if (CSS_CLUSTER_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF)
		plat_arm_interconnect_enter_coherency();
}

/*******************************************************************************
 * Handler called when a power level has just been powered on after
 * being turned off earlier. The target_state encodes the low power state that
 * each level has woken up from. This handler is never invoked with the system
 * power domain uninitialized: either the primary CPU would have taken care of
 * it as part of cold boot, or the first core awakened from system suspend
 * would have already initialized it.
 ******************************************************************************/
void css_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	/* Assert that the system power domain need not be initialized */
	assert(css_system_pwr_state(target_state) == ARM_LOCAL_STATE_RUN);

	css_pwr_domain_on_finisher_common(target_state);
}

/*******************************************************************************
 * Handler called when a power domain has just been powered on and the cpu
 * and its cluster are fully participating in coherent transactions on the
 * interconnect. The data cache must be enabled for the CPU at this point.
 ******************************************************************************/
void css_pwr_domain_on_finish_late(const psci_power_state_t *target_state)
{
	/* Program the gic per-cpu distributor or re-distributor interface */
	plat_arm_gic_pcpu_init();

	/* Enable the gic cpu interface */
	plat_arm_gic_cpuif_enable();

	/* Setup the CPU power down request interrupt for secondary core(s) */
	css_setup_cpu_pwr_down_intr();
}

/*******************************************************************************
 * Common function called while turning a cpu off or suspending it. It is
 * called from css_pwr_domain_off() or css_pwr_domain_suspend() when these
 * functions in turn are called for the power domain at the highest power
 * level which will be powered down. It performs the actions common to the
 * OFF and SUSPEND calls.
 ******************************************************************************/
static void css_power_down_common(const psci_power_state_t *target_state)
{
	/* Prevent interrupts from spuriously waking up this cpu */
	plat_arm_gic_cpuif_disable();

	/* Turn redistributor off */
	plat_arm_gic_redistif_off();

	/* Cluster is to be turned off, so disable coherency */
	if (CSS_CLUSTER_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) {
		plat_arm_interconnect_exit_coherency();

#if HW_ASSISTED_COHERENCY
		uint32_t reg;

		/*
		 * If we have determined this core to be the last man standing
		 * and we intend to power down the cluster proactively, we
		 * provide a hint to the power controller that cluster power
		 * is not required when all cores are powered down.
		 * Note that this is only advisory to the power controller and
		 * is supported only by SoCs with DynamIQ Shared Units.
		 */
		reg = read_clusterpwrdn();

		/* Clear and then set bit 0: cluster power not required */
		reg &= ~DSU_CLUSTER_PWR_MASK;
		reg |= DSU_CLUSTER_PWR_OFF;
		write_clusterpwrdn(reg);
#endif
	}
}

/*******************************************************************************
 * Handler called when a power domain is about to be turned off. The
 * target_state encodes the power state that each level should transition to.
 ******************************************************************************/
void css_pwr_domain_off(const psci_power_state_t *target_state)
{
	assert(CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF);
	css_power_down_common(target_state);
	css_scp_off(target_state);
}

/*******************************************************************************
 * Handler called when a power domain is about to be suspended. The
 * target_state encodes the power state that each level should transition to.
 ******************************************************************************/
void css_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	/*
	 * CSS currently supports retention only at the cpu level. Just return
	 * as nothing is to be done for retention.
	 */
	if (CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_RET)
		return;

	assert(CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF);
	css_power_down_common(target_state);

	/* Perform system domain state saving if issuing system suspend */
	if (css_system_pwr_state(target_state) == ARM_LOCAL_STATE_OFF) {
		arm_system_pwr_domain_save();

		/* Power off the Redistributor after having saved its context */
		plat_arm_gic_redistif_off();
	}

	css_scp_suspend(target_state);
}

/*******************************************************************************
 * Handler called when a power domain has just been powered on after
 * having been suspended earlier. The target_state encodes the low power state
 * that each level has woken up from.
 * TODO: At the moment we reuse the on finisher and reinitialize the secure
 * context. Need to implement a separate suspend finisher.
 ******************************************************************************/
void css_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
{
	/* Return as nothing is to be done on waking up from retention. */
	if (CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_RET)
		return;

	/* Perform system domain restore if woken up from system suspend */
	if (css_system_pwr_state(target_state) == ARM_LOCAL_STATE_OFF)
		/*
		 * At this point, the Distributor must be powered on to be
		 * ready to have its state restored. The Redistributor will be
		 * powered on as part of gicv3_rdistif_init_restore.
		 */
		arm_system_pwr_domain_resume();

	css_pwr_domain_on_finisher_common(target_state);

	/* Enable the gic cpu interface */
	plat_arm_gic_cpuif_enable();
}

/*******************************************************************************
 * Handlers to shutdown/reboot the system
 ******************************************************************************/
void __dead2 css_system_off(void)
{
	css_scp_sys_shutdown();
}

void __dead2 css_system_reset(void)
{
	css_scp_sys_reboot();
}

/*******************************************************************************
 * Handler called when the CPU power domain is about to enter standby.
 ******************************************************************************/
void css_cpu_standby(plat_local_state_t cpu_state)
{
	u_register_t scr;

	assert(cpu_state == ARM_LOCAL_STATE_RET);

	scr = read_scr_el3();
	/*
	 * Enable the Non-secure interrupt to wake up the CPU.
	 * In GICv3 affinity routing mode, Non-secure Group 1 interrupts use
	 * PhysicalFIQ at EL3, whereas in GICv2 they use PhysicalIRQ.
	 * Enabling both bits works for both GICv2 mode and GICv3 affinity
	 * routing mode.
	 */
	write_scr_el3(scr | SCR_IRQ_BIT | SCR_FIQ_BIT);
	isb();
	dsb();
	wfi();

	/*
	 * Restore SCR_EL3 to the original value; synchronisation of scr_el3
	 * is done by eret in el3_exit to save some execution cycles.
	 */
	write_scr_el3(scr);
}

/*******************************************************************************
 * Handler called to return the 'req_state' for system suspend.
 ******************************************************************************/
void css_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	unsigned int i;

	/*
	 * System Suspend is supported only if the system power domain node
	 * is implemented.
	 */
	assert(PLAT_MAX_PWR_LVL == CSS_SYSTEM_PWR_DMN_LVL);

	for (i = ARM_PWR_LVL0; i <= PLAT_MAX_PWR_LVL; i++)
		req_state->pwr_domain_state[i] = ARM_LOCAL_STATE_OFF;
}

/*******************************************************************************
 * Handler to query CPU/cluster power states from SCP
 ******************************************************************************/
int css_node_hw_state(u_register_t mpidr, unsigned int power_level)
{
	return css_scp_get_power_state(mpidr, power_level);
}

/*
 * System power domain suspend is supported only via the PSCI SYSTEM_SUSPEND
 * API. A PSCI CPU_SUSPEND request to the system power domain is downgraded
 * to the lower power level.
 */
static int css_validate_power_state(unsigned int power_state,
			psci_power_state_t *req_state)
{
	int rc = arm_validate_power_state(power_state, req_state);

	/*
	 * Ensure that we don't overrun the pwr_domain_state array in the case
	 * where the platform-supported max power level is less than the
	 * system power level.
	 */
#if (PLAT_MAX_PWR_LVL == CSS_SYSTEM_PWR_DMN_LVL)
	/*
	 * Ensure that the system power domain level is never suspended
	 * via the PSCI CPU_SUSPEND API. Currently system suspend is only
	 * supported via the PSCI SYSTEM_SUSPEND API.
	 */
	req_state->pwr_domain_state[CSS_SYSTEM_PWR_DMN_LVL] =
							ARM_LOCAL_STATE_RUN;
#endif

	return rc;
}

/*
 * Custom `translate_power_state_by_mpidr` handler for CSS. Unlike
 * `css_validate_power_state`, we do not downgrade the system power
 * domain level request in `power_state` as it will be used to query
 * PSCI_STAT_COUNT/RESIDENCY at the system power domain level.
 */
static int css_translate_power_state_by_mpidr(u_register_t mpidr,
		unsigned int power_state,
		psci_power_state_t *output_state)
{
	return arm_validate_power_state(power_state, output_state);
}

/*
 * Set up the SGI interrupt that will be used to trigger the execution of the
 * power down sequence for all the secondary cores. This interrupt is set up
 * to be handled in EL3 context at a priority defined by the platform.
 */
void css_setup_cpu_pwr_down_intr(void)
{
#if CSS_SYSTEM_GRACEFUL_RESET
	plat_ic_set_interrupt_type(CSS_CPU_PWR_DOWN_REQ_INTR, INTR_TYPE_EL3);
	plat_ic_set_interrupt_priority(CSS_CPU_PWR_DOWN_REQ_INTR,
			PLAT_REBOOT_PRI);
	plat_ic_enable_interrupt(CSS_CPU_PWR_DOWN_REQ_INTR);
#endif
}

/*******************************************************************************
 * Export the platform handlers via plat_arm_psci_pm_ops. The ARM Standard
 * platform will take care of registering the handlers with PSCI.
 ******************************************************************************/
plat_psci_ops_t plat_arm_psci_pm_ops = {
	.pwr_domain_on = css_pwr_domain_on,
	.pwr_domain_on_finish = css_pwr_domain_on_finish,
	.pwr_domain_on_finish_late = css_pwr_domain_on_finish_late,
	.pwr_domain_off = css_pwr_domain_off,
	.cpu_standby = css_cpu_standby,
	.pwr_domain_suspend = css_pwr_domain_suspend,
	.pwr_domain_suspend_finish = css_pwr_domain_suspend_finish,
	.system_off = css_system_off,
	.system_reset = css_system_reset,
	.validate_power_state = css_validate_power_state,
	.validate_ns_entrypoint = arm_validate_psci_entrypoint,
	.translate_power_state_by_mpidr = css_translate_power_state_by_mpidr,
	.get_node_hw_state = css_node_hw_state,
	.get_sys_suspend_power_state = css_get_sys_suspend_power_state,

#if defined(PLAT_ARM_MEM_PROT_ADDR)
	.mem_protect_chk = arm_psci_mem_protect_chk,
	.read_mem_protect = arm_psci_read_mem_protect,
	.write_mem_protect = arm_nor_psci_write_mem_protect,
#endif
#if CSS_USE_SCMI_SDS_DRIVER
	.system_reset2 = css_system_reset2,
#endif
};