/*
 * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>

#include <platform_def.h>

#include <arch_helpers.h>
#include <bl31/interrupt_mgmt.h>
#include <common/debug.h>
#include <drivers/arm/css/css_scp.h>
#include <drivers/arm/css/dsu.h>
#include <lib/cassert.h>
#include <plat/arm/common/plat_arm.h>

#include <plat/common/platform.h>

#include <plat/arm/css/common/css_pm.h>

/* Allow CSS platforms to override `plat_arm_psci_pm_ops` */
#pragma weak plat_arm_psci_pm_ops

#if ARM_RECOM_STATE_ID_ENC
/*
 * The table storing the valid idle power states. Ensure that the
 * array entries are populated in ascending order of state-id to
 * enable us to use binary search during power state validation.
 * The table must be terminated by a NULL entry.
 */
const unsigned int arm_pm_idle_states[] = {
	/* State-id - 0x001 */
	arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_RUN,
		ARM_LOCAL_STATE_RET, ARM_PWR_LVL0, PSTATE_TYPE_STANDBY),
	/* State-id - 0x002 */
	arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_RUN,
		ARM_LOCAL_STATE_OFF, ARM_PWR_LVL0, PSTATE_TYPE_POWERDOWN),
	/* State-id - 0x022 */
	arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_OFF,
		ARM_LOCAL_STATE_OFF, ARM_PWR_LVL1, PSTATE_TYPE_POWERDOWN),
#if PLAT_MAX_PWR_LVL > ARM_PWR_LVL1
	/* State-id - 0x222 */
	arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_OFF, ARM_LOCAL_STATE_OFF,
		ARM_LOCAL_STATE_OFF, ARM_PWR_LVL2, PSTATE_TYPE_POWERDOWN),
#endif
	0,
};
#endif /* ARM_RECOM_STATE_ID_ENC */
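
/*
 * Worked example (illustrative only, assuming the recommended encoding
 * produced by arm_make_pwrstate_lvl2, i.e. one 4-bit field per power level
 * with ARM_LOCAL_STATE_RUN = 0, ARM_LOCAL_STATE_RET = 1 and
 * ARM_LOCAL_STATE_OFF = 2): state-id 0x022 above decodes as
 *	bits[3:0]  = 0x2 -> core    (level 0) OFF
 *	bits[7:4]  = 0x2 -> cluster (level 1) OFF
 *	bits[11:8] = 0x0 -> system  (level 2) RUN
 * This per-level layout is what keeps the entries in ascending state-id order
 * and lets power state validation use a binary search over this array.
 */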

/*
 * All the power management helpers in this file assume at least cluster power
 * level is supported.
 */
CASSERT(PLAT_MAX_PWR_LVL >= ARM_PWR_LVL1,
		assert_max_pwr_lvl_supported_mismatch);

/*
 * Ensure that the PLAT_MAX_PWR_LVL is not greater than CSS_SYSTEM_PWR_DMN_LVL
 * assumed by the CSS layer.
 */
CASSERT(PLAT_MAX_PWR_LVL <= CSS_SYSTEM_PWR_DMN_LVL,
		assert_max_pwr_lvl_higher_than_css_sys_lvl);

/*******************************************************************************
 * Handler called when a power domain is about to be turned on. The
 * level and mpidr determine the affinity instance.
 ******************************************************************************/
int css_pwr_domain_on(u_register_t mpidr)
{
	css_scp_on(mpidr);

	return PSCI_E_SUCCESS;
}

static void css_pwr_domain_on_finisher_common(
		const psci_power_state_t *target_state)
{
	assert(CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF);

	/*
	 * Perform the common cluster specific operations, i.e. enable
	 * coherency if this cluster was off.
	 */
	if (CSS_CLUSTER_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) {
#if PRESERVE_DSU_PMU_REGS
		cluster_on_dsu_pmu_context_restore();
#endif
		plat_arm_interconnect_enter_coherency();
	}
}

/*******************************************************************************
 * Handler called when a power level has just been powered on after
 * being turned off earlier. The target_state encodes the low power state that
 * each level has woken up from. This handler would never be invoked with
 * the system power domain uninitialized as either the primary would have taken
 * care of it as part of cold boot or the first core awakened from system
 * suspend would have already initialized it.
 ******************************************************************************/
void css_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	/* Assert that the system power domain need not be initialized */
	assert(css_system_pwr_state(target_state) == ARM_LOCAL_STATE_RUN);

	css_pwr_domain_on_finisher_common(target_state);
}

/*******************************************************************************
 * Handler called when a power domain has just been powered on and the cpu
 * and its cluster are fully participating in coherent transactions on the
 * interconnect. Data cache must be enabled for the CPU at this point.
 ******************************************************************************/
void css_pwr_domain_on_finish_late(const psci_power_state_t *target_state)
{
	/* Program the gic per-cpu distributor or re-distributor interface */
	plat_arm_gic_pcpu_init();

	/* Enable the gic cpu interface */
	plat_arm_gic_cpuif_enable();

	/* Setup the CPU power down request interrupt for secondary core(s) */
	css_setup_cpu_pwr_down_intr();
}

/*******************************************************************************
 * Common function called while turning a cpu off or suspending it. It is
 * called from css_off() or css_suspend() when these functions in turn are
 * called for the power domain at the highest power level which will be
 * powered down. It performs the actions common to the OFF and SUSPEND calls.
 ******************************************************************************/
static void css_power_down_common(const psci_power_state_t *target_state)
{
	/* Prevent interrupts from spuriously waking up this cpu */
	plat_arm_gic_cpuif_disable();

	/* Cluster is to be turned off, so disable coherency */
	if (CSS_CLUSTER_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) {
#if PRESERVE_DSU_PMU_REGS
		cluster_off_dsu_pmu_context_save();
#endif
		plat_arm_interconnect_exit_coherency();
	}
}

/*******************************************************************************
 * Handler called when a power domain is about to be turned off. The
 * target_state encodes the power state that each level should transition to.
 ******************************************************************************/
void css_pwr_domain_off(const psci_power_state_t *target_state)
{
	assert(CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF);
	css_power_down_common(target_state);
	/* Ask the GIC not to wake us up */
	plat_arm_gic_redistif_off();
	css_scp_off(target_state);
}

/*******************************************************************************
 * Handler called when a power domain is about to be suspended. The
 * target_state encodes the power state that each level should transition to.
 ******************************************************************************/
void css_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	/*
	 * CSS currently supports retention only at cpu level. Just return
	 * as nothing is to be done for retention.
	 */
	if (CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_RET)
		return;

	assert(CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF);
	css_power_down_common(target_state);

	/* Perform system domain state saving if issuing system suspend */
	if (css_system_pwr_state(target_state) == ARM_LOCAL_STATE_OFF) {
		arm_system_pwr_domain_save();

		/* Power off the Redistributor after having saved its context */
		plat_arm_gic_redistif_off();
	}

	css_scp_suspend(target_state);
}

/*******************************************************************************
 * Handler called when a power domain has just been powered on after
 * having been suspended earlier. The target_state encodes the low power state
 * that each level has woken up from.
 * TODO: At the moment we reuse the on finisher and reinitialize the secure
 * context. Need to implement a separate suspend finisher.
 ******************************************************************************/
void css_pwr_domain_suspend_finish(
				const psci_power_state_t *target_state)
{
	/* Return as nothing is to be done on waking up from retention. */
	if (CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_RET)
		return;

	/* Perform system domain restore if woken up from system suspend */
	if (css_system_pwr_state(target_state) == ARM_LOCAL_STATE_OFF)
		/*
		 * At this point, the Distributor must be powered on to be
		 * ready to have its state restored. The Redistributor will be
		 * powered on as part of gicv3_rdistif_init_restore.
		 */
		arm_system_pwr_domain_resume();

	css_pwr_domain_on_finisher_common(target_state);

	/* Enable the gic cpu interface */
	plat_arm_gic_cpuif_enable();
}

/*******************************************************************************
 * Handlers to shutdown/reboot the system
 ******************************************************************************/
void css_system_off(void)
{
	css_scp_sys_shutdown();
}

void css_system_reset(void)
{
	css_scp_sys_reboot();
}

/*******************************************************************************
 * Handler called when the CPU power domain is about to enter standby.
 ******************************************************************************/
void css_cpu_standby(plat_local_state_t cpu_state)
{
	unsigned int scr;

	assert(cpu_state == ARM_LOCAL_STATE_RET);

	scr = read_scr_el3();
	/*
	 * Enable the Non secure interrupt to wake the CPU.
	 * In GICv3 affinity routing mode, the non secure group1 interrupts
	 * use the PhysicalFIQ at EL3 whereas in GICv2 they use the
	 * PhysicalIRQ. Enabling both bits works for both GICv2 mode and GICv3
	 * affinity routing mode.
	 */
	write_scr_el3(scr | SCR_IRQ_BIT | SCR_FIQ_BIT);
	isb();
	dsb();
	wfi();

	/*
	 * Restore SCR to the original value; synchronisation of scr_el3 is
	 * done by the eret in el3_exit() to save some execution cycles.
	 */
	write_scr_el3(scr);
}

/*******************************************************************************
 * Handler called to return the 'req_state' for system suspend.
 ******************************************************************************/
void css_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	unsigned int i;

	/*
	 * System Suspend is supported only if the system power domain node
	 * is implemented.
	 */
	assert(PLAT_MAX_PWR_LVL == CSS_SYSTEM_PWR_DMN_LVL);

	for (i = ARM_PWR_LVL0; i <= PLAT_MAX_PWR_LVL; i++)
		req_state->pwr_domain_state[i] = ARM_LOCAL_STATE_OFF;
}

/*******************************************************************************
 * Handler to query CPU/cluster power states from SCP
 ******************************************************************************/
int css_node_hw_state(u_register_t mpidr, unsigned int power_level)
{
	return css_scp_get_power_state(mpidr, power_level);
}

/*
 * The system power domain suspend is supported only via the PSCI
 * SYSTEM_SUSPEND API. A PSCI CPU_SUSPEND request to the system power domain
 * will be downgraded to the lower level.
 */
static int css_validate_power_state(unsigned int power_state,
				    psci_power_state_t *req_state)
{
	int rc;
	rc = arm_validate_power_state(power_state, req_state);

	/*
	 * Ensure that we don't overrun the pwr_domain_state array in the case
	 * where the platform supported max power level is less than the
	 * system power level.
	 */

#if (PLAT_MAX_PWR_LVL == CSS_SYSTEM_PWR_DMN_LVL)

	/*
	 * Ensure that the system power domain level is never suspended
	 * via the PSCI CPU SUSPEND API. Currently system suspend is only
	 * supported via the PSCI SYSTEM SUSPEND API.
	 */

	req_state->pwr_domain_state[CSS_SYSTEM_PWR_DMN_LVL] =
							ARM_LOCAL_STATE_RUN;
#endif

	return rc;
}

/*
 * Custom `translate_power_state_by_mpidr` handler for CSS. Unlike
 * `css_validate_power_state`, we do not downgrade the system power
 * domain level request in `power_state` as it will be used to query the
 * PSCI_STAT_COUNT/RESIDENCY at the system power domain level.
 */
static int css_translate_power_state_by_mpidr(u_register_t mpidr,
		unsigned int power_state,
		psci_power_state_t *output_state)
{
	return arm_validate_power_state(power_state, output_state);
}

/*
 * Setup the SGI interrupt that will be used to trigger the execution of the
 * power down sequence for all the secondary cores. This interrupt is setup to
 * be handled in EL3 context at a priority defined by the platform.
 */
void css_setup_cpu_pwr_down_intr(void)
{
#if CSS_SYSTEM_GRACEFUL_RESET
	plat_ic_set_interrupt_type(CSS_CPU_PWR_DOWN_REQ_INTR, INTR_TYPE_EL3);
	plat_ic_set_interrupt_priority(CSS_CPU_PWR_DOWN_REQ_INTR,
			PLAT_REBOOT_PRI);
	plat_ic_enable_interrupt(CSS_CPU_PWR_DOWN_REQ_INTR);
#endif
}
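
/*
 * Note: with CSS_SYSTEM_GRACEFUL_RESET enabled, the SGI configured above is
 * expected to be raised to the other online CPUs from elsewhere (for example
 * by the CSS/SCP driver while servicing the shutdown or reboot request). A
 * minimal, illustrative sketch of that trigger, assuming the generic GIC
 * platform helper plat_ic_raise_el3_sgi() and a hypothetical `target_mpidr`
 * for each remaining online core, would be:
 *
 *	plat_ic_raise_el3_sgi(CSS_CPU_PWR_DOWN_REQ_INTR, target_mpidr);
 *
 * Each targeted core then runs css_reboot_interrupt_handler() below.
 */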

/*
 * For a graceful shutdown/reboot, each CPU in the system should run its power
 * down sequence. On a PSCI shutdown/reboot request, only one CPU gets the
 * opportunity to do the powerdown sequence. To achieve a graceful reset of
 * all cores in the system, that CPU raises the warm reboot SGI to the rest of
 * the CPUs which are online. Add a handler for the reboot SGI in which the
 * rest of the CPUs execute the powerdown sequence.
 */
int css_reboot_interrupt_handler(uint32_t intr_raw, uint32_t flags,
		void *handle, void *cookie)
{
	assert(intr_raw == CSS_CPU_PWR_DOWN_REQ_INTR);

	/* Deactivate warm reboot SGI */
	plat_ic_end_of_interrupt(CSS_CPU_PWR_DOWN_REQ_INTR);

	/*
	 * Disable GIC CPU interface to prevent pending interrupts from waking
	 * up the AP from WFI.
	 */
	plat_arm_gic_cpuif_disable();
	plat_arm_gic_redistif_off();

	psci_pwrdown_cpu_start(PLAT_MAX_PWR_LVL);

	psci_pwrdown_cpu_end_terminal();
	return 0;
}

/*******************************************************************************
 * Export the platform handlers via plat_arm_psci_pm_ops. The ARM Standard
 * platform will take care of registering the handlers with PSCI.
 ******************************************************************************/
plat_psci_ops_t plat_arm_psci_pm_ops = {
	.pwr_domain_on = css_pwr_domain_on,
	.pwr_domain_on_finish = css_pwr_domain_on_finish,
	.pwr_domain_on_finish_late = css_pwr_domain_on_finish_late,
	.pwr_domain_off = css_pwr_domain_off,
	.cpu_standby = css_cpu_standby,
	.pwr_domain_suspend = css_pwr_domain_suspend,
	.pwr_domain_suspend_finish = css_pwr_domain_suspend_finish,
	.system_off = css_system_off,
	.system_reset = css_system_reset,
	.validate_power_state = css_validate_power_state,
	.validate_ns_entrypoint = arm_validate_psci_entrypoint,
	.translate_power_state_by_mpidr = css_translate_power_state_by_mpidr,
	.get_node_hw_state = css_node_hw_state,
	.get_sys_suspend_power_state = css_get_sys_suspend_power_state,

#if defined(PLAT_ARM_MEM_PROT_ADDR)
	.mem_protect_chk = arm_psci_mem_protect_chk,
	.read_mem_protect = arm_psci_read_mem_protect,
	.write_mem_protect = arm_nor_psci_write_mem_protect,
#endif
#if CSS_USE_SCMI_SDS_DRIVER
	.system_reset2 = css_system_reset2,
#endif
};
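
/*
 * Since plat_arm_psci_pm_ops is declared weak (see the #pragma at the top of
 * this file), a CSS platform that needs different behaviour can provide its
 * own strong definition instead. A minimal, illustrative sketch (the handler
 * name `my_plat_pwr_domain_on` is hypothetical), placed in the platform's own
 * source file, would look like:
 *
 *	plat_psci_ops_t plat_arm_psci_pm_ops = {
 *		.pwr_domain_on = my_plat_pwr_domain_on,
 *		...
 *	};
 *
 * The ARM Standard platform code still takes care of registering whichever
 * definition is linked in with PSCI.
 */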