/*
 * Copyright 2025 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <stdbool.h>

#include "../drivers/arm/gic/v3/gicv3_private.h"

#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/arm/css/scmi.h>
#include <drivers/arm/gicv3.h>
#include <lib/psci/psci.h>
#include <scmi_imx9.h>

#include <imx9_psci_common.h>
#include <imx9_sys_sleep.h>
#include <imx_scmi_client.h>
#include <plat_imx8.h>

/*
 * Select the SCMI system-power request codes used for shutdown/reset.
 * With SYS_PWR_FULL_CTRL the platform-specific "full" variants are used
 * instead of the generic SCMI ones.
 */
#if SYS_PWR_FULL_CTRL == 1
#define PLAT_SCMI_SYS_PWR_SHUTDOWN	IMX9_SCMI_SYS_PWR_FULL_SHUTDOWN
#define PLAT_SCMI_SYS_PWR_COLD_RESET	IMX9_SCMI_SYS_PWR_FULL_RESET
#else
#define PLAT_SCMI_SYS_PWR_SHUTDOWN	SCMI_SYS_PWR_SHUTDOWN
#define PLAT_SCMI_SYS_PWR_COLD_RESET	SCMI_SYS_PWR_COLD_RESET
#endif

/* platform secure warm boot entry */
uintptr_t secure_entrypoint;

/*
 * PSCI hook: validate a non-secure entrypoint supplied by the caller.
 * Only a lower-bound check is performed here: the address must not fall
 * below PLAT_NS_IMAGE_OFFSET (i.e. it must be in the non-secure RAM
 * region). No upper-bound check is done in this function.
 */
int imx_validate_ns_entrypoint(uintptr_t ns_entrypoint)
{
	/* The non-secure entrypoint should be in RAM space */
	if (ns_entrypoint < PLAT_NS_IMAGE_OFFSET) {
		return PSCI_E_INVALID_PARAMS;
	}

	return PSCI_E_SUCCESS;
}

/*
 * PSCI hook: validate a CPU_SUSPEND power_state parameter and translate
 * it into the per-level req_state composite.
 *
 * Accepted encodings (everything else falls through with req_state
 * untouched but still returns success):
 *  - any standby-type request, and
 *  - a powerdown-type request with state_id 0x33 (platform-specific
 *    encoding for the deepest suspend state).
 * Both map to core = PLAT_MAX_OFF_STATE, cluster = PLAT_MAX_RET_STATE.
 * Only a requested power level above PLAT_MAX_PWR_LVL is rejected.
 */
int imx_validate_power_state(uint32_t power_state,
			     psci_power_state_t *req_state)
{
	int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
	int pwr_type = psci_get_pstate_type(power_state);
	int state_id = psci_get_pstate_id(power_state);

	if (pwr_lvl > PLAT_MAX_PWR_LVL) {
		return PSCI_E_INVALID_PARAMS;
	}

	if (pwr_type == PSTATE_TYPE_STANDBY) {
		CORE_PWR_STATE(req_state) = PLAT_MAX_OFF_STATE;
		CLUSTER_PWR_STATE(req_state) = PLAT_MAX_RET_STATE;
	}

	/* 0x33: platform state_id for the deep powerdown request */
	if (pwr_type == PSTATE_TYPE_POWERDOWN && state_id == 0x33) {
		CORE_PWR_STATE(req_state) = PLAT_MAX_OFF_STATE;
		CLUSTER_PWR_STATE(req_state) = PLAT_MAX_RET_STATE;
	}

	return PSCI_E_SUCCESS;
}

/*
 * Program the (warm) boot address of an A55 core through the SCMI CPU
 * protocol. 'flag' selects how the vector is applied (boot vs. resume,
 * see SCMI_CPU_VEC_FLAGS_* at the call sites).
 */
void imx_set_cpu_boot_entry(uint32_t core_id, uint64_t boot_entry,
			    uint32_t flag)
{
	scmi_core_set_reset_addr(imx9_scmi_handle,
				 boot_entry,
				 SCMI_CPU_A55_ID(core_id),
				 flag);
}

/*
 * PSCI hook: power on a secondary core.
 * Sequence: program the secure warm-boot entry, kick the core via SCMI,
 * mask debug/event non-IRQ wakeup sources, then install the default
 * per-core LPM (low-power mode) configuration used by cpuidle.
 */
int imx_pwr_domain_on(u_register_t mpidr)
{
	uint32_t core_id = MPIDR_AFFLVL1_VAL(mpidr);
	uint32_t mask = DEBUG_WAKEUP_MASK | EVENT_WAKEUP_MASK;

	imx_set_cpu_boot_entry(core_id,
			       secure_entrypoint,
			       SCMI_CPU_VEC_FLAGS_BOOT);

	scmi_core_start(imx9_scmi_handle, SCMI_CPU_A55_ID(core_id));

	/*
	 * Set NON-IRQ wakeup mask to Disable wakeup on DEBUG_WAKEUP
	 */
	scmi_core_nonIrq_wake_set(imx9_scmi_handle, SCMI_CPU_A55_ID(core_id), 0U, 1U, mask);

	/* Set the default LPM state for cpuidle. */
	struct scmi_lpm_config cpu_lpm_cfg = {
		SCMI_CPU_A55_PD(core_id),
		SCMI_CPU_PD_LPM_ON_RUN,
		0U
	};

	scmi_core_lpm_mode_set(imx9_scmi_handle,
			       SCMI_CPU_A55_ID(core_id),
			       1U, &cpu_lpm_cfg);

	return PSCI_E_SUCCESS;
}

/*
 * PSCI hook: runs on the freshly powered-on core.
 * Configures the core's SCMI sleep mode to use the GIC as the wakeup
 * source with the lightweight WAIT state (normal running/idle config).
 */
void imx_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	uint64_t mpidr = read_mpidr_el1();
	uint32_t core_id = MPIDR_AFFLVL1_VAL(mpidr);

	scmi_core_set_sleep_mode(imx9_scmi_handle,
				 SCMI_CPU_A55_ID(core_id),
				 SCMI_GIC_WAKEUP,
				 SCMI_CPU_SLEEP_WAIT);
}

/*
 * PSCI hook: power off the calling core (CPU hotplug path).
 * The core is put into SUSPEND with GPC as the wakeup source, but with
 * all GPC IRQ wakeups masked so only an explicit SW wakeup (CPU_ON) can
 * bring it back. 'mask_all' and IMR_NUM come from the sys-sleep layer.
 */
void imx_pwr_domain_off(const psci_power_state_t *target_state)
{
	uint64_t mpidr = read_mpidr_el1();
	uint32_t core_id = MPIDR_AFFLVL1_VAL(mpidr);

	/* Ensure the cluster can be powered off. */
	write_clusterpwrdn(DSU_CLUSTER_PWR_OFF);

	/* Configure core LPM state for hotplug. */
	struct scmi_lpm_config cpu_lpm_cfg = {
		SCMI_CPU_A55_PD(core_id),
		SCMI_CPU_PD_LPM_ON_RUN_WAIT_STOP,
		0U
	};
	/* Set the default LPM state for cpuidle */
	scmi_core_lpm_mode_set(imx9_scmi_handle, SCMI_CPU_A55_ID(core_id),
			       1U, &cpu_lpm_cfg);

	/*
	 * Mask all the GPC IRQ wakeup to make sure no IRQ can wakeup this core
	 * so we need to use SW_WAKEUP for hotplug purpose
	 */
	scmi_core_Irq_wake_set(imx9_scmi_handle, SCMI_CPU_A55_ID(core_id), 0U,
			       IMR_NUM, mask_all);

	scmi_core_set_sleep_mode(imx9_scmi_handle, SCMI_CPU_A55_ID(core_id),
				 SCMI_GPC_WAKEUP, SCMI_CPU_SLEEP_SUSPEND);
}

/*
 * PSCI hook: prepare the calling core (and, for the deepest state, the
 * whole system) for suspend.
 *  - core level: install the RESUME vector so the core re-enters at
 *    secure_entrypoint on wakeup;
 *  - cluster level: program the DSU cluster powerdown register (BIT(1)
 *    is set for the retention case — per the original comment this
 *    keeps L3 in retention; exact bit semantics are DSU-defined);
 *  - system level: switch core + A55 platform to GPC-based SUSPEND,
 *    install the system-sleep LPM table (WAKEUPMIX kept on when
 *    'keep_wakeupmix_on' is set by the sys-sleep layer), and, when a
 *    NETC IRQ is pending ('has_netc_irq'), request a low-perf system
 *    sleep mode from the SM instead of a full suspend.
 */
void imx_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	uint64_t mpidr = read_mpidr_el1();
	unsigned int core_id = MPIDR_AFFLVL1_VAL(mpidr);
	uint32_t sys_mode;

	/* do cpu level config */
	if (is_local_state_off(CORE_PWR_STATE(target_state))) {
		imx_set_cpu_boot_entry(core_id, secure_entrypoint, SCMI_CPU_VEC_FLAGS_RESUME);
	}

	/* config DSU for cluster power down */
	if (!is_local_state_run(CLUSTER_PWR_STATE(target_state))) {
		/* L3 retention */
		if (is_local_state_retn(CLUSTER_PWR_STATE(target_state))) {
			write_clusterpwrdn(DSU_CLUSTER_PWR_OFF | BIT(1));
		} else {
			write_clusterpwrdn(DSU_CLUSTER_PWR_OFF);
		}
	}

	if (is_local_state_off(SYSTEM_PWR_STATE(target_state))) {
		imx9_sys_sleep_prepare(core_id);

		/* switch to GPC wakeup source, config the target mode to SUSPEND */
		scmi_core_set_sleep_mode(imx9_scmi_handle, SCMI_CPU_A55_ID(core_id),
					 SCMI_GPC_WAKEUP, SCMI_CPU_SLEEP_SUSPEND);

		scmi_core_set_sleep_mode(imx9_scmi_handle, IMX9_SCMI_CPU_A55P,
					 SCMI_GPC_WAKEUP, SCMI_CPU_SLEEP_SUSPEND);

		/* LPM table applied for system sleep (A55P, NOC, WAKEUPMIX) */
		struct scmi_lpm_config cpu_lpm_cfg[] = {
			{
				SCMI_PWR_MIX_SLICE_IDX_A55P,
				SCMI_CPU_PD_LPM_ON_RUN_WAIT_STOP,
				0U
			},
			{
				SCMI_PWR_MIX_SLICE_IDX_NOC,
				SCMI_CPU_PD_LPM_ON_RUN_WAIT_STOP,
				/* keep NOC OCRAM retained while NOC is off */
				BIT_32(SCMI_PWR_MEM_SLICE_IDX_NOC_OCRAM)
			},
			{
				SCMI_PWR_MIX_SLICE_IDX_WAKEUP,
				keep_wakeupmix_on ? SCMI_CPU_PD_LPM_ON_ALWAYS :
						    SCMI_CPU_PD_LPM_ON_RUN_WAIT_STOP,
				0U
			}
		};

		/* Set the default LPM state for suspend */
		scmi_core_lpm_mode_set(imx9_scmi_handle,
				       IMX9_SCMI_CPU_A55P,
				       ARRAY_SIZE(cpu_lpm_cfg),
				       cpu_lpm_cfg);

		/* Set the system sleep config */
		sys_mode = SCMI_IMX_SYS_POWER_STATE_MODE_MASK;
		if (has_netc_irq) {
			sys_mode |= SYS_SLEEP_MODE_H(SM_PERF_LVL_LOW);
			scmi_sys_pwr_state_set(imx9_scmi_handle,
					       SCMI_SYS_PWR_FORCEFUL_REQ,
					       sys_mode);
		}
	}
}

/*
 * PSCI hook: undo imx_pwr_domain_suspend after a system-level resume.
 * Restores the default system sleep mode (when it was overridden for
 * NETC), switches core and A55 platform back to GIC wakeup / WAIT, and
 * reinstalls the RUN-mode LPM table (NOC and WAKEUPMIX always on, A55P
 * on in RUN with L3 retention enabled).
 */
void imx_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
{
	uint64_t mpidr = read_mpidr_el1();
	unsigned int core_id = MPIDR_AFFLVL1_VAL(mpidr);
	uint32_t sys_mode;

	/* system level */
	if (is_local_state_off(SYSTEM_PWR_STATE(target_state))) {
		imx9_sys_sleep_unprepare(core_id);

		/* restore the default (non-NETC) system sleep mode */
		sys_mode = SCMI_IMX_SYS_POWER_STATE_MODE_MASK;
		if (has_netc_irq) {
			scmi_sys_pwr_state_set(imx9_scmi_handle,
					       SCMI_SYS_PWR_FORCEFUL_REQ,
					       sys_mode);
		}

		/* switch to GIC wakeup source, config the target mode to WAIT */
		scmi_core_set_sleep_mode(imx9_scmi_handle, SCMI_CPU_A55_ID(core_id),
					 SCMI_GIC_WAKEUP, SCMI_CPU_SLEEP_WAIT);

		scmi_core_set_sleep_mode(imx9_scmi_handle, IMX9_SCMI_CPU_A55P,
					 SCMI_GIC_WAKEUP, SCMI_CPU_SLEEP_WAIT);

		struct scmi_lpm_config cpu_lpm_cfg[] = {
			{
				SCMI_PWR_MIX_SLICE_IDX_A55P,
				SCMI_CPU_PD_LPM_ON_RUN,
				/* retain A55 L3 memory */
				BIT_32(SCMI_PWR_MEM_SLICE_IDX_A55L3)
			},
			{
				SCMI_PWR_MIX_SLICE_IDX_NOC,
				SCMI_CPU_PD_LPM_ON_ALWAYS,
				0U
			},
			{
				SCMI_PWR_MIX_SLICE_IDX_WAKEUP,
				SCMI_CPU_PD_LPM_ON_ALWAYS,
				0U
			}
		};

		/* Set the default LPM state for RUN MODE */
		scmi_core_lpm_mode_set(imx9_scmi_handle,
				       IMX9_SCMI_CPU_A55P,
				       ARRAY_SIZE(cpu_lpm_cfg),
				       cpu_lpm_cfg);
	}
}

/*
 * PSCI hook: report the power state used for SYSTEM_SUSPEND — the
 * deepest (OFF) state at every power level.
 */
void imx_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	uint32_t i;

	for (i = IMX_PWR_LVL0; i <= PLAT_MAX_PWR_LVL; i++) {
		req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
	}
}

/*
 * PSCI hook: last-step power-down handler. Intentionally empty — all
 * power-down work for this platform is done in the _off/_suspend hooks
 * via SCMI.
 */
void imx_pwr_domain_pwr_down(const psci_power_state_t *target_state)
{
}

/*
 * PSCI hook: SYSTEM_OFF. Issues a forceful SCMI system shutdown; on
 * success this call does not return, so reaching the NOTICE means the
 * request failed.
 */
void imx_system_off(void)
{
	int ret;

	ret = scmi_sys_pwr_state_set(imx9_scmi_handle,
				     SCMI_SYS_PWR_FORCEFUL_REQ,
				     PLAT_SCMI_SYS_PWR_SHUTDOWN);
	if (ret) {
		NOTICE("%s failed: %d\n", __func__, ret);
	}
}

/*
 * PSCI hook: SYSTEM_RESET. Disables all three GIC distributor interrupt
 * groups first (declared workaround so the reset can complete), then
 * issues a forceful SCMI cold reset. On success this does not return.
 */
void imx_system_reset(void)
{
	int ret;

	/* TODO: temp workaround for GIC to let reset done */
	gicd_clr_ctlr(PLAT_GICD_BASE,
		      CTLR_ENABLE_G0_BIT |
		      CTLR_ENABLE_G1S_BIT |
		      CTLR_ENABLE_G1NS_BIT,
		      RWP_TRUE);

	/* Force: work, Gracefull: not work */
	ret = scmi_sys_pwr_state_set(imx9_scmi_handle,
				     SCMI_SYS_PWR_FORCEFUL_REQ,
				     PLAT_SCMI_SYS_PWR_COLD_RESET);
	if (ret) {
		VERBOSE("%s failed: %d\n", __func__, ret);
	}
}