xref: /rk3399_ARM-atf/plat/imx/imx9/common/imx9_psci_common.c (revision 04cf04c72d403e0c057505882fac9002d39d4102)
1 /*
2  * Copyright 2025 NXP
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <stdbool.h>
8 
9 #include "../drivers/arm/gic/v3/gicv3_private.h"
10 
11 #include <arch.h>
12 #include <arch_helpers.h>
13 #include <common/debug.h>
14 #include <drivers/arm/css/scmi.h>
15 #include <drivers/arm/gicv3.h>
16 #include <lib/psci/psci.h>
17 #include <scmi_imx9.h>
18 
19 #include <imx9_psci_common.h>
20 #include <imx9_sys_sleep.h>
21 #include <imx_scmi_client.h>
22 #include <plat_imx8.h>
23 
24 /* platform secure warm boot entry */
25 uintptr_t secure_entrypoint;
26 
27 int imx_validate_ns_entrypoint(uintptr_t ns_entrypoint)
28 {
29 	/* The non-secure entrypoint should be in RAM space */
30 	if (ns_entrypoint < PLAT_NS_IMAGE_OFFSET) {
31 		return PSCI_E_INVALID_PARAMS;
32 	}
33 
34 	return PSCI_E_SUCCESS;
35 }
36 
37 int imx_validate_power_state(uint32_t power_state,
38 			     psci_power_state_t *req_state)
39 {
40 	int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
41 	int pwr_type = psci_get_pstate_type(power_state);
42 	int state_id = psci_get_pstate_id(power_state);
43 
44 	if (pwr_lvl > PLAT_MAX_PWR_LVL) {
45 		return PSCI_E_INVALID_PARAMS;
46 	}
47 
48 	if (pwr_type == PSTATE_TYPE_STANDBY) {
49 		CORE_PWR_STATE(req_state) = PLAT_MAX_OFF_STATE;
50 		CLUSTER_PWR_STATE(req_state) = PLAT_MAX_RET_STATE;
51 	}
52 
53 	if (pwr_type == PSTATE_TYPE_POWERDOWN && state_id == 0x33) {
54 		CORE_PWR_STATE(req_state) = PLAT_MAX_OFF_STATE;
55 		CLUSTER_PWR_STATE(req_state) = PLAT_MAX_RET_STATE;
56 	}
57 
58 	return PSCI_E_SUCCESS;
59 }
60 
61 void imx_set_cpu_boot_entry(uint32_t core_id, uint64_t boot_entry,
62 			    uint32_t flag)
63 {
64 	scmi_core_set_reset_addr(imx9_scmi_handle,
65 				 boot_entry,
66 				 SCMI_CPU_A55_ID(core_id),
67 				 flag);
68 }
69 
70 int imx_pwr_domain_on(u_register_t mpidr)
71 {
72 	uint32_t core_id = MPIDR_AFFLVL1_VAL(mpidr);
73 	uint32_t mask = DEBUG_WAKEUP_MASK | EVENT_WAKEUP_MASK;
74 
75 	imx_set_cpu_boot_entry(core_id,
76 			       secure_entrypoint,
77 			       SCMI_CPU_VEC_FLAGS_BOOT);
78 
79 	scmi_core_start(imx9_scmi_handle, SCMI_CPU_A55_ID(core_id));
80 
81 	/*
82 	 * Set NON-IRQ wakeup mask to Disable wakeup on DEBUG_WAKEUP
83 	 */
84 	scmi_core_nonIrq_wake_set(imx9_scmi_handle, SCMI_CPU_A55_ID(core_id), 0U, 1U, mask);
85 
86 	/* Set the default LPM state for cpuidle. */
87 	struct scmi_lpm_config cpu_lpm_cfg = {
88 		SCMI_CPU_A55_PD(core_id),
89 		SCMI_CPU_PD_LPM_ON_RUN,
90 		0U
91 	};
92 
93 	scmi_core_lpm_mode_set(imx9_scmi_handle,
94 			       SCMI_CPU_A55_ID(core_id),
95 			       1U, &cpu_lpm_cfg);
96 
97 	return PSCI_E_SUCCESS;
98 }
99 
100 void imx_pwr_domain_on_finish(const psci_power_state_t *target_state)
101 {
102 	uint64_t mpidr = read_mpidr_el1();
103 	uint32_t core_id = MPIDR_AFFLVL1_VAL(mpidr);
104 
105 	scmi_core_set_sleep_mode(imx9_scmi_handle,
106 				 SCMI_CPU_A55_ID(core_id),
107 				 SCMI_GIC_WAKEUP,
108 				 SCMI_CPU_SLEEP_WAIT);
109 }
110 
111 void imx_pwr_domain_off(const psci_power_state_t *target_state)
112 {
113 	uint64_t mpidr = read_mpidr_el1();
114 	uint32_t core_id = MPIDR_AFFLVL1_VAL(mpidr);
115 
116 	/* Ensure the cluster can be powered off. */
117 	write_clusterpwrdn(DSU_CLUSTER_PWR_OFF);
118 
119 	/* Configure core LPM state for hotplug. */
120 	struct scmi_lpm_config cpu_lpm_cfg = {
121 		SCMI_CPU_A55_PD(core_id),
122 		SCMI_CPU_PD_LPM_ON_RUN_WAIT_STOP,
123 		0U
124 	};
125 	/* Set the default LPM state for cpuidle */
126 	scmi_core_lpm_mode_set(imx9_scmi_handle, SCMI_CPU_A55_ID(core_id),
127 				1U, &cpu_lpm_cfg);
128 
129 	/*
130 	 * Mask all the GPC IRQ wakeup to make sure no IRQ can wakeup this core
131 	 * so we need to use SW_WAKEUP for hotplug purpose
132 	 */
133 	scmi_core_Irq_wake_set(imx9_scmi_handle, SCMI_CPU_A55_ID(core_id), 0U,
134 			       IMR_NUM, mask_all);
135 
136 	scmi_core_set_sleep_mode(imx9_scmi_handle, SCMI_CPU_A55_ID(core_id),
137 				 SCMI_GPC_WAKEUP, SCMI_CPU_SLEEP_SUSPEND);
138 }
139 
/*
 * PSCI CPU_SUSPEND handler, runs on the suspending core.
 *
 * Depending on the composite target_state it: programs the resume entry
 * for a core-off state, configures the DSU cluster power-down mode, and
 * for full system suspend switches the wakeup source to the GPC, sets
 * the LPM policy for the A55 platform / NOC / wakeup mixes, and requests
 * the SM system sleep mode. The SCMI calls are order-sensitive.
 */
void imx_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	uint64_t mpidr = read_mpidr_el1();
	unsigned int core_id = MPIDR_AFFLVL1_VAL(mpidr);
	uint32_t sys_mode;

	/* do cpu level config */
	if (is_local_state_off(CORE_PWR_STATE(target_state))) {
		imx_set_cpu_boot_entry(core_id, secure_entrypoint, SCMI_CPU_VEC_FLAGS_RESUME);
	}

	/* config DSU for cluster power down */
	if (!is_local_state_run(CLUSTER_PWR_STATE(target_state))) {
		/* L3 retention */
		if (is_local_state_retn(CLUSTER_PWR_STATE(target_state))) {
			/* NOTE(review): BIT(1) presumably selects L3 retention in CLUSTERPWRDN — confirm against DSU TRM */
			write_clusterpwrdn(DSU_CLUSTER_PWR_OFF | BIT(1));
		} else {
			write_clusterpwrdn(DSU_CLUSTER_PWR_OFF);
		}
	}

	if (is_local_state_off(SYSTEM_PWR_STATE(target_state))) {
		imx9_sys_sleep_prepare(core_id);

		/* switch to GPC wakeup source, config the target mode to SUSPEND */
		scmi_core_set_sleep_mode(imx9_scmi_handle, SCMI_CPU_A55_ID(core_id),
					 SCMI_GPC_WAKEUP, SCMI_CPU_SLEEP_SUSPEND);

		/* The A55 platform (cluster-level) CPU gets the same sleep mode. */
		scmi_core_set_sleep_mode(imx9_scmi_handle, IMX9_SCMI_CPU_A55P,
					 SCMI_GPC_WAKEUP, SCMI_CPU_SLEEP_SUSPEND);

		/* LPM policy per power mix for system suspend. */
		struct scmi_lpm_config cpu_lpm_cfg[] = {
			{
				SCMI_PWR_MIX_SLICE_IDX_A55P,
				SCMI_CPU_PD_LPM_ON_RUN_WAIT_STOP,
				0U
			},
			{
				/* Keep the NOC OCRAM memory slice retained. */
				SCMI_PWR_MIX_SLICE_IDX_NOC,
				SCMI_CPU_PD_LPM_ON_RUN_WAIT_STOP,
				BIT_32(SCMI_PWR_MEM_SLICE_IDX_NOC_OCRAM)
			},
			{
				/* Wakeup mix stays on if something (e.g. a wakeup source) requires it. */
				SCMI_PWR_MIX_SLICE_IDX_WAKEUP,
				keep_wakeupmix_on ? SCMI_CPU_PD_LPM_ON_ALWAYS :
					SCMI_CPU_PD_LPM_ON_RUN_WAIT_STOP,
				0U
			}
		};

		/* Set the default LPM state for suspend */
		scmi_core_lpm_mode_set(imx9_scmi_handle,
				       IMX9_SCMI_CPU_A55P,
				       ARRAY_SIZE(cpu_lpm_cfg),
				       cpu_lpm_cfg);

		/* Set the system sleep config */
		sys_mode = SCMI_IMX_SYS_POWER_STATE_MODE_MASK;
		if (has_netc_irq) {
			/* With NETC active, request a low perf level instead of full sleep. */
			sys_mode |= SYS_SLEEP_MODE_H(SM_PERF_LVL_LOW);
			scmi_sys_pwr_state_set(imx9_scmi_handle,
					       SCMI_SYS_PWR_FORCEFUL_REQ,
					       sys_mode);
		}
	}

}
/*
 * PSCI suspend_finish hook, runs on the resuming core.
 *
 * Undoes imx_pwr_domain_suspend's system-level configuration: restores
 * the system power mode (when NETC forced a low perf level), switches
 * the wakeup source back to the GIC with WAIT sleep mode, and restores
 * the run-time LPM policy for the A55 platform / NOC / wakeup mixes.
 */
void imx_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
{
	uint64_t mpidr = read_mpidr_el1();
	unsigned int core_id = MPIDR_AFFLVL1_VAL(mpidr);
	uint32_t sys_mode;

	/* system level */
	if (is_local_state_off(SYSTEM_PWR_STATE(target_state))) {
		imx9_sys_sleep_unprepare(core_id);

		/* Drop the low-perf-level request made on the suspend path. */
		sys_mode = SCMI_IMX_SYS_POWER_STATE_MODE_MASK;
		if (has_netc_irq) {
			scmi_sys_pwr_state_set(imx9_scmi_handle,
					       SCMI_SYS_PWR_FORCEFUL_REQ,
					       sys_mode);
		}

		/* switch to GIC wakeup source, config the target mode to WAIT */
		scmi_core_set_sleep_mode(imx9_scmi_handle, SCMI_CPU_A55_ID(core_id),
					 SCMI_GIC_WAKEUP, SCMI_CPU_SLEEP_WAIT);

		scmi_core_set_sleep_mode(imx9_scmi_handle, IMX9_SCMI_CPU_A55P,
					 SCMI_GIC_WAKEUP, SCMI_CPU_SLEEP_WAIT);

		/* Run-mode LPM policy: NOC and wakeup mixes always on. */
		struct scmi_lpm_config cpu_lpm_cfg[] = {
			{
				/* Retain the A55 L3 memory slice. */
				SCMI_PWR_MIX_SLICE_IDX_A55P,
				SCMI_CPU_PD_LPM_ON_RUN,
				BIT_32(SCMI_PWR_MEM_SLICE_IDX_A55L3)
			},
			{
				SCMI_PWR_MIX_SLICE_IDX_NOC,
				SCMI_CPU_PD_LPM_ON_ALWAYS,
				0U
			},
			{
				SCMI_PWR_MIX_SLICE_IDX_WAKEUP,
				SCMI_CPU_PD_LPM_ON_ALWAYS,
				0U
			}
		};

		/* Set the default LPM state for RUN MODE */
		scmi_core_lpm_mode_set(imx9_scmi_handle,
				       IMX9_SCMI_CPU_A55P,
				       ARRAY_SIZE(cpu_lpm_cfg),
				       cpu_lpm_cfg);
	}
}
256 
257 void imx_get_sys_suspend_power_state(psci_power_state_t *req_state)
258 {
259 	uint32_t i;
260 
261 	for (i = IMX_PWR_LVL0; i <= PLAT_MAX_PWR_LVL; i++) {
262 		req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
263 	}
264 }
265 
266 void imx_pwr_domain_pwr_down(const psci_power_state_t *target_state)
267 {
268 }
269 
270 void imx_system_off(void)
271 {
272 	int ret;
273 
274 	ret = scmi_sys_pwr_state_set(imx9_scmi_handle,
275 				     SCMI_SYS_PWR_FORCEFUL_REQ,
276 				     SCMI_SYS_PWR_SHUTDOWN);
277 	if (ret) {
278 		NOTICE("%s failed: %d\n", __func__, ret);
279 	}
280 }
281 
282 void imx_system_reset(void)
283 {
284 	int ret;
285 
286 	/* TODO: temp workaround for GIC to let reset done */
287 	gicd_clr_ctlr(PLAT_GICD_BASE,
288 		      CTLR_ENABLE_G0_BIT |
289 		      CTLR_ENABLE_G1S_BIT |
290 		      CTLR_ENABLE_G1NS_BIT,
291 		      RWP_TRUE);
292 
293 	/* Force: work, Gracefull: not work */
294 	ret = scmi_sys_pwr_state_set(imx9_scmi_handle,
295 				     SCMI_SYS_PWR_FORCEFUL_REQ,
296 				     SCMI_SYS_PWR_COLD_RESET);
297 	if (ret) {
298 		VERBOSE("%s failed: %d\n", __func__, ret);
299 	}
300 }
301