xref: /rk3399_ARM-atf/plat/mediatek/lib/pm/armv9_0/pwr_ctrl.c (revision e7be9243d071b37d13d826824ec4bb8c8b39caa2)
1 /*
2  * Copyright (c) 2025, Mediatek Inc. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <errno.h>
9 
10 #ifdef CONFIG_MTK_BOOKER
11 #include <drivers/booker.h>
12 #endif
13 
14 #include <common/debug.h>
15 #include <drivers/arm/gicv3.h>
16 #include <drivers/console.h>
17 #include <lib/psci/psci.h>
18 #include <lib/utils.h>
19 #include <plat/arm/common/plat_arm.h>
20 #include <plat/common/platform.h>
21 #include <platform_def.h>
22 
23 #include <lib/mtk_init/mtk_init.h>
24 #include <lib/pm/mtk_pm.h>
25 #ifdef MTK_PUBEVENT_ENABLE
26 #include <vendor_pubsub_events.h>
27 #endif
28 
/*
 * True when the MediaTek power-state bitmap requests an affinity-level
 * (cluster or mcusys) transition that subscribers must be told about.
 * The argument is parenthesized so expression arguments expand safely.
 */
#define IS_AFFLV_PUBEVENT(_pstate) \
	((_pstate) & (MT_CPUPM_PWR_DOMAIN_MCUSYS | \
		      MT_CPUPM_PWR_DOMAIN_CLUSTER))
32 
#ifdef MTK_PUBEVENT_ENABLE
/* Publish a per-core power-on/off event to registered subscribers. */
#define MT_CPUPM_EVENT_PWR_ON(x) ({ \
	PUBLISH_EVENT_ARG(mt_cpupm_publish_pwr_on, \
			  (const void *)(x)); })

#define MT_CPUPM_EVENT_PWR_OFF(x) ({ \
	PUBLISH_EVENT_ARG(mt_cpupm_publish_pwr_off, \
			  (const void *)(x)); })

/* Publish an affinity-level (cluster/mcusys) power-on/off event. */
#define MT_CPUPM_EVENT_AFFLV_PWR_ON(x) ({ \
	PUBLISH_EVENT_ARG(mt_cpupm_publish_afflv_pwr_on, \
			  (const void *)(x)); })

#define MT_CPUPM_EVENT_AFFLV_PWR_OFF(x) ({ \
	PUBLISH_EVENT_ARG(mt_cpupm_publish_afflv_pwr_off, \
			  (const void *)(x)); })

#else
/* Pub-events disabled: evaluate the argument once and discard it. */
#define MT_CPUPM_EVENT_PWR_ON(x) ({ (void)(x); })
#define MT_CPUPM_EVENT_PWR_OFF(x) ({ (void)(x); })
#define MT_CPUPM_EVENT_AFFLV_PWR_ON(x) ({ (void)(x); })
#define MT_CPUPM_EVENT_AFFLV_PWR_OFF(x) ({ (void)(x); })
#endif
56 
/*
 * Per-CPU vote on the cluster power status:
 * [0] : this CPU allows the cluster to power down
 * [1] : this CPU requires the cluster to stay powered on
 */
/*
 * Cast this core's cluster power vote by writing CLUSTERPWRDN_EL1.
 * NOTE(review): the onoff value is written to the register verbatim
 * (1 on power-on paths, 0 on power-down paths below) — confirm the bit
 * semantics against the DSU register description.
 */
#define coordinate_cluster(onoff) \
	write_clusterpwrdn_el1(onoff)
#define coordinate_cluster_pwron() \
	coordinate_cluster(1)
#define coordinate_cluster_pwroff() \
	coordinate_cluster(0)

/*
 * Initial callback mask: no optional callbacks are enabled until
 * register_cpu_pm_ops()/register_cpu_smp_ops() OR their bits in.
 */
#define MTK_CPU_PWR_FN_MASK_DEFAULT	(0)
71 
/* Aggregated CPU power-management control state for this platform. */
struct mtk_cpu_pwr_ctrl {
	unsigned int fn_mask;	/* bitmask of successfully registered callbacks */
	struct mtk_cpu_pm_ops *ops;	/* cpuidle/suspend callback table */
	struct mtk_cpu_smp_ops *smp;	/* SMP hotplug callback table */
};

/* Singleton instance; populated by the register_cpu_*_ops() calls below. */
static struct mtk_cpu_pwr_ctrl imtk_cpu_pwr = {
	.fn_mask = MTK_CPU_PWR_FN_MASK_DEFAULT,
	.ops = NULL,
};

/* A callback is usable only when its table is set AND its mask bit is on. */
#define IS_CPUIDLE_FN_ENABLE(x)	(imtk_cpu_pwr.ops && (imtk_cpu_pwr.fn_mask & (x)))
#define IS_CPUSMP_FN_ENABLE(x)	(imtk_cpu_pwr.smp && (imtk_cpu_pwr.fn_mask & (x)))
85 
/* per-cpu power state: raw PSCI power_state recorded at validation time */
static unsigned int cpu_power_state[PLATFORM_CORE_COUNT];

/* Extract the PSCI state-id field from a CPU's recorded power state. */
#define get_pwr_stateid(cpu) \
	psci_get_pstate_id(cpu_power_state[cpu])

/*
 * Translate a PSCI power state into the MediaTek power-domain bitmap via
 * the platform's get_pstate callback; evaluates to 0 when unregistered.
 */
#define GET_MEDIATEK_PSTATE(_domain, _psci_state, _state) ({ \
	int mret = 0; \
	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_CPUPM_GET_PWR_STATE)) \
		mret = imtk_cpu_pwr.ops->get_pstate( \
			_domain, _psci_state, _state); \
	mret; })
98 
99 static inline unsigned int get_pwr_afflv(const psci_power_state_t *state)
100 {
101 	for (int i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
102 		if (is_local_state_run(state->pwr_domain_state[i]) == 0)
103 			return (unsigned int) i;
104 	}
105 
106 	return PSCI_INVALID_PWR_LVL;
107 }
108 
109 static void mcusys_pwr_on_common(const struct mtk_cpupm_pwrstate *state)
110 {
111 	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_RESUME_MCUSYS))
112 		imtk_cpu_pwr.ops->mcusys_resume(state);
113 }
114 
115 static void mcusys_pwr_dwn_common(const struct mtk_cpupm_pwrstate *state)
116 {
117 #ifdef CONFIG_MTK_BOOKER
118 	booker_flush();
119 #endif
120 
121 	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_SUSPEND_MCUSYS))
122 		imtk_cpu_pwr.ops->mcusys_suspend(state);
123 
124 }
125 
126 static void cluster_pwr_on_common(const struct mtk_cpupm_pwrstate *state)
127 {
128 	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_RESUME_CLUSTER))
129 		imtk_cpu_pwr.ops->cluster_resume(state);
130 }
131 
132 static void cluster_pwr_dwn_common(const struct mtk_cpupm_pwrstate *state)
133 {
134 	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_SUSPEND_CLUSTER))
135 		imtk_cpu_pwr.ops->cluster_suspend(state);
136 }
137 
/*
 * Common per-core power-on path: vote to keep the cluster powered and
 * bring this core's GIC redistributor and CPU interface back up.
 * state/pstate are unused here but keep the signature symmetric with
 * cpu_pwr_dwn_common().
 */
static void cpu_pwr_on_common(const struct mtk_cpupm_pwrstate *state,
				      unsigned int pstate)
{
	/* Hoisted: plat_my_core_pos() was previously queried twice. */
	unsigned int pos = plat_my_core_pos();

	coordinate_cluster_pwron();

	gicv3_rdistif_init(pos);
	gicv3_cpuif_enable(pos);
}
146 
147 static void cpu_pwr_dwn_common(const struct mtk_cpupm_pwrstate *state,
148 				       unsigned int pstate)
149 {
150 	if (pstate & MT_CPUPM_PWR_DOMAIN_PERCORE_DSU)
151 		coordinate_cluster_pwroff();
152 }
153 
154 static void cpu_pwr_resume(const struct mtk_cpupm_pwrstate *state,
155 				   unsigned int pstate)
156 {
157 	cpu_pwr_on_common(state, pstate);
158 	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_RESUME_CORE))
159 		imtk_cpu_pwr.ops->cpu_resume(state);
160 }
161 
162 static void cpu_pwr_suspend(const struct mtk_cpupm_pwrstate *state,
163 				    unsigned int pstate)
164 {
165 	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_SUSPEND_CORE))
166 		imtk_cpu_pwr.ops->cpu_suspend(state);
167 	cpu_pwr_dwn_common(state, pstate);
168 }
169 
170 static void cpu_pwr_on(const struct mtk_cpupm_pwrstate *state,
171 			       unsigned int pstate)
172 {
173 	cpu_pwr_on_common(state, pstate);
174 	if (IS_CPUSMP_FN_ENABLE(MTK_CPUPM_FN_SMP_CORE_ON))
175 		imtk_cpu_pwr.smp->cpu_on(state);
176 }
177 
178 static void cpu_pwr_off(const struct mtk_cpupm_pwrstate *state,
179 				unsigned int pstate)
180 {
181 	if (IS_CPUSMP_FN_ENABLE(MTK_CPUPM_FN_SMP_CORE_OFF))
182 		imtk_cpu_pwr.smp->cpu_off(state);
183 	cpu_pwr_dwn_common(state, pstate);
184 }
185 
186 static int power_domain_on(u_register_t mpidr)
187 {
188 	int ret = PSCI_E_SUCCESS;
189 	int cpu = plat_core_pos_by_mpidr(mpidr);
190 	uintptr_t entry = plat_pm_get_warm_entry();
191 
192 	if (IS_CPUSMP_FN_ENABLE(MTK_CPUPM_FN_PWR_ON_CORE_PREPARE)) {
193 		int b_ret = MTK_CPUPM_E_FAIL;
194 
195 		b_ret = imtk_cpu_pwr.smp->cpu_pwr_on_prepare(cpu, entry);
196 
197 		if (b_ret)
198 			ret = PSCI_E_DENIED;
199 	}
200 	INFO("CPU %u power domain prepare on\n", cpu);
201 	return ret;
202 }
203 
204 static void power_domain_on_finish(const psci_power_state_t *state)
205 {
206 	struct mt_cpupm_event_data nb;
207 	unsigned int pstate = (MT_CPUPM_PWR_DOMAIN_CORE |
208 			       MT_CPUPM_PWR_DOMAIN_PERCORE_DSU);
209 	struct mtk_cpupm_pwrstate pm_state = {
210 		.info = {
211 			.cpuid = plat_my_core_pos(),
212 			.mode = MTK_CPU_PM_SMP,
213 		},
214 		.pwr = {
215 			.afflv = get_pwr_afflv(state),
216 			.state_id = 0x0,
217 		},
218 	};
219 
220 	cpu_pwr_on(&pm_state, pstate);
221 
222 	nb.cpuid = pm_state.info.cpuid;
223 	nb.pwr_domain = pstate;
224 	MT_CPUPM_EVENT_PWR_ON(&nb);
225 	INFO("CPU %u power domain on finished\n", pm_state.info.cpuid);
226 }
227 
228 static void power_domain_off(const psci_power_state_t *state)
229 {
230 	struct mt_cpupm_event_data nb;
231 	unsigned int pstate = (MT_CPUPM_PWR_DOMAIN_CORE |
232 			      MT_CPUPM_PWR_DOMAIN_PERCORE_DSU);
233 	struct mtk_cpupm_pwrstate pm_state = {
234 		.info = {
235 			.cpuid = plat_my_core_pos(),
236 			.mode = MTK_CPU_PM_SMP,
237 		},
238 		.pwr = {
239 			.afflv = get_pwr_afflv(state),
240 			.state_id = 0x0,
241 		},
242 	};
243 
244 	cpu_pwr_off(&pm_state, pstate);
245 
246 	gicv3_rdistif_off(plat_my_core_pos());
247 
248 	nb.cpuid = pm_state.info.cpuid;
249 	nb.pwr_domain = pstate;
250 	MT_CPUPM_EVENT_PWR_OFF(&nb);
251 
252 	INFO("CPU %u power domain off\n", pm_state.info.cpuid);
253 }
254 
255 static void power_domain_suspend(const psci_power_state_t *state)
256 {
257 	unsigned int pstate = 0;
258 	struct mt_cpupm_event_data nb;
259 	struct mtk_cpupm_pwrstate pm_state = {
260 		.info = {
261 			.cpuid = plat_my_core_pos(),
262 			.mode = MTK_CPU_PM_CPUIDLE,
263 		},
264 	};
265 
266 	pm_state.pwr.state_id = get_pwr_stateid(pm_state.info.cpuid);
267 	pm_state.pwr.afflv = get_pwr_afflv(state);
268 	pm_state.pwr.raw = state;
269 
270 	pstate = GET_MEDIATEK_PSTATE(CPUPM_PWR_OFF,
271 		cpu_power_state[pm_state.info.cpuid], &pm_state);
272 
273 	cpu_pwr_suspend(&pm_state, pstate);
274 
275 	if (pstate & MT_CPUPM_PWR_DOMAIN_CLUSTER)
276 		cluster_pwr_dwn_common(&pm_state);
277 
278 	if (pstate & MT_CPUPM_PWR_DOMAIN_MCUSYS)
279 		mcusys_pwr_dwn_common(&pm_state);
280 
281 	gicv3_rdistif_off(plat_my_core_pos());
282 
283 	nb.cpuid = pm_state.info.cpuid;
284 	nb.pwr_domain = pstate;
285 	MT_CPUPM_EVENT_PWR_OFF(&nb);
286 
287 	if (IS_AFFLV_PUBEVENT(pstate))
288 		MT_CPUPM_EVENT_AFFLV_PWR_OFF(&nb);
289 }
290 
291 static void power_domain_suspend_finish(const psci_power_state_t *state)
292 {
293 	unsigned int pstate = 0;
294 	struct mt_cpupm_event_data nb;
295 	struct mtk_cpupm_pwrstate pm_state = {
296 		.info = {
297 			.cpuid = plat_my_core_pos(),
298 			.mode = MTK_CPU_PM_CPUIDLE,
299 		},
300 	};
301 
302 	pm_state.pwr.state_id = get_pwr_stateid(pm_state.info.cpuid);
303 	pm_state.pwr.afflv = get_pwr_afflv(state);
304 	pm_state.pwr.raw = state;
305 
306 	pstate = GET_MEDIATEK_PSTATE(CPUPM_PWR_ON,
307 			cpu_power_state[pm_state.info.cpuid], &pm_state);
308 
309 	if (pstate & MT_CPUPM_PWR_DOMAIN_MCUSYS)
310 		mcusys_pwr_on_common(&pm_state);
311 
312 	if (pstate & MT_CPUPM_PWR_DOMAIN_CLUSTER)
313 		cluster_pwr_on_common(&pm_state);
314 
315 	cpu_pwr_resume(&pm_state, pstate);
316 
317 	nb.cpuid = pm_state.info.cpuid;
318 	nb.pwr_domain = pstate;
319 	MT_CPUPM_EVENT_PWR_ON(&nb);
320 
321 	if (IS_AFFLV_PUBEVENT(pstate))
322 		MT_CPUPM_EVENT_AFFLV_PWR_ON(&nb);
323 }
324 
325 static int validate_power_state(unsigned int power_state,
326 					psci_power_state_t *req_state)
327 {
328 	int i;
329 	unsigned int pstate = psci_get_pstate_type(power_state);
330 	int aff_lvl = psci_get_pstate_pwrlvl(power_state);
331 	unsigned int my_core_pos = plat_my_core_pos();
332 
333 	if (!imtk_cpu_pwr.ops)
334 		return PSCI_E_INVALID_PARAMS;
335 
336 	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_PWR_STATE_VALID)) {
337 		int ret = MTK_CPUPM_E_FAIL;
338 
339 		ret = imtk_cpu_pwr.ops->pwr_state_valid(aff_lvl, pstate);
340 		if (ret)
341 			return PSCI_E_INVALID_PARAMS;
342 	}
343 
344 	if (pstate == PSTATE_TYPE_STANDBY)
345 		req_state->pwr_domain_state[0] = PLAT_MAX_RET_STATE;
346 	else {
347 		for (i = PSCI_CPU_PWR_LVL; i <= aff_lvl; i++)
348 			req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
349 	}
350 	cpu_power_state[my_core_pos] = power_state;
351 	return PSCI_E_SUCCESS;
352 }
353 
354 #if CONFIG_MTK_SUPPORT_SYSTEM_SUSPEND
355 /* Mediatek PSCI power domain */
356 static void get_sys_suspend_power_state(psci_power_state_t *req_state)
357 {
358 	int lv = 0;
359 	unsigned int my_core_pos = plat_my_core_pos();
360 
361 	for (lv = PSCI_CPU_PWR_LVL; lv <= PLAT_MAX_PWR_LVL; lv++)
362 		req_state->pwr_domain_state[lv] = PLAT_MAX_OFF_STATE;
363 
364 	cpu_power_state[my_core_pos] = psci_make_powerstate(
365 						MT_PLAT_PWR_STATE_SUSPEND,
366 						PSTATE_TYPE_POWERDOWN,
367 						PLAT_MT_SYSTEM_SUSPEND);
368 
369 	flush_dcache_range((uintptr_t)&cpu_power_state[my_core_pos],
370 			   sizeof(cpu_power_state[my_core_pos]));
371 }
372 #endif
373 
/*
 * Final hook before the core enters its power-down WFI. Never returns.
 *
 * When the platform registered MTK_CPUPM_FN_PWR_DOMAIN_POWER_DOWN_WFI its
 * hook is given the chance to perform the power-down itself; otherwise
 * fall back to the generic psci_power_down_wfi().
 *
 * NOTE(review): a hook return of MTK_CPUPM_E_OK leads to
 * plat_panic_handler() — presumably because a successful hook is expected
 * to power the core down and never return, so reaching this point again
 * is fatal. Confirm against the platform hook's contract.
 */
static void __dead2 pwr_domain_pwr_down_wfi(const psci_power_state_t *req_state)
{
	unsigned int cpu = plat_my_core_pos();
	int ret = MTK_CPUPM_E_NOT_SUPPORT;

	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_PWR_DOMAIN_POWER_DOWN_WFI))
		ret = imtk_cpu_pwr.ops->pwr_domain_pwr_down_wfi(cpu);
	if (ret == MTK_CPUPM_E_OK)
		plat_panic_handler();
	else
		psci_power_down_wfi();
}
386 
/*
 * One-time SMP bring-up: hand the warm-boot entry point to the platform
 * SMP init hook. Panics when no warm entry point was provided, since
 * secondary cores could never be brought back without one.
 */
static void pm_smp_init(unsigned int cpu_id, uintptr_t entry_point)
{
	if (entry_point == 0) {
		ERROR("%s, warm_entry_point is null\n", __func__);
		panic();
	}
	/* Forward to the platform SMP init hook when registered. */
	if (IS_CPUSMP_FN_ENABLE(MTK_CPUPM_FN_SMP_INIT))
		imtk_cpu_pwr.smp->init(cpu_id, entry_point);
	INFO("[%s:%d] - Initialize finished\n", __func__, __LINE__);
}
397 
/* Suspend/idle PSCI callbacks handed to the MTK PM framework on
 * successful register_cpu_pm_ops(). */
static struct plat_pm_pwr_ctrl armv9_0_pwr_ops = {
	.pwr_domain_suspend = power_domain_suspend,
	.pwr_domain_suspend_finish = power_domain_suspend_finish,
	.validate_power_state = validate_power_state,
#if CONFIG_MTK_SUPPORT_SYSTEM_SUSPEND
	.get_sys_suspend_power_state = get_sys_suspend_power_state,
#endif
	.pwr_domain_pwr_down_wfi = pwr_domain_pwr_down_wfi,
};
407 
/*
 * SMP hotplug PSCI callbacks handed to the MTK PM framework on successful
 * register_cpu_smp_ops().
 * NOTE(review): unlike armv9_0_pwr_ops this table is not 'static' —
 * confirm whether external linkage is intentional.
 */
struct plat_pm_smp_ctrl armv9_0_smp_ops = {
	.init = pm_smp_init,
	.pwr_domain_on = power_domain_on,
	.pwr_domain_off = power_domain_off,
	.pwr_domain_on_finish = power_domain_on_finish,
};
414 
/* Record a registration failure: clear the success flag and assert so a
 * broken ops table is caught early in debug builds. */
#define ISSUE_CPU_PM_REG_FAIL(_success) ({ \
	(_success) = 0; assert(0); })

/*
 * If _id is requested in _fns, require the matching _func callback to be
 * provided (and _cond_ex to hold) before accumulating _id into _flag;
 * otherwise mark the registration failed via _result.
 * Arguments are parenthesized; the stray trailing backslash after the
 * closing brace (which silently continued the macro onto the next line)
 * has been removed.
 */
#define CPM_PM_FN_CHECK(_fns, _ops, _id, _func, _cond_ex, _result, _flag) ({ \
	if (((_fns) & (_id)) != 0U) { \
		if (((_ops)->_func != NULL) && (_cond_ex)) \
			(_flag) |= (_id); \
		else { \
			ISSUE_CPU_PM_REG_FAIL(_result); \
		} \
	} })
426 
427 int plat_pm_invoke_func(enum mtk_cpu_pm_mode mode, unsigned int id, void *priv)
428 {
429 	int ret = MTK_CPUPM_E_ERR;
430 
431 	if ((mode == MTK_CPU_PM_CPUIDLE) && imtk_cpu_pwr.ops &&
432 	    imtk_cpu_pwr.ops->invoke)
433 		ret = imtk_cpu_pwr.ops->invoke(id, priv);
434 	else if ((mode == MTK_CPU_PM_SMP) &&
435 		 imtk_cpu_pwr.smp &&
436 		 imtk_cpu_pwr.smp->invoke)
437 		ret = imtk_cpu_pwr.smp->invoke(id, priv);
438 
439 	return ret;
440 }
441 
442 int register_cpu_pm_ops(unsigned int fn_flags, struct mtk_cpu_pm_ops *ops)
443 {
444 	int success = 1;
445 	unsigned int fns = 0;
446 
447 	if (!ops || imtk_cpu_pwr.ops) {
448 		ERROR("[%s:%d] register cpu_pm fail !!\n", __FILE__, __LINE__);
449 		return MTK_CPUPM_E_ERR;
450 	}
451 	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_RESUME_CORE,
452 			cpu_resume, 1, success, fns);
453 	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SUSPEND_CORE,
454 			cpu_suspend, 1, success, fns);
455 	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_RESUME_CLUSTER,
456 			cluster_resume, 1, success, fns);
457 	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SUSPEND_CLUSTER,
458 			cluster_suspend, 1, success, fns);
459 	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_RESUME_MCUSYS,
460 			mcusys_resume, 1,
461 			success, fns);
462 	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SUSPEND_MCUSYS,
463 			mcusys_suspend, 1, success, fns);
464 	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_CPUPM_GET_PWR_STATE,
465 			get_pstate, 1, success, fns);
466 	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_PWR_STATE_VALID,
467 			pwr_state_valid, 1, success, fns);
468 	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_INIT,
469 			init, 1, success, fns);
470 	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_PWR_DOMAIN_POWER_DOWN_WFI,
471 			pwr_domain_pwr_down_wfi, 1, success, fns);
472 	if (success) {
473 		imtk_cpu_pwr.ops = ops;
474 		imtk_cpu_pwr.fn_mask |= fns;
475 		plat_pm_ops_setup_pwr(&armv9_0_pwr_ops);
476 		INFO("[%s:%d] CPU pwr ops register success, support:0x%x\n",
477 					__func__, __LINE__, fns);
478 	} else {
479 		ERROR("[%s:%d] register cpu_pm ops fail !, fn:0x%x\n",
480 		      __func__, __LINE__, fn_flags);
481 		assert(0);
482 	}
483 	return MTK_CPUPM_E_OK;
484 }
485 
486 int register_cpu_smp_ops(unsigned int fn_flags, struct mtk_cpu_smp_ops *ops)
487 {
488 	int success = 1;
489 	unsigned int fns = 0;
490 
491 	if (!ops || imtk_cpu_pwr.smp) {
492 		ERROR("[%s:%d] register cpu_smp fail !!\n", __FILE__, __LINE__);
493 		return MTK_CPUPM_E_ERR;
494 	}
495 
496 	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SMP_INIT,
497 			init, 1, success, fns);
498 
499 	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_PWR_ON_CORE_PREPARE,
500 			cpu_pwr_on_prepare, 1, success, fns);
501 
502 	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SMP_CORE_ON,
503 			cpu_on, 1, success, fns);
504 
505 	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SMP_CORE_OFF,
506 			cpu_off, 1, success, fns);
507 
508 	if (success) {
509 		imtk_cpu_pwr.smp = ops;
510 		imtk_cpu_pwr.fn_mask |= fns;
511 		plat_pm_ops_setup_smp(&armv9_0_smp_ops);
512 		INFO("[%s:%d] CPU smp ops register success, support:0x%x\n",
513 		     __func__, __LINE__, fns);
514 	} else {
515 		ERROR("[%s:%d] register cpu_smp ops fail !, fn:0x%x\n",
516 		      __func__, __LINE__, fn_flags);
517 		assert(0);
518 	}
519 	return MTK_CPUPM_E_OK;
520 }
521