xref: /rk3399_ARM-atf/plat/mediatek/lib/pm/armv8_2/pwr_ctrl.c (revision 6dc5979a6cb2121e4c16e7bd62e24030e0f42755)
/*
 * Copyright (c) 2022, Mediatek Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>

#include <common/debug.h>
#include <drivers/arm/gicv3.h>
#include <lib/psci/psci.h>
#include <lib/utils.h>
#ifdef MTK_PUBEVENT_ENABLE
#include <mtk_event/mtk_pubsub_events.h>
#endif
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>

#include <dfd.h>
#include <lib/mtk_init/mtk_init.h>
#include <lib/pm/mtk_pm.h>
#include <mt_gic_v3.h>
#include <platform_def.h>

#define IS_AFFLV_PUBEVENT(_pstate) \
	((_pstate & (MT_CPUPM_PWR_DOMAIN_MCUSYS | MT_CPUPM_PWR_DOMAIN_CLUSTER)) != 0)

#ifdef MTK_PUBEVENT_ENABLE
#define MT_CPUPM_EVENT_PWR_ON(x) ({ \
	PUBLISH_EVENT_ARG(mt_cpupm_publish_pwr_on, (const void *)(x)); })

#define MT_CPUPM_EVENT_PWR_OFF(x) ({ \
	PUBLISH_EVENT_ARG(mt_cpupm_publish_pwr_off, (const void *)(x)); })

#define MT_CPUPM_EVENT_AFFLV_PWR_ON(x) ({ \
	PUBLISH_EVENT_ARG(mt_cpupm_publish_afflv_pwr_on, (const void *)(x)); })

#define MT_CPUPM_EVENT_AFFLV_PWR_OFF(x) ({ \
	PUBLISH_EVENT_ARG(mt_cpupm_publish_afflv_pwr_off, (const void *)(x)); })

#else
#define MT_CPUPM_EVENT_PWR_ON(x) ({ (void)x; })
#define MT_CPUPM_EVENT_PWR_OFF(x) ({ (void)x; })
#define MT_CPUPM_EVENT_AFFLV_PWR_ON(x) ({ (void)x; })
#define MT_CPUPM_EVENT_AFFLV_PWR_OFF(x) ({ (void)x; })
#endif
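
/*
 * When MTK_PUBEVENT_ENABLE is defined, the MT_CPUPM_EVENT_* wrappers above
 * publish the per-CPU power transitions through PUBLISH_EVENT_ARG(), and
 * IS_AFFLV_PUBEVENT() additionally gates the cluster/mcusys level events.
 * Without it, the wrappers compile to no-ops.
 */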

/*
 * The CPU's requirement for cluster power status:
 * [0] : the CPU requests that the cluster power down
 * [1] : the CPU requests that the cluster stay powered on
 */
#define coordinate_cluster(onoff) write_clusterpwrdn_el1(onoff)
#define coordinate_cluster_pwron() coordinate_cluster(1)
#define coordinate_cluster_pwroff() coordinate_cluster(0)

/* Disable all functions by default. */
#define MTK_CPUPM_FN_MASK_DEFAULT	(0)

struct mtk_cpu_pwr_ctrl {
	unsigned int fn_mask;
	struct mtk_cpu_pm_ops *ops;
	struct mtk_cpu_smp_ops *smp;
};

static struct mtk_cpu_pwr_ctrl mtk_cpu_pwr = {
	.fn_mask = MTK_CPUPM_FN_MASK_DEFAULT,
	.ops = NULL,
};

#define IS_CPUIDLE_FN_ENABLE(x)	((mtk_cpu_pwr.ops != NULL) && ((mtk_cpu_pwr.fn_mask & x) != 0))
#define IS_CPUSMP_FN_ENABLE(x)	((mtk_cpu_pwr.smp != NULL) && ((mtk_cpu_pwr.fn_mask & x) != 0))

/* per-cpu power state */
static unsigned int armv8_2_power_state[PLATFORM_CORE_COUNT];

#define armv8_2_get_pwr_stateid(cpu) psci_get_pstate_id(armv8_2_power_state[cpu])

static unsigned int get_mediatek_pstate(unsigned int domain, unsigned int psci_state,
					struct mtk_cpupm_pwrstate *state)
{
	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_CPUPM_GET_PWR_STATE)) {
		return mtk_cpu_pwr.ops->get_pstate(domain, psci_state, state);
	}

	return 0;
}

unsigned int armv8_2_get_pwr_afflv(const psci_power_state_t *state_info)
{
	int i;

	for (i = (int)PLAT_MAX_PWR_LVL; i >= (int)PSCI_CPU_PWR_LVL; i--) {
		if (is_local_state_run(state_info->pwr_domain_state[i]) == 0) {
			return (unsigned int) i;
		}
	}

	return PSCI_INVALID_PWR_LVL;
}
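
/*
 * Example (illustrative, assuming PLAT_MAX_PWR_LVL == 2): for a composite
 * request of pwr_domain_state = { OFF, OFF, RUN } (CPU off, cluster off,
 * system running), the loop above returns 1, i.e. the cluster is the deepest
 * affinity level being powered down. If every level is in RUN state, the
 * function returns PSCI_INVALID_PWR_LVL.
 */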

/* MediaTek mcusys power on control interface */
static void armv8_2_mcusys_pwr_on_common(const struct mtk_cpupm_pwrstate *state)
{
	mt_gic_init();
	mt_gic_distif_restore();
	gic_sgi_restore_all();

	dfd_resume();

	/* Add code here for any behavior needed before the system enters the mcusys on state. */
	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_RESUME_MCUSYS)) {
		mtk_cpu_pwr.ops->mcusys_resume(state);
	}
}

/* MediaTek mcusys power down control interface */
static void armv8_2_mcusys_pwr_dwn_common(const struct mtk_cpupm_pwrstate *state)
{
	mt_gic_distif_save();
	gic_sgi_save_all();

	/* Add code here for any behavior needed before the system enters the mcusys off state. */
	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_SUSPEND_MCUSYS)) {
		mtk_cpu_pwr.ops->mcusys_suspend(state);
	}
}

/* MediaTek Cluster power on control interface */
static void armv8_2_cluster_pwr_on_common(const struct mtk_cpupm_pwrstate *state)
{
	/* Add code here for any behavior needed before the cluster enters the on state. */
#if defined(MTK_CM_MGR) && !defined(MTK_FPGA_EARLY_PORTING)
	/* Initialize the CPU stall counters. */
	init_cpu_stall_counter_all();
#endif

	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_RESUME_CLUSTER)) {
		mtk_cpu_pwr.ops->cluster_resume(state);
	}
}

/* MediaTek Cluster power down control interface */
static void armv8_2_cluster_pwr_dwn_common(const struct mtk_cpupm_pwrstate *state)
{
	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_SUSPEND_CLUSTER)) {
		mtk_cpu_pwr.ops->cluster_suspend(state);
	}
}

/* MediaTek CPU power on control interface */
static void armv8_2_cpu_pwr_on_common(const struct mtk_cpupm_pwrstate *state, unsigned int pstate)
{
	coordinate_cluster_pwron();

	gicv3_rdistif_on(plat_my_core_pos());
	gicv3_cpuif_enable(plat_my_core_pos());
	mt_gic_rdistif_init();

	/* If MCUSYS has been powered down then restore GIC redistributor for all CPUs. */
	if (IS_PLAT_SYSTEM_RETENTION(state->pwr.afflv)) {
		mt_gic_rdistif_restore_all();
	} else {
		mt_gic_rdistif_restore();
	}
}

/* MediaTek CPU power down control interface */
static void armv8_2_cpu_pwr_dwn_common(const struct mtk_cpupm_pwrstate *state, unsigned int pstate)
{
	if ((pstate & MT_CPUPM_PWR_DOMAIN_PERCORE_DSU) != 0) {
		coordinate_cluster_pwroff();
	}

	mt_gic_rdistif_save();
	gicv3_cpuif_disable(plat_my_core_pos());
	gicv3_rdistif_off(plat_my_core_pos());
}

static void armv8_2_cpu_pwr_resume(const struct mtk_cpupm_pwrstate *state, unsigned int pstate)
{
	armv8_2_cpu_pwr_on_common(state, pstate);
	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_RESUME_CORE)) {
		mtk_cpu_pwr.ops->cpu_resume(state);
	}
}

static void armv8_2_cpu_pwr_suspend(const struct mtk_cpupm_pwrstate *state, unsigned int pstate)
{
	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_SUSPEND_CORE)) {
		mtk_cpu_pwr.ops->cpu_suspend(state);
	}
	armv8_2_cpu_pwr_dwn_common(state, pstate);
}

static void armv8_2_cpu_pwr_on(const struct mtk_cpupm_pwrstate *state, unsigned int pstate)
{
	armv8_2_cpu_pwr_on_common(state, pstate);

	if (IS_CPUSMP_FN_ENABLE(MTK_CPUPM_FN_SMP_CORE_ON)) {
		mtk_cpu_pwr.smp->cpu_on(state);
	}
}

static void armv8_2_cpu_pwr_off(const struct mtk_cpupm_pwrstate *state, unsigned int pstate)
{
	if (IS_CPUSMP_FN_ENABLE(MTK_CPUPM_FN_SMP_CORE_OFF)) {
		mtk_cpu_pwr.smp->cpu_off(state);
	}
	armv8_2_cpu_pwr_dwn_common(state, pstate);
}

/* MediaTek PSCI power domain */
static int armv8_2_power_domain_on(u_register_t mpidr)
{
	int ret = PSCI_E_SUCCESS;
	int cpu = plat_core_pos_by_mpidr(mpidr);
	uintptr_t entry = plat_pm_get_warm_entry();

	if (IS_CPUSMP_FN_ENABLE(MTK_CPUPM_FN_PWR_ON_CORE_PREPARE)) {
		if (mtk_cpu_pwr.smp->cpu_pwr_on_prepare(cpu, entry) != 0) {
			ret = PSCI_E_DENIED;
		}
	}
	INFO("CPU %u power domain prepare on\n", cpu);
	return ret;
}

/* MediaTek PSCI power domain */
static void armv8_2_power_domain_on_finish(const psci_power_state_t *state)
{
	struct mt_cpupm_event_data nb;
	unsigned int pstate = (MT_CPUPM_PWR_DOMAIN_CORE | MT_CPUPM_PWR_DOMAIN_PERCORE_DSU);
	struct mtk_cpupm_pwrstate pm_state = {
		.info = {
			.cpuid = plat_my_core_pos(),
			.mode = MTK_CPU_PM_SMP,
		},
		.pwr = {
			.afflv = armv8_2_get_pwr_afflv(state),
			.state_id = 0x0,
		},
	};

	armv8_2_cpu_pwr_on(&pm_state, pstate);

	nb.cpuid = pm_state.info.cpuid;
	nb.pwr_domain = pstate;
	MT_CPUPM_EVENT_PWR_ON(&nb);

	INFO("CPU %u power domain on finished\n", pm_state.info.cpuid);
}

/* MediaTek PSCI power domain */
static void armv8_2_power_domain_off(const psci_power_state_t *state)
{
	struct mt_cpupm_event_data nb;
	unsigned int pstate = (MT_CPUPM_PWR_DOMAIN_CORE | MT_CPUPM_PWR_DOMAIN_PERCORE_DSU);
	struct mtk_cpupm_pwrstate pm_state = {
		.info = {
			.cpuid = plat_my_core_pos(),
			.mode = MTK_CPU_PM_SMP,
		},
		.pwr = {
			.afflv = armv8_2_get_pwr_afflv(state),
			.state_id = 0x0,
		},
	};
	armv8_2_cpu_pwr_off(&pm_state, pstate);

	nb.cpuid = pm_state.info.cpuid;
	nb.pwr_domain = pstate;
	MT_CPUPM_EVENT_PWR_OFF(&nb);

	INFO("CPU %u power domain off\n", pm_state.info.cpuid);
}

/* MediaTek PSCI power domain */
static void armv8_2_power_domain_suspend(const psci_power_state_t *state)
{
	unsigned int pstate = 0;
	struct mt_cpupm_event_data nb;
	struct mtk_cpupm_pwrstate pm_state = {
		.info = {
			.cpuid = plat_my_core_pos(),
			.mode = MTK_CPU_PM_CPUIDLE,
		},
	};

	pm_state.pwr.state_id = armv8_2_get_pwr_stateid(pm_state.info.cpuid);
	pm_state.pwr.afflv = armv8_2_get_pwr_afflv(state);
	pm_state.pwr.raw = state;

	pstate = get_mediatek_pstate(CPUPM_PWR_OFF,
				     armv8_2_power_state[pm_state.info.cpuid], &pm_state);

	armv8_2_cpu_pwr_suspend(&pm_state, pstate);

	if ((pstate & MT_CPUPM_PWR_DOMAIN_CLUSTER) != 0) {
		armv8_2_cluster_pwr_dwn_common(&pm_state);
	}

	if ((pstate & MT_CPUPM_PWR_DOMAIN_MCUSYS) != 0) {
		armv8_2_mcusys_pwr_dwn_common(&pm_state);
	}

	nb.cpuid = pm_state.info.cpuid;
	nb.pwr_domain = pstate;
	MT_CPUPM_EVENT_PWR_OFF(&nb);

	if (IS_AFFLV_PUBEVENT(pstate)) {
		MT_CPUPM_EVENT_AFFLV_PWR_OFF(&nb);
	}
}
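
/*
 * Note on ordering: the suspend path above tears down the CPU core first,
 * then the cluster, then mcusys; the resume path below restores the domains
 * in the reverse order (mcusys, then cluster, then CPU core).
 */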

/* MediaTek PSCI power domain */
static void armv8_2_power_domain_suspend_finish(const psci_power_state_t *state)
{
	unsigned int pstate = 0;
	struct mt_cpupm_event_data nb;
	struct mtk_cpupm_pwrstate pm_state = {
		.info = {
			.cpuid = plat_my_core_pos(),
			.mode = MTK_CPU_PM_CPUIDLE,
		},
	};

	pm_state.pwr.state_id = armv8_2_get_pwr_stateid(pm_state.info.cpuid);
	pm_state.pwr.afflv = armv8_2_get_pwr_afflv(state);
	pm_state.pwr.raw = state;

	pstate = get_mediatek_pstate(CPUPM_PWR_ON,
				     armv8_2_power_state[pm_state.info.cpuid], &pm_state);

	if ((pstate & MT_CPUPM_PWR_DOMAIN_MCUSYS) != 0) {
		armv8_2_mcusys_pwr_on_common(&pm_state);
	}

	if ((pstate & MT_CPUPM_PWR_DOMAIN_CLUSTER) != 0) {
		armv8_2_cluster_pwr_on_common(&pm_state);
	}

	armv8_2_cpu_pwr_resume(&pm_state, pstate);

	nb.cpuid = pm_state.info.cpuid;
	nb.pwr_domain = pstate;
	MT_CPUPM_EVENT_PWR_ON(&nb);

	if (IS_AFFLV_PUBEVENT(pstate)) {
		MT_CPUPM_EVENT_AFFLV_PWR_ON(&nb);
	}
}

/* MediaTek PSCI power domain */
static int armv8_2_validate_power_state(unsigned int power_state, psci_power_state_t *req_state)
{
	unsigned int i;
	unsigned int pstate = psci_get_pstate_type(power_state);
	unsigned int aff_lvl = psci_get_pstate_pwrlvl(power_state);
	unsigned int my_core_pos = plat_my_core_pos();

	if (mtk_cpu_pwr.ops == NULL) {
		return PSCI_E_INVALID_PARAMS;
	}

	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_PWR_STATE_VALID)) {
		if (mtk_cpu_pwr.ops->pwr_state_valid(aff_lvl, pstate) != 0) {
			return PSCI_E_INVALID_PARAMS;
		}
	}

	if (pstate == PSTATE_TYPE_STANDBY) {
		req_state->pwr_domain_state[0] = PLAT_MAX_RET_STATE;
	} else {
		for (i = PSCI_CPU_PWR_LVL; i <= aff_lvl; i++) {
			req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
		}
	}
	armv8_2_power_state[my_core_pos] = power_state;

	return PSCI_E_SUCCESS;
}
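
/*
 * Illustrative example: a CPU_SUSPEND power_state that decodes to a
 * power-down type at affinity level 2 (assuming PLAT_MAX_PWR_LVL >= 2) makes
 * the loop above fill req_state->pwr_domain_state[0..2] with
 * PLAT_MAX_OFF_STATE, while a standby-type request only sets level 0 to
 * PLAT_MAX_RET_STATE. The raw power_state is also cached per CPU in
 * armv8_2_power_state[], so the suspend handlers can later retrieve its
 * state-id through armv8_2_get_pwr_stateid().
 */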

/* MediaTek PSCI power domain */
#if CONFIG_MTK_SUPPORT_SYSTEM_SUSPEND
static void armv8_2_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	unsigned int i;
	int ret;
	unsigned int power_state;
	unsigned int my_core_pos = plat_my_core_pos();

	ret = mtk_cpu_pwr.ops->pwr_state_valid(PLAT_MAX_PWR_LVL,
						PSTATE_TYPE_POWERDOWN);

	if (ret != MTK_CPUPM_E_OK) {
		/* Avoid system suspend because the platform is not ready. */
		req_state->pwr_domain_state[PSCI_CPU_PWR_LVL] =
						PLAT_MAX_RET_STATE;
		for (i = PSCI_CPU_PWR_LVL + 1; i <= PLAT_MAX_PWR_LVL; i++) {
			req_state->pwr_domain_state[i] = PSCI_LOCAL_STATE_RUN;
		}

		power_state = psci_make_powerstate(0, PSTATE_TYPE_STANDBY, PSCI_CPU_PWR_LVL);
	} else {
		for (i = PSCI_CPU_PWR_LVL; i <= PLAT_MAX_PWR_LVL; i++) {
			req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
		}

		power_state = psci_make_powerstate(MT_PLAT_PWR_STATE_SYSTEM_SUSPEND,
						   PSTATE_TYPE_POWERDOWN, PLAT_MAX_PWR_LVL);
	}

	armv8_2_power_state[my_core_pos] = power_state;
	flush_dcache_range((uintptr_t)&armv8_2_power_state[my_core_pos],
			   sizeof(armv8_2_power_state[my_core_pos]));
}
#endif
static void armv8_2_pm_smp_init(unsigned int cpu_id, uintptr_t entry_point)
{
	if (entry_point == 0) {
		ERROR("%s, warm_entry_point is null\n", __func__);
		panic();
	}
	if (IS_CPUSMP_FN_ENABLE(MTK_CPUPM_FN_SMP_INIT)) {
		mtk_cpu_pwr.smp->init(cpu_id, entry_point);
	}
	INFO("[%s:%d] - Initialization finished\n", __func__, __LINE__);
}

static struct plat_pm_pwr_ctrl armv8_2_pwr_ops = {
	.pwr_domain_suspend = armv8_2_power_domain_suspend,
	.pwr_domain_suspend_finish = armv8_2_power_domain_suspend_finish,
	.validate_power_state = armv8_2_validate_power_state,
#if CONFIG_MTK_SUPPORT_SYSTEM_SUSPEND
	.get_sys_suspend_power_state = armv8_2_get_sys_suspend_power_state,
#endif
};

struct plat_pm_smp_ctrl armv8_2_smp_ops = {
	.init = armv8_2_pm_smp_init,
	.pwr_domain_on = armv8_2_power_domain_on,
	.pwr_domain_off = armv8_2_power_domain_off,
	.pwr_domain_on_finish = armv8_2_power_domain_on_finish,
};

#define ISSUE_CPU_PM_REG_FAIL(_success) ({ _success = false; assert(0); })

#define CPM_PM_FN_CHECK(_fns, _ops, _id, _func, _result, _flag) ({ \
	if ((_fns & _id) != 0) { \
		if (_ops->_func != NULL) { \
			_flag |= _id; \
		} else { \
			ISSUE_CPU_PM_REG_FAIL(_result); \
		} \
	} })
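
/*
 * For example, CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_RESUME_CORE,
 * cpu_resume, success, fns) records MTK_CPUPM_FN_RESUME_CORE in fns when that
 * function was requested in fn_flags and ops->cpu_resume is provided;
 * otherwise it marks the whole registration as failed through
 * ISSUE_CPU_PM_REG_FAIL(success).
 */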

int register_cpu_pm_ops(unsigned int fn_flags, struct mtk_cpu_pm_ops *ops)
{
	bool success = true;
	unsigned int fns = 0;

	if ((ops == NULL) || (mtk_cpu_pwr.ops != NULL)) {
		ERROR("[%s:%d] register cpu_pm ops failed\n", __FILE__, __LINE__);
		return MTK_CPUPM_E_ERR;
	}

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_RESUME_CORE,
			cpu_resume, success, fns);

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SUSPEND_CORE,
			cpu_suspend, success, fns);

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_RESUME_CLUSTER,
			cluster_resume, success, fns);

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SUSPEND_CLUSTER,
			cluster_suspend, success, fns);

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_RESUME_MCUSYS,
			mcusys_resume, success, fns);

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SUSPEND_MCUSYS,
			mcusys_suspend, success, fns);

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_CPUPM_GET_PWR_STATE,
			get_pstate, success, fns);

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_PWR_STATE_VALID,
			pwr_state_valid, success, fns);

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_INIT,
			init, success, fns);

	if (success) {
		mtk_cpu_pwr.ops = ops;
		mtk_cpu_pwr.fn_mask |= fns;
		plat_pm_ops_setup_pwr(&armv8_2_pwr_ops);
		INFO("[%s:%d] CPU pwr ops registered successfully, support:0x%x\n",
		     __func__, __LINE__, fns);
	} else {
		ERROR("[%s:%d] register cpu_pm ops failed, fn:0x%x\n",
		      __func__, __LINE__, fn_flags);
		assert(0);
	}
	return MTK_CPUPM_E_OK;
}
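
/*
 * Usage sketch (illustrative only; the plat_* callback names are hypothetical
 * placeholders, while the ops fields and MTK_CPUPM_FN_* flags come from this
 * interface): a platform low-power driver would typically fill a
 * mtk_cpu_pm_ops structure and register it once during platform init, e.g.
 *
 *	static struct mtk_cpu_pm_ops plat_pm_ops = {
 *		.cpu_resume = plat_cpu_resume,
 *		.cpu_suspend = plat_cpu_suspend,
 *		.pwr_state_valid = plat_pwr_state_valid,
 *	};
 *
 *	register_cpu_pm_ops(MTK_CPUPM_FN_RESUME_CORE |
 *			    MTK_CPUPM_FN_SUSPEND_CORE |
 *			    MTK_CPUPM_FN_PWR_STATE_VALID, &plat_pm_ops);
 *
 * Only the callbacks whose flag is passed in fn_flags end up enabled in
 * mtk_cpu_pwr.fn_mask and used at runtime.
 */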

int register_cpu_smp_ops(unsigned int fn_flags, struct mtk_cpu_smp_ops *ops)
{
	bool success = true;
	unsigned int fns = 0;

	if ((ops == NULL) || (mtk_cpu_pwr.smp != NULL)) {
		ERROR("[%s:%d] register cpu_smp ops failed\n", __FILE__, __LINE__);
		return MTK_CPUPM_E_ERR;
	}

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SMP_INIT,
			init, success, fns);

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_PWR_ON_CORE_PREPARE,
			cpu_pwr_on_prepare, success, fns);

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SMP_CORE_ON,
			cpu_on, success, fns);

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SMP_CORE_OFF,
			cpu_off, success, fns);

	if (success) {
		mtk_cpu_pwr.smp = ops;
		mtk_cpu_pwr.fn_mask |= fns;
		plat_pm_ops_setup_smp(&armv8_2_smp_ops);
		INFO("[%s:%d] CPU smp ops registered successfully, support:0x%x\n",
		     __func__, __LINE__, fns);
	} else {
		ERROR("[%s:%d] register cpu_smp ops failed, fn:0x%x\n",
		      __func__, __LINE__, fn_flags);
		assert(0);
	}
	return MTK_CPUPM_E_OK;
}
545