/*
 * Copyright (c) 2025, MediaTek Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>

#ifdef CONFIG_MTK_BOOKER
#include <drivers/booker.h>
#endif

#include <common/debug.h>
#include <drivers/arm/gicv3.h>
#include <drivers/console.h>
#include <lib/psci/psci.h>
#include <lib/utils.h>
#include <mt_gic_v3.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>
#include <platform_def.h>

#include <lib/mtk_init/mtk_init.h>
#include <lib/pm/mtk_pm.h>
#ifdef MTK_PUBEVENT_ENABLE
#include <vendor_pubsub_events.h>
#endif

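/*
 * True when the composed MediaTek power state covers a cluster- or
 * MCUSYS-level power domain, i.e. an affinity-level event should also
 * be published.
 */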
#define IS_AFFLV_PUBEVENT(_pstate) \
	((_pstate) & (MT_CPUPM_PWR_DOMAIN_MCUSYS | \
		      MT_CPUPM_PWR_DOMAIN_CLUSTER))

#ifdef MTK_PUBEVENT_ENABLE
#define MT_CPUPM_EVENT_PWR_ON(x) ({ \
	PUBLISH_EVENT_ARG(mt_cpupm_publish_pwr_on, \
			  (const void *)(x)); })

#define MT_CPUPM_EVENT_PWR_OFF(x) ({ \
	PUBLISH_EVENT_ARG(mt_cpupm_publish_pwr_off, \
			  (const void *)(x)); })

#define MT_CPUPM_EVENT_AFFLV_PWR_ON(x) ({ \
	PUBLISH_EVENT_ARG(mt_cpupm_publish_afflv_pwr_on, \
			  (const void *)(x)); })

#define MT_CPUPM_EVENT_AFFLV_PWR_OFF(x) ({ \
	PUBLISH_EVENT_ARG(mt_cpupm_publish_afflv_pwr_off, \
			  (const void *)(x)); })

#else
#define MT_CPUPM_EVENT_PWR_ON(x) ({ (void)(x); })
#define MT_CPUPM_EVENT_PWR_OFF(x) ({ (void)(x); })
#define MT_CPUPM_EVENT_AFFLV_PWR_ON(x) ({ (void)(x); })
#define MT_CPUPM_EVENT_AFFLV_PWR_OFF(x) ({ (void)(x); })
#endif

/*
 * Value written to CLUSTERPWRDN_EL1 to tell the power controller what
 * the CPU requires of its cluster:
 * [0] : the CPU requests cluster power down
 * [1] : the CPU requests cluster power on
 */
#define coordinate_cluster(onoff) \
	write_clusterpwrdn_el1(onoff)
#define coordinate_cluster_pwron() \
	coordinate_cluster(1)
#define coordinate_cluster_pwroff() \
	coordinate_cluster(0)

/* No functions are enabled until their ops are registered */
#define MTK_CPU_PWR_FN_MASK_DEFAULT	(0)

struct mtk_cpu_pwr_ctrl {
	unsigned int fn_mask;
	struct mtk_cpu_pm_ops *ops;
	struct mtk_cpu_smp_ops *smp;
};

static struct mtk_cpu_pwr_ctrl imtk_cpu_pwr = {
	.fn_mask = MTK_CPU_PWR_FN_MASK_DEFAULT,
	.ops = NULL,
};

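/*
 * A callback is reachable only if its ops table has been registered and
 * its function ID was accepted into fn_mask at registration time.
 */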
#define IS_CPUIDLE_FN_ENABLE(x)	(imtk_cpu_pwr.ops && (imtk_cpu_pwr.fn_mask & (x)))
#define IS_CPUSMP_FN_ENABLE(x)	(imtk_cpu_pwr.smp && (imtk_cpu_pwr.fn_mask & (x)))

/* per-cpu power state */
static unsigned int cpu_power_state[PLATFORM_CORE_COUNT];

#define get_pwr_stateid(cpu) \
	psci_get_pstate_id(cpu_power_state[cpu])

#define GET_MEDIATEK_PSTATE(_domain, _psci_state, _state) ({ \
	int mret = 0; \
	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_CPUPM_GET_PWR_STATE)) \
		mret = imtk_cpu_pwr.ops->get_pstate( \
			_domain, _psci_state, _state); \
	mret; })

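/*
 * Return the deepest affinity level whose local state is not RUN, or
 * PSCI_INVALID_PWR_LVL if every level is still running.
 */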
static inline unsigned int get_pwr_afflv(const psci_power_state_t *state)
{
	for (int i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
		if (is_local_state_run(state->pwr_domain_state[i]) == 0)
			return (unsigned int)i;
	}

	return PSCI_INVALID_PWR_LVL;
}

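/* Restore GIC context, then let the platform hook finish MCUSYS resume. */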
static void mcusys_pwr_on_common(const struct mtk_cpupm_pwrstate *state)
{
	mt_gic_distif_restore();
	mt_gic_rdistif_restore();

	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_RESUME_MCUSYS))
		imtk_cpu_pwr.ops->mcusys_resume(state);
}

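/*
 * Quiesce MCUSYS: flush via the Booker driver when CONFIG_MTK_BOOKER is
 * set, run the platform suspend hook, then save the GIC context so it
 * can be restored on resume.
 */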
static void mcusys_pwr_dwn_common(const struct mtk_cpupm_pwrstate *state)
{
#ifdef CONFIG_MTK_BOOKER
	booker_flush();
#endif

	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_SUSPEND_MCUSYS))
		imtk_cpu_pwr.ops->mcusys_suspend(state);

	mt_gic_rdistif_save();
	/* save GIC context after CIRQ is enabled */
	mt_gic_distif_save();
}

static void cluster_pwr_on_common(const struct mtk_cpupm_pwrstate *state)
{
	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_RESUME_CLUSTER))
		imtk_cpu_pwr.ops->cluster_resume(state);
}

static void cluster_pwr_dwn_common(const struct mtk_cpupm_pwrstate *state)
{
	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_SUSPEND_CLUSTER))
		imtk_cpu_pwr.ops->cluster_suspend(state);
}

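/*
 * Per-core power sequencing. The *_common helpers handle cluster
 * coordination and the GIC CPU interface; resume/suspend wrap the
 * CPU-idle hooks, while on/off wrap the SMP hooks.
 */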
static void cpu_pwr_on_common(const struct mtk_cpupm_pwrstate *state,
			      unsigned int pstate)
{
	coordinate_cluster_pwron();

	mt_gic_cpuif_enable();
}

static void cpu_pwr_dwn_common(const struct mtk_cpupm_pwrstate *state,
			       unsigned int pstate)
{
	if (pstate & MT_CPUPM_PWR_DOMAIN_PERCORE_DSU)
		coordinate_cluster_pwroff();

	mt_gic_cpuif_disable();
}

static void cpu_pwr_resume(const struct mtk_cpupm_pwrstate *state,
			   unsigned int pstate)
{
	cpu_pwr_on_common(state, pstate);
	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_RESUME_CORE))
		imtk_cpu_pwr.ops->cpu_resume(state);
}

static void cpu_pwr_suspend(const struct mtk_cpupm_pwrstate *state,
			    unsigned int pstate)
{
	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_SUSPEND_CORE))
		imtk_cpu_pwr.ops->cpu_suspend(state);
	cpu_pwr_dwn_common(state, pstate);
}

static void cpu_pwr_on(const struct mtk_cpupm_pwrstate *state,
		       unsigned int pstate)
{
	cpu_pwr_on_common(state, pstate);
	if (IS_CPUSMP_FN_ENABLE(MTK_CPUPM_FN_SMP_CORE_ON))
		imtk_cpu_pwr.smp->cpu_on(state);
}

static void cpu_pwr_off(const struct mtk_cpupm_pwrstate *state,
			unsigned int pstate)
{
	if (IS_CPUSMP_FN_ENABLE(MTK_CPUPM_FN_SMP_CORE_OFF))
		imtk_cpu_pwr.smp->cpu_off(state);
	cpu_pwr_dwn_common(state, pstate);
}

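/*
 * PSCI pwr_domain_on hook: let the platform prepare the warm-boot entry
 * for the target core before it is powered on.
 */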
static int power_domain_on(u_register_t mpidr)
{
	int ret = PSCI_E_SUCCESS;
	int cpu = plat_core_pos_by_mpidr(mpidr);
	uintptr_t entry = plat_pm_get_warm_entry();

	if (IS_CPUSMP_FN_ENABLE(MTK_CPUPM_FN_PWR_ON_CORE_PREPARE)) {
		int b_ret = imtk_cpu_pwr.smp->cpu_pwr_on_prepare(cpu, entry);

		if (b_ret)
			ret = PSCI_E_DENIED;
	}
	INFO("CPU %d power domain prepare on\n", cpu);
	return ret;
}

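/*
 * PSCI pwr_domain_on_finish hook, running on the newly powered-on core:
 * bring up its GIC interface, run the SMP cpu_on hook and publish the
 * power-on event.
 */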
static void power_domain_on_finish(const psci_power_state_t *state)
{
	struct mt_cpupm_event_data nb;
	unsigned int pstate = (MT_CPUPM_PWR_DOMAIN_CORE |
			       MT_CPUPM_PWR_DOMAIN_PERCORE_DSU);
	struct mtk_cpupm_pwrstate pm_state = {
		.info = {
			.cpuid = plat_my_core_pos(),
			.mode = MTK_CPU_PM_SMP,
		},
		.pwr = {
			.afflv = get_pwr_afflv(state),
			.state_id = 0x0,
		},
	};

	mt_gic_pcpu_init();

	cpu_pwr_on(&pm_state, pstate);

	nb.cpuid = pm_state.info.cpuid;
	nb.pwr_domain = pstate;
	MT_CPUPM_EVENT_PWR_ON(&nb);
	INFO("CPU %u power domain on finished\n", pm_state.info.cpuid);
}

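/*
 * PSCI pwr_domain_off hook: run the SMP cpu_off hook, shut down the
 * redistributor and publish the power-off event.
 */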
static void power_domain_off(const psci_power_state_t *state)
{
	struct mt_cpupm_event_data nb;
	unsigned int pstate = (MT_CPUPM_PWR_DOMAIN_CORE |
			       MT_CPUPM_PWR_DOMAIN_PERCORE_DSU);
	struct mtk_cpupm_pwrstate pm_state = {
		.info = {
			.cpuid = plat_my_core_pos(),
			.mode = MTK_CPU_PM_SMP,
		},
		.pwr = {
			.afflv = get_pwr_afflv(state),
			.state_id = 0x0,
		},
	};

	cpu_pwr_off(&pm_state, pstate);

	mt_gic_redistif_off();

	nb.cpuid = pm_state.info.cpuid;
	nb.pwr_domain = pstate;
	MT_CPUPM_EVENT_PWR_OFF(&nb);

	INFO("CPU %u power domain off\n", pm_state.info.cpuid);
}

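/*
 * PSCI pwr_domain_suspend hook: translate the PSCI composite state into
 * a MediaTek pstate bitmap, then power down the affected domains from
 * the core outwards.
 */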
static void power_domain_suspend(const psci_power_state_t *state)
{
	unsigned int pstate = 0;
	struct mt_cpupm_event_data nb;
	struct mtk_cpupm_pwrstate pm_state = {
		.info = {
			.cpuid = plat_my_core_pos(),
			.mode = MTK_CPU_PM_CPUIDLE,
		},
	};

	pm_state.pwr.state_id = get_pwr_stateid(pm_state.info.cpuid);
	pm_state.pwr.afflv = get_pwr_afflv(state);
	pm_state.pwr.raw = state;

	pstate = GET_MEDIATEK_PSTATE(CPUPM_PWR_OFF,
				     cpu_power_state[pm_state.info.cpuid],
				     &pm_state);

	cpu_pwr_suspend(&pm_state, pstate);

	if (pstate & MT_CPUPM_PWR_DOMAIN_CLUSTER)
		cluster_pwr_dwn_common(&pm_state);

	if (pstate & MT_CPUPM_PWR_DOMAIN_MCUSYS)
		mcusys_pwr_dwn_common(&pm_state);

	nb.cpuid = pm_state.info.cpuid;
	nb.pwr_domain = pstate;
	MT_CPUPM_EVENT_PWR_OFF(&nb);

	if (IS_AFFLV_PUBEVENT(pstate))
		MT_CPUPM_EVENT_AFFLV_PWR_OFF(&nb);
}

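/*
 * PSCI pwr_domain_suspend_finish hook: resume in the opposite order,
 * MCUSYS first, then cluster, then the core itself.
 */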
static void power_domain_suspend_finish(const psci_power_state_t *state)
{
	unsigned int pstate = 0;
	struct mt_cpupm_event_data nb;
	struct mtk_cpupm_pwrstate pm_state = {
		.info = {
			.cpuid = plat_my_core_pos(),
			.mode = MTK_CPU_PM_CPUIDLE,
		},
	};

	pm_state.pwr.state_id = get_pwr_stateid(pm_state.info.cpuid);
	pm_state.pwr.afflv = get_pwr_afflv(state);
	pm_state.pwr.raw = state;

	pstate = GET_MEDIATEK_PSTATE(CPUPM_PWR_ON,
				     cpu_power_state[pm_state.info.cpuid],
				     &pm_state);

	if (pstate & MT_CPUPM_PWR_DOMAIN_MCUSYS)
		mcusys_pwr_on_common(&pm_state);

	if (pstate & MT_CPUPM_PWR_DOMAIN_CLUSTER)
		cluster_pwr_on_common(&pm_state);

	cpu_pwr_resume(&pm_state, pstate);

	nb.cpuid = pm_state.info.cpuid;
	nb.pwr_domain = pstate;
	MT_CPUPM_EVENT_PWR_ON(&nb);

	if (IS_AFFLV_PUBEVENT(pstate))
		MT_CPUPM_EVENT_AFFLV_PWR_ON(&nb);
}

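/*
 * PSCI validate_power_state hook: reject requests the platform cannot
 * honour, expand the composite power_state into req_state and cache the
 * raw value for the suspend path.
 */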
static int validate_power_state(unsigned int power_state,
				psci_power_state_t *req_state)
{
	int i;
	unsigned int pstate = psci_get_pstate_type(power_state);
	int aff_lvl = psci_get_pstate_pwrlvl(power_state);
	unsigned int my_core_pos = plat_my_core_pos();

	if (!imtk_cpu_pwr.ops)
		return PSCI_E_INVALID_PARAMS;

	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_PWR_STATE_VALID)) {
		int ret = imtk_cpu_pwr.ops->pwr_state_valid(aff_lvl, pstate);

		if (ret)
			return PSCI_E_INVALID_PARAMS;
	}

	if (pstate == PSTATE_TYPE_STANDBY) {
		req_state->pwr_domain_state[0] = PLAT_MAX_RET_STATE;
	} else {
		for (i = PSCI_CPU_PWR_LVL; i <= aff_lvl; i++)
			req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
	}
	cpu_power_state[my_core_pos] = power_state;
	return PSCI_E_SUCCESS;
}

#if CONFIG_MTK_SUPPORT_SYSTEM_SUSPEND
/* Report the MediaTek composite power state used for system suspend */
static void get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	int lv = 0;
	unsigned int my_core_pos = plat_my_core_pos();

	for (lv = PSCI_CPU_PWR_LVL; lv <= PLAT_MAX_PWR_LVL; lv++)
		req_state->pwr_domain_state[lv] = PLAT_MAX_OFF_STATE;

	cpu_power_state[my_core_pos] = psci_make_powerstate(
		MT_PLAT_PWR_STATE_SUSPEND,
		PSTATE_TYPE_POWERDOWN,
		PLAT_MT_SYSTEM_SUSPEND);

	flush_dcache_range((uintptr_t)&cpu_power_state[my_core_pos],
			   sizeof(cpu_power_state[my_core_pos]));
}
#endif

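/*
 * PSCI pwr_domain_pwr_down_wfi hook. MTK_CPUPM_E_OK from the platform op
 * means it already executed the power-down WFI itself, so control
 * returning here is treated as an unexpected fall-through and escalated
 * to a panic.
 */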
static void pwr_domain_pwr_down_wfi(const psci_power_state_t *req_state)
{
	unsigned int cpu = plat_my_core_pos();
	int ret = MTK_CPUPM_E_NOT_SUPPORT;

	if (IS_CPUIDLE_FN_ENABLE(MTK_CPUPM_FN_PWR_DOMAIN_POWER_DOWN_WFI))
		ret = imtk_cpu_pwr.ops->pwr_domain_pwr_down_wfi(cpu);
	if (ret == MTK_CPUPM_E_OK)
		plat_panic_handler();
}

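/* Hand the warm-boot entry point to the platform SMP driver. */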
static void pm_smp_init(unsigned int cpu_id, uintptr_t entry_point)
{
	if (entry_point == 0) {
		ERROR("%s, warm_entry_point is null\n", __func__);
		panic();
	}
	if (IS_CPUSMP_FN_ENABLE(MTK_CPUPM_FN_SMP_INIT))
		imtk_cpu_pwr.smp->init(cpu_id, entry_point);
	INFO("[%s:%d] - initialization finished\n", __func__, __LINE__);
}

static struct plat_pm_pwr_ctrl armv9_0_pwr_ops = {
	.pwr_domain_suspend = power_domain_suspend,
	.pwr_domain_suspend_finish = power_domain_suspend_finish,
	.validate_power_state = validate_power_state,
#if CONFIG_MTK_SUPPORT_SYSTEM_SUSPEND
	.get_sys_suspend_power_state = get_sys_suspend_power_state,
#endif
	.pwr_domain_pwr_down_wfi = pwr_domain_pwr_down_wfi,
};

struct plat_pm_smp_ctrl armv9_0_smp_ops = {
	.init = pm_smp_init,
	.pwr_domain_on = power_domain_on,
	.pwr_domain_off = power_domain_off,
	.pwr_domain_on_finish = power_domain_on_finish,
};

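/*
 * Registration helpers: for every function ID requested in _fns, demand
 * a non-NULL callback in _ops. Accepted IDs accumulate into _flag; any
 * missing callback clears _result and asserts.
 */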
#define ISSUE_CPU_PM_REG_FAIL(_success) ({ \
	_success = 0; assert(0); })

#define CPM_PM_FN_CHECK(_fns, _ops, _id, _func, _cond_ex, _result, _flag) ({ \
	if ((_fns) & (_id)) { \
		if (_ops->_func && (_cond_ex)) { \
			_flag |= (_id); \
		} else { \
			ISSUE_CPU_PM_REG_FAIL(_result); \
		} \
	} })

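/*
 * Dispatch a vendor-specific request to the ops table matching the given
 * mode; returns MTK_CPUPM_E_ERR if that table or its invoke hook is
 * missing.
 */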
int plat_pm_invoke_func(enum mtk_cpu_pm_mode mode, unsigned int id, void *priv)
{
	int ret = MTK_CPUPM_E_ERR;

	if ((mode == MTK_CPU_PM_CPUIDLE) && imtk_cpu_pwr.ops &&
	    imtk_cpu_pwr.ops->invoke)
		ret = imtk_cpu_pwr.ops->invoke(id, priv);
	else if ((mode == MTK_CPU_PM_SMP) &&
		 imtk_cpu_pwr.smp &&
		 imtk_cpu_pwr.smp->invoke)
		ret = imtk_cpu_pwr.smp->invoke(id, priv);

	return ret;
}

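/*
 * Register the CPU-idle ops exactly once; fn_flags selects which
 * callbacks the platform implements. A minimal sketch of a caller (the
 * plat_* names below are illustrative, not part of this driver):
 *
 *	static struct mtk_cpu_pm_ops plat_idle_ops = {
 *		.cpu_resume = plat_cpu_resume,
 *		.cpu_suspend = plat_cpu_suspend,
 *		.pwr_state_valid = plat_pwr_state_valid,
 *	};
 *
 *	register_cpu_pm_ops(MTK_CPUPM_FN_RESUME_CORE |
 *			    MTK_CPUPM_FN_SUSPEND_CORE |
 *			    MTK_CPUPM_FN_PWR_STATE_VALID, &plat_idle_ops);
 */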
int register_cpu_pm_ops(unsigned int fn_flags, struct mtk_cpu_pm_ops *ops)
{
	int success = 1;
	unsigned int fns = 0;

	if (!ops || imtk_cpu_pwr.ops) {
		ERROR("[%s:%d] register cpu_pm ops failed\n",
		      __FILE__, __LINE__);
		return MTK_CPUPM_E_ERR;
	}
	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_RESUME_CORE,
			cpu_resume, 1, success, fns);
	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SUSPEND_CORE,
			cpu_suspend, 1, success, fns);
	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_RESUME_CLUSTER,
			cluster_resume, 1, success, fns);
	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SUSPEND_CLUSTER,
			cluster_suspend, 1, success, fns);
	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_RESUME_MCUSYS,
			mcusys_resume, 1, success, fns);
	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SUSPEND_MCUSYS,
			mcusys_suspend, 1, success, fns);
	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_CPUPM_GET_PWR_STATE,
			get_pstate, 1, success, fns);
	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_PWR_STATE_VALID,
			pwr_state_valid, 1, success, fns);
	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_INIT,
			init, 1, success, fns);
	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_PWR_DOMAIN_POWER_DOWN_WFI,
			pwr_domain_pwr_down_wfi, 1, success, fns);
	if (success) {
		imtk_cpu_pwr.ops = ops;
		imtk_cpu_pwr.fn_mask |= fns;
		plat_pm_ops_setup_pwr(&armv9_0_pwr_ops);
		INFO("[%s:%d] CPU pwr ops register success, support:0x%x\n",
		     __func__, __LINE__, fns);
	} else {
		ERROR("[%s:%d] register cpu_pm ops failed, fn:0x%x\n",
		      __func__, __LINE__, fn_flags);
		assert(0);
		/* with asserts compiled out, still report the failure */
		return MTK_CPUPM_E_ERR;
	}
	return MTK_CPUPM_E_OK;
}

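/* Register the SMP bring-up ops; same contract as register_cpu_pm_ops(). */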
int register_cpu_smp_ops(unsigned int fn_flags, struct mtk_cpu_smp_ops *ops)
{
	int success = 1;
	unsigned int fns = 0;

	if (!ops || imtk_cpu_pwr.smp) {
		ERROR("[%s:%d] register cpu_smp ops failed\n",
		      __FILE__, __LINE__);
		return MTK_CPUPM_E_ERR;
	}

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SMP_INIT,
			init, 1, success, fns);

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_PWR_ON_CORE_PREPARE,
			cpu_pwr_on_prepare, 1, success, fns);

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SMP_CORE_ON,
			cpu_on, 1, success, fns);

	CPM_PM_FN_CHECK(fn_flags, ops, MTK_CPUPM_FN_SMP_CORE_OFF,
			cpu_off, 1, success, fns);

	if (success) {
		imtk_cpu_pwr.smp = ops;
		imtk_cpu_pwr.fn_mask |= fns;
		plat_pm_ops_setup_smp(&armv9_0_smp_ops);
		INFO("[%s:%d] CPU smp ops register success, support:0x%x\n",
		     __func__, __LINE__, fns);
	} else {
		ERROR("[%s:%d] register cpu_smp ops failed, fn:0x%x\n",
		      __func__, __LINE__, fn_flags);
		assert(0);
		/* with asserts compiled out, still report the failure */
		return MTK_CPUPM_E_ERR;
	}
	return MTK_CPUPM_E_OK;
}