1 /*
2 * Copyright (c) 2025, MediaTek Inc. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7 #include <assert.h>
8 #include <stdint.h>
9
10 #include <lib/spinlock.h>
11 #include <plat/common/platform.h>
12 #include <platform_def.h>
13
14 #include "../inc/pwr_topology.h"
15 #include <lib/pm/mtk_pm.h>
16 #include <lpm/mt_lp_rm.h>
17
/* Bit mask covering all three CPU power groups (bits 0..2). */
#define GROUP_CPU_ALL		0x7

/* Highest CPU id belonging to each group. */
#define GROUP_0_CPUID_MAX	3
#define GROUP_1_CPUID_MAX	6
#define GROUP_2_CPUID_MAX	7

/*
 * True when every group bit is set in _mask, i.e. all CPU groups are off.
 * The argument is parenthesized so compound expressions (e.g. `a | b`)
 * expand correctly: without the parentheses, `&` would bind tighter than
 * a caller's `|` and yield the wrong result.
 */
#define IS_ALL_GROUP_OFF(_mask) (((_mask) & GROUP_CPU_ALL) == GROUP_CPU_ALL)

/*
 * Store the group id (0, 1 or 2) of CPU `_cpuid` into `_gid`.
 * Note: `_cpuid` may be evaluated more than once — pass a side-effect-free
 * expression.
 */
#define GET_GROUPID(_cpuid, _gid) ({ \
	if ((_cpuid) <= GROUP_0_CPUID_MAX) \
		_gid = 0; \
	else if ((_cpuid) <= GROUP_1_CPUID_MAX) \
		_gid = 1; \
	else \
		_gid = 2; })

/*
 * Store the group bit mask of CPU `_cpuid` into `_gmask`, or 0 when the
 * id is beyond the last group. Same multiple-evaluation caveat as above.
 */
#define GET_GROUPMASK(_cpuid, _gmask) ({ \
	if ((_cpuid) <= GROUP_0_CPUID_MAX) \
		_gmask = BIT(0); \
	else if ((_cpuid) <= GROUP_1_CPUID_MAX) \
		_gmask = BIT(1); \
	else if ((_cpuid) <= GROUP_2_CPUID_MAX) \
		_gmask = BIT(2); \
	else \
		_gmask = 0; })
42
/*
 * Lock protecting the shared group bookkeeping below. A bakery lock is
 * used when MT_CPU_PM_USING_BAKERY_LOCK is defined (usable before the
 * data cache is enabled); otherwise a plain spinlock, whose init is a
 * no-op because a zero-initialized spinlock_t is already unlocked.
 */
#ifdef MT_CPU_PM_USING_BAKERY_LOCK
DEFINE_BAKERY_LOCK(mt_pwr_lock);
#define plat_pwr_lock_init() bakery_lock_init(&mt_pwr_lock)
#define plat_pwr_lock() bakery_lock_get(&mt_pwr_lock)
#define plat_pwr_unlock() bakery_lock_release(&mt_pwr_lock)
#else
spinlock_t mt_pwr_lock;
#define plat_pwr_lock_init()
#define plat_pwr_lock() spin_lock(&mt_pwr_lock)
#define plat_pwr_unlock() spin_unlock(&mt_pwr_lock)
#endif /* MT_CPU_PM_USING_BAKERY_LOCK */

#if CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN && \
	!CPU_PM_DOMAIN_CORE_ONLY
/*
 * One bit per CPU group (see GROUP_CPU_ALL): a set bit marks the group
 * as powered off. Initialized to GROUP_CPU_ALL in pwr_topology_init()
 * and updated under plat_pwr_lock() in pwr_domain_coordination().
 */
static unsigned int cpu_groupmask;
#endif
59
/*
 * Coordinate a CPU power-state transition across the power-domain
 * topology.
 *
 * @pwr:        Direction/kind of the transition (PWR_DOMAIN_OFF,
 *              PWR_DOMAIN_SMP_OFF, or a power-on variant).
 * @psci_state: Composite PSCI power state handed down by the framework.
 * @state:      MTK CPU PM state (CPU id, requested affinity level, ...).
 * @fn:         Platform prepare callback, invoked per affinity level.
 *
 * Returns a bitmask of MT_CPUPM_PWR_DOMAIN_* flags naming the domains
 * that may be powered down for this request; always contains at least
 * MT_CPUPM_PWR_DOMAIN_CORE.
 */
unsigned int pwr_domain_coordination(enum pwr_domain_status pwr,
				     const mtk_pstate_type psci_state,
				     const struct mtk_cpupm_pwrstate *state,
				     afflv_prepare fn)
{
	unsigned int pstate = MT_CPUPM_PWR_DOMAIN_CORE;

#if CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN && \
	!CPU_PM_DOMAIN_CORE_ONLY
	struct pwr_toplogy tp;

	/* Group bookkeeping only applies from cluster level upward. */
	if (state->pwr.afflv >= PLAT_MT_CPU_SUSPEND_CLUSTER) {
		unsigned int fgmask;

		if (state->info.cpuid >= PLATFORM_CORE_COUNT)
			assert(0);

		GET_GROUPMASK(state->info.cpuid, tp.cur_group_bit);

		/* cpu_groupmask is shared by all CPUs: update under lock. */
		plat_pwr_lock();
		if ((pwr == PWR_DOMAIN_OFF) || (pwr == PWR_DOMAIN_SMP_OFF)) {
			/* Going off: mark this CPU's group off and judge the
			 * post-update mask. */
			tp.group = (cpu_groupmask | tp.cur_group_bit);
			fgmask = tp.group;
		} else {
			/* Coming on: clear the group bit but judge the
			 * pre-update mask (state before this CPU wakes). */
			tp.group = (cpu_groupmask & ~tp.cur_group_bit);
			fgmask = cpu_groupmask;
		}
		/* NOTE(review): fn is called unconditionally here (no NULL
		 * check, unlike the mcusys stage below) and while the lock
		 * is held — confirm all callers pass a valid callback. */
		fn(1, state, &tp);
		cpu_groupmask = tp.group;

		plat_pwr_unlock();
		/* Cluster domain may power down only when all groups are off. */
		if (IS_ALL_GROUP_OFF(fgmask))
			pstate |= MT_CPUPM_PWR_DOMAIN_CLUSTER;
	}

	/* Skip to process smp */
	if (pwr > PWR_DOMAIN_OFF)
		return pstate;

	if (psci_get_pstate_pwrlvl(psci_state) >= PLAT_MT_CPU_SUSPEND_CLUSTER)
		pstate |= MT_CPUPM_PWR_DOMAIN_PERCORE_DSU;

	if (IS_PLAT_MCUSYSOFF_AFFLV(state->pwr.afflv)) {
		int ret = MTK_CPUPM_E_OK;

		/* NOTE(review): tp is only populated in the cluster branch
		 * above — presumably IS_PLAT_MCUSYSOFF_AFFLV implies
		 * afflv >= PLAT_MT_CPU_SUSPEND_CLUSTER, otherwise fn would
		 * see an uninitialized tp here. TODO confirm. */
		if (fn)
			ret = fn(state->pwr.afflv, state, &tp);

		if (ret == MTK_CPUPM_E_OK)
			pstate |= MT_CPUPM_PWR_DOMAIN_MCUSYS;
	}
#endif
	return pstate;
}
114
/*
 * One-time setup of the power-topology bookkeeping: initialize the lock
 * guarding the shared state and start with every CPU group flagged as
 * powered off.
 */
void pwr_topology_init(void)
{
	plat_pwr_lock_init();

#if CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN && \
	!CPU_PM_DOMAIN_CORE_ONLY
	/* All group bits set: no group is considered powered yet. */
	cpu_groupmask = GROUP_CPU_ALL;
#endif
}
124