1 /*
2 * Copyright (c) 2025, MediaTek Inc. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7 #ifndef MT_CPU_PM_H
8 #define MT_CPU_PM_H
9
10 #include <assert.h>
11
12 #include <platform_def.h>
13
14 #include <lib/pm/mtk_pm.h>
15
16 #if !HW_ASSISTED_COHERENCY
17 #define MT_CPU_PM_USING_BAKERY_LOCK
18 #endif /* !HW_ASSISTED_COHERENCY */
19
20 /*
21 * Enable bit of CPU_PM callbacks
22 */
CPU_PM_FN(void)23 static inline unsigned int CPU_PM_FN(void)
24 {
25 return (MTK_CPUPM_FN_CPUPM_GET_PWR_STATE |
26 MTK_CPUPM_FN_PWR_STATE_VALID |
27 MTK_CPUPM_FN_PWR_ON_CORE_PREPARE |
28 MTK_CPUPM_FN_RESUME_CORE |
29 #ifdef CPU_PM_PWR_REQ
30 MTK_CPUPM_FN_SUSPEND_CLUSTER |
31 #endif /* CPU_PM_PWR_REQ */
32 MTK_CPUPM_FN_RESUME_CLUSTER |
33 MTK_CPUPM_FN_SUSPEND_MCUSYS |
34 MTK_CPUPM_FN_RESUME_MCUSYS |
35 MTK_CPUPM_FN_SMP_INIT |
36 MTK_CPUPM_FN_SMP_CORE_ON |
37 MTK_CPUPM_FN_SMP_CORE_OFF);
38 }
39
/*
 * Assert-style runtime check: if _cond evaluates false, log the failing
 * expression with its function/line, then panic(). Uses a GNU statement
 * expression so it behaves as a single statement at the call site.
 */
#define CPU_PM_ASSERT(_cond) ({ \
	if (!(_cond)) { \
		INFO("[%s:%d] - %s\n", __func__, __LINE__, #_cond); \
		panic(); \
	} })
45
/* SPM (System Power Manager) related registers */
#define SPM_POWERON_CONFIG_EN		(SPM_BASE + 0x000)
#define SPM_CPU_PWR_STATUS		(SPM_BASE + 0x174)

/* bit-fields of SPM_POWERON_CONFIG_EN */
#define PROJECT_CODE			(0xB16U << 16) /* write-enable key in bits[31:16] */
#define BCLK_CG_EN			BIT(0)

/* NOTE(review): mask presumably covers mcusys + MP0 bits of SPM_CPU_PWR_STATUS — confirm against SPM spec */
#define CPC_PWR_MASK_MCUSYS_MP0		(0xC001)
55
/*
 * Fill _p (a struct cluster_pwr_ctrl-like object) with the PPU register
 * addresses of cluster _cl. _cl must be a literal token: it is pasted
 * into the CLUSTER_PPU_*_<_cl> macro names.
 */
#define PER_CLUSTER_PWR_DATA(_p, _cl) ({ \
	_p.pwr.ppu_pwpr = CLUSTER_PPU_PWPR_##_cl; \
	_p.pwr.ppu_pwsr = CLUSTER_PPU_PWSR_##_cl; \
	_p.pwr.ppu_dcdr0 = CLUSTER_PPU_DCDR0_##_cl; \
	_p.pwr.ppu_dcdr1 = CLUSTER_PPU_DCDR1_##_cl; \
})
62
/*
 * Runtime-index wrapper over PER_CLUSTER_PWR_DATA: selects the cluster
 * by the value of _cl (only cluster 0 exists on this platform; any
 * other index trips assert(0)).
 */
#define PER_CLUSTER_PWR_CTRL(_val, _cl) ({ \
	switch (_cl) { \
	case 0: \
		PER_CLUSTER_PWR_DATA(_val, 0); \
		break; \
	default: \
		assert(0); \
		break; \
	} })
72
/*
 * Fill _p (a struct cpu_pwr_ctrl-like object) with the boot-address
 * (RVBAR) and PPU register addresses of core _c in cluster _cl.
 * _cl and _c must be literal tokens: they are pasted into the
 * CORE_*_<_cl>_<_c> macro names.
 */
#define PER_CPU_PWR_DATA(_p, _cl, _c) ({ \
	_p.rvbaraddr_l = CORE_RVBRADDR_##_cl##_##_c##_L; \
	_p.rvbaraddr_h = CORE_RVBRADDR_##_cl##_##_c##_H; \
	_p.pwr.ppu_pwpr = CORE_PPU_PWPR_##_cl##_##_c; \
	_p.pwr.ppu_pwsr = CORE_PPU_PWSR_##_cl##_##_c; \
	_p.pwr.ppu_dcdr0 = CORE_PPU_DCDR0_##_cl##_##_c; \
	_p.pwr.ppu_dcdr1 = CORE_PPU_DCDR1_##_cl##_##_c; })
80
/*
 * Runtime-index wrapper over PER_CPU_PWR_DATA: selects core _cpu
 * (0..7, all in cluster 0) by value; any other index trips assert(0).
 */
#define PER_CPU_PWR_CTRL(_val, _cpu) ({ \
	switch (_cpu) { \
	case 0: \
		PER_CPU_PWR_DATA(_val, 0, 0); \
		break; \
	case 1: \
		PER_CPU_PWR_DATA(_val, 0, 1); \
		break; \
	case 2: \
		PER_CPU_PWR_DATA(_val, 0, 2); \
		break; \
	case 3: \
		PER_CPU_PWR_DATA(_val, 0, 3); \
		break; \
	case 4: \
		PER_CPU_PWR_DATA(_val, 0, 4); \
		break; \
	case 5: \
		PER_CPU_PWR_DATA(_val, 0, 5); \
		break; \
	case 6: \
		PER_CPU_PWR_DATA(_val, 0, 6); \
		break; \
	case 7: \
		PER_CPU_PWR_DATA(_val, 0, 7); \
		break; \
	default: \
		assert(0); \
		break; \
	} })
111
112 /*
113 * Definition about bootup address for each core
114 * CORE_RVBRADDR_clusterid_cpuid
115 */
116 #define CORE_RVBRADDR_0_0_L (MCUCFG_BASE + 0x00)
117 #define CORE_RVBRADDR_0_1_L (MCUCFG_BASE + 0x08)
118 #define CORE_RVBRADDR_0_2_L (MCUCFG_BASE + 0x10)
119 #define CORE_RVBRADDR_0_3_L (MCUCFG_BASE + 0x18)
120 #define CORE_RVBRADDR_0_4_L (MCUCFG_BASE + 0x20)
121 #define CORE_RVBRADDR_0_5_L (MCUCFG_BASE + 0x28)
122 #define CORE_RVBRADDR_0_6_L (MCUCFG_BASE + 0x30)
123 #define CORE_RVBRADDR_0_7_L (MCUCFG_BASE + 0x38)
124
125 #define CORE_RVBRADDR_0_0_H (MCUCFG_BASE + 0x04)
126 #define CORE_RVBRADDR_0_1_H (MCUCFG_BASE + 0x0C)
127 #define CORE_RVBRADDR_0_2_H (MCUCFG_BASE + 0x14)
128 #define CORE_RVBRADDR_0_3_H (MCUCFG_BASE + 0x1C)
129 #define CORE_RVBRADDR_0_4_H (MCUCFG_BASE + 0x24)
130 #define CORE_RVBRADDR_0_5_H (MCUCFG_BASE + 0x2C)
131 #define CORE_RVBRADDR_0_6_H (MCUCFG_BASE + 0x34)
132 #define CORE_RVBRADDR_0_7_H (MCUCFG_BASE + 0x3C)
133
134 /*
135 * Definition about PPU PWPR for each core
136 * PPU_PWPR_clusterid_cpuid
137 */
138 #define CORE_PPU_PWPR_0_0 (MT_UTILITYBUS_BASE + 0x080000)
139 #define CORE_PPU_PWPR_0_1 (MT_UTILITYBUS_BASE + 0x180000)
140 #define CORE_PPU_PWPR_0_2 (MT_UTILITYBUS_BASE + 0x280000)
141 #define CORE_PPU_PWPR_0_3 (MT_UTILITYBUS_BASE + 0x380000)
142 #define CORE_PPU_PWPR_0_4 (MT_UTILITYBUS_BASE + 0x480000)
143 #define CORE_PPU_PWPR_0_5 (MT_UTILITYBUS_BASE + 0x580000)
144 #define CORE_PPU_PWPR_0_6 (MT_UTILITYBUS_BASE + 0x680000)
145 #define CORE_PPU_PWPR_0_7 (MT_UTILITYBUS_BASE + 0x780000)
146
147 /*
148 * Definition about PPU PWSR for each core
149 * PPU_PWSR_clusterid_cpuid
150 */
151 #define CORE_PPU_PWSR_0_0 (MT_UTILITYBUS_BASE + 0x080008)
152 #define CORE_PPU_PWSR_0_1 (MT_UTILITYBUS_BASE + 0x180008)
153 #define CORE_PPU_PWSR_0_2 (MT_UTILITYBUS_BASE + 0x280008)
154 #define CORE_PPU_PWSR_0_3 (MT_UTILITYBUS_BASE + 0x380008)
155 #define CORE_PPU_PWSR_0_4 (MT_UTILITYBUS_BASE + 0x480008)
156 #define CORE_PPU_PWSR_0_5 (MT_UTILITYBUS_BASE + 0x580008)
157 #define CORE_PPU_PWSR_0_6 (MT_UTILITYBUS_BASE + 0x680008)
158 #define CORE_PPU_PWSR_0_7 (MT_UTILITYBUS_BASE + 0x780008)
159
160 /*
161 * Definition about device delay control 0
162 * PPU_DCDR0_clusterid_cpuid
163 */
164 #define CORE_PPU_DCDR0_0_0 (MT_UTILITYBUS_BASE + 0x080170)
165 #define CORE_PPU_DCDR0_0_1 (MT_UTILITYBUS_BASE + 0x180170)
166 #define CORE_PPU_DCDR0_0_2 (MT_UTILITYBUS_BASE + 0x280170)
167 #define CORE_PPU_DCDR0_0_3 (MT_UTILITYBUS_BASE + 0x380170)
168 #define CORE_PPU_DCDR0_0_4 (MT_UTILITYBUS_BASE + 0x480170)
169 #define CORE_PPU_DCDR0_0_5 (MT_UTILITYBUS_BASE + 0x580170)
170 #define CORE_PPU_DCDR0_0_6 (MT_UTILITYBUS_BASE + 0x680170)
171 #define CORE_PPU_DCDR0_0_7 (MT_UTILITYBUS_BASE + 0x780170)
172
173 /*
174 * Definition about device delay control 1
175 * PPU_DCDR0_clusterid_cpuid
176 */
177 #define CORE_PPU_DCDR1_0_0 (MT_UTILITYBUS_BASE + 0x080174)
178 #define CORE_PPU_DCDR1_0_1 (MT_UTILITYBUS_BASE + 0x180174)
179 #define CORE_PPU_DCDR1_0_2 (MT_UTILITYBUS_BASE + 0x280174)
180 #define CORE_PPU_DCDR1_0_3 (MT_UTILITYBUS_BASE + 0x380174)
181 #define CORE_PPU_DCDR1_0_4 (MT_UTILITYBUS_BASE + 0x480174)
182 #define CORE_PPU_DCDR1_0_5 (MT_UTILITYBUS_BASE + 0x580174)
183 #define CORE_PPU_DCDR1_0_6 (MT_UTILITYBUS_BASE + 0x680174)
184 #define CORE_PPU_DCDR1_0_7 (MT_UTILITYBUS_BASE + 0x780174)
185
186 /*
187 * Definition about PPU PWPR for cluster
188 * PPU_PWPR_clusterid
189 */
190 #define CLUSTER_PPU_PWPR_0 (MT_UTILITYBUS_BASE + 0x030000)
191 #define CLUSTER_PPU_PWSR_0 (MT_UTILITYBUS_BASE + 0x030008)
192 #define CLUSTER_PPU_DCDR0_0 (MT_UTILITYBUS_BASE + 0x030170)
193 #define CLUSTER_PPU_DCDR1_0 (MT_UTILITYBUS_BASE + 0x030174)
194
/*
 * Addresses of one PPU (Power Policy Unit) register block, filled from
 * the CORE_PPU_* / CLUSTER_PPU_* address tables above.
 */
struct ppu_pwr_ctrl {
	unsigned int ppu_pwpr;	/* power policy register address */
	unsigned int ppu_pwsr;	/* power status register address */
	unsigned int ppu_dcdr0;	/* device delay control 0 address */
	unsigned int ppu_dcdr1;	/* device delay control 1 address */
};
201
/* Per-core power-control data, filled by PER_CPU_PWR_DATA(). */
struct cpu_pwr_ctrl {
	unsigned int rvbaraddr_l;	/* boot (reset vector) address, low word register */
	unsigned int rvbaraddr_h;	/* boot (reset vector) address, high word register */
#ifndef CPU_PM_CORE_ARCH64_ONLY
	/* NOTE(review): presumably the AArch32/AArch64 boot-arch control register — confirm */
	unsigned int arch_addr;
#endif /* CPU_PM_CORE_ARCH64_ONLY */
	struct ppu_pwr_ctrl pwr;	/* this core's PPU register block */
	unsigned int pwr_ctrl;
};
211
/* Per-cluster power-control data, filled by PER_CLUSTER_PWR_DATA(). */
struct cluster_pwr_ctrl {
	struct ppu_pwr_ctrl pwr;	/* the cluster's PPU register block */
};
215
/* Requested power transition flags */
#define MT_CPUPM_PWR_ON		BIT(0)
#define MT_CPUPM_PWR_OFF	BIT(1)
218
#ifdef CPU_PM_SUSPEND_NOTIFY
/* Per-CPU low-power status bits (tracked in struct per_cpu_stage) */
#define PER_CPU_STATUS_S2IDLE		BIT(0)
#define PER_CPU_STATUS_PDN		BIT(1)
#define PER_CPU_STATUS_HOTPLUG		BIT(2)
#define PER_CPU_STATUS_S2IDLE_PDN \
	(PER_CPU_STATUS_S2IDLE | PER_CPU_STATUS_PDN)

/* True when all bits of _tar are set in _state */
#define CPUPM_PWR_STATUS(_state, _tar)	((_state & _tar) == _tar)
/* True when the CPU is s2idle+powered-down, or hot-unplugged */
#define IS_CPUPM_SAVE_PWR_STATUS(_state) ( \
	CPUPM_PWR_STATUS(_state, PER_CPU_STATUS_S2IDLE_PDN) || \
	(_state & PER_CPU_STATUS_HOTPLUG))

#ifdef CONFIG_MTK_CPU_ILDO
/* Non-zero mask means CPU retention (internal LDO) is enabled */
#define CPU_PM_CPU_RET_IS_ENABLED	CPU_PM_CPU_RET_MASK

/* Return codes for retention-control requests */
enum {
	CPU_PM_RET_SET_SUCCESS = 0,
	CPU_PM_RET_SET_FAIL
};

/* Retention status register inside the CPU EB TCM */
#define CPU_EB_RET_STA_REG	(CPU_EB_TCM_BASE + CPU_EB_RET_STA_OFFSET)
/* NOTE(review): timeout of 100 — unit not visible here (polls? us?); confirm at use site */
#define CPU_RET_TIMEOUT		100
#endif /* CONFIG_MTK_CPU_ILDO */

/* Per-CPU suspend/hotplug bookkeeping (PER_CPU_STATUS_* bits) */
struct per_cpu_stage {
	unsigned int cpu_status;
};
#endif /* CPU_PM_SUSPEND_NOTIFY */
247
/* MCUSYS state-tracking bits */
#define MCUSYS_STATUS_PDN		BIT(0)
#define MCUSYS_STATUS_CPUSYS_PROTECT	BIT(8)
#define MCUSYS_STATUS_MCUSYS_PROTECT	BIT(9)
251
#ifdef CPU_PM_ACP_FSM
#define ACP_FSM_TIMEOUT_MAX	(500)	/* us: panic threshold */
#define ACP_FSM_AWARE_TIME	(100)	/* us: warn threshold */
/*
 * One iteration of an ACP FSM busy-wait loop: panic after
 * ACP_FSM_TIMEOUT_MAX us, warn once when exactly ACP_FSM_AWARE_TIME us
 * have elapsed, then advance k_cnt and delay 1 us. k_cnt must be a
 * modifiable lvalue owned by the caller.
 */
#define DO_ACP_FSM_WAIT_TIMEOUT(k_cnt) ({ \
	if (k_cnt >= ACP_FSM_TIMEOUT_MAX) { \
		INFO("[%s:%d] - ACP FSM TIMEOUT %u us (> %u)\n", \
		     __func__, __LINE__, k_cnt, ACP_FSM_TIMEOUT_MAX); \
		panic(); \
	} else if (k_cnt == ACP_FSM_AWARE_TIME) { \
		INFO("[%s:%d] - ACP FSM latency exceed %u us\n", \
		     __func__, __LINE__, ACP_FSM_AWARE_TIME); \
	} \
	k_cnt++; udelay(1); })
#endif /* CPU_PM_ACP_FSM */
266
/* cpu_pm function ID */
enum mt_cpu_pm_user_id {
	MCUSYS_STATUS = 0,
	CPC_COMMAND,
};
272
/*
 * cpu_pm low-power function IDs (SMC sub-commands).
 * Values are positional — append new IDs at the end to keep the
 * SMC ABI stable with the non-secure caller.
 */
enum mt_cpu_pm_lp_smc_id {
	LP_CPC_COMMAND = 0,
	IRQS_REMAIN_ALLOC,
	IRQS_REMAIN_CTRL,
	IRQS_REMAIN_IRQ,
	IRQS_REMAIN_WAKEUP_CAT,
	IRQS_REMAIN_WAKEUP_SRC,
	SUSPEND_SRC,
	CPU_PM_COUNTER_CTRL,
	CPU_PM_RECORD_CTRL,
	SUSPEND_ABORT_REASON,
	CPU_PM_RET_CTRL
};
287
/* Reason codes reported by mtk_suspend_abort_reason_get() */
enum mt_suspend_abort_reason {
	MTK_PM_SUSPEND_OK = 0,
	MTK_PM_SUSPEND_ABORT_PWR_REQ,
	MTK_PM_SUSPEND_ABORT_LAST_CORE,
	MTK_PM_SUSPEND_ABORT_RC_INVALID,
};
294
/* Platform device config exchanged via mt_plat_cpu_pm_dev_update()/_config() */
struct mtk_plat_dev_config {
	int auto_off;			/* auto power-off enable flag */
	unsigned int auto_thres_us;	/* auto power-off threshold, microseconds */
};

/* MCUSYS-off record returned piecewise by the mtk_mcusys_off_record_* getters */
struct mt_cpu_pm_record {
	unsigned int cnt;	/* occurrence counter */
	uint64_t name[2];	/* 16 bytes of name/tag data packed into two words */
};
304
/* Enable/disable CPU retention; returns a CPU_PM_RET_* result code. */
unsigned int cpupm_cpu_retention_control(unsigned int enable);
/*
 * Query current CPU retention control state.
 * NOTE(review): "cpupu_" looks like a typo of "cpupm_", but the name is
 * public interface — renaming would break callers; confirm before fixing.
 */
unsigned int cpupu_get_cpu_retention_control(void);
/* Update / fetch the platform device auto-off configuration. */
void mt_plat_cpu_pm_dev_update(struct mtk_plat_dev_config *config);
int mt_plat_cpu_pm_dev_config(struct mtk_plat_dev_config **config);
/* Record suspend state 'act' for core 'cpuid'; returns 0 on success — TODO confirm */
int cpupm_set_suspend_state(unsigned int act, unsigned int cpuid);
/* Getters for the MCUSYS-off record and the last suspend-abort reason. */
uint64_t mtk_mcusys_off_record_cnt_get(void);
uint64_t mtk_mcusys_off_record_name_get(void);
uint64_t mtk_suspend_abort_reason_get(void);
313
314 #endif /* MT_CPU_PM_H */
315