/*
 * Copyright (c) 2025, MediaTek Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdint.h>

#include <drivers/delay_timer.h>
#include <lib/spinlock.h>
#include <plat/common/platform.h>
#include <platform_def.h>

#include <lib/mtk_init/mtk_init.h>
#include <lib/pm/mtk_pm.h>
#include <lpm_v2/mt_lp_rm.h>
#include "mt_cpu_pm.h"
#include "mt_cpu_pm_cpc.h"
#include "mt_cpu_pm_mbox.h"
#include "mt_cpu_pm_smc.h"
#include "mt_lp_irqremain.h"
#include "mt_ppu.h"
#include "mt_smp.h"
#include <mtk_mmap_pool.h>
#include <pwr_topology.h>

/*
 * The bakery lock must be used when the cache is turned off.
 * Otherwise, the spin lock is used since it performs better.
 */
#ifdef MT_CPU_PM_USING_BAKERY_LOCK
DEFINE_BAKERY_LOCK(mt_cpu_pm_lock);
#define plat_cpu_pm_lock_init() bakery_lock_init(&mt_cpu_pm_lock)
#define plat_cpu_pm_lock() bakery_lock_get(&mt_cpu_pm_lock)
#define plat_cpu_pm_unlock() bakery_lock_release(&mt_cpu_pm_lock)
#else
spinlock_t mt_cpu_pm_lock;
#define plat_cpu_pm_lock_init()
#define plat_cpu_pm_lock() spin_lock(&mt_cpu_pm_lock)
#define plat_cpu_pm_unlock() spin_unlock(&mt_cpu_pm_lock)
#endif /* MT_CPU_PM_USING_BAKERY_LOCK */

#define cpu_pm_unlikely(x) __builtin_expect(!!(x), 0)

enum mt_pwr_node {
	MT_PWR_SYSTEM_MCUSYS = 0,
	MT_PWR_SYSTEM_VCORE,
	MT_PWR_MAX
};

#define CPU_PM_DEPD_MASK	0x0000000F
#define CPU_PM_DEPD_INIT	BIT(0)
#define CPU_PM_DEPD_READY	BIT(1)
#define CPU_PM_PLAT_READY	BIT(2)

#define CPU_PM_AFFLV_CLUSTER_ABORT	BIT(0)
#define CPU_PM_AFFLV_MCUSYS_ABORT	BIT(4)

enum cpupm_pwr_req_def {
	CPUPM_PWR_REQ_CLUSTER,
	CPUPM_PWR_REQ_MCUSYS,
	CPUPM_PWR_REQ_MAX
};

#ifdef CPU_PM_TINYSYS_SUPPORT
#define CPU_PM_LP_READY	(CPU_PM_DEPD_INIT | \
			 CPU_PM_DEPD_READY | \
			 CPU_PM_PLAT_READY)
#else
#define CPU_PM_LP_READY	(CPU_PM_PLAT_READY)
#endif /* CPU_PM_TINYSYS_SUPPORT */

#if CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN && \
	!CPU_PM_DOMAIN_CORE_ONLY
static int mt_pwr_nodes[MT_PWR_MAX];
static int plat_mt_lp_cpu_rc;
static struct mt_cpu_pm_record cpu_pm_record;
static uint64_t suspend_abort_reason;
#endif /* CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN &&
	* !CPU_PM_DOMAIN_CORE_ONLY
	*/

static struct mtk_plat_dev_config plat_dev;

#if CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN
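/*
 * CPUPM_ARCH_TIME_MS converts milliseconds into generic timer ticks
 * (ms -> us -> ticks at SYS_COUNTER_FREQ_IN_MHZ). CPUPM_BOOTUP_TIME_THR is
 * the boot-up settling threshold (CPUPM_READY_MS, defined further below)
 * checked by cpupm_pwr_state_valid() before low-power states are allowed.
 */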
#define CPUPM_ARCH_TIME_MS(_ms)	((_ms) * 1000 * SYS_COUNTER_FREQ_IN_MHZ)
#define CPUPM_BOOTUP_TIME_THR	CPUPM_ARCH_TIME_MS(CPUPM_READY_MS)

static unsigned int cpu_pm_status;

#ifdef CPU_PM_PWR_REQ
unsigned int cpupm_pwr_reqs[CPUPM_PWR_REQ_MAX];
#endif /* CPU_PM_PWR_REQ */

#ifdef CPU_PM_SUSPEND_NOTIFY
#define IS_CPU_SUPEND_SAVE(__cid)	(cpu_stage[__cid].cpu_status & \
	(PER_CPU_STATUS_S2IDLE | PER_CPU_STATUS_HOTPLUG))

/* Make sure all online cores have gone through the kernel s2idle flow. */
#define IS_PLAT_ALL_ONLINE_CORES_S2IDLE(__st) ({ \
	int _res = 0; \
	if (cpu_pm_unlikely(cpu_stage[__st->info.cpuid].cpu_status \
			    & PER_CPU_STATUS_S2IDLE)) { \
		unsigned int i; \
		for (i = 0, _res = 1; i < PLATFORM_CORE_COUNT; ++i) \
			if (!IS_CPU_SUPEND_SAVE(i)) { \
				_res = 0; \
				break; \
			} \
	} _res; })

#else
#define IS_PLAT_ALL_ONLINE_CORES_S2IDLE(__st) \
	IS_PLAT_SUSPEND_ID(__st->pwr.state_id)

#endif /* CPU_PM_SUSPEND_NOTIFY */

#endif /* CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN */

#ifdef CPU_PM_SUSPEND_NOTIFY
static struct per_cpu_stage cpu_stage[PLATFORM_CORE_COUNT];
#endif /* CPU_PM_SUSPEND_NOTIFY */

#if CONFIG_MTK_CPU_SUSPEND_EN || CONFIG_MTK_SMP_EN
#if CONFIG_MTK_PM_SUPPORT
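/*
 * Restore the cluster PPU to dynamic mode on resume; the off-state is
 * memory retention when auto-off is enabled, full off otherwise.
 */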
static void cpupm_cluster_resume_common(void)
{
	struct cluster_pwr_ctrl cls_pwr_ctrl;

	PER_CLUSTER_PWR_CTRL(cls_pwr_ctrl, 0);

#ifndef CPU_PM_ACP_FSM
	mt_smp_ppu_pwr_set(&cls_pwr_ctrl.pwr, PPU_PWPR_DYNAMIC_MODE,
			   (plat_dev.auto_off) ? PPU_PWPR_MEM_RET : PPU_PWPR_OFF);
#endif /* CPU_PM_ACP_FSM */
}

#ifdef CONFIG_MTK_CPU_ILDO

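/*
 * Helpers to program the WFI retention delay field of CPUPWRCTLR_EL3:
 * the field is cleared first, then (for enable) the requested delay value
 * is written back.
 */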
#define read_cpupwrctlr()	read_cpupwrctlr_el3()
#define write_cpupwrctlr(_v)	write_cpupwrctlr_el3(_v)
#define mt_cpu_retention_enable(_ret_delay) \
	write_cpupwrctlr((read_cpupwrctlr() & \
		(~(CPUPWRCTLR_EL3_WFI_RET_MASK << \
		   CPUPWRCTLR_EL3_WFI_RET_SHIFT))) | \
		((_ret_delay & CPUPWRCTLR_EL3_WFI_RET_MASK) << \
		 CPUPWRCTLR_EL3_WFI_RET_SHIFT))
#define mt_cpu_retention_disable() \
	write_cpupwrctlr(read_cpupwrctlr() & \
		(~(CPUPWRCTLR_EL3_WFI_RET_MASK << \
		   CPUPWRCTLR_EL3_WFI_RET_SHIFT)))

static unsigned int cpu_retention_enable[PLATFORM_CORE_COUNT];

static void cpupm_cpu_retention_init(void)
{
	unsigned int i;

	for (i = 0; i < PLATFORM_CORE_COUNT; i++)
		cpu_retention_enable[i] = 0;
	INFO("[CPU_PM]: CPU_RET_MASK: 0x%x\n", CPU_PM_CPU_RET_MASK);
}

static void cpupm_cpu_retention_set(unsigned int ret_delay)
{
	mt_cpu_retention_enable(ret_delay);
}

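/*
 * Poll CPU_EB_RET_STA_REG until the per-CPU retention-ready bit is set,
 * then apply the requested retention delay. Returns CPU_PM_RET_SET_FAIL
 * on timeout or when retention is not enabled for this CPU.
 */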
static unsigned int cpupm_cpu_ildo_state_valid(unsigned int cpu)
{
	unsigned int timeout = 0, ret_sta_reg = 0;

	if (!cpu_retention_enable[cpu])
		return CPU_PM_RET_SET_FAIL;

	CPU_PM_ASSERT(cpu < PLATFORM_CORE_COUNT);
	while (timeout < CPU_RET_TIMEOUT) {
		ret_sta_reg = mmio_read_32(CPU_EB_RET_STA_REG);
		if (ret_sta_reg & BIT(cpu)) {
			cpupm_cpu_retention_set(cpu_retention_enable[cpu]);
			return CPU_PM_RET_SET_SUCCESS;
		}
		udelay(1);
		timeout++;
	}

	ERROR("[CPU_RET] wait brisket init timeout, sta:%x\n", ret_sta_reg);
	return CPU_PM_RET_SET_FAIL;
}

unsigned int cpupu_get_cpu_retention_control(void)
{
	unsigned int i, ret = 0;

	for (i = 0; i < PLATFORM_CORE_COUNT; i++)
		ret |= cpu_retention_enable[i];
	return ret;
}

unsigned int cpupm_cpu_retention_control(unsigned int enable)
{
	unsigned int ret = CPU_PM_RET_SET_FAIL;
	unsigned int cpu = plat_my_core_pos();

	if ((cpu_pm_status == CPU_PM_LP_READY) &&
	    (CPU_PM_CPU_RET_MASK & BIT(cpu))) {
		enable &= 0x7;
		cpu_retention_enable[cpu] = enable;
		if (enable) {
			ret = cpupm_cpu_ildo_state_valid(cpu);
		} else {
			mt_cpu_retention_disable();
			ret = CPU_PM_RET_SET_SUCCESS;
		}
	}
	return ret;
}
#else
#define cpupm_cpu_ildo_state_valid(cpu)
#endif /* CONFIG_MTK_CPU_ILDO */

static void cpupm_cpu_resume_common(const struct mtk_cpupm_pwrstate *state)
{
	CPU_PM_ASSERT(state);
	mtk_cpc_core_on_hint_clr(state->info.cpuid);
	cpupm_cpu_ildo_state_valid(state->info.cpuid);
}
#endif /* CONFIG_MTK_PM_SUPPORT */
#endif /* CONFIG_MTK_CPU_SUSPEND_EN || CONFIG_MTK_SMP_EN */

#define RVBARADDR_ONKEEPON_SEL	(MCUCFG_BASE + 0x388)

#if CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_SMP_EN
static int cpupm_cpu_smp_afflv(unsigned int cur_afflv,
			       const struct mtk_cpupm_pwrstate *state,
			       const struct pwr_toplogy *topology)
{
#ifdef CPU_PM_TINYSYS_SUPPORT
	if (topology)
		mtk_set_mcupm_group_hint(topology->group);
#endif /* CPU_PM_TINYSYS_SUPPORT */
	return 0;
}

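/*
 * Prepare a core for power-on: program its warm-boot entry address,
 * initialize the per-core architectural settings, then power the core on.
 */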
static int cpupm_cpu_pwr_on_prepare(unsigned int cpu, uintptr_t entry)
{
	struct cpu_pwr_ctrl pwr_ctrl = {};
	int ret = MTK_CPUPM_E_OK;

	if (mmio_read_32(RVBARADDR_ONKEEPON_SEL) == 0x1) {
		ERROR("ONKEEPON_SEL=%x, CPC_FLOW_CTRL_CFG=%x\n",
		      mmio_read_32(RVBARADDR_ONKEEPON_SEL),
		      mmio_read_32(CPC_MCUSYS_CPC_FLOW_CTRL_CFG));
		mmio_write_32(RVBARADDR_ONKEEPON_SEL, 0x1);
	}

	PER_CPU_PWR_CTRL(pwr_ctrl, cpu);
	mt_smp_core_bootup_address_set(0, cpu, &pwr_ctrl, entry);
	mt_smp_core_init_arch(0, cpu, 1, &pwr_ctrl);
	ret = mt_smp_power_core_on(cpu, &pwr_ctrl);

	return ret;
}

void cpupm_cpu_resume_smp(const struct mtk_cpupm_pwrstate *state)
{
	CPU_PM_ASSERT(state);

	cpupm_cpu_resume_common(state);
#ifdef CPU_PM_SUSPEND_NOTIFY
	cpu_stage[state->info.cpuid].cpu_status &= ~PER_CPU_STATUS_HOTPLUG;
#endif /* CPU_PM_SUSPEND_NOTIFY */
	pwr_domain_coordination(PWR_DOMAIN_SMP_ON,
				0,
				state,
				cpupm_cpu_smp_afflv);
}

void cpupm_cpu_suspend_smp(const struct mtk_cpupm_pwrstate *state)
{
	struct cpu_pwr_ctrl pwr_ctrl = {};

	CPU_PM_ASSERT(state);

	PER_CPU_PWR_CTRL(pwr_ctrl, state->info.cpuid);
	mt_smp_power_core_off(state->info.cpuid, &pwr_ctrl);

#ifdef CPU_PM_SUSPEND_NOTIFY
	cpu_stage[state->info.cpuid].cpu_status |= PER_CPU_STATUS_HOTPLUG;
#endif /* CPU_PM_SUSPEND_NOTIFY */
	pwr_domain_coordination(PWR_DOMAIN_SMP_OFF,
				0,
				state,
				cpupm_cpu_smp_afflv);
}

static void cpupm_smp_init(unsigned int cpu, uintptr_t sec_entrypoint)
{
	unsigned int reg;
	struct mtk_cpupm_pwrstate state = {
		.info = {
			.cpuid = cpu,
			.mode = MTK_CPU_PM_SMP,
		},
		.pwr = {
			.afflv = 0,
			.state_id = 0,
		},
	};

	struct cluster_pwr_ctrl cls_pwr_ctrl;

	reg = mmio_read_32(CPC_MCUSYS_CPC_FLOW_CTRL_CFG);
	if (reg & CPC_MCUSYS_CPC_RESET_PWR_ON_EN) {
		INFO("[%s:%d][CPU_PM] reset pwr on is enabled, clearing it\n",
		     __func__, __LINE__);
		mmio_clrbits_32(CPC_MCUSYS_CPC_FLOW_CTRL_CFG,
				CPC_MCUSYS_CPC_RESET_PWR_ON_EN);
	}

	PER_CLUSTER_PWR_CTRL(cls_pwr_ctrl, 0);

	mt_smp_ppu_op_set(&cls_pwr_ctrl.pwr,
			  PPU_PWPR_OP_DYNAMIC_MODE,
			  PPU_PWPR_OP_ONE_SLICE_SF_ONLY);
	cpupm_cluster_resume_common();
	cpupm_cpu_pwr_on_prepare(cpu, sec_entrypoint);
	cpupm_cpu_resume_smp(&state);
}
#endif /* CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_SMP_EN */

#if CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN

#if !CPU_PM_DOMAIN_CORE_ONLY
static unsigned int plat_prev_stateid;
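/*
 * Invoked from the power-domain coordination flow for the last core going
 * down: pick the platform state id, ask the low-power resource manager for a
 * matching constraint and record why suspend was aborted if MCUSYS cannot be
 * powered off.
 */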
static int mcusys_prepare_suspend(unsigned int cur_afflv,
				  const struct mtk_cpupm_pwrstate *state,
				  const struct pwr_toplogy *topology)
{
	unsigned int stateid;

	if (!state)
		return MTK_CPUPM_E_FAIL;

	stateid = state->pwr.state_id;

#ifdef CPU_PM_TINYSYS_SUPPORT
	if (topology)
		mtk_set_mcupm_group_hint(topology->group);
#endif /* CPU_PM_TINYSYS_SUPPORT */

	if (!IS_PLAT_MCUSYSOFF_AFFLV(cur_afflv))
		return MTK_CPUPM_E_OK;

#ifdef CPU_PM_PWR_REQ
	if (cpupm_pwr_reqs[CPUPM_PWR_REQ_CLUSTER] ||
	    cpupm_pwr_reqs[CPUPM_PWR_REQ_MCUSYS]) {
		suspend_abort_reason = MTK_PM_SUSPEND_ABORT_PWR_REQ;
		goto mt_pwr_mcusysoff_break;
	}
#endif /* CPU_PM_PWR_REQ */

	if (mtk_cpc_mcusys_off_prepare() != CPC_SUCCESS) {
		suspend_abort_reason = MTK_PM_SUSPEND_ABORT_LAST_CORE;
		goto mt_pwr_mcusysoff_break;
	}

	if (IS_PLAT_ALL_ONLINE_CORES_S2IDLE(state))
		stateid = MT_PLAT_PWR_STATE_SUSPEND;
	else if (mt_pwr_nodes[MT_PWR_SYSTEM_MCUSYS] != 0)
		stateid = MT_PLAT_PWR_STATE_MCUSYS;
	else if (mt_pwr_nodes[MT_PWR_SYSTEM_VCORE] != 0)
		stateid = MT_PLAT_PWR_STATE_SYSTEM_VCORE;
	else
		stateid = MT_PLAT_PWR_STATE_MCUSYS;

	plat_prev_stateid = stateid;
	plat_mt_lp_cpu_rc =
		mt_lp_rm_find_constraint(0, state->info.cpuid, stateid, NULL);

	if (plat_mt_lp_cpu_rc < 0) {
		suspend_abort_reason = MTK_PM_SUSPEND_ABORT_RC_INVALID;
		goto mt_pwr_mcusysoff_reflect;
	}

#ifdef CPU_PM_TINYSYS_SUPPORT
	mtk_set_cpu_pm_preffered_cpu(state->info.cpuid);
#endif /* CPU_PM_TINYSYS_SUPPORT */
	suspend_abort_reason = MTK_PM_SUSPEND_OK;
	return MTK_CPUPM_E_OK;

mt_pwr_mcusysoff_reflect:
	mtk_cpc_mcusys_off_reflect();
mt_pwr_mcusysoff_break:
	plat_mt_lp_cpu_rc = -1;

	if (IS_PLAT_ALL_ONLINE_CORES_S2IDLE(state)) {
		/* set SPM pending if s2idle fail to turn mcusys off */
		if (suspend_abort_reason == MTK_PM_SUSPEND_ABORT_PWR_REQ)
			NOTICE("[LPM] PWR_REQ is held\n");
		else if (suspend_abort_reason == MTK_PM_SUSPEND_ABORT_LAST_CORE)
			NOTICE("[LPM] suspend last core prot fail\n");
		else if (suspend_abort_reason ==
			 MTK_PM_SUSPEND_ABORT_RC_INVALID)
			NOTICE("[LPM] no available RC\n");
	}

	return MTK_CPUPM_E_FAIL;
}

#define RECORD_NAME_LEN		(16)
#define RECORD_NAME_LEN_SMC	(8)
void mtk_cpu_pm_mcusys_record(const struct mtk_cpupm_pwrstate *state)
{
	unsigned int i = 0, j = 0;
	unsigned int stateid = state->pwr.state_id;
	char name[RECORD_NAME_LEN];
	int ret = 0;
	uint64_t tran = 0;

	memset(name, 0, sizeof(name));
	switch (stateid) {
	case MT_PLAT_PWR_STATE_MCUSYS:
	case MT_PLAT_PWR_STATE_MCUSYS_BUCK:
		ret = snprintf(name, RECORD_NAME_LEN - 1, "mcusys_off");
		break;
	case MT_PLAT_PWR_STATE_SYSTEM_MEM:
		ret = snprintf(name, RECORD_NAME_LEN - 1, "system_mem");
		break;
	case MT_PLAT_PWR_STATE_SYSTEM_PLL:
		ret = snprintf(name, RECORD_NAME_LEN - 1, "system_pll");
		break;
	case MT_PLAT_PWR_STATE_SYSTEM_BUS:
		ret = snprintf(name, RECORD_NAME_LEN - 1, "system_bus");
		break;
	case MT_PLAT_PWR_STATE_SYSTEM_VCORE:
		ret = snprintf(name, RECORD_NAME_LEN - 1, "system_vcore");
		break;
	case MT_PLAT_PWR_STATE_SUSPEND:
		ret = snprintf(name, RECORD_NAME_LEN - 1, "suspend");
		break;
	default:
		ret = snprintf(name, RECORD_NAME_LEN - 1, "Unknown_State");
		break;
	}

	if (ret < 0) {
		INFO("[%s]snprintf error %d\n", __func__, ret);
		return;
	}

	memset(cpu_pm_record.name, 0, sizeof(cpu_pm_record.name));
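	/*
	 * Pack the ASCII state name into two 64-bit words
	 * (RECORD_NAME_LEN_SMC characters each) so it can later be read
	 * back through mtk_mcusys_off_record_name_get().
	 */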
	while ((i < RECORD_NAME_LEN) && (*(name + i) != '\0')) {
		if (i == RECORD_NAME_LEN_SMC)
			++j;
		tran = (uint64_t)(*(name + i) & 0xFF);
		cpu_pm_record.name[j] |= (tran <<
			((i - (RECORD_NAME_LEN_SMC * j)) << 3));
		if (name[i] == '\0')
			break;
		i++;
	}

	cpu_pm_record.cnt++;
}

uint64_t mtk_mcusys_off_record_cnt_get(void)
{
	return cpu_pm_record.cnt;
}

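/*
 * Return the two 64-bit words of the last recorded state name, alternating
 * between them on successive calls.
 */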
uint64_t mtk_mcusys_off_record_name_get(void)
{
	static unsigned int idx;
	uint64_t ret = 0;

	ret = cpu_pm_record.name[idx];
	idx = !idx;

	return ret;
}

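/*
 * Counterpart of mcusys_prepare_suspend(): release the low-power constraint,
 * record the resumed state and clear this CPU's external interrupt wakeup
 * request.
 */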
static int mcusys_prepare_resume(unsigned int cur_afflv,
				 const struct mtk_cpupm_pwrstate *state,
				 const struct pwr_toplogy *topology)
{
	uint32_t cpu = plat_my_core_pos();

	if (!state)
		return MTK_CPUPM_E_FAIL;

#ifdef CPU_PM_TINYSYS_SUPPORT
	if (topology)
		mtk_set_mcupm_group_hint(topology->group);
#endif /* CPU_PM_TINYSYS_SUPPORT */

	if (!IS_PLAT_MCUSYSOFF_AFFLV(cur_afflv))
		return MTK_CPUPM_E_OK;

#ifdef CPU_PM_PWR_REQ
	if (cpupm_pwr_reqs[CPUPM_PWR_REQ_CLUSTER] ||
	    cpupm_pwr_reqs[CPUPM_PWR_REQ_MCUSYS])
		return MTK_CPUPM_E_FAIL;
#endif /* CPU_PM_PWR_REQ */

	if (plat_mt_lp_cpu_rc < 0)
		return MTK_CPUPM_E_FAIL;

	mt_lp_rm_reset_constraint(plat_mt_lp_cpu_rc,
				  state->info.cpuid,
				  plat_prev_stateid);
	mtk_cpc_mcusys_off_reflect();

	mtk_cpu_pm_mcusys_record(state);

	/* Clear EXT_INT_WAKEUP_REQ of the first-on CPU */
	mmio_write_32(SPM_EXT_INT_WAKEUP_REQ_CLR, BIT(cpu));
	if (mmio_read_32(SPM_EXT_INT_WAKEUP_REQ)) {
		NOTICE("EXT_INT_WAKEUP_REQ(%u) is not cleared. CPU: %lu\n",
		       mmio_read_32(SPM_EXT_INT_WAKEUP_REQ),
		       BIT(cpu));
		CPU_PM_ASSERT(0);
	}

	return MTK_CPUPM_E_OK;
}

uint64_t mtk_suspend_abort_reason_get(void)
{
	return suspend_abort_reason;
}
#endif /* CPU_PM_DOMAIN_CORE_ONLY */

static unsigned int cpupm_do_pstate_off(const mtk_pstate_type psci_state,
					const struct mtk_cpupm_pwrstate *state)
{
	unsigned int pstate = MT_CPUPM_PWR_DOMAIN_CORE;

#ifdef CPU_PM_DOMAIN_CORE_ONLY
	pstate &= ~(MT_CPUPM_PWR_DOMAIN_CLUSTER |
		    MT_CPUPM_PWR_DOMAIN_PERCORE_DSU |
		    MT_CPUPM_PWR_DOMAIN_MCUSYS);
#else
	if (!state || (state->pwr.afflv > PLAT_MAX_PWR_LVL)) {
		CPU_PM_ASSERT(state);
		CPU_PM_ASSERT(state->pwr.afflv <= PLAT_MAX_PWR_LVL);
	}

	/*
	 * Bookkeeping for the case where every core's afflv is higher than
	 * PLAT_MAX_RET_STATE and the state id selects an MCUSYS-level state.
	 */
	switch (state->pwr.state_id) {
	case MT_PLAT_PWR_STATE_MCUSYS_BUCK:
		mt_pwr_nodes[MT_PWR_SYSTEM_MCUSYS]++;
		break;
	case MT_PLAT_PWR_STATE_SYSTEM_VCORE:
		mt_pwr_nodes[MT_PWR_SYSTEM_VCORE]++;
		break;
	default:
		break;
	}
	pstate |= pwr_domain_coordination(PWR_DOMAIN_OFF,
					  psci_state,
					  state,
					  mcusys_prepare_suspend);

#endif /* CPU_PM_DOMAIN_CORE_ONLY */

	return pstate;
}

static unsigned int cpupm_do_pstate_on(const mtk_pstate_type psci_state,
				       const struct mtk_cpupm_pwrstate *state)
{
	unsigned int pstate = MT_CPUPM_PWR_DOMAIN_CORE;

#ifdef CPU_PM_DOMAIN_CORE_ONLY
	pstate &= ~(MT_CPUPM_PWR_DOMAIN_CLUSTER |
		    MT_CPUPM_PWR_DOMAIN_PERCORE_DSU |
		    MT_CPUPM_PWR_DOMAIN_MCUSYS);
#else
	CPU_PM_ASSERT(state);

	if (state->pwr.afflv > PLAT_MAX_PWR_LVL)
		CPU_PM_ASSERT(0);

	switch (state->pwr.state_id) {
	case MT_PLAT_PWR_STATE_MCUSYS_BUCK:
		mt_pwr_nodes[MT_PWR_SYSTEM_MCUSYS]--;
		CPU_PM_ASSERT(mt_pwr_nodes[MT_PWR_SYSTEM_MCUSYS] >= 0);
		break;
	case MT_PLAT_PWR_STATE_SYSTEM_VCORE:
		mt_pwr_nodes[MT_PWR_SYSTEM_VCORE]--;
		CPU_PM_ASSERT(mt_pwr_nodes[MT_PWR_SYSTEM_VCORE] >= 0);
		break;
	default:
		break;
	}

	pstate |= pwr_domain_coordination(PWR_DOMAIN_ON,
					  psci_state,
					  state,
					  mcusys_prepare_resume);
#endif /* CPU_PM_DOMAIN_CORE_ONLY */
	return pstate;
}

static void cpupm_cpu_resume(const struct mtk_cpupm_pwrstate *state)
{
	cpupm_cpu_resume_common(state);
}

static void cpupm_cluster_resume(const struct mtk_cpupm_pwrstate *state)
{
	cpupm_cluster_resume_common();
	mtk_cpu_pm_save_cpc_latency(DEV_TYPE_CPUSYS);
}

#if CPU_PM_PWR_REQ || CPU_PM_ACP_FSM
static void cpupm_cluster_suspend(const struct mtk_cpupm_pwrstate *state)
{
	struct cluster_pwr_ctrl cls_pwr_ctrl;

	PER_CLUSTER_PWR_CTRL(cls_pwr_ctrl, 0);

#ifdef CPU_PM_PWR_REQ
	if (cpupm_pwr_reqs[CPUPM_PWR_REQ_CLUSTER]) {
		mt_smp_ppu_pwr_dynamic_set(&cls_pwr_ctrl.pwr,
					   PPU_PWPR_ON);
		return;
	}
#endif /* CPU_PM_PWR_REQ */
#ifdef CPU_PM_ACP_FSM
	unsigned int val, pwsr, timeout_cnt = 0;

	do {
		val = mmio_read_32(MCUSYS_ACP_UTB_FSM);
		DO_ACP_FSM_WAIT_TIMEOUT(timeout_cnt);
	} while ((val & ACP_PWR_CTRL_OP_STATUS) != ACP_PWR_CTRL_OP_ST_IDLE);

	mt_smp_ppu_set(&cls_pwr_ctrl.pwr,
		       PPU_PWPR_OP_DYNAMIC_MODE,
		       DSU_PPU_PWPR_OP_MODE_DEF,
		       PPU_PWPR_DYNAMIC_MODE,
		       (plat_dev.auto_off) ?
		       PPU_PWPR_MEM_RET :
		       PPU_PWPR_OFF);

	timeout_cnt = 0;
	do {
		pwsr = mmio_read_32(cls_pwr_ctrl.pwr.ppu_pwsr);
		DO_ACP_FSM_WAIT_TIMEOUT(timeout_cnt);
	} while ((pwsr & PPU_PWSR_OP_STATUS) == PPU_OP_ST_SF_ONLY);
#endif /* CPU_PM_ACP_FSM */
}
#endif /* CPU_PM_PWR_REQ || CPU_PM_ACP_FSM */

static void cpupm_mcusys_resume(const struct mtk_cpupm_pwrstate *state)
{
#ifdef CPU_PM_IRQ_REMAIN_ENABLE
	mt_lp_irqremain_release();
#endif /* CPU_PM_IRQ_REMAIN_ENABLE */
	mtk_cpu_pm_save_cpc_latency(DEV_TYPE_MCUSYS);
}

static void cpupm_mcusys_suspend(const struct mtk_cpupm_pwrstate *state)
{
#if !CPU_PM_DOMAIN_CORE_ONLY
	struct cluster_pwr_ctrl cls_pwr_ctrl;

	assert(state);

	if (plat_mt_lp_cpu_rc < 0)
		return;

	mt_lp_rm_do_constraint(plat_mt_lp_cpu_rc,
			       state->info.cpuid,
			       plat_prev_stateid);

#ifdef CPU_PM_IRQ_REMAIN_ENABLE
	mt_lp_irqremain_aquire();
#endif /* CPU_PM_IRQ_REMAIN_ENABLE */
	if (plat_dev.auto_off) {
		/*
		 * The DSU PPU setting is DYN_MEM_RET when auto dormant is
		 * enabled, so set the PPU to DYN_OFF when MCUSYS goes off.
		 */
		PER_CLUSTER_PWR_CTRL(cls_pwr_ctrl, 0);
		mt_smp_ppu_pwr_set(&cls_pwr_ctrl.pwr,
				   PPU_PWPR_DYNAMIC_MODE,
				   PPU_PWPR_OFF);
	}
#endif /* CPU_PM_DOMAIN_CORE_ONLY */
}

static unsigned int cpupm_get_pstate(enum mt_cpupm_pwr_domain domain,
				     const mtk_pstate_type psci_state,
				     const struct mtk_cpupm_pwrstate *state)
{
	unsigned int pstate = 0;

	if (!state)
		return 0;

	if (state->info.mode == MTK_CPU_PM_SMP)
		pstate = MT_CPUPM_PWR_DOMAIN_CORE;
	else {
		if (domain == CPUPM_PWR_OFF)
			pstate = cpupm_do_pstate_off(psci_state, state);
		else if (domain == CPUPM_PWR_ON)
			pstate = cpupm_do_pstate_on(psci_state, state);
		else {
			INFO("[%s:%d][CPU_PM] unknown pwr domain :%d\n",
			     __func__, __LINE__, domain);
			assert(0);
		}
	}
	return pstate;
}

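/*
 * Gate low-power state selection until the platform is ready: the tinysys
 * mailbox dependencies (when supported) must be initialized and the system
 * must have been up for at least CPUPM_READY_MS.
 */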
#define CPUPM_READY_MS		(40000)
static int cpupm_pwr_state_valid(unsigned int afflv, unsigned int state)
{
	if (cpu_pm_status == CPU_PM_LP_READY)
		return MTK_CPUPM_E_OK;

	if (cpu_pm_status != CPU_PM_LP_READY) {
#ifdef CPU_PM_TINYSYS_SUPPORT
		int status = 0;

		if (!(cpu_pm_status & CPU_PM_DEPD_INIT)) {
			status = mtk_lp_depd_condition(
					CPUPM_MBOX_WAIT_DEV_INIT);
			if (status == 0) {
				plat_cpu_pm_lock();
				cpu_pm_status |= CPU_PM_DEPD_INIT;
				plat_cpu_pm_unlock();
			}
		} else if (!(cpu_pm_status & CPU_PM_DEPD_READY)) {
			status = mtk_lp_depd_condition(
					CPUPM_MBOX_WAIT_TASK_READY);
			if (status == 0) {
				plat_cpu_pm_lock();
				cpu_pm_status |= CPU_PM_DEPD_READY;
				plat_cpu_pm_unlock();
			}
		} else {
#endif /* CPU_PM_TINYSYS_SUPPORT */
			uint64_t arch_time = read_cntpct_el0();

			if (arch_time > (uint64_t)CPUPM_BOOTUP_TIME_THR) {
				plat_cpu_pm_lock();
				cpu_pm_status |= CPU_PM_PLAT_READY;
				plat_cpu_pm_unlock();
			}
#ifdef CPU_PM_TINYSYS_SUPPORT
		}
#endif /* CPU_PM_TINYSYS_SUPPORT */
		return MTK_CPUPM_E_FAIL;
	}
	return MTK_CPUPM_E_OK;
}
#endif /* CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN */

#define CPUPM_PWR_STAT_REQ_UID_MAGIC	(0xbacd1103)

#define IS_VALID_CPUPM_PWR_STAT_REQ(mg) \
	((mg & CPUPM_PWR_STAT_REQ_UID_MAGIC) == CPUPM_PWR_STAT_REQ_UID_MAGIC)

#if CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN
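/*
 * Dispatcher for CPU PM invoke requests: CPUPM_INVOKE_WAKED_CPU reports
 * per-CPU power status, and the PWR_REQ ops acquire or release cluster and
 * MCUSYS power requests under the CPU PM lock.
 */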
static int cpupm_invoke(unsigned int func_id, void *priv)
{
	int ret = MTK_CPUPM_E_OK;
#ifdef CPU_PM_SUSPEND_NOTIFY
	int i, reverse = 0;
	struct cpupm_invoke_data *save_status = (struct cpupm_invoke_data *)priv;
	unsigned int cpu_status;
#endif /* CPU_PM_SUSPEND_NOTIFY */
#ifdef CPU_PM_PWR_REQ
	struct cpupm_pwr_req *req = (struct cpupm_pwr_req *)priv;
	unsigned int pwr_req;
#endif /* CPU_PM_PWR_REQ */

	switch (func_id) {
#ifdef CPU_PM_SUSPEND_NOTIFY
	case CPUPM_INVOKE_WAKED_CPU:
		if (priv) {
			for (i = 0; i < PLATFORM_CORE_COUNT; i++) {
				cpu_status = cpu_stage[i].cpu_status;
				if (IS_CPUPM_SAVE_PWR_STATUS(cpu_status))
					reverse |= BIT(i);
			}
			save_status->val.v_u32 = ~reverse;
		} else
			ret = MTK_CPUPM_E_ERR;
		break;
#endif /* CPU_PM_SUSPEND_NOTIFY */

#ifdef CPU_PM_PWR_REQ
	case CPUPM_INVOKE_PWR_REQ_ACTIVE:
		if (priv) {
			if (req->stat.uid == CPUPM_PWR_REQ_UID_MAGIC)
				req->stat.uid = CPUPM_PWR_STAT_REQ_UID_MAGIC;
			else
				ret = MTK_CPUPM_E_ERR;
		} else
			ret = MTK_CPUPM_E_ERR;
		break;
	case CPUPM_INVOKE_PWR_REQ_ACQUIRE:
	case CPUPM_INVOKE_PWR_REQ_RELASE:
		if (priv && (IS_VALID_CPUPM_PWR_STAT_REQ(req->stat.uid))) {
			pwr_req = req->req;
			plat_cpu_pm_lock();
			if (func_id == CPUPM_INVOKE_PWR_REQ_ACQUIRE) {
				if (pwr_req & MT_CPUPM_PWR_DOMAIN_CLUSTER)
					pwr_req |=
						MT_CPUPM_PWR_DOMAIN_MCUSYS_BY_CLUSTER;

				pwr_req = pwr_req & ~req->stat.sta_req;

				if (pwr_req & MT_CPUPM_PWR_DOMAIN_CLUSTER)
					cpupm_pwr_reqs[CPUPM_PWR_REQ_CLUSTER]++;

				if ((pwr_req & MT_CPUPM_MCUSYS_REQ) &&
				    !(req->stat.sta_req & MT_CPUPM_MCUSYS_REQ))
					cpupm_pwr_reqs[CPUPM_PWR_REQ_MCUSYS]++;

				req->stat.sta_req |= pwr_req;
			} else {
				if (pwr_req & MT_CPUPM_PWR_DOMAIN_CLUSTER)
					pwr_req |=
						MT_CPUPM_PWR_DOMAIN_MCUSYS_BY_CLUSTER;

				pwr_req = pwr_req & req->stat.sta_req;
				req->stat.sta_req &= ~pwr_req;

				if (pwr_req & MT_CPUPM_PWR_DOMAIN_CLUSTER) {
					if (cpupm_pwr_reqs[CPUPM_PWR_REQ_CLUSTER] > 0)
						cpupm_pwr_reqs[CPUPM_PWR_REQ_CLUSTER]--;
				}

				if ((pwr_req & MT_CPUPM_MCUSYS_REQ) &&
				    !(req->stat.sta_req & MT_CPUPM_MCUSYS_REQ)) {
					if (cpupm_pwr_reqs[CPUPM_PWR_REQ_MCUSYS] > 0)
						cpupm_pwr_reqs[CPUPM_PWR_REQ_MCUSYS]--;
				}
			}

			plat_cpu_pm_unlock();
		} else
			ret = MTK_CPUPM_E_ERR;
		break;
#endif /* CPU_PM_PWR_REQ */
	default:
		ret = MTK_CPUPM_E_ERR;
		break;
	}
	return ret;
}
#endif /* CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN */

void mt_plat_cpu_pm_dev_update(struct mtk_plat_dev_config *config)
{
	if (!config)
		return;
	plat_dev.auto_off = config->auto_off;
	plat_dev.auto_thres_us = config->auto_thres_us;
}

int mt_plat_cpu_pm_dev_config(struct mtk_plat_dev_config **config)
{
	if (!config)
		return MTK_CPUPM_E_FAIL;
	*config = &plat_dev;
	return 0;
}

#if CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_SMP_EN
static struct mtk_cpu_smp_ops cpcv5_0_cpu_smp = {
	.init = cpupm_smp_init,
	.cpu_pwr_on_prepare = cpupm_cpu_pwr_on_prepare,
	.cpu_on = cpupm_cpu_resume_smp,
	.cpu_off = cpupm_cpu_suspend_smp,
};
#endif /* CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_SMP_EN */

#if CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN
static struct mtk_cpu_pm_ops cpcv5_0_mcdi = {
	.get_pstate = cpupm_get_pstate,
	.pwr_state_valid = cpupm_pwr_state_valid,
	.cpu_resume = cpupm_cpu_resume,
#if CPU_PM_PWR_REQ || CPU_PM_ACP_FSM
	.cluster_suspend = cpupm_cluster_suspend,
#endif /* CPU_PM_PWR_REQ || CPU_PM_ACP_FSM */
	.cluster_resume = cpupm_cluster_resume,
	.mcusys_suspend = cpupm_mcusys_suspend,
	.mcusys_resume = cpupm_mcusys_resume,
	.invoke = cpupm_invoke,
};
#endif /* CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN */

/* Initialize cpu_stage[].cpu_status as hotplugged for non-boot CPUs. */
static void mtk_cpu_status_init(void)
{
#ifdef CPU_PM_SUSPEND_NOTIFY
	for (int i = 1; i < PLATFORM_CORE_COUNT; i++)
		cpu_stage[i].cpu_status |= PER_CPU_STATUS_HOTPLUG;
#endif /* CPU_PM_SUSPEND_NOTIFY */
}

/*
 * Per the MTK PM methodology, the PSCI op init must be invoked after the
 * CPU PM init to avoid an initialization failure.
 */
int mt_plat_cpu_pm_init(void)
{
	plat_cpu_pm_lock_init();
	pwr_topology_init();
	mtk_cpc_init();
	mtk_cpu_status_init();
#if CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN
	register_cpu_pm_ops(CPU_PM_FN(), &cpcv5_0_mcdi);
#endif /* CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN */
#if CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_SMP_EN
	register_cpu_smp_ops(CPU_PM_FN(), &cpcv5_0_cpu_smp);
#endif /* CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_SMP_EN */
#ifdef CPU_PM_IRQ_REMAIN_ENABLE
	mt_lp_irqremain_init();
#endif /* CPU_PM_IRQ_REMAIN_ENABLE */
	cpupm_smc_init();
#ifdef CONFIG_MTK_CPU_ILDO
	cpupm_cpu_retention_init();
#endif /* CONFIG_MTK_CPU_ILDO */
	INFO("[%s:%d] - MCDI finished\n", __func__, __LINE__);
	return 0;
}
MTK_ARCH_INIT(mt_plat_cpu_pm_init);

static const mmap_region_t cpu_pm_mmap[] MTK_MMAP_SECTION = {
	MAP_REGION_FLAT(MT_UTILITYBUS_BASE,
			MT_UTILITYBUS_SIZE,
			MT_DEVICE | MT_RW | MT_SECURE),
#ifdef CPU_PM_TINYSYS_SUPPORT
#if CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN
	MAP_REGION_FLAT(CPU_EB_TCM_BASE,
			CPU_EB_TCM_SIZE,
			MT_DEVICE | MT_RW | MT_SECURE),
#ifdef CPU_EB_TCM_CNT_BASE
	MAP_REGION_FLAT(CPU_EB_TCM_CNT_BASE,
			CPU_EB_TCM_SIZE,
			MT_DEVICE | MT_RW | MT_SECURE),
#endif /* CPU_EB_TCM_CNT_BASE */
#endif /* CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN */
#endif /* CPU_PM_TINYSYS_SUPPORT */
	{0},
};
DECLARE_MTK_MMAP_REGIONS(cpu_pm_mmap);

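/*
 * Common power on/off event callback: track per-CPU power-down status and,
 * on power-on, update the CPU PM counters and CPC latency records.
 */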
static void *cpupm_core_pwr_handler(const void *arg, unsigned int act)
{
	struct mt_cpupm_event_data *nb =
		(struct mt_cpupm_event_data *)arg;

	if (!arg || (nb->cpuid >= PLATFORM_CORE_COUNT))
		return (void *)arg;

	if (act & MT_CPUPM_PWR_ON) {
#ifdef CPU_PM_SUSPEND_NOTIFY
		cpu_stage[nb->cpuid].cpu_status &= ~PER_CPU_STATUS_PDN;
#endif /* CPU_PM_SUSPEND_NOTIFY */
		mtk_cpu_pm_counter_update(nb->cpuid);
		mtk_cpu_pm_save_cpc_latency(nb->cpuid);
	} else {
#ifdef CPU_PM_SUSPEND_NOTIFY
		cpu_stage[nb->cpuid].cpu_status |= PER_CPU_STATUS_PDN;
#endif /* CPU_PM_SUSPEND_NOTIFY */
	}

	return (void *)arg;
}

void *cpupm_core_pwr_off_handler(const void *arg)
{
	return cpupm_core_pwr_handler(arg, MT_CPUPM_PWR_OFF);
}
MT_CPUPM_SUBCRIBE_EVENT_PWR_OFF(cpupm_core_pwr_off_handler);

void *cpupm_core_pwr_on_handler(const void *arg)
{
	return cpupm_core_pwr_handler(arg, MT_CPUPM_PWR_ON);
}
MT_CPUPM_SUBCRIBE_EVENT_PWR_ON(cpupm_core_pwr_on_handler);

#ifdef CPU_PM_SUSPEND_NOTIFY
int cpupm_set_suspend_state(unsigned int act, unsigned int cpuid)
{
	if (cpuid >= PLATFORM_CORE_COUNT)
		return MTK_CPUPM_E_ERR;

	if (act & MT_LPM_SMC_ACT_SET)
		cpu_stage[cpuid].cpu_status |= PER_CPU_STATUS_S2IDLE;
	else
		cpu_stage[cpuid].cpu_status &= ~PER_CPU_STATUS_S2IDLE;

	return 0;
}
#endif /* CPU_PM_SUSPEND_NOTIFY */