xref: /rk3399_ARM-atf/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_cpu_pm_cpc.c (revision cf2df874cd09305ac7282fadb0fef6be597dfffb)
/*
 * Copyright (c) 2025, MediaTek Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <string.h>

#include <drivers/delay_timer.h>
#include <lib/spinlock.h>

#include <lib/pm/mtk_pm.h>
#include <mcucfg.h>
#include "mt_cpu_pm.h"
#include "mt_cpu_pm_cpc.h"
#include "mt_smp.h"
#include <mt_timer.h>

#define CHECK_GIC_SGI_PENDING		(0)
#define MTK_SYS_TIMER_SYNC_SUPPORT	(1)
#define MCUSYS_CLUSTER_DORMANT_MASK	0xFFFF
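
/*
 * CHECK_GIC_SGI_PENDING gates an extra abort check in
 * mtk_cpc_mcusys_off_prepare(): when enabled, the MCUSYS off flow is
 * aborted if a GIC SGI is still pending. MTK_SYS_TIMER_SYNC_SUPPORT
 * compiles in the kernel/system timer synchronization helpers below.
 * MCUSYS_CLUSTER_DORMANT_MASK extracts one 16-bit field of
 * CPC_MCUSYS_CLUSTER_COUNTER (bits [15:0] hold the memory-retention
 * count, bits [31:16] the memory-off count).
 */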

struct mtk_cpc_lat_data {
	unsigned int on_sum;
	unsigned int on_min;
	unsigned int on_max;
	unsigned int off_sum;
	unsigned int off_min;
	unsigned int off_max;
	unsigned int on_cnt;
	unsigned int off_cnt;
};

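/*
 * Latency records for every power domain. The union lets callers index
 * the records either as a flat array (p[], indexed by enum dev_type) or
 * through the named cpu/cluster/mcusys views of the same storage, which
 * assumes DEV_TYPE_NUM == PLATFORM_CORE_COUNT + 2.
 */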
struct mtk_cpc_device {
	union {
		struct mtk_cpc_lat_data p[DEV_TYPE_NUM];
		struct {
			struct mtk_cpc_lat_data cpu[PLATFORM_CORE_COUNT];
			struct mtk_cpc_lat_data cluster;
			struct mtk_cpc_lat_data mcusys;
		};
	};
};

static struct mtk_cpc_device cpc_dev;

static bool cpu_pm_counter_enabled;
static bool cpu_cpc_prof_enabled;

static void mtk_cpc_auto_dormant_en(unsigned int en)
{
	struct mtk_plat_dev_config *cfg = NULL;

	if (en)
		mmio_setbits_32(CPC_MCUSYS_CPC_FLOW_CTRL_CFG, CPC_AUTO_OFF_EN);
	else
		mmio_clrbits_32(CPC_MCUSYS_CPC_FLOW_CTRL_CFG, CPC_AUTO_OFF_EN);

	mt_plat_cpu_pm_dev_config(&cfg);

	if (cfg) {
		cfg->auto_off = !!en;
		mt_plat_cpu_pm_dev_update(cfg);
	}
}

static void mtk_cpc_auto_dormant_tick(unsigned int us)
{
	struct mtk_plat_dev_config *cfg = NULL;

	mmio_write_32(CPC_MCUSYS_CPC_OFF_THRES, US_TO_TICKS(us));

	mt_plat_cpu_pm_dev_config(&cfg);

	if (cfg) {
		cfg->auto_thres_us = us;
		mt_plat_cpu_pm_dev_update(cfg);
	}
}

static void mtk_cpu_pm_mcusys_prot_release(void)
{
	mmio_write_32(CPC_MCUSYS_PWR_ON_MASK, MCUSYS_PROT_CLR);
}

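/*
 * Last-core protection handshake: post a request to
 * CPC_MCUSYS_LAST_CORE_REQ and poll the response field. PROT_GIVEUP
 * fails immediately; PROT_SUCCESS only counts if no wakeup request has
 * arrived in the meantime, otherwise the protection is released and the
 * request retried, up to RETRY_CNT_MAX times.
 */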
static int mtk_cpc_last_core_prot(int prot_req, int resp_reg, int resp_ofs)
{
	unsigned int sta, retry;

	retry = 0;

	while (retry < RETRY_CNT_MAX) {
		mmio_write_32(CPC_MCUSYS_LAST_CORE_REQ, prot_req);
		udelay(1);
		sta = (mmio_read_32(resp_reg) >> resp_ofs) & CPC_PROT_RESP_MASK;

		if (sta == PROT_GIVEUP)
			return CPC_ERR_FAIL;

		if (sta == PROT_SUCCESS) {
			if (mmio_read_32(CPC_WAKEUP_REQ) ==
			    CPC_WAKEUP_STAT_NONE)
				return CPC_SUCCESS;

			mtk_cpu_pm_mcusys_prot_release();
		}

		retry++;
	}

	return CPC_ERR_TIMEOUT;
}

static int mtk_cpu_pm_mcusys_prot_aquire(void)
{
	return mtk_cpc_last_core_prot(MCUSYS_PROT_SET,
				      CPC_MCUSYS_LAST_CORE_RESP,
				      MCUSYS_RESP_OFS);
}

int mtk_cpu_pm_cluster_prot_aquire(int cluster)
{
	return mtk_cpc_last_core_prot(CPUSYS_PROT_SET,
				      CPC_MCUSYS_MP_LAST_CORE_RESP,
				      CPUSYS_RESP_OFS);
}

void mtk_cpu_pm_cluster_prot_release(int cluster)
{
	mmio_write_32(CPC_MCUSYS_PWR_ON_MASK, CPUSYS_PROT_CLR);
}

static bool is_cpu_pm_counter_enabled(void)
{
	return cpu_pm_counter_enabled;
}

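/*
 * Accumulate the hardware cluster counter into its SYSRAM backup before
 * it is lost across an MCUSYS off cycle. Only one 16-bit field is
 * credited per backup: the memory-off count when the retention
 * (dormant) count is zero, the retention count otherwise.
 */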
static void mtk_cpc_cluster_cnt_backup(void)
{
	int backup_cnt;
	int curr_cnt;

	if (is_cpu_pm_counter_enabled() == false)
		return;

	/* Single Cluster */
	backup_cnt = mmio_read_32(SYSRAM_CLUSTER_CNT_BACKUP);
	curr_cnt = mmio_read_32(CPC_MCUSYS_CLUSTER_COUNTER);

	/* Get off count if dormant count is 0 */
	if ((curr_cnt & MCUSYS_CLUSTER_DORMANT_MASK) == 0)
		curr_cnt = (curr_cnt >> 16) & MCUSYS_CLUSTER_DORMANT_MASK;
	else
		curr_cnt = curr_cnt & MCUSYS_CLUSTER_DORMANT_MASK;

	mmio_write_32(SYSRAM_CLUSTER_CNT_BACKUP, backup_cnt + curr_cnt);
	mmio_write_32(CPC_MCUSYS_CLUSTER_COUNTER_CLR, 0x3);
}

static inline void mtk_cpc_mcusys_off_en(void)
{
	mmio_setbits_32(CPC_MCUSYS_PWR_CTRL, CPC_MCUSYS_OFF_EN);
}

static inline void mtk_cpc_mcusys_off_dis(void)
{
	mmio_clrbits_32(CPC_MCUSYS_PWR_CTRL, CPC_MCUSYS_OFF_EN);
}

void mtk_cpc_mcusys_off_reflect(void)
{
	mtk_cpc_mcusys_off_dis();
	mtk_cpu_pm_mcusys_prot_release();
}

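/*
 * Prepare the MCUSYS off flow: take last-core protection, optionally
 * abort if a GIC SGI is still pending, snapshot the cluster counters,
 * and finally arm the hardware off state. mtk_cpc_mcusys_off_reflect()
 * above is the matching undo on the resume path.
 */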
int mtk_cpc_mcusys_off_prepare(void)
{
	if (mtk_cpu_pm_mcusys_prot_aquire() != CPC_SUCCESS)
		return CPC_ERR_FAIL;

#if CHECK_GIC_SGI_PENDING
	if (!!(gicr_get_sgi_pending())) {
		mtk_cpu_pm_mcusys_prot_release();
		return CPC_ERR_FAIL;
	}
#endif /* CHECK_GIC_SGI_PENDING */
	mtk_cpc_cluster_cnt_backup();
	mtk_cpc_mcusys_off_en();

	return CPC_SUCCESS;
}

void mtk_cpc_core_on_hint_set(int cpu)
{
	mmio_write_32(CPC_MCUSYS_CPU_ON_SW_HINT_SET, BIT(cpu));
}

void mtk_cpc_core_on_hint_clr(int cpu)
{
	mmio_write_32(CPC_MCUSYS_CPU_ON_SW_HINT_CLR, BIT(cpu));
}

static void mtk_cpc_dump_timestamp(void)
{
	unsigned int id;

	for (id = 0; id < CPC_TRACE_ID_NUM; id++) {
		mmio_write_32(CPC_MCUSYS_TRACE_SEL, id);

		memcpy((void *)(uintptr_t)CPC_TRACE_SRAM(id),
		       (const void *)(uintptr_t)CPC_MCUSYS_TRACE_DATA,
		       CPC_TRACE_SIZE);
	}
}

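/*
 * Mirror the kernel time (sched_clock()) and the raw system counter
 * into CPC registers, presumably so CPC trace timestamps can be
 * correlated with kernel logs. mtk_cpc_time_freeze() resyncs and then
 * sets or clears the CPC_FREEZE debug bit.
 */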
void mtk_cpc_time_sync(void)
{
#if MTK_SYS_TIMER_SYNC_SUPPORT
	uint64_t kt;
	uint32_t systime_l, systime_h;

	kt = sched_clock();
	systime_l = mmio_read_32(CNTSYS_L_REG);
	systime_h = mmio_read_32(CNTSYS_H_REG);

	/* sync kernel timer to cpc */
	mmio_write_32(CPC_MCUSYS_CPC_KERNEL_TIME_L_BASE, (uint32_t)kt);
	mmio_write_32(CPC_MCUSYS_CPC_KERNEL_TIME_H_BASE, (uint32_t)(kt >> 32));

	/* sync system timer to cpc */
	mmio_write_32(CPC_MCUSYS_CPC_SYSTEM_TIME_L_BASE, systime_l);
	mmio_write_32(CPC_MCUSYS_CPC_SYSTEM_TIME_H_BASE, systime_h);
#endif /* MTK_SYS_TIMER_SYNC_SUPPORT */
}

static void mtk_cpc_time_freeze(bool is_freeze)
{
#if MTK_SYS_TIMER_SYNC_SUPPORT
	mtk_cpc_time_sync();
	if (is_freeze)
		mmio_setbits_32(CPC_MCUSYS_CPC_DBG_SETTING, CPC_FREEZE);
	else
		mmio_clrbits_32(CPC_MCUSYS_CPC_DBG_SETTING, CPC_FREEZE);
#endif /* MTK_SYS_TIMER_SYNC_SUPPORT */
}

static void *mtk_cpc_el3_timesync_handler(const void *arg)
{
	if (arg) {
		unsigned int *is_time_sync = (unsigned int *)arg;

		if (*is_time_sync)
			mtk_cpc_time_freeze(false);
		else
			mtk_cpc_time_freeze(true);
	}
	return (void *)arg;
}
MT_CPUPM_SUBCRIBE_EL3_UPTIME_SYNC_WITH_KERNEL(mtk_cpc_el3_timesync_handler);

static void mtk_cpc_config(unsigned int cfg, unsigned int data)
{
	unsigned int reg = 0;

	switch (cfg) {
	case CPC_SMC_CONFIG_PROF:
		reg = CPC_MCUSYS_CPC_DBG_SETTING;
		if (data)
			mmio_setbits_32(reg, CPC_PROF_EN);
		else
			mmio_clrbits_32(reg, CPC_PROF_EN);
		break;
	case CPC_SMC_CONFIG_CNT_CLR:
		reg = CPC_MCUSYS_CLUSTER_COUNTER_CLR;
		mmio_write_32(reg, 0x3);
		break;
	case CPC_SMC_CONFIG_TIME_SYNC:
		mtk_cpc_time_sync();
		break;
	default:
		break;
	}
}

static unsigned int mtk_cpc_read_config(unsigned int cfg)
{
	unsigned int res = 0;

	switch (cfg) {
	case CPC_SMC_CONFIG_PROF:
		res = (mmio_read_32(CPC_MCUSYS_CPC_DBG_SETTING) & CPC_PROF_EN)
			? 1 : 0;
		break;
	default:
		break;
	}

	return res;
}

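/*
 * Pack a device name as little-endian ASCII into a uint64_t so it can
 * be returned through an SMC register. For example, "CPU0" packs to
 * 0x30555043 ('C' in the lowest byte). Up to PROF_DEV_NAME_LEN (8)
 * characters fit in the 64-bit result.
 */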
#define PROF_DEV_NAME_LEN	8
uint64_t mtk_cpc_prof_dev_name(unsigned int dev_id)
{
	uint64_t ret = 0, tran = 0;
	unsigned int i = 0;
	static const char *prof_dev_name[DEV_TYPE_NUM] = {
		"CPU0",
		"CPU1",
		"CPU2",
		"CPU3",
		"CPU4",
		"CPU5",
		"CPU6",
		"CPU7",
		"CPUSYS",
		"MCUSYS"
	};

	/* dev_id comes in from an SMC argument; reject out-of-range IDs */
	if (dev_id >= DEV_TYPE_NUM)
		return 0;

	while ((i < PROF_DEV_NAME_LEN) && (prof_dev_name[dev_id][i] != '\0')) {
		tran = (uint64_t)(prof_dev_name[dev_id][i] & 0xFF);
		ret |= (tran << (i << 3));
		i++;
	}

	return ret;
}

static void mtk_cpc_prof_clr(void)
{
	int i;

	for (i = 0; i < DEV_TYPE_NUM; i++)
		memset((char *)&cpc_dev.p[i], 0,
			sizeof(struct mtk_cpc_lat_data));
}

void mtk_cpc_prof_enable(bool enable)
{
	unsigned int reg = 0;

	reg = CPC_MCUSYS_CPC_DBG_SETTING;
	if (enable)
		mmio_setbits_32(reg, CPC_PROF_EN);
	else
		mmio_clrbits_32(reg, CPC_PROF_EN);

	if ((cpu_cpc_prof_enabled == false) && (enable == true))
		mtk_cpc_prof_clr();
	cpu_cpc_prof_enabled = enable;
}

bool mtk_cpc_prof_is_enabled(void)
{
	return cpu_cpc_prof_enabled;
}

uint64_t mtk_cpc_prof_dev_num(void)
{
	return DEV_TYPE_NUM;
}

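/*
 * CPC latency counters tick at the system timer rate; the divide-by-13
 * suggests the usual 13 MHz MediaTek system timer, i.e. 13 ticks per
 * microsecond.
 */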
#define cpc_tick_to_us(val) ((val) / 13)
uint64_t mtk_cpc_prof_read(unsigned int prof_act, unsigned int dev_type)
{
	uint64_t ret = 0;
	struct mtk_cpc_lat_data *lat_data;

	if (dev_type >= DEV_TYPE_NUM)
		return CPC_ERR_FAIL;

	lat_data = &cpc_dev.p[dev_type];

	switch (prof_act) {
	case CPC_PROF_OFF_CNT:
		ret = lat_data->off_cnt;
		break;
	case CPC_PROF_OFF_AVG:
		/* Avoid dividing by zero before any sample is recorded */
		if (lat_data->off_cnt != 0)
			ret = cpc_tick_to_us(lat_data->off_sum /
					     lat_data->off_cnt);
		break;
	case CPC_PROF_OFF_MAX:
		ret = cpc_tick_to_us(lat_data->off_max);
		break;
	case CPC_PROF_OFF_MIN:
		ret = cpc_tick_to_us(lat_data->off_min);
		break;
	case CPC_PROF_ON_CNT:
		ret = lat_data->on_cnt;
		break;
	case CPC_PROF_ON_AVG:
		if (lat_data->on_cnt != 0)
			ret = cpc_tick_to_us(lat_data->on_sum /
					     lat_data->on_cnt);
		break;
	case CPC_PROF_ON_MAX:
		ret = cpc_tick_to_us(lat_data->on_max);
		break;
	case CPC_PROF_ON_MIN:
		ret = cpc_tick_to_us(lat_data->on_min);
		break;
	default:
		break;
	}

	return ret;
}

uint64_t mtk_cpc_prof_latency(unsigned int prof_act, unsigned int arg)
{
	uint64_t res = 0;

	switch (prof_act) {
	case CPC_PROF_ENABLE:
		mtk_cpc_prof_enable((bool)arg);
		break;
	case CPC_PROF_ENABLED:
		res = (uint64_t)mtk_cpc_prof_is_enabled();
		break;
	case CPC_PROF_DEV_NUM:
		res = mtk_cpc_prof_dev_num();
		break;
	case CPC_PROF_DEV_NAME:
		res = mtk_cpc_prof_dev_name(arg);
		break;
	case CPC_PROF_OFF_CNT:
	case CPC_PROF_OFF_AVG:
	case CPC_PROF_OFF_MAX:
	case CPC_PROF_OFF_MIN:
	case CPC_PROF_ON_CNT:
	case CPC_PROF_ON_AVG:
	case CPC_PROF_ON_MAX:
	case CPC_PROF_ON_MIN:
		res = (uint64_t)mtk_cpc_prof_read(prof_act, arg);
		break;
	default:
		break;
	}

	return res;
}

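/*
 * Top-level SMC dispatchers: mtk_cpc_handler() routes CPC_SMC_EVENT_*
 * requests to the config/profiling helpers above, and
 * mtk_cpc_trace_dump() services the trace-data dump request.
 */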
uint64_t mtk_cpc_handler(uint64_t act, uint64_t arg1, uint64_t arg2)
{
	uint64_t res = 0;

	switch (act) {
	case CPC_SMC_EVENT_GIC_DPG_SET:
		/* isolated_status = x2; */
		break;
	case CPC_SMC_EVENT_CPC_CONFIG:
		mtk_cpc_config((unsigned int)arg1, (unsigned int)arg2);
		break;
	case CPC_SMC_EVENT_READ_CONFIG:
		res = mtk_cpc_read_config((unsigned int)arg1);
		break;
	case CPC_SMC_EVENT_PROF_LATENCY:
		res = mtk_cpc_prof_latency((unsigned int)arg1,
					   (unsigned int)arg2);
		break;
	default:
		break;
	}

	return res;
}

uint64_t mtk_cpc_trace_dump(uint64_t act, uint64_t arg1, uint64_t arg2)
{
	uint64_t res = 0;

	switch (act) {
	case CPC_SMC_EVENT_DUMP_TRACE_DATA:
		mtk_cpc_dump_timestamp();
		break;
	default:
		break;
	}

	return res;
}

void mtk_cpu_pm_counter_clear(void)
{
	unsigned int cpu = 0;

	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
		mmio_write_32(SYSRAM_RECENT_CPU_CNT(cpu), 0);

	mmio_write_32(SYSRAM_RECENT_CLUSTER_CNT, 0);
	mmio_write_32(SYSRAM_RECENT_MCUSYS_CNT, 0);
	mmio_write_32(SYSRAM_CPUSYS_CNT, 0);
	mmio_write_32(SYSRAM_MCUSYS_CNT, 0);
	mmio_write_32(CPC_MCUSYS_CLUSTER_COUNTER_CLR, 0x3);
	mmio_write_32(SYSRAM_CLUSTER_CNT_BACKUP, 0x0);
	mmio_write_32(SYSRAM_RECENT_CNT_TS_H, 0x0);
	mmio_write_32(SYSRAM_RECENT_CNT_TS_L, 0x0);
}

void mtk_cpu_pm_counter_enable(bool enable)
{
	cpu_pm_counter_enabled = enable;
	if (cpu_pm_counter_enabled == false)
		mtk_cpu_pm_counter_clear();
}

bool mtk_cpu_pm_counter_enabled(void)
{
	return cpu_pm_counter_enabled;
}

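/*
 * Per-CPU suspend counts are accumulated locally and flushed to SYSRAM
 * at most once per DUMP_INTERVAL (5 s). The MCUSYS count is derived
 * from the MCUPM TCM counter; if that counter went backwards (e.g. it
 * was reset), the current raw value is used as the delta.
 */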
#define sec_to_us(v)	((v) * 1000 * 1000ULL)
#define DUMP_INTERVAL	sec_to_us(5)
void mtk_cpu_pm_counter_update(unsigned int cpu)
{
#ifdef CONFIG_MTK_CPU_SUSPEND_EN
	unsigned int cnt = 0, curr_mcusys_cnt = 0, mcusys_cnt = 0;
	static unsigned int prev_mcusys_cnt = 0,
			    cpu_cnt[PLATFORM_CORE_COUNT] = {0};
	uint64_t curr_us = 0;
	static uint64_t last_dump_us;
	static bool reset;

	if (is_cpu_pm_counter_enabled() == false) {
		reset = true;
		return;
	}

	if (reset == true) {
		last_dump_us = sched_clock() / 1000;
		prev_mcusys_cnt = mmio_read_32(MCUPM_TCM_MCUSYS_COUNTER);
		mtk_cpu_pm_counter_clear();
		cpu_cnt[cpu] = 0;
		reset = false;
	}

	cpu_cnt[cpu]++;

	curr_us = sched_clock() / 1000;
	if (curr_us - last_dump_us > DUMP_INTERVAL) {
		last_dump_us = curr_us;

		/* CPU off count */
		for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) {
			mmio_write_32(SYSRAM_RECENT_CPU_CNT(cpu),
				      cpu_cnt[cpu]);
			cpu_cnt[cpu] = 0;
		}

		/* Cluster off count */
		curr_mcusys_cnt = mmio_read_32(MCUPM_TCM_MCUSYS_COUNTER);
		if (curr_mcusys_cnt >= prev_mcusys_cnt)
			mcusys_cnt = curr_mcusys_cnt - prev_mcusys_cnt;
		else
			mcusys_cnt = curr_mcusys_cnt;
		prev_mcusys_cnt = mmio_read_32(MCUPM_TCM_MCUSYS_COUNTER);

		cnt = mmio_read_32(CPC_MCUSYS_CLUSTER_COUNTER);
		/**
		 * bit[0:15] : memory retention
		 * bit[16:31] : memory off
		 */
		if ((cnt & MCUSYS_CLUSTER_DORMANT_MASK) == 0)
			cnt = ((cnt >> 16) & MCUSYS_CLUSTER_DORMANT_MASK);
		else
			cnt = cnt & MCUSYS_CLUSTER_DORMANT_MASK;
		cnt += mmio_read_32(SYSRAM_CLUSTER_CNT_BACKUP);
		cnt += mcusys_cnt;

		mmio_write_32(SYSRAM_RECENT_CLUSTER_CNT, cnt);
		mmio_write_32(SYSRAM_CPUSYS_CNT,
			      cnt + mmio_read_32(SYSRAM_CPUSYS_CNT));
		mmio_write_32(CPC_MCUSYS_CLUSTER_COUNTER_CLR, 0x3);
		mmio_write_32(SYSRAM_CLUSTER_CNT_BACKUP, 0x0);

		/* MCUSYS off count */
		mmio_write_32(SYSRAM_RECENT_MCUSYS_CNT,
			      mcusys_cnt);

		mmio_write_32(SYSRAM_MCUSYS_CNT,
			      mmio_read_32(SYSRAM_MCUSYS_CNT) + mcusys_cnt);

		mmio_write_32(SYSRAM_RECENT_CNT_TS_H,
			      (unsigned int)((last_dump_us >> 32) & 0xFFFFFFFF));

		mmio_write_32(SYSRAM_RECENT_CNT_TS_L,
			      (unsigned int)(last_dump_us & 0xFFFFFFFF));
	}
#endif /* CONFIG_MTK_CPU_SUSPEND_EN */
}

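/*
 * Track a running sum together with min/max; a min of 0 means "no
 * sample yet" and is overwritten by the first non-zero latency.
 */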
#define __mtk_cpc_record_lat(sum, min, max, lat)	\
	do {						\
		if ((lat) > (max))			\
			(max) = (lat);			\
		if (((lat) < (min)) || ((min) == 0))	\
			(min) = (lat);			\
		(sum) += (lat);				\
	} while (0)

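/*
 * The profiling records are shared by all cores, so updates are
 * serialized. A bakery lock is used when MT_CPU_PM_USING_BAKERY_LOCK is
 * defined (bakery locks do not rely on hardware exclusives, which suits
 * paths where coherency may be off); otherwise a plain spinlock is
 * used.
 */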
#ifdef MT_CPU_PM_USING_BAKERY_LOCK
DEFINE_BAKERY_LOCK(mt_cpu_pm_cpc_lock);
#define plat_cpu_pm_cpc_lock_init() bakery_lock_init(&mt_cpu_pm_cpc_lock)
#define plat_cpu_pm_cpc_lock() bakery_lock_get(&mt_cpu_pm_cpc_lock)
#define plat_cpu_pm_cpc_unlock() bakery_lock_release(&mt_cpu_pm_cpc_lock)
#else
spinlock_t mt_cpu_pm_cpc_lock;
#define plat_cpu_pm_cpc_lock_init()
#define plat_cpu_pm_cpc_lock() spin_lock(&mt_cpu_pm_cpc_lock)
#define plat_cpu_pm_cpc_unlock() spin_unlock(&mt_cpu_pm_cpc_lock)
#endif /* MT_CPU_PM_USING_BAKERY_LOCK */

static void mtk_cpc_record_lat(struct mtk_cpc_lat_data *lat,
			       unsigned int on_ticks, unsigned int off_ticks)
{
	if ((on_ticks == 0) || (off_ticks == 0))
		return;

	__mtk_cpc_record_lat(lat->on_sum, lat->on_min, lat->on_max, on_ticks);
	lat->on_cnt++;
	__mtk_cpc_record_lat(lat->off_sum, lat->off_min,
			     lat->off_max, off_ticks);
	lat->off_cnt++;
}

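/*
 * Read back the 16-bit on/off latency counters for the domain that just
 * cycled (CPU, CPUSYS cluster or MCUSYS) and fold them into the shared
 * profiling records under the CPC lock.
 */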
#define CPC_CPU_LATENCY_MASK	0xFFFF
void mtk_cpu_pm_save_cpc_latency(enum dev_type dev_type)
{
	unsigned int lat = 0, lat_on = 0, lat_off = 0;
	struct mtk_cpc_lat_data *lat_data = NULL;

	if (mtk_cpc_prof_is_enabled() == false)
		return;

	plat_cpu_pm_cpc_lock();

	if (dev_type < DEV_TYPE_CPUSYS) {
		lat = mmio_read_32(CPC_CPU_ON_LATENCY(dev_type));
		lat_on = lat & CPC_CPU_LATENCY_MASK;
		lat = mmio_read_32(CPC_CPU_OFF_LATENCY(dev_type));
		lat_off = lat & CPC_CPU_LATENCY_MASK;
		lat_data = &cpc_dev.cpu[dev_type];
	} else if (dev_type == DEV_TYPE_CPUSYS) {
		lat_on = mmio_read_32(CPC_CLUSTER_ON_LATENCY);
		lat_on = lat_on & CPC_CPU_LATENCY_MASK;
		lat_off = mmio_read_32(CPC_CLUSTER_OFF_LATENCY);
		lat_off = lat_off & CPC_CPU_LATENCY_MASK;
		lat_data = &cpc_dev.cluster;
	} else if (dev_type == DEV_TYPE_MCUSYS) {
		lat = mmio_read_32(CPC_MCUSYS_ON_LATENCY);
		lat_on = lat & CPC_CPU_LATENCY_MASK;
		lat = mmio_read_32(CPC_MCUSYS_OFF_LATENCY);
		lat_off = lat & CPC_CPU_LATENCY_MASK;
		lat_data = &cpc_dev.mcusys;
	}

	if (lat_data)
		mtk_cpc_record_lat(lat_data, lat_on, lat_off);

	plat_cpu_pm_cpc_unlock();
}

#define RVBARADDR_ONKEEPON_SEL			(MCUCFG_BASE + 0x388)

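/*
 * One-time CPC setup: seed the platform auto-dormant config (auto-off
 * is disabled when the ACP FSM is in use), bring up SMP bookkeeping,
 * clear the suspend counters, program the dormant threshold and enable
 * the CPC debug/off-prepare features before turning the CPC flow on.
 */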
void mtk_cpc_init(void)
{
	struct mtk_plat_dev_config cfg = {
#ifndef CPU_PM_ACP_FSM
		.auto_off = 1,
#else
		.auto_off = 0,
#endif /* CPU_PM_ACP_FSM */
		.auto_thres_us = MTK_CPC_AUTO_DORMANT_THR_US,
	};

	if (mmio_read_32(RVBARADDR_ONKEEPON_SEL) == 0x1) {
		ERROR("ONKEEPON_SEL=%x, CPC_FLOW_CTRL_CFG=%x\n",
		      mmio_read_32(RVBARADDR_ONKEEPON_SEL),
		      mmio_read_32(CPC_MCUSYS_CPC_FLOW_CTRL_CFG));
		mmio_write_32(RVBARADDR_ONKEEPON_SEL, 0x1);
	}

#if CONFIG_MTK_SMP_EN
	mt_smp_init();
#endif /* CONFIG_MTK_SMP_EN */

#if CONFIG_MTK_CPU_SUSPEND_EN
	mtk_cpu_pm_counter_clear();
#endif /* CONFIG_MTK_CPU_SUSPEND_EN */

	mtk_cpc_auto_dormant_en(cfg.auto_off);
	mtk_cpc_auto_dormant_tick(cfg.auto_thres_us);

	mmio_setbits_32(CPC_MCUSYS_CPC_DBG_SETTING,
			CPC_DBG_EN | CPC_CALC_EN);

	mmio_setbits_32(CPC_MCUSYS_CPC_FLOW_CTRL_CFG,
			CPC_OFF_PRE_EN);

	/* enable CPC */
	mmio_setbits_32(CPC_MCUSYS_CPC_FLOW_CTRL_CFG, CPC_CTRL_ENABLE);

	plat_cpu_pm_cpc_lock_init();
}