xref: /OK3568_Linux_fs/kernel/drivers/soc/rockchip/rockchip_performance.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2022 Rockchip Electronics Co., Ltd.
 */
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <soc/rockchip/rockchip_performance.h>
#include <../../kernel/sched/sched.h>

static int perf_level = CONFIG_ROCKCHIP_PERFORMANCE_LEVEL;
static cpumask_var_t cpul_mask, cpub_mask;
static bool perf_init_done;
static DEFINE_MUTEX(update_mutex);

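/*
 * Update the system-wide default uclamp.min for RT tasks, mark uclamp as
 * in use, and call the Rockchip helper that resyncs the new default.
 * Compiles to an empty stub when CONFIG_UCLAMP_TASK is disabled.
 */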
#ifdef CONFIG_UCLAMP_TASK
static inline void set_uclamp_util_min_rt(unsigned int util)
{
	sysctl_sched_uclamp_util_min_rt_default = util;
	static_branch_enable(&sched_uclamp_used);
	rockchip_perf_uclamp_sync_util_min_rt_default();
}
#else
static inline void set_uclamp_util_min_rt(unsigned int util) { };
#endif

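/*
 * Apply a new performance level (callers serialize via update_mutex):
 * level 0 drops the RT uclamp boost, levels 1 and 2 boost RT tasks to
 * full capacity, and any other value derives a boost from an
 * energy-efficient operating point in CPU0's energy model.
 */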
static void update_perf_level_locked(int level)
{
	struct em_perf_domain *em;
	unsigned long target_cost, target_freq, max_freq;
	unsigned long scale_cpu0 = arch_scale_cpu_capacity(0);
	unsigned int uclamp_util_min_rt = scale_cpu0 * 2 / 3;
	int i;

	if (perf_init_done && perf_level == level)
		return;

	perf_level = level;

	if (level == 0) {
		set_uclamp_util_min_rt(0);
		return;
	}

	if ((level == 1) || (level == 2)) {
		set_uclamp_util_min_rt(SCHED_CAPACITY_SCALE);
		return;
	}

	/* Find a more energy-efficient frequency while still considering performance. */
	em = em_cpu_get(0);
	if (em) {
		target_cost = em->table[0].cost + (em->table[0].cost >> 2);

		for (i = 1; i < em->nr_perf_states; i++) {
			if (em->table[i].cost >= target_cost)
				break;
		}
		target_freq = em->table[i-1].frequency;
		max_freq = em->table[em->nr_perf_states-1].frequency;
		uclamp_util_min_rt = scale_cpu0 * target_freq / max_freq;
	}

	/* schedutil reserves 20% headroom; take another 5% off for debounce. */
	uclamp_util_min_rt = uclamp_util_min_rt * 3 / 4;
	set_uclamp_util_min_rt(uclamp_util_min_rt);
}

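/* Serialize level changes with update_mutex. */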
static void update_perf_level(int level)
{
	mutex_lock(&update_mutex);
	update_perf_level_locked(level);
	mutex_unlock(&update_mutex);
}

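/*
 * "level" module parameter setter: only 0, 1 or 2 are accepted, and
 * writes made before initialization completes are silently ignored.
 */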
static int param_set_level(const char *buf, const struct kernel_param *kp)
{
	int ret, level;

	ret = kstrtoint(buf, 10, &level);
	if (ret || (level < 0) || (level > 2))
		return -EINVAL;

	if (!perf_init_done)
		return 0;

	update_perf_level(level);

	return 0;
}

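/* Expose the performance level as the writable "level" module parameter. */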
static const struct kernel_param_ops level_param_ops = {
	.set = param_set_level,
	.get = param_get_int,
};
module_param_cb(level, &level_param_ops, &perf_level, 0644);

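/*
 * Late initcall: sort the possible CPUs into big and little masks by
 * comparing their capacity against 7/8 of SCHED_CAPACITY_SCALE, then
 * apply the Kconfig-selected default level.
 */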
static __init int rockchip_perf_init(void)
{
	int cpu;
	int cpub_min_cap = SCHED_CAPACITY_SCALE - (SCHED_CAPACITY_SCALE >> 3);

	if (!zalloc_cpumask_var(&cpul_mask, GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&cpub_mask, GFP_KERNEL))
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		if (arch_scale_cpu_capacity(cpu) > cpub_min_cap)
			cpumask_set_cpu(cpu, cpub_mask);
		else
			cpumask_set_cpu(cpu, cpul_mask);
	}

	update_perf_level(perf_level);

	perf_init_done = true;

	return 0;
}
late_initcall_sync(rockchip_perf_init);

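/* Report the currently active performance level. */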
int rockchip_perf_get_level(void)
{
	return perf_level;
}

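/*
 * Little-CPU mask, or NULL when the system does not have asymmetric
 * CPU capacities.
 */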
struct cpumask *rockchip_perf_get_cpul_mask(void)
{
	if (static_branch_unlikely(&sched_asym_cpucapacity))
		return cpul_mask;

	return NULL;
}

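/*
 * Big-CPU mask, or NULL when the system does not have asymmetric
 * CPU capacities.
 */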
struct cpumask *rockchip_perf_get_cpub_mask(void)
{
	if (static_branch_unlikely(&sched_asym_cpucapacity))
		return cpub_mask;

	return NULL;
}

#ifdef CONFIG_SMP
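/*
 * RT CPU selection hook: at level 0 the candidates from lowest_mask are
 * restricted to the little cluster, at level 2 to the big cluster.
 * prev_cpu is kept when it is still a candidate, otherwise the first CPU
 * of the restricted mask is chosen; if none fits, prev_cpu is returned.
 */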
int rockchip_perf_select_rt_cpu(int prev_cpu, struct cpumask *lowest_mask)
{
	struct cpumask target_mask;
	int cpu = nr_cpu_ids;

	if (!perf_init_done)
		return prev_cpu;

	if (static_branch_unlikely(&sched_asym_cpucapacity)) {
		if (perf_level == 0)
			cpumask_and(&target_mask, lowest_mask, cpul_mask);
		if (perf_level == 2)
			cpumask_and(&target_mask, lowest_mask, cpub_mask);

		if (cpumask_test_cpu(prev_cpu, &target_mask))
			return prev_cpu;

		cpu = cpumask_first(&target_mask);

		if (cpu < nr_cpu_ids)
			return cpu;
	}

	return prev_cpu;
}

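/*
 * Report whether an RT task on @cpu is considered misplaced: at level 0
 * big CPUs count as misfits, at level 2 little CPUs do.
 */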
bool rockchip_perf_misfit_rt(int cpu)
{
	if (!perf_init_done)
		return false;

	if (static_branch_unlikely(&sched_asym_cpucapacity)) {
		if ((perf_level == 0) && cpumask_test_cpu(cpu, cpub_mask))
			return true;
		if ((perf_level == 2) && cpumask_test_cpu(cpu, cpul_mask))
			return true;
	}

	return false;
}
#endif /* CONFIG_SMP */