xref: /OK3568_Linux_fs/kernel/include/linux/sched/topology.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TOPOLOGY_H
#define _LINUX_SCHED_TOPOLOGY_H

#include <linux/topology.h>
#include <linux/android_kabi.h>
#include <linux/android_vendor.h>

#include <linux/sched/idle.h>

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP

/* Generate SD flag indexes */
#define SD_FLAG(name, mflags) __##name,
enum {
	#include <linux/sched/sd_flags.h>
	__SD_FLAG_CNT,
};
#undef SD_FLAG
/* Generate SD flag bits */
#define SD_FLAG(name, mflags) name = 1 << __##name,
enum {
	#include <linux/sched/sd_flags.h>
};
#undef SD_FLAG
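/*
 * Illustration (not part of the upstream header): each SD_FLAG(name, mflags)
 * entry in <linux/sched/sd_flags.h> is expanded twice by the X-macro above.
 * Assuming sd_flags.h contains, for example,
 *
 *	SD_FLAG(SD_BALANCE_NEWIDLE, SDF_SHARED_CHILD)
 *
 * the first expansion emits an index enumerator and the second the matching
 * power-of-two flag:
 *
 *	__SD_BALANCE_NEWIDLE,
 *	SD_BALANCE_NEWIDLE = 1 << __SD_BALANCE_NEWIDLE,
 *
 * so __SD_FLAG_CNT ends up holding the total number of SD flags.
 */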

#ifdef CONFIG_SCHED_DEBUG

struct sd_flag_debug {
	unsigned int meta_flags;
	char *name;
};
extern const struct sd_flag_debug sd_flag_debug[];

#endif

#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_SCHED_MC
static inline int cpu_core_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_NUMA
static inline int cpu_numa_flags(void)
{
	return SD_NUMA;
}
#endif

extern int arch_asym_cpu_priority(int cpu);

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}
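/*
 * A minimal usage sketch (not in the original header): SD_ATTR_INIT is a
 * compound literal, so a default attribute can be spelled as
 *
 *	struct sched_domain_attr attr = SD_ATTR_INIT;
 *
 * where relax_domain_level == -1 means "no request, use the default".
 */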

extern int sched_domain_level_max;

struct sched_group;

struct sched_domain_shared {
	atomic_t	ref;
	atomic_t	nr_busy_cpus;
	int		has_idle_cores;

	ANDROID_VENDOR_DATA(1);
};

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain __rcu *parent;	/* top domain must be null terminated */
	struct sched_domain __rcu *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */

	int nohz_idle;			/* NOHZ IDLE status */
	int flags;			/* See SD_* */
	int level;

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

	/* idle_balance() stats */
	u64 max_newidle_lb_cost;
	unsigned long next_decay_max_lb_cost;

	u64 avg_scan_cost;		/* select_idle_sibling */

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};
	struct sched_domain_shared *shared;

	unsigned int span_weight;

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);

	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[];
};
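/*
 * Because span[] is a flexible array member, a sched_domain is allocated
 * with space for the cpumask appended. A sketch of roughly how the
 * scheduler core sizes such an allocation (illustrative, not a public API):
 *
 *	sd = kzalloc(sizeof(struct sched_domain) + cpumask_size(),
 *		     GFP_KERNEL);
 */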

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}
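/*
 * Typical usage: walk every CPU covered by a domain with the standard
 * cpumask iterator, e.g.
 *
 *	for_each_cpu(cpu, sched_domain_span(sd))
 *		do_something(cpu);
 *
 * (do_something() is a placeholder for illustration, not a kernel function.)
 */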

extern void partition_sched_domains_locked(int ndoms_new,
					   cpumask_var_t doms_new[],
					   struct sched_domain_attr *dattr_new);

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
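/*
 * A sketch of the intended lifecycle, modelled on the cpuset code
 * (illustrative; partition_sched_domains() takes over the array, so the
 * caller does not free it afterwards):
 *
 *	cpumask_var_t *doms = alloc_sched_domains(1);
 *
 *	if (doms) {
 *		cpumask_copy(doms[0], cpu_active_mask);
 *		partition_sched_domains(1, doms, NULL);
 *	}
 */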

bool cpus_share_cache(int this_cpu, int that_cpu);

typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);

#define SDTL_OVERLAP	0x01

struct sd_data {
	struct sched_domain *__percpu *sd;
	struct sched_domain_shared *__percpu *sds;
	struct sched_group *__percpu *sg;
	struct sched_group_capacity *__percpu *sgc;
};

struct sched_domain_topology_level {
	sched_domain_mask_f mask;
	sched_domain_flags_f sd_flags;
	int		    flags;
	int		    numa_level;
	struct sd_data      data;
#ifdef CONFIG_SCHED_DEBUG
	char                *name;
#endif
};

extern void set_sched_topology(struct sched_domain_topology_level *tl);
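/*
 * An architecture installs its own table by passing an array terminated
 * by an entry with a NULL mask function. A hedged sketch modelled on the
 * scheduler's default table (level names such as "MC" and "DIE" are
 * conventional, not mandated):
 *
 *	static struct sched_domain_topology_level my_topology[] = {
 *	#ifdef CONFIG_SCHED_MC
 *		{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 *	#endif
 *		{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
 *		{ NULL, },
 *	};
 *
 *	set_sched_topology(my_topology);
 */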

#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(type)		.name = #type
#else
# define SD_INIT_NAME(type)
#endif

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
			       struct sched_domain_attr *dattr_new)
{
}

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			struct sched_domain_attr *dattr_new)
{
}

static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return true;
}

#endif	/* !CONFIG_SMP */

#ifndef arch_scale_cpu_capacity
/**
 * arch_scale_cpu_capacity - get the capacity scale factor of a given CPU.
 * @cpu: the CPU in question.
 *
 * Return: the CPU scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
 *
 *             max_perf(cpu)
 *      ----------------------------- * SCHED_CAPACITY_SCALE
 *      max(max_perf(c) : c \in CPUs)
 */
static __always_inline
unsigned long arch_scale_cpu_capacity(int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif
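/*
 * An architecture overrides the default above by defining
 * arch_scale_cpu_capacity before this point, typically as a macro in its
 * asm/topology.h. For example, arm64 maps it to the arch_topology helper
 * (shown as an illustration of the override pattern):
 *
 *	#define arch_scale_cpu_capacity topology_get_cpu_scale
 */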

#ifndef arch_scale_thermal_pressure
static __always_inline
unsigned long arch_scale_thermal_pressure(int cpu)
{
	return 0;
}
#endif

#ifndef arch_set_thermal_pressure
static __always_inline
void arch_set_thermal_pressure(const struct cpumask *cpus,
			       unsigned long th_pressure)
{ }
#endif

static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}

#endif /* _LINUX_SCHED_TOPOLOGY_H */