/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. If not, then if local irqs are disabled
 * around the loop, we can deadlock since retry() would always be
 * comparing the latest value of the mems_allowed seqcount against 0 as
 * begin() still would see cpusets_enabled() as false. The enabled -> disabled
 * transition should happen in reverse order for the same reasons (want to stop
 * looking at real value of mems_allowed.sequence in retry() first).
 */
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

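/*
 * Note that the two keys are flipped in opposite orders on the way up and
 * on the way down, which is what provides the begin()-before-retry() and
 * retry()-before-begin() orderings described in the comment above.
 */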
static inline void cpuset_inc(void)
{
	static_branch_inc_cpuslocked(&cpusets_pre_enable_key);
	static_branch_inc_cpuslocked(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_branch_dec_cpuslocked(&cpusets_enabled_key);
	static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
}

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_update_active_cpus_affine(int cpu);
extern void cpuset_wait_for_hotplug(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_node_allowed(node, gfp_mask);
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_zone_allowed(z, gfp_mask);
	return true;
}
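
/*
 * A sketch of the intended call pattern. The allocation step below is
 * illustrative only; the real users of these checks sit in the page
 * allocator's zonelist walk:
 *
 *	for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
 *		if (!cpuset_zone_allowed(zone, gfp_mask))
 *			continue;
 *		... try to allocate from this zone ...
 *	}
 */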

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

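/*
 * The flag test is open-coded here so that, when memory-pressure
 * accounting is disabled, a bump costs only a single global load and no
 * function call (a reading of the intent; the usual rationale for macros
 * of this shape).
 */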
#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
				       struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}

extern bool current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed, such as during page allocation. mems_allowed can be updated
 * in parallel, and depending on the new value an operation can fail,
 * potentially causing process failure. A retry loop with
 * read_mems_allowed_begin and read_mems_allowed_retry prevents these
 * artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!static_branch_unlikely(&cpusets_pre_enable_key))
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!static_branch_unlikely(&cpusets_enabled_key))
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
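
/*
 * A typical retry loop looks like this (a sketch; the allocation helper
 * below is hypothetical, but the cookie pattern matches how the page
 * allocator uses this pair of helpers):
 *
 *	unsigned int cpuset_mems_cookie;
 *	struct page *page;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		page = try_alloc_from_allowed_nodes(gfp_mask, order);
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 */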
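/*
 * Write side of mems_allowed_seq. Interrupts stay disabled across the
 * write section because read_seqcount_begin() spins while the sequence
 * count is odd: an interrupt arriving mid-update whose handler reached
 * read_mems_allowed_begin() on this CPU would otherwise spin forever.
 */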
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}

extern void cpuset_hotplug_workfn(struct work_struct *work);

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) { }

static inline void cpuset_update_active_cpus_affine(int cpu) {}

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_wait_for_hotplug(void) { }

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, task_cpu_possible_mask(p));
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline bool current_cpuset_is_being_rebound(void)
{
	return false;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

static inline void cpuset_hotplug_workfn(struct work_struct *work) {}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */