1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef __LINUX_CPUMASK_H
3*4882a593Smuzhiyun #define __LINUX_CPUMASK_H
4*4882a593Smuzhiyun
5*4882a593Smuzhiyun /*
6*4882a593Smuzhiyun * Cpumasks provide a bitmap suitable for representing the
7*4882a593Smuzhiyun * set of CPU's in a system, one bit position per CPU number. In general,
8*4882a593Smuzhiyun * only nr_cpu_ids (<= NR_CPUS) bits are valid.
9*4882a593Smuzhiyun */
10*4882a593Smuzhiyun #include <linux/kernel.h>
11*4882a593Smuzhiyun #include <linux/threads.h>
12*4882a593Smuzhiyun #include <linux/bitmap.h>
13*4882a593Smuzhiyun #include <linux/atomic.h>
14*4882a593Smuzhiyun #include <linux/bug.h>
15*4882a593Smuzhiyun
16*4882a593Smuzhiyun /* Don't assign or return these: may not be this big! */
17*4882a593Smuzhiyun typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
18*4882a593Smuzhiyun
19*4882a593Smuzhiyun /**
20*4882a593Smuzhiyun * cpumask_bits - get the bits in a cpumask
21*4882a593Smuzhiyun * @maskp: the struct cpumask *
22*4882a593Smuzhiyun *
23*4882a593Smuzhiyun * You should only assume nr_cpu_ids bits of this mask are valid. This is
24*4882a593Smuzhiyun * a macro so it's const-correct.
25*4882a593Smuzhiyun */
26*4882a593Smuzhiyun #define cpumask_bits(maskp) ((maskp)->bits)
27*4882a593Smuzhiyun
28*4882a593Smuzhiyun /**
29*4882a593Smuzhiyun * cpumask_pr_args - printf args to output a cpumask
30*4882a593Smuzhiyun * @maskp: cpumask to be printed
31*4882a593Smuzhiyun *
32*4882a593Smuzhiyun * Can be used to provide arguments for '%*pb[l]' when printing a cpumask.
33*4882a593Smuzhiyun */
34*4882a593Smuzhiyun #define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
35*4882a593Smuzhiyun
#if NR_CPUS == 1
#define nr_cpu_ids 1U
#else
extern unsigned int nr_cpu_ids;
#endif

#ifdef CONFIG_CPUMASK_OFFSTACK
/* Assuming NR_CPUS is huge, a runtime limit is more efficient.  Also,
 * not all bits may be allocated. */
#define nr_cpumask_bits nr_cpu_ids
#else
/* Fixed-size masks: the operators below work on the full compile-time width. */
#define nr_cpumask_bits ((unsigned int)NR_CPUS)
#endif
49*4882a593Smuzhiyun
50*4882a593Smuzhiyun /*
51*4882a593Smuzhiyun * The following particular system cpumasks and operations manage
52*4882a593Smuzhiyun * possible, present, active and online cpus.
53*4882a593Smuzhiyun *
54*4882a593Smuzhiyun * cpu_possible_mask- has bit 'cpu' set iff cpu is populatable
55*4882a593Smuzhiyun * cpu_present_mask - has bit 'cpu' set iff cpu is populated
56*4882a593Smuzhiyun * cpu_online_mask - has bit 'cpu' set iff cpu available to scheduler
57*4882a593Smuzhiyun * cpu_active_mask - has bit 'cpu' set iff cpu available to migration
58*4882a593Smuzhiyun *
59*4882a593Smuzhiyun * If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
60*4882a593Smuzhiyun *
61*4882a593Smuzhiyun * The cpu_possible_mask is fixed at boot time, as the set of CPU id's
62*4882a593Smuzhiyun * that it is possible might ever be plugged in at anytime during the
63*4882a593Smuzhiyun * life of that system boot. The cpu_present_mask is dynamic(*),
64*4882a593Smuzhiyun * representing which CPUs are currently plugged in. And
65*4882a593Smuzhiyun * cpu_online_mask is the dynamic subset of cpu_present_mask,
66*4882a593Smuzhiyun * indicating those CPUs available for scheduling.
67*4882a593Smuzhiyun *
68*4882a593Smuzhiyun * If HOTPLUG is enabled, then cpu_possible_mask is forced to have
69*4882a593Smuzhiyun * all NR_CPUS bits set, otherwise it is just the set of CPUs that
70*4882a593Smuzhiyun * ACPI reports present at boot.
71*4882a593Smuzhiyun *
72*4882a593Smuzhiyun * If HOTPLUG is enabled, then cpu_present_mask varies dynamically,
73*4882a593Smuzhiyun * depending on what ACPI reports as currently plugged in, otherwise
74*4882a593Smuzhiyun * cpu_present_mask is just a copy of cpu_possible_mask.
75*4882a593Smuzhiyun *
76*4882a593Smuzhiyun * (*) Well, cpu_present_mask is dynamic in the hotplug case. If not
77*4882a593Smuzhiyun * hotplug, it's a copy of cpu_possible_mask, hence fixed at boot.
78*4882a593Smuzhiyun *
79*4882a593Smuzhiyun * Subtleties:
80*4882a593Smuzhiyun * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
81*4882a593Smuzhiyun * assumption that their single CPU is online. The UP
82*4882a593Smuzhiyun * cpu_{online,possible,present}_masks are placebos. Changing them
 * will have no useful effect on the following num_*_cpus()
84*4882a593Smuzhiyun * and cpu_*() macros in the UP case. This ugliness is a UP
85*4882a593Smuzhiyun * optimization - don't waste any instructions or memory references
86*4882a593Smuzhiyun * asking if you're online or how many CPUs there are if there is
87*4882a593Smuzhiyun * only one CPU.
88*4882a593Smuzhiyun */
89*4882a593Smuzhiyun
extern struct cpumask __cpu_possible_mask;
extern struct cpumask __cpu_online_mask;
extern struct cpumask __cpu_present_mask;
extern struct cpumask __cpu_active_mask;
/*
 * Read-only views of the masks above: the const cast keeps users from
 * writing through these names; writers must use the __cpu_*_mask objects.
 */
#define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask)
#define cpu_online_mask   ((const struct cpumask *)&__cpu_online_mask)
#define cpu_present_mask  ((const struct cpumask *)&__cpu_present_mask)
#define cpu_active_mask   ((const struct cpumask *)&__cpu_active_mask)
98*4882a593Smuzhiyun
extern atomic_t __num_online_cpus;

#if NR_CPUS > 1
/**
 * num_online_cpus() - Read the number of online CPUs
 *
 * Despite the fact that __num_online_cpus is of type atomic_t, this
 * interface gives only a momentary snapshot and is not protected against
 * concurrent CPU hotplug operations unless invoked from a cpuhp_lock held
 * region.
 */
static inline unsigned int num_online_cpus(void)
{
	return atomic_read(&__num_online_cpus);
}
#define num_possible_cpus()	cpumask_weight(cpu_possible_mask)
#define num_present_cpus()	cpumask_weight(cpu_present_mask)
#define num_active_cpus()	cpumask_weight(cpu_active_mask)
#define cpu_online(cpu)		cpumask_test_cpu((cpu), cpu_online_mask)
#define cpu_possible(cpu)	cpumask_test_cpu((cpu), cpu_possible_mask)
#define cpu_present(cpu)	cpumask_test_cpu((cpu), cpu_present_mask)
#define cpu_active(cpu)		cpumask_test_cpu((cpu), cpu_active_mask)
#else
/* UP: CPU 0 is the only CPU, so all of these reduce to constants. */
#define num_online_cpus()	1U
#define num_possible_cpus()	1U
#define num_present_cpus()	1U
#define num_active_cpus()	1U
#define cpu_online(cpu)		((cpu) == 0)
#define cpu_possible(cpu)	((cpu) == 0)
#define cpu_present(cpu)	((cpu) == 0)
#define cpu_active(cpu)		((cpu) == 0)
#endif

/* Presumably tracks CPUs that have come online at least once since boot;
 * NOTE(review): name-based reading — confirm at the definition site. */
extern cpumask_t cpus_booted_once_mask;
133*4882a593Smuzhiyun
/* Warn once when @cpu is out of range for a @bits-wide cpumask; compiles
 * to nothing unless CONFIG_DEBUG_PER_CPU_MAPS is enabled. */
static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
{
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	WARN_ON_ONCE(cpu >= bits);
#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
}
140*4882a593Smuzhiyun
/* verify cpu argument to cpumask_* operators */
static inline unsigned int cpumask_check(unsigned int cpu)
{
	/* Debug-only range check; @cpu is always passed through unchanged. */
	cpu_max_bits_warn(cpu, nr_cpumask_bits);
	return cpu;
}
147*4882a593Smuzhiyun
148*4882a593Smuzhiyun #if NR_CPUS == 1
149*4882a593Smuzhiyun /* Uniprocessor. Assume all masks are "1". */
/* UP: the only possible first CPU is 0, regardless of the mask contents. */
static inline unsigned int cpumask_first(const struct cpumask *srcp)
{
	(void)srcp;
	return 0;
}
154*4882a593Smuzhiyun
/* UP: the last CPU is always 0, regardless of the mask contents. */
static inline unsigned int cpumask_last(const struct cpumask *srcp)
{
	(void)srcp;
	return 0;
}
159*4882a593Smuzhiyun
/* Valid inputs for n are -1 and 0. */
static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
	(void)srcp;	/* UP: mask is irrelevant, simply step past @n */
	return n + 1;
}
165*4882a593Smuzhiyun
/* UP variant: the next clear bit is simply the next position after @n. */
static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
	(void)srcp;
	return n + 1;
}
170*4882a593Smuzhiyun
/* UP variant: both masks are placebos, so this behaves like cpumask_next(). */
static inline unsigned int cpumask_next_and(int n,
					    const struct cpumask *srcp,
					    const struct cpumask *andp)
{
	(void)srcp;
	(void)andp;
	return n + 1;
}
177*4882a593Smuzhiyun
static inline unsigned int cpumask_next_wrap(int n, const struct cpumask *mask,
					     int start, bool wrap)
{
	(void)mask;
	(void)start;
	/* cpu0 unless stop condition, wrap and at cpu0, then nr_cpumask_bits */
	return (n == 0 && wrap) ? 1 : 0;
}
184*4882a593Smuzhiyun
/* cpu must be a valid cpu, ie 0, so there's no other choice. */
static inline unsigned int cpumask_any_but(const struct cpumask *mask,
					   unsigned int cpu)
{
	(void)mask;
	(void)cpu;
	return 1;	/* >= nr_cpu_ids: no CPU other than 0 exists */
}
191*4882a593Smuzhiyun
/* UP: spreading over one CPU always lands on CPU 0. */
static inline unsigned int cpumask_local_spread(unsigned int i, int node)
{
	(void)i;
	(void)node;
	return 0;
}
196*4882a593Smuzhiyun
/* UP: cpumask_next_and(-1, ...) always yields 0 here (the only CPU). */
static inline int cpumask_any_and_distribute(const struct cpumask *src1p,
		const struct cpumask *src2p) {
	return cpumask_next_and(-1, src1p, src2p);
}
201*4882a593Smuzhiyun
/* UP iteration helpers: visit CPU 0 exactly once.  The (void) casts keep
 * the mask/start arguments referenced so they do not trigger unused
 * warnings; they are never evaluated for their contents. */
#define for_each_cpu(cpu, mask)			\
	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
#define for_each_cpu_not(cpu, mask)		\
	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
#define for_each_cpu_wrap(cpu, mask, start)	\
	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start))
#define for_each_cpu_and(cpu, mask1, mask2)	\
	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2)
210*4882a593Smuzhiyun #else
211*4882a593Smuzhiyun /**
212*4882a593Smuzhiyun * cpumask_first - get the first cpu in a cpumask
213*4882a593Smuzhiyun * @srcp: the cpumask pointer
214*4882a593Smuzhiyun *
215*4882a593Smuzhiyun * Returns >= nr_cpu_ids if no cpus set.
216*4882a593Smuzhiyun */
cpumask_first(const struct cpumask * srcp)217*4882a593Smuzhiyun static inline unsigned int cpumask_first(const struct cpumask *srcp)
218*4882a593Smuzhiyun {
219*4882a593Smuzhiyun return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
220*4882a593Smuzhiyun }
221*4882a593Smuzhiyun
222*4882a593Smuzhiyun /**
223*4882a593Smuzhiyun * cpumask_last - get the last CPU in a cpumask
224*4882a593Smuzhiyun * @srcp: - the cpumask pointer
225*4882a593Smuzhiyun *
226*4882a593Smuzhiyun * Returns >= nr_cpumask_bits if no CPUs set.
227*4882a593Smuzhiyun */
cpumask_last(const struct cpumask * srcp)228*4882a593Smuzhiyun static inline unsigned int cpumask_last(const struct cpumask *srcp)
229*4882a593Smuzhiyun {
230*4882a593Smuzhiyun return find_last_bit(cpumask_bits(srcp), nr_cpumask_bits);
231*4882a593Smuzhiyun }
232*4882a593Smuzhiyun
233*4882a593Smuzhiyun unsigned int cpumask_next(int n, const struct cpumask *srcp);
234*4882a593Smuzhiyun
235*4882a593Smuzhiyun /**
236*4882a593Smuzhiyun * cpumask_next_zero - get the next unset cpu in a cpumask
237*4882a593Smuzhiyun * @n: the cpu prior to the place to search (ie. return will be > @n)
238*4882a593Smuzhiyun * @srcp: the cpumask pointer
239*4882a593Smuzhiyun *
240*4882a593Smuzhiyun * Returns >= nr_cpu_ids if no further cpus unset.
241*4882a593Smuzhiyun */
cpumask_next_zero(int n,const struct cpumask * srcp)242*4882a593Smuzhiyun static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
243*4882a593Smuzhiyun {
244*4882a593Smuzhiyun /* -1 is a legal arg here. */
245*4882a593Smuzhiyun if (n != -1)
246*4882a593Smuzhiyun cpumask_check(n);
247*4882a593Smuzhiyun return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
248*4882a593Smuzhiyun }
249*4882a593Smuzhiyun
250*4882a593Smuzhiyun int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
251*4882a593Smuzhiyun int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
252*4882a593Smuzhiyun unsigned int cpumask_local_spread(unsigned int i, int node);
253*4882a593Smuzhiyun int cpumask_any_and_distribute(const struct cpumask *src1p,
254*4882a593Smuzhiyun const struct cpumask *src2p);
255*4882a593Smuzhiyun
/**
 * for_each_cpu - iterate over every cpu in a mask
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the cpumask pointer
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu(cpu, mask)				\
	for ((cpu) = -1;				\
		(cpu) = cpumask_next((cpu), (mask)),	\
		(cpu) < nr_cpu_ids;)

/**
 * for_each_cpu_not - iterate over every cpu in a complemented mask
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the cpumask pointer
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_not(cpu, mask)				\
	for ((cpu) = -1;					\
		(cpu) = cpumask_next_zero((cpu), (mask)),	\
		(cpu) < nr_cpu_ids;)

extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);

/**
 * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the cpumask pointer
 * @start: the start location
 *
 * The implementation does not assume any bit in @mask is set (including @start).
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_wrap(cpu, mask, start)					\
	for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false);	\
		(cpu) < nr_cpumask_bits;					\
		(cpu) = cpumask_next_wrap((cpu), (mask), (start), true))

/**
 * for_each_cpu_and - iterate over every cpu in both masks
 * @cpu: the (optionally unsigned) integer iterator
 * @mask1: the first cpumask pointer
 * @mask2: the second cpumask pointer
 *
 * This saves a temporary CPU mask in many places.  It is equivalent to:
 *	struct cpumask tmp;
 *	cpumask_and(&tmp, &mask1, &mask2);
 *	for_each_cpu(cpu, &tmp)
 *		...
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_and(cpu, mask1, mask2)				\
	for ((cpu) = -1;						\
		(cpu) = cpumask_next_and((cpu), (mask1), (mask2)),	\
		(cpu) < nr_cpu_ids;)
#endif /* SMP */
316*4882a593Smuzhiyun
/* Static initializer for a cpumask's bits[]: all CPUs clear. */
#define CPU_BITS_NONE						\
{								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL			\
}

/* Static initializer for a cpumask's bits[]: only CPU 0 set. */
#define CPU_BITS_CPU0						\
{								\
	[0] = 1UL						\
}
326*4882a593Smuzhiyun
/**
 * cpumask_set_cpu - set a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @dstp: the cpumask pointer
 *
 * Wraps set_bit(); see __cpumask_set_cpu() for the __set_bit() variant.
 */
static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
	unsigned int bit = cpumask_check(cpu);

	set_bit(bit, cpumask_bits(dstp));
}
336*4882a593Smuzhiyun
/* As cpumask_set_cpu(), but wrapping __set_bit() instead of set_bit(). */
static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
	unsigned int bit = cpumask_check(cpu);

	__set_bit(bit, cpumask_bits(dstp));
}
341*4882a593Smuzhiyun
342*4882a593Smuzhiyun
/**
 * cpumask_clear_cpu - clear a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @dstp: the cpumask pointer
 *
 * Wraps clear_bit(); see __cpumask_clear_cpu() for the __clear_bit() variant.
 */
static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
	unsigned int bit = cpumask_check(cpu);

	clear_bit(bit, cpumask_bits(dstp));
}
352*4882a593Smuzhiyun
/* As cpumask_clear_cpu(), but wrapping __clear_bit() instead of clear_bit(). */
static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
	unsigned int bit = cpumask_check(cpu);

	__clear_bit(bit, cpumask_bits(dstp));
}
357*4882a593Smuzhiyun
/**
 * cpumask_test_cpu - test for a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @cpumask: the cpumask pointer
 *
 * Returns 1 if @cpu is set in @cpumask, else returns 0
 */
static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
	unsigned int bit = cpumask_check(cpu);

	return test_bit(bit, cpumask_bits(cpumask));
}
369*4882a593Smuzhiyun
/**
 * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @cpumask: the cpumask pointer
 *
 * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
 *
 * test_and_set_bit wrapper for cpumasks.
 */
static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
	unsigned int bit = cpumask_check(cpu);

	return test_and_set_bit(bit, cpumask_bits(cpumask));
}
383*4882a593Smuzhiyun
/**
 * cpumask_test_and_clear_cpu - atomically test and clear a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @cpumask: the cpumask pointer
 *
 * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
 *
 * test_and_clear_bit wrapper for cpumasks.
 */
static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
	unsigned int bit = cpumask_check(cpu);

	return test_and_clear_bit(bit, cpumask_bits(cpumask));
}
397*4882a593Smuzhiyun
398*4882a593Smuzhiyun /**
399*4882a593Smuzhiyun * cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
400*4882a593Smuzhiyun * @dstp: the cpumask pointer
401*4882a593Smuzhiyun */
cpumask_setall(struct cpumask * dstp)402*4882a593Smuzhiyun static inline void cpumask_setall(struct cpumask *dstp)
403*4882a593Smuzhiyun {
404*4882a593Smuzhiyun bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits);
405*4882a593Smuzhiyun }
406*4882a593Smuzhiyun
407*4882a593Smuzhiyun /**
408*4882a593Smuzhiyun * cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask
409*4882a593Smuzhiyun * @dstp: the cpumask pointer
410*4882a593Smuzhiyun */
cpumask_clear(struct cpumask * dstp)411*4882a593Smuzhiyun static inline void cpumask_clear(struct cpumask *dstp)
412*4882a593Smuzhiyun {
413*4882a593Smuzhiyun bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits);
414*4882a593Smuzhiyun }
415*4882a593Smuzhiyun
416*4882a593Smuzhiyun /**
417*4882a593Smuzhiyun * cpumask_and - *dstp = *src1p & *src2p
418*4882a593Smuzhiyun * @dstp: the cpumask result
419*4882a593Smuzhiyun * @src1p: the first input
420*4882a593Smuzhiyun * @src2p: the second input
421*4882a593Smuzhiyun *
422*4882a593Smuzhiyun * If *@dstp is empty, returns 0, else returns 1
423*4882a593Smuzhiyun */
cpumask_and(struct cpumask * dstp,const struct cpumask * src1p,const struct cpumask * src2p)424*4882a593Smuzhiyun static inline int cpumask_and(struct cpumask *dstp,
425*4882a593Smuzhiyun const struct cpumask *src1p,
426*4882a593Smuzhiyun const struct cpumask *src2p)
427*4882a593Smuzhiyun {
428*4882a593Smuzhiyun return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
429*4882a593Smuzhiyun cpumask_bits(src2p), nr_cpumask_bits);
430*4882a593Smuzhiyun }
431*4882a593Smuzhiyun
432*4882a593Smuzhiyun /**
433*4882a593Smuzhiyun * cpumask_or - *dstp = *src1p | *src2p
434*4882a593Smuzhiyun * @dstp: the cpumask result
435*4882a593Smuzhiyun * @src1p: the first input
436*4882a593Smuzhiyun * @src2p: the second input
437*4882a593Smuzhiyun */
cpumask_or(struct cpumask * dstp,const struct cpumask * src1p,const struct cpumask * src2p)438*4882a593Smuzhiyun static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
439*4882a593Smuzhiyun const struct cpumask *src2p)
440*4882a593Smuzhiyun {
441*4882a593Smuzhiyun bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
442*4882a593Smuzhiyun cpumask_bits(src2p), nr_cpumask_bits);
443*4882a593Smuzhiyun }
444*4882a593Smuzhiyun
445*4882a593Smuzhiyun /**
446*4882a593Smuzhiyun * cpumask_xor - *dstp = *src1p ^ *src2p
447*4882a593Smuzhiyun * @dstp: the cpumask result
448*4882a593Smuzhiyun * @src1p: the first input
449*4882a593Smuzhiyun * @src2p: the second input
450*4882a593Smuzhiyun */
cpumask_xor(struct cpumask * dstp,const struct cpumask * src1p,const struct cpumask * src2p)451*4882a593Smuzhiyun static inline void cpumask_xor(struct cpumask *dstp,
452*4882a593Smuzhiyun const struct cpumask *src1p,
453*4882a593Smuzhiyun const struct cpumask *src2p)
454*4882a593Smuzhiyun {
455*4882a593Smuzhiyun bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
456*4882a593Smuzhiyun cpumask_bits(src2p), nr_cpumask_bits);
457*4882a593Smuzhiyun }
458*4882a593Smuzhiyun
459*4882a593Smuzhiyun /**
460*4882a593Smuzhiyun * cpumask_andnot - *dstp = *src1p & ~*src2p
461*4882a593Smuzhiyun * @dstp: the cpumask result
462*4882a593Smuzhiyun * @src1p: the first input
463*4882a593Smuzhiyun * @src2p: the second input
464*4882a593Smuzhiyun *
465*4882a593Smuzhiyun * If *@dstp is empty, returns 0, else returns 1
466*4882a593Smuzhiyun */
cpumask_andnot(struct cpumask * dstp,const struct cpumask * src1p,const struct cpumask * src2p)467*4882a593Smuzhiyun static inline int cpumask_andnot(struct cpumask *dstp,
468*4882a593Smuzhiyun const struct cpumask *src1p,
469*4882a593Smuzhiyun const struct cpumask *src2p)
470*4882a593Smuzhiyun {
471*4882a593Smuzhiyun return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
472*4882a593Smuzhiyun cpumask_bits(src2p), nr_cpumask_bits);
473*4882a593Smuzhiyun }
474*4882a593Smuzhiyun
475*4882a593Smuzhiyun /**
476*4882a593Smuzhiyun * cpumask_complement - *dstp = ~*srcp
477*4882a593Smuzhiyun * @dstp: the cpumask result
478*4882a593Smuzhiyun * @srcp: the input to invert
479*4882a593Smuzhiyun */
cpumask_complement(struct cpumask * dstp,const struct cpumask * srcp)480*4882a593Smuzhiyun static inline void cpumask_complement(struct cpumask *dstp,
481*4882a593Smuzhiyun const struct cpumask *srcp)
482*4882a593Smuzhiyun {
483*4882a593Smuzhiyun bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp),
484*4882a593Smuzhiyun nr_cpumask_bits);
485*4882a593Smuzhiyun }
486*4882a593Smuzhiyun
487*4882a593Smuzhiyun /**
488*4882a593Smuzhiyun * cpumask_equal - *src1p == *src2p
489*4882a593Smuzhiyun * @src1p: the first input
490*4882a593Smuzhiyun * @src2p: the second input
491*4882a593Smuzhiyun */
cpumask_equal(const struct cpumask * src1p,const struct cpumask * src2p)492*4882a593Smuzhiyun static inline bool cpumask_equal(const struct cpumask *src1p,
493*4882a593Smuzhiyun const struct cpumask *src2p)
494*4882a593Smuzhiyun {
495*4882a593Smuzhiyun return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
496*4882a593Smuzhiyun nr_cpumask_bits);
497*4882a593Smuzhiyun }
498*4882a593Smuzhiyun
499*4882a593Smuzhiyun /**
500*4882a593Smuzhiyun * cpumask_or_equal - *src1p | *src2p == *src3p
501*4882a593Smuzhiyun * @src1p: the first input
502*4882a593Smuzhiyun * @src2p: the second input
503*4882a593Smuzhiyun * @src3p: the third input
504*4882a593Smuzhiyun */
cpumask_or_equal(const struct cpumask * src1p,const struct cpumask * src2p,const struct cpumask * src3p)505*4882a593Smuzhiyun static inline bool cpumask_or_equal(const struct cpumask *src1p,
506*4882a593Smuzhiyun const struct cpumask *src2p,
507*4882a593Smuzhiyun const struct cpumask *src3p)
508*4882a593Smuzhiyun {
509*4882a593Smuzhiyun return bitmap_or_equal(cpumask_bits(src1p), cpumask_bits(src2p),
510*4882a593Smuzhiyun cpumask_bits(src3p), nr_cpumask_bits);
511*4882a593Smuzhiyun }
512*4882a593Smuzhiyun
513*4882a593Smuzhiyun /**
514*4882a593Smuzhiyun * cpumask_intersects - (*src1p & *src2p) != 0
515*4882a593Smuzhiyun * @src1p: the first input
516*4882a593Smuzhiyun * @src2p: the second input
517*4882a593Smuzhiyun */
cpumask_intersects(const struct cpumask * src1p,const struct cpumask * src2p)518*4882a593Smuzhiyun static inline bool cpumask_intersects(const struct cpumask *src1p,
519*4882a593Smuzhiyun const struct cpumask *src2p)
520*4882a593Smuzhiyun {
521*4882a593Smuzhiyun return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
522*4882a593Smuzhiyun nr_cpumask_bits);
523*4882a593Smuzhiyun }
524*4882a593Smuzhiyun
525*4882a593Smuzhiyun /**
526*4882a593Smuzhiyun * cpumask_subset - (*src1p & ~*src2p) == 0
527*4882a593Smuzhiyun * @src1p: the first input
528*4882a593Smuzhiyun * @src2p: the second input
529*4882a593Smuzhiyun *
530*4882a593Smuzhiyun * Returns 1 if *@src1p is a subset of *@src2p, else returns 0
531*4882a593Smuzhiyun */
cpumask_subset(const struct cpumask * src1p,const struct cpumask * src2p)532*4882a593Smuzhiyun static inline int cpumask_subset(const struct cpumask *src1p,
533*4882a593Smuzhiyun const struct cpumask *src2p)
534*4882a593Smuzhiyun {
535*4882a593Smuzhiyun return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
536*4882a593Smuzhiyun nr_cpumask_bits);
537*4882a593Smuzhiyun }
538*4882a593Smuzhiyun
539*4882a593Smuzhiyun /**
540*4882a593Smuzhiyun * cpumask_empty - *srcp == 0
541*4882a593Smuzhiyun * @srcp: the cpumask to that all cpus < nr_cpu_ids are clear.
542*4882a593Smuzhiyun */
cpumask_empty(const struct cpumask * srcp)543*4882a593Smuzhiyun static inline bool cpumask_empty(const struct cpumask *srcp)
544*4882a593Smuzhiyun {
545*4882a593Smuzhiyun return bitmap_empty(cpumask_bits(srcp), nr_cpumask_bits);
546*4882a593Smuzhiyun }
547*4882a593Smuzhiyun
548*4882a593Smuzhiyun /**
549*4882a593Smuzhiyun * cpumask_full - *srcp == 0xFFFFFFFF...
550*4882a593Smuzhiyun * @srcp: the cpumask to that all cpus < nr_cpu_ids are set.
551*4882a593Smuzhiyun */
cpumask_full(const struct cpumask * srcp)552*4882a593Smuzhiyun static inline bool cpumask_full(const struct cpumask *srcp)
553*4882a593Smuzhiyun {
554*4882a593Smuzhiyun return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
555*4882a593Smuzhiyun }
556*4882a593Smuzhiyun
557*4882a593Smuzhiyun /**
558*4882a593Smuzhiyun * cpumask_weight - Count of bits in *srcp
559*4882a593Smuzhiyun * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
560*4882a593Smuzhiyun */
cpumask_weight(const struct cpumask * srcp)561*4882a593Smuzhiyun static inline unsigned int cpumask_weight(const struct cpumask *srcp)
562*4882a593Smuzhiyun {
563*4882a593Smuzhiyun return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
564*4882a593Smuzhiyun }
565*4882a593Smuzhiyun
566*4882a593Smuzhiyun /**
567*4882a593Smuzhiyun * cpumask_shift_right - *dstp = *srcp >> n
568*4882a593Smuzhiyun * @dstp: the cpumask result
569*4882a593Smuzhiyun * @srcp: the input to shift
570*4882a593Smuzhiyun * @n: the number of bits to shift by
571*4882a593Smuzhiyun */
cpumask_shift_right(struct cpumask * dstp,const struct cpumask * srcp,int n)572*4882a593Smuzhiyun static inline void cpumask_shift_right(struct cpumask *dstp,
573*4882a593Smuzhiyun const struct cpumask *srcp, int n)
574*4882a593Smuzhiyun {
575*4882a593Smuzhiyun bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
576*4882a593Smuzhiyun nr_cpumask_bits);
577*4882a593Smuzhiyun }
578*4882a593Smuzhiyun
579*4882a593Smuzhiyun /**
580*4882a593Smuzhiyun * cpumask_shift_left - *dstp = *srcp << n
581*4882a593Smuzhiyun * @dstp: the cpumask result
582*4882a593Smuzhiyun * @srcp: the input to shift
583*4882a593Smuzhiyun * @n: the number of bits to shift by
584*4882a593Smuzhiyun */
cpumask_shift_left(struct cpumask * dstp,const struct cpumask * srcp,int n)585*4882a593Smuzhiyun static inline void cpumask_shift_left(struct cpumask *dstp,
586*4882a593Smuzhiyun const struct cpumask *srcp, int n)
587*4882a593Smuzhiyun {
588*4882a593Smuzhiyun bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
589*4882a593Smuzhiyun nr_cpumask_bits);
590*4882a593Smuzhiyun }
591*4882a593Smuzhiyun
592*4882a593Smuzhiyun /**
593*4882a593Smuzhiyun * cpumask_copy - *dstp = *srcp
594*4882a593Smuzhiyun * @dstp: the result
595*4882a593Smuzhiyun * @srcp: the input cpumask
596*4882a593Smuzhiyun */
cpumask_copy(struct cpumask * dstp,const struct cpumask * srcp)597*4882a593Smuzhiyun static inline void cpumask_copy(struct cpumask *dstp,
598*4882a593Smuzhiyun const struct cpumask *srcp)
599*4882a593Smuzhiyun {
600*4882a593Smuzhiyun bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits);
601*4882a593Smuzhiyun }
602*4882a593Smuzhiyun
/**
 * cpumask_any - pick a "random" cpu from *srcp
 * @srcp: the input cpumask
 *
 * Returns >= nr_cpu_ids if no cpus set.
 *
 * Note: "random" only means "an arbitrary valid choice"; this simply
 * returns the first set bit, no entropy is involved.
 */
#define cpumask_any(srcp) cpumask_first(srcp)

/**
 * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
 * @src1p: the first input
 * @src2p: the second input
 *
 * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
 *
 * Implemented as cpumask_next_and() starting from -1, i.e. "the CPU
 * after none", which is the first CPU in the intersection.
 */
#define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p))

/**
 * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
 * @mask1: the first input cpumask
 * @mask2: the second input cpumask
 *
 * Returns >= nr_cpu_ids if no cpus set.
 */
#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2))

/**
 * cpumask_of - the cpumask containing just a given cpu
 * @cpu: the cpu (<= nr_cpu_ids)
 *
 * Returns a const pointer into the pre-generated cpu_bit_bitmap table;
 * no allocation takes place.
 */
#define cpumask_of(cpu) (get_cpu_mask(cpu))
634*4882a593Smuzhiyun
635*4882a593Smuzhiyun /**
636*4882a593Smuzhiyun * cpumask_parse_user - extract a cpumask from a user string
637*4882a593Smuzhiyun * @buf: the buffer to extract from
638*4882a593Smuzhiyun * @len: the length of the buffer
639*4882a593Smuzhiyun * @dstp: the cpumask to set.
640*4882a593Smuzhiyun *
641*4882a593Smuzhiyun * Returns -errno, or 0 for success.
642*4882a593Smuzhiyun */
cpumask_parse_user(const char __user * buf,int len,struct cpumask * dstp)643*4882a593Smuzhiyun static inline int cpumask_parse_user(const char __user *buf, int len,
644*4882a593Smuzhiyun struct cpumask *dstp)
645*4882a593Smuzhiyun {
646*4882a593Smuzhiyun return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
647*4882a593Smuzhiyun }
648*4882a593Smuzhiyun
649*4882a593Smuzhiyun /**
650*4882a593Smuzhiyun * cpumask_parselist_user - extract a cpumask from a user string
651*4882a593Smuzhiyun * @buf: the buffer to extract from
652*4882a593Smuzhiyun * @len: the length of the buffer
653*4882a593Smuzhiyun * @dstp: the cpumask to set.
654*4882a593Smuzhiyun *
655*4882a593Smuzhiyun * Returns -errno, or 0 for success.
656*4882a593Smuzhiyun */
cpumask_parselist_user(const char __user * buf,int len,struct cpumask * dstp)657*4882a593Smuzhiyun static inline int cpumask_parselist_user(const char __user *buf, int len,
658*4882a593Smuzhiyun struct cpumask *dstp)
659*4882a593Smuzhiyun {
660*4882a593Smuzhiyun return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
661*4882a593Smuzhiyun nr_cpumask_bits);
662*4882a593Smuzhiyun }
663*4882a593Smuzhiyun
664*4882a593Smuzhiyun /**
665*4882a593Smuzhiyun * cpumask_parse - extract a cpumask from a string
666*4882a593Smuzhiyun * @buf: the buffer to extract from
667*4882a593Smuzhiyun * @dstp: the cpumask to set.
668*4882a593Smuzhiyun *
669*4882a593Smuzhiyun * Returns -errno, or 0 for success.
670*4882a593Smuzhiyun */
cpumask_parse(const char * buf,struct cpumask * dstp)671*4882a593Smuzhiyun static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
672*4882a593Smuzhiyun {
673*4882a593Smuzhiyun return bitmap_parse(buf, UINT_MAX, cpumask_bits(dstp), nr_cpumask_bits);
674*4882a593Smuzhiyun }
675*4882a593Smuzhiyun
676*4882a593Smuzhiyun /**
677*4882a593Smuzhiyun * cpulist_parse - extract a cpumask from a user string of ranges
678*4882a593Smuzhiyun * @buf: the buffer to extract from
679*4882a593Smuzhiyun * @dstp: the cpumask to set.
680*4882a593Smuzhiyun *
681*4882a593Smuzhiyun * Returns -errno, or 0 for success.
682*4882a593Smuzhiyun */
cpulist_parse(const char * buf,struct cpumask * dstp)683*4882a593Smuzhiyun static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
684*4882a593Smuzhiyun {
685*4882a593Smuzhiyun return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
686*4882a593Smuzhiyun }
687*4882a593Smuzhiyun
688*4882a593Smuzhiyun /**
689*4882a593Smuzhiyun * cpumask_size - size to allocate for a 'struct cpumask' in bytes
690*4882a593Smuzhiyun */
cpumask_size(void)691*4882a593Smuzhiyun static inline unsigned int cpumask_size(void)
692*4882a593Smuzhiyun {
693*4882a593Smuzhiyun return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long);
694*4882a593Smuzhiyun }
695*4882a593Smuzhiyun
696*4882a593Smuzhiyun /*
697*4882a593Smuzhiyun * cpumask_var_t: struct cpumask for stack usage.
698*4882a593Smuzhiyun *
699*4882a593Smuzhiyun * Oh, the wicked games we play! In order to make kernel coding a
700*4882a593Smuzhiyun * little more difficult, we typedef cpumask_var_t to an array or a
701*4882a593Smuzhiyun * pointer: doing &mask on an array is a noop, so it still works.
702*4882a593Smuzhiyun *
703*4882a593Smuzhiyun * ie.
704*4882a593Smuzhiyun * cpumask_var_t tmpmask;
705*4882a593Smuzhiyun * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
706*4882a593Smuzhiyun * return -ENOMEM;
707*4882a593Smuzhiyun *
708*4882a593Smuzhiyun * ... use 'tmpmask' like a normal struct cpumask * ...
709*4882a593Smuzhiyun *
710*4882a593Smuzhiyun * free_cpumask_var(tmpmask);
711*4882a593Smuzhiyun *
712*4882a593Smuzhiyun *
 * However, there is one notable exception: alloc_cpumask_var() allocates
 * only nr_cpumask_bits bits (whereas the real cpumask_t always has
 * NR_CPUS bits). Therefore you must not dereference a cpumask_var_t:
716*4882a593Smuzhiyun *
717*4882a593Smuzhiyun * cpumask_var_t tmpmask;
718*4882a593Smuzhiyun * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
719*4882a593Smuzhiyun * return -ENOMEM;
720*4882a593Smuzhiyun *
721*4882a593Smuzhiyun * var = *tmpmask;
722*4882a593Smuzhiyun *
 * This code performs a memcpy of NR_CPUS length and leads to memory
 * corruption. cpumask_copy() provides safe copy functionality.
725*4882a593Smuzhiyun *
 * Note that there is another evil here: if you define a cpumask_var_t
 * as a percpu variable, the way to obtain the address of the cpumask
 * structure differs between the two implementations, and so does the
 * this_cpu_* operation that must be used. Please use
 * this_cpu_cpumask_var_t in those cases. The direct use of
 * this_cpu_ptr() or this_cpu_read() will lead to failures when the
 * other type of cpumask_var_t implementation is configured.
732*4882a593Smuzhiyun *
733*4882a593Smuzhiyun * Please also note that __cpumask_var_read_mostly can be used to declare
734*4882a593Smuzhiyun * a cpumask_var_t variable itself (not its content) as read mostly.
735*4882a593Smuzhiyun */
#ifdef CONFIG_CPUMASK_OFFSTACK
/* Off-stack variant: cpumask_var_t is a real pointer, heap-allocated. */
typedef struct cpumask *cpumask_var_t;

#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x)
#define __cpumask_var_read_mostly __read_mostly

/* Allocators/liberators implemented out of line in lib/cpumask.c. */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask);

/* True if the cpumask was successfully allocated (i.e. non-NULL). */
static inline bool cpumask_available(cpumask_var_t mask)
{
	return mask != NULL;
}
754*4882a593Smuzhiyun
#else
/* On-stack variant: a one-element array, so &mask and mask both work. */
typedef struct cpumask cpumask_var_t[1];

#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)
#define __cpumask_var_read_mostly

/* Allocation always "succeeds": the storage is part of the variable. */
static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return true;
}

static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
					  int node)
{
	return true;
}

/* The z-variants additionally clear the mask, mirroring kzalloc(). */
static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	cpumask_clear(*mask);
	return true;
}

static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
					   int node)
{
	cpumask_clear(*mask);
	return true;
}

static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
}

/* Nothing to free: storage lives inside the variable itself. */
static inline void free_cpumask_var(cpumask_var_t mask)
{
}

static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
{
}

/* On-stack masks are always "available". */
static inline bool cpumask_available(cpumask_var_t mask)
{
	return true;
}
#endif /* CONFIG_CPUMASK_OFFSTACK */
802*4882a593Smuzhiyun
/* It's common to want to use cpu_all_mask in struct member initializers,
 * so it has to refer to an address rather than a pointer. */
extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
#define cpu_all_mask to_cpumask(cpu_all_bits)

/* First bits of cpu_bit_bitmap are in fact unset. */
#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])

/* Iterators over the global CPU masks; @cpu takes each set CPU id in turn. */
#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
#define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask)
#define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask)

/* Wrappers for arch boot code to manipulate normally-constant masks */
void init_cpu_present(const struct cpumask *src);
void init_cpu_possible(const struct cpumask *src);
void init_cpu_online(const struct cpumask *src);
819*4882a593Smuzhiyun
/*
 * Clear the whole NR_CPUS-wide possible mask (note: deliberately the
 * full NR_CPUS, not just nr_cpumask_bits) so arch boot code can rebuild
 * it from scratch.
 */
static inline void reset_cpu_possible_mask(void)
{
	bitmap_zero(cpumask_bits(&__cpu_possible_mask), NR_CPUS);
}
824*4882a593Smuzhiyun
825*4882a593Smuzhiyun static inline void
set_cpu_possible(unsigned int cpu,bool possible)826*4882a593Smuzhiyun set_cpu_possible(unsigned int cpu, bool possible)
827*4882a593Smuzhiyun {
828*4882a593Smuzhiyun if (possible)
829*4882a593Smuzhiyun cpumask_set_cpu(cpu, &__cpu_possible_mask);
830*4882a593Smuzhiyun else
831*4882a593Smuzhiyun cpumask_clear_cpu(cpu, &__cpu_possible_mask);
832*4882a593Smuzhiyun }
833*4882a593Smuzhiyun
834*4882a593Smuzhiyun static inline void
set_cpu_present(unsigned int cpu,bool present)835*4882a593Smuzhiyun set_cpu_present(unsigned int cpu, bool present)
836*4882a593Smuzhiyun {
837*4882a593Smuzhiyun if (present)
838*4882a593Smuzhiyun cpumask_set_cpu(cpu, &__cpu_present_mask);
839*4882a593Smuzhiyun else
840*4882a593Smuzhiyun cpumask_clear_cpu(cpu, &__cpu_present_mask);
841*4882a593Smuzhiyun }
842*4882a593Smuzhiyun
843*4882a593Smuzhiyun void set_cpu_online(unsigned int cpu, bool online);
844*4882a593Smuzhiyun
845*4882a593Smuzhiyun static inline void
set_cpu_active(unsigned int cpu,bool active)846*4882a593Smuzhiyun set_cpu_active(unsigned int cpu, bool active)
847*4882a593Smuzhiyun {
848*4882a593Smuzhiyun if (active)
849*4882a593Smuzhiyun cpumask_set_cpu(cpu, &__cpu_active_mask);
850*4882a593Smuzhiyun else
851*4882a593Smuzhiyun cpumask_clear_cpu(cpu, &__cpu_active_mask);
852*4882a593Smuzhiyun }
853*4882a593Smuzhiyun
854*4882a593Smuzhiyun
/**
 * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
 * @bitmap: the bitmap
 *
 * There are a few places where cpumask_var_t isn't appropriate and
 * static cpumasks must be used (eg. very early boot), yet we don't
 * expose the definition of 'struct cpumask'.
 *
 * This does the conversion, and can be used as a constant initializer.
 */
#define to_cpumask(bitmap)						\
	((struct cpumask *)(1 ? (bitmap)				\
			    : (void *)sizeof(__check_is_bitmap(bitmap))))

/*
 * Compile-time type check helper for to_cpumask(): only an
 * unsigned long * converts cleanly to this parameter. Never evaluated
 * at runtime - the ternary in to_cpumask() always takes the first arm,
 * and sizeof() does not evaluate its operand.
 */
static inline int __check_is_bitmap(const unsigned long *bitmap)
{
	return 1;
}
873*4882a593Smuzhiyun
/*
 * Special-case data structure for "single bit set only" constant CPU masks.
 *
 * We pre-generate all the 64 (or 32) possible bit positions, with enough
 * padding to the left and the right, and return the constant pointer
 * appropriately offset.
 *
 * Row 0 is all-zeroes (used for cpu_none_mask); row 1+bit has exactly
 * "bit" set in its first word. See get_cpu_mask() for how the pointer
 * is offset to place the bit at the right CPU position.
 */
extern const unsigned long
	cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
883*4882a593Smuzhiyun
get_cpu_mask(unsigned int cpu)884*4882a593Smuzhiyun static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
885*4882a593Smuzhiyun {
886*4882a593Smuzhiyun const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
887*4882a593Smuzhiyun p -= cpu / BITS_PER_LONG;
888*4882a593Smuzhiyun return to_cpumask(p);
889*4882a593Smuzhiyun }
890*4882a593Smuzhiyun
#define cpu_is_offline(cpu)	unlikely(!cpu_online(cpu))

#if NR_CPUS <= BITS_PER_LONG
/* All NR_CPUS bits fit in one word: only the last-word mask is needed. */
#define CPU_BITS_ALL						\
{								\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
}

#else /* NR_CPUS > BITS_PER_LONG */

/* Fill every full word with ~0UL, then mask the trailing partial word. */
#define CPU_BITS_ALL						\
{								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,			\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
}
#endif /* NR_CPUS > BITS_PER_LONG */
907*4882a593Smuzhiyun
908*4882a593Smuzhiyun /**
909*4882a593Smuzhiyun * cpumap_print_to_pagebuf - copies the cpumask into the buffer either
910*4882a593Smuzhiyun * as comma-separated list of cpus or hex values of cpumask
911*4882a593Smuzhiyun * @list: indicates whether the cpumap must be list
912*4882a593Smuzhiyun * @mask: the cpumask to copy
913*4882a593Smuzhiyun * @buf: the buffer to copy into
914*4882a593Smuzhiyun *
915*4882a593Smuzhiyun * Returns the length of the (null-terminated) @buf string, zero if
916*4882a593Smuzhiyun * nothing is copied.
917*4882a593Smuzhiyun */
918*4882a593Smuzhiyun static inline ssize_t
cpumap_print_to_pagebuf(bool list,char * buf,const struct cpumask * mask)919*4882a593Smuzhiyun cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
920*4882a593Smuzhiyun {
921*4882a593Smuzhiyun return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
922*4882a593Smuzhiyun nr_cpu_ids);
923*4882a593Smuzhiyun }
924*4882a593Smuzhiyun
#if NR_CPUS <= BITS_PER_LONG
/* Static cpumask_t initializer: all NR_CPUS bits set (single-word case). */
#define CPU_MASK_ALL							\
(cpumask_t) { {								\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
} }
#else
/* Static cpumask_t initializer: all NR_CPUS bits set (multi-word case). */
#define CPU_MASK_ALL							\
(cpumask_t) { {								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,			\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
} }
#endif /* NR_CPUS > BITS_PER_LONG */

/* Static cpumask_t initializer: empty mask. */
#define CPU_MASK_NONE							\
(cpumask_t) { {								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL				\
} }

/* Static cpumask_t initializer: only CPU 0 set. */
#define CPU_MASK_CPU0							\
(cpumask_t) { {								\
	[0] = 1UL							\
} }

#endif /* __LINUX_CPUMASK_H */
949