// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/numa.h>

/**
 * cpumask_next - get the next cpu in a cpumask
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set.
 */
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next);
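
/*
 * Example (illustrative sketch, not part of this file): walking every
 * online cpu by hand with cpumask_next(), starting from -1. This is
 * roughly what the for_each_cpu() iterator expands to:
 *
 *	int cpu = -1;
 *
 *	while ((cpu = cpumask_next(cpu, cpu_online_mask)) < nr_cpu_ids)
 *		pr_info("cpu %d is online\n", cpu);
 */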

/**
 * cpumask_next_and - get the next cpu in *src1p & *src2p
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set in both.
 */
int cpumask_next_and(int n, const struct cpumask *src1p,
		     const struct cpumask *src2p)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
		nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next_and);
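
/*
 * Example (illustrative sketch): cpumask_next_and() walks an
 * intersection without allocating a temporary mask. "allowed" is a
 * hypothetical caller-provided cpumask:
 *
 *	int cpu = -1;
 *
 *	while ((cpu = cpumask_next_and(cpu, allowed,
 *				       cpu_online_mask)) < nr_cpu_ids)
 *		pr_info("cpu %d is allowed and online\n", cpu);
 */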

/**
 * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
 * @mask: the cpumask to search
 * @cpu: the cpu to ignore.
 *
 * Often used to find any cpu but smp_processor_id() in a mask.
 * Returns >= nr_cpu_ids if no cpus set.
 */
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
	unsigned int i;

	cpumask_check(cpu);
	for_each_cpu(i, mask)
		if (i != cpu)
			break;
	return i;
}
EXPORT_SYMBOL(cpumask_any_but);
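
/*
 * Example (illustrative sketch): picking a peer cpu to offload work to,
 * falling back to the local cpu when it is the only one online:
 *
 *	unsigned int target;
 *
 *	target = cpumask_any_but(cpu_online_mask, smp_processor_id());
 *	if (target >= nr_cpu_ids)
 *		target = smp_processor_id();
 */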

/**
 * cpumask_next_wrap - helper to implement for_each_cpu_wrap
 * @n: the cpu prior to the place to search
 * @mask: the cpumask pointer
 * @start: the start point of the iteration
 * @wrap: assume @n crossing @start terminates the iteration
 *
 * Returns >= nr_cpu_ids on completion
 *
 * Note: the @wrap argument is required for the start condition when
 * we cannot assume @start is set in @mask.
 */
int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
	int next;

again:
	next = cpumask_next(n, mask);

	if (wrap && n < start && next >= start) {
		return nr_cpumask_bits;

	} else if (next >= nr_cpumask_bits) {
		wrap = true;
		n = -1;
		goto again;
	}

	return next;
}
EXPORT_SYMBOL(cpumask_next_wrap);
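
/*
 * Example (illustrative sketch): the intended consumer is the
 * for_each_cpu_wrap() iterator, which starts at an arbitrary cpu,
 * wraps past the end of the mask, and visits every set bit once:
 *
 *	unsigned int cpu;
 *
 *	for_each_cpu_wrap(cpu, cpu_online_mask, raw_smp_processor_id())
 *		pr_info("visiting cpu %d\n", cpu);
 */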

/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node from which to allocate or %NUMA_NO_NODE
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails. Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
 * too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif

	return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);
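
/*
 * Example (illustrative sketch): allocating a scratch mask close to a
 * device's memory, where "dev" is a hypothetical struct device:
 *
 *	cpumask_var_t scratch;
 *
 *	if (!alloc_cpumask_var_node(&scratch, GFP_KERNEL,
 *				    dev_to_node(dev)))
 *		return -ENOMEM;
 *	...
 *	free_cpumask_var(scratch);
 */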

bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}
EXPORT_SYMBOL(zalloc_cpumask_var_node);

/**
 * alloc_cpumask_var - allocate a struct cpumask
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 *
 * See alloc_cpumask_var_node.
 */
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(alloc_cpumask_var);

bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(zalloc_cpumask_var);
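
/*
 * Example (illustrative sketch): the canonical alloc/use/free pattern.
 * Because cpumask_var_t degenerates to an on-stack array when
 * CONFIG_CPUMASK_OFFSTACK=n, the same code works in both configurations:
 *
 *	cpumask_var_t tmp;
 *
 *	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_and(tmp, cpu_online_mask, cpu_present_mask);
 *	...
 *	free_cpumask_var(tmp);
 */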

/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
	if (!*mask)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      cpumask_size());
}

/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
	kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);

/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	memblock_free_early(__pa(mask), cpumask_size());
}
#endif
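
/*
 * Example (illustrative sketch): the bootmem variants serve __init code
 * that runs before the slab allocator is available; failure panics
 * instead of returning an error. "early_setup" is a hypothetical init
 * hook:
 *
 *	static cpumask_var_t boot_mask;
 *
 *	void __init early_setup(void)
 *	{
 *		alloc_bootmem_cpumask_var(&boot_mask);
 *		cpumask_copy(boot_mask, cpu_possible_mask);
 *	}
 */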

/**
 * cpumask_local_spread - select the i'th cpu with local numa cpus first
 * @i: index number
 * @node: local numa_node
 *
 * This function selects an online CPU according to a numa aware policy;
 * local cpus are returned first, followed by non-local ones, then it
 * wraps around.
 *
 * It's not very efficient, but useful for setup.
 */
unsigned int cpumask_local_spread(unsigned int i, int node)
{
	int cpu;

	/* Wrap: we always want a cpu. */
	i %= num_online_cpus();

	if (node == NUMA_NO_NODE) {
		for_each_cpu(cpu, cpu_online_mask)
			if (i-- == 0)
				return cpu;
	} else {
		/* NUMA first. */
		for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
			if (i-- == 0)
				return cpu;

		for_each_cpu(cpu, cpu_online_mask) {
			/* Skip NUMA nodes, done above. */
			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
				continue;

			if (i-- == 0)
				return cpu;
		}
	}
	BUG();
}
EXPORT_SYMBOL(cpumask_local_spread);
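
/*
 * Example (illustrative sketch): spreading per-queue interrupt affinity
 * across cpus while preferring the device's NUMA node. "nvec", "hwdev"
 * and queue_irq() are hypothetical:
 *
 *	for (i = 0; i < nvec; i++) {
 *		unsigned int cpu;
 *
 *		cpu = cpumask_local_spread(i, dev_to_node(hwdev));
 *		irq_set_affinity_hint(queue_irq(i), cpumask_of(cpu));
 *	}
 */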

static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);

/**
 * cpumask_any_and_distribute - return an arbitrary cpu within *src1p & *src2p
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Iterated calls using the same src1p and src2p will be distributed within
 * their intersection.
 *
 * Returns >= nr_cpu_ids if the intersection is empty.
 */
int cpumask_any_and_distribute(const struct cpumask *src1p,
			       const struct cpumask *src2p)
{
	int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);

	next = cpumask_next_and(prev, src1p, src2p);
	if (next >= nr_cpu_ids)
		next = cpumask_first_and(src1p, src2p);

	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_and_distribute);
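
/*
 * Example (illustrative sketch): because the previous pick is stored
 * per-cpu, repeated calls round-robin through the intersection instead
 * of always returning its first set bit. "p" is a hypothetical task:
 *
 *	int cpu;
 *
 *	cpu = cpumask_any_and_distribute(p->cpus_ptr, cpu_online_mask);
 *	if (cpu >= nr_cpu_ids)
 *		return -EAGAIN;
 */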