// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>

/*
 * Initially, a percpu refcount is just a set of percpu counters. Initially, we
 * don't try to detect the ref hitting 0 - which means that get/put can just
 * increment or decrement the local counter. Note that the counter on a
 * particular cpu can (and will) wrap - this is fine; when we go to shutdown,
 * the percpu counters will all sum to the correct value.
 *
 * (More precisely: because modular arithmetic is commutative the sum of all the
 * percpu_count vars will be equal to what it would have been if all the gets
 * and puts were done to a single integer, even if some of the percpu integers
 * overflow or underflow).
 *
 * The real trick to implementing percpu refcounts is shutdown. We can't detect
 * the ref hitting 0 on every put - this would require global synchronization
 * and defeat the whole purpose of using percpu refs.
 *
 * What we do is require the user to keep track of the initial refcount; we know
 * the ref can't hit 0 before the user drops the initial ref, so as long as we
 * convert to non percpu mode before the initial ref is dropped everything
 * works.
 *
 * Converting to non percpu mode is done with some RCUish stuff in
 * percpu_ref_kill. Additionally, we need a bias value so that the
 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
 */

#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))

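/*
 * A minimal lifecycle sketch tying the above together (hypothetical caller,
 * not part of this file): the user holds the initial reference taken by
 * percpu_ref_init(), gets/puts stay per-cpu and cheap, and percpu_ref_kill()
 * both switches to atomic mode and drops that initial reference so the
 * release callback can fire once every other reference is gone.
 *
 *	percpu_ref_init(&obj->ref, my_object_release, 0, GFP_KERNEL);
 *
 *	percpu_ref_get(&obj->ref);	// per-cpu increment, no shared atomic
 *	percpu_ref_put(&obj->ref);	// per-cpu decrement
 *
 *	percpu_ref_kill(&obj->ref);	// switch to atomic + drop initial ref
 */
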
static DEFINE_SPINLOCK(percpu_ref_switch_lock);
static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);

static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
{
	return (unsigned long __percpu *)
		(ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
}

/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 * @flags: PERCPU_REF_INIT_* flags
 * @gfp: allocation mask to use
 *
 * Initializes @ref. @ref starts out in percpu mode with a refcount of 1 unless
 * @flags contains PERCPU_REF_INIT_ATOMIC or PERCPU_REF_INIT_DEAD. These flags
 * change the start state to atomic with the latter setting the initial refcount
 * to 0. See the definitions of PERCPU_REF_INIT_* flags for flag behaviors.
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
		    unsigned int flags, gfp_t gfp)
{
	size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
			     __alignof__(unsigned long));
	unsigned long start_count = 0;
	struct percpu_ref_data *data;

	ref->percpu_count_ptr = (unsigned long)
		__alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
	if (!ref->percpu_count_ptr)
		return -ENOMEM;

	data = kzalloc(sizeof(*ref->data), gfp);
	if (!data) {
		free_percpu((void __percpu *)ref->percpu_count_ptr);
		ref->percpu_count_ptr = 0;
		return -ENOMEM;
	}

	data->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
	data->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;

	if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
		ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
		data->allow_reinit = true;
	} else {
		start_count += PERCPU_COUNT_BIAS;
	}

	if (flags & PERCPU_REF_INIT_DEAD)
		ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	else
		start_count++;

	atomic_long_set(&data->count, start_count);

	data->release = release;
	data->confirm_switch = NULL;
	data->ref = ref;
	ref->data = data;
	return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);
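
/*
 * A minimal sketch of a release callback paired with percpu_ref_init()
 * (hypothetical struct and function names, not part of this file):
 *
 *	struct my_object {
 *		struct percpu_ref ref;
 *	};
 *
 *	static void my_object_release(struct percpu_ref *ref)
 *	{
 *		struct my_object *obj = container_of(ref, struct my_object, ref);
 *
 *		percpu_ref_exit(&obj->ref);	// allowed from ->release()
 *		kfree(obj);			// must not sleep
 *	}
 *
 *	// obj starts in percpu mode holding the initial reference:
 *	// percpu_ref_init(&obj->ref, my_object_release, 0, GFP_KERNEL);
 */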

static void __percpu_ref_exit(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);

	if (percpu_count) {
		/* non-NULL confirm_switch indicates switching in progress */
		WARN_ON_ONCE(ref->data && ref->data->confirm_switch);
		free_percpu(percpu_count);
		ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
	}
}

/**
 * percpu_ref_exit - undo percpu_ref_init()
 * @ref: percpu_ref to exit
 *
 * This function exits @ref. The caller is responsible for ensuring that
 * @ref is no longer in active use. The usual places to invoke this
 * function from are the @ref->release() callback or the init failure path
 * where percpu_ref_init() succeeded but other parts of the initialization
 * of the embedding object failed.
 */
void percpu_ref_exit(struct percpu_ref *ref)
{
	struct percpu_ref_data *data = ref->data;
	unsigned long flags;

	__percpu_ref_exit(ref);

	if (!data)
		return;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
	ref->percpu_count_ptr |= atomic_long_read(&ref->data->count) <<
		__PERCPU_REF_FLAG_BITS;
	ref->data = NULL;
	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);

	kfree(data);
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);
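
/*
 * A minimal sketch of the init-failure path mentioned above (hypothetical
 * names): percpu_ref_init() succeeded but a later initialization step of the
 * embedding object failed, so the ref is exited before the object is freed.
 *
 *	static struct my_object *my_object_create(void)
 *	{
 *		struct my_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *
 *		if (!obj)
 *			return NULL;
 *		if (percpu_ref_init(&obj->ref, my_object_release, 0, GFP_KERNEL))
 *			goto free_obj;
 *		if (my_object_setup(obj))	// hypothetical later step
 *			goto exit_ref;
 *		return obj;
 *
 *	exit_ref:
 *		percpu_ref_exit(&obj->ref);
 *	free_obj:
 *		kfree(obj);
 *		return NULL;
 *	}
 */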

static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
{
	struct percpu_ref_data *data = container_of(rcu,
			struct percpu_ref_data, rcu);
	struct percpu_ref *ref = data->ref;

	data->confirm_switch(ref);
	data->confirm_switch = NULL;
	wake_up_all(&percpu_ref_switch_waitq);

	if (!data->allow_reinit)
		__percpu_ref_exit(ref);

	/* drop ref from percpu_ref_switch_to_atomic() */
	percpu_ref_put(ref);
}

static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
{
	struct percpu_ref_data *data = container_of(rcu,
			struct percpu_ref_data, rcu);
	struct percpu_ref *ref = data->ref;
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	unsigned long count = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		count += *per_cpu_ptr(percpu_count, cpu);

	pr_debug("global %lu percpu %lu\n",
		 atomic_long_read(&data->count), count);

	/*
	 * It's crucial that we sum the percpu counters _before_ adding the sum
	 * to &ref->count; since gets could be happening on one cpu while puts
	 * happen on another, adding a single cpu's count could cause
	 * @ref->count to hit 0 before we've got a consistent value - but the
	 * sum of all the counts will be consistent and correct.
	 *
	 * Subtracting the bias value then has to happen _after_ adding count to
	 * &ref->count; we need the bias value to prevent &ref->count from
	 * reaching 0 before we add the percpu counts. But doing it at the same
	 * time is equivalent and saves us atomic operations:
	 */
	atomic_long_add((long)count - PERCPU_COUNT_BIAS, &data->count);

	WARN_ONCE(atomic_long_read(&data->count) <= 0,
		  "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
		  data->release, atomic_long_read(&data->count));

	/* @ref is viewed as dead on all CPUs, send out switch confirmation */
	percpu_ref_call_confirm_rcu(rcu);
}
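
/*
 * Purely illustrative arithmetic for the summation above (not taken from this
 * file, just the modular-arithmetic argument in the header comment): with
 * 4-bit counters and two CPUs, 5 gets on CPU0 leave its counter at 5, while
 * 3 puts on CPU1 wrap its counter to 13 (0 - 3 mod 16). The sum is
 * 5 + 13 = 18, which is 2 mod 16 - exactly the 2 outstanding references.
 * The bias added at init time keeps the atomic counter from reaching 0 while
 * those per-cpu contributions are still being folded in.
 */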

static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
{
}

static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
					  percpu_ref_func_t *confirm_switch)
{
	if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
		if (confirm_switch)
			confirm_switch(ref);
		return;
	}

	/* switching from percpu to atomic */
	ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;

	/*
	 * Non-NULL ->confirm_switch is used to indicate that switching is
	 * in progress. Use noop one if unspecified.
	 */
	ref->data->confirm_switch = confirm_switch ?:
		percpu_ref_noop_confirm_switch;

	percpu_ref_get(ref);	/* put after confirmation */
	call_rcu(&ref->data->rcu, percpu_ref_switch_to_atomic_rcu);
}

static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	int cpu;

	BUG_ON(!percpu_count);

	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
		return;

	if (WARN_ON_ONCE(!ref->data->allow_reinit))
		return;

	atomic_long_add(PERCPU_COUNT_BIAS, &ref->data->count);

	/*
	 * Restore per-cpu operation. smp_store_release() is paired
	 * with READ_ONCE() in __ref_is_percpu() and guarantees that the
	 * zeroing is visible to all percpu accesses which can see the
	 * following __PERCPU_REF_ATOMIC clearing.
	 */
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(percpu_count, cpu) = 0;

	smp_store_release(&ref->percpu_count_ptr,
			  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
}

static void __percpu_ref_switch_mode(struct percpu_ref *ref,
				     percpu_ref_func_t *confirm_switch)
{
	struct percpu_ref_data *data = ref->data;

	lockdep_assert_held(&percpu_ref_switch_lock);

	/*
	 * If the previous ATOMIC switching hasn't finished yet, wait for
	 * its completion. If the caller ensures that ATOMIC switching
	 * isn't in progress, this function can be called from any context.
	 */
	wait_event_lock_irq(percpu_ref_switch_waitq, !data->confirm_switch,
			    percpu_ref_switch_lock);

	if (data->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
		__percpu_ref_switch_to_atomic(ref, confirm_switch);
	else
		__percpu_ref_switch_to_percpu(ref);
}

/**
 * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 * @confirm_switch: optional confirmation callback
 *
 * There's no reason to use this function for the usual reference counting.
 * Use percpu_ref_kill[_and_confirm]().
 *
 * Schedule switching of @ref to atomic mode. All its percpu counts will
 * be collected to the main atomic counter. On completion, when all CPUs
 * are guaranteed to be in atomic mode, @confirm_switch, which may not
 * block, is invoked. This function may be invoked concurrently with all
 * the get/put operations and can safely be mixed with kill and reinit
 * operations. Note that @ref will stay in atomic mode across kill/reinit
 * cycles until percpu_ref_switch_to_percpu() is called.
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode. If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_switch)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	ref->data->force_atomic = true;
	__percpu_ref_switch_mode(ref, confirm_switch);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic);

/**
 * percpu_ref_switch_to_atomic_sync - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 *
 * Schedule switching the ref to atomic mode, and wait for the
 * switch to complete. Caller must ensure that no other thread
 * will switch back to percpu mode.
 */
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref)
{
	percpu_ref_switch_to_atomic(ref, NULL);
	wait_event(percpu_ref_switch_waitq, !ref->data->confirm_switch);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic_sync);
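
/*
 * A minimal sketch of the switch-to-atomic/back-to-percpu pattern
 * (hypothetical caller; the freeze/unfreeze names stand in for whatever the
 * embedding code needs exact counts or quiescence for):
 *
 *	static void my_object_freeze(struct my_object *obj)
 *	{
 *		// may sleep: waits for the RCU grace period and confirmation
 *		percpu_ref_switch_to_atomic_sync(&obj->ref);
 *		// ... gets/puts now hit the single atomic counter ...
 *	}
 *
 *	static void my_object_unfreeze(struct my_object *obj)
 *	{
 *		percpu_ref_switch_to_percpu(&obj->ref);
 *	}
 */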

/**
 * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
 * @ref: percpu_ref to switch to percpu mode
 *
 * There's no reason to use this function for the usual reference counting.
 * To re-use an expired ref, use percpu_ref_reinit().
 *
 * Switch @ref to percpu mode. This function may be invoked concurrently
 * with all the get/put operations and can safely be mixed with kill and
 * reinit operations. This function reverses the sticky atomic state set
 * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic(). If @ref is
 * dying or dead, the actual switching takes place on the following
 * percpu_ref_reinit().
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode. If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	ref->data->force_atomic = false;
	__percpu_ref_switch_mode(ref, NULL);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_percpu);

/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL. @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs at which point all
 * further invocations of percpu_ref_tryget_live() will fail. See
 * percpu_ref_tryget_live() for details.
 *
 * This function normally doesn't block and can be called from any context
 * but it may block if @confirm_kill is specified and @ref is in the
 * process of switching to atomic mode by percpu_ref_switch_to_atomic().
 *
 * There are no implied RCU grace periods between kill and release.
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
		  "%s called more than once on %ps!", __func__,
		  ref->data->release);

	ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	__percpu_ref_switch_mode(ref, confirm_kill);
	percpu_ref_put(ref);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
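
/*
 * A minimal sketch of a kill with confirmation (hypothetical names, assuming
 * the embedding object carries a struct completion kill_done): the confirm
 * callback runs once no CPU can take a new "live" reference, and the release
 * callback runs only when the last reference is dropped.
 *
 *	static void my_object_confirm_kill(struct percpu_ref *ref)
 *	{
 *		struct my_object *obj = container_of(ref, struct my_object, ref);
 *
 *		complete(&obj->kill_done);	// must not block
 *	}
 *
 *	static void my_object_shutdown(struct my_object *obj)
 *	{
 *		percpu_ref_kill_and_confirm(&obj->ref, my_object_confirm_kill);
 *		wait_for_completion(&obj->kill_done);
 *		// tryget_live() now fails; outstanding refs still pin obj
 *	}
 */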

/**
 * percpu_ref_is_zero - test whether a percpu refcount reached zero
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref reached zero.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
bool percpu_ref_is_zero(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	unsigned long count, flags;

	if (__ref_is_percpu(ref, &percpu_count))
		return false;

	/* protect us from being destroyed */
	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
	if (ref->data)
		count = atomic_long_read(&ref->data->count);
	else
		count = ref->percpu_count_ptr >> __PERCPU_REF_FLAG_BITS;
	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);

	return count == 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_is_zero);

/**
 * percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
 *
 * Re-initialize @ref so that it's in the same state as when it finished
 * percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD. @ref must have been
 * initialized successfully and reached 0 but not exited.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_reinit(struct percpu_ref *ref)
{
	WARN_ON_ONCE(!percpu_ref_is_zero(ref));

	percpu_ref_resurrect(ref);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);
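
/*
 * A minimal reuse sketch (hypothetical caller, assuming ->release signals
 * completion rather than freeing the object): a ref initialized with
 * PERCPU_REF_ALLOW_REINIT is killed, allowed to drain to zero, and then
 * brought back for another round of use.
 *
 *	percpu_ref_init(&obj->ref, my_object_release, PERCPU_REF_ALLOW_REINIT,
 *			GFP_KERNEL);
 *	...
 *	percpu_ref_kill(&obj->ref);
 *	// ... wait until the ref has reached zero, e.g. from ->release ...
 *	percpu_ref_reinit(&obj->ref);	// live again with a refcount of 1
 */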

/**
 * percpu_ref_resurrect - modify a percpu refcount from dead to live
 * @ref: percpu_ref to resurrect
 *
 * Modify @ref so that it's in the same state as before percpu_ref_kill() was
 * called. @ref must be dead but must not yet have exited.
 *
 * If @ref->release() frees @ref then the caller is responsible for
 * guaranteeing that @ref->release() does not get called while this
 * function is in progress.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_resurrect(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	WARN_ON_ONCE(!(ref->percpu_count_ptr & __PERCPU_REF_DEAD));
	WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));

	ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
	percpu_ref_get(ref);
	__percpu_ref_switch_mode(ref, NULL);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_resurrect);
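
/*
 * A minimal resurrect sketch (hypothetical caller): unlike percpu_ref_reinit(),
 * the ref only needs to be dead, not drained to zero, so outstanding
 * references may still exist when it is brought back to life.
 *
 *	percpu_ref_kill(&obj->ref);		// tryget_live() now fails
 *	...
 *	percpu_ref_resurrect(&obj->ref);	// undoes the kill and re-takes
 *						// the initial reference
 *	// tryget_live() succeeds again; existing references were unaffected
 */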