/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Percpu refcounts:
 * (C) 2012 Google, Inc.
 * Author: Kent Overstreet <koverstreet@google.com>
 *
 * This implements a refcount with similar semantics to atomic_t - atomic_inc(),
 * atomic_dec_and_test() - but percpu.
 *
 * There's one important difference between percpu refs and normal atomic_t
 * refcounts; you have to keep track of your initial refcount, and then when you
 * start shutting down you call percpu_ref_kill() _before_ dropping the initial
 * refcount.
 *
 * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
 * than an atomic_t - this is because of the way shutdown works, see
 * percpu_ref_kill()/PERCPU_COUNT_BIAS.
 *
 * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
 * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
 * puts the ref back in single atomic_t mode, collecting the per cpu refs and
 * issuing the appropriate barriers, and then marks the ref as shutting down so
 * that percpu_ref_put() will check for the ref hitting 0.  After it returns,
 * it's safe to drop the initial ref.
 *
 * USAGE:
 *
 * See fs/aio.c for some example usage; it's used there for struct kioctx, which
 * is created when userspace calls io_setup(), and destroyed when userspace
 * calls io_destroy() or the process exits.
 *
 * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
 * removes the kioctx from the process's table of kioctxs and kills the
 * percpu_ref.  After that, there can't be any new users of the kioctx (from
 * lookup_ioctx()) and it's then safe to drop the initial ref with
 * percpu_ref_put().
 *
 * Note that the free path, free_ioctx(), needs to go through explicit call_rcu()
 * to synchronize with RCU protected lookup_ioctx().  percpu_ref operations don't
 * imply RCU grace periods of any kind and if a user wants to combine percpu_ref
 * with RCU protection, it must be done explicitly.
 *
 * Code that does a two stage shutdown like this often needs some kind of
 * explicit synchronization to ensure the initial refcount can only be dropped
 * once - percpu_ref_kill() itself must not be called more than once, so the
 * caller needs some other mechanism (a lock, a state flag, etc.) to
 * synchronize teardown; the aio code does this in kill_ioctx().
 *
 * A minimal sketch of this lifecycle follows this comment.
 */
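
/*
 * A minimal lifecycle sketch of the pattern described above.  The object,
 * the release callback and the RCU callback (my_obj, my_obj_release(),
 * my_obj_free_rcu()) are hypothetical and only show how percpu_ref_init(),
 * percpu_ref_get()/percpu_ref_put() and percpu_ref_kill() fit together:
 *
 *	struct my_obj {
 *		struct percpu_ref	ref;
 *		struct rcu_head		rcu;
 *	};
 *
 *	static void my_obj_free_rcu(struct rcu_head *rcu)
 *	{
 *		struct my_obj *obj = container_of(rcu, struct my_obj, rcu);
 *
 *		percpu_ref_exit(&obj->ref);
 *		kfree(obj);
 *	}
 *
 *	static void my_obj_release(struct percpu_ref *ref)
 *	{
 *		struct my_obj *obj = container_of(ref, struct my_obj, ref);
 *
 *		// percpu_ref implies no RCU grace period; add one explicitly
 *		// when lookups are RCU protected, as free_ioctx() does.
 *		call_rcu(&obj->rcu, my_obj_free_rcu);
 *	}
 *
 *	// creation (holds the initial ref):
 *	//	percpu_ref_init(&obj->ref, my_obj_release, 0, GFP_KERNEL);
 *	// temporary users:
 *	//	percpu_ref_get(&obj->ref) ... percpu_ref_put(&obj->ref);
 *	// teardown, exactly once:
 *	//	percpu_ref_kill(&obj->ref);	// no new tryget_live() users
 *	//	percpu_ref_put(&obj->ref);	// drop the initial ref
 */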

#ifndef _LINUX_PERCPU_REFCOUNT_H
#define _LINUX_PERCPU_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/gfp.h>

struct percpu_ref;
typedef void (percpu_ref_func_t)(struct percpu_ref *);

/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
enum {
	__PERCPU_REF_ATOMIC	= 1LU << 0,	/* operating in atomic mode */
	__PERCPU_REF_DEAD	= 1LU << 1,	/* (being) killed */
	__PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,

	__PERCPU_REF_FLAG_BITS	= 2,
};

/* @flags for percpu_ref_init(); see the usage sketch after this enum */
enum {
	/*
	 * Start w/ ref == 1 in atomic mode.  Can be switched to percpu
	 * operation using percpu_ref_switch_to_percpu().  If initialized
	 * with this flag, the ref will stay in atomic mode until
	 * percpu_ref_switch_to_percpu() is invoked on it.
	 * Implies ALLOW_REINIT.
	 */
	PERCPU_REF_INIT_ATOMIC	= 1 << 0,

	/*
	 * Start dead w/ ref == 0 in atomic mode.  Must be revived with
	 * percpu_ref_reinit() before use.  Implies INIT_ATOMIC and
	 * ALLOW_REINIT.
	 */
	PERCPU_REF_INIT_DEAD	= 1 << 1,

	/*
	 * Allow switching from atomic mode to percpu mode.
	 */
	PERCPU_REF_ALLOW_REINIT	= 1 << 2,
};
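
/*
 * A short, hypothetical sketch of how these flags are typically used
 * (my_ref and my_release are placeholders, not part of this header):
 *
 *	// start in atomic mode, e.g. while the object is still being set up,
 *	// then move to the percpu fast path once it is visible to others
 *	percpu_ref_init(&my_ref, my_release, PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
 *	...
 *	percpu_ref_switch_to_percpu(&my_ref);
 *
 *	// or start dead with a count of 0 and bring the ref up only on demand
 *	percpu_ref_init(&my_ref, my_release, PERCPU_REF_INIT_DEAD, GFP_KERNEL);
 *	...
 *	percpu_ref_reinit(&my_ref);		// back to count == 1, live
 */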

struct percpu_ref_data {
	atomic_long_t		count;
	percpu_ref_func_t	*release;
	percpu_ref_func_t	*confirm_switch;
	bool			force_atomic:1;
	bool			allow_reinit:1;
	struct rcu_head		rcu;
	struct percpu_ref	*ref;
};

struct percpu_ref {
	/*
	 * The low bits of the pointer hold the __PERCPU_REF_* flags; if
	 * __PERCPU_REF_ATOMIC is set, get/put manipulate the atomic count
	 * instead of the percpu counters.
	 */
	unsigned long		percpu_count_ptr;

	/*
	 * 'percpu_ref' is often embedded into a user structure, and only
	 * 'percpu_count_ptr' is required on the fast path; the other fields
	 * are moved into 'percpu_ref_data' to reduce the memory footprint
	 * of the fast path.
	 */
	struct percpu_ref_data  *data;
};

int __must_check percpu_ref_init(struct percpu_ref *ref,
				 percpu_ref_func_t *release, unsigned int flags,
				 gfp_t gfp);
void percpu_ref_exit(struct percpu_ref *ref);
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_switch);
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref);
void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill);
void percpu_ref_resurrect(struct percpu_ref *ref);
void percpu_ref_reinit(struct percpu_ref *ref);
bool percpu_ref_is_zero(struct percpu_ref *ref);

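/*
 * A rough sketch of the drain / revive pattern these functions support
 * (similar to how the block layer freezes and unfreezes its queue usage
 * counter; my_ref, my_waitq and the wake-up in the release callback are
 * hypothetical):
 *
 *	percpu_ref_kill(&my_ref);		// stop handing out new refs
 *	wait_event(my_waitq, percpu_ref_is_zero(&my_ref));
 *	...					// everything has drained
 *	percpu_ref_resurrect(&my_ref);		// count back to 1, refs may be
 *						// taken again
 *
 * percpu_ref_switch_to_atomic_sync() and percpu_ref_switch_to_percpu() only
 * change how the count is maintained (exact atomic_t vs. percpu fast path);
 * they do not affect whether the ref is alive.
 */
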
/**
 * percpu_ref_kill - drop the initial ref
 * @ref: percpu_ref to kill
 *
 * Must be used to drop the initial ref on a percpu refcount; must be called
 * precisely once before shutdown.
 *
 * Switches @ref into atomic mode before gathering up the percpu counters
 * and dropping the initial ref.
 *
 * There are no implied RCU grace periods between kill and release.
 */
static inline void percpu_ref_kill(struct percpu_ref *ref)
{
	percpu_ref_kill_and_confirm(ref, NULL);
}
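
/*
 * Because percpu_ref_kill() may only be called once, teardown paths that can
 * race usually gate it on state of their own.  A hypothetical sketch using a
 * flag bit in the owning object:
 *
 *	if (!test_and_set_bit(MY_OBJ_DYING, &obj->flags)) {
 *		percpu_ref_kill(&obj->ref);
 *		percpu_ref_put(&obj->ref);	// drop the initial ref
 *	}
 */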

/*
 * Internal helper.  Don't use outside percpu-refcount proper.  The
 * function doesn't return the pointer and let the caller test it for NULL
 * because doing so forces the compiler to generate two conditional
 * branches as it can't assume that @ref->percpu_count is not NULL.
 */
static inline bool __ref_is_percpu(struct percpu_ref *ref,
					  unsigned long __percpu **percpu_countp)
{
	unsigned long percpu_ptr;

	/*
	 * The value of @ref->percpu_count_ptr is tested for
	 * !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then
	 * used as a pointer.  If the compiler generates a separate fetch
	 * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
	 * between contaminating the pointer value, meaning that
	 * READ_ONCE() is required when fetching it.
	 *
	 * The dependency ordering from the READ_ONCE() pairs
	 * with smp_store_release() in __percpu_ref_switch_to_percpu().
	 */
	percpu_ptr = READ_ONCE(ref->percpu_count_ptr);

	/*
	 * Theoretically, the following could test just ATOMIC; however,
	 * then we'd have to mask off DEAD separately as DEAD may be
	 * visible without ATOMIC if we race with percpu_ref_kill().  DEAD
	 * implies ATOMIC anyway.  Test them together.
	 */
	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
		return false;

	*percpu_countp = (unsigned long __percpu *)percpu_ptr;
	return true;
}

/**
 * percpu_ref_get_many - increment a percpu refcount
 * @ref: percpu_ref to get
 * @nr: number of references to get
 *
 * Analogous to atomic_long_add().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
{
	unsigned long __percpu *percpu_count;

	rcu_read_lock();

	if (__ref_is_percpu(ref, &percpu_count))
		this_cpu_add(*percpu_count, nr);
	else
		atomic_long_add(nr, &ref->data->count);

	rcu_read_unlock();
}

/**
 * percpu_ref_get - increment a percpu refcount
 * @ref: percpu_ref to get
 *
 * Analogous to atomic_long_inc().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get(struct percpu_ref *ref)
{
	percpu_ref_get_many(ref, 1);
}
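
/*
 * percpu_ref_get() never fails and does not check whether the count has
 * already reached zero, so it is normally used when the caller is already
 * known to hold a reference.  A hypothetical sketch of handing a reference
 * to another user (hand_off() is a placeholder):
 *
 *	// caller already holds a ref on obj->ref
 *	percpu_ref_get(&obj->ref);
 *	hand_off(obj);			// the new user calls percpu_ref_put()
 *					// when it is done
 */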

/**
 * percpu_ref_tryget_many - try to increment a percpu refcount
 * @ref: percpu_ref to try-get
 * @nr: number of references to get
 *
 * Increment a percpu refcount by @nr unless its count has already reached
 * zero.  Returns %true on success; %false on failure.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget_many(struct percpu_ref *ref,
					  unsigned long nr)
{
	unsigned long __percpu *percpu_count;
	bool ret;

	rcu_read_lock();

	if (__ref_is_percpu(ref, &percpu_count)) {
		this_cpu_add(*percpu_count, nr);
		ret = true;
	} else {
		ret = atomic_long_add_unless(&ref->data->count, nr, 0);
	}

	rcu_read_unlock();

	return ret;
}

/**
 * percpu_ref_tryget - try to increment a percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless its count has already reached zero.
 * Returns %true on success; %false on failure.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
	return percpu_ref_tryget_many(ref, 1);
}
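
/*
 * Unlike percpu_ref_tryget_live() below, percpu_ref_tryget() can still
 * succeed after percpu_ref_kill() as long as the count has not yet reached
 * zero.  A hypothetical sketch for code that only needs the object to stay
 * allocated, not to be live:
 *
 *	if (percpu_ref_tryget(&obj->ref)) {
 *		...				// obj won't be released under us
 *		percpu_ref_put(&obj->ref);
 *	}
 */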

/**
 * percpu_ref_tryget_live - try to increment a live percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless it has already been killed.  Returns
 * %true on success; %false on failure.
 *
 * Completion of percpu_ref_kill() in itself doesn't guarantee that this
 * function will fail.  For such a guarantee, percpu_ref_kill_and_confirm()
 * should be used.  After the confirm_kill callback is invoked, it's
 * guaranteed that no new reference will be given out by
 * percpu_ref_tryget_live().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	bool ret = false;

	rcu_read_lock();

	if (__ref_is_percpu(ref, &percpu_count)) {
		this_cpu_inc(*percpu_count);
		ret = true;
	} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
		ret = atomic_long_inc_not_zero(&ref->data->count);
	}

	rcu_read_unlock();

	return ret;
}
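
/*
 * percpu_ref_tryget_live() is the usual way to take a reference from an RCU
 * protected lookup path, roughly as lookup_ioctx() does in fs/aio.c
 * (my_lookup() and my_obj are hypothetical):
 *
 *	rcu_read_lock();
 *	obj = my_lookup(id);			// RCU protected lookup
 *	if (obj && !percpu_ref_tryget_live(&obj->ref))
 *		obj = NULL;			// already killed; treat as gone
 *	rcu_read_unlock();
 *	// if obj != NULL we now hold a ref that outlives the RCU section
 */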

/**
 * percpu_ref_put_many - decrement a percpu refcount
 * @ref: percpu_ref to put
 * @nr: number of references to put
 *
 * Decrement the refcount, and if it reaches 0, call the release function
 * (which was passed to percpu_ref_init()).
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
{
	unsigned long __percpu *percpu_count;

	rcu_read_lock();

	if (__ref_is_percpu(ref, &percpu_count))
		this_cpu_sub(*percpu_count, nr);
	else if (unlikely(atomic_long_sub_and_test(nr, &ref->data->count)))
		ref->data->release(ref);

	rcu_read_unlock();
}

/**
 * percpu_ref_put - decrement a percpu refcount
 * @ref: percpu_ref to put
 *
 * Decrement the refcount, and if it reaches 0, call the release function
 * (which was passed to percpu_ref_init()).
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put(struct percpu_ref *ref)
{
	percpu_ref_put_many(ref, 1);
}
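
/*
 * Note that the release callback runs from the final percpu_ref_put(), i.e.
 * inside an RCU read-side critical section and possibly from atomic context,
 * so it should not sleep; work that can sleep is usually deferred.  A
 * hypothetical sketch:
 *
 *	static void my_obj_release(struct percpu_ref *ref)
 *	{
 *		struct my_obj *obj = container_of(ref, struct my_obj, ref);
 *
 *		queue_work(system_wq, &obj->free_work);	// defer sleeping work
 *	}
 */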

/**
 * percpu_ref_is_dying - test whether a percpu refcount is dying or dead
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref is dying or dead.
 *
 * This function is safe to call as long as @ref is between init and exit
 * and the caller is responsible for synchronizing against state changes.
 */
static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
{
	return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
}
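
/*
 * Because the answer can change as soon as it is returned, callers of
 * percpu_ref_is_dying() normally hold whatever lock serializes their own
 * kill/resurrect transitions.  A hypothetical sketch:
 *
 *	spin_lock(&obj->lock);
 *	if (!percpu_ref_is_dying(&obj->ref))
 *		start_new_work(obj);		// teardown can't race with us
 *	spin_unlock(&obj->lock);
 */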

#endif