/* xref: /OK3568_Linux_fs/kernel/lib/refcount.c (revision 4882a59341e53eb6f0b4789bf948001014eff981) */
// SPDX-License-Identifier: GPL-2.0
/*
 * Out-of-line refcount functions.
 */

#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/bug.h>

#define REFCOUNT_WARN(str)	WARN_ONCE(1, "refcount_t: " str ".\n")

void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t)
{
	refcount_set(r, REFCOUNT_SATURATED);

	switch (t) {
	case REFCOUNT_ADD_NOT_ZERO_OVF:
		REFCOUNT_WARN("saturated; leaking memory");
		break;
	case REFCOUNT_ADD_OVF:
		REFCOUNT_WARN("saturated; leaking memory");
		break;
	case REFCOUNT_ADD_UAF:
		REFCOUNT_WARN("addition on 0; use-after-free");
		break;
	case REFCOUNT_SUB_UAF:
		REFCOUNT_WARN("underflow; use-after-free");
		break;
	case REFCOUNT_DEC_LEAK:
		REFCOUNT_WARN("decrement hit 0; leaking memory");
		break;
	default:
		REFCOUNT_WARN("unknown saturation event!?");
	}
}
EXPORT_SYMBOL(refcount_warn_saturate);
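
/*
 * Hedged sketch (not from the original file) of how the saturation events
 * above can be reached. For illustration only; real code must never trip
 * these paths deliberately. The function name is hypothetical.
 */
static void __maybe_unused refcount_saturation_demo(void)
{
	refcount_t ref;

	refcount_set(&ref, 0);
	refcount_inc(&ref);	/* increment on 0: REFCOUNT_ADD_UAF */

	refcount_set(&ref, 1);
	refcount_dec(&ref);	/* 1 -> 0 via refcount_dec(): REFCOUNT_DEC_LEAK */
}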

/**
 * refcount_dec_if_one - decrement a refcount if it is 1
 * @r: the refcount
 *
 * There is no atomic_t counterpart; it attempts a 1 -> 0 transition and
 * returns whether it succeeded.
 *
 * Like all decrement operations, it provides release memory order and provides
 * a control dependency.
 *
 * It can be used like a try-delete operator; this explicit case is provided
 * instead of a generic cmpxchg, because the latter would allow implementing
 * unsafe operations.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_if_one(refcount_t *r)
{
	int val = 1;

	return atomic_try_cmpxchg_release(&r->refs, &val, 0);
}
EXPORT_SYMBOL(refcount_dec_if_one);
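
/*
 * Hedged usage sketch (not part of the original file): a "try delete"
 * built on refcount_dec_if_one(), succeeding only when the caller holds
 * the last reference. struct demo_obj and demo_try_delete() are
 * hypothetical; kfree() requires <linux/slab.h>.
 */
struct demo_obj {
	refcount_t ref;
	/* ... payload ... */
};

static bool __maybe_unused demo_try_delete(struct demo_obj *obj)
{
	if (!refcount_dec_if_one(&obj->ref))
		return false;	/* other users still hold references */

	/* We did the 1 -> 0 transition; no one else can see @obj. */
	kfree(obj);
	return true;
}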

/**
 * refcount_dec_not_one - decrement a refcount if it is not 1
 * @r: the refcount
 *
 * There is no atomic_t counterpart; it decrements unless the value is 1, in
 * which case it returns false.
 *
 * This was often open-coded as: atomic_add_unless(&var, -1, 1)
 *
 * Return: true if the decrement operation was successful, false otherwise
 */
bool refcount_dec_not_one(refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (unlikely(val == REFCOUNT_SATURATED))
			return true;

		if (val == 1)
			return false;

		new = val - 1;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return true;
		}

	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

	return true;
}
EXPORT_SYMBOL(refcount_dec_not_one);
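
/*
 * Hedged sketch (not from the original file) of the pattern
 * refcount_dec_not_one() enables: drop references lock-free while the
 * count stays above 1, and only take the lock for a possible final put.
 * This is essentially what refcount_dec_and_lock() below packages up;
 * demo_list_lock and demo_put() are hypothetical.
 */
static DEFINE_SPINLOCK(demo_list_lock);

static void __maybe_unused demo_put(refcount_t *ref)
{
	if (refcount_dec_not_one(ref))
		return;		/* fast path: not the last reference */

	spin_lock(&demo_list_lock);
	if (refcount_dec_and_test(ref)) {
		/* last reference: unlink and free under the lock */
	}
	spin_unlock(&demo_list_lock);
}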

/**
 * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
 *                               refcount to 0
 * @r: the refcount
 * @lock: the mutex to be locked
 *
 * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
 * to decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold mutex if able to decrement refcount to 0, false
 *         otherwise
 */
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	mutex_lock(lock);
	if (!refcount_dec_and_test(r)) {
		mutex_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
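
/*
 * Hedged usage sketch (not part of the original file) for
 * refcount_dec_and_mutex_lock(): on the final put the mutex is held, so
 * unlinking cannot race with lookups. struct demo_node,
 * demo_registry_lock and demo_node_put() are hypothetical; list_del()
 * requires <linux/list.h> and kfree() requires <linux/slab.h>.
 */
struct demo_node {
	refcount_t ref;
	struct list_head entry;
};

static DEFINE_MUTEX(demo_registry_lock);

static void __maybe_unused demo_node_put(struct demo_node *node)
{
	if (!refcount_dec_and_mutex_lock(&node->ref, &demo_registry_lock))
		return;		/* not the final reference */

	list_del(&node->entry);
	mutex_unlock(&demo_registry_lock);
	kfree(node);
}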

/**
 * refcount_dec_and_lock - return holding spinlock if able to decrement
 *                         refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 *
 * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
 * decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 *         otherwise
 */
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock(lock);
	if (!refcount_dec_and_test(r)) {
		spin_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock);
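
/*
 * Hedged usage sketch (not part of the original file) for
 * refcount_dec_and_lock(): same shape as the mutex variant above, for
 * lookup structures protected by a spinlock. struct demo_cache_entry,
 * demo_cache_lock and demo_cache_put() are hypothetical; hlist_del()
 * requires <linux/list.h> and kfree() requires <linux/slab.h>.
 */
struct demo_cache_entry {
	refcount_t ref;
	struct hlist_node hash;
};

static DEFINE_SPINLOCK(demo_cache_lock);

static void __maybe_unused demo_cache_put(struct demo_cache_entry *e)
{
	if (!refcount_dec_and_lock(&e->ref, &demo_cache_lock))
		return;

	hlist_del(&e->hash);
	spin_unlock(&demo_cache_lock);
	kfree(e);
}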

/**
 * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
 *                                 interrupts if able to decrement refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 * @flags: saved IRQ-flags if the lock is acquired
 *
 * Same as refcount_dec_and_lock() above except that the spinlock is acquired
 * with disabled interrupts.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 *         otherwise
 */
bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
				   unsigned long *flags)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock_irqsave(lock, *flags);
	if (!refcount_dec_and_test(r)) {
		spin_unlock_irqrestore(lock, *flags);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
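
/*
 * Hedged usage sketch (not part of the original file) for
 * refcount_dec_and_lock_irqsave(), for when the lookup lock is also taken
 * from IRQ context. demo_irq_lock and demo_irq_put() are hypothetical.
 */
static DEFINE_SPINLOCK(demo_irq_lock);

static void __maybe_unused demo_irq_put(refcount_t *ref)
{
	unsigned long flags;

	if (!refcount_dec_and_lock_irqsave(ref, &demo_irq_lock, &flags))
		return;

	/* final put: tear down under the lock with IRQs disabled */
	spin_unlock_irqrestore(&demo_irq_lock, flags);
}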