xref: /optee_os/core/include/kernel/refcount.h (revision 546291f4de9b76fb6c5ed4dbdbe685a9805cad2a)
1 /*
2  * Copyright (c) 2017, Linaro Limited
3  *
4  * SPDX-License-Identifier: BSD-2-Clause
5  */
6 
7 #ifndef __KERNEL_REFCOUNT_H
8 #define __KERNEL_REFCOUNT_H
9 
10 #include <atomic.h>
11 
12 /*
13  * Reference counter
14  *
15  * When val is 0, refcount_inc() does not change the value and returns false.
16  * Otherwise, it increments the value and returns true.
17  *
18  * refcount_dec() decrements the value and returns true when the call
19  * caused the value to become 0, false otherwise.
20  *
21  * Since each call to refcount_dec() is supposed to match a call to
22  * refcount_inc(), refcount_dec() called for val == 0 should never happen.
23  *
24  * This behaviour makes this pattern possible:
25  * if (!refcount_inc(r)) {
26  *	mutex_lock(m);
27  *	// Some other thread may have initialized o by now so check that
28  *	// we still need to initialize o.
29  *	if (!o) {
30  *		o = initialize();
31  *		refcount_set(r, 1);
32  *	}
33  *	mutex_unlock(m);
34  * }
35  *
36  * or
37  * if (refcount_dec(r)) {
38  *	mutex_lock(m);
 *	// Now that we have the mutex o can't be initialized/uninitialized
40  *	// by any other thread, check that the refcount value is still 0
41  *	// to guard against the thread above already having reinitialized o
42  *	if (!refcount_val(r) && o)
 *		uninitialize(o);
44  *	mutex_unlock(m);
45  * }
46  *
 * where r is the reference counter, o is the object and m the mutex
48  * protecting the object.
49  */
50 
/*
 * struct refcount - reference counter accessed atomically
 * @val: current count; read/written only through the refcount_*()
 *	 helpers so all accesses are atomic
 */
struct refcount {
	unsigned int val;
};
54 
/*
 * refcount_inc() - atomically increases the counter by 1
 * Returns true on success. If the current value is 0 the counter is
 * left unchanged and false is returned (the object is not live, see
 * the initialization pattern above).
 */
bool refcount_inc(struct refcount *r);
/*
 * refcount_dec() - atomically decreases the counter by 1
 * Returns true when this call caused the value to reach 0, false
 * otherwise. Must not be called when the value already is 0 since
 * each refcount_dec() is supposed to match an earlier refcount_inc().
 */
bool refcount_dec(struct refcount *r);
59 
60 static inline void refcount_set(struct refcount *r, unsigned int val)
61 {
62 	atomic_store_uint(&r->val, val);
63 }
64 
65 static inline unsigned int refcount_val(struct refcount *r)
66 {
67 	return atomic_load_uint(&r->val);
68 }
69 
70 #endif /*!__KERNEL_REFCOUNT_H*/
71