xref: /optee_os/core/include/kernel/refcount.h (revision b97e9666f646ca681890b1f5c61b8d62f0160d34)
1*b97e9666SJens Wiklander /*
2*b97e9666SJens Wiklander  * Copyright (c) 2017, Linaro Limited
3*b97e9666SJens Wiklander  *
4*b97e9666SJens Wiklander  * SPDX-License-Identifier: BSD-2-Clause
5*b97e9666SJens Wiklander  */
6*b97e9666SJens Wiklander 
7*b97e9666SJens Wiklander #ifndef __KERNEL_REFCOUNT_H
8*b97e9666SJens Wiklander #define __KERNEL_REFCOUNT_H
9*b97e9666SJens Wiklander 
10*b97e9666SJens Wiklander #include <atomic.h>
11*b97e9666SJens Wiklander 
12*b97e9666SJens Wiklander /*
13*b97e9666SJens Wiklander  * Reference counter
14*b97e9666SJens Wiklander  *
15*b97e9666SJens Wiklander  * When val is 0, refcount_inc() does not change the value and returns false.
16*b97e9666SJens Wiklander  * Otherwise, it increments the value and returns true.
17*b97e9666SJens Wiklander  *
18*b97e9666SJens Wiklander  * refcount_dec() decrements the value and returns true when the call
19*b97e9666SJens Wiklander  * caused the value to become 0, false otherwise.
20*b97e9666SJens Wiklander  *
21*b97e9666SJens Wiklander  * Since each call to refcount_dec() is supposed to match a call to
22*b97e9666SJens Wiklander  * refcount_inc(), refcount_dec() called for val == 0 should never happen.
23*b97e9666SJens Wiklander  *
24*b97e9666SJens Wiklander  * This behaviour makes this pattern possible:
25*b97e9666SJens Wiklander  * if (!refcount_inc(r)) {
26*b97e9666SJens Wiklander  *	mutex_lock(m);
27*b97e9666SJens Wiklander  *	// Some other thread may have initialized o by now so check that
28*b97e9666SJens Wiklander  *	// we still need to initialize o.
29*b97e9666SJens Wiklander  *	if (!o) {
30*b97e9666SJens Wiklander  *		o = initialize();
31*b97e9666SJens Wiklander  *		refcount_set(r, 1);
32*b97e9666SJens Wiklander  *	}
33*b97e9666SJens Wiklander  *	mutex_unlock(m);
34*b97e9666SJens Wiklander  * }
35*b97e9666SJens Wiklander  *
36*b97e9666SJens Wiklander  * or
37*b97e9666SJens Wiklander  * if (refcount_dec(r)) {
38*b97e9666SJens Wiklander  *	mutex_lock(m);
 *	// Now that we have the mutex o can't be initialized/uninitialized
40*b97e9666SJens Wiklander  *	// by any other thread, check that the refcount value is still 0
41*b97e9666SJens Wiklander  *	// to guard against the thread above already having reinitialized o
42*b97e9666SJens Wiklander  *	if (!refcount_val(r) && o)
 *		uninitialize(o);
44*b97e9666SJens Wiklander  *	mutex_unlock(m);
45*b97e9666SJens Wiklander  * }
46*b97e9666SJens Wiklander  *
 * where r is the reference counter, o is the object and m the mutex
48*b97e9666SJens Wiklander  * protecting the object.
49*b97e9666SJens Wiklander  */
50*b97e9666SJens Wiklander 
/*
 * struct refcount - reference counter state
 * @val: the current count; must only be read/written through the
 *       refcount_*() helpers below, which use atomic accesses
 */
struct refcount {
	unsigned int val;
};
54*b97e9666SJens Wiklander 
/*
 * refcount_inc() - take a reference
 * Increases refcount by 1, return true if val > 0 else false.
 * When val is 0 the counter is left unchanged (the guarded object is
 * considered uninitialized, see the usage pattern above).
 */
bool refcount_inc(struct refcount *r);
/*
 * refcount_dec() - drop a reference
 * Decreases refcount by 1, return true if val == 0 else false.
 * Must only be called to match a successful refcount_inc(), so calling
 * it while val == 0 is a usage error (see the comment above).
 */
bool refcount_dec(struct refcount *r);
59*b97e9666SJens Wiklander 
/*
 * refcount_set() - unconditionally set the counter to @val
 *
 * Atomic store, typically used to (re)initialize the counter to 1 once
 * the guarded object has been constructed while holding the protecting
 * mutex (see the usage pattern above).
 */
static inline void refcount_set(struct refcount *r, unsigned int val)
{
	atomic_store_uint(&r->val, val);
}
64*b97e9666SJens Wiklander 
/*
 * refcount_val() - read the current counter value
 *
 * Atomic load; note the returned value may already be stale when the
 * caller inspects it unless concurrent updates are excluded by other
 * means (e.g. the mutex in the usage pattern above).
 */
static inline unsigned int refcount_val(struct refcount *r)
{
	return atomic_load_uint(&r->val);
}
69*b97e9666SJens Wiklander 
70*b97e9666SJens Wiklander #endif /*!__KERNEL_REFCOUNT_H*/
71