/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2017, Linaro Limited
 */

#ifndef __KERNEL_REFCOUNT_H
#define __KERNEL_REFCOUNT_H

#include <atomic.h>

/*
 * Reference counter
 *
 * When val is 0, refcount_inc() does not change the value and returns false.
 * Otherwise, it increments the value and returns true.
 *
 * refcount_dec() decrements the value and returns true when the call
 * caused the value to become 0, false otherwise.
 *
 * Since each call to refcount_dec() is supposed to match a call to
 * refcount_inc(), refcount_dec() called for val == 0 should never happen.
 *
 * This behaviour makes this pattern possible:
 * if (!refcount_inc(r)) {
 *	mutex_lock(m);
 *	// Some other thread may have initialized o by now so check that
 *	// we still need to initialize o.
 *	if (!o) {
 *		o = initialize();
 *		refcount_set(r, 1);
 *	}
 *	mutex_unlock(m);
 * }
 *
 * or
 * if (refcount_dec(r)) {
 *	mutex_lock(m);
 *	// Now that we have the mutex o can't be initialized/uninitialized
 *	// by any other thread, check that the refcount value is still 0
 *	// to guard against the thread above already having reinitialized o.
 *	if (!refcount_val(r) && o)
 *		uninitialize(o);
 *	mutex_unlock(m);
 * }
 *
 * where r is the reference counter, o is the object, and m is the mutex
 * protecting the object.
 */
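
/*
 * A fuller sketch tying the two fragments above together as a get/put
 * pair. The names obj, obj_mutex, obj_refcount, initialize() and
 * uninitialize() are hypothetical and only illustrate the pattern;
 * they are not part of this API.
 *
 * static struct object *obj;
 * static struct mutex obj_mutex;
 * static struct refcount obj_refcount;
 *
 * struct object *obj_get(void)
 * {
 *	// Fast path: take a reference without holding the mutex
 *	if (!refcount_inc(&obj_refcount)) {
 *		mutex_lock(&obj_mutex);
 *		if (!obj) {
 *			obj = initialize();
 *			refcount_set(&obj_refcount, 1);
 *		} else if (!refcount_inc(&obj_refcount)) {
 *			// obj exists but the counter dropped to 0. With
 *			// the mutex held no other thread can set a
 *			// non-zero value, so taking the first reference
 *			// again is safe.
 *			refcount_set(&obj_refcount, 1);
 *		}
 *		mutex_unlock(&obj_mutex);
 *	}
 *	return obj;
 * }
 *
 * void obj_put(void)
 * {
 *	// Slow path only when this was the last reference
 *	if (refcount_dec(&obj_refcount)) {
 *		mutex_lock(&obj_mutex);
 *		if (!refcount_val(&obj_refcount) && obj) {
 *			uninitialize(obj);
 *			obj = NULL;
 *		}
 *		mutex_unlock(&obj_mutex);
 *	}
 * }
 */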

struct refcount {
	unsigned int val;
};

/* If val > 0, increments val and returns true, else returns false */
bool refcount_inc(struct refcount *r);
/* Decrements val, returns true if the call made val reach 0, else false */
bool refcount_dec(struct refcount *r);
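
/*
 * A minimal sketch of how refcount_inc() could be implemented with a
 * compare-and-swap loop. It assumes <atomic.h> provides
 * atomic_cas_uint(p, oval, nval), a CAS that on failure updates *oval
 * with the value currently stored; the real implementation lives in
 * the corresponding .c file and may differ.
 *
 * bool refcount_inc(struct refcount *r)
 * {
 *	unsigned int oval = atomic_load_uint(&r->val);
 *
 *	while (true) {
 *		// A zero count means the object is (being) destroyed,
 *		// so a new reference must not be created here.
 *		if (!oval)
 *			return false;
 *		// Try to publish oval + 1; on failure oval is refreshed
 *		// with the current value and the loop retries.
 *		if (atomic_cas_uint(&r->val, &oval, oval + 1))
 *			return true;
 *	}
 * }
 *
 * refcount_dec() is symmetric: it decrements with the same kind of
 * loop and returns true when the new value is 0.
 */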

/* Sets the refcount value to val */
static inline void refcount_set(struct refcount *r, unsigned int val)
{
	atomic_store_uint(&r->val, val);
}

/* Returns the current refcount value */
static inline unsigned int refcount_val(struct refcount *r)
{
	return atomic_load_uint(&r->val);
}

#endif /*!__KERNEL_REFCOUNT_H*/