/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2017, Linaro Limited
 */

#ifndef __KERNEL_REFCOUNT_H
#define __KERNEL_REFCOUNT_H

#include <atomic.h>
#include <stdbool.h>

/*
 * Reference counter
 *
 * When val is 0, refcount_inc() does not change the value and returns false.
 * Otherwise, it increments the value and returns true.
 *
 * refcount_dec() decrements the value and returns true when the call
 * caused the value to become 0, false otherwise.
 *
 * Since each call to refcount_dec() is supposed to match a call to
 * refcount_inc(), refcount_dec() called for val == 0 should never happen.
 *
 * This behaviour makes this pattern possible:
 * if (!refcount_inc(r)) {
 *	mutex_lock(m);
 *	// Some other thread may have initialized o by now so check that
 *	// we still need to initialize o.
 *	if (!o) {
 *		o = initialize();
 *		refcount_set(r, 1);
 *	}
 *	mutex_unlock(m);
 * }
 *
 * or
 * if (refcount_dec(r)) {
 *	mutex_lock(m);
 *	// Now that we have the mutex o can't be initialized/uninitialized
 *	// by any other thread, check that the refcount value is still 0
 *	// to guard against the thread above already having reinitialized o
 *	if (!refcount_val(r) && o)
 *		uninitialize(o);
 *	mutex_unlock(m);
 * }
 *
 * where r is the reference counter, o is the object and m the mutex
 * protecting the object.
 */

struct refcount {
	unsigned int val;
};

/* Increases refcount by 1 unless val is 0, returns true if val was increased */
bool refcount_inc(struct refcount *r);
/* Decreases refcount by 1, returns true if val reached 0, false otherwise */
bool refcount_dec(struct refcount *r);

static inline void refcount_set(struct refcount *r, unsigned int val)
{
	atomic_store_uint(&r->val, val);
}

static inline unsigned int refcount_val(struct refcount *r)
{
	return atomic_load_uint(&r->val);
}

#endif /*!__KERNEL_REFCOUNT_H*/
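
/*
 * Minimal usage sketch (illustration only, excluded from compilation): a
 * lazily initialized, reference counted object following the pattern
 * documented in the header comment above. The object type "struct obj",
 * the helpers obj_alloc()/obj_free() and the names o, m and r are
 * hypothetical; the mutex API is assumed to be OP-TEE's <kernel/mutex.h>
 * and the header to be reachable as <kernel/refcount.h>.
 */
#if 0
#include <kernel/mutex.h>
#include <kernel/refcount.h>
#include <stdlib.h>

struct obj {
	int dummy;
};

static struct obj *o;
static struct mutex m = MUTEX_INITIALIZER;
static struct refcount r;

/* Take a reference, initializing the object on first use */
static struct obj *obj_get(void)
{
	if (!refcount_inc(&r)) {
		mutex_lock(&m);
		/*
		 * Some other thread may have initialized o by now so
		 * check that we still need to initialize it.
		 */
		if (!o) {
			o = calloc(1, sizeof(*o));	/* obj_alloc() stand-in */
			refcount_set(&r, 1);
		} else {
			/*
			 * Not part of the documented pattern: a thread that
			 * lost the race still needs its own reference, which
			 * now succeeds since val > 0 and we hold the mutex.
			 */
			refcount_inc(&r);
		}
		mutex_unlock(&m);
	}
	return o;
}

/* Drop a reference, tearing the object down when the last one goes away */
static void obj_put(void)
{
	if (refcount_dec(&r)) {
		mutex_lock(&m);
		/*
		 * Now that we hold the mutex, re-check that the refcount is
		 * still 0 in case obj_get() already reinitialized o.
		 */
		if (!refcount_val(&r) && o) {
			free(o);	/* obj_free() stand-in */
			o = NULL;
		}
		mutex_unlock(&m);
	}
}
#endif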