/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_LOCAL64_H
#define _ASM_GENERIC_LOCAL64_H

#include <linux/percpu.h>
#include <asm/types.h>

/*
 * A signed 64-bit type for operations which are atomic for a single CPU.
 * Usually used in combination with per-cpu variables.
 *
 * This is the default implementation: it wraps local_t on 64-bit kernels
 * and falls back to atomic64_t on 32-bit kernels, which is rather
 * pointless.  The whole point of local64_t is that some processors can
 * perform atomic adds and subtracts in a manner which is atomic wrt IRQs
 * running on this CPU.  local64_t allows exploitation of such capabilities.
 */
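
/*
 * Usage sketch (illustrative only; the per-cpu counter "my_counter" and the
 * helpers around it are hypothetical, not part of this header):
 *
 *	static DEFINE_PER_CPU(local64_t, my_counter);
 *
 *	static void count_event(void)
 *	{
 *		local64_inc(get_cpu_ptr(&my_counter));
 *		put_cpu_ptr(&my_counter);
 *	}
 *
 *	static u64 count_total(void)
 *	{
 *		u64 sum = 0;
 *		int cpu;
 *
 *		for_each_possible_cpu(cpu)
 *			sum += local64_read(per_cpu_ptr(&my_counter, cpu));
 *		return sum;
 *	}
 */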

/* Implement in terms of atomics. */

#if BITS_PER_LONG == 64

#include <asm/local.h>

typedef struct {
	local_t a;
} local64_t;

#define LOCAL64_INIT(i)	{ LOCAL_INIT(i) }

#define local64_read(l)		local_read(&(l)->a)
#define local64_set(l,i)	local_set((&(l)->a),(i))
#define local64_inc(l)		local_inc(&(l)->a)
#define local64_dec(l)		local_dec(&(l)->a)
#define local64_add(i,l)	local_add((i),(&(l)->a))
#define local64_sub(i,l)	local_sub((i),(&(l)->a))

#define local64_sub_and_test(i, l) local_sub_and_test((i), (&(l)->a))
#define local64_dec_and_test(l) local_dec_and_test(&(l)->a)
#define local64_inc_and_test(l) local_inc_and_test(&(l)->a)
#define local64_add_negative(i, l) local_add_negative((i), (&(l)->a))
#define local64_add_return(i, l) local_add_return((i), (&(l)->a))
#define local64_sub_return(i, l) local_sub_return((i), (&(l)->a))
#define local64_inc_return(l)	local_inc_return(&(l)->a)

#define local64_cmpxchg(l, o, n) local_cmpxchg((&(l)->a), (o), (n))
#define local64_xchg(l, n)	local_xchg((&(l)->a), (n))
#define local64_add_unless(l, _a, u) local_add_unless((&(l)->a), (_a), (u))
#define local64_inc_not_zero(l)	local_inc_not_zero(&(l)->a)

/* Non-atomic variants, i.e. preemption is disabled and the value is not
 * touched from interrupt context.  Some archs can optimize this case well;
 * see the sketch below these macros. */
#define __local64_inc(l)	local64_set((l), local64_read(l) + 1)
#define __local64_dec(l)	local64_set((l), local64_read(l) - 1)
#define __local64_add(i,l)	local64_set((l), local64_read(l) + (i))
#define __local64_sub(i,l)	local64_set((l), local64_read(l) - (i))
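
/*
 * Sketch of the non-atomic variants (illustrative only; "tx_bytes" is a
 * hypothetical per-cpu counter, updated here with preemption disabled and
 * never from interrupt context):
 *
 *	static DEFINE_PER_CPU(local64_t, tx_bytes);
 *
 *	static void account_tx(unsigned long nbytes)
 *	{
 *		preempt_disable();
 *		__local64_add(nbytes, this_cpu_ptr(&tx_bytes));
 *		preempt_enable();
 *	}
 */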

#else /* BITS_PER_LONG != 64 */

#include <linux/atomic.h>

/* Don't use a plain typedef: we don't want these mixed up with atomic64_t's. */
typedef struct {
	atomic64_t a;
} local64_t;

#define LOCAL64_INIT(i)	{ ATOMIC_LONG_INIT(i) }

#define local64_read(l)		atomic64_read(&(l)->a)
#define local64_set(l,i)	atomic64_set((&(l)->a),(i))
#define local64_inc(l)		atomic64_inc(&(l)->a)
#define local64_dec(l)		atomic64_dec(&(l)->a)
#define local64_add(i,l)	atomic64_add((i),(&(l)->a))
#define local64_sub(i,l)	atomic64_sub((i),(&(l)->a))

#define local64_sub_and_test(i, l) atomic64_sub_and_test((i), (&(l)->a))
#define local64_dec_and_test(l) atomic64_dec_and_test(&(l)->a)
#define local64_inc_and_test(l) atomic64_inc_and_test(&(l)->a)
#define local64_add_negative(i, l) atomic64_add_negative((i), (&(l)->a))
#define local64_add_return(i, l) atomic64_add_return((i), (&(l)->a))
#define local64_sub_return(i, l) atomic64_sub_return((i), (&(l)->a))
#define local64_inc_return(l)	atomic64_inc_return(&(l)->a)

#define local64_cmpxchg(l, o, n) atomic64_cmpxchg((&(l)->a), (o), (n))
#define local64_xchg(l, n)	atomic64_xchg((&(l)->a), (n))
#define local64_add_unless(l, _a, u) atomic64_add_unless((&(l)->a), (_a), (u))
#define local64_inc_not_zero(l)	atomic64_inc_not_zero(&(l)->a)

/* Non-atomic variants, i.e. preemption is disabled and the value is not
 * touched from interrupt context.  Some archs can optimize this case well. */
#define __local64_inc(l)	local64_set((l), local64_read(l) + 1)
#define __local64_dec(l)	local64_set((l), local64_read(l) - 1)
#define __local64_add(i,l)	local64_set((l), local64_read(l) + (i))
#define __local64_sub(i,l)	local64_set((l), local64_read(l) - (i))

#endif /* BITS_PER_LONG != 64 */

#endif /* _ASM_GENERIC_LOCAL64_H */