/* arch/nios2/include/asm/bitops/non-atomic.h */

#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_

#include <asm/types.h>

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p |= mask;
}
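
/*
 * Illustrative sketch (not part of the original header; names are
 * hypothetical): how BIT_MASK() and BIT_WORD() split a bit index.
 * Assuming BITS_PER_LONG == 32 (as on nios2), bit 35 lands in word 1,
 * bit 3, so __set_bit(35, map) behaves like map[1] |= (1UL << 3):
 *
 *	unsigned long map[2] = { 0, 0 };
 *	__set_bit(35, map);		map[1] is now 0x8
 */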

/**
 * __clear_bit - Clear a bit in memory
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p &= ~mask;
}

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p ^= mask;
}
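
/*
 * Illustrative sketch (hypothetical names): because __change_bit() XORs
 * the mask in, toggling the same bit twice restores the original word:
 *
 *	unsigned long flags[1] = { 0 };
 *	__change_bit(0, flags);		flags[0] is now 0x1
 *	__change_bit(0, flags);		flags[0] is 0x0 again
 */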

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old | mask;
	return (old & mask) != 0;
}
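
/*
 * Illustrative sketch (hypothetical names): claim a single-owner flag.
 * Since this variant is non-atomic, the pattern is only safe when all
 * accesses are serialized by a lock or by a single thread of control:
 *
 *	unsigned long busy[1] = { 0 };
 *	if (!__test_and_set_bit(0, busy)) {
 *		bit was clear before; this caller now owns the resource
 *	}
 */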

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old & ~mask;
	return (old & mask) != 0;
}
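
/*
 * Illustrative sketch (hypothetical names): consume a pending-event flag
 * in one step, again assuming accesses are already serialized:
 *
 *	unsigned long pending[1] = { 1 };
 *	if (__test_and_clear_bit(0, pending)) {
 *		event was pending and has now been acknowledged
 *	}
 */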

/**
 * __test_and_change_bit - Toggle a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * WARNING: non-atomic and it can be reordered!
 * You must protect multiple accesses with a lock.
 */
static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old ^ mask;
	return (old & mask) != 0;
}

/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static inline int test_bit(int nr, const volatile unsigned long *addr)
{
	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}
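
/*
 * Illustrative sketch: test_bit(nr, addr) is equivalent to
 * (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1, since
 * BIT_WORD(nr) is nr / BITS_PER_LONG and, for a power-of-two word size,
 * nr & (BITS_PER_LONG - 1) is nr % BITS_PER_LONG:
 *
 *	unsigned long map[2] = { 0, 0x8 };	hypothetical bitmap
 *	test_bit(35, map);			returns 1 with 32-bit longs
 */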

#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */