#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_ATOMIC_H_

#include <asm/types.h>
#include <asm/system.h>

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
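
/*
 * Illustrative sketch (not part of this header): two bit addresses in
 * the same L1 cacheline hash to the same lock, while an address one
 * cacheline away may get a different lock. The addresses below are
 * hypothetical and assume L1_CACHE_BYTES == 32 for the arithmetic.
 *
 *	unsigned long *a = (unsigned long *)0x1000;
 *	unsigned long *b = (unsigned long *)0x1010;	// same cacheline as a
 *	unsigned long *c = (unsigned long *)0x1020;	// next cacheline
 *
 *	ATOMIC_HASH(a) == ATOMIC_HASH(b);	// 0x1000/32 == 0x1010/32 == 128
 *	ATOMIC_HASH(a) != ATOMIC_HASH(c);	// 129 & (ATOMIC_HASH_SIZE-1) differs
 */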

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	__raw_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);		\
	__raw_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * NMI events can occur at any time, including when interrupts have been
 * disabled by *_irqsave().  So you can get NMI events occurring while a
 * *_bit function is holding a spin lock.  If the NMI handler also wants
 * to do bit manipulation (and they do) then you can get a deadlock
 * between the original caller of *_bit() and the NMI handler.
 *
 * by Keith Owens
 */
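
/*
 * Sketch of the deadlock described above (hypothetical sequence; the
 * bitmap name `flags` is illustrative):
 *
 *	CPU0: set_bit(0, &flags)
 *	        _atomic_spin_lock_irqsave()	<- holds hash lock, IRQs off
 *	        ... NMI arrives (NMIs ignore local_irq_save) ...
 *	NMI:  set_bit(1, &flags)
 *	        _atomic_spin_lock_irqsave()	<- same hash bucket: spins forever
 *
 * The NMI handler can never take the lock, because the interrupted
 * context holds it and cannot run again until the NMI returns.
 */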

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p |= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
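
/*
 * Illustrative usage (the bitmap name and bit number are hypothetical):
 *
 *	static unsigned long device_flags;	// at least one word of bits
 *	#define DEV_READY_BIT 0
 *
 *	set_bit(DEV_READY_BIT, &device_flags);	// atomically set bit 0
 */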

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p &= ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
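
/*
 * Illustrative usage with the barrier mentioned above (names are
 * hypothetical): when clear_bit() releases a lock-like bit, a barrier
 * is needed so prior stores are visible before the bit appears clear.
 *
 *	... stores inside the critical section ...
 *	smp_mb__before_clear_bit();
 *	clear_bit(DEV_LOCK_BIT, &device_flags);	// release the bit
 */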

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and, on x86, may not be reordered; it may be
 * reordered on other architectures.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p ^= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
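
/*
 * Illustrative usage (hypothetical names): flipping a mode bit without
 * reading it first.
 *
 *	change_bit(DEV_ENABLE_BIT, &device_flags);	// 0 -> 1 or 1 -> 0
 */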

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and, on x86, cannot be reordered; it may be
 * reordered on other architectures.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old | mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
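
/*
 * Illustrative usage (hypothetical names): a simple test-and-set lock
 * bit.  The returned old value tells us whether we won the race.
 *
 *	while (test_and_set_bit(DEV_LOCK_BIT, &device_flags))
 *		cpu_relax();			// someone else holds it; spin
 *	... critical section ...
 *	smp_mb__before_clear_bit();
 *	clear_bit(DEV_LOCK_BIT, &device_flags);	// release
 */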

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and, on x86, cannot be reordered; it may be
 * reordered on other architectures.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old & ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
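
/*
 * Illustrative usage (hypothetical names): atomically consuming a
 * pending-work flag, so only one caller acts on it.
 *
 *	if (test_and_clear_bit(DEV_PENDING_BIT, &device_flags))
 *		handle_pending_work();		// hypothetical handler
 */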

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old ^ mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
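
/*
 * Illustrative usage (hypothetical names): flip a mode bit and branch
 * on what the mode used to be.
 *
 *	if (test_and_change_bit(DEV_MODE_BIT, &device_flags))
 *		leave_mode();			// bit was 1, now 0
 *	else
 *		enter_mode();			// bit was 0, now 1
 */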

#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H_ */