/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/types.h>
#include <asm/byteorder.h>		/* sigh ... */

#ifdef __KERNEL__

#include <asm/sgidefs.h>
#include <asm/system.h>
#include <linux/config.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
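
/*
 * Illustrative sketch (not part of the original header): a caller that
 * uses clear_bit() to release a lock bit supplies the barriers itself.
 * LOCK_BIT and lock_word below are hypothetical names.
 */
#if 0
	/* ... protected updates ... */
	smp_mb__before_clear_bit();	/* order prior stores before release */
	clear_bit(LOCK_BIT, &lock_word);
	smp_mb__after_clear_bit();	/* order the release before later ops */
#endif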

/*
 * Interrupts are disabled only for kernel-mode code; the no-op variants
 * below keep user-mode code that dares to include kernel headers alive.
 */
#define __bi_flags unsigned long flags
#define __bi_cli() __cli()
#define __bi_save_flags(x) __save_flags(x)
#define __bi_save_and_cli(x) __save_and_cli(x)
#define __bi_restore_flags(x) __restore_flags(x)
#else
#define __bi_flags
#define __bi_cli()
#define __bi_save_flags(x)
#define __bi_save_and_cli(x)
#define __bi_restore_flags(x)
#endif /* __KERNEL__ */

#ifdef CONFIG_CPU_HAS_LLSC

#include <asm/mipsregs.h>

/*
 * For MIPS ISA > 1 these functions are interrupt- and SMP-safe without
 * having to disable interrupts.
 */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# set_bit\n\t"
		"or\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}
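
/*
 * Usage sketch (illustrative only): @nr indexes across word boundaries,
 * so bit 37 lands in the second word of the hypothetical bitmap below.
 */
#if 0
	unsigned long bitmap[2] = { 0, 0 };

	set_bit(5, bitmap);	/* bitmap[0] == 0x20 */
	set_bit(37, bitmap);	/* bitmap[1] == 0x20: 37 >> 5 == 1, 37 & 31 == 5 */
	__set_bit(6, bitmap);	/* non-atomic variant, see below */
#endif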

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m |= 1UL << (nr & 31);
}
#define PLATFORM__SET_BIT

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# clear_bit\n\t"
		"and\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b\n\t"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & 0x1f))), "m" (*m));
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# change_bit\n\t"
		"xor\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}
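
/*
 * Usage sketch (illustrative, hypothetical names): change_bit() flips a
 * bit without a racy read-modify-write sequence.
 */
#if 0
	change_bit(LED_BIT, &gpio_shadow);	/* atomically toggles the bit */
#endif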

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_set_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}
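
/*
 * Usage sketch (illustrative, hypothetical names): because the old value
 * comes back atomically, test_and_set_bit() works as a try-acquire.
 */
#if 0
	if (test_and_set_bit(DEV_BUSY, &busy_word))
		return -EBUSY;			/* bit was already set */
	/* ... exclusive work ... */
	smp_mb__before_clear_bit();
	clear_bit(DEV_BUSY, &busy_word);	/* release */
#endif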

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_clear_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"xor\t%2, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}
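
/*
 * Usage sketch (illustrative, hypothetical names): test_and_clear_bit()
 * consumes a pending flag exactly once even against a concurrent setter.
 */
#if 0
	if (test_and_clear_bit(EVENT_PENDING, &event_word))
		handle_event();		/* runs once per posted event */
#endif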

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_change_bit\n"
		"1:\tll\t%0, %1\n\t"
		"xor\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

#else /* MIPS I */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	int	mask;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a |= mask;
	__bi_restore_flags(flags);
}

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	int	mask;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	int	mask;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a &= ~mask;
	__bi_restore_flags(flags);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	int	mask;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a ^= mask;
	__bi_restore_flags(flags);
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

#undef __bi_flags
#undef __bi_cli
#undef __bi_save_flags
#undef __bi_save_and_cli
#undef __bi_restore_flags

#endif /* MIPS I */

/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __inline__ int test_bit(int nr, volatile void *addr)
{
	return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0;
}
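
/*
 * Usage sketch (illustrative only): test_bit() is a pure read, so it is
 * safe in loops such as this population count over a bitmap.
 */
#if 0
	int i, count = 0;

	for (i = 0; i < nbits; i++)
		if (test_bit(i, bitmap))
			count++;	/* count == number of set bits */
#endif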

#ifndef __MIPSEB__

/* Little endian versions. */

/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static __inline__ int find_first_zero_bit (void *addr, unsigned size)
{
	unsigned long dummy;
	int res;

	if (!size)
		return 0;

	__asm__ (".set\tnoreorder\n\t"
		".set\tnoat\n"
		"1:\tsubu\t$1,%6,%0\n\t"
		"blez\t$1,2f\n\t"
		"lw\t$1,(%5)\n\t"
		"addiu\t%5,4\n\t"
#if (_MIPS_ISA == _MIPS_ISA_MIPS2 ) || (_MIPS_ISA == _MIPS_ISA_MIPS3 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
		"beql\t%1,$1,1b\n\t"
		"addiu\t%0,32\n\t"
#else
		"addiu\t%0,32\n\t"
		"beq\t%1,$1,1b\n\t"
		"nop\n\t"
		"subu\t%0,32\n\t"
#endif
#ifdef __MIPSEB__
#error "Fix this for big endian"
#endif /* __MIPSEB__ */
		"li\t%1,1\n"
		"1:\tand\t%2,$1,%1\n\t"
		"beqz\t%2,2f\n\t"
		"sll\t%1,%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"add\t%0,%0,1\n\t"
		".set\tat\n\t"
		".set\treorder\n"
		"2:"
		: "=r" (res), "=r" (dummy), "=r" (addr)
		: "0" ((signed int) 0), "1" ((unsigned int) 0xffffffff),
		  "2" (addr), "r" (size)
		: "$1");

	return res;
}
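
/*
 * Usage sketch (illustrative, hypothetical names): the classic free-slot
 * allocator over a fixed-size bitmap.
 */
#if 0
	int slot = find_first_zero_bit(map, MAX_SLOTS);

	if (slot < MAX_SLOTS)
		set_bit(slot, map);	/* claim it; use test_and_set_bit()
					   if allocators can race */
#endif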

/*
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	int set = 0, bit = offset & 31, res;
	unsigned long dummy;

	if (bit) {
		/*
		 * Look for zero in the first word
		 */
#ifdef __MIPSEB__
#error "Fix this for big endian byte order"
#endif
		__asm__(".set\tnoreorder\n\t"
			".set\tnoat\n"
			"1:\tand\t$1,%4,%1\n\t"
			"beqz\t$1,1f\n\t"
			"sll\t%1,%1,1\n\t"
			"bnez\t%1,1b\n\t"
			"addiu\t%0,1\n\t"
			".set\tat\n\t"
			".set\treorder\n"
			"1:"
			: "=r" (set), "=r" (dummy)
			: "0" (0), "1" (1 << bit), "r" (*p)
			: "$1");
		if (set < (32 - bit))
			return set + offset;
		set = 32 - bit;
		p++;
	}
	/*
	 * No zero yet, search remaining full words for a zero
	 */
	res = find_first_zero_bit(p, size - 32 * (p - (unsigned int *) addr));
	return offset + set + res;
}
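
/*
 * Usage sketch (illustrative only): every zero bit of a bitmap can be
 * visited by restarting the search one past the previous hit.
 */
#if 0
	int bit = find_next_zero_bit(map, nbits, 0);

	while (bit < nbits) {
		/* ... bit is clear here ... */
		bit = find_next_zero_bit(map, nbits, bit + 1);
	}
#endif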

#endif /* !(__MIPSEB__) */

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	unsigned int	__res;
	unsigned int	mask = 1;

	__asm__ (
		".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		"move\t%0,$0\n"
		"1:\tand\t$1,%2,%1\n\t"
		"beqz\t$1,2f\n\t"
		"sll\t%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"addiu\t%0,1\n\t"
		".set\tat\n\t"
		".set\treorder\n"
		"2:\n\t"
		: "=&r" (__res), "=r" (mask)
		: "r" (word), "1" (mask)
		: "$1");

	return __res;
}
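
/*
 * Worked examples (illustrative): ffz() scans upward from bit 0, so
 *
 *	ffz(0x0000ffff) == 16	(bits 0..15 set, bit 16 is the first zero)
 *	ffz(0xfffffffe) ==  0	(bit 0 is already clear)
 *	ffz(~0UL) is undefined	(check against ~0UL before calling)
 */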

#ifdef __KERNEL__

/*
 * hweightN - returns the Hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming weight of a number is the total number of bits set in it.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
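
/*
 * Worked examples (illustrative): hweight32(0xf0f0f0f0) == 16,
 * hweight16(0x00ff) == 8, hweight8(0x03) == 2.
 */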

#endif /* __KERNEL__ */

#ifdef __MIPSEB__
/*
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit(void *addr, int size, int offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}

/* Linus says that gcc can optimize the following correctly; we'll see if this
 * holds on the Sparc as it does for the Alpha.
 */

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static int find_first_zero_bit (void *addr, unsigned size);
#endif

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

#endif /* (__MIPSEB__) */

/* Now for the ext2 filesystem bit operations and helper routines. */

#ifdef __MIPSEB__
static __inline__ int ext2_set_bit(int nr, void * addr)
{
	int		mask, retval, flags;
	unsigned char	*ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_and_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;
	restore_flags(flags);
	return retval;
}

static __inline__ int ext2_clear_bit(int nr, void * addr)
{
	int		mask, retval, flags;
	unsigned char	*ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_and_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;
	restore_flags(flags);
	return retval;
}

static __inline__ int ext2_test_bit(int nr, const void * addr)
{
	int			mask;
	const unsigned char	*ADDR = (const unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	return ((mask & *ADDR) != 0);
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)
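
/*
 * Usage sketch (illustrative, hypothetical names): the ext2_* helpers use
 * little-endian bit order regardless of CPU endianness, so an on-disk
 * inode bitmap scans identically on both.
 */
#if 0
	unsigned long ino = ext2_find_first_zero_bit(inode_map, ninodes);

	if (ino < ninodes)
		ext2_set_bit(ino, inode_map);	/* mark inode in use */
#endif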

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if(offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if(size < 32)
			goto found_first;
		if(~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while(size & ~31UL) {
		if(~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if(!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
#else /* !(__MIPSEB__) */

/* Native ext2 byte ordering, just collapse using defines. */
#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr))
#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr))
#define ext2_test_bit(nr, addr) test_bit((nr), (addr))
#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
#define ext2_find_next_zero_bit(addr, size, offset) \
		find_next_zero_bit((addr), (size), (offset))

#endif /* !(__MIPSEB__) */

/*
 * Bitmap functions for the minix filesystem.
 * FIXME: These assume that Minix uses the native byte/bitorder.
 * This limits the Minix filesystem's value for data exchange very much.
 */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* _ASM_BITOPS_H */