/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/types.h>
#include <asm/byteorder.h>		/* sigh ... */

#ifdef __KERNEL__

#include <asm/sgidefs.h>
#include <asm/system.h>

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/__ffs.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

/*
 * Only disable interrupts for kernel-mode code, so that user-mode code
 * that dares to include kernel headers keeps working.
 */
#define __bi_flags unsigned long flags
#define __bi_cli() __cli()
#define __bi_save_flags(x) __save_flags(x)
#define __bi_save_and_cli(x) __save_and_cli(x)
#define __bi_restore_flags(x) __restore_flags(x)
#else
#define __bi_flags
#define __bi_cli()
#define __bi_save_flags(x)
#define __bi_save_and_cli(x)
#define __bi_restore_flags(x)
#endif /* __KERNEL__ */

#ifdef CONFIG_CPU_HAS_LLSC

#include <asm/mipsregs.h>

/*
 * These functions, for MIPS ISA level 2 and above, are interrupt- and
 * SMP-safe while remaining interrupt friendly.
 */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# set_bit\n\t"
		"or\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}
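
/*
 * Illustrative usage (editor's example, not part of the original header):
 * the word index is nr >> 5 and the bit position within that word is
 * nr & 0x1f, so bit 37 of a two-word bitmap lands in word 1, bit 5:
 *
 *	unsigned long bitmap[2] = { 0, 0 };
 *	set_bit(37, bitmap);		bitmap[1] now reads 0x20
 */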

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m |= 1UL << (nr & 31);
}
#define PLATFORM__SET_BIT
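
/*
 * Illustrative usage (editor's example, not part of the original header):
 * the non-atomic __set_bit() is appropriate when no concurrent access is
 * possible, e.g. while initialising a bitmap before publishing it:
 *
 *	unsigned long map[2] = { 0, 0 };
 *	__set_bit(5, map);	plain read-modify-write, no LL/SC loop
 */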

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# clear_bit\n\t"
		"and\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b\n\t"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & 0x1f))), "m" (*m));
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# change_bit\n\t"
		"xor\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_set_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}
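
/*
 * Illustrative usage (editor's example, not part of the original header):
 * test_and_set_bit() returns the previous value of the bit, so it can be
 * used to claim a flag exactly once:
 *
 *	static volatile unsigned long claimed;
 *
 *	if (!test_and_set_bit(0, &claimed)) {
 *		... bit was 0 before, this caller claimed it ...
 *		clear_bit(0, &claimed);
 *	}
 */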

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_clear_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"xor\t%2, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_change_bit\n"
		"1:\tll\t%0, %1\n\t"
		"xor\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

#else /* MIPS I */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	int	mask;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a |= mask;
	__bi_restore_flags(flags);
}

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	int	mask;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	int	mask;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a &= ~mask;
	__bi_restore_flags(flags);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	int	mask;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a ^= mask;
	__bi_restore_flags(flags);
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

#undef __bi_flags
#undef __bi_cli
#undef __bi_save_flags
#undef __bi_restore_flags

#endif /* MIPS I */

/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __inline__ int test_bit(int nr, const volatile void *addr)
{
	return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0;
}
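
/*
 * Illustrative usage (editor's example, not part of the original header):
 *
 *	unsigned int word = 0x104;	bits 2 and 8 set
 *	test_bit(2, &word);		returns non-zero
 *	test_bit(3, &word);		returns 0
 */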

#ifndef __MIPSEB__

/* Little endian versions. */

/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static __inline__ int find_first_zero_bit (void *addr, unsigned size)
{
	unsigned long dummy;
	int res;

	if (!size)
		return 0;

	__asm__ (".set\tnoreorder\n\t"
		".set\tnoat\n"
		"1:\tsubu\t$1,%6,%0\n\t"
		"blez\t$1,2f\n\t"
		"lw\t$1,(%5)\n\t"
		"addiu\t%5,4\n\t"
#if (_MIPS_ISA == _MIPS_ISA_MIPS2 ) || (_MIPS_ISA == _MIPS_ISA_MIPS3 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
		"beql\t%1,$1,1b\n\t"
		"addiu\t%0,32\n\t"
#else
		"addiu\t%0,32\n\t"
		"beq\t%1,$1,1b\n\t"
		"nop\n\t"
		"subu\t%0,32\n\t"
#endif
#ifdef __MIPSEB__
#error "Fix this for big endian"
#endif /* __MIPSEB__ */
		"li\t%1,1\n"
		"1:\tand\t%2,$1,%1\n\t"
		"beqz\t%2,2f\n\t"
		"sll\t%1,%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"add\t%0,%0,1\n\t"
		".set\tat\n\t"
		".set\treorder\n"
		"2:"
		: "=r" (res), "=r" (dummy), "=r" (addr)
		: "0" ((signed int) 0), "1" ((unsigned int) 0xffffffff),
		  "2" (addr), "r" (size)
		: "$1");

	return res;
}
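
/*
 * Illustrative usage (editor's example, not part of the original header):
 *
 *	unsigned int map = 0x0000000f;		bits 0-3 set
 *	find_first_zero_bit(&map, 32);		returns 4
 */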

/*
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	int set = 0, bit = offset & 31, res;
	unsigned long dummy;

	if (bit) {
		/*
		 * Look for a zero in the first word
		 */
#ifdef __MIPSEB__
#error "Fix this for big endian byte order"
#endif
		__asm__(".set\tnoreorder\n\t"
			".set\tnoat\n"
			"1:\tand\t$1,%4,%1\n\t"
			"beqz\t$1,1f\n\t"
			"sll\t%1,%1,1\n\t"
			"bnez\t%1,1b\n\t"
			"addiu\t%0,1\n\t"
			".set\tat\n\t"
			".set\treorder\n"
			"1:"
			: "=r" (set), "=r" (dummy)
			: "0" (0), "1" (1 << bit), "r" (*p)
			: "$1");
		if (set < (32 - bit))
			return set + offset;
		set = 32 - bit;
		p++;
	}
	/*
	 * No zero yet, search remaining full words for a zero
	 */
	res = find_first_zero_bit(p, size - 32 * (p - (unsigned int *) addr));
	return offset + set + res;
}

#endif /* !(__MIPSEB__) */

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	unsigned int	__res;
	unsigned int	mask = 1;

	__asm__ (
		".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		"move\t%0,$0\n"
		"1:\tand\t$1,%2,%1\n\t"
		"beqz\t$1,2f\n\t"
		"sll\t%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"addiu\t%0,1\n\t"
		".set\tat\n\t"
		".set\treorder\n"
		"2:\n\t"
		: "=&r" (__res), "=r" (mask)
		: "r" (word), "1" (mask)
		: "$1");

	return __res;
}
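
/*
 * Illustrative example (editor's note, not part of the original header):
 *
 *	ffz(0xffff00ff);	returns 8, the lowest clear bit
 *	ffz(0x00000000);	returns 0
 */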

#ifdef __KERNEL__

/*
 * hweightN - returns the hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
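
/*
 * Illustrative example (editor's note, not part of the original header):
 *
 *	hweight32(0xf0f0f0f0);	returns 16
 *	hweight8(0x03);		returns 2
 */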

#endif /* __KERNEL__ */

#ifdef __MIPSEB__
/*
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit(void *addr, int size, int offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}

/* Linus sez that gcc can optimize the following correctly, we'll see if this
 * holds on the Sparc as it does for the ALPHA.
 */

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static int find_first_zero_bit (void *addr, unsigned size);
#endif

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

#endif /* (__MIPSEB__) */

/* Now for the ext2 filesystem bit operations and helper routines. */

#ifdef __MIPSEB__
static __inline__ int ext2_set_bit(int nr, void * addr)
{
	int		mask, retval, flags;
	unsigned char	*ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_and_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;
	restore_flags(flags);
	return retval;
}

static __inline__ int ext2_clear_bit(int nr, void * addr)
{
	int		mask, retval, flags;
	unsigned char	*ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_and_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;
	restore_flags(flags);
	return retval;
}

static __inline__ int ext2_test_bit(int nr, const void * addr)
{
	int			mask;
	const unsigned char	*ADDR = (const unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	return ((mask & *ADDR) != 0);
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if(offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if(size < 32)
			goto found_first;
		if(~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while(size & ~31UL) {
		if(~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if(!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
#else /* !(__MIPSEB__) */

/* Native ext2 byte ordering, just collapse using defines. */
#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr))
#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr))
#define ext2_test_bit(nr, addr) test_bit((nr), (addr))
#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
#define ext2_find_next_zero_bit(addr, size, offset) \
		find_next_zero_bit((addr), (size), (offset))

#endif /* !(__MIPSEB__) */
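
/*
 * Illustrative example (editor's note, not part of the original header):
 * ext2 bitmaps use little-endian bit numbering within each byte, so the
 * same on-disk bitmap is interpreted identically on either endianness.
 * Assuming a word-aligned buffer:
 *
 *	unsigned char map[4] = { 0x01, 0x00, 0x00, 0x00 };
 *	ext2_test_bit(0, map);			non-zero on both endiannesses
 *	ext2_find_next_zero_bit(map, 32, 0);	returns 1 on both
 */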

/*
 * Bitmap functions for the minix filesystem.
 * FIXME: These assume that Minix uses the native byte/bitorder.
 * This limits the Minix filesystem's value for data exchange very much.
 */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* _ASM_BITOPS_H */