xref: /rk3399_rockchip-uboot/arch/mips/include/asm/bitops.h (revision ea40a05422bdc87a7af5dc349e8adce59f982e72)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/types.h>
#include <asm/byteorder.h>		/* sigh ... */

#ifdef __KERNEL__

#include <asm/sgidefs.h>
#include <asm/system.h>
#include <linux/config.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

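/*
 * Illustrative sketch (not part of the original header): because
 * clear_bit() is atomic but implies no memory barrier, a caller that
 * uses a bit as a lock must add the barriers explicitly, e.g.:
 *
 *	while (test_and_set_bit(0, &lock_word))
 *		;				// spin until we own bit 0
 *	... critical section ...
 *	smp_mb__before_clear_bit();		// order prior stores before unlock
 *	clear_bit(0, &lock_word);		// release the "lock" bit
 *
 * lock_word is a hypothetical unsigned long used only for this example.
 */
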
/*
 * Only disable interrupt for kernel mode stuff to keep usermode stuff
 * that dares to use kernel include files alive.
 */
#define __bi_flags unsigned long flags
#define __bi_cli() __cli()
#define __bi_save_flags(x) __save_flags(x)
#define __bi_save_and_cli(x) __save_and_cli(x)
#define __bi_restore_flags(x) __restore_flags(x)
#else
#define __bi_flags
#define __bi_cli()
#define __bi_save_flags(x)
#define __bi_save_and_cli(x)
#define __bi_restore_flags(x)
#endif /* __KERNEL__ */

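/*
 * Illustrative sketch (assumption, not original text): in kernel mode a
 * function body written as
 *
 *	__bi_flags;
 *	__bi_save_and_cli(flags);
 *	*a |= mask;
 *	__bi_restore_flags(flags);
 *
 * expands to roughly
 *
 *	unsigned long flags;
 *	__save_and_cli(flags);		// save IRQ state, disable interrupts
 *	*a |= mask;
 *	__restore_flags(flags);		// restore previous IRQ state
 *
 * while in user mode every __bi_* macro expands to nothing, so the same
 * source compiles to a plain non-atomic update.
 */
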
#ifdef CONFIG_CPU_HAS_LLSC

#include <asm/mipsregs.h>

/*
 * These functions for MIPS ISA > 1 are interrupt- and SMP-safe while
 * remaining interrupt friendly.
 */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# set_bit\n\t"
		"or\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}

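/*
 * Illustrative sketch (assumption, not original text): the ll/sc loop
 * above is morally equivalent to the following C, retried until the
 * store-conditional succeeds without another CPU touching the word:
 *
 *	do {
 *		temp = *m;			// ll: load-linked
 *		temp |= 1UL << (nr & 0x1f);	// or: set the bit
 *	} while (!store_conditional(m, temp));	// sc: fails on contention
 *
 * store_conditional() is a hypothetical helper named only for this
 * comment; the real primitive is the sc instruction itself.
 */
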
/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m |= 1UL << (nr & 31);
}
#define PLATFORM__SET_BIT

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# clear_bit\n\t"
		"and\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b\n\t"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & 0x1f))), "m" (*m));
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# change_bit\n\t"
		"xor\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_set_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}

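/*
 * Illustrative sketch (not part of the original header): the atomic
 * read-modify-write of test_and_set_bit() is what makes it usable for
 * simple mutual exclusion, e.g. claiming a device exactly once:
 *
 *	static unsigned long claimed;		// hypothetical flag word
 *
 *	if (test_and_set_bit(0, &claimed))
 *		return -EBUSY;			// somebody else got here first
 *	... exclusive initialization ...
 *
 * Exactly one caller can ever observe the old value 0.
 */
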
/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_clear_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"xor\t%2, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_change_bit\n"
		"1:\tll\t%0, %1\n\t"
		"xor\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

#else /* MIPS I */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	int	mask;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a |= mask;
	__bi_restore_flags(flags);
}

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	int	mask;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	int	mask;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a &= ~mask;
	__bi_restore_flags(flags);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	int	mask;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a ^= mask;
	__bi_restore_flags(flags);
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

#undef __bi_flags
#undef __bi_cli
#undef __bi_save_flags
#undef __bi_save_and_cli
#undef __bi_restore_flags

#endif /* MIPS I */

/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __inline__ int test_bit(int nr, const volatile void *addr)
{
	return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0;
}

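/*
 * Illustrative sketch (not part of the original header): test_bit() is
 * a plain read, so it combines naturally with the atomic setters:
 *
 *	static unsigned long status;		// hypothetical status word
 *
 *	set_bit(3, &status);
 *	if (test_bit(3, &status))
 *		;				// always true here: bit 3 was just set
 *
 * Note the read itself carries no ordering guarantees.
 */
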
#ifndef __MIPSEB__

/* Little endian versions. */

/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static __inline__ int find_first_zero_bit (void *addr, unsigned size)
{
	unsigned long dummy;
	int res;

	if (!size)
		return 0;

	__asm__ (".set\tnoreorder\n\t"
		".set\tnoat\n"
		"1:\tsubu\t$1,%6,%0\n\t"
		"blez\t$1,2f\n\t"
		"lw\t$1,(%5)\n\t"
		"addiu\t%5,4\n\t"
#if (_MIPS_ISA == _MIPS_ISA_MIPS2 ) || (_MIPS_ISA == _MIPS_ISA_MIPS3 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
		"beql\t%1,$1,1b\n\t"
		"addiu\t%0,32\n\t"
#else
		"addiu\t%0,32\n\t"
		"beq\t%1,$1,1b\n\t"
		"nop\n\t"
		"subu\t%0,32\n\t"
#endif
#ifdef __MIPSEB__
#error "Fix this for big endian"
#endif /* __MIPSEB__ */
		"li\t%1,1\n"
		"1:\tand\t%2,$1,%1\n\t"
		"beqz\t%2,2f\n\t"
		"sll\t%1,%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"add\t%0,%0,1\n\t"
		".set\tat\n\t"
		".set\treorder\n"
		"2:"
		: "=r" (res), "=r" (dummy), "=r" (addr)
		: "0" ((signed int) 0), "1" ((unsigned int) 0xffffffff),
		  "2" (addr), "r" (size)
		: "$1");

	return res;
}

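/*
 * Illustrative sketch (not part of the original header): a typical use
 * is allocating the lowest free slot in a fixed-size bitmap:
 *
 *	#define NSLOTS 64			// hypothetical table size
 *	static unsigned long slot_map[NSLOTS / 32];
 *
 *	int slot = find_first_zero_bit(slot_map, NSLOTS);
 *	if (slot < NSLOTS)
 *		set_bit(slot, slot_map);	// claim it
 *
 * Callers must check the result against @size, since the return value
 * is only meaningful when it is below @size.
 */
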
/*
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bit number to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	int set = 0, bit = offset & 31, res;
	unsigned long dummy;

	if (bit) {
		/*
		 * Look for zero in the first word
		 */
#ifdef __MIPSEB__
#error "Fix this for big endian byte order"
#endif
		__asm__(".set\tnoreorder\n\t"
			".set\tnoat\n"
			"1:\tand\t$1,%4,%1\n\t"
			"beqz\t$1,1f\n\t"
			"sll\t%1,%1,1\n\t"
			"bnez\t%1,1b\n\t"
			"addiu\t%0,1\n\t"
			".set\tat\n\t"
			".set\treorder\n"
			"1:"
			: "=r" (set), "=r" (dummy)
			: "0" (0), "1" (1 << bit), "r" (*p)
			: "$1");
		if (set < (32 - bit))
			return set + offset;
		set = 32 - bit;
		p++;
	}
	/*
	 * No zero yet, search remaining full words for a zero
	 */
	res = find_first_zero_bit(p, size - 32 * (p - (unsigned int *) addr));
	return offset + set + res;
}

#endif /* !(__MIPSEB__) */

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	unsigned int	__res;
	unsigned int	mask = 1;

	__asm__ (
		".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		"move\t%0,$0\n"
		"1:\tand\t$1,%2,%1\n\t"
		"beqz\t$1,2f\n\t"
		"sll\t%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"addiu\t%0,1\n\t"
		".set\tat\n\t"
		".set\treorder\n"
		"2:\n\t"
		: "=&r" (__res), "=r" (mask)
		: "r" (word), "1" (mask)
		: "$1");

	return __res;
}

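/*
 * Illustrative examples (not part of the original header):
 *
 *	ffz(0x00000000) == 0		// bit 0 is already clear
 *	ffz(0x00000001) == 1
 *	ffz(0x0000ffff) == 16
 *	ffz(0xfffffffe) == 0
 *
 * ffz(~0UL) is undefined, which is why callers compare against ~0UL
 * before searching.
 */
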
#ifdef __KERNEL__

/*
 * hweightN - returns the Hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

#endif /* __KERNEL__ */

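/*
 * Illustrative examples (not part of the original header):
 *
 *	hweight8(0xb1)    == 4		// 1011 0001 has four bits set
 *	hweight16(0xff00) == 8
 *	hweight32(0)      == 0
 *
 * The generic_hweight*() helpers are provided by the common bitops
 * support code, not by this file.
 */
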
#ifdef __MIPSEB__
/*
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bit number to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit(void *addr, int size, int offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}

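/*
 * Illustrative sketch (not part of the original header): walking every
 * clear bit in a bitmap by restarting the search after each hit:
 *
 *	int bit = find_first_zero_bit(map, nbits);
 *	while (bit < nbits) {
 *		handle_free_slot(bit);		// hypothetical callback
 *		bit = find_next_zero_bit(map, nbits, bit + 1);
 *	}
 *
 * Note the argument order in this header is (addr, size, offset); the
 * search yields a value >= nbits once the region is exhausted, which
 * terminates the loop.
 */
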
/* Linus sez that gcc can optimize the following correctly, we'll see if this
 * holds on the Sparc as it does for the ALPHA.
 */

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static int find_first_zero_bit (void *addr, unsigned size);
#endif

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

#endif /* (__MIPSEB__) */

/* Now for the ext2 filesystem bit operations and helper routines. */

#ifdef __MIPSEB__
static __inline__ int ext2_set_bit(int nr, void * addr)
{
	int		mask, retval, flags;
	unsigned char	*ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_and_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;
	restore_flags(flags);
	return retval;
}

static __inline__ int ext2_clear_bit(int nr, void * addr)
{
	int		mask, retval, flags;
	unsigned char	*ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_and_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;
	restore_flags(flags);
	return retval;
}

static __inline__ int ext2_test_bit(int nr, const void * addr)
{
	int			mask;
	const unsigned char	*ADDR = (const unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	return ((mask & *ADDR) != 0);
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if(offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if(size < 32)
			goto found_first;
		if(~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while(size & ~31UL) {
		if(~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if(!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
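
/*
 * Illustrative worked example (assumption, not original text): with
 * offset == 8, the plain mask ~0UL >> (32-8) is 0x000000ff, i.e. ext2
 * bits 0-7, which live in byte 0.  On big-endian MIPS a 32-bit load
 * puts byte 0 in bits 31-24 of the register, and
 *
 *	__swab32(0x000000ff) == 0xff000000
 *
 * is exactly the mask for those register bits.  Swabbing the constant
 * once is cheaper than swabbing every word loaded from the bitmap.
 */
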
#else /* !(__MIPSEB__) */

/* Native ext2 byte ordering, just collapse using defines. */
#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr))
#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr))
#define ext2_test_bit(nr, addr) test_bit((nr), (addr))
#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
#define ext2_find_next_zero_bit(addr, size, offset) \
		find_next_zero_bit((addr), (size), (offset))

#endif /* !(__MIPSEB__) */

/*
 * Bitmap functions for the minix filesystem.
 * FIXME: These assume that Minix uses the native byte/bitorder.
 * This limits the Minix filesystem's value for data exchange very much.
 */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* _ASM_BITOPS_H */