/*
 * Copyright (c) 1994 - 1997, 1999, 2000 Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 2000 Silicon Graphics, Inc.
 *
 * SPDX-License-Identifier:	GPL-2.0
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/types.h>
#include <asm/byteorder.h> /* sigh ... */

#ifdef __KERNEL__

#include <asm/sgidefs.h>
#include <asm/system.h>

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/__ffs.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

/*
 * Only disable interrupts for kernel-mode code, to keep user-mode code
 * that dares to use kernel include files alive.
 */
#define __bi_flags unsigned long flags
#define __bi_cli() __cli()
#define __bi_save_flags(x) __save_flags(x)
#define __bi_save_and_cli(x) __save_and_cli(x)
#define __bi_restore_flags(x) __restore_flags(x)
#else
#define __bi_flags
#define __bi_cli()
#define __bi_save_flags(x)
#define __bi_save_and_cli(x)
#define __bi_restore_flags(x)
#endif /* __KERNEL__ */

#ifdef CONFIG_CPU_HAS_LLSC

#include <asm/mipsregs.h>

/*
 * These functions for MIPS ISA > 1 are interrupt- and SMP-safe and
 * interrupt-friendly.
 */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# set_bit\n\t"
		"or\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}
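
/*
 * Editorial sketch (not part of the original file): the ll/sc pair above
 * implements a lock-free retry loop.  "sc" stores only if no other CPU
 * or exception wrote the word since the matching "ll", and it writes
 * 1 (success) or 0 (failure) back into its source register, so the
 * "beqz ... 1b" retries until the update lands.  In C-like pseudocode:
 *
 *	do {
 *		old = *m;				// ll
 *		new = old | (1UL << (nr & 0x1f));	// or
 *	} while (!store_conditional(m, new));		// sc + beqz
 *
 * where store_conditional() is a hypothetical stand-in for the sc
 * instruction's success flag.
 */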

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m |= 1UL << (nr & 31);
}
#define PLATFORM__SET_BIT
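
/*
 * Usage sketch (illustrative only, the bitmap below is made up):
 * __set_bit() is safe only while no other CPU or interrupt handler can
 * touch the same word, e.g. while filling in a freshly created bitmap:
 *
 *	unsigned long map[2] = { 0, 0 };	// 64 bits, private so far
 *	__set_bit(35, map);			// word 1, bit 3
 *	set_bit(35, map);			// atomic equivalent
 */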

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# clear_bit\n\t"
		"and\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b\n\t"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & 0x1f))), "m" (*m));
}
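
/*
 * Editorial note: unlike set_bit(), the mask operand passed in here is
 * already the complement of the bit (~(1UL << ...)), so a single "and"
 * clears the bit inside the same ll/sc retry loop described above.
 */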

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# change_bit\n\t"
		"xor\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_set_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}
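
/*
 * Common pattern (illustrative only, every name below is made up): the
 * returned old value makes test_and_set_bit() usable as a simple claim
 * flag:
 *
 *	if (!test_and_set_bit(0, &busy))	// we claimed the bit
 *		start_transfer();		// hypothetical helper
 *	else
 *		return -EBUSY;			// someone else owns it
 */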

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_clear_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"xor\t%2, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_change_bit\n"
		"1:\tll\t%0, %1\n\t"
		"xor\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

#else /* MIPS I */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	int mask;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a |= mask;
	__bi_restore_flags(flags);
}

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	int mask;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	int mask;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a &= ~mask;
	__bi_restore_flags(flags);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	int mask;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a ^= mask;
	__bi_restore_flags(flags);
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	__bi_restore_flags(flags);

	return retval;
}
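
/*
 * Editorial note: on MIPS I the atomicity of the variants above comes
 * from disabling interrupts around the read-modify-write (the
 * __bi_save_and_cli()/__bi_restore_flags() pair).  That protects
 * against local interrupt handlers but, unlike ll/sc, not against
 * other processors.
 */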

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

#undef __bi_flags
#undef __bi_cli
#undef __bi_save_flags
#undef __bi_save_and_cli
#undef __bi_restore_flags

#endif /* MIPS I */

/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __inline__ int test_bit(int nr, const volatile void *addr)
{
	return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0;
}
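
/*
 * Example (illustrative values): test_bit() is a plain read, so it needs
 * no locking by itself, but the result may be stale by the time it is
 * used:
 *
 *	unsigned int w = 0x00000020;
 *	test_bit(5, &w);	// returns 1
 *	test_bit(6, &w);	// returns 0
 */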

#ifndef __MIPSEB__

/* Little endian versions. */

/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static __inline__ int find_first_zero_bit (void *addr, unsigned size)
{
	unsigned long dummy;
	int res;

	if (!size)
		return 0;

	__asm__ (".set\tnoreorder\n\t"
		".set\tnoat\n"
		"1:\tsubu\t$1,%6,%0\n\t"
		"blez\t$1,2f\n\t"
		"lw\t$1,(%5)\n\t"
		"addiu\t%5,4\n\t"
#if (_MIPS_ISA == _MIPS_ISA_MIPS2 ) || (_MIPS_ISA == _MIPS_ISA_MIPS3 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
		"beql\t%1,$1,1b\n\t"
		"addiu\t%0,32\n\t"
#else
		"addiu\t%0,32\n\t"
		"beq\t%1,$1,1b\n\t"
		"nop\n\t"
		"subu\t%0,32\n\t"
#endif
#ifdef __MIPSEB__
#error "Fix this for big endian"
#endif /* __MIPSEB__ */
		"li\t%1,1\n"
		"1:\tand\t%2,$1,%1\n\t"
		"beqz\t%2,2f\n\t"
		"sll\t%1,%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"add\t%0,%0,1\n\t"
		".set\tat\n\t"
		".set\treorder\n"
		"2:"
		: "=r" (res), "=r" (dummy), "=r" (addr)
		: "0" ((signed int) 0), "1" ((unsigned int) 0xffffffff),
		  "2" (addr), "r" (size)
		: "$1");

	return res;
}
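
/*
 * Worked example (values made up): with the two words below, the first
 * word is all ones, so the scan moves on and finds bit 0 of the second
 * word, i.e. overall bit 32:
 *
 *	unsigned int map[2] = { 0xffffffff, 0xfffffffe };
 *	find_first_zero_bit(map, 64);	// returns 32
 */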

/*
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	int set = 0, bit = offset & 31, res;
	unsigned long dummy;

	if (bit) {
		/*
		 * Look for zero in first byte
		 */
#ifdef __MIPSEB__
#error "Fix this for big endian byte order"
#endif
		__asm__(".set\tnoreorder\n\t"
			".set\tnoat\n"
			"1:\tand\t$1,%4,%1\n\t"
			"beqz\t$1,1f\n\t"
			"sll\t%1,%1,1\n\t"
			"bnez\t%1,1b\n\t"
			"addiu\t%0,1\n\t"
			".set\tat\n\t"
			".set\treorder\n"
			"1:"
			: "=r" (set), "=r" (dummy)
			: "0" (0), "1" (1 << bit), "r" (*p)
			: "$1");
		if (set < (32 - bit))
			return set + offset;
		set = 32 - bit;
		p++;
	}
	/*
	 * No zero yet, search remaining full bytes for a zero
	 */
	res = find_first_zero_bit(p, size - 32 * (p - (unsigned int *) addr));
	return offset + set + res;
}

#endif /* !(__MIPSEB__) */

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	unsigned int __res;
	unsigned int mask = 1;

	__asm__ (
		".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		"move\t%0,$0\n"
		"1:\tand\t$1,%2,%1\n\t"
		"beqz\t$1,2f\n\t"
		"sll\t%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"addiu\t%0,1\n\t"
		".set\tat\n\t"
		".set\treorder\n"
		"2:\n\t"
		: "=&r" (__res), "=r" (mask)
		: "r" (word), "1" (mask)
		: "$1");

	return __res;
}
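
/*
 * Example: ffz() counts from bit 0 upwards, so
 *
 *	ffz(0x0000ffff);	// returns 16 (bits 0..15 are set)
 *	ffz(0xfffffffe);	// returns 0
 *
 * As documented above, the result is undefined for ~0UL, so callers
 * should compare against ~0UL first.
 */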

#ifdef __KERNEL__

/*
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
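
/*
 * Example: hweight32(0x000000ff) == 8 and hweight32(0x80000001) == 2.
 * The generic_hweight*() helpers are assumed to be the plain C
 * population-count routines provided elsewhere in the tree.
 */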

#endif /* __KERNEL__ */

#ifdef __MIPSEB__
/*
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit(void *addr, int size, int offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}
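
/*
 * Sketch of the control flow above: a leading partial word is padded
 * with ones below @offset (tmp |= ~0UL >> (32-offset)) so already-skipped
 * bits can never be reported; found_first additionally fills the bits
 * beyond @size in a trailing partial word with ones; ffz() then picks
 * the zero out of whichever word survived the scan.
 */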

/* Linus sez that gcc can optimize the following correctly, we'll see if this
 * holds on the Sparc as it does for the ALPHA.
 */

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static int find_first_zero_bit (void *addr, unsigned size);
#endif

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

#endif /* (__MIPSEB__) */

/* Now for the ext2 filesystem bit operations and helper routines. */

#ifdef __MIPSEB__
static __inline__ int ext2_set_bit(int nr, void * addr)
{
	int mask, retval, flags;
	unsigned char *ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_and_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;
	restore_flags(flags);
	return retval;
}

static __inline__ int ext2_clear_bit(int nr, void * addr)
{
	int mask, retval, flags;
	unsigned char *ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_and_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;
	restore_flags(flags);
	return retval;
}

static __inline__ int ext2_test_bit(int nr, const void * addr)
{
	int mask;
	const unsigned char *ADDR = (const unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	return ((mask & *ADDR) != 0);
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if(offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if(size < 32)
			goto found_first;
		if(~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while(size & ~31UL) {
		if(~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if(!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
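
/*
 * Editorial note: ext2 on-disk bitmaps are little-endian (bit 0 lives in
 * the lowest-addressed byte), so on a big-endian CPU a whole-word load
 * puts that byte at the most significant end.  __swab32() restores the
 * little-endian bit numbering before ffz() is applied.
 */
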
#else /* !(__MIPSEB__) */

/* Native ext2 byte ordering, just collapse using defines. */
#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr))
#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr))
#define ext2_test_bit(nr, addr) test_bit((nr), (addr))
#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
#define ext2_find_next_zero_bit(addr, size, offset) \
		find_next_zero_bit((addr), (size), (offset))

#endif /* !(__MIPSEB__) */

/*
 * Bitmap functions for the minix filesystem.
 * FIXME: These assume that Minix uses the native byte/bitorder.
 * This limits the Minix filesystem's value for data exchange very much.
 */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* _ASM_BITOPS_H */