#ifndef __LINUX_SPINLOCK_UP_H
#define __LINUX_SPINLOCK_UP_H

#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif

#include <asm/processor.h>	/* for cpu_relax() */
#include <asm/barrier.h>

/*
 * include/linux/spinlock_up.h - UP-debug version of spinlocks.
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * In the debug case, 1 means unlocked, 0 means locked. (the values
 * are inverted, to catch initialization bugs)
 *
 * No atomicity anywhere, we are on UP. However, we still need
 * the compiler barriers, because we do not want the compiler to
 * move potentially faulting instructions (notably user accesses)
 * into the locked sequence, resulting in non-atomic execution.
 */
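
/*
 * Illustrative sketch of the inverted debug semantics, assuming the
 * CONFIG_DEBUG_SPINLOCK layout from spinlock_types_up.h, where
 * __ARCH_SPIN_LOCK_UNLOCKED expands to { 1 }: a lock that was never
 * initialized (all zeroes) already reads as "locked", so the bug
 * surfaces on first use:
 *
 *	arch_spinlock_t good = __ARCH_SPIN_LOCK_UNLOCKED;
 *	arch_spinlock_t bad = { 0 };		-- initialization forgotten
 *
 *	arch_spin_trylock(&good);		-- returns 1: was free, now taken
 *	arch_spin_trylock(&bad);		-- returns 0: bug caught
 *	arch_spin_is_locked(&bad);		-- already reports "locked"
 */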

#ifdef CONFIG_DEBUG_SPINLOCK
#define arch_spin_is_locked(x)		((x)->slock == 0)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	lock->slock = 0;
	barrier();
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	char oldval = lock->slock;

	lock->slock = 0;
	barrier();

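	/* oldval is 1 if the lock was free, 0 if it was already held */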
	return oldval > 0;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	barrier();
	lock->slock = 1;
}

/*
 * Read-write spinlocks. No debug version.
 */
#define arch_read_lock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_write_lock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_read_trylock(lock)	({ barrier(); (void)(lock); 1; })
#define arch_write_trylock(lock)	({ barrier(); (void)(lock); 1; })
#define arch_read_unlock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_write_unlock(lock)	do { barrier(); (void)(lock); } while (0)

#else /* DEBUG_SPINLOCK */
#define arch_spin_is_locked(lock)	((void)(lock), 0)
/* for sched/core.c and kernel_lock.c: */
# define arch_spin_lock(lock)		do { barrier(); (void)(lock); } while (0)
# define arch_spin_lock_flags(lock, flags)	do { barrier(); (void)(lock); } while (0)
# define arch_spin_unlock(lock)	do { barrier(); (void)(lock); } while (0)
# define arch_spin_trylock(lock)	({ barrier(); (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */

#define arch_spin_is_contended(lock)	(((void)(lock), 0))

#endif /* __LINUX_SPINLOCK_UP_H */