/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#include <asm/addrspace.h>
#include <asm/sync.h>

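/*
 * __sync(), rmb() and wmb() emit the corresponding SYNC barrier via the
 * __SYNC() helpers from <asm/sync.h>; the "memory" clobber also makes each
 * of them a compiler barrier.
 */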
static inline void __sync(void)
{
	asm volatile(__SYNC(full, always) ::: "memory");
}

static inline void rmb(void)
{
	asm volatile(__SYNC(rmb, always) ::: "memory");
}
#define rmb rmb

static inline void wmb(void)
{
	asm volatile(__SYNC(wmb, always) ::: "memory");
}
#define wmb wmb

#define fast_mb()	__sync()

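/*
 * __fast_iob() performs a dummy uncached load from CKSEG1; the load cannot
 * complete until any posted writes ahead of it have been pushed out to the
 * system bus.
 */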
#define __fast_iob()						\
	__asm__ __volatile__(					\
		".set push\n\t"					\
		".set noreorder\n\t"				\
		"lw $0,%0\n\t"					\
		"nop\n\t"					\
		".set pop"					\
		: /* no output */				\
		: "m" (*(int *)CKSEG1)				\
		: "memory")
#ifdef CONFIG_CPU_CAVIUM_OCTEON
# define fast_iob()	do { } while (0)
#else /* ! CONFIG_CPU_CAVIUM_OCTEON */
# ifdef CONFIG_SGI_IP28
#  define fast_iob()						\
	__asm__ __volatile__(					\
		".set push\n\t"					\
		".set noreorder\n\t"				\
		"lw $0,%0\n\t"					\
		"sync\n\t"					\
		"lw $0,%0\n\t"					\
		".set pop"					\
		: /* no output */				\
		: "m" (*(int *)CKSEG1ADDR(0x1fa00004))		\
		: "memory")
# else
#  define fast_iob()						\
	do {							\
		__sync();					\
		__fast_iob();					\
	} while (0)
# endif
#endif /* CONFIG_CPU_CAVIUM_OCTEON */

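/*
 * CPUs that need an explicit write-buffer flush implement mb() and iob()
 * via wbflush(); all others use the SYNC-based fast_mb()/fast_iob() above.
 */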
#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>

#define mb()		wbflush()
#define iob()		wbflush()

#else /* !CONFIG_CPU_HAS_WB */

#define mb()		fast_mb()
#define iob()		fast_iob()

#endif /* !CONFIG_CPU_HAS_WB */

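/*
 * On weakly ordered systems the SMP barriers must emit real SYNC
 * instructions; on strongly ordered ones a compiler barrier is enough.
 */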
#if defined(CONFIG_WEAK_ORDERING)
# define __smp_mb()	__sync()
# define __smp_rmb()	rmb()
# define __smp_wmb()	wmb()
#else
# define __smp_mb()	barrier()
# define __smp_rmb()	barrier()
# define __smp_wmb()	barrier()
#endif

/*
 * When LL/SC does imply order, it must also be a compiler barrier to prevent
 * the compiler from reordering where the CPU will not. When it does not imply
 * order, the compiler is also free to reorder across the LL/SC loop and
 * ordering will be done by smp_llsc_mb() and friends.
 */
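/*
 * Illustrative sketch only (not the kernel's exact code): an LL/SC loop
 * using these definitions would typically place __WEAK_LLSC_MB after the
 * retry branch and pass __LLSC_CLOBBER in its clobber list, roughly:
 *
 *	"1:	ll	%0, %1			\n"
 *	"	addu	%0, %2			\n"
 *	"	sc	%0, %1			\n"
 *	"	beqz	%0, 1b			\n"
 *	"	" __stringify(__WEAK_LLSC_MB) "	\n"
 *	... : __LLSC_CLOBBER);
 */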
#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
# define __WEAK_LLSC_MB		sync
# define smp_llsc_mb()						\
	__asm__ __volatile__(__stringify(__WEAK_LLSC_MB) : : :"memory")
# define __LLSC_CLOBBER
#else
# define __WEAK_LLSC_MB
# define smp_llsc_mb()		do { } while (0)
# define __LLSC_CLOBBER		"memory"
#endif

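/*
 * On Octeon a write barrier before an LL/SC sequence is treated as
 * sufficient; other CPUs fall back to smp_llsc_mb().
 */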
#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define smp_mb__before_llsc() smp_wmb()
#define __smp_mb__before_llsc() __smp_wmb()
/* Cause previous writes to become visible on all CPUs as soon as possible */
#define nudge_writes() __asm__ __volatile__(".set push\n\t"		\
					    ".set arch=octeon\n\t"	\
					    "syncw\n\t"			\
					    ".set pop" : : : "memory")
#else
#define smp_mb__before_llsc() smp_llsc_mb()
#define __smp_mb__before_llsc() smp_llsc_mb()
#define nudge_writes() mb()
#endif

/*
 * In the Loongson3 LL/SC workaround case, all of our LL/SC loops already have
 * a completion barrier immediately preceding the LL instruction. Therefore we
 * can skip emitting a barrier from __smp_mb__before_atomic().
 */
#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS
# define __smp_mb__before_atomic()
#else
# define __smp_mb__before_atomic()	__smp_mb__before_llsc()
#endif

#define __smp_mb__after_atomic()	smp_llsc_mb()

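/*
 * sync_ginv() emits a SYNC of the ginv type, used to wait for preceding
 * global invalidate (GINV*) operations to complete.
 */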
static inline void sync_ginv(void)
{
	asm volatile(__SYNC(ginv, always));
}

#include <asm-generic/barrier.h>

#endif /* __ASM_BARRIER_H */