/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/stringify.h>
#include <asm/compiler.h>
#include <asm/hazards.h>

#if defined(CONFIG_CPU_HAS_DIEI)
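/*
 * CONFIG_CPU_HAS_DIEI: the CPU implements the MIPS32r2/MIPS64r2 DI and
 * EI instructions, which clear and set Status.IE atomically without a
 * read-modify-write of the whole Status register.
 */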

static inline void arch_local_irq_disable(void)
{
	__asm__ __volatile__(
	"	.set	push					\n"
	"	.set	noat					\n"
	"	di						\n"
	"	" __stringify(__irq_disable_hazard) "		\n"
	"	.set	pop					\n"
	: /* no outputs */
	: /* no inputs */
	: "memory");
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags;

	__asm__ __volatile__(
	"	.set	push					\n"
	"	.set	reorder					\n"
	"	.set	noat					\n"
#if defined(CONFIG_CPU_LOONGSON64) || defined(CONFIG_CPU_LOONGSON32)
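	/*
	 * Loongson cores are special-cased here because their DI does
	 * not dependably write the old Status value to the destination
	 * GPR the way MIPSr2 specifies, so Status is read with MFC0
	 * before interrupts are disabled.
	 */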
	"	mfc0	%[flags], $12				\n"
	"	di						\n"
#else
	"	di	%[flags]				\n"
#endif
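	/* Keep only Status.IE, so the saved flags word is 0 or 1. */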
	"	andi	%[flags], 1				\n"
	"	" __stringify(__irq_disable_hazard) "		\n"
	"	.set	pop					\n"
	: [flags] "=r" (flags)
	: /* no inputs */
	: "memory");

	return flags;
}

static inline void arch_local_irq_restore(unsigned long flags)
{
	unsigned long __tmp1;

	__asm__ __volatile__(
	"	.set	push					\n"
	"	.set	noreorder				\n"
	"	.set	noat					\n"
#if defined(CONFIG_IRQ_MIPS_CPU)
	/*
	 * Slow, but doesn't suffer from a relatively unlikely race
	 * condition we've had since day 1.
	 */
	"	beqz	%[flags], 1f				\n"
	"	di						\n"
	"	ei						\n"
	"1:							\n"
#else
	/*
	 * Fast, dangerous.  Life is fun, life is good.
	 */
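	/*
	 * The mfc0/ins/mtc0 sequence below is a non-atomic
	 * read-modify-write of c0_status: if an interrupt or exception
	 * handler changes other Status bits (e.g. the IM mask bits) in
	 * between, the mtc0 overwrites that update.  That is the race
	 * the CONFIG_IRQ_MIPS_CPU path above avoids.
	 */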
	"	mfc0	$1, $12					\n"
	"	ins	$1, %[flags], 0, 1			\n"
	"	mtc0	$1, $12					\n"
#endif
	"	" __stringify(__irq_disable_hazard) "		\n"
	"	.set	pop					\n"
	: [flags] "=r" (__tmp1)
	: "0" (flags)
	: "memory");
}
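
/*
 * Usage sketch (illustrative only, not part of this header): the
 * save/restore pair nests correctly whether or not IRQs were already
 * disabled on entry:
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();
 *	...critical section with IRQs masked...
 *	arch_local_irq_restore(flags);
 *
 * Kernel code normally reaches these helpers through the generic
 * local_irq_save()/local_irq_restore() wrappers in <linux/irqflags.h>.
 */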

#else
/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
void arch_local_irq_disable(void);
unsigned long arch_local_irq_save(void);
void arch_local_irq_restore(unsigned long flags);
#endif /* CONFIG_CPU_HAS_DIEI */

static inline void arch_local_irq_enable(void)
{
	__asm__ __volatile__(
	"	.set	push					\n"
	"	.set	reorder					\n"
	"	.set	noat					\n"
#if defined(CONFIG_CPU_HAS_DIEI)
	"	ei						\n"
#else
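	/*
	 * Set IE (bit 0) and clear EXL, ERL and KSU (bits 1..4) of
	 * c0_status: ori forces bits 0..4 to one, xori then clears
	 * bits 1..4 again.
	 */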
	"	mfc0	$1, $12					\n"
	"	ori	$1, 0x1f				\n"
	"	xori	$1, 0x1e				\n"
	"	mtc0	$1, $12					\n"
#endif
	"	" __stringify(__irq_enable_hazard) "		\n"
	"	.set	pop					\n"
	: /* no outputs */
	: /* no inputs */
	: "memory");
}

static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;

	__asm__ __volatile__(
	"	.set	push					\n"
	"	.set	reorder					\n"
	"	mfc0	%[flags], $12				\n"
	"	.set	pop					\n"
	: [flags] "=r" (flags));

	return flags;
}

static inline int arch_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & 1);
}

static inline int arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}
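
/*
 * Usage sketch (illustrative only, not part of this header): the flags
 * word holds c0_status, so bit 0 (IE) decides the result:
 *
 *	unsigned long flags = arch_local_save_flags();
 *
 *	if (arch_irqs_disabled_flags(flags))
 *		handle_irqs_off_case();		(hypothetical helper)
 */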

#endif /* #ifndef __ASSEMBLY__ */

/*
 * Do the CPU's IRQ-state tracing from assembly code.
 */
#ifdef CONFIG_TRACE_IRQFLAGS
/* Reload some registers clobbered by trace_hardirqs_on */
#ifdef CONFIG_64BIT
# define TRACE_IRQS_RELOAD_REGS					\
	LONG_L	$11, PT_R11(sp);				\
	LONG_L	$10, PT_R10(sp);				\
	LONG_L	$9, PT_R9(sp);					\
	LONG_L	$8, PT_R8(sp);					\
	LONG_L	$7, PT_R7(sp);					\
	LONG_L	$6, PT_R6(sp);					\
	LONG_L	$5, PT_R5(sp);					\
	LONG_L	$4, PT_R4(sp);					\
	LONG_L	$2, PT_R2(sp)
#else
# define TRACE_IRQS_RELOAD_REGS					\
	LONG_L	$7, PT_R7(sp);					\
	LONG_L	$6, PT_R6(sp);					\
	LONG_L	$5, PT_R5(sp);					\
	LONG_L	$4, PT_R4(sp);					\
	LONG_L	$2, PT_R2(sp)
#endif
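/*
 * The reloaded registers are the argument registers plus $2 (v0):
 * the 64-bit N64 ABI passes arguments in $4..$11 (a0..a7), while O32
 * uses only $4..$7 (a0..a3).  Entry code still needs these values
 * after the call into trace_hardirqs_on clobbers them.
 */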
# define TRACE_IRQS_ON						\
	CLI;	/* make sure trace_hardirqs_on() is called at kernel level */ \
	jal	trace_hardirqs_on
# define TRACE_IRQS_ON_RELOAD					\
	TRACE_IRQS_ON;						\
	TRACE_IRQS_RELOAD_REGS
# define TRACE_IRQS_OFF						\
	jal	trace_hardirqs_off
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_ON_RELOAD
# define TRACE_IRQS_OFF
#endif
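
/*
 * Hypothetical entry-path fragment (illustrative only) showing the
 * intended pairing of these macros around an IRQ-state transition:
 *
 *	TRACE_IRQS_OFF			# record that IRQs are now masked
 *	...handle the exception...
 *	TRACE_IRQS_ON_RELOAD		# record the re-enable, then reload
 *					# the argument registers the call
 *					# clobbered
 */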

#endif /* _ASM_IRQFLAGS_H */