/*
 * linux/include/asm-arm/proc-armv/system.h
 *
 * Copyright (C) 1996 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_PROC_SYSTEM_H
#define __ASM_PROC_SYSTEM_H

/*
 * Save the current interrupt enable state & disable IRQs
 */
#ifdef CONFIG_ARM64

/*
 * Save the current interrupt enable state
 * and disable IRQs/FIQs
 */
#define local_irq_save(flags) \
	({ \
	asm volatile( \
	"mrs %0, daif\n" \
	"msr daifset, #3" \
	: "=r" (flags) \
	: \
	: "memory"); \
	})

/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(flags) \
	({ \
	asm volatile( \
	"msr daif, %0" \
	: \
	: "r" (flags) \
	: "memory"); \
	})
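
/*
 * Typical pairing of local_irq_save()/local_irq_restore() above
 * (an illustrative sketch only; the variable name is hypothetical):
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	... critical section, runs with IRQs/FIQs masked ...
 *	local_irq_restore(flags);
 */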

/*
 * Enable IRQs/FIQs
 */
#define local_irq_enable() \
	({ \
	asm volatile( \
	"msr daifclr, #3" \
	: \
	: \
	: "memory"); \
	})

/*
 * Disable IRQs/FIQs
 */
#define local_irq_disable() \
	({ \
	asm volatile( \
	"msr daifset, #3" \
	: \
	: \
	: "memory"); \
	})

#else	/* CONFIG_ARM64 */

#define local_irq_save(x) \
	({ \
	unsigned long temp; \
	__asm__ __volatile__( \
	"mrs %0, cpsr @ local_irq_save\n" \
	" orr %1, %0, #128\n" \
	" msr cpsr_c, %1" \
	: "=r" (x), "=r" (temp) \
	: \
	: "memory"); \
	})

/*
 * Enable IRQs
 */
#define local_irq_enable() \
	({ \
	unsigned long temp; \
	__asm__ __volatile__( \
	"mrs %0, cpsr @ local_irq_enable\n" \
	" bic %0, %0, #128\n" \
	" msr cpsr_c, %0" \
	: "=r" (temp) \
	: \
	: "memory"); \
	})

/*
 * Disable IRQs
 */
#define local_irq_disable() \
	({ \
	unsigned long temp; \
	__asm__ __volatile__( \
	"mrs %0, cpsr @ local_irq_disable\n" \
	" orr %0, %0, #128\n" \
	" msr cpsr_c, %0" \
	: "=r" (temp) \
	: \
	: "memory"); \
	})

/*
 * Enable FIQs
 */
#define __stf() \
	({ \
	unsigned long temp; \
	__asm__ __volatile__( \
	"mrs %0, cpsr @ stf\n" \
	" bic %0, %0, #64\n" \
	" msr cpsr_c, %0" \
	: "=r" (temp) \
	: \
	: "memory"); \
	})

/*
 * Disable FIQs
 */
#define __clf() \
	({ \
	unsigned long temp; \
	__asm__ __volatile__( \
	"mrs %0, cpsr @ clf\n" \
	" orr %0, %0, #64\n" \
	" msr cpsr_c, %0" \
	: "=r" (temp) \
	: \
	: "memory"); \
	})
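
/*
 * Note: in the CPSR manipulations above, #128 (bit 7) is the I bit
 * masking IRQs and #64 (bit 6) is the F bit masking FIQs; "msr cpsr_c"
 * writes only the control field, leaving the condition flags intact.
 */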

/*
 * Save the current interrupt enable state.
 */
#define local_save_flags(x) \
	({ \
	__asm__ __volatile__( \
	"mrs %0, cpsr @ local_save_flags\n" \
	: "=r" (x) \
	: \
	: "memory"); \
	})

/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(x) \
	__asm__ __volatile__( \
	"msr cpsr_c, %0 @ local_irq_restore\n" \
	: \
	: "r" (x) \
	: "memory")

#endif	/* CONFIG_ARM64 */

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110) || \
	defined(CONFIG_ARM64)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two workarounds:
 *	1. Disable interrupts and emulate the atomic swap
 *	2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif

	switch (size) {
#ifdef swp_is_buggy
	case 1:
		local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		local_irq_restore(flags);
		break;

	case 4:
		local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		local_irq_restore(flags);
		break;
#else
	case 1:
		__asm__ __volatile__("swpb %0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory");
		break;
	case 4:
		__asm__ __volatile__("swp %0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
	}

	return ret;
}
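
/*
 * Callers normally reach __xchg() through a type-generic wrapper
 * defined by the including header.  A minimal sketch of the
 * conventional definition (an assumption; not part of this file):
 *
 *	#define xchg(ptr, x) \
 *		((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
 *					    sizeof(*(ptr))))
 */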

#endif