xref: /rk3399_rockchip-uboot/arch/arm/include/asm/proc-armv/system.h (revision 0ae7653128c80a4f2920cbe9b124792c2fd9d9e0)
1819833afSPeter Tyser /*
2819833afSPeter Tyser  *  linux/include/asm-arm/proc-armv/system.h
3819833afSPeter Tyser  *
4819833afSPeter Tyser  *  Copyright (C) 1996 Russell King
5819833afSPeter Tyser  *
6819833afSPeter Tyser  * This program is free software; you can redistribute it and/or modify
7819833afSPeter Tyser  * it under the terms of the GNU General Public License version 2 as
8819833afSPeter Tyser  * published by the Free Software Foundation.
9819833afSPeter Tyser  */
10819833afSPeter Tyser #ifndef __ASM_PROC_SYSTEM_H
11819833afSPeter Tyser #define __ASM_PROC_SYSTEM_H
12819833afSPeter Tyser 
13819833afSPeter Tyser /*
14819833afSPeter Tyser  * Save the current interrupt enable state & disable IRQs
15819833afSPeter Tyser  */
16*0ae76531SDavid Feng #ifdef CONFIG_ARM64
17*0ae76531SDavid Feng 
18*0ae76531SDavid Feng /*
19*0ae76531SDavid Feng  * Save the current interrupt enable state
20*0ae76531SDavid Feng  * and disable IRQs/FIQs
21*0ae76531SDavid Feng  */
/*
 * Read the current DAIF mask bits into 'flags', then set the I and F
 * bits (daifset #3) to mask IRQs and FIQs.
 *
 * Fix: the two instruction strings previously concatenated without a
 * separator, producing the invalid assembly "mrs %0, daifmsr daifset, #3";
 * the "\n" between them is required.
 */
#define local_irq_save(flags)					\
	({							\
	asm volatile(						\
	"mrs	%0, daif\n"					\
	"msr	daifset, #3"					\
	: "=r" (flags)						\
	:							\
	: "memory");						\
	})
31*0ae76531SDavid Feng 
32*0ae76531SDavid Feng /*
33*0ae76531SDavid Feng  * restore saved IRQ & FIQ state
34*0ae76531SDavid Feng  */
/* Write the previously saved mask word back into DAIF. */
#define local_irq_restore(flags)				\
	({							\
	asm volatile("msr	daif, %0"			\
		: /* no outputs */				\
		: "r" (flags)					\
		: "memory");					\
	})
43*0ae76531SDavid Feng 
44*0ae76531SDavid Feng /*
45*0ae76531SDavid Feng  * Enable IRQs/FIQs
46*0ae76531SDavid Feng  */
/* Clear the I and F bits in DAIF, unmasking IRQs and FIQs. */
#define local_irq_enable()					\
	({							\
	asm volatile("msr	daifclr, #3" ::: "memory");	\
	})
55*0ae76531SDavid Feng 
56*0ae76531SDavid Feng /*
57*0ae76531SDavid Feng  * Disable IRQs/FIQs
58*0ae76531SDavid Feng  */
/* Set the I and F bits in DAIF, masking IRQs and FIQs. */
#define local_irq_disable()					\
	({							\
	asm volatile("msr	daifset, #3" ::: "memory");	\
	})
67*0ae76531SDavid Feng 
68*0ae76531SDavid Feng #else	/* CONFIG_ARM64 */
69*0ae76531SDavid Feng 
/* Capture CPSR into 'x', then set the I bit (0x80) to mask IRQs. */
#define local_irq_save(x)					\
	({							\
		unsigned long masked;				\
		__asm__ __volatile__(				\
		"mrs	%0, cpsr		@ local_irq_save\n"	\
	"	orr	%1, %0, #128\n"				\
	"	msr	cpsr_c, %1"				\
		: "=r" (x), "=r" (masked)			\
		:						\
		: "memory");					\
	})
81819833afSPeter Tyser 
82819833afSPeter Tyser /*
83819833afSPeter Tyser  * Enable IRQs
84819833afSPeter Tyser  */
/* Clear the I bit (0x80) in CPSR to unmask IRQs. */
#define local_irq_enable()					\
	({							\
		unsigned long cpsr_val;				\
		__asm__ __volatile__(				\
		"mrs	%0, cpsr		@ local_irq_enable\n"	\
	"	bic	%0, %0, #128\n"				\
	"	msr	cpsr_c, %0"				\
		: "=r" (cpsr_val)				\
		:						\
		: "memory");					\
	})
96819833afSPeter Tyser 
97819833afSPeter Tyser /*
98819833afSPeter Tyser  * Disable IRQs
99819833afSPeter Tyser  */
/* Set the I bit (0x80) in CPSR to mask IRQs. */
#define local_irq_disable()					\
	({							\
		unsigned long cpsr_val;				\
		__asm__ __volatile__(				\
		"mrs	%0, cpsr		@ local_irq_disable\n"	\
	"	orr	%0, %0, #128\n"				\
	"	msr	cpsr_c, %0"				\
		: "=r" (cpsr_val)				\
		:						\
		: "memory");					\
	})
111819833afSPeter Tyser 
112819833afSPeter Tyser /*
113819833afSPeter Tyser  * Enable FIQs
114819833afSPeter Tyser  */
/* Clear the F bit (0x40) in CPSR to unmask FIQs. */
#define __stf()							\
	({							\
		unsigned long cpsr_val;				\
		__asm__ __volatile__(				\
		"mrs	%0, cpsr		@ stf\n"	\
	"	bic	%0, %0, #64\n"				\
	"	msr	cpsr_c, %0"				\
		: "=r" (cpsr_val)				\
		:						\
		: "memory");					\
	})
126819833afSPeter Tyser 
127819833afSPeter Tyser /*
128819833afSPeter Tyser  * Disable FIQs
129819833afSPeter Tyser  */
/* Set the F bit (0x40) in CPSR to mask FIQs. */
#define __clf()							\
	({							\
		unsigned long cpsr_val;				\
		__asm__ __volatile__(				\
		"mrs	%0, cpsr		@ clf\n"	\
	"	orr	%0, %0, #64\n"				\
	"	msr	cpsr_c, %0"				\
		: "=r" (cpsr_val)				\
		:						\
		: "memory");					\
	})
141819833afSPeter Tyser 
142819833afSPeter Tyser /*
143819833afSPeter Tyser  * Save the current interrupt enable state.
144819833afSPeter Tyser  */
/* Copy the current CPSR into 'x' without modifying any mask bits. */
#define local_save_flags(x)					\
	({							\
		__asm__ __volatile__(				\
		"mrs	%0, cpsr		@ local_save_flags\n"	\
		: "=r" (x)					\
		:						\
		: "memory");					\
	})
153819833afSPeter Tyser 
154819833afSPeter Tyser /*
155819833afSPeter Tyser  * restore saved IRQ & FIQ state
156819833afSPeter Tyser  */
/* Write saved flags back into the CPSR control field. */
#define local_irq_restore(x)					\
	__asm__ __volatile__(					\
		"msr	cpsr_c, %0		@ local_irq_restore\n"	\
		:						\
		: "r" (x)					\
		: "memory")
163819833afSPeter Tyser 
164*0ae76531SDavid Feng #endif	/* CONFIG_ARM64 */
165*0ae76531SDavid Feng 
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110) || \
	defined(CONFIG_ARM64)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE(review): CONFIG_ARM64 is also listed — presumably because
 * AArch64 removed the swp instruction entirely, so the emulated path
 * is the only option there.
 */
#define swp_is_buggy
#endif
182819833afSPeter Tyser 
/*
 * Atomically exchange the value at @ptr with @x and return the old value.
 * @size selects the access width: 1 (byte) or 4 (word); any other size
 * resolves to the undefined extern __bad_xchg(), producing a link error.
 *
 * When swp_is_buggy is defined (StrongARM cache issue, or ARM64 which
 * lacks swp), the swap is emulated with a load/store pair under
 * local_irq_save()/local_irq_restore(); note this is only atomic with
 * respect to interrupts on the local CPU, not against other CPUs or
 * bus masters.  Otherwise the native swpb/swp instructions are used.
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;	/* only needed for the emulated path */
#endif

	switch (size) {
#ifdef swp_is_buggy
		case 1:
			/* emulate byte swap with interrupts masked */
			local_irq_save(flags);
			ret = *(volatile unsigned char *)ptr;
			*(volatile unsigned char *)ptr = x;
			local_irq_restore(flags);
			break;

		case 4:
			/* emulate word swap with interrupts masked */
			/* NOTE(review): accesses a full unsigned long,
			 * which is 8 bytes on ARM64 — confirm callers
			 * never pass size==4 for a 4-byte object there. */
			local_irq_save(flags);
			ret = *(volatile unsigned long *)ptr;
			*(volatile unsigned long *)ptr = x;
			local_irq_restore(flags);
			break;
#else
		case 1:	__asm__ __volatile__ ("swpb %0, %1, [%2]"
					: "=&r" (ret)
					: "r" (x), "r" (ptr)
					: "memory");
			break;
		case 4:	__asm__ __volatile__ ("swp %0, %1, [%2]"
					: "=&r" (ret)
					: "r" (x), "r" (ptr)
					: "memory");
			break;
#endif
		/* unsupported size: undefined extern forces a link error */
		default: __bad_xchg(ptr, size), ret = 0;
	}

	return ret;
}
223819833afSPeter Tyser 
224819833afSPeter Tyser #endif
225