/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/assembler.h
 *
 *  Copyright (C) 1996-2000 Russell King
 *
 *  This file contains arm architecture specific defines
 *  for the different processors.
 *
 *  Do not include any C declarations in this file - it is included by
 *  assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/uaccess-asm.h>

#define IOMEM(x)	(x)

/*
 * Endian independent macros for shifting bytes within registers.
 */
#ifndef __ARMEB__
#define lspull          lsr
#define lspush          lsl
#define get_byte_0      lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0      lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else
#define lspull          lsl
#define lspush          lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3      lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3      lsl #0
#endif
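
/*
 * Illustrative use only (a sketch, not part of the original header): a caller
 * might extract or place an individual byte with these helpers, e.g.
 *
 *	mov	r2, r1, get_byte_1	@ byte 1 of r1 into bits 7..0 of r2
 *	and	r2, r2, #0xff		@ then mask off the remaining bits
 *	orr	r0, r0, r3, put_byte_2	@ merge a byte value into bits 23..16
 *
 * The same source assembles correctly for either endianness because the
 * shift direction and amount are selected by the definitions above.
 */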

/* Select code for any configuration running in BE8 mode */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...) code
#else
#define ARM_BE8(code...)
#endif
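
/*
 * Illustrative use only: ARM_BE8() wraps instructions that are needed solely
 * when the kernel runs BE8 (byte-invariant big-endian), for example
 * byte-reversing a value that was loaded as little-endian data:
 *
 * ARM_BE8(rev	r0, r0)		@ emitted only for CONFIG_CPU_ENDIAN_BE8
 */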

/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif
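
/*
 * Illustrative use only: PLD() typically wraps a preload of the next source
 * cache line in a copy or checksum loop, e.g.
 *
 * PLD(	pld	[r1, #32]	)	@ hint only; dropped on pre-ARMv5 builds
 */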

/*
 * This can be used to enable code that cacheline-aligns the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this to be worthwhile when the cache is not set to
 * write-allocate (further testing on XScale with write-allocate enabled
 * would be needed).
 *
 * On Feroceon, however, there is much to gain regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
#define CALGN(code...)
#endif

#define IMM12_MASK 0xfff

/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq_notrace
	cpsid	i
	.endm

	.macro	enable_irq_notrace
	cpsie	i
	.endm
#else
	.macro	disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE
	.endm
#endif

#if __LINUX_ARM_ARCH__ < 7
	.macro	dsb, args
	mcr	p15, 0, r0, c7, c10, 4
	.endm

	.macro	isb, args
	mcr	p15, 0, r0, c7, c5, 4
	.endm
#endif

	.macro asm_trace_hardirqs_off, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	.if \save
	stmdb   sp!, {r0-r3, ip, lr}
	.endif
	bl	trace_hardirqs_off
	.if \save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro asm_trace_hardirqs_on, cond=al, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	/*
	 * Strictly speaking the registers should be pushed and popped
	 * conditionally, but after the bl the flags are clobbered anyway.
	 */
	.if \save
	stmdb   sp!, {r0-r3, ip, lr}
	.endif
	bl\cond	trace_hardirqs_on
	.if \save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro disable_irq, save=1
	disable_irq_notrace
	asm_trace_hardirqs_off \save
	.endm

	.macro enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq
	.endm

	.macro	save_and_disable_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq_notrace
	.endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	msr	primask, \oldcpsr
#else
	msr	cpsr_c, \oldcpsr
#endif
	.endm

	.macro restore_irqs, oldcpsr
	tst	\oldcpsr, #PSR_I_BIT
	asm_trace_hardirqs_on cond=eq
	restore_irqs_notrace \oldcpsr
	.endm
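
/*
 * Illustrative use only: a typical critical section saves the IRQ state in a
 * spare register and restores it afterwards, e.g. (r9 chosen arbitrarily)
 *
 *	save_and_disable_irqs r9	@ r9 := CPSR (PRIMASK on v7-M), IRQs off
 *	...				@ code that must not be interrupted
 *	restore_irqs r9			@ put the saved mask state back
 */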

/*
 * Assembly version of "adr rd, BSYM(sym)".  This should only be used to
 * reference local symbols in the same assembly file which are to be
 * resolved by the assembler.  Other usage is undefined.
 */
	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	badr\c, rd, sym
#ifdef CONFIG_THUMB2_KERNEL
	adr\c	\rd, \sym + 1
#else
	adr\c	\rd, \sym
#endif
	.endm
	.endr
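
/*
 * Illustrative use only: badr is used where a local code address is loaded
 * into a register that may later be branched to, so that bit 0 is set on
 * Thumb-2 kernels.  safe_svcmode_maskall below does exactly this:
 *
 *	badr	lr, 2f			@ lr := address of local label 2:, Thumb bit included
 */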

/*
 * Get current thread_info.
 */
	.macro	get_thread_info, rd
 ARM(	mov	\rd, sp, lsr #THREAD_SIZE_ORDER + PAGE_SHIFT	)
 THUMB(	mov	\rd, sp			)
 THUMB(	lsr	\rd, \rd, #THREAD_SIZE_ORDER + PAGE_SHIFT	)
	mov	\rd, \rd, lsl #THREAD_SIZE_ORDER + PAGE_SHIFT
	.endm

/*
 * Increment/decrement the preempt count.
 */
#ifdef CONFIG_PREEMPT_COUNT
	.macro	inc_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	add	\tmp, \tmp, #1			@ increment it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	sub	\tmp, \tmp, #1			@ decrement it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	get_thread_info \ti
	dec_preempt_count \ti, \tmp
	.endm
#else
	.macro	inc_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	.endm
#endif

#define USERL(l, x...)				\
9999:	x;					\
	.pushsection __ex_table,"a";		\
	.align	3;				\
	.long	9999b,l;			\
	.popsection

#define USER(x...)	USERL(9001f, x)
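
/*
 * Illustrative use only: USER()/USERL() wrap a single user-space access so
 * that a fault is redirected through the exception table, e.g.
 *
 * USER(	strbt	r3, [r0], #1	)	@ a fault here branches to the caller's 9001: fixup label
 *
 * USERL() is the same, but lets the caller name its own fixup label instead
 * of the conventional 9001.
 */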

#ifdef CONFIG_SMP
#define ALT_SMP(instr...)					\
9998:	instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)					\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
9997:	instr							;\
	.if . - 9997b == 2					;\
		nop						;\
	.endif							;\
	.if . - 9997b != 4					;\
		.error "ALT_UP() content must assemble to exactly 4 bytes";\
	.endif							;\
	.popsection
#define ALT_UP_B(label)					\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
	W(b)	. + (label - 9998b)					;\
	.popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif
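
/*
 * Illustrative use only: each ALT_SMP() instruction is paired with an
 * ALT_UP() replacement of identical size, which is patched in when the
 * kernel finds itself running on a uniprocessor system.  The smp_dmb macro
 * below is an in-file example of the pattern:
 *
 *	ALT_SMP(dmb	ish)
 *	ALT_UP(nop)
 */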

/*
 * Instruction barrier
 */
	.macro	instr_sync
#if __LINUX_ARM_ARCH__ >= 7
	isb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c5, 4
#endif
	.endm

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb	ish)
	.else
	ALT_SMP(W(dmb)	ish)
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
#else
#error Incompatible SMP platform
#endif
	.ifeqs "\mode","arm"
	ALT_UP(nop)
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm

#if defined(CONFIG_CPU_V7M)
	/*
	 * setmode is used to assert that the CPU is in SVC mode during boot.
	 * For v7-M this is done in __v7m_setup, so setmode can be empty here.
	 */
	.macro	setmode, mode, reg
	.endm
#elif defined(CONFIG_THUMB2_KERNEL)
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif
/*
 * Helper macro to enter SVC mode cleanly and mask interrupts.  reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended for forcing the CPU into SVC mode at boot time.
 * You cannot return to the original mode.
 */
.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
	mrs	\reg , cpsr
	eor	\reg, \reg, #HYP_MODE
	tst	\reg, #MODE_MASK
	bic	\reg , \reg , #MODE_MASK
	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
	bne	1f
	orr	\reg, \reg, #PSR_A_BIT
	badr	lr, 2f
	msr	spsr_cxsf, \reg
	__MSR_ELR_HYP(14)
	__ERET
1:	msr	cpsr_c, \reg
2:
#else
/*
 * workaround for possibly broken pre-v6 hardware
 * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
.endm
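
/*
 * Illustrative use only: early boot code passes a scratch register it does
 * not need to preserve, e.g.
 *
 *	safe_svcmode_maskall r9		@ drop from HYP (if entered there) to SVC, IRQ/FIQ masked
 */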

/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 */
#ifdef CONFIG_THUMB2_KERNEL

	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
	.if	\inc == 1
	\instr\()b\t\cond\().w \reg, [\ptr, #\off]
	.elseif	\inc == 4
	\instr\t\cond\().w \reg, [\ptr, #\off]
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endm

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
	@ explicit IT instruction needed because of the label
	@ introduced by the USER macro
	.ifnc	\cond,al
	.if	\rept == 1
	itt	\cond
	.elseif	\rept == 2
	ittt	\cond
	.else
	.error	"Unsupported rept macro argument"
	.endif
	.endif

	@ Slightly optimised to avoid incrementing the pointer twice
	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
	.if	\rept == 2
	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
	.endif

	add\cond \ptr, #\rept * \inc
	.endm

#else	/* !CONFIG_THUMB2_KERNEL */

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
	.rept	\rept
9999:
	.if	\inc == 1
	\instr\()b\t\cond \reg, [\ptr], #\inc
	.elseif	\inc == 4
	\instr\t\cond \reg, [\ptr], #\inc
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endr
	.endm

#endif	/* CONFIG_THUMB2_KERNEL */

	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm
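
/*
 * Illustrative use only: the user-access copy loops use these wrappers to
 * issue unprivileged loads/stores with a local fault fixup, e.g.
 *
 *	ldrusr	r3, r1, 4		@ ldrt r3, [r1], #4; a fault branches to 9001f
 *	strusr	r3, r0, 1, rept=2	@ two unprivileged byte stores, same default fixup
 */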

/* Utility macro for declaring string literals */
	.macro	string name:req, string
	.type \name , #object
\name:
	.asciz "\string"
	.size \name , . - \name
	.endm
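
/*
 * Illustrative use only: declares a typed, sized string object, roughly as
 * the processor support files do for their name strings, e.g.
 *
 *	string	cpu_v7_name, "ARMv7 Processor"
 */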

	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
	mov\c	pc, \reg
#else
	.ifeqs	"\reg", "lr"
	bx\c	\reg
	.else
	mov\c	pc, \reg
	.endif
#endif
	.endm
	.endr

	.macro	ret.w, reg
	ret	\reg
#ifdef CONFIG_THUMB2_KERNEL
	nop
#endif
	.endm

	.macro	bug, msg, line
#ifdef CONFIG_THUMB2_KERNEL
1:	.inst	0xde02
#else
1:	.inst	0xe7f001f2
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
	.pushsection .rodata.str, "aMS", %progbits, 1
2:	.asciz	"\msg"
	.popsection
	.pushsection __bug_table, "aw"
	.align	2
	.word	1b, 2b
	.hword	\line
	.popsection
#endif
	.endm

#ifdef CONFIG_KPROBES
#define _ASM_NOKPROBE(entry)				\
	.pushsection "_kprobe_blacklist", "aw" ;	\
	.balign 4 ;					\
	.long entry;					\
	.popsection
#else
#define _ASM_NOKPROBE(entry)
#endif

	.macro		__adldst_l, op, reg, sym, tmp, c
	.if		__LINUX_ARM_ARCH__ < 7
	ldr\c		\tmp, .La\@
	.subsection	1
	.align		2
.La\@:	.long		\sym - .Lpc\@
	.previous
	.else
	.ifnb		\c
 THUMB(	ittt		\c			)
	.endif
	movw\c		\tmp, #:lower16:\sym - .Lpc\@
	movt\c		\tmp, #:upper16:\sym - .Lpc\@
	.endif

#ifndef CONFIG_THUMB2_KERNEL
	.set		.Lpc\@, . + 8			// PC bias
	.ifc		\op, add
	add\c		\reg, \tmp, pc
	.else
	\op\c		\reg, [pc, \tmp]
	.endif
#else
.Lb\@:	add\c		\tmp, \tmp, pc
	/*
	 * In Thumb-2 builds, the PC bias depends on whether we are currently
	 * emitting into a .arm or a .thumb section. The size of the add opcode
	 * above will be 2 bytes when emitting in Thumb mode and 4 bytes when
	 * emitting in ARM mode, so let's use this to account for the bias.
	 */
	.set		.Lpc\@, . + (. - .Lb\@)

	.ifnc		\op, add
	\op\c		\reg, [\tmp]
	.endif
#endif
	.endm

	/*
	 * mov_l - move a constant value or [relocated] address into a register
	 */
	.macro		mov_l, dst:req, imm:req
	.if		__LINUX_ARM_ARCH__ < 7
	ldr		\dst, =\imm
	.else
	movw		\dst, #:lower16:\imm
	movt		\dst, #:upper16:\imm
	.endif
	.endm

	/*
	 * adr_l - adr pseudo-op with unlimited range
	 *
	 * @dst: destination register
	 * @sym: name of the symbol
	 * @cond: conditional opcode suffix
	 */
	.macro		adr_l, dst:req, sym:req, cond
	__adldst_l	add, \dst, \sym, \dst, \cond
	.endm

	/*
	 * ldr_l - ldr <literal> pseudo-op with unlimited range
	 *
	 * @dst: destination register
	 * @sym: name of the symbol
	 * @cond: conditional opcode suffix
	 */
	.macro		ldr_l, dst:req, sym:req, cond
	__adldst_l	ldr, \dst, \sym, \dst, \cond
	.endm

	/*
	 * str_l - str <literal> pseudo-op with unlimited range
	 *
	 * @src: source register
	 * @sym: name of the symbol
	 * @tmp: mandatory scratch register
	 * @cond: conditional opcode suffix
	 */
	.macro		str_l, src:req, sym:req, tmp:req, cond
	__adldst_l	str, \src, \sym, \tmp, \cond
	.endm
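
	/*
	 * Illustrative use only: these pseudo-ops behave like mov/adr/ldr/str
	 * but are not limited by immediate or literal-pool range.  With a
	 * hypothetical symbol __some_symbol:
	 *
	 *	mov_l	r0, 0x12345678		@ movw/movt pair on v7, ldr =const before that
	 *	adr_l	r1, __some_symbol	@ PC-relative address, any distance
	 *	ldr_l	r2, __some_symbol
	 *	str_l	r2, __some_symbol, r3	@ r3 is the required scratch register
	 */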

	/*
	 * rev_l - byte-swap a 32-bit value
	 *
	 * @val: source/destination register
	 * @tmp: scratch register
	 */
	.macro		rev_l, val:req, tmp:req
	.if		__LINUX_ARM_ARCH__ < 6
	eor		\tmp, \val, \val, ror #16
	bic		\tmp, \tmp, #0x00ff0000
	mov		\val, \val, ror #8
	eor		\val, \val, \tmp, lsr #8
	.else
	rev		\val, \val
	.endif
	.endm
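
	/*
	 * Note on the pre-v6 fallback above: it is the classic four-instruction
	 * byte swap.  The word is rotated by 8, and the precomputed XOR of
	 * \val with ror(\val, 16) (with the middle byte masked off) is then
	 * used to fix up the two bytes that the rotate alone leaves in the
	 * wrong place.  On ARMv6+ a single rev does the same job.
	 */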

#endif /* __ASM_ASSEMBLER_H__ */