/* SPDX-License-Identifier: GPL-2.0 */
/*
 * IRQ flags handling
 */
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H

#ifndef __ASSEMBLY__
/*
 * Get definitions for arch_local_save_flags(x), etc.
 */
#include <asm/hw_irq.h>

#else
#ifdef CONFIG_TRACE_IRQFLAGS
#ifdef CONFIG_IRQSOFF_TRACER
/*
 * Since the ftrace irqsoff latency tracer checks CALLER_ADDR1, which it
 * finds by walking one stack frame up from here, we need to force a
 * valid stack frame in case we came from user space.
 */
#define TRACE_WITH_FRAME_BUFFER(func)		\
	mflr	r0;				/* save return address */	   \
	stdu	r1, -STACK_FRAME_OVERHEAD(r1);	/* frame 1, back-chain at 0(r1) */ \
	std	r0, 16(r1);			/* LR in the standard save slot */ \
	stdu	r1, -STACK_FRAME_OVERHEAD(r1);	/* frame 2 for func itself */	   \
	bl func;				\
	ld	r1, 0(r1);			/* pop frame 2 */		   \
	ld	r1, 0(r1);			/* pop frame 1 */
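
/*
 * The two stdu instructions above build a pair of dummy stack frames
 * linked through their back-chain words, with the saved LR placed in the
 * LR save slot (16(r1)) of the outer frame, so the irqsoff tracer's
 * CALLER_ADDR1 lookup finds a valid return address even if we entered
 * from user space with no kernel stack frame.  The two ld instructions
 * then unwind both frames again via the back chain.
 */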
#else
#define TRACE_WITH_FRAME_BUFFER(func)		\
	bl func;
#endif

/*
 * These are calls to C code, so the caller must be prepared for volatiles to
 * be clobbered.
 */
#define TRACE_ENABLE_INTS	TRACE_WITH_FRAME_BUFFER(trace_hardirqs_on)
#define TRACE_DISABLE_INTS	TRACE_WITH_FRAME_BUFFER(trace_hardirqs_off)

/*
 * This is used by assembly code to soft-disable interrupts first and
 * reconcile irq state.
 *
 * NB: This may call C code, so the caller must be prepared for volatiles to
 * be clobbered.
 */
#define RECONCILE_IRQ_STATE(__rA, __rB)		\
	lbz	__rA,PACAIRQSOFTMASK(r13);	/* current soft-mask */		\
	lbz	__rB,PACAIRQHAPPENED(r13);	/* pending irq bits */		\
	andi.	__rA,__rA,IRQS_DISABLED;	/* already soft-disabled? */	\
	li	__rA,IRQS_DISABLED;		\
	ori	__rB,__rB,PACA_IRQ_HARD_DIS;	/* record the hard disable */	\
	stb	__rB,PACAIRQHAPPENED(r13);	\
	bne	44f;				/* yes: skip mask + trace */	\
	stb	__rA,PACAIRQSOFTMASK(r13);	/* soft-disable interrupts */	\
	TRACE_DISABLE_INTS;			\
44:
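
/*
 * Either way we fall through here with interrupts soft-disabled and
 * PACA_IRQ_HARD_DIS recorded in PACAIRQHAPPENED.  Note that the andi.
 * above also clobbers cr0.
 */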

#else
#define TRACE_ENABLE_INTS
#define TRACE_DISABLE_INTS

#define RECONCILE_IRQ_STATE(__rA, __rB)		\
	lbz	__rA,PACAIRQHAPPENED(r13);	/* pending irq bits */		\
	li	__rB,IRQS_DISABLED;		\
	ori	__rA,__rA,PACA_IRQ_HARD_DIS;	/* record the hard disable */	\
	stb	__rB,PACAIRQSOFTMASK(r13);	/* soft-disable interrupts */	\
	stb	__rA,PACAIRQHAPPENED(r13)
#endif
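
/*
 * Illustrative invocation from an exception entry path (a sketch, not a
 * verbatim caller from this tree): assuming two volatile GPRs such as
 * r10 and r11 are free for scratch, and hardware has already cleared
 * MSR[EE] on entry:
 *
 *	RECONCILE_IRQ_STATE(r10, r11)
 */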
#endif

#endif