xref: /OK3568_Linux_fs/kernel/arch/powerpc/include/asm/xive-regs.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0-or-later */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright 2016,2017 IBM Corporation.
4*4882a593Smuzhiyun  */
5*4882a593Smuzhiyun #ifndef _ASM_POWERPC_XIVE_REGS_H
6*4882a593Smuzhiyun #define _ASM_POWERPC_XIVE_REGS_H
7*4882a593Smuzhiyun 
/*
 * "magic" Event State Buffer (ESB) MMIO offsets.
 *
 * Each interrupt source has a 2-bit state machine called ESB
 * which can be controlled by MMIO. It's made of 2 bits, P and
 * Q. P indicates that an interrupt is pending (has been sent
 * to a queue and is waiting for an EOI). Q indicates that the
 * interrupt has been triggered while pending.
 *
 * This acts as a coalescing mechanism in order to guarantee
 * that a given interrupt only occurs at most once in a queue.
 *
 * When doing an EOI, the Q bit will indicate if the interrupt
 * needs to be re-triggered.
 *
 * The following offsets into the ESB MMIO allow to read or
 * manipulate the PQ bits. They must be used with an 8-bytes
 * load instruction. They all return the previous state of the
 * interrupt (atomically).
 *
 * Additionally, some ESB pages support doing an EOI via a
 * store at 0 and some ESBs support doing a trigger via a
 * separate trigger page.
 */
#define XIVE_ESB_STORE_EOI	0x400 /* Store */
#define XIVE_ESB_LOAD_EOI	0x000 /* Load */
#define XIVE_ESB_GET		0x800 /* Load */
#define XIVE_ESB_SET_PQ_00	0xc00 /* Load */
#define XIVE_ESB_SET_PQ_01	0xd00 /* Load */
#define XIVE_ESB_SET_PQ_10	0xe00 /* Load */
#define XIVE_ESB_SET_PQ_11	0xf00 /* Load */

/*
 * Load-after-store ordering
 *
 * Adding this offset to the load address will enforce
 * load-after-store ordering. This is required to use StoreEOI.
 */
#define XIVE_ESB_LD_ST_MO	0x40 /* Load-after-store ordering */

/* Bit values returned by the "magic" ESB loads above */
#define XIVE_ESB_VAL_P		0x2	/* interrupt is pending */
#define XIVE_ESB_VAL_Q		0x1	/* interrupt queued while pending */
#define XIVE_ESB_INVALID	0xFF	/* marker for a failed/invalid read */

/*
 * Thread Management (aka "TM") registers
 */

/* TM register offsets: one 16-byte "quad word" (QW) per ring */
#define TM_QW0_USER		0x000 /* All rings */
#define TM_QW1_OS		0x010 /* Ring 0..2 */
#define TM_QW2_HV_POOL		0x020 /* Ring 0..1 */
#define TM_QW3_HV_PHYS		0x030 /* Ring 0..1 */

/* Byte offsets inside a QW             QW0 QW1 QW2 QW3 */
#define TM_NSR			0x0  /*  +   +   -   +  */
#define TM_CPPR			0x1  /*  -   +   -   +  */
#define TM_IPB			0x2  /*  -   +   +   +  */
#define TM_LSMFB		0x3  /*  -   +   +   +  */
#define TM_ACK_CNT		0x4  /*  -   +   -   -  */
#define TM_INC			0x5  /*  -   +   -   +  */
#define TM_AGE			0x6  /*  -   +   -   +  */
#define TM_PIPR			0x7  /*  -   +   -   +  */

/* 32-bit word offsets inside a QW */
#define TM_WORD0		0x0
#define TM_WORD1		0x4

/*
 * QW word 2 contains the valid bit at the top and other fields
 * depending on the QW.
 *
 * NOTE: PPC_BIT32/PPC_BITMASK32 are IBM (big-endian) bit numbering
 * helpers provided by <asm/bitops.h>; bit 0 is the most significant.
 */
#define TM_WORD2		0x8
#define   TM_QW0W2_VU		PPC_BIT32(0)
#define   TM_QW0W2_LOGIC_SERV	PPC_BITMASK32(1,31) // XX 2,31 ?
#define   TM_QW1W2_VO		PPC_BIT32(0)
#define   TM_QW1W2_OS_CAM	PPC_BITMASK32(8,31)
#define   TM_QW2W2_VP		PPC_BIT32(0)
#define   TM_QW2W2_POOL_CAM	PPC_BITMASK32(8,31)
#define   TM_QW3W2_VT		PPC_BIT32(0)
#define   TM_QW3W2_LP		PPC_BIT32(6)
#define   TM_QW3W2_LE		PPC_BIT32(7)
#define   TM_QW3W2_T		PPC_BIT32(31)

/*
 * In addition to normal loads to "peek" and writes (only when invalid)
 * using 4 and 8 bytes accesses, the above registers support these
 * "special" byte operations:
 *
 *   - Byte load from QW0[NSR] - User level NSR (EBB)
 *   - Byte store to QW0[NSR] - User level NSR (EBB)
 *   - Byte load/store to QW1[CPPR] and QW3[CPPR] - CPPR access
 *   - Byte load from QW3[TM_WORD2] - Read VT||00000||LP||LE on thrd 0
 *                                    otherwise VT||0000000
 *   - Byte store to QW3[TM_WORD2] - Set VT bit (and LP/LE if present)
 *
 * Then we have all these "special" CI ops at these offset that trigger
 * all sorts of side effects:
 */
#define TM_SPC_ACK_EBB		0x800	/* Load8 ack EBB to reg*/
#define TM_SPC_ACK_OS_REG	0x810	/* Load16 ack OS irq to reg */
#define TM_SPC_PUSH_USR_CTX	0x808	/* Store32 Push/Validate user context */
#define TM_SPC_PULL_USR_CTX	0x808	/* Load32 Pull/Invalidate user context */
#define TM_SPC_SET_OS_PENDING	0x812	/* Store8 Set OS irq pending bit */
#define TM_SPC_PULL_OS_CTX	0x818	/* Load32/Load64 Pull/Invalidate OS context to reg */
#define TM_SPC_PULL_POOL_CTX	0x828	/* Load32/Load64 Pull/Invalidate Pool context to reg*/
#define TM_SPC_ACK_HV_REG	0x830	/* Load16 ack HV irq to reg */
#define TM_SPC_PULL_USR_CTX_OL	0xc08	/* Store8 Pull/Inval usr ctx to odd line */
#define TM_SPC_ACK_OS_EL	0xc10	/* Store8 ack OS irq to even line */
#define TM_SPC_ACK_HV_POOL_EL	0xc20	/* Store8 ack HV evt pool to even line */
#define TM_SPC_ACK_HV_EL	0xc30	/* Store8 ack HV irq to even line */
/* XXX more... */

120*4882a593Smuzhiyun /* NSR fields for the various QW ack types */
121*4882a593Smuzhiyun #define TM_QW0_NSR_EB		PPC_BIT8(0)
122*4882a593Smuzhiyun #define TM_QW1_NSR_EO		PPC_BIT8(0)
123*4882a593Smuzhiyun #define TM_QW3_NSR_HE		PPC_BITMASK8(0,1)
124*4882a593Smuzhiyun #define  TM_QW3_NSR_HE_NONE	0
125*4882a593Smuzhiyun #define  TM_QW3_NSR_HE_POOL	1
126*4882a593Smuzhiyun #define  TM_QW3_NSR_HE_PHYS	2
127*4882a593Smuzhiyun #define  TM_QW3_NSR_HE_LSI	3
128*4882a593Smuzhiyun #define TM_QW3_NSR_I		PPC_BIT8(2)
129*4882a593Smuzhiyun #define TM_QW3_NSR_GRP_LVL	PPC_BIT8(3,7)
130*4882a593Smuzhiyun 
131*4882a593Smuzhiyun #endif /* _ASM_POWERPC_XIVE_REGS_H */
132