/*
 * Broadcom PCIE
 * Software-specific definitions shared between device and host side
 * Explains the shared area between host and dongle
 *
 * Copyright (C) 2020, Broadcom.
 *
 *      Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module.  An independent module is a module which is not
 * derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *
 * <<Broadcom-WL-IPTag/Dual:>>
 */

#ifndef	_bcmpcie_h_
#define	_bcmpcie_h_

#include <typedefs.h>

#define ADDR_64(x)			(x.addr)
#define HIGH_ADDR_32(x)     ((uint32) (((sh_addr_t) x).high_addr))
#define LOW_ADDR_32(x)      ((uint32) (((sh_addr_t) x).low_addr))

typedef struct {
	uint32 low_addr;
	uint32 high_addr;
} sh_addr_t;
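
/*
 * Illustrative sketch (not part of this header's API): splitting a 64-bit bus
 * address into an sh_addr_t. 'pa' is a hypothetical uint64 value supplied by
 * the caller.
 *
 *	sh_addr_t addr;
 *	addr.low_addr  = (uint32)(pa & 0xFFFFFFFFu);
 *	addr.high_addr = (uint32)(pa >> 32);
 */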

/* May be overridden by 43xxxxx-roml.mk */
#if !defined(BCMPCIE_MAX_TX_FLOWS)
#define BCMPCIE_MAX_TX_FLOWS	40
#endif /* ! BCMPCIE_MAX_TX_FLOWS */

#define PCIE_SHARED_VERSION_9		0x00009
#define PCIE_SHARED_VERSION_8		0x00008
#define PCIE_SHARED_VERSION_7		0x00007
#define PCIE_SHARED_VERSION_6		0x00006 /* rev6 is compatible with rev 5 */
#define PCIE_SHARED_VERSION_5		0x00005 /* rev6 is compatible with rev 5 */
/**
 * Feature flags enabled in dongle. Advertised by dongle to DHD via the PCIe Shared structure that
 * is located in device memory.
 */
#define PCIE_SHARED_VERSION_MASK	0x000FF
#define PCIE_SHARED_ASSERT_BUILT	0x00100
#define PCIE_SHARED_ASSERT		0x00200
#define PCIE_SHARED_TRAP		0x00400
#define PCIE_SHARED_IN_BRPT		0x00800
#define PCIE_SHARED_SET_BRPT		0x01000
#define PCIE_SHARED_PENDING_BRPT	0x02000
/* BCMPCIE_SUPPORT_TX_PUSH_RING		0x04000 obsolete */
#define PCIE_SHARED_EVT_SEQNUM		0x08000
#define PCIE_SHARED_DMA_INDEX		0x10000
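
/*
 * Illustrative sketch: a host driver can recover the IPC revision and feature
 * bits from the advertised flags word. 'flags' is a hypothetical local copy of
 * pciedev_shared_t::flags.
 *
 *	uint32 ipc_rev = flags & PCIE_SHARED_VERSION_MASK;
 *	if (flags & PCIE_SHARED_DMA_INDEX) {
 *		... dongle will DMA the WR/RD index arrays into host memory ...
 *	}
 */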

/**
 * There are host types where a device interrupt can 'race ahead' of data written by the device into
 * host memory. The dongle can avoid this condition using a variety of techniques (read barrier,
 * using PCIe Message Signalled Interrupts, or by using the PCIE_DMA_INDEX feature). Unfortunately
 * these techniques have drawbacks on router platforms. For these platforms, it was decided to not
 * avoid the condition, but to detect the condition instead and act on it.
 * D2H M2M DMA Complete Sync mechanism: Modulo-253-SeqNum or XORCSUM
 */
#define PCIE_SHARED_D2H_SYNC_SEQNUM     0x20000
#define PCIE_SHARED_D2H_SYNC_XORCSUM    0x40000
#define PCIE_SHARED_D2H_SYNC_MODE_MASK \
	(PCIE_SHARED_D2H_SYNC_SEQNUM | PCIE_SHARED_D2H_SYNC_XORCSUM)
#define PCIE_SHARED_IDLE_FLOW_RING		0x80000
#define PCIE_SHARED_2BYTE_INDICES       0x100000
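
/*
 * Illustrative sketch: selecting the D2H completion sync method from the
 * advertised flags. 'd2h_sync_cb' and the my_* handlers are hypothetical
 * host-side names, shown only to make the mask usage concrete.
 *
 *	switch (flags & PCIE_SHARED_D2H_SYNC_MODE_MASK) {
 *	case PCIE_SHARED_D2H_SYNC_SEQNUM:
 *		d2h_sync_cb = my_d2h_sync_seqnum;	// modulo-253 sequence numbers
 *		break;
 *	case PCIE_SHARED_D2H_SYNC_XORCSUM:
 *		d2h_sync_cb = my_d2h_sync_xorcsum;	// XOR checksum over the work item
 *		break;
 *	default:
 *		d2h_sync_cb = NULL;			// no D2H completion sync in use
 *		break;
 *	}
 */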

#define PCIE_SHARED_FAST_DELETE_RING	0x00000020      /* Fast Delete Ring */
#define PCIE_SHARED_EVENT_BUF_POOL_MAX	0x000000c0      /* event buffer pool max bits */
#define PCIE_SHARED_EVENT_BUF_POOL_MAX_POS     6       /* event buffer pool max bit position */

/* dongle supports fatal buf log collection */
#define PCIE_SHARED_FATAL_LOGBUG_VALID	0x200000

/* Implicit DMA with corerev 19 and after */
#define PCIE_SHARED_IDMA		0x400000

/* MSI support */
#define PCIE_SHARED_D2H_MSI_MULTI_MSG   0x800000

/* IFRM with corerev 19 and after */
#define PCIE_SHARED_IFRM		0x1000000

/**
 * From Rev6 and above, suspend/resume can be done using two handshake methods.
 * 1. Using ctrl post/ctrl cmpl messages (default for rev6)
 * 2. Using Mailbox data (old method as used in rev5)
 * This shared flag indicates whether to override the rev6 default method and use mailbox for
 * suspend/resume.
 */
#define PCIE_SHARED_USE_MAILBOX		0x2000000

/* Firmware compiled for mfgbuild purposes */
#define PCIE_SHARED_MFGBUILD_FW		0x4000000

/* Firmware could use DB0 value as host timestamp */
#define PCIE_SHARED_TIMESTAMP_DB0	0x8000000
/* Firmware could use Hostready (IPC rev7) */
#define PCIE_SHARED_HOSTRDY_SUPPORT	0x10000000

/* When set, firmware does not support the OOB Device Wake based DS protocol */
#define PCIE_SHARED_NO_OOB_DW		0x20000000

/* When set, firmware supports the Inband DS protocol */
#define PCIE_SHARED_INBAND_DS		0x40000000

/* use DAR registers */
#define PCIE_SHARED_DAR			0x80000000

/**
 * Following are the shared2 flags. All bits in 'flags' have been used, so a 'flags2'
 * field was added; its bit definitions follow:
 */
/* WAR: D11 txstatus through unused status field of PCIe completion header */
#define PCIE_SHARED2_EXTENDED_TRAP_DATA	0x00000001	/* using flags2 in shared area */
#define PCIE_SHARED2_TXSTATUS_METADATA	0x00000002
#define PCIE_SHARED2_BT_LOGGING		0x00000004	/* BT logging support */
#define PCIE_SHARED2_SNAPSHOT_UPLOAD	0x00000008	/* BT/WLAN snapshot upload support */
#define PCIE_SHARED2_SUBMIT_COUNT_WAR	0x00000010	/* submission count WAR */
#define PCIE_SHARED2_FAST_DELETE_RING	0x00000020	/* Fast Delete ring support */
#define PCIE_SHARED2_EVTBUF_MAX_MASK	0x000000C0	/* 0:32, 1:64, 2:128, 3: 256 */

/* using flags2 to indicate firmware support added to reuse timesync to update PKT txstatus */
#define PCIE_SHARED2_PKT_TX_STATUS	0x00000100
#define PCIE_SHARED2_FW_SMALL_MEMDUMP	0x00000200	/* FW small memdump */
#define PCIE_SHARED2_FW_HC_ON_TRAP	0x00000400
#define PCIE_SHARED2_HSCB		0x00000800	/* Host SCB support */

#define PCIE_SHARED2_EDL_RING			0x00001000	/* Support Enhanced Debug Lane */
#define PCIE_SHARED2_DEBUG_BUF_DEST		0x00002000	/* debug buf dest support */
#define PCIE_SHARED2_PCIE_ENUM_RESET_FLR	0x00004000	/* BT producer index reset WAR */
#define PCIE_SHARED2_PKT_TIMESTAMP		0x00008000	/* Timestamp in packet */

#define PCIE_SHARED2_HP2P		0x00010000u	/* HP2P feature */
#define PCIE_SHARED2_HWA		0x00020000u	/* HWA feature */
#define PCIE_SHARED2_TRAP_ON_HOST_DB7	0x00040000u	/* can take a trap on DB7 from host */

#define PCIE_SHARED2_DURATION_SCALE	0x00100000u
#define PCIE_SHARED2_ETD_ADDR_SUPPORT	0x00800000u

#define PCIE_SHARED2_TXCSO		0x00200000u	/* Tx Checksum offload support */
#define PCIE_SHARED2_TXPOST_EXT		0x00400000u	/* extended txpost work item support */

#define PCIE_SHARED2_D2H_D11_TX_STATUS	0x40000000
#define PCIE_SHARED2_H2D_D11_TX_STATUS	0x80000000

#define PCIE_SHARED_D2H_MAGIC		0xFEDCBA09
#define PCIE_SHARED_H2D_MAGIC		0x12345678

typedef uint16			pcie_hwa_db_index_t;	/* 16 bit HWA index (IPC Rev 7) */
#define PCIE_HWA_DB_INDEX_SZ	(2u)			/* 2 bytes  sizeof(pcie_hwa_db_index_t) */

/**
 * Message rings convey messages between host and device. They are unidirectional, and are located
 * in host memory.
 *
 * This is the minimal set of message rings, known as 'common message rings':
 */
#define BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT		0
#define BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT		1
#define BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE		2
#define BCMPCIE_D2H_MSGRING_TX_COMPLETE			3
#define BCMPCIE_D2H_MSGRING_RX_COMPLETE			4
#define BCMPCIE_COMMON_MSGRING_MAX_ID			4

#define BCMPCIE_H2D_COMMON_MSGRINGS			2
#define BCMPCIE_D2H_COMMON_MSGRINGS			3
#define BCMPCIE_COMMON_MSGRINGS				5

#define BCMPCIE_H2D_MSGRINGS(max_tx_flows) \
	(BCMPCIE_H2D_COMMON_MSGRINGS + (max_tx_flows))
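
/*
 * Illustrative sketch: with the default BCMPCIE_MAX_TX_FLOWS of 40, the macro
 * above sizes the H2D ring bookkeeping as
 *
 *	BCMPCIE_H2D_MSGRINGS(BCMPCIE_MAX_TX_FLOWS)
 *		== BCMPCIE_H2D_COMMON_MSGRINGS (2) + 40 == 42 H2D rings
 */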

/* different ring types */
#define BCMPCIE_H2D_RING_TYPE_CTRL_SUBMIT		0x1
#define BCMPCIE_H2D_RING_TYPE_TXFLOW_RING		0x2
#define BCMPCIE_H2D_RING_TYPE_RXBUFPOST			0x3
#define BCMPCIE_H2D_RING_TYPE_TXSUBMIT			0x4
#define BCMPCIE_H2D_RING_TYPE_DBGBUF_SUBMIT		0x5
#define BCMPCIE_H2D_RING_TYPE_BTLOG_SUBMIT		0x6

#define BCMPCIE_D2H_RING_TYPE_CTRL_CPL			0x1
#define BCMPCIE_D2H_RING_TYPE_TX_CPL			0x2
#define BCMPCIE_D2H_RING_TYPE_RX_CPL			0x3
#define BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL		0x4
#define BCMPCIE_D2H_RING_TYPE_AC_RX_COMPLETE		0x5
#define BCMPCIE_D2H_RING_TYPE_BTLOG_CPL			0x6
#define BCMPCIE_D2H_RING_TYPE_EDL                       0x7
#define BCMPCIE_D2H_RING_TYPE_HPP_TX_CPL		0x8
#define BCMPCIE_D2H_RING_TYPE_HPP_RX_CPL		0x9

/**
 * H2D and D2H WR and RD indices are maintained in the following arrays:
 * - Array of all H2D WR Indices
 * - Array of all H2D RD Indices
 * - Array of all D2H WR Indices
 * - Array of all D2H RD Indices
 *
 * The offsets of the WR or RD indices (for common rings) in these arrays are
 * listed below. Arrays are NOT indexed by a ring's id.
 *
 * D2H common rings' WR and RD indices start from 0, even though their ring ids
 * start from BCMPCIE_H2D_COMMON_MSGRINGS.
 */

#define BCMPCIE_H2D_RING_IDX(h2d_ring_id) (h2d_ring_id)

enum h2dring_idx {
	/* H2D common rings */
	BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT_IDX =
		BCMPCIE_H2D_RING_IDX(BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT),
	BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT_IDX =
		BCMPCIE_H2D_RING_IDX(BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT),

	/* First TxPost's WR or RD index starts after all H2D common rings */
	BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START =
		BCMPCIE_H2D_RING_IDX(BCMPCIE_H2D_COMMON_MSGRINGS)
};

#define BCMPCIE_D2H_RING_IDX(d2h_ring_id) \
	((d2h_ring_id) - BCMPCIE_H2D_COMMON_MSGRINGS)

enum d2hring_idx {
	/* D2H Common Rings */
	BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE_IDX =
		BCMPCIE_D2H_RING_IDX(BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE),
	BCMPCIE_D2H_MSGRING_TX_COMPLETE_IDX =
		BCMPCIE_D2H_RING_IDX(BCMPCIE_D2H_MSGRING_TX_COMPLETE),
	BCMPCIE_D2H_MSGRING_RX_COMPLETE_IDX =
		BCMPCIE_D2H_RING_IDX(BCMPCIE_D2H_MSGRING_RX_COMPLETE)
};
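
/*
 * Illustrative sketch: the index macros above map ring ids onto positions in
 * the per-direction index arrays. For example, the first D2H common ring
 * (control complete, ring id 2) lands at array index 0:
 *
 *	BCMPCIE_D2H_RING_IDX(BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE)
 *		== 2 - BCMPCIE_H2D_COMMON_MSGRINGS == 0
 */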

/**
 * Macros for managing arrays of RD WR indices:
 * rw_index_sz:
 *    - in dongle, rw_index_sz is known at compile time
 *    - in host/DHD, rw_index_sz is derived from advertised pci_shared flags
 *
 *  ring_idx: See h2dring_idx and d2hring_idx
 */

/** Offset of a RD or WR index in H2D or D2H indices array */
#define BCMPCIE_RW_INDEX_OFFSET(rw_index_sz, ring_idx) \
	((rw_index_sz) * (ring_idx))

/** Fetch the address of RD or WR index in H2D or D2H indices array */
#define BCMPCIE_RW_INDEX_ADDR(indices_array_base, rw_index_sz, ring_idx) \
	(void *)((uint32)(indices_array_base) + \
	BCMPCIE_RW_INDEX_OFFSET((rw_index_sz), (ring_idx)))

/** H2D DMA Indices array size: given max flow rings */
#define BCMPCIE_H2D_RW_INDEX_ARRAY_SZ(rw_index_sz, max_tx_flows) \
	((rw_index_sz) * BCMPCIE_H2D_MSGRINGS(max_tx_flows))

/** D2H DMA Indices array size */
#define BCMPCIE_D2H_RW_INDEX_ARRAY_SZ(rw_index_sz) \
	((rw_index_sz) * BCMPCIE_D2H_COMMON_MSGRINGS)
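
/*
 * Illustrative sketch: sizing the index arrays when 2-byte indices are in use
 * (PCIE_SHARED_2BYTE_INDICES) and the default 40 tx flow rings apply. The
 * arithmetic below is for illustration only.
 *
 *	BCMPCIE_H2D_RW_INDEX_ARRAY_SZ(sizeof(uint16), BCMPCIE_MAX_TX_FLOWS)
 *		== 2 * (2 + 40) == 84 bytes per H2D index array
 *	BCMPCIE_D2H_RW_INDEX_ARRAY_SZ(sizeof(uint16))
 *		== 2 * 3 == 6 bytes per D2H index array
 */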

/* Backwards compatibility for legacy branches. */
#if !defined(PHYS_ADDR_N)
	#define PHYS_ADDR_N(name) name
#endif

/**
 * This type is used by a 'message buffer' (which is a FIFO for messages). Message buffers are used
 * for host<->device communication and are instantiated on both sides. ring_mem_t is instantiated
 * both in host as well as device memory.
 */
typedef struct ring_mem {
	uint16		idx;       /* ring id */
	uint8		type;
	uint8		rsvd;
	uint16		max_item;  /* Max number of items in flow ring */
	uint16		len_items; /* Items are fixed size. Length in bytes of one item */
	sh_addr_t	base_addr; /* 64 bits address, either in host or device memory */
} ring_mem_t;
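
/*
 * Illustrative sketch: the byte footprint of a ring described by a ring_mem_t
 * ('rm' is a hypothetical pointer to a populated descriptor); the ring itself
 * starts at the 64-bit address held in rm->base_addr.
 *
 *	uint32 ring_bytes = (uint32)rm->max_item * rm->len_items;
 */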

/**
 * Per flow ring, information is maintained in device memory, eg at what address the ringmem and
 * ringstate are located. The flow ring itself can be instantiated in either host or device memory.
 *
 * Perhaps this type should be renamed to make clear that it resides in device memory only.
 */
typedef struct ring_info {
	uint32		PHYS_ADDR_N(ringmem_ptr); /* ring mem location in dongle memory */

	/* Following arrays are indexed using h2dring_idx and d2hring_idx, and not
	 * by a ringid.
	 */

	/* 32bit ptr to arrays of WR or RD indices for all rings in dongle memory */
	uint32		PHYS_ADDR_N(h2d_w_idx_ptr); /* Array of all H2D ring's WR indices */
	uint32		PHYS_ADDR_N(h2d_r_idx_ptr); /* Array of all H2D ring's RD indices */
	uint32		PHYS_ADDR_N(d2h_w_idx_ptr); /* Array of all D2H ring's WR indices */
	uint32		PHYS_ADDR_N(d2h_r_idx_ptr); /* Array of all D2H ring's RD indices */

	/* PCIE_DMA_INDEX feature: Dongle uses mem2mem DMA to sync arrays in host.
	 * Host may directly fetch WR and RD indices from these host-side arrays.
	 *
	 * 64bit ptr to arrays of WR or RD indices for all rings in host memory.
	 */
	sh_addr_t	h2d_w_idx_hostaddr; /* Array of all H2D ring's WR indices */
	sh_addr_t	h2d_r_idx_hostaddr; /* Array of all H2D ring's RD indices */
	sh_addr_t	d2h_w_idx_hostaddr; /* Array of all D2H ring's WR indices */
	sh_addr_t	d2h_r_idx_hostaddr; /* Array of all D2H ring's RD indices */

	uint16		max_tx_flowrings; /* maximum number of H2D rings: common + flow */
	uint16		max_submission_queues; /* maximum number of H2D rings: common + flow */
	uint16		max_completion_rings; /* maximum number of D2H completion rings */
	uint16		max_vdevs; /* max number of virtual interfaces supported */

	sh_addr_t	ifrm_w_idx_hostaddr; /* Array of all H2D ring's WR indices for IFRM */

	/* 32bit ptr to arrays of HWA DB indices for all rings in dongle memory */
	uint32		PHYS_ADDR_N(h2d_hwa_db_idx_ptr); /* Array of all H2D rings HWA DB indices */
	uint32		PHYS_ADDR_N(d2h_hwa_db_idx_ptr); /* Array of all D2H rings HWA DB indices */

} ring_info_t;

/**
 * A structure located in TCM that is shared between host and device, primarily used during
 * initialization.
 */
typedef struct {
	/** shared area version captured at flags 7:0 */
	uint32	flags;

	uint32 PHYS_ADDR_N(trap_addr);
	uint32 PHYS_ADDR_N(assert_exp_addr);
	uint32 PHYS_ADDR_N(assert_file_addr);
	uint32  assert_line;
	uint32 PHYS_ADDR_N(console_addr);	/**< Address of hnd_cons_t */

	uint32 PHYS_ADDR_N(msgtrace_addr);

	uint32  fwid;

	/* Used for debug/flow control */
	uint16  total_lfrag_pkt_cnt;
	uint16  max_host_rxbufs; /* rsvd in spec */

	uint32 dma_rxoffset; /* rsvd in spec */

	/** these will be used for sleep request/ack, d3 req/ack */
	uint32  PHYS_ADDR_N(h2d_mb_data_ptr);
	uint32  PHYS_ADDR_N(d2h_mb_data_ptr);

	/* information pertinent to host IPC/msgbuf channels */
	/** location in the TCM memory which has the ring_info */
	uint32	PHYS_ADDR_N(rings_info_ptr);

	/** block of host memory for the scratch buffer */
	uint32		host_dma_scratch_buffer_len;
	sh_addr_t	host_dma_scratch_buffer;

	/* location in host memory for scb host offload structures */
	sh_addr_t	host_scb_addr;
	uint32		host_scb_size;

	/* anonymous union for overloading fields in structure */
	union {
		uint32	buzz_dbg_ptr;	/* BUZZZ state format strings and trace buffer */
		struct {
			/* Host provided trap buffer length in words */
			uint16	device_trap_debug_buffer_len;
			uint16	rsvd2;
		};
	};

	/* rev6 compatible changes */
	uint32          flags2;
	uint32          host_cap;

	/* location in the host address space to write trap indication.
	* At this point for the current rev of the spec, firmware will
	* support only indications to 32 bit host addresses.
	* This essentially is device_trap_debug_buffer_addr
	*/
	sh_addr_t       host_trap_addr;

	/* location for host fatal error log buffer start address */
	uint32 PHYS_ADDR_N(device_fatal_logbuf_start);

	/* location in host memory for offloaded modules */
	sh_addr_t	hoffload_addr;
	uint32		flags3;
	uint32		host_cap2;
	uint32		host_cap3;	/* host indicates its txpost ext tag capabilities */
	uint32		PHYS_ADDR_N(etd_addr);

	/* Device advertises the txpost extended tag capabilities */
	uint32		device_txpost_ext_tags_bitmask;

} pciedev_shared_t;
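
/*
 * Illustrative sketch (hedged; the my_read_tcm* helpers are hypothetical): once
 * a host has a local copy 'sh' of this structure, the rest of the IPC state is
 * reached through the pointers it carries.
 *
 *	ring_info_t ri;
 *	my_read_tcm(sh.rings_info_ptr, &ri, sizeof(ri));	// ring descriptors
 *	uint32 d2h_mb = my_read_tcm32(sh.d2h_mb_data_ptr);	// D2H mailbox word
 *	bool edl = (sh.flags2 & PCIE_SHARED2_EDL_RING) != 0;	// flags2 feature bit
 */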

/* Device F/W provides the following access function:
 * pciedev_shared_t *hnd_get_pciedev_shared(void);
 */

/* host capabilities */
#define HOSTCAP_PCIEAPI_VERSION_MASK		0x000000FF
#define HOSTCAP_H2D_VALID_PHASE			0x00000100
#define HOSTCAP_H2D_ENABLE_TRAP_ON_BADPHASE	0x00000200
#define HOSTCAP_H2D_ENABLE_HOSTRDY		0x00000400
#define HOSTCAP_DB0_TIMESTAMP			0x00000800
#define HOSTCAP_DS_NO_OOB_DW			0x00001000
#define HOSTCAP_DS_INBAND_DW			0x00002000
#define HOSTCAP_H2D_IDMA			0x00004000
#define HOSTCAP_H2D_IFRM			0x00008000
#define HOSTCAP_H2D_DAR				0x00010000
#define HOSTCAP_EXTENDED_TRAP_DATA		0x00020000
#define HOSTCAP_TXSTATUS_METADATA		0x00040000
#define HOSTCAP_BT_LOGGING			0x00080000
#define HOSTCAP_SNAPSHOT_UPLOAD			0x00100000
#define HOSTCAP_FAST_DELETE_RING		0x00200000
#define HOSTCAP_PKT_TXSTATUS			0x00400000
#define HOSTCAP_UR_FW_NO_TRAP			0x00800000 /* Don't trap on UR */
#define HOSTCAP_TX_CSO				0x01000000
#define HOSTCAP_HSCB				0x02000000
/* Host support for extended device trap debug buffer */
#define HOSTCAP_EXT_TRAP_DBGBUF			0x04000000
#define HOSTCAP_TXPOST_EXT			0x08000000
/* Host support for enhanced debug lane */
#define HOSTCAP_EDL_RING			0x10000000
#define HOSTCAP_PKT_TIMESTAMP			0x20000000
#define HOSTCAP_PKT_HP2P			0x40000000
#define HOSTCAP_HWA				0x80000000

#define HOSTCAP2_DURATION_SCALE_MASK            0x0000003Fu
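
/*
 * Illustrative sketch: a host builds the host_cap word it writes back to the
 * dongle by placing the IPC revision it speaks in the low byte (masked by
 * HOSTCAP_PCIEAPI_VERSION_MASK) and OR-ing in the HOSTCAP_* bits it actually
 * implements. The particular bits below are examples only.
 *
 *	uint32 host_cap = (PCIE_SHARED_VERSION_7 & HOSTCAP_PCIEAPI_VERSION_MASK) |
 *		HOSTCAP_H2D_ENABLE_HOSTRDY |
 *		HOSTCAP_DS_INBAND_DW |
 *		HOSTCAP_EXTENDED_TRAP_DATA;
 */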

/* extended trap debug buffer allocation sizes. Note that this buffer can be used for
 * other trap related purposes also.
 */
#define BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN_MIN	(64u * 1024u)
#define BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN	(96u * 1024u)
#define BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN_MAX	(256u * 1024u)

/**
 * Mailboxes notify a remote party that an event took place, using interrupts. They use hardware
 * support.
 */

/* H2D mail box Data */
#define H2D_HOST_D3_INFORM		0x00000001
#define H2D_HOST_DS_ACK		0x00000002
#define H2D_HOST_DS_NAK		0x00000004
#define H2D_HOST_D0_INFORM_IN_USE	0x00000008
#define H2D_HOST_D0_INFORM		0x00000010
#define H2DMB_DS_ACTIVE			0x00000020
#define H2DMB_DS_DEVICE_WAKE	0x00000040
#define H2D_HOST_IDMA_INITED	0x00000080
#define H2D_HOST_ACK_NOINT		0x00010000 /* d2h_ack interrupt ignore */
#define H2D_HOST_CONS_INT	0x80000000	/**< h2d int for console cmds  */
#define H2D_FW_TRAP		0x20000000	/**< h2d force TRAP */
#define H2DMB_DS_HOST_SLEEP_INFORM H2D_HOST_D3_INFORM
#define H2DMB_DS_DEVICE_SLEEP_ACK  H2D_HOST_DS_ACK
#define H2DMB_DS_DEVICE_SLEEP_NAK  H2D_HOST_DS_NAK
#define H2DMB_D0_INFORM_IN_USE     H2D_HOST_D0_INFORM_IN_USE
#define H2DMB_D0_INFORM            H2D_HOST_D0_INFORM
#define H2DMB_FW_TRAP              H2D_FW_TRAP
#define H2DMB_HOST_CONS_INT        H2D_HOST_CONS_INT
#define H2DMB_DS_DEVICE_WAKE_ASSERT		H2DMB_DS_DEVICE_WAKE
#define H2DMB_DS_DEVICE_WAKE_DEASSERT	H2DMB_DS_ACTIVE

/* D2H mail box Data */
#define D2H_DEV_D3_ACK					0x00000001
#define D2H_DEV_DS_ENTER_REQ				0x00000002
#define D2H_DEV_DS_EXIT_NOTE				0x00000004
#define D2HMB_DS_HOST_SLEEP_EXIT_ACK			0x00000008
#define D2H_DEV_IDMA_INITED				0x00000010
#define D2HMB_DS_HOST_SLEEP_ACK         D2H_DEV_D3_ACK
#define D2HMB_DS_DEVICE_SLEEP_ENTER_REQ D2H_DEV_DS_ENTER_REQ
#define D2HMB_DS_DEVICE_SLEEP_EXIT      D2H_DEV_DS_EXIT_NOTE

#define D2H_DEV_MB_MASK		(D2H_DEV_D3_ACK | D2H_DEV_DS_ENTER_REQ | \
				D2H_DEV_DS_EXIT_NOTE | D2H_DEV_IDMA_INITED)
#define D2H_DEV_MB_INVALIDATED(x)	((!x) || (x & ~D2H_DEV_MB_MASK))
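
/*
 * Illustrative sketch: filtering a D2H mailbox value before dispatching it
 * ('d2h_mb_data' is a hypothetical value read via d2h_mb_data_ptr).
 *
 *	if (D2H_DEV_MB_INVALIDATED(d2h_mb_data))
 *		return;					// zero or unknown bits: ignore
 *	if (d2h_mb_data & D2H_DEV_D3_ACK)
 *		... complete the D3/suspend handshake ...
 *	if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ)
 *		... grant or refuse deep-sleep entry ...
 */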

/* trap data codes */
#define D2H_DEV_FWHALT					0x10000000
#define D2H_DEV_EXT_TRAP_DATA				0x20000000
#define D2H_DEV_TRAP_IN_TRAP				0x40000000
#define D2H_DEV_TRAP_HOSTDB				0x80000000 /* trap as set by host DB */
#define D2H_DEV_TRAP_DUE_TO_BT				0x01000000
/* Indicates trap due to HMAP violation */
#define D2H_DEV_TRAP_DUE_TO_HMAP			0x02000000
/* Indicates whether HMAP violation was Write */
#define D2H_DEV_TRAP_HMAP_WRITE				0x04000000
#define D2H_DEV_TRAP_PING_HOST_FAILURE			0x08000000
#define D2H_FWTRAP_MASK		0x0000001F	/* Adding maskbits for TRAP information */

#define D2HMB_FWHALT                    D2H_DEV_FWHALT
#define D2HMB_TRAP_IN_TRAP              D2H_DEV_TRAP_IN_TRAP
#define D2HMB_EXT_TRAP_DATA             D2H_DEV_EXT_TRAP_DATA
#define D2H_FWTRAP_MAC_SSSR_RDY		0x00010000u	/* MAC SSSR prepped */

/* Size of Extended Trap data Buffer */
#define BCMPCIE_EXT_TRAP_DATA_MAXLEN  4096
/** These macros operate on type 'inuse_lclbuf_pool_t' and are used by firmware only */
#define PREVTXP(i, d)           (((i) == 0) ? ((d) - 1) : ((i) - 1))
#define NEXTTXP(i, d)           ((((i)+1) >= (d)) ? 0 : ((i)+1))
#define NEXTNTXP(i, n, d)       ((((i)+(n)) >= (d)) ? 0 : ((i)+(n)))
#define NTXPACTIVE(r, w, d)     (((r) <= (w)) ? ((w)-(r)) : ((d)-(r)+(w)))
#define NTXPAVAIL(r, w, d)      (((d) - NTXPACTIVE((r), (w), (d))) > 1)

/* Function can be used to notify host of FW halt */
#define READ_AVAIL_SPACE(w, r, d) ((w >= r) ? (uint32)(w - r) : (uint32)(d - r))
#define WRITE_SPACE_AVAIL_CONTINUOUS(r, w, d) ((w >= r) ? (d - w) : (r - w))
#define WRITE_SPACE_AVAIL(r, w, d) (d - (NTXPACTIVE(r, w, d)) - 1)
#define CHECK_WRITE_SPACE(r, w, d) ((r) > (w)) ? \
	(uint32)((r) - (w) - 1) : ((r) == 0 || (w) == 0) ? \
	(uint32)((d) - (w) - 1) : (uint32)((d) - (w))

#define CHECK_NOWRITE_SPACE(r, w, d) \
	(((uint32)(r) == (uint32)((w) + 1)) || (((r) == 0) && ((w) == ((d) - 1))))
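
/*
 * Illustrative sketch of the ring arithmetic above, for a ring of depth d = 8
 * with read index r = 6 and write index w = 2 (the writer has wrapped):
 *
 *	NTXPACTIVE(6, 2, 8)        == 8 - 6 + 2 == 4	// items pending
 *	WRITE_SPACE_AVAIL(6, 2, 8) == 8 - 4 - 1 == 3	// free slots (one kept empty)
 *	NEXTTXP(7, 8)              == 0			// index wrap-around
 */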

/* These should be moved into pciedev.h --- */
#define WRT_PEND(x)	((x)->wr_pending)
#define DNGL_RING_WPTR(msgbuf)		(*((msgbuf)->tcm_rs_w_ptr)) /**< advanced by producer */
#define BCMMSGBUF_RING_SET_W_PTR(msgbuf, a)	(DNGL_RING_WPTR(msgbuf) = (a))

#define DNGL_RING_RPTR(msgbuf)		(*((msgbuf)->tcm_rs_r_ptr)) /**< advanced by consumer */
#define BCMMSGBUF_RING_SET_R_PTR(msgbuf, a)	(DNGL_RING_RPTR(msgbuf) = (a))

#define MODULO_RING_IDX(x, y)	((x) % (y)->bitmap_size)

#define  RING_READ_PTR(x)	((x)->ringstate->r_offset)
#define  RING_WRITE_PTR(x)	((x)->ringstate->w_offset)
#define  RING_START_PTR(x)	((x)->ringmem->base_addr.low_addr)
#define  RING_MAX_ITEM(x)	((x)->ringmem->max_item)
#define  RING_LEN_ITEMS(x)	((x)->ringmem->len_items)
#define	 HOST_RING_BASE(x)	((x)->dma_buf.va)
#define	 HOST_RING_END(x)	((uint8 *)HOST_RING_BASE((x)) + \
					((RING_MAX_ITEM((x))-1)*RING_LEN_ITEMS((x))))

/* Trap types copied in the pciedev_shared.trap_addr */
#define	FW_INITIATED_TRAP_TYPE	(0x1 << 7)
#define	HEALTHCHECK_NODS_TRAP_TYPE	(0x1 << 6)

/* Device supported txpost extended tag capabilities */
#define PCIE_SHARED2_DEV_TXPOST_EXT_TAG_CAP_RSVD	(1u << 0u) /* Reserved  */
#define PCIE_SHARED2_DEV_TXPOST_EXT_TAG_CAP_CSO		(1u << 1u) /* CSO */
#define PCIE_SHARED2_DEV_TXPOST_EXT_TAG_CAP_MESH	(1u << 2u) /* MESH */

#define RING_MESH(x)	(((x)->txpost_ext_cap_flags) & PCIE_SHARED2_DEV_TXPOST_EXT_TAG_CAP_MESH)

#endif	/* _bcmpcie_h_ */