xref: /OK3568_Linux_fs/external/rkwifibt/drivers/bcmdhd/include/sbhnddma.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Generic Broadcom Home Networking Division (HND) DMA engine HW interface
3*4882a593Smuzhiyun  * This supports the following chips: BCM42xx, 44xx, 47xx .
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (C) 2020, Broadcom.
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  *      Unless you and Broadcom execute a separate written software license
8*4882a593Smuzhiyun  * agreement governing use of this software, this software is licensed to you
9*4882a593Smuzhiyun  * under the terms of the GNU General Public License version 2 (the "GPL"),
10*4882a593Smuzhiyun  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11*4882a593Smuzhiyun  * following added to such license:
12*4882a593Smuzhiyun  *
13*4882a593Smuzhiyun  *      As a special exception, the copyright holders of this software give you
14*4882a593Smuzhiyun  * permission to link this software with independent modules, and to copy and
15*4882a593Smuzhiyun  * distribute the resulting executable under terms of your choice, provided that
16*4882a593Smuzhiyun  * you also meet, for each linked independent module, the terms and conditions of
17*4882a593Smuzhiyun  * the license of that module.  An independent module is a module which is not
18*4882a593Smuzhiyun  * derived from this software.  The special exception does not apply to any
19*4882a593Smuzhiyun  * modifications of the software.
20*4882a593Smuzhiyun  *
21*4882a593Smuzhiyun  *
22*4882a593Smuzhiyun  * <<Broadcom-WL-IPTag/Dual:>>
23*4882a593Smuzhiyun  */
24*4882a593Smuzhiyun 
25*4882a593Smuzhiyun #ifndef	_sbhnddma_h_
26*4882a593Smuzhiyun #define	_sbhnddma_h_
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun /* DMA structure:
29*4882a593Smuzhiyun  *  support two DMA engines: 32 bits address or 64 bit addressing
30*4882a593Smuzhiyun  *  basic DMA register set is per channel(transmit or receive)
31*4882a593Smuzhiyun  *  a pair of channels is defined for convenience
32*4882a593Smuzhiyun  */
33*4882a593Smuzhiyun 
34*4882a593Smuzhiyun /* 32 bits addressing */
35*4882a593Smuzhiyun 
/** dma registers per channel(xmt or rcv), 32-bit addressing engine */
typedef volatile struct {
	uint32	control;		/**< channel control: enable, suspend, et al */
	uint32	addr;			/**< descriptor ring base address (4K aligned) */
	uint32	ptr;			/**< last descriptor posted to chip */
	uint32	status;			/**< current active descriptor, et al */
} dma32regs_t;
43*4882a593Smuzhiyun 
/** a tx/rx channel pair, defined for convenience */
typedef volatile struct {
	dma32regs_t	xmt;		/**< dma tx channel */
	dma32regs_t	rcv;		/**< dma rx channel */
} dma32regp_t;
48*4882a593Smuzhiyun 
typedef volatile struct {	/* diag access (32-bit DMA engine) */
	uint32	fifoaddr;		/**< diag address (offset + FA_SEL_* select) */
	uint32	fifodatalow;		/**< low 32bits of data */
	uint32	fifodatahigh;		/**< high 32bits of data */
	uint32	pad;			/**< reserved */
} dma32diag_t;
55*4882a593Smuzhiyun 
/**
 * DMA Descriptor (32-bit addressing)
 * Descriptors are only read by the hardware, never written back.
 */
typedef volatile struct {
	uint32	ctrl;		/**< misc control bits & bufcount (see CTRL_* below) */
	uint32	addr;		/**< data buffer address */
} dma32dd_t;
64*4882a593Smuzhiyun 
/** Each descriptor ring must be 4096byte aligned, and fit within a single 4096byte page. */
#define	D32RINGALIGN_BITS	12
#define	D32MAXRINGSZ		(1 << D32RINGALIGN_BITS)	/**< max ring size in bytes */
#define	D32RINGALIGN		(1 << D32RINGALIGN_BITS)	/**< ring base alignment */

#define	D32MAXDD	(D32MAXRINGSZ / sizeof (dma32dd_t))	/**< max descriptors per ring */

/* transmit channel control */
#define	XC_XE		((uint32)1 << 0)	/**< transmit enable */
#define	XC_SE		((uint32)1 << 1)	/**< transmit suspend request */
#define	XC_LE		((uint32)1 << 2)	/**< loopback enable */
#define	XC_FL		((uint32)1 << 4)	/**< flush request */
#define XC_MR_MASK	0x000001C0		/**< Multiple outstanding reads (DMA_MR_*) */
#define XC_MR_SHIFT	6
#define	XC_PD		((uint32)1 << 11)	/**< parity check disable */
#define	XC_AE		((uint32)3 << 16)	/**< address extension bits */
#define	XC_AE_SHIFT	16
#define XC_BL_MASK	0x001C0000		/**< BurstLen bits (DMA_BL_*) */
#define XC_BL_SHIFT	18
#define XC_PC_MASK	0x00E00000		/**< Prefetch control (DMA_PC_*) */
#define XC_PC_SHIFT	21
#define XC_PT_MASK	0x03000000		/**< Prefetch threshold (DMA_PT_*) */
#define XC_PT_SHIFT	24
88*4882a593Smuzhiyun 
/** Multiple outstanding reads: encoded values for the MR control field */
#define DMA_MR_1	0	/**< 1 outstanding read */
#define DMA_MR_2	1	/**< 2 outstanding reads */
#define DMA_MR_4	2
#define DMA_MR_8	3
#define DMA_MR_12	4
#define DMA_MR_16	5
#define DMA_MR_20	6
#define DMA_MR_32	7

/** DMA Burst Length in bytes: encoded value N selects a 16 << N byte burst */
#define DMA_BL_16	0
#define DMA_BL_32	1
#define DMA_BL_64	2
#define DMA_BL_128	3
#define DMA_BL_256	4
#define DMA_BL_512	5
#define DMA_BL_1024	6
#define DMA_BL_INVALID	0xFF	/**< sentinel: burst length not configured */

/** Prefetch control */
#define DMA_PC_0	0	/**< prefetch disabled */
#define DMA_PC_4	1
#define DMA_PC_8	2
#define DMA_PC_16	3
#define DMA_PC_32	4
/* others: reserved */

/** Prefetch threshold */
#define DMA_PT_1	0
#define DMA_PT_2	1
#define DMA_PT_4	2
#define DMA_PT_8	3

/** Channel Switch */
#define DMA_CS_OFF	0
#define DMA_CS_ON	1
126*4882a593Smuzhiyun 
/* transmit descriptor table pointer */
#define	XP_LD_MASK	0xfff			/**< last valid descriptor */

/* transmit channel status */
#define	XS_CD_MASK	0x0fff			/**< current descriptor pointer */
#define	XS_XS_MASK	0xf000			/**< transmit state */
#define	XS_XS_SHIFT	12
#define	XS_XS_DISABLED	0x0000			/**< disabled */
#define	XS_XS_ACTIVE	0x1000			/**< active */
#define	XS_XS_IDLE	0x2000			/**< idle wait */
#define	XS_XS_STOPPED	0x3000			/**< stopped */
#define	XS_XS_SUSP	0x4000			/**< suspend pending */
#define	XS_XE_MASK	0xf0000			/**< transmit errors */
#define	XS_XE_SHIFT	16
#define	XS_XE_NOERR	0x00000			/**< no error */
#define	XS_XE_DPE	0x10000			/**< descriptor protocol error */
#define	XS_XE_DFU	0x20000			/**< data fifo underrun */
#define	XS_XE_BEBR	0x30000			/**< bus error on buffer read */
#define	XS_XE_BEDA	0x40000			/**< bus error on descriptor access */
#define	XS_AD_MASK	0xfff00000		/**< active descriptor */
#define	XS_AD_SHIFT	20
148*4882a593Smuzhiyun 
/* receive channel control */
#define	RC_RE		((uint32)1 << 0)	/**< receive enable */
#define	RC_RO_MASK	0xfe			/**< receive frame offset */
#define	RC_RO_SHIFT	1
#define	RC_FM		((uint32)1 << 8)	/**< direct fifo receive (pio) mode */
#define	RC_SH		((uint32)1 << 9)	/**< separate rx header descriptor enable */
#define	RC_OC		((uint32)1 << 10)	/**< overflow continue */
#define	RC_PD		((uint32)1 << 11)	/**< parity check disable */
#define	RC_AE		((uint32)3 << 16)	/**< address extension bits */
#define	RC_AE_SHIFT	16
#define RC_BL_MASK	0x001C0000		/**< BurstLen bits (DMA_BL_*) */
#define RC_BL_SHIFT	18
#define RC_PC_MASK	0x00E00000		/**< Prefetch control (DMA_PC_*) */
#define RC_PC_SHIFT	21
#define RC_PT_MASK	0x03000000		/**< Prefetch threshold (DMA_PT_*) */
#define RC_PT_SHIFT	24
#define RC_WAITCMP_MASK 0x00001000		/**< wait for completion */
#define RC_WAITCMP_SHIFT 12
/* receive descriptor table pointer */
#define	RP_LD_MASK	0xfff			/**< last valid descriptor */

/* receive channel status */
#define	RS_CD_MASK	0x0fff			/**< current descriptor pointer */
#define	RS_RS_MASK	0xf000			/**< receive state */
#define	RS_RS_SHIFT	12
#define	RS_RS_DISABLED	0x0000			/**< disabled */
#define	RS_RS_ACTIVE	0x1000			/**< active */
#define	RS_RS_IDLE	0x2000			/**< idle wait */
#define	RS_RS_STOPPED	0x3000			/**< reserved */
#define	RS_RE_MASK	0xf0000			/**< receive errors */
#define	RS_RE_SHIFT	16
#define	RS_RE_NOERR	0x00000			/**< no error */
#define	RS_RE_DPE	0x10000			/**< descriptor protocol error */
#define	RS_RE_DFO	0x20000			/**< data fifo overflow */
#define	RS_RE_BEBW	0x30000			/**< bus error on buffer write */
#define	RS_RE_BEDA	0x40000			/**< bus error on descriptor access */
#define	RS_AD_MASK	0xfff00000		/**< active descriptor */
#define	RS_AD_SHIFT	20
187*4882a593Smuzhiyun 
/* fifoaddr (diag access select/offset encoding) */
#define	FA_OFF_MASK	0xffff			/**< offset */
#define	FA_SEL_MASK	0xf0000			/**< select */
#define	FA_SEL_SHIFT	16
#define	FA_SEL_XDD	0x00000			/**< transmit dma data */
#define	FA_SEL_XDP	0x10000			/**< transmit dma pointers */
#define	FA_SEL_RDD	0x40000			/**< receive dma data */
#define	FA_SEL_RDP	0x50000			/**< receive dma pointers */
#define	FA_SEL_XFD	0x80000			/**< transmit fifo data */
#define	FA_SEL_XFP	0x90000			/**< transmit fifo pointers */
#define	FA_SEL_RFD	0xc0000			/**< receive fifo data */
#define	FA_SEL_RFP	0xd0000			/**< receive fifo pointers */
#define	FA_SEL_RSD	0xe0000			/**< receive frame status data */
#define	FA_SEL_RSP	0xf0000			/**< receive frame status pointers */

/* descriptor control flags (dma32dd_t.ctrl) */
#define	CTRL_BC_MASK	0x00001fff		/**< buffer byte count, real data len must <= 4KB */
#define	CTRL_AE		((uint32)3 << 16)	/**< address extension bits */
#define	CTRL_AE_SHIFT	16
#define	CTRL_PARITY	((uint32)3 << 18)	/**< parity bit */
#define	CTRL_EOT	((uint32)1 << 28)	/**< end of descriptor table */
#define	CTRL_IOC	((uint32)1 << 29)	/**< interrupt on completion */
#define	CTRL_EOF	((uint32)1 << 30)	/**< end of frame */
#define	CTRL_SOF	((uint32)1 << 31)	/**< start of frame */

/** control flags in the range [27:20] are core-specific and not defined here */
#define	CTRL_CORE_MASK	0x0ff00000
215*4882a593Smuzhiyun 
216*4882a593Smuzhiyun /* 64 bits addressing */
217*4882a593Smuzhiyun 
/** dma registers per channel(xmt or rcv), 64-bit addressing engine */
typedef volatile struct {
	uint32	control;	/**< enable, et al */
	uint32	ptr;		/**< last descriptor posted to chip */
	uint32	addrlow;	/**< descriptor ring base address low 32-bits (8K aligned) */
	uint32	addrhigh;	/**< descriptor ring base address bits 63:32 (8K aligned) */
	uint32	status0;	/**< current descriptor, xmt state */
	uint32	status1;	/**< active descriptor, xmt error */
} dma64regs_t;
227*4882a593Smuzhiyun 
/** a dma64 tx/rx channel pair, defined for convenience */
typedef volatile struct {
	dma64regs_t	tx;		/**< dma64 tx channel */
	dma64regs_t	rx;		/**< dma64 rx channel */
} dma64regp_t;
232*4882a593Smuzhiyun 
typedef volatile struct {		/**< diag access (64-bit DMA engine) */
	uint32	fifoaddr;		/**< diag address (offset + D64_FA_SEL_* select) */
	uint32	fifodatalow;		/**< low 32bits of data */
	uint32	fifodatahigh;		/**< high 32bits of data */
	uint32	pad;			/**< reserved */
} dma64diag_t;
239*4882a593Smuzhiyun 
/**
 * DMA Descriptor (64-bit addressing)
 * Descriptors are only read by the hardware, never written back.
 */
typedef volatile struct {
	uint32	ctrl1;		/**< misc control bits (see D64_CTRL1_* below) */
	uint32	ctrl2;		/**< buffer count and address extension (D64_CTRL2_*) */
	uint32	addrlow;	/**< memory address of the data buffer, bits 31:0 */
	uint32	addrhigh;	/**< memory address of the data buffer, bits 63:32 */
} dma64dd_t;
250*4882a593Smuzhiyun 
/**
 * Pool implementation: each pool is 64KB max.  Align it to maximize ability to grow
 */
#define D64POOLALIGN_BITS	15u
#define D64POOLALIGN_BITS_MAX	16u
/**
 * Each descriptor ring must be 8kB aligned, and fit within a contiguous 8kB physical
 * address range.
 */
#define D64RINGALIGN_BITS	13
#define	D64MAXRINGSZ		(1 << D64RINGALIGN_BITS)	/**< max ring size in bytes */
#define	D64RINGBOUNDARY		(1 << D64RINGALIGN_BITS)	/**< ring must not cross this boundary */

#define	D64MAXDD	(D64MAXRINGSZ / sizeof (dma64dd_t))	/**< max descriptors per ring */

/** for cores with large descriptor ring support, descriptor ring size can be up to 4096 */
#define	D64MAXDD_LARGE		((1 << 16) / sizeof (dma64dd_t))

/**
 * for cores with large descriptor ring support (4k descriptors), descriptor ring cannot cross
 * 64K boundary
 */
#define	D64RINGBOUNDARY_LARGE	(1 << 16)

/*
 * Default DMA Burstlen values for USBRev >= 12 and SDIORev >= 11.
 * When this field contains the value N, the burst length is 2**(N + 4) bytes.
 */
#define D64_DEF_USBBURSTLEN     2
#define D64_DEF_SDIOBURSTLEN    1

#ifndef D64_USBBURSTLEN
#define D64_USBBURSTLEN	DMA_BL_64
#endif
#ifndef D64_SDIOBURSTLEN
#define D64_SDIOBURSTLEN	DMA_BL_32
#endif
287*4882a593Smuzhiyun 
/* transmit channel control */
#define	D64_XC_XE		0x00000001	/**< transmit enable */
#define	D64_XC_SE		0x00000002	/**< transmit suspend request */
#define	D64_XC_LE		0x00000004	/**< loopback enable */
#define	D64_XC_FL		0x00000010	/**< flush request */
#define D64_XC_MR_MASK		0x000001C0	/**< Multiple outstanding reads (DMA_MR_*) */
#define D64_XC_MR_SHIFT		6
#define D64_XC_CS_SHIFT		9		/**< channel switch enable */
#define D64_XC_CS_MASK		0x00000200      /**< channel switch enable */
#define	D64_XC_PD		0x00000800	/**< parity check disable */
#define	D64_XC_AE		0x00030000	/**< address extension bits */
#define	D64_XC_AE_SHIFT		16
#define D64_XC_BL_MASK		0x001C0000	/**< BurstLen bits (DMA_BL_*) */
#define D64_XC_BL_SHIFT		18
#define D64_XC_PC_MASK		0x00E00000		/**< Prefetch control (DMA_PC_*) */
#define D64_XC_PC_SHIFT		21
#define D64_XC_PT_MASK		0x03000000		/**< Prefetch threshold (DMA_PT_*) */
#define D64_XC_PT_SHIFT		24
#define D64_XC_CO_MASK		0x04000000	/**< coherent transactions for descriptors */
#define D64_XC_CO_SHIFT		26

/* transmit descriptor table pointer */
#define	D64_XP_LD_MASK		0x00001fff	/**< last valid descriptor */
311*4882a593Smuzhiyun 
/* transmit channel status.
 * NOTE: the CD/AD masks expand to fields of a local dma_info pointer `di`,
 * so those two macros are only usable where a `di` variable is in scope.
 */
#define	D64_XS0_CD_MASK		(di->d64_xs0_cd_mask)	/**< current descriptor pointer */
#define	D64_XS0_XS_MASK		0xf0000000     	/**< transmit state */
#define	D64_XS0_XS_SHIFT		28
#define	D64_XS0_XS_DISABLED	0x00000000	/**< disabled */
#define	D64_XS0_XS_ACTIVE	0x10000000	/**< active */
#define	D64_XS0_XS_IDLE		0x20000000	/**< idle wait */
#define	D64_XS0_XS_STOPPED	0x30000000	/**< stopped */
#define	D64_XS0_XS_SUSP		0x40000000	/**< suspend pending */

#define	D64_XS1_AD_MASK		(di->d64_xs1_ad_mask)	/**< active descriptor */
#define	D64_XS1_XE_MASK		0xf0000000     	/**< transmit errors */
#define	D64_XS1_XE_SHIFT		28
#define	D64_XS1_XE_NOERR	0x00000000	/**< no error */
#define	D64_XS1_XE_DPE		0x10000000	/**< descriptor protocol error */
#define	D64_XS1_XE_DFU		0x20000000	/**< data fifo underrun */
#define	D64_XS1_XE_DTE		0x30000000	/**< data transfer error */
#define	D64_XS1_XE_DESRE	0x40000000	/**< descriptor read error */
#define	D64_XS1_XE_COREE	0x50000000	/**< core error */
331*4882a593Smuzhiyun 
/* receive channel control */
#define	D64_RC_RE		0x00000001	/**< receive enable */
#define	D64_RC_RO_MASK		0x000000fe	/**< receive frame offset */
#define	D64_RC_RO_SHIFT		1
#define	D64_RC_FM		0x00000100	/**< direct fifo receive (pio) mode */
#define	D64_RC_SH		0x00000200	/**< separate rx header descriptor enable */
#define	D64_RC_SHIFT		9	/**< separate rx header descriptor enable */
#define	D64_RC_OC		0x00000400	/**< overflow continue */
#define	D64_RC_PD		0x00000800	/**< parity check disable */
#define D64_RC_WAITCMP_MASK	0x00001000	/**< wait for completion */
#define D64_RC_WAITCMP_SHIFT	12
#define D64_RC_SA		0x00002000	/**< select active */
#define D64_RC_GE		0x00004000	/**< Glom enable */
#define	D64_RC_AE		0x00030000	/**< address extension bits */
#define	D64_RC_AE_SHIFT		16
#define D64_RC_BL_MASK		0x001C0000	/**< BurstLen bits (DMA_BL_*) */
#define D64_RC_BL_SHIFT		18
#define D64_RC_PC_MASK		0x00E00000	/**< Prefetch control (DMA_PC_*) */
#define D64_RC_PC_SHIFT		21
#define D64_RC_PT_MASK		0x03000000	/**< Prefetch threshold (DMA_PT_*) */
#define D64_RC_PT_SHIFT		24
#define D64_RC_CO_MASK		0x04000000	/**< coherent transactions for descriptors */
#define D64_RC_CO_SHIFT		26
#define	D64_RC_ROEXT_MASK	0x08000000	/**< receive frame offset extension bit */
#define	D64_RC_ROEXT_SHIFT	27
#define D64_RC_MOW_SHIFT	(28u)		/**< multiple outstanding write */
#define D64_RC_MOW_MASK		((0x3u) << D64_RC_MOW_SHIFT)

/* receive control values */
/* RcvCtrl.MultipleOutstandingWrites(MOW) valid values(N) listed below.
 * (N + 1) out standing write(s) supported
 */
#define D64_RC_MOW_1		(0u)		/**< 1 outstanding write */
#define D64_RC_MOW_2		(1u)		/**< 2 outstanding writes */
#define D64_RC_MOW_3		(2u)		/**< 3 outstanding writes */
#define D64_RC_MOW_4		(3u)		/**< 4 outstanding writes */
368*4882a593Smuzhiyun 
/* flags for dma controller (software dmactrlflags, not hardware registers) */
#define DMA_CTRL_PEN		(1u << 0u)	/**< parity enable */
#define DMA_CTRL_ROC		(1u << 1u)	/**< rx overflow continue */
#define DMA_CTRL_RXMULTI	(1u << 2u)	/**< allow rx scatter to multiple descriptors */
#define DMA_CTRL_UNFRAMED	(1u << 3u)	/**< Unframed Rx/Tx data */
#define DMA_CTRL_USB_BOUNDRY4KB_WAR (1u << 4u)	/**< USB core REV9's SETUP dma channel's
						*  buffer cannot cross a 4K boundary (PR80468)
						*/
#define DMA_CTRL_DMA_AVOIDANCE_WAR (1u << 5u)	/**< DMA avoidance WAR for 4331 */
#define DMA_CTRL_RXSINGLE	(1u << 6u)	/**< always single buffer */
#define DMA_CTRL_SDIO_RXGLOM	(1u << 7u)	/**< DMA Rx glome is enabled */
#define DMA_CTRL_DESC_ONLY_FLAG (1u << 8u)	/**< For DMA which posts only descriptors,
						 * no packets
						 */
#define DMA_CTRL_DESC_CD_WAR	(1u << 9u)	/**< WAR for descriptor only DMA's CD not being
						 * updated correctly by HW in CT mode.
						 */
#define DMA_CTRL_CS		(1u << 10u)	/* channel switch enable */
#define DMA_CTRL_ROEXT		(1u << 11u)	/* receive frame offset extension support */
#define DMA_CTRL_RX_ALIGN_8BYTE	(1u << 12u)	/* RXDMA address 8-byte aligned */
#define DMA_CTRL_SHARED_POOL	(1u << 15u)	/**< shared descriptor pool */
#define DMA_CTRL_COREUNIT_SHIFT	(17u)		/* Core unit shift */
#define DMA_CTRL_COREUNIT_MASK	(0x3u << 17u)	/* Core unit mask */

/* Store the core unit number in dmactrlflags.
 * NOTE(review): uses |= and does not clear a previously-set core unit value —
 * appears to assume the field is written at most once per di; confirm before reusing.
 */
#define DMA_CTRL_SET_COREUNIT(di, coreunit) \
	((di)->hnddma.dmactrlflags |= \
	(((coreunit) << DMA_CTRL_COREUNIT_SHIFT) & DMA_CTRL_COREUNIT_MASK))

/* Extract the core unit number from dmactrlflags */
#define DMA_CTRL_GET_COREUNIT(di) \
	(((di)->hnddma.dmactrlflags & DMA_CTRL_COREUNIT_MASK) >> DMA_CTRL_COREUNIT_SHIFT)
399*4882a593Smuzhiyun 
/* receive descriptor table pointer */
#define	D64_RP_LD_MASK		0x00001fff	/**< last valid descriptor */

/* receive channel status.
 * NOTE: the CD/AD masks expand to fields of a local dma_info pointer `di`,
 * so those two macros are only usable where a `di` variable is in scope.
 */
#define	D64_RS0_CD_MASK		(di->d64_rs0_cd_mask)	/**< current descriptor pointer */
#define	D64_RS0_RS_MASK		0xf0000000     	/**< receive state */
#define	D64_RS0_RS_SHIFT		28
#define	D64_RS0_RS_DISABLED	0x00000000	/**< disabled */
#define	D64_RS0_RS_ACTIVE	0x10000000	/**< active */
#define	D64_RS0_RS_IDLE		0x20000000	/**< idle wait */
#define	D64_RS0_RS_STOPPED	0x30000000	/**< stopped */
#define	D64_RS0_RS_SUSP		0x40000000	/**< suspend pending */

#define	D64_RS1_AD_MASK		(di->d64_rs1_ad_mask)	/* active descriptor pointer */
#define	D64_RS1_RE_MASK		0xf0000000	/* receive errors */
#define	D64_RS1_RE_SHIFT		28
#define	D64_RS1_RE_NOERR	0x00000000	/**< no error */
#define	D64_RS1_RE_DPO		0x10000000	/**< descriptor protocol error */
#define	D64_RS1_RE_DFU		0x20000000	/**< data fifo overflow */
#define	D64_RS1_RE_DTE		0x30000000	/**< data transfer error */
#define	D64_RS1_RE_DESRE	0x40000000	/**< descriptor read error */
#define	D64_RS1_RE_COREE	0x50000000	/**< core error */

/* fifoaddr (diag access select/offset encoding) */
#define	D64_FA_OFF_MASK		0xffff		/**< offset */
#define	D64_FA_SEL_MASK		0xf0000		/**< select */
#define	D64_FA_SEL_SHIFT	16
#define	D64_FA_SEL_XDD		0x00000		/**< transmit dma data */
#define	D64_FA_SEL_XDP		0x10000		/**< transmit dma pointers */
#define	D64_FA_SEL_RDD		0x40000		/**< receive dma data */
#define	D64_FA_SEL_RDP		0x50000		/**< receive dma pointers */
#define	D64_FA_SEL_XFD		0x80000		/**< transmit fifo data */
#define	D64_FA_SEL_XFP		0x90000		/**< transmit fifo pointers */
#define	D64_FA_SEL_RFD		0xc0000		/**< receive fifo data */
#define	D64_FA_SEL_RFP		0xd0000		/**< receive fifo pointers */
#define	D64_FA_SEL_RSD		0xe0000		/**< receive frame status data */
#define	D64_FA_SEL_RSP		0xf0000		/**< receive frame status pointers */
437*4882a593Smuzhiyun 
/* descriptor control flags 1 (dma64dd_t.ctrl1) */
#define D64_CTRL_COREFLAGS	0x0ff00000		/**< core specific flags */

/**< bzero operation for receive channels or a compare-to-zero operation for transmit engines */
#define D64_CTRL1_BIT_BZEROBCMP		(15u)
/* WAR for JIRA CRWLDMA-245 */
#define D64_DMA_COREFLAGS_WAR_BIT	(25u)

#define D64_CTRL1_COHERENT      ((uint32)1 << 17)       /**< cache coherent per transaction */
#define	D64_CTRL1_NOTPCIE	((uint32)1 << 18)	/**< burst size control */
#define	D64_CTRL1_EOT		((uint32)1 << 28)	/**< end of descriptor table */
#define	D64_CTRL1_IOC		((uint32)1 << 29)	/**< interrupt on completion */
#define	D64_CTRL1_EOF		((uint32)1 << 30)	/**< end of frame */
#define	D64_CTRL1_SOF		((uint32)1 << 31)	/**< start of frame */
#define D64_CTRL1_SOFPTR	0x0000FFFFu
#define D64_CTRL1_NUMD_MASK	0x00F00000u
#define D64_CTRL1_NUMD_SHIFT	20u

/* descriptor control flags 2 (dma64dd_t.ctrl2) */
#define	D64_CTRL2_MAX_LEN	0x0000fff7 /* Max transfer length (buffer byte count) <= 65527 */
#define	D64_CTRL2_BC_MASK	0x0000ffff /**< mask for buffer byte count */
#define	D64_CTRL2_AE		0x00030000 /**< address extension bits */
#define	D64_CTRL2_AE_SHIFT	16
#define D64_CTRL2_PARITY	0x00040000      /* parity bit */

/** control flags in the range [27:20] are core-specific and not defined here */
#define	D64_CTRL_CORE_MASK	0x0ff00000

/* receive frame status word layout */
#define D64_RX_FRM_STS_LEN	0x0000ffff	/**< frame length mask */
#define D64_RX_FRM_STS_OVFL	0x00800000	/**< RxOverFlow */
#define D64_RX_FRM_STS_DSCRCNT	0x0f000000 /**< no. of descriptors used - 1, d11corerev >= 22 */
#define D64_RX_FRM_STS_DSCRCNT_SHIFT   24      /* Shift for no .of dma descriptor field */
#define D64_RX_FRM_STS_DATATYPE	0xf0000000	/**< core-dependent data type */

/** clamp a dma transfer length to the per-descriptor maximum */
#define BCM_D64_CTRL2_BOUND_DMA_LENGTH(len) \
(((len) > D64_CTRL2_MAX_LEN) ? D64_CTRL2_MAX_LEN : (len))
474*4882a593Smuzhiyun 
/** receive frame status */
typedef volatile struct {
	uint16 len;	/**< frame length (low half of the frame status word) */
	uint16 flags;	/**< status flags — upper half; see D64_RX_FRM_STS_* masks */
} dma_rxh_t;
480*4882a593Smuzhiyun 
481*4882a593Smuzhiyun #endif	/* _sbhnddma_h_ */
482