xref: /OK3568_Linux_fs/external/rkwifibt/drivers/infineon/include/sbhnddma.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Generic Broadcom Home Networking Division (HND) DMA engine HW interface
3*4882a593Smuzhiyun  * This supports the following chips: BCM42xx, 44xx, 47xx .
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Copyright (C) 1999-2017, Broadcom Corporation
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  *      Unless you and Broadcom execute a separate written software license
10*4882a593Smuzhiyun  * agreement governing use of this software, this software is licensed to you
11*4882a593Smuzhiyun  * under the terms of the GNU General Public License version 2 (the "GPL"),
12*4882a593Smuzhiyun  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
13*4882a593Smuzhiyun  * following added to such license:
14*4882a593Smuzhiyun  *
15*4882a593Smuzhiyun  *      As a special exception, the copyright holders of this software give you
16*4882a593Smuzhiyun  * permission to link this software with independent modules, and to copy and
17*4882a593Smuzhiyun  * distribute the resulting executable under terms of your choice, provided that
18*4882a593Smuzhiyun  * you also meet, for each linked independent module, the terms and conditions of
19*4882a593Smuzhiyun  * the license of that module.  An independent module is a module which is not
20*4882a593Smuzhiyun  * derived from this software.  The special exception does not apply to any
21*4882a593Smuzhiyun  * modifications of the software.
22*4882a593Smuzhiyun  *
23*4882a593Smuzhiyun  *      Notwithstanding the above, under no circumstances may you combine this
24*4882a593Smuzhiyun  * software in any way with any other Broadcom software provided under a license
25*4882a593Smuzhiyun  * other than the GPL, without Broadcom's express prior written consent.
26*4882a593Smuzhiyun  *
27*4882a593Smuzhiyun  *
28*4882a593Smuzhiyun  * <<Broadcom-WL-IPTag/Open:>>
29*4882a593Smuzhiyun  *
30*4882a593Smuzhiyun  * $Id: sbhnddma.h 694506 2017-04-13 05:10:05Z $
31*4882a593Smuzhiyun  */
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun #ifndef	_sbhnddma_h_
34*4882a593Smuzhiyun #define	_sbhnddma_h_
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun /* DMA structure:
37*4882a593Smuzhiyun  *  support two DMA engines: 32 bits address or 64 bit addressing
38*4882a593Smuzhiyun  *  basic DMA register set is per channel(transmit or receive)
39*4882a593Smuzhiyun  *  a pair of channels is defined for convenience
40*4882a593Smuzhiyun  */
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun /* 32 bits addressing */
43*4882a593Smuzhiyun 
44*4882a593Smuzhiyun /** dma registers per channel(xmt or rcv) */
45*4882a593Smuzhiyun typedef volatile struct {
46*4882a593Smuzhiyun 	uint32	control;		/**< enable, et al (bit defs: XC_* for xmt, RC_* for rcv) */
47*4882a593Smuzhiyun 	uint32	addr;			/**< descriptor ring base address (4K aligned) */
48*4882a593Smuzhiyun 	uint32	ptr;			/**< last descriptor posted to chip */
49*4882a593Smuzhiyun 	uint32	status;			/**< current active descriptor, et al (bit defs: XS_* / RS_*) */
50*4882a593Smuzhiyun } dma32regs_t;
51*4882a593Smuzhiyun 
52*4882a593Smuzhiyun typedef volatile struct {
53*4882a593Smuzhiyun 	dma32regs_t	xmt;		/**< dma tx channel register set */
54*4882a593Smuzhiyun 	dma32regs_t	rcv;		/**< dma rx channel register set */
55*4882a593Smuzhiyun } dma32regp_t;
56*4882a593Smuzhiyun 
57*4882a593Smuzhiyun typedef volatile struct {	/* diag access (fifo select encodings: FA_SEL_*) */
58*4882a593Smuzhiyun 	uint32	fifoaddr;		/**< diag address: FA_OFF_MASK offset + FA_SEL_* select */
59*4882a593Smuzhiyun 	uint32	fifodatalow;		/**< low 32bits of data */
60*4882a593Smuzhiyun 	uint32	fifodatahigh;		/**< high 32bits of data */
61*4882a593Smuzhiyun 	uint32	pad;			/**< reserved */
62*4882a593Smuzhiyun } dma32diag_t;
63*4882a593Smuzhiyun 
64*4882a593Smuzhiyun /**
65*4882a593Smuzhiyun  * DMA Descriptor
66*4882a593Smuzhiyun  * Descriptors are only read by the hardware, never written back.
67*4882a593Smuzhiyun  */
68*4882a593Smuzhiyun typedef volatile struct {
69*4882a593Smuzhiyun 	uint32	ctrl;		/**< misc control bits & bufcount (bit defs: CTRL_*) */
70*4882a593Smuzhiyun 	uint32	addr;		/**< data buffer address */
71*4882a593Smuzhiyun } dma32dd_t;
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun /** Each descriptor ring must be 4096byte aligned, and fit within a single 4096byte page. */
74*4882a593Smuzhiyun #define	D32RINGALIGN_BITS	12
75*4882a593Smuzhiyun #define	D32MAXRINGSZ		(1 << D32RINGALIGN_BITS)
76*4882a593Smuzhiyun #define	D32RINGALIGN		(1 << D32RINGALIGN_BITS)
77*4882a593Smuzhiyun 
78*4882a593Smuzhiyun #define	D32MAXDD	(D32MAXRINGSZ / sizeof (dma32dd_t))
79*4882a593Smuzhiyun 
80*4882a593Smuzhiyun /* transmit channel control */
81*4882a593Smuzhiyun #define	XC_XE		((uint32)1 << 0)	/**< transmit enable */
82*4882a593Smuzhiyun #define	XC_SE		((uint32)1 << 1)	/**< transmit suspend request */
83*4882a593Smuzhiyun #define	XC_LE		((uint32)1 << 2)	/**< loopback enable */
84*4882a593Smuzhiyun #define	XC_FL		((uint32)1 << 4)	/**< flush request */
85*4882a593Smuzhiyun #define XC_MR_MASK	0x000001C0		/**< Multiple outstanding reads */
86*4882a593Smuzhiyun #define XC_MR_SHIFT	6
87*4882a593Smuzhiyun #define	XC_PD		((uint32)1 << 11)	/**< parity check disable */
88*4882a593Smuzhiyun #define	XC_AE		((uint32)3 << 16)	/**< address extension bits */
89*4882a593Smuzhiyun #define	XC_AE_SHIFT	16
90*4882a593Smuzhiyun #define XC_BL_MASK	0x001C0000		/**< BurstLen bits */
91*4882a593Smuzhiyun #define XC_BL_SHIFT	18
92*4882a593Smuzhiyun #define XC_PC_MASK	0x00E00000		/**< Prefetch control */
93*4882a593Smuzhiyun #define XC_PC_SHIFT	21
94*4882a593Smuzhiyun #define XC_PT_MASK	0x03000000		/**< Prefetch threshold */
95*4882a593Smuzhiyun #define XC_PT_SHIFT	24
96*4882a593Smuzhiyun 
97*4882a593Smuzhiyun /** Multiple outstanding reads */
98*4882a593Smuzhiyun #define DMA_MR_1	0
99*4882a593Smuzhiyun #define DMA_MR_2	1
100*4882a593Smuzhiyun #define DMA_MR_4	2
101*4882a593Smuzhiyun #define DMA_MR_8	3
102*4882a593Smuzhiyun #define DMA_MR_12	4
103*4882a593Smuzhiyun #define DMA_MR_16	5
104*4882a593Smuzhiyun #define DMA_MR_20	6
105*4882a593Smuzhiyun #define DMA_MR_32	7
106*4882a593Smuzhiyun 
107*4882a593Smuzhiyun /** DMA Burst Length in bytes */
108*4882a593Smuzhiyun #define DMA_BL_16	0
109*4882a593Smuzhiyun #define DMA_BL_32	1
110*4882a593Smuzhiyun #define DMA_BL_64	2
111*4882a593Smuzhiyun #define DMA_BL_128	3
112*4882a593Smuzhiyun #define DMA_BL_256	4
113*4882a593Smuzhiyun #define DMA_BL_512	5
114*4882a593Smuzhiyun #define DMA_BL_1024	6
115*4882a593Smuzhiyun 
116*4882a593Smuzhiyun /** Prefetch control */
117*4882a593Smuzhiyun #define DMA_PC_0	0
118*4882a593Smuzhiyun #define DMA_PC_4	1
119*4882a593Smuzhiyun #define DMA_PC_8	2
120*4882a593Smuzhiyun #define DMA_PC_16	3
121*4882a593Smuzhiyun #define DMA_PC_32	4
122*4882a593Smuzhiyun /* others: reserved */
123*4882a593Smuzhiyun 
124*4882a593Smuzhiyun /** Prefetch threshold */
125*4882a593Smuzhiyun #define DMA_PT_1	0
126*4882a593Smuzhiyun #define DMA_PT_2	1
127*4882a593Smuzhiyun #define DMA_PT_4	2
128*4882a593Smuzhiyun #define DMA_PT_8	3
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun /** Channel Switch */
131*4882a593Smuzhiyun #define DMA_CS_OFF	0
132*4882a593Smuzhiyun #define DMA_CS_ON	1
133*4882a593Smuzhiyun 
134*4882a593Smuzhiyun /* transmit descriptor table pointer */
135*4882a593Smuzhiyun #define	XP_LD_MASK	0xfff			/**< last valid descriptor */
136*4882a593Smuzhiyun 
137*4882a593Smuzhiyun /* transmit channel status */
138*4882a593Smuzhiyun #define	XS_CD_MASK	0x0fff			/**< current descriptor pointer */
139*4882a593Smuzhiyun #define	XS_XS_MASK	0xf000			/**< transmit state */
140*4882a593Smuzhiyun #define	XS_XS_SHIFT	12
141*4882a593Smuzhiyun #define	XS_XS_DISABLED	0x0000			/**< disabled */
142*4882a593Smuzhiyun #define	XS_XS_ACTIVE	0x1000			/**< active */
143*4882a593Smuzhiyun #define	XS_XS_IDLE	0x2000			/**< idle wait */
144*4882a593Smuzhiyun #define	XS_XS_STOPPED	0x3000			/**< stopped */
145*4882a593Smuzhiyun #define	XS_XS_SUSP	0x4000			/**< suspend pending */
146*4882a593Smuzhiyun #define	XS_XE_MASK	0xf0000			/**< transmit errors */
147*4882a593Smuzhiyun #define	XS_XE_SHIFT	16
148*4882a593Smuzhiyun #define	XS_XE_NOERR	0x00000			/**< no error */
149*4882a593Smuzhiyun #define	XS_XE_DPE	0x10000			/**< descriptor protocol error */
150*4882a593Smuzhiyun #define	XS_XE_DFU	0x20000			/**< data fifo underrun */
151*4882a593Smuzhiyun #define	XS_XE_BEBR	0x30000			/**< bus error on buffer read */
152*4882a593Smuzhiyun #define	XS_XE_BEDA	0x40000			/**< bus error on descriptor access */
153*4882a593Smuzhiyun #define	XS_AD_MASK	0xfff00000		/**< active descriptor */
154*4882a593Smuzhiyun #define	XS_AD_SHIFT	20
155*4882a593Smuzhiyun 
156*4882a593Smuzhiyun /* receive channel control */
157*4882a593Smuzhiyun #define	RC_RE		((uint32)1 << 0)	/**< receive enable */
158*4882a593Smuzhiyun #define	RC_RO_MASK	0xfe			/**< receive frame offset */
159*4882a593Smuzhiyun #define	RC_RO_SHIFT	1
160*4882a593Smuzhiyun #define	RC_FM		((uint32)1 << 8)	/**< direct fifo receive (pio) mode */
161*4882a593Smuzhiyun #define	RC_SH		((uint32)1 << 9)	/**< separate rx header descriptor enable */
162*4882a593Smuzhiyun #define	RC_OC		((uint32)1 << 10)	/**< overflow continue */
163*4882a593Smuzhiyun #define	RC_PD		((uint32)1 << 11)	/**< parity check disable */
164*4882a593Smuzhiyun #define	RC_AE		((uint32)3 << 16)	/**< address extension bits */
165*4882a593Smuzhiyun #define	RC_AE_SHIFT	16
166*4882a593Smuzhiyun #define RC_BL_MASK	0x001C0000		/**< BurstLen bits */
167*4882a593Smuzhiyun #define RC_BL_SHIFT	18
168*4882a593Smuzhiyun #define RC_PC_MASK	0x00E00000		/**< Prefetch control */
169*4882a593Smuzhiyun #define RC_PC_SHIFT	21
170*4882a593Smuzhiyun #define RC_PT_MASK	0x03000000		/**< Prefetch threshold */
171*4882a593Smuzhiyun #define RC_PT_SHIFT	24
172*4882a593Smuzhiyun #define RC_WAITCMP_MASK 0x00001000
173*4882a593Smuzhiyun #define RC_WAITCMP_SHIFT 12
174*4882a593Smuzhiyun /* receive descriptor table pointer */
175*4882a593Smuzhiyun #define	RP_LD_MASK	0xfff			/**< last valid descriptor */
176*4882a593Smuzhiyun 
177*4882a593Smuzhiyun /* receive channel status */
178*4882a593Smuzhiyun #define	RS_CD_MASK	0x0fff			/**< current descriptor pointer */
179*4882a593Smuzhiyun #define	RS_RS_MASK	0xf000			/**< receive state */
180*4882a593Smuzhiyun #define	RS_RS_SHIFT	12
181*4882a593Smuzhiyun #define	RS_RS_DISABLED	0x0000			/**< disabled */
182*4882a593Smuzhiyun #define	RS_RS_ACTIVE	0x1000			/**< active */
183*4882a593Smuzhiyun #define	RS_RS_IDLE	0x2000			/**< idle wait */
184*4882a593Smuzhiyun #define	RS_RS_STOPPED	0x3000			/**< reserved */
185*4882a593Smuzhiyun #define	RS_RE_MASK	0xf0000			/**< receive errors */
186*4882a593Smuzhiyun #define	RS_RE_SHIFT	16
187*4882a593Smuzhiyun #define	RS_RE_NOERR	0x00000			/**< no error */
188*4882a593Smuzhiyun #define	RS_RE_DPE	0x10000			/**< descriptor protocol error */
189*4882a593Smuzhiyun #define	RS_RE_DFO	0x20000			/**< data fifo overflow */
190*4882a593Smuzhiyun #define	RS_RE_BEBW	0x30000			/**< bus error on buffer write */
191*4882a593Smuzhiyun #define	RS_RE_BEDA	0x40000			/**< bus error on descriptor access */
192*4882a593Smuzhiyun #define	RS_AD_MASK	0xfff00000		/**< active descriptor */
193*4882a593Smuzhiyun #define	RS_AD_SHIFT	20
194*4882a593Smuzhiyun 
195*4882a593Smuzhiyun /* fifoaddr */
196*4882a593Smuzhiyun #define	FA_OFF_MASK	0xffff			/**< offset */
197*4882a593Smuzhiyun #define	FA_SEL_MASK	0xf0000			/**< select */
198*4882a593Smuzhiyun #define	FA_SEL_SHIFT	16
199*4882a593Smuzhiyun #define	FA_SEL_XDD	0x00000			/**< transmit dma data */
200*4882a593Smuzhiyun #define	FA_SEL_XDP	0x10000			/**< transmit dma pointers */
201*4882a593Smuzhiyun #define	FA_SEL_RDD	0x40000			/**< receive dma data */
202*4882a593Smuzhiyun #define	FA_SEL_RDP	0x50000			/**< receive dma pointers */
203*4882a593Smuzhiyun #define	FA_SEL_XFD	0x80000			/**< transmit fifo data */
204*4882a593Smuzhiyun #define	FA_SEL_XFP	0x90000			/**< transmit fifo pointers */
205*4882a593Smuzhiyun #define	FA_SEL_RFD	0xc0000			/**< receive fifo data */
206*4882a593Smuzhiyun #define	FA_SEL_RFP	0xd0000			/**< receive fifo pointers */
207*4882a593Smuzhiyun #define	FA_SEL_RSD	0xe0000			/**< receive frame status data */
208*4882a593Smuzhiyun #define	FA_SEL_RSP	0xf0000			/**< receive frame status pointers */
209*4882a593Smuzhiyun 
210*4882a593Smuzhiyun /* descriptor control flags */
211*4882a593Smuzhiyun #define	CTRL_BC_MASK	0x00001fff		/**< buffer byte count, real data len must be <= 4KB */
212*4882a593Smuzhiyun #define	CTRL_AE		((uint32)3 << 16)	/**< address extension bits */
213*4882a593Smuzhiyun #define	CTRL_AE_SHIFT	16
214*4882a593Smuzhiyun #define	CTRL_PARITY	((uint32)3 << 18)	/**< parity field (mask spans bits 19:18) */
215*4882a593Smuzhiyun #define	CTRL_EOT	((uint32)1 << 28)	/**< end of descriptor table */
216*4882a593Smuzhiyun #define	CTRL_IOC	((uint32)1 << 29)	/**< interrupt on completion */
217*4882a593Smuzhiyun #define	CTRL_EOF	((uint32)1 << 30)	/**< end of frame */
218*4882a593Smuzhiyun #define	CTRL_SOF	((uint32)1 << 31)	/**< start of frame */
219*4882a593Smuzhiyun 
220*4882a593Smuzhiyun /** control flags in the range [27:20] are core-specific and not defined here */
221*4882a593Smuzhiyun #define	CTRL_CORE_MASK	0x0ff00000
222*4882a593Smuzhiyun 
223*4882a593Smuzhiyun /* 64 bits addressing */
224*4882a593Smuzhiyun 
225*4882a593Smuzhiyun /** dma registers per channel(xmt or rcv) */
226*4882a593Smuzhiyun typedef volatile struct {
227*4882a593Smuzhiyun 	uint32	control;	/**< enable, et al (bit defs: D64_XC_* for tx, D64_RC_* for rx) */
228*4882a593Smuzhiyun 	uint32	ptr;		/**< last descriptor posted to chip */
229*4882a593Smuzhiyun 	uint32	addrlow;	/**< descriptor ring base address low 32-bits (8K aligned) */
230*4882a593Smuzhiyun 	uint32	addrhigh;	/**< descriptor ring base address bits 63:32 (8K aligned) */
231*4882a593Smuzhiyun 	uint32	status0;	/**< current descriptor, xmt state (bit defs: D64_XS0_* / D64_RS0_*) */
232*4882a593Smuzhiyun 	uint32	status1;	/**< active descriptor, xmt error (bit defs: D64_XS1_* / D64_RS1_*) */
233*4882a593Smuzhiyun } dma64regs_t;
234*4882a593Smuzhiyun 
235*4882a593Smuzhiyun typedef volatile struct {
236*4882a593Smuzhiyun 	dma64regs_t	tx;		/**< dma64 tx channel register set */
237*4882a593Smuzhiyun 	dma64regs_t	rx;		/**< dma64 rx channel register set */
238*4882a593Smuzhiyun } dma64regp_t;
239*4882a593Smuzhiyun 
240*4882a593Smuzhiyun typedef volatile struct {		/**< diag access (fifo select encodings: D64_FA_SEL_*) */
241*4882a593Smuzhiyun 	uint32	fifoaddr;		/**< diag address: D64_FA_OFF_MASK offset + D64_FA_SEL_* select */
242*4882a593Smuzhiyun 	uint32	fifodatalow;		/**< low 32bits of data */
243*4882a593Smuzhiyun 	uint32	fifodatahigh;		/**< high 32bits of data */
244*4882a593Smuzhiyun 	uint32	pad;			/**< reserved */
245*4882a593Smuzhiyun } dma64diag_t;
246*4882a593Smuzhiyun 
247*4882a593Smuzhiyun /**
248*4882a593Smuzhiyun  * DMA Descriptor
249*4882a593Smuzhiyun  * Descriptors are only read by the hardware, never written back.
250*4882a593Smuzhiyun  */
251*4882a593Smuzhiyun typedef volatile struct {
252*4882a593Smuzhiyun 	uint32	ctrl1;		/**< misc control bits (bit defs: D64_CTRL1_*) */
253*4882a593Smuzhiyun 	uint32	ctrl2;		/**< buffer count and address extension (bit defs: D64_CTRL2_*) */
254*4882a593Smuzhiyun 	uint32	addrlow;	/**< memory address of the data buffer, bits 31:0 */
255*4882a593Smuzhiyun 	uint32	addrhigh;	/**< memory address of the data buffer, bits 63:32 */
256*4882a593Smuzhiyun } dma64dd_t;
257*4882a593Smuzhiyun 
258*4882a593Smuzhiyun /**
259*4882a593Smuzhiyun  * Each descriptor ring must be 8kB aligned, and fit within a contiguous 8kB physical address range.
260*4882a593Smuzhiyun  */
261*4882a593Smuzhiyun #define D64RINGALIGN_BITS	13
262*4882a593Smuzhiyun #define	D64MAXRINGSZ		(1 << D64RINGALIGN_BITS)
263*4882a593Smuzhiyun #define	D64RINGBOUNDARY		(1 << D64RINGALIGN_BITS)
264*4882a593Smuzhiyun 
265*4882a593Smuzhiyun #define	D64MAXDD	(D64MAXRINGSZ / sizeof (dma64dd_t))
266*4882a593Smuzhiyun 
267*4882a593Smuzhiyun /** for cores with large descriptor ring support, descriptor ring size can be up to 4096 */
268*4882a593Smuzhiyun #define	D64MAXDD_LARGE		((1 << 16) / sizeof (dma64dd_t))
269*4882a593Smuzhiyun 
270*4882a593Smuzhiyun /**
271*4882a593Smuzhiyun  * for cores with large descriptor ring support (4k descriptors), descriptor ring cannot cross
272*4882a593Smuzhiyun  * 64K boundary
273*4882a593Smuzhiyun  */
274*4882a593Smuzhiyun #define	D64RINGBOUNDARY_LARGE	(1 << 16)
275*4882a593Smuzhiyun 
276*4882a593Smuzhiyun /*
277*4882a593Smuzhiyun  * Default DMA Burstlen values for USBRev >= 12 and SDIORev >= 11.
278*4882a593Smuzhiyun  * When this field contains the value N, the burst length is 2**(N + 4) bytes.
279*4882a593Smuzhiyun  */
280*4882a593Smuzhiyun #define D64_DEF_USBBURSTLEN     2
281*4882a593Smuzhiyun #define D64_DEF_SDIOBURSTLEN    1
282*4882a593Smuzhiyun 
283*4882a593Smuzhiyun #ifndef D64_USBBURSTLEN
284*4882a593Smuzhiyun #define D64_USBBURSTLEN	DMA_BL_64
285*4882a593Smuzhiyun #endif // endif
286*4882a593Smuzhiyun #ifndef D64_SDIOBURSTLEN
287*4882a593Smuzhiyun #define D64_SDIOBURSTLEN	DMA_BL_32
288*4882a593Smuzhiyun #endif // endif
289*4882a593Smuzhiyun 
290*4882a593Smuzhiyun /* transmit channel control */
291*4882a593Smuzhiyun #define	D64_XC_XE		0x00000001	/**< transmit enable */
292*4882a593Smuzhiyun #define	D64_XC_SE		0x00000002	/**< transmit suspend request */
293*4882a593Smuzhiyun #define	D64_XC_LE		0x00000004	/**< loopback enable */
294*4882a593Smuzhiyun #define	D64_XC_FL		0x00000010	/**< flush request */
295*4882a593Smuzhiyun #define D64_XC_MR_MASK		0x000001C0	/**< Multiple outstanding reads */
296*4882a593Smuzhiyun #define D64_XC_MR_SHIFT		6
297*4882a593Smuzhiyun #define D64_XC_CS_SHIFT		9		/**< channel switch enable */
298*4882a593Smuzhiyun #define D64_XC_CS_MASK		0x00000200      /**< channel switch enable */
299*4882a593Smuzhiyun #define	D64_XC_PD		0x00000800	/**< parity check disable */
300*4882a593Smuzhiyun #define	D64_XC_AE		0x00030000	/**< address extension bits */
301*4882a593Smuzhiyun #define	D64_XC_AE_SHIFT		16
302*4882a593Smuzhiyun #define D64_XC_BL_MASK		0x001C0000	/**< BurstLen bits */
303*4882a593Smuzhiyun #define D64_XC_BL_SHIFT		18
304*4882a593Smuzhiyun #define D64_XC_PC_MASK		0x00E00000		/**< Prefetch control */
305*4882a593Smuzhiyun #define D64_XC_PC_SHIFT		21
306*4882a593Smuzhiyun #define D64_XC_PT_MASK		0x03000000		/**< Prefetch threshold */
307*4882a593Smuzhiyun #define D64_XC_PT_SHIFT		24
308*4882a593Smuzhiyun #define D64_XC_CO_MASK		0x04000000	/**< coherent transactions for descriptors */
309*4882a593Smuzhiyun #define D64_XC_CO_SHIFT		26
310*4882a593Smuzhiyun 
311*4882a593Smuzhiyun /* transmit descriptor table pointer */
312*4882a593Smuzhiyun #define	D64_XP_LD_MASK		0x00001fff	/**< last valid descriptor */
313*4882a593Smuzhiyun 
314*4882a593Smuzhiyun /* transmit channel status */
315*4882a593Smuzhiyun #define	D64_XS0_CD_MASK		(di->d64_xs0_cd_mask)	/**< current descriptor pointer */
316*4882a593Smuzhiyun #define	D64_XS0_XS_MASK		0xf0000000     	/**< transmit state */
317*4882a593Smuzhiyun #define	D64_XS0_XS_SHIFT		28
318*4882a593Smuzhiyun #define	D64_XS0_XS_DISABLED	0x00000000	/**< disabled */
319*4882a593Smuzhiyun #define	D64_XS0_XS_ACTIVE	0x10000000	/**< active */
320*4882a593Smuzhiyun #define	D64_XS0_XS_IDLE		0x20000000	/**< idle wait */
321*4882a593Smuzhiyun #define	D64_XS0_XS_STOPPED	0x30000000	/**< stopped */
322*4882a593Smuzhiyun #define	D64_XS0_XS_SUSP		0x40000000	/**< suspend pending */
323*4882a593Smuzhiyun 
324*4882a593Smuzhiyun #define	D64_XS1_AD_MASK		(di->d64_xs1_ad_mask)	/**< active descriptor */
325*4882a593Smuzhiyun #define	D64_XS1_XE_MASK		0xf0000000     	/**< transmit errors */
326*4882a593Smuzhiyun #define	D64_XS1_XE_SHIFT		28
327*4882a593Smuzhiyun #define	D64_XS1_XE_NOERR	0x00000000	/**< no error */
328*4882a593Smuzhiyun #define	D64_XS1_XE_DPE		0x10000000	/**< descriptor protocol error */
329*4882a593Smuzhiyun #define	D64_XS1_XE_DFU		0x20000000	/**< data fifo underrun */
330*4882a593Smuzhiyun #define	D64_XS1_XE_DTE		0x30000000	/**< data transfer error */
331*4882a593Smuzhiyun #define	D64_XS1_XE_DESRE	0x40000000	/**< descriptor read error */
332*4882a593Smuzhiyun #define	D64_XS1_XE_COREE	0x50000000	/**< core error */
333*4882a593Smuzhiyun 
334*4882a593Smuzhiyun /* receive channel control */
335*4882a593Smuzhiyun #define	D64_RC_RE		0x00000001	/**< receive enable */
336*4882a593Smuzhiyun #define	D64_RC_RO_MASK		0x000000fe	/**< receive frame offset */
337*4882a593Smuzhiyun #define	D64_RC_RO_SHIFT		1
338*4882a593Smuzhiyun #define	D64_RC_FM		0x00000100	/**< direct fifo receive (pio) mode */
339*4882a593Smuzhiyun #define	D64_RC_SH		0x00000200	/**< separate rx header descriptor enable */
340*4882a593Smuzhiyun #define	D64_RC_SHIFT		9	/**< separate rx header descriptor enable */
341*4882a593Smuzhiyun #define	D64_RC_OC		0x00000400	/**< overflow continue */
342*4882a593Smuzhiyun #define	D64_RC_PD		0x00000800	/**< parity check disable */
343*4882a593Smuzhiyun #define D64_RC_WAITCMP_MASK	0x00001000
344*4882a593Smuzhiyun #define D64_RC_WAITCMP_SHIFT	12
345*4882a593Smuzhiyun #define D64_RC_SA		0x00002000	/**< select active */
346*4882a593Smuzhiyun #define D64_RC_GE		0x00004000	/**< Glom enable */
347*4882a593Smuzhiyun #define	D64_RC_AE		0x00030000	/**< address extension bits */
348*4882a593Smuzhiyun #define	D64_RC_AE_SHIFT		16
349*4882a593Smuzhiyun #define D64_RC_BL_MASK		0x001C0000	/**< BurstLen bits */
350*4882a593Smuzhiyun #define D64_RC_BL_SHIFT		18
351*4882a593Smuzhiyun #define D64_RC_PC_MASK		0x00E00000	/**< Prefetch control */
352*4882a593Smuzhiyun #define D64_RC_PC_SHIFT		21
353*4882a593Smuzhiyun #define D64_RC_PT_MASK		0x03000000	/**< Prefetch threshold */
354*4882a593Smuzhiyun #define D64_RC_PT_SHIFT		24
355*4882a593Smuzhiyun #define D64_RC_CO_MASK		0x04000000	/**< coherent transactions for descriptors */
356*4882a593Smuzhiyun #define D64_RC_CO_SHIFT		26
357*4882a593Smuzhiyun #define	D64_RC_ROEXT_MASK	0x08000000	/**< receive frame offset extension bit */
358*4882a593Smuzhiyun #define	D64_RC_ROEXT_SHIFT	27
359*4882a593Smuzhiyun 
360*4882a593Smuzhiyun /* flags for dma controller */
361*4882a593Smuzhiyun #define DMA_CTRL_PEN		(1 << 0)	/**< parity enable */
362*4882a593Smuzhiyun #define DMA_CTRL_ROC		(1 << 1)	/**< rx overflow continue */
363*4882a593Smuzhiyun #define DMA_CTRL_RXMULTI	(1 << 2)	/**< allow rx scatter to multiple descriptors */
364*4882a593Smuzhiyun #define DMA_CTRL_UNFRAMED	(1 << 3)	/**< Unframed Rx/Tx data */
365*4882a593Smuzhiyun #define DMA_CTRL_USB_BOUNDRY4KB_WAR (1 << 4)	/**< workaround (WAR) for USB 4KB boundary */
366*4882a593Smuzhiyun #define DMA_CTRL_DMA_AVOIDANCE_WAR (1 << 5)	/**< DMA avoidance WAR for 4331 */
367*4882a593Smuzhiyun #define DMA_CTRL_RXSINGLE	(1 << 6)	/**< always single buffer */
368*4882a593Smuzhiyun #define DMA_CTRL_SDIO_RXGLOM	(1 << 7)	/**< DMA Rx glom is enabled */
369*4882a593Smuzhiyun #define DMA_CTRL_DESC_ONLY_FLAG (1 << 8)	/**< For DMA which posts only descriptors,
370*4882a593Smuzhiyun 						 * no packets
371*4882a593Smuzhiyun 						 */
372*4882a593Smuzhiyun #define DMA_CTRL_DESC_CD_WAR	(1 << 9)	/**< WAR for descriptor only DMA's CD not being
373*4882a593Smuzhiyun 						 * updated correctly by HW in CT mode.
374*4882a593Smuzhiyun 						 */
375*4882a593Smuzhiyun #define DMA_CTRL_CS		(1 << 10)	/* channel switch enable */
376*4882a593Smuzhiyun #define DMA_CTRL_ROEXT		(1 << 11)	/* receive frame offset extension support */
377*4882a593Smuzhiyun #define DMA_CTRL_RX_ALIGN_8BYTE	(1 << 12)	/* RXDMA address 8-byte aligned for 43684A0 */
378*4882a593Smuzhiyun 
379*4882a593Smuzhiyun /* receive descriptor table pointer */
380*4882a593Smuzhiyun #define	D64_RP_LD_MASK		0x00001fff	/**< last valid descriptor */
381*4882a593Smuzhiyun 
382*4882a593Smuzhiyun /* receive channel status */
383*4882a593Smuzhiyun #define	D64_RS0_CD_MASK		(di->d64_rs0_cd_mask)	/**< current descriptor pointer */
384*4882a593Smuzhiyun #define	D64_RS0_RS_MASK		0xf0000000     	/**< receive state */
385*4882a593Smuzhiyun #define	D64_RS0_RS_SHIFT		28
386*4882a593Smuzhiyun #define	D64_RS0_RS_DISABLED	0x00000000	/**< disabled */
387*4882a593Smuzhiyun #define	D64_RS0_RS_ACTIVE	0x10000000	/**< active */
388*4882a593Smuzhiyun #define	D64_RS0_RS_IDLE		0x20000000	/**< idle wait */
389*4882a593Smuzhiyun #define	D64_RS0_RS_STOPPED	0x30000000	/**< stopped */
390*4882a593Smuzhiyun #define	D64_RS0_RS_SUSP		0x40000000	/**< suspend pending */
391*4882a593Smuzhiyun 
392*4882a593Smuzhiyun #define	D64_RS1_AD_MASK		(di->d64_rs1_ad_mask)	/* active descriptor pointer */
393*4882a593Smuzhiyun #define	D64_RS1_RE_MASK		0xf0000000	/* receive errors */
394*4882a593Smuzhiyun #define	D64_RS1_RE_SHIFT		28
395*4882a593Smuzhiyun #define	D64_RS1_RE_NOERR	0x00000000	/**< no error */
396*4882a593Smuzhiyun #define	D64_RS1_RE_DPO		0x10000000	/**< descriptor protocol error */
397*4882a593Smuzhiyun #define	D64_RS1_RE_DFU		0x20000000	/**< data fifo overflow */
398*4882a593Smuzhiyun #define	D64_RS1_RE_DTE		0x30000000	/**< data transfer error */
399*4882a593Smuzhiyun #define	D64_RS1_RE_DESRE	0x40000000	/**< descriptor read error */
400*4882a593Smuzhiyun #define	D64_RS1_RE_COREE	0x50000000	/**< core error */
401*4882a593Smuzhiyun 
402*4882a593Smuzhiyun /* fifoaddr */
403*4882a593Smuzhiyun #define	D64_FA_OFF_MASK		0xffff		/**< offset */
404*4882a593Smuzhiyun #define	D64_FA_SEL_MASK		0xf0000		/**< select */
405*4882a593Smuzhiyun #define	D64_FA_SEL_SHIFT	16
406*4882a593Smuzhiyun #define	D64_FA_SEL_XDD		0x00000		/**< transmit dma data */
407*4882a593Smuzhiyun #define	D64_FA_SEL_XDP		0x10000		/**< transmit dma pointers */
408*4882a593Smuzhiyun #define	D64_FA_SEL_RDD		0x40000		/**< receive dma data */
409*4882a593Smuzhiyun #define	D64_FA_SEL_RDP		0x50000		/**< receive dma pointers */
410*4882a593Smuzhiyun #define	D64_FA_SEL_XFD		0x80000		/**< transmit fifo data */
411*4882a593Smuzhiyun #define	D64_FA_SEL_XFP		0x90000		/**< transmit fifo pointers */
412*4882a593Smuzhiyun #define	D64_FA_SEL_RFD		0xc0000		/**< receive fifo data */
413*4882a593Smuzhiyun #define	D64_FA_SEL_RFP		0xd0000		/**< receive fifo pointers */
414*4882a593Smuzhiyun #define	D64_FA_SEL_RSD		0xe0000		/**< receive frame status data */
415*4882a593Smuzhiyun #define	D64_FA_SEL_RSP		0xf0000		/**< receive frame status pointers */
416*4882a593Smuzhiyun 
417*4882a593Smuzhiyun /* descriptor control flags 1 */
418*4882a593Smuzhiyun #define D64_CTRL_COREFLAGS	0x0ff00000		/**< core specific flags */
419*4882a593Smuzhiyun #define D64_CTRL1_COHERENT      ((uint32)1 << 17)       /* cache coherent per transaction */
420*4882a593Smuzhiyun #define	D64_CTRL1_NOTPCIE	((uint32)1 << 18)	/**< burst size control */
421*4882a593Smuzhiyun #define	D64_CTRL1_EOT		((uint32)1 << 28)	/**< end of descriptor table */
422*4882a593Smuzhiyun #define	D64_CTRL1_IOC		((uint32)1 << 29)	/**< interrupt on completion */
423*4882a593Smuzhiyun #define	D64_CTRL1_EOF		((uint32)1 << 30)	/**< end of frame */
424*4882a593Smuzhiyun #define	D64_CTRL1_SOF		((uint32)1 << 31)	/**< start of frame */
425*4882a593Smuzhiyun 
426*4882a593Smuzhiyun /* descriptor control flags 2 */
427*4882a593Smuzhiyun #define	D64_CTRL2_MAX_LEN	0x0000fff7 /* Max transfer length (buffer byte count) <= 65527 */
428*4882a593Smuzhiyun #define	D64_CTRL2_BC_MASK	0x0000ffff /**< mask for buffer byte count */
429*4882a593Smuzhiyun #define	D64_CTRL2_AE		0x00030000 /**< address extension bits */
430*4882a593Smuzhiyun #define	D64_CTRL2_AE_SHIFT	16
431*4882a593Smuzhiyun #define D64_CTRL2_PARITY	0x00040000      /* parity bit */
432*4882a593Smuzhiyun 
433*4882a593Smuzhiyun /** control flags in the range [27:20] are core-specific and not defined here */
434*4882a593Smuzhiyun #define	D64_CTRL_CORE_MASK	0x0ff00000
435*4882a593Smuzhiyun 
436*4882a593Smuzhiyun #define D64_RX_FRM_STS_LEN	0x0000ffff	/**< frame length mask */
437*4882a593Smuzhiyun #define D64_RX_FRM_STS_OVFL	0x00800000	/**< RxOverFlow */
438*4882a593Smuzhiyun #define D64_RX_FRM_STS_DSCRCNT	0x0f000000 /**< no. of descriptors used - 1, d11corerev >= 22 */
439*4882a593Smuzhiyun #define D64_RX_FRM_STS_DSCRCNT_SHIFT   24      /* Shift for no .of dma descriptor field */
440*4882a593Smuzhiyun #define D64_RX_FRM_STS_DATATYPE	0xf0000000	/**< core-dependent data type */
441*4882a593Smuzhiyun 
442*4882a593Smuzhiyun #define BCM_D64_CTRL2_BOUND_DMA_LENGTH(len) \
443*4882a593Smuzhiyun (((len) > D64_CTRL2_MAX_LEN) ? D64_CTRL2_MAX_LEN : (len))
444*4882a593Smuzhiyun 
445*4882a593Smuzhiyun /** receive frame status */
446*4882a593Smuzhiyun typedef volatile struct {
447*4882a593Smuzhiyun 	uint16 len;	/**< received frame length (cf. D64_RX_FRM_STS_LEN mask) */
448*4882a593Smuzhiyun 	uint16 flags;	/**< upper status bits: overflow, descriptor count, data type (cf. D64_RX_FRM_STS_*) */
449*4882a593Smuzhiyun } dma_rxh_t;
450*4882a593Smuzhiyun 
451*4882a593Smuzhiyun #endif	/* _sbhnddma_h_ */
452