/*
 * Generic Broadcom Home Networking Division (HND) DMA engine SW interface
 * This supports the following chips: BCM42xx, 44xx, 47xx.
 *
 * Copyright (C) 2020, Broadcom.
 *
 *      Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module.  An independent module is a module which is not
 * derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *
 * <<Broadcom-WL-IPTag/Dual:>>
 */

#ifndef	_hnddma_h_
#define	_hnddma_h_

#include <typedefs.h>
#include <osl_decl.h>
#include <siutils.h>
#include <sbhnddma.h>
#include <hnd_pktq.h>
#include <hnd_pktpool.h>

#ifndef _hnddma_pub_
#define _hnddma_pub_
typedef const struct hnddma_pub hnddma_t;
#endif /* _hnddma_pub_ */

/* range param for dma_getnexttxp() and dma_txreclaim() */
typedef enum txd_range {
	HNDDMA_RANGE_ALL		= 1,
	HNDDMA_RANGE_TRANSMITTED,
	HNDDMA_RANGE_TRANSFERED
} txd_range_t;
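
/*
 * Usage sketch (illustrative, not part of the API contract): a teardown path
 * might suspend tx and then reclaim every posted packet, completed or not,
 * with HNDDMA_RANGE_ALL. PKTFREE() is the OSL packet-free helper; the
 * surrounding function and the `osh`/`dmah` variables are assumptions.
 *
 *	static void example_tx_teardown(osl_t *osh, hnddma_t *dmah)
 *	{
 *		void *p;
 *
 *		dma_txsuspend(dmah);
 *		while ((p = dma_getnexttxp(dmah, HNDDMA_RANGE_ALL)) != NULL)
 *			PKTFREE(osh, p, TRUE);	// free every packet still on the ring
 *	}
 */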

/* dma parameter ids */
enum dma_param_id {
	HNDDMA_PID_TX_MULTI_OUTSTD_RD	= 0,
	HNDDMA_PID_TX_PREFETCH_CTL,
	HNDDMA_PID_TX_PREFETCH_THRESH,
	HNDDMA_PID_TX_BURSTLEN,
	HNDDMA_PID_TX_CHAN_SWITCH,

	HNDDMA_PID_RX_PREFETCH_CTL	= 0x100,
	HNDDMA_PID_RX_PREFETCH_THRESH,
	HNDDMA_PID_RX_BURSTLEN,
	HNDDMA_PID_BURSTLEN_CAP,
	HNDDMA_PID_BURSTLEN_WAR,
	HNDDMA_SEP_RX_HDR,	/**< SPLITRX related */
	HNDDMA_SPLIT_FIFO,
	HNDDMA_PID_D11RX_WAR,
	HNDDMA_PID_RX_WAIT_CMPL,
	HNDDMA_NRXPOST,
	HNDDMA_NRXBUFSZ,
	HNDDMA_PID_RXCTL_MOW,
	HNDDMA_M2M_RXBUF_RAW /* rx buffers are raw buffers, not lbufs/lfrags */
};
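
/*
 * Usage sketch (illustrative only): these ids are consumed by
 * dma_param_set()/dma_param_get(), declared later in this file. The burst
 * length value below is an arbitrary example, not a recommended setting.
 *
 *	uint burstlen;
 *
 *	dma_param_set(dmah, HNDDMA_PID_TX_BURSTLEN, 3);
 *	dma_param_get(dmah, HNDDMA_PID_TX_BURSTLEN, &burstlen);
 */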

#define SPLIT_FIFO_0	1
#define SPLIT_FIFO_1	2

typedef void (*setup_context_t)(void *ctx, void *p, uint8 **desc0, uint16 *len0,
	uint8 **desc1, uint16 *len1);
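
/*
 * Sketch of a setup_context_t callback (all names hypothetical): the hook lets
 * the owner of a split-rx packet hand back the two descriptor fragments. It is
 * registered via dma_context(), declared later in this file.
 *
 *	struct example_split { uint8 *hdr; uint16 hdr_len; uint8 *body; uint16 body_len; };
 *
 *	static void example_setup(void *ctx, void *p, uint8 **desc0, uint16 *len0,
 *		uint8 **desc1, uint16 *len1)
 *	{
 *		struct example_split *s = (struct example_split *)ctx;
 *
 *		(void)p;
 *		*desc0 = s->hdr;	// first fragment: header
 *		*len0 = s->hdr_len;
 *		*desc1 = s->body;	// second fragment: payload
 *		*len1 = s->body_len;
 *	}
 */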

/**
 * Exported data structure (read-only)
 */
struct hnddma_pub {
	uint		dmastflags;	/**< dma status flags */
	uint		dmactrlflags;	/**< dma control flags */

	/* rx error counters */
	uint		rxgiants;	/**< rx giant frames */
	uint		rxnobuf;	/**< rx out of dma descriptors */
	/* tx error counters */
	uint		txnobuf;	/**< tx out of dma descriptors */
	uint		txnodesc;	/**< tx out of dma descriptors (running count) */
};
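
/*
 * Usage sketch (illustrative only): the structure is exported read-only, so
 * callers just sample it, e.g. to react to rx descriptor starvation. `dmah`
 * is assumed to be a handle returned by dma_attach().
 *
 *	if (dmah->rxnobuf != 0) {
 *		// the rx ring ran out of descriptors at least once; replenish
 *		dma_rxfill(dmah);
 *	}
 */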

/* DMA status flags */
#define BCM_DMA_STF_RX	(1u << 0u)	/* the channel is RX DMA */

typedef struct dma_common dma_common_t;
typedef struct dma_dd_pool dma_dd_pool_t;

/* Flags for dma_attach_ext function */
#define BCM_DMA_IND_INTF_FLAG		0x00000001	/* set for using INDIRECT DMA INTERFACE */
#define BCM_DMA_DESC_ONLY_FLAG		0x00000002	/* For DMA that posts descriptors only and
							 * no packets
							 */
#define BCM_DMA_CHAN_SWITCH_EN		0x00000008	/* for d11 corerev 64+ to help arbitrate
							 * between dma channels.
							 */
#define BCM_DMA_ROEXT_SUPPORT		0x00000010	/* for d11 corerev 128+ to support receive
							 * frame offset >= 128B and <= 255B
							 */
#define BCM_DMA_RX_ALIGN_8BYTE		0x00000020	/* RXDMA address 8-byte aligned */
#define BCM_DMA_DESC_SHARED_POOL	0x00000100	/* For TX DMA that uses shared desc pool */
#define BCM_DMA_RXP_LIST		0x00000200	/* linked list for RXP instead of array */

typedef int (*rxpkt_error_check_t)(const void* ctx, void* pkt);
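
/*
 * Sketch of an rxpkt_error_check_t callback (hypothetical; the BCME_xx return
 * convention is an assumption, not documented here). It is passed to
 * dma_common_attach() below.
 *
 *	static int example_rxpkt_check(const void *ctx, void *pkt)
 *	{
 *		(void)ctx;
 *		return (pkt != NULL) ? BCME_OK : BCME_ERROR;
 *	}
 */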

extern dma_common_t * dma_common_attach(osl_t *osh, volatile uint32 *indqsel,
	volatile uint32 *suspreq, volatile uint32 *flushreq, rxpkt_error_check_t cb, void *ctx);
extern void dma_common_detach(dma_common_t *dmacommon);
extern void dma_common_set_ddpool_ctx(dma_common_t *dmacommon, void *desc_pool);
extern void * dma_common_get_ddpool_ctx(dma_common_t *dmacommon, void **va);
extern bool dma_check_last_desc(hnddma_t *dmah);
extern void dma_txfrwd(hnddma_t *dmah);

#ifdef BCM_DMA_INDIRECT
/* Use indirect registers for non-ctmode */
#define DMA_INDQSEL_IA	(1 << 31)
extern void dma_set_indqsel(hnddma_t *di, bool force);
extern bool dma_is_indirect(hnddma_t *dmah);
#else
#define dma_set_indqsel(a, b)
#define dma_is_indirect(a)	FALSE
#endif /* BCM_DMA_INDIRECT */

extern hnddma_t * dma_attach_ext(dma_common_t *dmac, osl_t *osh, const char *name, si_t *sih,
	volatile void *dmaregstx, volatile void *dmaregsrx, uint32 flags, uint8 qnum,
	uint ntxd, uint nrxd, uint rxbufsize, int rxextheadroom, uint nrxpost, uint rxoffset,
	uint *msg_level, uint coreunit);

extern hnddma_t * dma_attach(osl_t *osh, const char *name, si_t *sih,
	volatile void *dmaregstx, volatile void *dmaregsrx,
	uint ntxd, uint nrxd, uint rxbufsize, int rxextheadroom, uint nrxpost,
	uint rxoffset, uint *msg_level);
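
/*
 * Attach sketch (illustrative only): the ring sizes, buffer size and offsets
 * below are placeholders; real callers derive the register pointers from the
 * core's register map and size the rings for their traffic.
 *
 *	uint msglevel = 0;
 *	hnddma_t *dmah;
 *
 *	dmah = dma_attach(osh, "example", sih, dmaregstx, dmaregsrx,
 *		256, 256, 2048, 0, 256, 0, &msglevel);
 *	if (dmah == NULL)
 *		return BCME_NOMEM;	// attach failed
 */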

void dma_rx_desc_init(hnddma_t *dmah, uint rxfifo);
void dma_detach(hnddma_t *dmah);
bool dma_txreset(hnddma_t *dmah);
bool dma_rxreset(hnddma_t *dmah);
bool dma_rxidle(hnddma_t *dmah);
void dma_txinit(hnddma_t *dmah);
bool dma_txenabled(hnddma_t *dmah);
void dma_rxinit(hnddma_t *dmah);
void dma_txsuspend(hnddma_t *dmah);
void dma_txresume(hnddma_t *dmah);
bool dma_txsuspended(hnddma_t *dmah);
bool dma_txsuspendedidle(hnddma_t *dmah);
void dma_txflush(hnddma_t *dmah);
void dma_txflush_clear(hnddma_t *dmah);
int dma_txfast_ext(hnddma_t *dmah, void *p0, bool commit, uint16 *pre_txout, uint16 *numd);
int dma_txfast_alfrag(hnddma_t *dmah, hnddma_t *aqm_dmah, void *p, bool commit, dma64dd_t *aqmdesc,
	uint d11_txh_len, bool ptxd_hw_enab);
#define dma_txfast(dmah, p0, commit) \
		dma_txfast_ext((dmah), (p0), (commit), NULL, NULL)
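
/*
 * Tx path sketch (illustrative only): dma_txfast() typically returns 0 on
 * success and a negative value when the ring is full; completed packets are
 * later harvested with dma_getnexttxp() using HNDDMA_RANGE_TRANSMITTED.
 *
 *	if (dma_txfast(dmah, p, TRUE) < 0)
 *		PKTFREE(osh, p, TRUE);	// ring full: drop (or requeue)
 *
 *	// completion path, e.g. from the tx interrupt handler
 *	while ((p = dma_getnexttxp(dmah, HNDDMA_RANGE_TRANSMITTED)) != NULL)
 *		PKTFREE(osh, p, TRUE);
 */
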
void dma_txcommit(hnddma_t *dmah);
int dma_txunframed(hnddma_t *dmah, void *buf, uint len, bool commit);
void *dma_getpos(hnddma_t *dmah, bool direction);
void dma_fifoloopbackenable(hnddma_t *dmah);
void dma_fifoloopbackdisable(hnddma_t *dmah);
bool dma_txstopped(hnddma_t *dmah);
bool dma_rxstopped(hnddma_t *dmah);
void dma_rxenable(hnddma_t *dmah);
bool dma_rxenabled(hnddma_t *dmah);
void *dma_rx(hnddma_t *dmah);
#ifdef APP_RX
void dma_getnextrxp_app(hnddma_t *dmah, bool forceall, uint *pktcnt,
	void **head, void **tail);
void dma_rxfill_haddr_getparams(hnddma_t *dmah, uint *nrxd, uint16 *rxout,
	dma64dd_t **ddring, uint *rxextrahdrroom, uint32 **rxpktid);
void dma_rxfill_haddr_setparams(hnddma_t *dmah, uint16 rxout);
#endif /* APP_RX */
uint dma_rx_get_rxoffset(hnddma_t *dmah);
bool dma_rxfill(hnddma_t *dmah);
bool dma_rxfill_required(hnddma_t *dmah);
void dma_txreclaim(hnddma_t *dmah, txd_range_t range);
void dma_rxreclaim(hnddma_t *dmah);
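
/*
 * Rx path sketch (illustrative only): drain received packets, then re-post
 * free buffers. `process_rx_pkt` is a hypothetical consumer.
 *
 *	void *p;
 *
 *	while ((p = dma_rx(dmah)) != NULL)
 *		process_rx_pkt(p);
 *
 *	dma_rxfill(dmah);	// replenish the ring
 */
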
#define _DMA_GETUINTVARPTR_
uint *dma_getuintvarptr(hnddma_t *dmah, const char *name);
uint8 dma_getuint8var(hnddma_t *dmah, const char *name);
uint16 dma_getuint16var(hnddma_t *dmah, const char *name);
uint32 dma_getuint32var(hnddma_t *dmah, const char *name);
void * dma_getnexttxp(hnddma_t *dmah, txd_range_t range);
void * dma_getnextp(hnddma_t *dmah);
void * dma_getnextrxp(hnddma_t *dmah, bool forceall);
void * dma_peeknexttxp(hnddma_t *dmah, txd_range_t range);
int dma_peekntxp(hnddma_t *dmah, int *len, void *txps[], txd_range_t range);
void * dma_peeknextrxp(hnddma_t *dmah);
void dma_rxparam_get(hnddma_t *dmah, uint16 *rxoffset, uint16 *rxbufsize);
bool dma_is_rxfill_suspend(hnddma_t *dmah);
void dma_txblock(hnddma_t *dmah);
void dma_txunblock(hnddma_t *dmah);
uint dma_txactive(hnddma_t *dmah);
uint dma_rxactive(hnddma_t *dmah);
void dma_txrotate(hnddma_t *dmah);
void dma_counterreset(hnddma_t *dmah);
uint dma_ctrlflags(hnddma_t *dmah, uint mask, uint flags);
uint dma_txpending(hnddma_t *dmah);
uint dma_txcommitted(hnddma_t *dmah);
int dma_pktpool_set(hnddma_t *dmah, pktpool_t *pool);
int dma_rxdatapool_set(hnddma_t *dmah, pktpool_t *pktpool);
pktpool_t *dma_rxdatapool_get(hnddma_t *dmah);

void dma_dump_txdmaregs(hnddma_t *dmah, uint32 **buf);
void dma_dump_rxdmaregs(hnddma_t *dmah, uint32 **buf);
#if defined(BCMDBG) || defined(BCMDBG_DUMP) || defined(BCMDBG_DMA)
void dma_dump(hnddma_t *dmah, struct bcmstrbuf *b, bool dumpring);
void dma_dumptx(hnddma_t *dmah, struct bcmstrbuf *b, bool dumpring);
void dma_dumprx(hnddma_t *dmah, struct bcmstrbuf *b, bool dumpring);
#endif
bool dma_rxtxerror(hnddma_t *dmah, bool istx);
void dma_burstlen_set(hnddma_t *dmah, uint8 rxburstlen, uint8 txburstlen);
uint dma_avoidance_cnt(hnddma_t *dmah);
void dma_param_set(hnddma_t *dmah, uint16 paramid, uint16 paramval);
void dma_param_get(hnddma_t *dmah, uint16 paramid, uint *paramval);
void dma_context(hnddma_t *dmah, setup_context_t fn, void *ctx);

bool dma_glom_enable(hnddma_t *dmah, uint32 val);
uint dma_activerxbuf(hnddma_t *dmah);
bool dma_rxidlestatus(hnddma_t *dmah);
uint dma_get_rxpost(hnddma_t *dmah);

/* Returns the allowed address width.
 * This must be called after SB attach but before dma attach: SB attach provides
 * the ability to probe backplane and dma core capabilities, and this info is
 * needed by DMA_ALLOC_CONSISTENT in dma attach.
 */
extern uint dma_addrwidth(si_t *sih, void *dmaregs);

/* count the number of tx packets that are queued to the dma ring */
extern uint dma_txp(hnddma_t *di);

extern void dma_txrewind(hnddma_t *di);

/* pio helpers */
extern int dma_msgbuf_txfast(hnddma_t *di, dma64addr_t p0, bool com, uint32 ln, bool fst, bool lst);
extern int dma_ptrbuf_txfast(hnddma_t *dmah, dma64addr_t p0, void *p, bool commit,
	uint32 len, bool first, bool last);

extern int dma_rxfast(hnddma_t *di, dma64addr_t p, uint32 len);
extern int dma_rxfill_suspend(hnddma_t *dmah, bool suspended);
extern void dma_link_handle(hnddma_t *dmah1, hnddma_t *dmah2);
extern void dma_unlink_handle(hnddma_t *dmah1, hnddma_t *dmah2);
extern int dma_rxfill_unframed(hnddma_t *di, void *buf, uint len, bool commit);

extern uint16 dma_get_next_txd_idx(hnddma_t *di, bool txout);
extern uint16 dma_get_txd_count(hnddma_t *dmah, uint16 start, bool txout);
extern uintptr dma_get_txd_addr(hnddma_t *di, uint16 idx);

/* returns the memory address (hi and low) of the buffer associated with the dma descriptor
 * having index idx.
 */
extern void dma_get_txd_memaddr(hnddma_t *dmah, uint32 *addrlo, uint32 *addrhi, uint idx);

extern int dma_txdesc(hnddma_t *dmah, dma64dd_t *dd, bool commit);
extern int dma_nexttxdd(hnddma_t *dmah, txd_range_t range, uint32 *flags1, uint32 *flags2,
	bool advance);

extern void dma_update_rxfill(hnddma_t *dmah);
extern void dma_rxchan_reset(hnddma_t *di);
extern void dma_txchan_reset(hnddma_t *di);
extern void dma_chan_reset(hnddma_t *dmah);
extern pktpool_t* dma_pktpool_get(hnddma_t *dmah);
extern void dma_clearrxp(hnddma_t *dmah);
extern void dma_cleartxp(hnddma_t *dmah);

#define dma_getnexttxdd(dmah, range, flags1, flags2) \
		dma_nexttxdd((dmah), (range), (flags1), (flags2), TRUE)

#define dma_peeknexttxdd(dmah, range, flags1, flags2) \
		dma_nexttxdd((dmah), (range), (flags1), (flags2), FALSE)

#define NUM_VEC_PCIE	4

#define XFER_FROM_LBUF	0x1
#define XFER_TO_LBUF	0x2
#define XFER_INJ_ERR	0x4

typedef struct m2m_vec_s {
	dma64addr_t	addr;
	uint32		len;
} m2m_vec_t;

typedef struct m2m_desc_s {
	uint8		num_rx_vec;
	uint8		num_tx_vec;
	uint8		flags;
	bool		commit;
	m2m_vec_t	vec[];
} m2m_desc_t;

#define INIT_M2M_DESC(desc) \
{\
	desc->num_rx_vec = 0;	\
	desc->num_tx_vec = 0;	\
	desc->flags = 0;	\
	desc->commit = TRUE;	\
}

#define SETUP_RX_DESC(desc, rxaddr, rxlen) \
{\
	ASSERT(desc->num_tx_vec == 0);	\
	desc->vec[desc->num_rx_vec].addr = rxaddr;	\
	desc->vec[desc->num_rx_vec].len = rxlen;	\
	desc->num_rx_vec++;	\
}

#define SETUP_TX_DESC(desc, txaddr, txlen) \
{\
	desc->vec[desc->num_tx_vec + desc->num_rx_vec].addr = txaddr;	\
	desc->vec[desc->num_tx_vec + desc->num_rx_vec].len = txlen;	\
	desc->num_tx_vec++;	\
}

#define SETUP_XFER_FLAGS(desc, flag) \
{\
	desc->flags |= flag;	\
}

#define DD_IS_SHARED_POOL(di) ((di)->dmactrlflags & DMA_CTRL_SHARED_POOL)

extern int dma_m2m_submit(hnddma_t *dmah, m2m_desc_t *desc, bool implicit);
extern void dma_chan_enable(hnddma_t *dmah, bool enable);
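
/*
 * M2M sketch (illustrative only): build a one-source/one-destination transfer
 * with the macros above and submit it. The flexible-array sizing, the zeroed
 * addresses and `buf_len` are assumptions for the example; note that rx
 * (destination) vectors must be added before tx (source) vectors, as
 * SETUP_RX_DESC() asserts.
 *
 *	uint8 raw[sizeof(m2m_desc_t) + 2 * sizeof(m2m_vec_t)];
 *	m2m_desc_t *desc = (m2m_desc_t *)raw;
 *	dma64addr_t src = {0, 0}, dst = {0, 0};	// fill with real physical addresses
 *	uint32 buf_len = 4096;
 *
 *	INIT_M2M_DESC(desc);
 *	SETUP_RX_DESC(desc, dst, buf_len);	// destination first
 *	SETUP_TX_DESC(desc, src, buf_len);	// then source
 *	dma_m2m_submit(dmah, desc, FALSE);
 */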

extern bool dma_rxfill_p(hnddma_t *dmah, void *p);
extern void dma_aqm_di_link(hnddma_t *dmah_aqm, hnddma_t *dmah_hw);
extern void dma_dump_aqminfo(hnddma_t *dmah, struct bcmstrbuf *b, uint16 fifonum);

/* To dump ntxd and nrxd from the DMA ring */
void dma_dump_info(hnddma_t *dmah, uint16 fifonum, struct bcmstrbuf *b);

#endif	/* _hnddma_h_ */