xref: /OK3568_Linux_fs/external/rkwifibt/drivers/bcmdhd/include/hnddma.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /*
2  * Generic Broadcom Home Networking Division (HND) DMA engine SW interface
3  * This supports the following chips: BCM42xx, 44xx, 47xx .
4  *
5  * Copyright (C) 2020, Broadcom.
6  *
7  *      Unless you and Broadcom execute a separate written software license
8  * agreement governing use of this software, this software is licensed to you
9  * under the terms of the GNU General Public License version 2 (the "GPL"),
10  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11  * following added to such license:
12  *
13  *      As a special exception, the copyright holders of this software give you
14  * permission to link this software with independent modules, and to copy and
15  * distribute the resulting executable under terms of your choice, provided that
16  * you also meet, for each linked independent module, the terms and conditions of
17  * the license of that module.  An independent module is a module which is not
18  * derived from this software.  The special exception does not apply to any
19  * modifications of the software.
20  *
21  *
22  * <<Broadcom-WL-IPTag/Dual:>>
23  */
24 
25 #ifndef	_hnddma_h_
26 #define	_hnddma_h_
27 
28 #include <typedefs.h>
29 #include <osl_decl.h>
30 #include <siutils.h>
31 #include <sbhnddma.h>
32 #include <hnd_pktq.h>
33 #include <hnd_pktpool.h>
34 
#ifndef _hnddma_pub_
#define _hnddma_pub_
/* Public dma handle: const-qualified so users of the API can read the
 * exported hnddma_pub counters/flags but never write them directly.
 */
typedef const struct hnddma_pub hnddma_t;
#endif /* _hnddma_pub_ */
39 
40 /* range param for dma_getnexttxp() and dma_txreclaim */
typedef enum txd_range {
	HNDDMA_RANGE_ALL		= 1,	/**< every posted tx descriptor, regardless of HW progress */
	HNDDMA_RANGE_TRANSMITTED,		/**< NOTE(review): presumably only descriptors the HW has
						 * transmitted - confirm against the dma implementation
						 */
	HNDDMA_RANGE_TRANSFERED			/**< historical spelling ("TRANSFERED") kept: it is part
						 * of the public interface
						 */
} txd_range_t;
46 
/* dma parameter ids, used as 'paramid' with dma_param_set()/dma_param_get() */
enum dma_param_id {
	/* tx-side parameters (numbered from 0) */
	HNDDMA_PID_TX_MULTI_OUTSTD_RD	= 0,
	HNDDMA_PID_TX_PREFETCH_CTL,
	HNDDMA_PID_TX_PREFETCH_THRESH,
	HNDDMA_PID_TX_BURSTLEN,
	HNDDMA_PID_TX_CHAN_SWITCH,

	/* rx-side parameters start at 0x100 so tx ids can grow without renumbering */
	HNDDMA_PID_RX_PREFETCH_CTL	= 0x100,
	HNDDMA_PID_RX_PREFETCH_THRESH,
	HNDDMA_PID_RX_BURSTLEN,
	HNDDMA_PID_BURSTLEN_CAP,
	HNDDMA_PID_BURSTLEN_WAR,
	HNDDMA_SEP_RX_HDR,	/**< SPLITRX related */
	HNDDMA_SPLIT_FIFO,
	HNDDMA_PID_D11RX_WAR,
	HNDDMA_PID_RX_WAIT_CMPL,
	HNDDMA_NRXPOST,
	HNDDMA_NRXBUFSZ,
	HNDDMA_PID_RXCTL_MOW,
	HNDDMA_M2M_RXBUF_RAW /* rx buffers are raw buffers, not lbufs/lfrags */
};
69 
/* fifo selectors, presumably values for the HNDDMA_SPLIT_FIFO parameter - confirm */
#define SPLIT_FIFO_0	1
#define SPLIT_FIFO_1	2

/* Callback registered via dma_context(); given opaque context 'ctx' and packet 'p'
 * it returns up to two descriptor/length pairs through (desc0,len0)/(desc1,len1).
 * NOTE(review): semantics inferred from the signature - confirm in the dma impl.
 */
typedef void (*setup_context_t)(void *ctx, void *p, uint8 **desc0, uint16 *len0,
	uint8 **desc1, uint16 *len1);
75 
/**
 * Exported data structure (read-only)
 *
 * State and error counters published by the dma driver.  Callers hold it
 * through the const-qualified 'hnddma_t' typedef, so every field below
 * must be treated as read-only outside the dma implementation.
 */
/* export structure */
struct hnddma_pub {
	uint		dmastflags;	/* dma status flags (BCM_DMA_STF_xxx below) */
	uint		dmactrlflags;	/**< dma control flags */

	/* rx error counters */
	uint		rxgiants;	/**< rx giant frames */
	uint		rxnobuf;	/**< rx out of dma descriptors */
	/* tx error counters */
	uint		txnobuf;	/**< tx out of dma descriptors */
	uint		txnodesc;	/**< tx out of dma descriptors running count */
};
91 
/* DMA status flags (values for hnddma_pub.dmastflags) */
#define BCM_DMA_STF_RX	(1u << 0u)	/* the channel is RX DMA */

/* opaque handles; struct definitions live in the dma implementation */
typedef struct dma_common dma_common_t;
typedef struct dma_dd_pool dma_dd_pool_t;
97 
/* Flags for the 'flags' argument of the dma_attach_ext() function */
#define BCM_DMA_IND_INTF_FLAG		0x00000001	/* set for using INDIRECT DMA INTERFACE */
#define BCM_DMA_DESC_ONLY_FLAG		0x00000002	/* For DMA that posts descriptors only and
							 * no packets
							 */
#define BCM_DMA_CHAN_SWITCH_EN		0x00000008	/* for d11 corerev 64+ to help arbitrate
							 * btw dma channels.
							 */
#define BCM_DMA_ROEXT_SUPPORT		0x00000010	/* for d11 corerev 128+ to support receive
							 * frame offset >=128B and <= 255B
							 */
#define BCM_DMA_RX_ALIGN_8BYTE		0x00000020	/* RXDMA address 8-byte aligned */
#define BCM_DMA_DESC_SHARED_POOL	0x00000100	/* For TX DMA that uses shared desc pool */
#define BCM_DMA_RXP_LIST		0x00000200      /* linked list for RXP instead of array */
112 
/* per-packet rx error check callback; registered through dma_common_attach() */
typedef int (*rxpkt_error_check_t)(const void* ctx, void* pkt);

/* Attach/detach the dma state common to the channels of a core.
 * indqsel/suspreq/flushreq are register addresses used by the indirect
 * dma interface; cb/ctx is the rx packet error-check callback and its context.
 */
extern dma_common_t * dma_common_attach(osl_t *osh, volatile uint32 *indqsel,
	volatile uint32 *suspreq, volatile uint32 *flushreq, rxpkt_error_check_t cb, void *ctx);
extern void dma_common_detach(dma_common_t *dmacommon);
/* set/get the shared descriptor pool context (see BCM_DMA_DESC_SHARED_POOL) */
extern void dma_common_set_ddpool_ctx(dma_common_t *dmacommon, void *desc_pool);
extern void * dma_common_get_ddpool_ctx(dma_common_t *dmacommon, void **va);
extern bool dma_check_last_desc(hnddma_t *dmah);
extern void dma_txfrwd(hnddma_t *dmah);
122 
#ifdef BCM_DMA_INDIRECT
/* Use indirect registers for non-ctmode */
#define DMA_INDQSEL_IA	(1u << 31u)	/* fixed: was (1 << 31) - left-shifting 1 into the
					 * sign bit of a signed int is undefined behaviour
					 * (C11 6.5.7); use an unsigned constant, matching
					 * the BCM_DMA_STF_RX style above.
					 */
extern void dma_set_indqsel(hnddma_t *di, bool force);
extern bool dma_is_indirect(hnddma_t *dmah);
#else
/* non-indirect build: selection is a no-op and the query is constant FALSE */
#define dma_set_indqsel(a, b)
#define dma_is_indirect(a)	FALSE
#endif /* #ifdef BCM_DMA_INDIRECT */
132 
/* Attach a dma channel.  'flags' takes the BCM_DMA_xxx attach flags above;
 * dmaregstx/dmaregsrx point at the channel's tx/rx register blocks.
 * NOTE(review): NULL-on-failure return is inferred from convention - confirm.
 */
extern hnddma_t * dma_attach_ext(dma_common_t *dmac, osl_t *osh, const char *name, si_t *sih,
	volatile void *dmaregstx, volatile void *dmaregsrx, uint32 flags, uint8 qnum,
	uint ntxd, uint nrxd, uint rxbufsize, int rxextheadroom, uint nrxpost, uint rxoffset,
	uint *msg_level, uint coreunit);

/* legacy attach: no dma_common handle, no attach flags, no queue number */
extern hnddma_t * dma_attach(osl_t *osh, const char *name, si_t *sih,
	volatile void *dmaregstx, volatile void *dmaregsrx,
	uint ntxd, uint nrxd, uint rxbufsize, int rxextheadroom, uint nrxpost,
	uint rxoffset, uint *msg_level);
142 
/* ---- per-channel lifecycle, reset and tx/rx engine control ---- */
void dma_rx_desc_init(hnddma_t *dmah, uint rxfifo);
void dma_detach(hnddma_t *dmah);
bool dma_txreset(hnddma_t *dmah);
bool dma_rxreset(hnddma_t *dmah);
bool dma_rxidle(hnddma_t *dmah);
void dma_txinit(hnddma_t *dmah);
bool dma_txenabled(hnddma_t *dmah);
void dma_rxinit(hnddma_t *dmah);
void dma_txsuspend(hnddma_t *dmah);
void dma_txresume(hnddma_t *dmah);
bool dma_txsuspended(hnddma_t *dmah);
bool dma_txsuspendedidle(hnddma_t *dmah);
void dma_txflush(hnddma_t *dmah);
void dma_txflush_clear(hnddma_t *dmah);
int dma_txfast_ext(hnddma_t *dmah, void *p0, bool commit, uint16 *pre_txout, uint16 *numd);
int dma_txfast_alfrag(hnddma_t *dmah, hnddma_t *aqm_dmah, void *p, bool commit, dma64dd_t *aqmdesc,
	uint d11_txh_len, bool ptxd_hw_enab);
/* common case of dma_txfast_ext(): caller does not need the pre_txout/numd out-params */
#define dma_txfast(dmah, p0, commit) \
		dma_txfast_ext((dmah), (p0), (commit), NULL, NULL)
/* ---- tx commit/unframed paths and rx enable/fill/reclaim ---- */
void dma_txcommit(hnddma_t *dmah);
int dma_txunframed(hnddma_t *dmah, void *buf, uint len, bool commit);
void *dma_getpos(hnddma_t *dmah, bool direction);
void dma_fifoloopbackenable(hnddma_t *dmah);
void dma_fifoloopbackdisable(hnddma_t *dmah);
bool dma_txstopped(hnddma_t *dmah);
bool dma_rxstopped(hnddma_t *dmah);
void dma_rxenable(hnddma_t *dmah);
bool dma_rxenabled(hnddma_t *dmah);
void *dma_rx(hnddma_t *dmah);
#ifdef APP_RX
/* APP_RX builds expose the rx ring internals (descriptor ring, rxout index) */
void dma_getnextrxp_app(hnddma_t *dmah, bool forceall, uint *pktcnt,
	void **head, void **tail);
void dma_rxfill_haddr_getparams(hnddma_t *dmah, uint *nrxd, uint16 *rxout,
	dma64dd_t **ddring, uint *rxextrahdrroom, uint32 **rxpktid);
void dma_rxfill_haddr_setparams(hnddma_t *dmah, uint16 rxout);
#endif /* APP_RX */
uint dma_rx_get_rxoffset(hnddma_t *dmah);
bool dma_rxfill(hnddma_t *dmah);
bool dma_rxfill_required(hnddma_t *dmah);
void dma_txreclaim(hnddma_t *dmah, txd_range_t range);
void dma_rxreclaim(hnddma_t *dmah);
/* marker so other headers can detect that dma_getuintvarptr() is declared */
#define _DMA_GETUINTVARPTR_
/* named-variable accessors into dma state */
uint *dma_getuintvarptr(hnddma_t *dmah, const char *name);
uint8 dma_getuint8var(hnddma_t *dmah, const char *name);
uint16 dma_getuint16var(hnddma_t *dmah, const char *name);
uint32 dma_getuint32var(hnddma_t *dmah, const char *name);
/* packet retrieval: get* variants consume, peek* variants do not */
void * dma_getnexttxp(hnddma_t *dmah, txd_range_t range);
void * dma_getnextp(hnddma_t *dmah);
void * dma_getnextrxp(hnddma_t *dmah, bool forceall);
void * dma_peeknexttxp(hnddma_t *dmah, txd_range_t range);
int dma_peekntxp(hnddma_t *dmah, int *len, void *txps[], txd_range_t range);
void * dma_peeknextrxp(hnddma_t *dmah);
void dma_rxparam_get(hnddma_t *dmah, uint16 *rxoffset, uint16 *rxbufsize);
bool dma_is_rxfill_suspend(hnddma_t *dmah);
void dma_txblock(hnddma_t *dmah);
void dma_txunblock(hnddma_t *dmah);
uint dma_txactive(hnddma_t *dmah);
uint dma_rxactive(hnddma_t *dmah);
void dma_txrotate(hnddma_t *dmah);
void dma_counterreset(hnddma_t *dmah);
uint dma_ctrlflags(hnddma_t *dmah, uint mask, uint flags);
uint dma_txpending(hnddma_t *dmah);
uint dma_txcommitted(hnddma_t *dmah);
/* packet pool binding for rx buffer allocation */
int dma_pktpool_set(hnddma_t *dmah, pktpool_t *pool);
int dma_rxdatapool_set(hnddma_t *dmah, pktpool_t *pktpool);
pktpool_t *dma_rxdatapool_get(hnddma_t *dmah);
209 
/* ---- debug dumps and tunables ---- */
void dma_dump_txdmaregs(hnddma_t *dmah, uint32 **buf);
void dma_dump_rxdmaregs(hnddma_t *dmah, uint32 **buf);
#if defined(BCMDBG) || defined(BCMDBG_DUMP) || defined(BCMDBG_DMA)
void dma_dump(hnddma_t *dmah, struct bcmstrbuf *b, bool dumpring);
void dma_dumptx(hnddma_t *dmah, struct bcmstrbuf *b, bool dumpring);
void dma_dumprx(hnddma_t *dmah, struct bcmstrbuf *b, bool dumpring);
#endif
bool dma_rxtxerror(hnddma_t *dmah, bool istx);
void dma_burstlen_set(hnddma_t *dmah, uint8 rxburstlen, uint8 txburstlen);
uint dma_avoidance_cnt(hnddma_t *dmah);
/* paramid takes the dma_param_id values declared above */
void dma_param_set(hnddma_t *dmah, uint16 paramid, uint16 paramval);
void dma_param_get(hnddma_t *dmah, uint16 paramid, uint *paramval);
void dma_context(hnddma_t *dmah, setup_context_t fn, void *ctx);

bool dma_glom_enable(hnddma_t *dmah, uint32 val);
uint dma_activerxbuf(hnddma_t *dmah);
bool dma_rxidlestatus(hnddma_t *dmah);
uint dma_get_rxpost(hnddma_t *dmah);
228 
/* Return the allowed address width.
 * This needs to be done after SB attach but before dma attach:
 * SB attach provides the ability to probe backplane and dma core capabilities,
 * and this info is needed by DMA_ALLOC_CONSISTENT in dma attach.
 */
extern uint dma_addrwidth(si_t *sih, void *dmaregs);

/* count the number of tx packets that are queued to the dma ring */
extern uint dma_txp(hnddma_t *di);

extern void dma_txrewind(hnddma_t *di);

/* pio helpers: post raw 64-bit addresses instead of packets */
extern int dma_msgbuf_txfast(hnddma_t *di, dma64addr_t p0, bool com, uint32 ln, bool fst, bool lst);
extern int dma_ptrbuf_txfast(hnddma_t *dmah, dma64addr_t p0, void *p, bool commit,
	uint32 len, bool first, bool last);

extern int dma_rxfast(hnddma_t *di, dma64addr_t p, uint32 len);
extern int dma_rxfill_suspend(hnddma_t *dmah, bool suspended);
/* link/unlink two dma handles (NOTE(review): pairing semantics live in the impl) */
extern void dma_link_handle(hnddma_t *dmah1, hnddma_t *dmah2);
extern void dma_unlink_handle(hnddma_t *dmah1, hnddma_t *dmah2);
extern int dma_rxfill_unframed(hnddma_t *di, void *buf, uint len, bool commit);

/* tx descriptor index/count/address accessors */
extern uint16 dma_get_next_txd_idx(hnddma_t *di, bool txout);
extern uint16 dma_get_txd_count(hnddma_t *dmah, uint16 start, bool txout);
extern uintptr dma_get_txd_addr(hnddma_t *di, uint16 idx);
254 extern uintptr dma_get_txd_addr(hnddma_t *di, uint16 idx);
255 
256 /* returns the memory address (hi and low) of the buffer associated with the dma descriptor
257  * having index idx.
258  */
259 extern void dma_get_txd_memaddr(hnddma_t *dmah, uint32 *addrlo, uint32 *addrhi, uint idx);
260 
261 extern int dma_txdesc(hnddma_t *dmah, dma64dd_t *dd, bool commit);
262 extern int dma_nexttxdd(hnddma_t *dmah, txd_range_t range, uint32 *flags1, uint32 *flags2,
263 	bool advance);
264 
265 extern void dma_update_rxfill(hnddma_t *dmah);
266 extern void dma_rxchan_reset(hnddma_t *di);
267 extern void dma_txchan_reset(hnddma_t *di);
268 extern void dma_chan_reset(hnddma_t *dmah);
269 extern pktpool_t* dma_pktpool_get(hnddma_t *dmah);
270 extern void dma_clearrxp(hnddma_t *dmah);
271 extern void dma_cleartxp(hnddma_t *dmah);
272 
/* consume the next completed tx descriptor (advance = TRUE) */
#define dma_getnexttxdd(dmah, range, flags1, flags2) \
		dma_nexttxdd((dmah), (range), (flags1), (flags2), TRUE)

/* inspect the next completed tx descriptor without consuming it (advance = FALSE) */
#define dma_peeknexttxdd(dmah, range, flags1, flags2) \
		dma_nexttxdd((dmah), (range), (flags1), (flags2), FALSE)

#define NUM_VEC_PCIE	4

/* values OR'ed into m2m_desc_t.flags via SETUP_XFER_FLAGS() */
#define XFER_FROM_LBUF	0x1
#define XFER_TO_LBUF	0x2
#define XFER_INJ_ERR	0x4
284 
/* one scatter/gather element of a mem-to-mem transfer */
typedef struct m2m_vec_s {
	dma64addr_t	addr;	/**< 64-bit dma address of the buffer */
	uint32		len;	/**< buffer length in bytes */
} m2m_vec_t;
289 
/* Transfer description for dma_m2m_submit().  vec[] holds all rx vectors
 * first, then all tx vectors (SETUP_TX_DESC indexes at num_rx_vec + num_tx_vec),
 * so rx entries must be added before any tx entry.
 */
typedef struct m2m_desc_s {
	uint8		num_rx_vec;	/**< count of rx entries at the front of vec[] */
	uint8		num_tx_vec;	/**< count of tx entries following the rx entries */
	uint8		flags;		/**< XFER_xxx flags */
	bool		commit;		/**< TRUE to commit on submit (set by INIT_M2M_DESC) */
	m2m_vec_t	vec[];		/**< flexible array of transfer vectors */
} m2m_desc_t;
297 
/* Reset an m2m_desc_t before any vectors are added; commit defaults to TRUE.
 * Wrapped in do/while(0) so the macro is a single statement and is safe in
 * unbraced if/else bodies (the previous bare-brace form was not).
 * 'desc' is evaluated more than once - do not pass an expression with side effects.
 */
#define INIT_M2M_DESC(desc) \
do {\
	(desc)->num_rx_vec = 0;	\
	(desc)->num_tx_vec = 0;	\
	(desc)->flags = 0;	\
	(desc)->commit = TRUE;	\
} while (0)
305 
/* Append an rx vector to an m2m_desc_t.  All rx vectors must be added before
 * any tx vector (asserted), because tx vectors are placed after the rx block.
 * Wrapped in do/while(0) so the macro is a single statement and safe in
 * unbraced if/else bodies.  'desc' is evaluated more than once - do not pass
 * an expression with side effects.
 */
#define SETUP_RX_DESC(desc, rxaddr, rxlen) \
do {\
	ASSERT((desc)->num_tx_vec == 0);	\
	(desc)->vec[(desc)->num_rx_vec].addr = (rxaddr);	\
	(desc)->vec[(desc)->num_rx_vec].len = (rxlen);	\
	(desc)->num_rx_vec++;	\
} while (0)
313 
/* Append a tx vector to an m2m_desc_t; tx vectors are stored after all rx
 * vectors (index num_rx_vec + num_tx_vec), so call this only after every
 * SETUP_RX_DESC.  Wrapped in do/while(0) so the macro is a single statement
 * and safe in unbraced if/else bodies.  'desc' is evaluated more than once -
 * do not pass an expression with side effects.
 */
#define SETUP_TX_DESC(desc, txaddr, txlen) \
do {\
	(desc)->vec[(desc)->num_tx_vec + (desc)->num_rx_vec].addr = (txaddr);	\
	(desc)->vec[(desc)->num_tx_vec + (desc)->num_rx_vec].len = (txlen);	\
	(desc)->num_tx_vec++;	\
} while (0)
320 
/* OR an XFER_xxx flag into an m2m_desc_t.  Wrapped in do/while(0) so the
 * macro is a single statement and safe in unbraced if/else bodies.
 */
#define SETUP_XFER_FLAGS(desc, flag) \
do {\
	(desc)->flags |= (flag);	\
} while (0)
325 
/* nonzero when the channel's tx descriptors come from the shared descriptor pool
 * (tests dmactrlflags, not dmastflags)
 */
#define DD_IS_SHARED_POOL(di) ((di)->dmactrlflags & DMA_CTRL_SHARED_POOL)

/* submit an m2m_desc_t built with the SETUP_xxx macros above */
extern int dma_m2m_submit(hnddma_t *dmah, m2m_desc_t *desc, bool implicit);
extern void dma_chan_enable(hnddma_t *dmah, bool enable);

extern bool dma_rxfill_p(hnddma_t *dmah, void *p);
/* associate an AQM dma handle with its HW dma handle */
extern void dma_aqm_di_link(hnddma_t *dmah_aqm, hnddma_t *dmah_hw);
extern void dma_dump_aqminfo(hnddma_t * dmah, struct bcmstrbuf *b, uint16 fifonum);

/* To dump ntxd and nrxd from the DMA ring */
void dma_dump_info(hnddma_t *dmah, uint16 fifonum, struct bcmstrbuf *b);
337 
338 #endif	/* _hnddma_h_ */
339