/**
 * @file definition of host message ring functionality
 * Provides type definitions and function prototypes used to link the
 * DHD OS, bus, and protocol modules.
 *
 * Portions of this code are copyright (c) 2022 Cypress Semiconductor Corporation
 *
 * Copyright (C) 1999-2017, Broadcom Corporation
 *
 *      Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module.  An independent module is a module which is not
 * derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *      Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: dhd_msgbuf.c 701962 2017-05-30 06:13:15Z $
 */

#include <typedefs.h>
#include <osl.h>

#include <bcmutils.h>
#include <bcmmsgbuf.h>
#include <bcmendian.h>
#include <bcmstdlib_s.h>

#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_proto.h>

#include <dhd_bus.h>

#include <dhd_dbg.h>
#include <siutils.h>
#include <dhd_debug.h>

#include <dhd_flowring.h>

#include <pcie_core.h>
#include <bcmpcie.h>
#include <dhd_pcie.h>

#if defined(DHD_LB)
#include <linux/cpu.h>
#include <bcm_ring.h>
#define DHD_LB_WORKQ_SZ			    (8192)
#define DHD_LB_WORKQ_SYNC           (16)
#define DHD_LB_WORK_SCHED           (DHD_LB_WORKQ_SYNC * 2)
#endif /* DHD_LB */

#include <etd.h>
#include <hnd_debug.h>
#include <bcmtlv.h>
#include <hnd_armtrap.h>
#include <dnglevent.h>

#ifdef DHD_PKT_LOGGING
#include <dhd_pktlog.h>
#include <dhd_linux_pktdump.h>
#endif /* DHD_PKT_LOGGING */
#ifdef DHD_EWPR_VER2
#include <dhd_bitpack.h>
#endif /* DHD_EWPR_VER2 */

extern char dhd_version[];
extern char fw_version[];

/**
 * Host configures a soft doorbell for d2h rings, by specifying a 32bit host
 * address where a value must be written. Host may also configure interrupt
 * coalescing on this soft doorbell.
 * Use Case: Hosts with network processors may register with the dongle the
 * network processor's thread wakeup register and a value corresponding to the
 * core/thread context. Dongle will issue a write transaction <address,value>
 * to the PCIE RC, which the host will need to route to the mapped register
 * space.
 */
/* #define DHD_D2H_SOFT_DOORBELL_SUPPORT */

/* Dependency Check */
#if defined(IOCTLRESP_USE_CONSTMEM) && defined(DHD_USE_STATIC_CTRLBUF)
#error "DHD_USE_STATIC_CTRLBUF does NOT work with IOCTLRESP_USE_CONSTMEM"
#endif /* IOCTLRESP_USE_CONSTMEM && DHD_USE_STATIC_CTRLBUF */

#define RETRIES 2		/* # of retries to retrieve matching ioctl response */

#define DEFAULT_RX_BUFFERS_TO_POST	256
#define RXBUFPOST_THRESHOLD			32
#define RX_BUF_BURST				32 /* Rx buffers for MSDU Data */

#define DHD_STOP_QUEUE_THRESHOLD	200
#define DHD_START_QUEUE_THRESHOLD	100

#define RX_DMA_OFFSET		8 /* Mem2mem DMA inserts an extra 8 bytes */
#define IOCT_RETBUF_SIZE	(RX_DMA_OFFSET + WLC_IOCTL_MAXLEN)

/* flags for ioctl pending status */
#define MSGBUF_IOCTL_ACK_PENDING	(1<<0)
#define MSGBUF_IOCTL_RESP_PENDING	(1<<1)

#define DHD_IOCTL_REQ_PKTBUFSZ		2048
#define MSGBUF_IOCTL_MAX_RQSTLEN	(DHD_IOCTL_REQ_PKTBUFSZ - H2DRING_CTRL_SUB_ITEMSIZE)

#define DMA_ALIGN_LEN		4

#define DMA_D2H_SCRATCH_BUF_LEN	8
#define DMA_XFER_LEN_LIMIT	0x400000

#ifdef BCM_HOST_BUF
#ifndef DMA_HOST_BUFFER_LEN
#define DMA_HOST_BUFFER_LEN	0x200000
#endif /* DMA_HOST_BUFFER_LEN */
#endif /* BCM_HOST_BUF */

#define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ		8192

#define DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D		1
#define DHD_FLOWRING_MAX_EVENTBUF_POST			32
#define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST		8
#define DHD_H2D_INFORING_MAX_BUF_POST			32
#define DHD_MAX_TSBUF_POST			8

#define DHD_PROT_FUNCS	43

/* Length of buffer in host for bus throughput measurement */
#define DHD_BUS_TPUT_BUF_LEN 2048

#define TXP_FLUSH_NITEMS

/* optimization to write "n" tx items at a time to ring */
#define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT	48

#define RING_NAME_MAX_LENGTH		24
#define CTRLSUB_HOSTTS_MEESAGE_SIZE		1024
/* Giving room before ioctl_trans_id rolls over. */
#define BUFFER_BEFORE_ROLLOVER 300

/* 512K memory + 32K registers */
#define SNAPSHOT_UPLOAD_BUF_SIZE	((512 + 32) * 1024)

struct msgbuf_ring; /* ring context for common and flow rings */

/**
 * PCIE D2H DMA Complete Sync Modes
 *
 * Firmware may interrupt the host before the D2H Mem2Mem DMA completes into
 * host system memory. A WAR using one of 4 approaches is needed:
 * 1. Dongle places a modulo-253 seqnum in the last word of each D2H message.
 * 2. XOR checksum, with an epoch# in each work item. Dongle computes an XOR
 *    checksum and writes it in the last word of each work item. Each work
 *    item also carries a seqnum = sequence number % 253.
 * 3. Read Barrier: Dongle does a host memory read access prior to posting an
 *    interrupt, ensuring that the D2H data transfer indeed completed.
 * 4. Dongle DMA's all indices after producing items in the D2H ring, flushing
 *    ring contents before the indices.
 *
 * Host does not sync for DMA to complete with option #3 or #4, and a noop sync
 * callback (see dhd_prot_d2h_sync_none) may be bound.
 *
 * Dongle advertises the host-side sync mechanism requirements.
 */

#define PCIE_D2H_SYNC_WAIT_TRIES    (512U)
#define PCIE_D2H_SYNC_NUM_OF_STEPS  (5U)
#define PCIE_D2H_SYNC_DELAY         (100UL)	/* in terms of usecs */

#define HWA_DB_TYPE_RXPOST	(0x0050)
#define HWA_DB_TYPE_TXCPLT	(0x0060)
#define HWA_DB_TYPE_RXCPLT	(0x0170)
#define HWA_DB_INDEX_VALUE(val)	((uint32)(val) << 16)

#define HWA_ENAB_BITMAP_RXPOST	(1U << 0)	/* 1A */
#define HWA_ENAB_BITMAP_RXCPLT	(1U << 1)	/* 2B */
#define HWA_ENAB_BITMAP_TXCPLT	(1U << 2)	/* 4B */

/**
 * Custom callback attached based upon the D2H DMA Sync mode advertised by the
 * dongle.
 *
 * On success: return cmn_msg_hdr_t::msg_type
 * On failure: return 0 (invalid msg_type)
 */
typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
                                volatile cmn_msg_hdr_t *msg, int msglen);
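
/*
 * Dispatch sketch (illustrative only, not the verbatim consumer loop): the
 * proto layer invokes the bound callback on each work item before consuming
 * it, along the lines of:
 *
 *   uint8 msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, ring->item_len);
 *   if (msg_type == MSG_TYPE_INVALID) {
 *       // sync failed; the item is skipped (see dhd_prot_d2h_sync_livelock)
 *   }
 */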

/**
 * Custom callback attached based upon the D2H DMA Sync mode advertised by the
 * dongle. For EDL messages.
 *
 * On success: return cmn_msg_hdr_t::msg_type
 * On failure: return 0 (invalid msg_type)
 */
#ifdef EWP_EDL
typedef int (* d2h_edl_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
                                volatile cmn_msg_hdr_t *msg);
#endif /* EWP_EDL */

/*
 * +----------------------------------------------------------------------------
 *
 * RingIds and FlowIds are not equivalent, as ringids include D2H rings whereas
 * flowids do not.
 *
 * Dongle advertises the max H2D rings as max_sub_queues = 'N', which includes
 * the H2D common rings as well as the (N - BCMPCIE_H2D_COMMON_MSGRINGS) flowrings.
 *
 * Here is a sample mapping (based on PCIE Full Dongle Rev5), where
 *  BCMPCIE_H2D_COMMON_MSGRINGS = 2, i.e. 2 H2D common rings,
 *  BCMPCIE_COMMON_MSGRINGS     = 5, i.e. include 3 D2H common rings.
 *
 *  H2D Control  Submit   RingId = 0        FlowId = 0 reserved never allocated
 *  H2D RxPost   Submit   RingId = 1        FlowId = 1 reserved never allocated
 *
 *  D2H Control  Complete RingId = 2
 *  D2H Transmit Complete RingId = 3
 *  D2H Receive  Complete RingId = 4
 *
 *  H2D TxPost   FLOWRING RingId = 5         FlowId = 2     (1st flowring)
 *  H2D TxPost   FLOWRING RingId = 6         FlowId = 3     (2nd flowring)
 *  H2D TxPost   FLOWRING RingId = 3 + (N-1) FlowId = (N-1) (last flowring)
 *
 * When TxPost FlowId(s) are allocated, the FlowIds [0..FLOWID_RESERVED) are
 * unused, where FLOWID_RESERVED is BCMPCIE_H2D_COMMON_MSGRINGS.
 *
 * Example: when a system supports 4 bc/mc and 128 uc flowrings, with
 * BCMPCIE_H2D_COMMON_MSGRINGS = 2 and BCMPCIE_COMMON_MSGRINGS = 5, the
 * FlowId values would be in the range [2..133] and the corresponding
 * RingId values would be in the range [5..136].
 *
 * The flowid allocator may choose to allocate flowids as:
 *   bc/mc (per virtual interface) in one consecutive range [2..(2+VIFS))
 *   X# of uc flowids in consecutive ranges (per station Id), where X is the
 *   packet's access category (e.g. 4 uc flowids per station).
 *
 * CAUTION:
 * When the DMA indices array feature is used, RingId=5, corresponding to the 0th
 * FLOWRING, will actually use the FlowId as index into the H2D DMA index,
 * since the FlowId truly represents the index in the H2D DMA indices array.
 *
 * Likewise, in the D2H direction, the RingId - BCMPCIE_H2D_COMMON_MSGRINGS
 * will represent the index in the D2H DMA indices array.
 *
 * +----------------------------------------------------------------------------
 */

/* First TxPost Flowring Id */
#define DHD_FLOWRING_START_FLOWID   BCMPCIE_H2D_COMMON_MSGRINGS

/* Determine whether a ringid belongs to a TxPost flowring */
#define DHD_IS_FLOWRING(ringid, max_flow_rings) \
	((ringid) >= BCMPCIE_COMMON_MSGRINGS && \
	(ringid) < ((max_flow_rings) + BCMPCIE_COMMON_MSGRINGS))

/* Convert a H2D TxPost FlowId to a MsgBuf RingId */
#define DHD_FLOWID_TO_RINGID(flowid) \
	(BCMPCIE_COMMON_MSGRINGS + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS))

/* Convert a MsgBuf RingId to a H2D TxPost FlowId */
#define DHD_RINGID_TO_FLOWID(ringid) \
	(BCMPCIE_H2D_COMMON_MSGRINGS + ((ringid) - BCMPCIE_COMMON_MSGRINGS))

/* Convert a H2D MsgBuf RingId to an offset index into the H2D DMA indices array.
 * This may be used for the H2D DMA WR index array or H2D DMA RD index array or
 * any array of H2D rings.
 */
#define DHD_H2D_RING_OFFSET(ringid) \
	(((ringid) >= BCMPCIE_COMMON_MSGRINGS) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid))

/* Convert a H2D MsgBuf Flowring Id to an offset index into the H2D DMA indices array.
 * This may be used for IFRM.
 */
#define DHD_H2D_FRM_FLOW_RING_OFFSET(ringid) \
	((ringid) - BCMPCIE_COMMON_MSGRINGS)

/* Convert a D2H MsgBuf RingId to an offset index into the D2H DMA indices array.
 * This may be used for the D2H DMA WR index array or D2H DMA RD index array or
 * any array of D2H rings.
 * The d2h debug ring is located at the end, i.e. after all the tx flow rings
 * and the h2d debug ring.
 * max_h2d_rings: total number of h2d rings
 */
#define DHD_D2H_RING_OFFSET(ringid, max_h2d_rings) \
	((ringid) > (max_h2d_rings) ? \
		((ringid) - (max_h2d_rings)) : \
		((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS))

/* Convert a D2H DMA Indices Offset to a RingId */
#define DHD_D2H_RINGID(offset) \
	((offset) + BCMPCIE_H2D_COMMON_MSGRINGS)
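
/*
 * Worked example (illustrative only), using the Rev5 sample mapping above
 * with BCMPCIE_H2D_COMMON_MSGRINGS = 2 and BCMPCIE_COMMON_MSGRINGS = 5:
 *   DHD_FLOWID_TO_RINGID(2)  == 5  (1st flowring)
 *   DHD_RINGID_TO_FLOWID(5)  == 2
 *   DHD_H2D_RING_OFFSET(1)   == 1  (common rings index by ringid)
 *   DHD_H2D_RING_OFFSET(5)   == 2  (flowrings index by flowid, see CAUTION)
 *   DHD_D2H_RINGID(0)        == 2  (D2H Control Complete ring)
 */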

#define DHD_DMAH_NULL      ((void*)NULL)

/*
 * Pad a DMA-able buffer by an additional cacheline. If the end of the DMA-able
 * buffer does not occupy the entire cacheline, and another object is placed
 * following the DMA-able buffer, data corruption may occur if the DMA-able
 * buffer is DMA'd into (e.g. the D2H direction) when HW cache coherency is
 * not available.
 */
#if defined(L1_CACHE_BYTES)
#define DHD_DMA_PAD        (L1_CACHE_BYTES)
#else
#define DHD_DMA_PAD        (128)
#endif /* L1_CACHE_BYTES */
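
/*
 * Sizing sketch (illustrative only; 'ring_len' is a hypothetical name): a
 * D2H DMA-able allocation is padded so its tail does not share a cacheline
 * with a following object, e.g.
 *
 *   uint32 alloc_len = ring_len + DHD_DMA_PAD;
 *
 * With L1_CACHE_BYTES = 64, a 4096-byte ring would be allocated as 4160 bytes.
 */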

/*
 * +----------------------------------------------------------------------------
 * Flowring Pool
 *
 * Unlike common rings, which are attached very early on (dhd_prot_attach),
 * flowrings are dynamically instantiated. Moreover, flowrings may require a
 * larger DMA-able buffer. To avoid issues with fragmented cache coherent
 * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
 * The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
 *
 * Each DMA-able buffer may be allocated independently, or may be carved out
 * of a single large contiguous region that is registered with the protocol
 * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
 * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic).
 *
 * No flowring pool action is performed in dhd_prot_attach(), as the number
 * of h2d rings is not yet known.
 *
 * In dhd_prot_init(), the dongle advertised number of h2d rings is used to
 * determine the number of flowrings required, and a pool of msgbuf_rings is
 * allocated and a DMA-able buffer (carved or allocated) is attached.
 * See: dhd_prot_flowrings_pool_attach()
 *
 * A flowring msgbuf_ring object may be fetched from this pool during flowring
 * creation, using the flowid. Likewise, flowrings may be freed back into the
 * pool on flowring deletion.
 * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
 *
 * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
 * are detached (returned back to the carved region or freed), and the pool of
 * msgbuf_ring and any objects allocated against it are freed.
 * See: dhd_prot_flowrings_pool_detach()
 *
 * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a
 * state as if freshly attached. All DMA-able buffers are retained.
 * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
 * pool attach will notice that the pool persists and continue to use it. This
 * will avoid the case of a fragmented DMA-able region.
 *
 * +----------------------------------------------------------------------------
 */

/* Conversion of a flowid to a flowring pool index */
#define DHD_FLOWRINGS_POOL_OFFSET(flowid) \
	((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)

/* Fetch the msgbuf_ring_t from the flowring pool given a flowid */
#define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \
	(msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + \
	    DHD_FLOWRINGS_POOL_OFFSET(flowid)

/* Traverse each flowring in the flowring pool, assigning ring and flowid */
#define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) \
	for ((flowid) = DHD_FLOWRING_START_FLOWID, \
		(ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \
		 (flowid) < ((total_flowrings) + DHD_FLOWRING_START_FLOWID); \
		 (ring)++, (flowid)++)
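
/*
 * Usage sketch (illustrative only; 'dhd', 'prot' and 'nflowrings' are assumed
 * to be in scope):
 *
 *   msgbuf_ring_t *ring;
 *   uint16 flowid;
 *   FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, nflowrings) {
 *       dhd_prot_ring_reset(dhd, ring);
 *   }
 */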

/* Used in loopback tests */
typedef struct dhd_dmaxfer {
	dhd_dma_buf_t srcmem;
	dhd_dma_buf_t dstmem;
	uint32        srcdelay;
	uint32        destdelay;
	uint32        len;
	bool          in_progress;
	uint64        start_usec;
	uint64        time_taken;
	uint32        d11_lpbk;
	int           status;
} dhd_dmaxfer_t;

/**
 * msgbuf_ring : This object manages the host side ring that includes a DMA-able
 * buffer, the WR and RD indices, ring parameters such as the maximum number of
 * items and the length of each item, and other miscellaneous runtime state.
 * A msgbuf_ring may be used to represent a H2D or D2H common ring or a
 * H2D TxPost ring as specified in the PCIE FullDongle Spec.
 * Ring parameters are conveyed to the dongle, which maintains its own peer end
 * ring state. Depending on whether the DMA Indices feature is supported, the
 * host will update the WR/RD index in the DMA indices array in host memory or
 * directly in dongle memory.
 */
typedef struct msgbuf_ring {
	bool           inited;
	uint16         idx;       /* ring id */
	uint16         rd;        /* read index */
	uint16         curr_rd;   /* read index for debug */
	uint16         wr;        /* write index */
	uint16         max_items; /* maximum number of items in ring */
	uint16         item_len;  /* length of each item in the ring */
	sh_addr_t      base_addr; /* LITTLE ENDIAN formatted: base address */
	dhd_dma_buf_t  dma_buf;   /* DMA-able buffer: pa, va, len, dmah, secdma */
	uint32         seqnum;    /* next expected item's sequence number */
#ifdef TXP_FLUSH_NITEMS
	void           *start_addr;
	/* # of messages on ring not yet announced to dongle */
	uint16         pend_items_count;
#endif /* TXP_FLUSH_NITEMS */

	uint8   ring_type;
	uint16  hwa_db_type;	  /* hwa type non-zero for Data path rings */
	uint8   n_completion_ids;
	bool    create_pending;
	uint16  create_req_id;
	uint8   current_phase;
	uint16	compeltion_ring_ids[MAX_COMPLETION_RING_IDS_ASSOCIATED];
	uchar		name[RING_NAME_MAX_LENGTH];
	uint32		ring_mem_allocated;
	void	*ring_lock;
} msgbuf_ring_t;

#define DHD_RING_BGN_VA(ring)           ((ring)->dma_buf.va)
#define DHD_RING_END_VA(ring) \
	((uint8 *)(DHD_RING_BGN_VA((ring))) + \
	 (((ring)->max_items - 1) * (ring)->item_len))
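
/*
 * Worked example (illustrative only): for a ring with max_items = 256 and
 * item_len = 16, DHD_RING_END_VA() points at the start of the last item:
 *   end_va = base_va + (256 - 1) * 16 = base_va + 0x0FF0
 */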

/* This can be overwritten by a module parameter defined in dhd_linux.c
 * or by the dhd iovar h2d_max_txpost.
 */
int h2d_max_txpost = H2DRING_TXPOST_MAX_ITEM;

/** DHD protocol handle. Is an opaque type to other DHD software layers. */
typedef struct dhd_prot {
	osl_t *osh;		/* OSL handle */
	uint16 rxbufpost_sz;
	uint16 rxbufpost;
	uint16 max_rxbufpost;
	uint16 max_eventbufpost;
	uint16 max_ioctlrespbufpost;
	uint16 max_tsbufpost;
	uint16 max_infobufpost;
	uint16 infobufpost;
	uint16 cur_event_bufs_posted;
	uint16 cur_ioctlresp_bufs_posted;
	uint16 cur_ts_bufs_posted;

	/* Flow control mechanism based on active transmits pending */
	osl_atomic_t active_tx_count; /* increments/decrements on every packet tx/tx_status */
	uint16 h2d_max_txpost;
	uint16 txp_threshold;  /* optimization to write "n" tx items at a time to ring */

	/* MsgBuf Ring info: has a dhd_dma_buf that is dynamically allocated */
	msgbuf_ring_t h2dring_ctrl_subn; /* H2D ctrl message submission ring */
	msgbuf_ring_t h2dring_rxp_subn; /* H2D RxBuf post ring */
	msgbuf_ring_t d2hring_ctrl_cpln; /* D2H ctrl completion ring */
	msgbuf_ring_t d2hring_tx_cpln; /* D2H Tx complete message ring */
	msgbuf_ring_t d2hring_rx_cpln; /* D2H Rx complete message ring */
	msgbuf_ring_t *h2dring_info_subn; /* H2D info submission ring */
	msgbuf_ring_t *d2hring_info_cpln; /* D2H info completion ring */
	msgbuf_ring_t *d2hring_edl; /* D2H Enhanced Debug Lane (EDL) ring */

	msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowrings */
	dhd_dma_buf_t flowrings_dma_buf; /* Contiguous DMA buffer for flowrings */
	uint16        h2d_rings_total; /* total H2D (common rings + flowrings) */

	uint32		rx_dataoffset;

	dhd_mb_ring_t	mb_ring_fn;	/* called when dongle needs to be notified of new msg */
	dhd_mb_ring_2_t	mb_2_ring_fn;	/* called when dongle needs to be notified of new msg */

	/* ioctl related resources */
	uint8 ioctl_state;
	int16 ioctl_status;		/* status returned from dongle */
	uint16 ioctl_resplen;
	dhd_ioctl_recieved_status_t ioctl_received;
	uint curr_ioctl_cmd;
	dhd_dma_buf_t	retbuf;		/* For holding ioctl response */
	dhd_dma_buf_t	ioctbuf;	/* For holding ioctl request */

	dhd_dma_buf_t	d2h_dma_scratch_buf;	/* For holding d2h scratch */

	/* DMA-able arrays for holding WR and RD indices */
	uint32          rw_index_sz; /* Size of a RD or WR index in dongle */
	dhd_dma_buf_t   h2d_dma_indx_wr_buf;	/* Array of H2D WR indices */
	dhd_dma_buf_t	h2d_dma_indx_rd_buf;	/* Array of H2D RD indices */
	dhd_dma_buf_t	d2h_dma_indx_wr_buf;	/* Array of D2H WR indices */
	dhd_dma_buf_t	d2h_dma_indx_rd_buf;	/* Array of D2H RD indices */
	dhd_dma_buf_t h2d_ifrm_indx_wr_buf;	/* Array of H2D WR indices for ifrm */

	dhd_dma_buf_t	host_bus_throughput_buf; /* bus throughput measure buffer */

	dhd_dma_buf_t   *flowring_buf;    /* pool of flow ring buf */
	uint32			flowring_num;

	d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */
#ifdef EWP_EDL
	d2h_edl_sync_cb_t d2h_edl_sync_cb; /* Sync on EDL D2H DMA done: SEQNUM or XORCSUM */
#endif /* EWP_EDL */
	ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */
	ulong d2h_sync_wait_tot; /* total wait loops */

	dhd_dmaxfer_t	dmaxfer; /* for test/DMA loopback */

	uint16		ioctl_seq_no;
	uint16		data_seq_no;
	uint16		ioctl_trans_id;
	void		*pktid_ctrl_map; /* a pktid maps to a packet and its metadata */
	void		*pktid_rx_map;	/* pktid map for rx path */
	void		*pktid_tx_map;	/* pktid map for tx path */
	bool		metadata_dbg;
	void		*pktid_map_handle_ioctl;
#ifdef DHD_MAP_PKTID_LOGGING
	void		*pktid_dma_map;	/* pktid map for DMA MAP */
	void		*pktid_dma_unmap; /* pktid map for DMA UNMAP */
#endif /* DHD_MAP_PKTID_LOGGING */
	uint32		pktid_depleted_cnt;	/* pktid depleted count */
	/* netif tx queue stop count */
	uint8		pktid_txq_stop_cnt;
	/* netif tx queue start count */
	uint8		pktid_txq_start_cnt;
	uint64		ioctl_fillup_time;	/* timestamp for ioctl fillup */
	uint64		ioctl_ack_time;		/* timestamp for ioctl ack */
	uint64		ioctl_cmplt_time;	/* timestamp for ioctl completion */

	/* Applications/utilities can read tx and rx metadata using IOVARs */
	uint16		rx_metadata_offset;
	uint16		tx_metadata_offset;

#if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
	/* Host's soft doorbell configuration */
	bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS];
#endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */

	/* Work Queues to be used by the producer and the consumer, and threshold
	 * when the WRITE index must be synced to consumer's workq
	 */
#if defined(DHD_LB_TXC)
	uint32 tx_compl_prod_sync ____cacheline_aligned;
	bcm_workq_t tx_compl_prod, tx_compl_cons;
#endif /* DHD_LB_TXC */
#if defined(DHD_LB_RXC)
	uint32 rx_compl_prod_sync ____cacheline_aligned;
	bcm_workq_t rx_compl_prod, rx_compl_cons;
#endif /* DHD_LB_RXC */

	dhd_dma_buf_t	fw_trap_buf; /* firmware trap buffer */

	uint32  host_ipc_version; /* Host supported IPC rev */
	uint32  device_ipc_version; /* FW supported IPC rev */
	uint32  active_ipc_version; /* Host advertised IPC rev */
	dhd_dma_buf_t   hostts_req_buf; /* For holding host timestamp request buf */
	bool    hostts_req_buf_inuse;
	bool    rx_ts_log_enabled;
	bool    tx_ts_log_enabled;
	bool no_retry;
	bool no_aggr;
	bool fixed_rate;
	dhd_dma_buf_t	host_scb_buf;	/* scb host offload buffer */
#ifdef DHD_HP2P
	msgbuf_ring_t *d2hring_hp2p_txcpl; /* D2H HP2P Tx completion ring */
	msgbuf_ring_t *d2hring_hp2p_rxcpl; /* D2H HP2P Rx completion ring */
#endif /* DHD_HP2P */
	bool no_tx_resource;
} dhd_prot_t;

#ifdef DHD_EWPR_VER2
#define HANG_INFO_BASE64_BUFFER_SIZE 640
#endif /* DHD_EWPR_VER2 */

#ifdef DHD_DUMP_PCIE_RINGS
static
int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
	const void *user_buf, unsigned long *file_posn);
#ifdef EWP_EDL
static
int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf,
	unsigned long *file_posn);
#endif /* EWP_EDL */
#endif /* DHD_DUMP_PCIE_RINGS */

extern bool dhd_timesync_delay_post_bufs(dhd_pub_t *dhdp);
extern void dhd_schedule_dmaxfer_free(dhd_pub_t* dhdp, dmaxref_mem_map_t *dmmap);
/* Convert a dmaaddr_t to a base_addr with htol operations */
static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa);

/* APIs for managing a DMA-able buffer */
static int  dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);

/* msgbuf ring management */
static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring,
	const char *name, uint16 max_items, uint16 len_item, uint16 ringid);
static void dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring);
static void dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring);
static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring);
static void dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf);

/* Pool of pre-allocated msgbuf_ring_t with DMA-able buffers for Flowrings */
static int  dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd);
static void dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd);
static void dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd);

/* Fetch and Release a flowring msgbuf_ring from the flowring pool */
static msgbuf_ring_t *dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd,
	uint16 flowid);
/* see also dhd_prot_flowrings_pool_release() in dhd_prot.h */

/* Producer: Allocate space in a msgbuf ring */
static void* dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
	uint16 nitems, uint16 *alloced, bool exactly_nitems);
static void* dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems,
	uint16 *alloced, bool exactly_nitems);

/* Consumer: Determine the location where the next message may be consumed */
static uint8* dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring,
	uint32 *available_len);

/* Producer (WR index update) or Consumer (RD index update) indication */
static void dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t *ring,
	void *p, uint16 len);
static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring);

static INLINE int dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
	dhd_dma_buf_t *dma_buf, uint32 bufsz);

/* Set/Get a RD or WR index in the array of indices */
/* See also: dhd_prot_dma_indx_init() */
void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type,
	uint16 ringid);
static uint16 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid);

/* Locate a packet given a pktid */
static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype,
	bool free_pktid);
/* Locate a packet given a PktId and free it. */
static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send);

static int dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
	void *buf, uint len, uint8 action);
static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
	void *buf, uint len, uint8 action);
static int dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf);
static int dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd,
	void *buf, int ifidx);

/* Post buffers for Rx, control ioctl response and events */
static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msgid, uint32 max_to_post);
static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub);
static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub);
static void dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid);
static int dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid);
static int dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *pub);

static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt);

/* D2H Message handling */
static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len);

/* D2H Message handlers */
static void dhd_prot_noop(dhd_pub_t *dhd, void *msg);
static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_event_process(dhd_pub_t *dhd, void *msg);

/* Loopback test with dongle */
static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma);
static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay,
	uint destdelay, dhd_dmaxfer_t *dma);
static void dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg);

/* Flowring management communication with dongle */
static void dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg);
static void dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg);

/* Monitor Mode */
#ifdef WL_MONITOR
extern bool dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx);
extern void dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx);
#endif /* WL_MONITOR */

/* Configure a soft doorbell per D2H ring */
static void dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd);
static void dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg);
static void dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf);
static void dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf);
static void dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf);
static void dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf);
static void dhd_prot_detach_info_rings(dhd_pub_t *dhd);
#ifdef DHD_HP2P
static void dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd);
#endif /* DHD_HP2P */
#ifdef EWP_EDL
static void dhd_prot_detach_edl_rings(dhd_pub_t *dhd);
#endif /* EWP_EDL */
static void dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf);
static void dhd_prot_process_snapshot_complete(dhd_pub_t *dhd, void *buf);

#ifdef DHD_HP2P
static void dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus);
static void dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus);
static void dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid);
static void dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc);
#endif /* DHD_HP2P */
typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg);

/** callback functions for messages generated by the dongle */
#define MSG_TYPE_INVALID 0

static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = {
	dhd_prot_noop, /* 0 is MSG_TYPE_INVALID */
	dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */
	dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */
	NULL,
	dhd_prot_flow_ring_create_response_process, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */
	NULL,
	dhd_prot_flow_ring_delete_response_process, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */
	NULL,
	dhd_prot_flow_ring_flush_response_process, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */
	NULL,
	dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */
	NULL,
	dhd_prot_ioctcmplt_process, /* MSG_TYPE_IOCTL_CMPLT */
	NULL,
	dhd_prot_event_process, /* MSG_TYPE_WL_EVENT */
	NULL,
	dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */
	NULL,
	NULL,	/* MSG_TYPE_RX_CMPLT use dedicated handler */
	NULL,
	dhd_msgbuf_dmaxfer_process, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */
	NULL, /* MSG_TYPE_FLOW_RING_RESUME */
	dhd_prot_process_flow_ring_resume_response, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */
	NULL, /* MSG_TYPE_FLOW_RING_SUSPEND */
	dhd_prot_process_flow_ring_suspend_response, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */
	NULL, /* MSG_TYPE_INFO_BUF_POST */
	dhd_prot_process_infobuf_complete, /* MSG_TYPE_INFO_BUF_CMPLT */
	NULL, /* MSG_TYPE_H2D_RING_CREATE */
	NULL, /* MSG_TYPE_D2H_RING_CREATE */
	dhd_prot_process_h2d_ring_create_complete, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */
	dhd_prot_process_d2h_ring_create_complete, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */
	NULL, /* MSG_TYPE_H2D_RING_CONFIG */
	NULL, /* MSG_TYPE_D2H_RING_CONFIG */
	NULL, /* MSG_TYPE_H2D_RING_CONFIG_CMPLT */
	dhd_prot_process_d2h_ring_config_complete, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT */
	NULL, /* MSG_TYPE_H2D_MAILBOX_DATA */
	dhd_prot_process_d2h_mb_data, /* MSG_TYPE_D2H_MAILBOX_DATA */
	NULL,	/* MSG_TYPE_TIMSTAMP_BUFPOST */
	NULL,	/* MSG_TYPE_HOSTTIMSTAMP */
	dhd_prot_process_d2h_host_ts_complete,	/* MSG_TYPE_HOSTTIMSTAMP_CMPLT */
	dhd_prot_process_fw_timestamp,	/* MSG_TYPE_FIRMWARE_TIMESTAMP */
	NULL,	/* MSG_TYPE_SNAPSHOT_UPLOAD */
	dhd_prot_process_snapshot_complete,	/* MSG_TYPE_SNAPSHOT_CMPLT */
};
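
/*
 * Dispatch sketch (illustrative only): dhd_prot_process_msgtype() is assumed
 * to index this table by cmn_msg_hdr_t::msg_type once the bound D2H sync
 * callback has validated the work item, along the lines of:
 *
 *   if (msg_type < DHD_PROT_FUNCS && table_lookup[msg_type] != NULL) {
 *       table_lookup[msg_type](dhd, (void *)msg);
 *   }
 */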

#ifdef DHD_RX_CHAINING

#define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \
	(dhd_wet_chainable(dhd) && \
	dhd_rx_pkt_chainable((dhd), (ifidx)) && \
	!ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \
	!ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \
	!eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \
	!eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \
	((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \
	((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \
	(((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6))))

static INLINE void BCMFASTPATH dhd_rxchain_reset(rxchain_info_t *rxchain);
static void BCMFASTPATH dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx);
static void BCMFASTPATH dhd_rxchain_commit(dhd_pub_t *dhd);

#define DHD_PKT_CTF_MAX_CHAIN_LEN	64

#endif /* DHD_RX_CHAINING */

#define DHD_LPBKDTDUMP_ON()	(dhd_msg_level & DHD_LPBKDTDUMP_VAL)

static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd);

bool
dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info)
{
	msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)prot_info;
	uint16 rd, wr;
	bool ret;

	if (dhd->dma_d2h_ring_upd_support) {
		wr = flow_ring->wr;
	} else {
		dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
	}
	if (dhd->dma_h2d_ring_upd_support) {
		rd = flow_ring->rd;
	} else {
		dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
	}
	ret = (wr == rd) ? TRUE : FALSE;
	return ret;
}

void
dhd_prot_dump_ring_ptrs(void *prot_info)
{
	msgbuf_ring_t *ring = (msgbuf_ring_t *)prot_info;
	DHD_ERROR(("%s curr_rd: %d rd: %d wr: %d \n", __FUNCTION__,
		ring->curr_rd, ring->rd, ring->wr));
}

uint16
dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd)
{
	return (uint16)h2d_max_txpost;
}

void
dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost)
{
	h2d_max_txpost = max_txpost;
}

/**
 * D2H DMA to completion callback handlers. Based on the mode advertised by the
 * dongle through the PCIE shared region, the appropriate callback will be
 * registered in the proto layer to be invoked prior to processing any message
 * from a D2H DMA ring. If the dongle uses a read barrier or another mode that
 * does not require host participation, then a noop callback handler will be
 * bound that simply returns the msg_type.
 */
static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring,
                                       uint32 tries, volatile uchar *msg, int msglen);
static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
                                      volatile cmn_msg_hdr_t *msg, int msglen);
static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
                                       volatile cmn_msg_hdr_t *msg, int msglen);
static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
                                    volatile cmn_msg_hdr_t *msg, int msglen);
static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd);
static int dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
	uint16 ring_type, uint32 id);
static int dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
	uint8 type, uint32 id);

/**
 * dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has
 * not completed, a livelock condition occurs. Host will avert this livelock by
 * dropping this message and moving to the next. This dropped message can lead
 * to a packet leak, or to something worse in the case where the dropped message
 * happens to be a control response.
 * Here we will log this condition. One may choose to reboot the dongle.
 */
static void
dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring, uint32 tries,
                           volatile uchar *msg, int msglen)
{
	uint32 ring_seqnum = ring->seqnum;

	if (dhd_query_bus_erros(dhd)) {
		return;
	}

	DHD_ERROR((
		"LIVELOCK DHD<%p> ring<%s> msg_seqnum<%u> ring_seqnum<%u:%u> tries<%u> max<%lu>"
		" tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d> rd<%d> wr<%d>\n",
		dhd, ring->name, msg_seqnum, ring_seqnum, ring_seqnum % D2H_EPOCH_MODULO, tries,
		dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot,
		ring->dma_buf.va, msg, ring->curr_rd, ring->rd, ring->wr));

	dhd_prhex("D2H MsgBuf Failure", msg, msglen, DHD_ERROR_VAL);

	/* Try to resume if already suspended or suspend in progress */
#ifdef DHD_PCIE_RUNTIMEPM
	dhdpcie_runtime_bus_wake(dhd, CAN_SLEEP(), __builtin_return_address(0));
#endif /* DHD_PCIE_RUNTIMEPM */

	/* Skip if still suspended or a suspend is in progress */
	if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) {
		DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
			__FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state));
		goto exit;
	}

	dhd_bus_dump_console_buffer(dhd->bus);
	dhd_prot_debug_info_print(dhd);

#ifdef DHD_FW_COREDUMP
	if (dhd->memdump_enabled) {
		/* collect core dump */
		dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK;
		dhd_bus_mem_dump(dhd);
	}
#endif /* DHD_FW_COREDUMP */

exit:
	dhd_schedule_reset(dhd);

#ifdef OEM_ANDROID
#ifdef SUPPORT_LINKDOWN_RECOVERY
#ifdef CONFIG_ARCH_MSM
	dhd->bus->no_cfg_restore = 1;
#endif /* CONFIG_ARCH_MSM */
	dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK;
	dhd_os_send_hang_message(dhd);
#endif /* SUPPORT_LINKDOWN_RECOVERY */
#endif /* OEM_ANDROID */
	dhd->livelock_occured = TRUE;
}

/**
 * dhd_prot_d2h_sync_seqnum - Sync on a D2H DMA completion using the SEQNUM
 * mode. The sequence number is always in the last word of a message.
 */
static uint8 BCMFASTPATH
dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
                         volatile cmn_msg_hdr_t *msg, int msglen)
{
	uint32 tries;
	uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
	int num_words = msglen / sizeof(uint32); /* num of 32bit words */
	volatile uint32 *marker = (volatile uint32 *)msg + (num_words - 1); /* last word */
	dhd_prot_t *prot = dhd->prot;
	uint32 msg_seqnum;
	uint32 step = 0;
	uint32 delay = PCIE_D2H_SYNC_DELAY;
	uint32 total_tries = 0;

	ASSERT(msglen == ring->item_len);

	BCM_REFERENCE(delay);
	/*
	 * For retries we have to use some sort of stepper algorithm.
	 * We see that every time the Dongle comes out of the D3
	 * Cold state, the first D2H mem2mem DMA takes more time to
	 * complete, leading to livelock issues.
	 *
	 * Case 1 - Apart from the Host CPU, some other bus master is
	 * accessing the DDR port, probably a page close to the ring,
	 * so the PCIE does not get a chance to update the memory.
	 * Solution - Increase the number of tries.
	 *
	 * Case 2 - The 50usec delay given by the Host CPU is not
	 * sufficient for the PCIe RC to start its work.
	 * In this case the breathing time of 50usec given by
	 * the Host CPU is not sufficient.
	 * Solution: Increase the delay in a stepper fashion.
	 * This is done to ensure that there is no
	 * unwanted extra delay introduced in normal conditions.
	 */
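	/*
	 * Worst-case wait budget (illustrative arithmetic, ignoring the cache
	 * invalidate and CPU relax overhead): each step performs
	 * PCIE_D2H_SYNC_WAIT_TRIES (512) tries, each delaying
	 * PCIE_D2H_SYNC_DELAY * step usecs, so the total delay is
	 * 512 * 100us * (1 + 2 + 3 + 4 + 5) = 768,000 usecs (~768 ms) before
	 * the livelock handler is invoked.
	 */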
	for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
		for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
			msg_seqnum = *marker;
			if (ltoh32(msg_seqnum) == ring_seqnum) { /* DMA up to last word done */
				ring->seqnum++; /* next expected sequence number */
				/* Check for the LIVELOCK induce flag, which is set by firing
				 * a dhd iovar to induce a LIVELOCK error. If the flag is set,
				 * MSG_TYPE_INVALID is returned, which results in a LIVELOCK
				 * error.
				 */
				if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
					goto dma_completed;
				}
			}

			total_tries = (uint32)(((step - 1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries);

			if (total_tries > prot->d2h_sync_wait_max)
				prot->d2h_sync_wait_max = total_tries;

			OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
			OSL_CPU_RELAX(); /* CPU relax for the msg_seqnum value to update */
			OSL_DELAY(delay * step); /* Add stepper delay */

		} /* for PCIE_D2H_SYNC_WAIT_TRIES */
	} /* for PCIE_D2H_SYNC_NUM_OF_STEPS */

	dhd_prot_d2h_sync_livelock(dhd, msg_seqnum, ring, total_tries,
		(volatile uchar *) msg, msglen);

	ring->seqnum++; /* skip this message ... leak of a pktid */
	return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */

dma_completed:

	prot->d2h_sync_wait_tot += tries;
	return msg->msg_type;
}

/**
 * dhd_prot_d2h_sync_xorcsum - Sync on a D2H DMA completion using the XORCSUM
 * mode. The xorcsum is placed in the last word of a message. Dongle will also
 * place a seqnum in the epoch field of the cmn_msg_hdr.
 */
static uint8 BCMFASTPATH
dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
                          volatile cmn_msg_hdr_t *msg, int msglen)
{
	uint32 tries;
	uint32 prot_checksum = 0; /* computed checksum */
	int num_words = msglen / sizeof(uint32); /* num of 32bit words */
	uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
	dhd_prot_t *prot = dhd->prot;
	uint32 step = 0;
	uint32 delay = PCIE_D2H_SYNC_DELAY;
	uint32 total_tries = 0;

	ASSERT(msglen == ring->item_len);

	BCM_REFERENCE(delay);
	/*
	 * For retries we have to use some sort of stepper algorithm.
	 * We see that every time the Dongle comes out of the D3
	 * Cold state, the first D2H mem2mem DMA takes more time to
	 * complete, leading to livelock issues.
	 *
	 * Case 1 - Apart from the Host CPU, some other bus master is
	 * accessing the DDR port, probably a page close to the ring,
	 * so the PCIE does not get a chance to update the memory.
	 * Solution - Increase the number of tries.
	 *
	 * Case 2 - The 50usec delay given by the Host CPU is not
	 * sufficient for the PCIe RC to start its work.
	 * In this case the breathing time of 50usec given by
	 * the Host CPU is not sufficient.
	 * Solution: Increase the delay in a stepper fashion.
	 * This is done to ensure that there is no
	 * unwanted extra delay introduced in normal conditions.
	 */
	for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
		for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
			/* First verify that the sequence number has been updated;
			 * only then check the xorcsum.
			 * Once both the seqnum and the xorcsum are proper, the
			 * complete message has arrived.
			 */
			if (msg->epoch == ring_seqnum) {
				prot_checksum = bcm_compute_xor32((volatile uint32 *)msg,
					num_words);
				if (prot_checksum == 0U) { /* checksum is OK */
					ring->seqnum++; /* next expected sequence number */
					/* Check for the LIVELOCK induce flag, which is set by
					 * firing a dhd iovar to induce a LIVELOCK error. If the
					 * flag is set, MSG_TYPE_INVALID is returned, which
					 * results in a LIVELOCK error.
					 */
					if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
						goto dma_completed;
					}
				}
			}

			total_tries = ((step - 1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;

			if (total_tries > prot->d2h_sync_wait_max)
				prot->d2h_sync_wait_max = total_tries;

			OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
			OSL_CPU_RELAX(); /* CPU relax for the msg_seqnum value to update */
			OSL_DELAY(delay * step); /* Add stepper delay */

		} /* for PCIE_D2H_SYNC_WAIT_TRIES */
	} /* for PCIE_D2H_SYNC_NUM_OF_STEPS */

	DHD_ERROR(("%s: prot_checksum = 0x%x\n", __FUNCTION__, prot_checksum));
	dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
		(volatile uchar *) msg, msglen);

	ring->seqnum++; /* skip this message ... leak of a pktid */
	return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */

dma_completed:

	prot->d2h_sync_wait_tot += tries;
	return msg->msg_type;
}

/**
 * dhd_prot_d2h_sync_none - Dongle ensures that the DMA will complete, so the
 * host need not try to sync. This noop sync handler will be bound when the
 * dongle advertises that neither the SEQNUM nor the XORCSUM mode of DMA sync
 * is required.
 */
static uint8 BCMFASTPATH
dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
                       volatile cmn_msg_hdr_t *msg, int msglen)
{
	/* Check for the LIVELOCK induce flag, which is set by firing
	 * a dhd iovar to induce a LIVELOCK error. If the flag is set,
	 * MSG_TYPE_INVALID is returned, which results in a LIVELOCK error.
	 */
	if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
		DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
		return MSG_TYPE_INVALID;
	} else {
		return msg->msg_type;
	}
}

#ifdef EWP_EDL
/**
 * dhd_prot_d2h_sync_edl - Sync on a D2H DMA completion by validating the cmn_msg_hdr_t
 * header values at both the beginning and end of the payload.
 * The cmn_msg_hdr_t is placed at the start and end of the payload
 * in each work item in the EDL ring.
 * Dongle will place a seqnum inside the cmn_msg_hdr_t 'epoch' field
 * and the length of the payload in the 'request_id' field.
 * Structure of each work item in the EDL ring:
 * | cmn_msg_hdr_t | payload (var len) | cmn_msg_hdr_t |
 * NOTE: - it was felt that calculating the xorcsum for the entire payload (max
 * length of 1648 bytes) is too costly on the dongle side and might take up too
 * many ARM cycles, hence the xorcsum sync method is not being used for the EDL
 * ring.
 */
static int
BCMFASTPATH(dhd_prot_d2h_sync_edl)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
                          volatile cmn_msg_hdr_t *msg)
{
	uint32 tries;
	int msglen = 0, len = 0;
	uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
	dhd_prot_t *prot = dhd->prot;
	uint32 step = 0;
	uint32 delay = PCIE_D2H_SYNC_DELAY;
	uint32 total_tries = 0;
	volatile cmn_msg_hdr_t *trailer = NULL;
	volatile uint8 *buf = NULL;
	bool valid_msg = FALSE;

	BCM_REFERENCE(delay);
	/*
	 * For retries we have to use some sort of stepper algorithm.
	 * We see that every time the Dongle comes out of the D3
	 * Cold state, the first D2H mem2mem DMA takes more time to
	 * complete, leading to livelock issues.
	 *
	 * Case 1 - Apart from the Host CPU, some other bus master is
	 * accessing the DDR port, probably a page close to the ring,
	 * so the PCIE does not get a chance to update the memory.
	 * Solution - Increase the number of tries.
	 *
	 * Case 2 - The 50usec delay given by the Host CPU is not
	 * sufficient for the PCIe RC to start its work.
	 * In this case the breathing time of 50usec given by
	 * the Host CPU is not sufficient.
	 * Solution: Increase the delay in a stepper fashion.
	 * This is done to ensure that there is no
	 * unwanted extra delay introduced in normal conditions.
	 */
	for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
		for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
			/* First verify that the sequence number has been updated;
			 * if yes, only then validate the header and trailer.
			 * Once the seqnum, header and trailer have been validated, it
			 * means that the complete message has arrived.
			 */
			valid_msg = FALSE;
			if (msg->epoch == ring_seqnum &&
				msg->msg_type == MSG_TYPE_INFO_PYLD &&
				msg->request_id > 0 &&
				msg->request_id <= ring->item_len) {
				/* proceed to check trailer only if header is valid */
				buf = (volatile uint8 *)msg;
				msglen = sizeof(cmn_msg_hdr_t) + msg->request_id;
				buf += msglen;
				if (msglen + sizeof(cmn_msg_hdr_t) <= ring->item_len) {
					trailer = (volatile cmn_msg_hdr_t *)buf;
					valid_msg = (trailer->epoch == ring_seqnum) &&
						(trailer->msg_type == msg->msg_type) &&
						(trailer->request_id == msg->request_id);
					if (!valid_msg) {
						DHD_TRACE(("%s:invalid trailer! seqnum=%u;reqid=%u"
						" expected, seqnum=%u; reqid=%u. Retrying... \n",
						__FUNCTION__, trailer->epoch, trailer->request_id,
						msg->epoch, msg->request_id));
					}
				} else {
					DHD_TRACE(("%s: invalid payload length (%u)! Retrying.. \n",
						__FUNCTION__, msg->request_id));
				}

				if (valid_msg) {
					/* data is OK */
					ring->seqnum++; /* next expected sequence number */
					if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
						goto dma_completed;
					}
				}
			} else {
				DHD_TRACE(("%s: wrong hdr, seqnum expected %u, got %u."
					" msg_type=0x%x, request_id=%u."
					" Retrying...\n",
					__FUNCTION__, ring_seqnum, msg->epoch,
					msg->msg_type, msg->request_id));
			}

			total_tries = ((step - 1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;

			if (total_tries > prot->d2h_sync_wait_max)
				prot->d2h_sync_wait_max = total_tries;

			OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
			OSL_CPU_RELAX(); /* CPU relax for the msg_seqnum value to update */
			OSL_DELAY(delay * step); /* Add stepper delay */

		} /* for PCIE_D2H_SYNC_WAIT_TRIES */
	} /* for PCIE_D2H_SYNC_NUM_OF_STEPS */

	DHD_ERROR(("%s: EDL header check fails !\n", __FUNCTION__));
	DHD_ERROR(("%s: header: seqnum=%u; expected-seqnum=%u"
		" msgtype=0x%x; expected-msgtype=0x%x"
		" length=%u; expected-max-length=%u", __FUNCTION__,
		msg->epoch, ring_seqnum, msg->msg_type, MSG_TYPE_INFO_PYLD,
		msg->request_id, ring->item_len));
	dhd_prhex("msg header bytes: ", (volatile uchar *)msg, sizeof(*msg), DHD_ERROR_VAL);
	if (trailer && msglen > 0 &&
			(msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len) {
		DHD_ERROR(("%s: trailer: seqnum=%u; expected-seqnum=%u"
			" msgtype=0x%x; expected-msgtype=0x%x"
			" length=%u; expected-length=%u", __FUNCTION__,
			trailer->epoch, ring_seqnum, trailer->msg_type, MSG_TYPE_INFO_PYLD,
			trailer->request_id, msg->request_id));
		dhd_prhex("msg trailer bytes: ", (volatile uchar *)trailer,
			sizeof(*trailer), DHD_ERROR_VAL);
	}

	if ((msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len)
		len = msglen + sizeof(cmn_msg_hdr_t);
	else
		len = ring->item_len;

	dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
		(volatile uchar *) msg, len);

	ring->seqnum++; /* skip this message */
	return BCME_ERROR; /* sync failed; caller treats this work item as invalid */

dma_completed:
	DHD_TRACE(("%s: EDL header check pass, seqnum=%u; reqid=%u\n", __FUNCTION__,
		msg->epoch, msg->request_id));

	prot->d2h_sync_wait_tot += tries;
	return BCME_OK;
}

/**
 * dhd_prot_d2h_sync_edl_none - Dongle ensures that the DMA will complete, so
 * the host need not try to sync. This noop sync handler will be bound when the
 * dongle advertises that neither the SEQNUM nor the XORCSUM mode of DMA sync
 * is required.
 */
static int BCMFASTPATH
dhd_prot_d2h_sync_edl_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
                       volatile cmn_msg_hdr_t *msg)
{
	/* Check for the LIVELOCK induce flag, which is set by firing
	 * a dhd iovar to induce a LIVELOCK error. If the flag is set,
	 * MSG_TYPE_INVALID is returned, which results in a LIVELOCK error.
	 */
	if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
		DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
		return BCME_ERROR;
	} else {
		if (msg->msg_type == MSG_TYPE_INFO_PYLD)
			return BCME_OK;
		else
			return msg->msg_type;
	}
}
#endif /* EWP_EDL */

INLINE void
dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason)
{
	/* To synchronize with the previous memory operations, call wmb() */
	OSL_SMP_WMB();
	dhd->prot->ioctl_received = reason;
	/* Call another wmb() to make sure the ioctl_received update is visible
	 * before waking up the waiter.
	 */
	OSL_SMP_WMB();
	dhd_os_ioctl_resp_wake(dhd);
}
1290 
1291 /**
1292  * dhd_prot_d2h_sync_init - Setup the host side DMA sync mode based on what
1293  * dongle advertizes.
1294  */
1295 static void
1296 dhd_prot_d2h_sync_init(dhd_pub_t *dhd)
1297 {
1298 	dhd_prot_t *prot = dhd->prot;
1299 	prot->d2h_sync_wait_max = 0UL;
1300 	prot->d2h_sync_wait_tot = 0UL;
1301 
1302 	prot->d2hring_ctrl_cpln.seqnum = D2H_EPOCH_INIT_VAL;
1303 	prot->d2hring_ctrl_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
1304 
1305 	prot->d2hring_tx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
1306 	prot->d2hring_tx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
1307 
1308 	prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
1309 	prot->d2hring_rx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
1310 
1311 	if (HWA_ACTIVE(dhd)) {
1312 		prot->d2hring_tx_cpln.hwa_db_type =
1313 			(dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_TXCPLT) ? HWA_DB_TYPE_TXCPLT : 0;
1314 		prot->d2hring_rx_cpln.hwa_db_type =
1315 			(dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_RXCPLT) ? HWA_DB_TYPE_RXCPLT : 0;
1316 		DHD_ERROR(("%s: TXCPLT hwa_db_type:0x%x RXCPLT hwa_db_type:0x%x\n",
1317 			__FUNCTION__, prot->d2hring_tx_cpln.hwa_db_type,
1318 			prot->d2hring_rx_cpln.hwa_db_type));
1319 	}
1320 
1321 	if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) {
1322 		prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum;
1323 #ifdef EWP_EDL
1324 		prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
1325 #endif /* EWP_EDL */
1326 		DHD_ERROR(("%s(): D2H sync mechanism is SEQNUM \r\n", __FUNCTION__));
1327 	} else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) {
1328 		prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
1329 #ifdef EWP_EDL
1330 		prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
1331 #endif /* EWP_EDL */
1332 		DHD_ERROR(("%s(): D2H sync mechanism is XORCSUM \r\n", __FUNCTION__));
1333 	} else {
1334 		prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
1335 #ifdef EWP_EDL
1336 		prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl_none;
1337 #endif /* EWP_EDL */
1338 		DHD_ERROR(("%s(): D2H sync mechanism is NONE \r\n", __FUNCTION__));
1339 	}
1340 }
1341 
1342 /**
1343  * dhd_prot_h2d_sync_init - Per H2D common ring, setup the msgbuf ring seqnum
1344  */
1345 static void
1346 dhd_prot_h2d_sync_init(dhd_pub_t *dhd)
1347 {
1348 	dhd_prot_t *prot = dhd->prot;
1349 	prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL;
1350 
1351 	if (HWA_ACTIVE(dhd)) {
1352 		prot->h2dring_rxp_subn.hwa_db_type =
1353 			(dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_RXPOST) ? HWA_DB_TYPE_RXPOST : 0;
1354 		DHD_ERROR(("%s: RXPOST hwa_db_type:0x%x\n",
1355 			__FUNCTION__, prot->h2dring_rxp_subn.hwa_db_type));
1356 	}
1357 
1358 	prot->h2dring_rxp_subn.current_phase = 0;
1359 
1360 	prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL;
1361 	prot->h2dring_ctrl_subn.current_phase = 0;
1362 }
1363 
1364 /* +-----------------  End of PCIE DHD H2D DMA SYNC ------------------------+ */
1365 
1366 /*
1367  * +---------------------------------------------------------------------------+
1368  * PCIE DMA-able buffer. Sets up a dhd_dma_buf_t object, which includes the
1369  * virtual and physical address, the buffer length and the DMA handler.
1370  * A secdma handler is also included in the dhd_dma_buf object.
1371  * +---------------------------------------------------------------------------+
1372  */
1373 
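/*
 * Editor's note: a minimal lifecycle sketch for the dhd_dma_buf_t ADT defined
 * below. The function name is hypothetical and the block is not compiled; it
 * only illustrates the intended alloc -> use -> free ordering.
 */
#if 0 /* illustrative sketch */
static int example_dma_buf_lifecycle(dhd_pub_t *dhd)
{
	dhd_dma_buf_t dma_buf;

	memset(&dma_buf, 0, sizeof(dma_buf)); /* alloc asserts va == NULL, len == 0 */
	if (dhd_dma_buf_alloc(dhd, &dma_buf, 4096) != BCME_OK)
		return BCME_NOMEM; /* allocation or audit failed */

	/* ... program dma_buf.pa into the dongle, access the buffer via dma_buf.va ... */

	dhd_dma_buf_free(dhd, &dma_buf); /* safe even if alloc failed; zeroes the object */
	return BCME_OK;
}
#endif /* illustrative sketch */
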
1374 static INLINE void
1375 dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa)
1376 {
1377 	base_addr->low_addr = htol32(PHYSADDRLO(pa));
1378 	base_addr->high_addr = htol32(PHYSADDRHI(pa));
1379 }
1380 
1381 /**
1382  * dhd_dma_buf_audit - Any audits on a DHD DMA Buffer.
1383  */
1384 static int
1385 dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
1386 {
1387 	uint32 pa_lowaddr, end; /* dongle uses 32bit ptr arithmetic */
1388 	ASSERT(dma_buf);
1389 	pa_lowaddr = PHYSADDRLO(dma_buf->pa);
1390 	ASSERT(PHYSADDRLO(dma_buf->pa) || PHYSADDRHI(dma_buf->pa));
1391 	ASSERT(ISALIGNED(pa_lowaddr, DMA_ALIGN_LEN));
1392 	ASSERT(dma_buf->len != 0);
1393 
1394 	/* test 32bit offset arithmetic over dma buffer for loss of carry-over */
1395 	end = (pa_lowaddr + dma_buf->len); /* end address */
1396 
1397 	if ((end & 0xFFFFFFFF) < (pa_lowaddr & 0xFFFFFFFF)) { /* exclude carryover */
1398 		DHD_ERROR(("%s: dma_buf %x len %d spans dongle 32bit ptr arithmetic\n",
1399 			__FUNCTION__, pa_lowaddr, dma_buf->len));
1400 		return BCME_ERROR;
1401 	}
1402 
1403 	return BCME_OK;
1404 }
1405 
1406 /**
1407  * dhd_dma_buf_alloc - Allocate a cache coherent DMA-able buffer.
1408  * returns BCME_OK=0 on success
1409  * returns non-zero negative error value on failure.
1410  */
1411 int
1412 dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len)
1413 {
1414 	uint32 dma_pad = 0;
1415 	osl_t *osh = dhd->osh;
1416 	uint16 dma_align = DMA_ALIGN_LEN;
1417 	uint32 rem = 0;
1418 
1419 	ASSERT(dma_buf != NULL);
1420 	ASSERT(dma_buf->va == NULL);
1421 	ASSERT(dma_buf->len == 0);
1422 
1423 	/* Pad the buffer length to align to cacheline size. */
1424 	rem = (buf_len % DHD_DMA_PAD);
1425 	dma_pad = rem ? (DHD_DMA_PAD - rem) : 0;
1426 
1427 	dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad,
1428 		dma_align, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah);
1429 
1430 	if (dma_buf->va == NULL) {
1431 		DHD_ERROR(("%s: buf_len %d, no memory available\n",
1432 			__FUNCTION__, buf_len));
1433 		return BCME_NOMEM;
1434 	}
1435 
1436 	dma_buf->len = buf_len; /* not including padded len */
1437 
1438 	if (dhd_dma_buf_audit(dhd, dma_buf) != BCME_OK) { /* audit dma buf */
1439 		dhd_dma_buf_free(dhd, dma_buf);
1440 		return BCME_ERROR;
1441 	}
1442 
1443 	dhd_dma_buf_reset(dhd, dma_buf); /* zero out and cache flush */
1444 
1445 	return BCME_OK;
1446 }
1447 
1448 /**
1449  * dhd_dma_buf_reset - Reset a cache coherent DMA-able buffer.
1450  */
1451 static void
1452 dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
1453 {
1454 	if ((dma_buf == NULL) || (dma_buf->va == NULL))
1455 		return;
1456 
1457 	(void)dhd_dma_buf_audit(dhd, dma_buf);
1458 
1459 	/* Zero out the entire buffer and cache flush */
1460 	memset((void*)dma_buf->va, 0, dma_buf->len);
1461 	OSL_CACHE_FLUSH((void *)dma_buf->va, dma_buf->len);
1462 }
1463 
1464 /**
1465  * dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated using
1466  * dhd_dma_buf_alloc().
1467  */
1468 void
1469 dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
1470 {
1471 	osl_t *osh = dhd->osh;
1472 
1473 	ASSERT(dma_buf);
1474 
1475 	if (dma_buf->va == NULL)
1476 		return; /* Allow for free invocation, when alloc failed */
1477 
1478 	/* DEBUG: dhd_dma_buf_reset(dhd, dma_buf) */
1479 	(void)dhd_dma_buf_audit(dhd, dma_buf);
1480 
1481 	/* dma buffer may have been padded at allocation */
1482 	DMA_FREE_CONSISTENT(osh, dma_buf->va, dma_buf->_alloced,
1483 		dma_buf->pa, dma_buf->dmah);
1484 
1485 	memset(dma_buf, 0, sizeof(dhd_dma_buf_t));
1486 }
1487 
1488 /**
1489  * dhd_dma_buf_init - Initialize a dhd_dma_buf with specified values.
1490  * Do not use dhd_dma_buf_init to zero out a dhd_dma_buf_t object. Use memset 0.
1491  */
1492 void
1493 dhd_dma_buf_init(dhd_pub_t *dhd, void *dhd_dma_buf,
1494 	void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma)
1495 {
1496 	dhd_dma_buf_t *dma_buf;
1497 	ASSERT(dhd_dma_buf);
1498 	dma_buf = (dhd_dma_buf_t *)dhd_dma_buf;
1499 	dma_buf->va = va;
1500 	dma_buf->len = len;
1501 	dma_buf->pa = pa;
1502 	dma_buf->dmah = dmah;
1503 	dma_buf->secdma = secdma;
1504 
1505 	/* Audit user defined configuration */
1506 	(void)dhd_dma_buf_audit(dhd, dma_buf);
1507 }
1508 
1509 /* +------------------  End of PCIE DHD DMA BUF ADT ------------------------+ */
1510 
1511 /*
1512  * +---------------------------------------------------------------------------+
1513  * DHD_MAP_PKTID_LOGGING
1514  * Logging the PKTID and DMA map/unmap information for the SMMU fault issue
1515  * debugging in customer platform.
1516  * +---------------------------------------------------------------------------+
1517  */
1518 
1519 #ifdef DHD_MAP_PKTID_LOGGING
1520 typedef struct dhd_pktid_log_item {
1521 	dmaaddr_t pa;		/* DMA bus address */
1522 	uint64 ts_nsec;		/* Timestamp: nsec */
1523 	uint32 size;		/* DMA map/unmap size */
1524 	uint32 pktid;		/* Packet ID */
1525 	uint8 pkttype;		/* Packet Type */
1526 	uint8 rsvd[7];		/* Reserved for future use */
1527 } dhd_pktid_log_item_t;
1528 
1529 typedef struct dhd_pktid_log {
1530 	uint32 items;		/* number of total items */
1531 	uint32 index;		/* index of pktid_log_item */
1532 	dhd_pktid_log_item_t map[0];	/* metadata storage */
1533 } dhd_pktid_log_t;
1534 
1535 typedef void * dhd_pktid_log_handle_t; /* opaque handle to pktid log */
1536 
1537 #define	MAX_PKTID_LOG				(2048)
1538 #define DHD_PKTID_LOG_ITEM_SZ			(sizeof(dhd_pktid_log_item_t))
1539 #define DHD_PKTID_LOG_SZ(items)			(uint32)((sizeof(dhd_pktid_log_t)) + \
1540 					((DHD_PKTID_LOG_ITEM_SZ) * (items)))
1541 
1542 #define DHD_PKTID_LOG_INIT(dhd, hdl)		dhd_pktid_logging_init((dhd), (hdl))
1543 #define DHD_PKTID_LOG_FINI(dhd, hdl)		dhd_pktid_logging_fini((dhd), (hdl))
1544 #define DHD_PKTID_LOG(dhd, hdl, pa, pktid, len, pkttype)	\
1545 	dhd_pktid_logging((dhd), (hdl), (pa), (pktid), (len), (pkttype))
1546 #define DHD_PKTID_LOG_DUMP(dhd)			dhd_pktid_logging_dump((dhd))
1547 
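/*
 * Editor's note: an uncompiled sketch of how the logging hooks above are meant
 * to be driven from the DMA map/unmap paths (the driver does this via
 * DHD_PKTID_LOG in dhd_pktid_map_save() and dhd_pktid_map_free() further
 * below); pa, pktid and len are placeholders.
 */
#if 0 /* illustrative sketch */
	/* record a DMA map event in the circular map log */
	DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_map, pa, pktid, len, PKTTYPE_DATA_TX);
	/* record the matching unmap event in the companion log */
	DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap, pa, pktid, len, PKTTYPE_DATA_TX);
	/* on an SMMU fault, dump both logs' indices and physical locations */
	DHD_PKTID_LOG_DUMP(dhd);
#endif /* illustrative sketch */
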
1548 static dhd_pktid_log_handle_t *
1549 dhd_pktid_logging_init(dhd_pub_t *dhd, uint32 num_items)
1550 {
1551 	dhd_pktid_log_t *log;
1552 	uint32 log_size;
1553 
1554 	log_size = DHD_PKTID_LOG_SZ(num_items);
1555 	log = (dhd_pktid_log_t *)MALLOCZ(dhd->osh, log_size);
1556 	if (log == NULL) {
1557 		DHD_ERROR(("%s: MALLOC failed for size %d\n",
1558 			__FUNCTION__, log_size));
1559 		return (dhd_pktid_log_handle_t *)NULL;
1560 	}
1561 
1562 	log->items = num_items;
1563 	log->index = 0;
1564 
1565 	return (dhd_pktid_log_handle_t *)log; /* opaque handle */
1566 }
1567 
1568 static void
1569 dhd_pktid_logging_fini(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle)
1570 {
1571 	dhd_pktid_log_t *log;
1572 	uint32 log_size;
1573 
1574 	if (handle == NULL) {
1575 		DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
1576 		return;
1577 	}
1578 
1579 	log = (dhd_pktid_log_t *)handle;
1580 	log_size = DHD_PKTID_LOG_SZ(log->items);
1581 	MFREE(dhd->osh, handle, log_size);
1582 }
1583 
1584 static void
1585 dhd_pktid_logging(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle, dmaaddr_t pa,
1586 	uint32 pktid, uint32 len, uint8 pkttype)
1587 {
1588 	dhd_pktid_log_t *log;
1589 	uint32 idx;
1590 
1591 	if (handle == NULL) {
1592 		DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
1593 		return;
1594 	}
1595 
1596 	log = (dhd_pktid_log_t *)handle;
1597 	idx = log->index;
1598 	log->map[idx].ts_nsec = OSL_LOCALTIME_NS();
1599 	log->map[idx].pa = pa;
1600 	log->map[idx].pktid = pktid;
1601 	log->map[idx].size = len;
1602 	log->map[idx].pkttype = pkttype;
1603 	log->index = (idx + 1) % (log->items);	/* update index */
1604 }
1605 
1606 void
1607 dhd_pktid_logging_dump(dhd_pub_t *dhd)
1608 {
1609 	dhd_prot_t *prot = dhd->prot;
1610 	dhd_pktid_log_t *map_log, *unmap_log;
1611 	uint64 ts_sec, ts_usec;
1612 
1613 	if (prot == NULL) {
1614 		DHD_ERROR(("%s: prot is NULL\n", __FUNCTION__));
1615 		return;
1616 	}
1617 
1618 	map_log = (dhd_pktid_log_t *)(prot->pktid_dma_map);
1619 	unmap_log = (dhd_pktid_log_t *)(prot->pktid_dma_unmap);
1620 	OSL_GET_LOCALTIME(&ts_sec, &ts_usec);
1621 	if (map_log && unmap_log) {
1622 		DHD_ERROR(("%s: map_idx=%d unmap_idx=%d "
1623 			"current time=[%5lu.%06lu]\n", __FUNCTION__,
1624 			map_log->index, unmap_log->index,
1625 			(unsigned long)ts_sec, (unsigned long)ts_usec));
1626 		DHD_ERROR(("%s: pktid_map_log(pa)=0x%llx size=%d, "
1627 			"pktid_unmap_log(pa)=0x%llx size=%d\n", __FUNCTION__,
1628 			(uint64)__virt_to_phys((ulong)(map_log->map)),
1629 			(uint32)(DHD_PKTID_LOG_ITEM_SZ * map_log->items),
1630 			(uint64)__virt_to_phys((ulong)(unmap_log->map)),
1631 			(uint32)(DHD_PKTID_LOG_ITEM_SZ * unmap_log->items)));
1632 	}
1633 }
1634 #endif /* DHD_MAP_PKTID_LOGGING */
1635 
1636 /* +-----------------  End of DHD_MAP_PKTID_LOGGING -----------------------+ */
1637 
1638 /*
1639  * +---------------------------------------------------------------------------+
1640  * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
1641  * Main purpose is to save memory on the dongle, has other purposes as well.
1642  * The packet id map, also includes storage for some packet parameters that
1643  * may be saved. A native packet pointer along with the parameters may be saved
1644  * and a unique 32bit pkt id will be returned. Later, the saved packet pointer
1645  * and the metadata may be retrieved using the previously allocated packet id.
1646  * +---------------------------------------------------------------------------+
1647  */
1648 #define DHD_PCIE_PKTID
1649 #define MAX_CTRL_PKTID		(1024) /* Maximum number of pktids supported */
1650 #define MAX_RX_PKTID		(1024)
1651 #define MAX_TX_PKTID		(3072 * 12)
1652 
1653 /* On Router, the pktptr serves as a pktid. */
1654 
1655 #if defined(PROP_TXSTATUS) && !defined(DHD_PCIE_PKTID)
1656 #error "PKTIDMAP must be supported with PROP_TXSTATUS/WLFC"
1657 #endif // endif
1658 
1659 /* Enum for marking the buffer color based on usage */
1660 typedef enum dhd_pkttype {
1661 	PKTTYPE_DATA_TX = 0,
1662 	PKTTYPE_DATA_RX,
1663 	PKTTYPE_IOCTL_RX,
1664 	PKTTYPE_EVENT_RX,
1665 	PKTTYPE_INFO_RX,
1666 	/* no pkttype check in dhd_prot_pkt_free; for the "pktid reserved, no space avail" case */
1667 	PKTTYPE_NO_CHECK,
1668 	PKTTYPE_TSBUF_RX
1669 } dhd_pkttype_t;
1670 
1671 #define DHD_PKTID_MIN_AVAIL_COUNT		512U
1672 #define DHD_PKTID_DEPLETED_MAX_COUNT		(DHD_PKTID_MIN_AVAIL_COUNT * 2U)
1673 #define DHD_PKTID_INVALID			(0U)
1674 #define DHD_IOCTL_REQ_PKTID			(0xFFFE)
1675 #define DHD_FAKE_PKTID				(0xFACE)
1676 #define DHD_H2D_DBGRING_REQ_PKTID		0xFFFD
1677 #define DHD_D2H_DBGRING_REQ_PKTID		0xFFFC
1678 #define DHD_H2D_HOSTTS_REQ_PKTID		0xFFFB
1679 #define DHD_H2D_BTLOGRING_REQ_PKTID		0xFFFA
1680 #define DHD_D2H_BTLOGRING_REQ_PKTID		0xFFF9
1681 #define DHD_H2D_SNAPSHOT_UPLOAD_REQ_PKTID	0xFFF8
1682 #ifdef DHD_HP2P
1683 #define DHD_D2H_HPPRING_TXREQ_PKTID		0xFFF7
1684 #define DHD_D2H_HPPRING_RXREQ_PKTID		0xFFF6
1685 #endif /* DHD_HP2P */
1686 
1687 #define IS_FLOWRING(ring) \
1688 	((strncmp(ring->name, "h2dflr", sizeof("h2dflr"))) == (0))
1689 
1690 typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */
1691 
1692 /* Construct a packet id mapping table, returning an opaque map handle */
1693 static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items);
1694 
1695 /* Destroy a packet id mapping table, freeing all packets active in the table */
1696 static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map);
1697 
1698 #define DHD_NATIVE_TO_PKTID_INIT(dhd, items) dhd_pktid_map_init((dhd), (items))
1699 #define DHD_NATIVE_TO_PKTID_RESET(dhd, map)  dhd_pktid_map_reset((dhd), (map))
1700 #define DHD_NATIVE_TO_PKTID_FINI(dhd, map)   dhd_pktid_map_fini((dhd), (map))
1701 #define DHD_NATIVE_TO_PKTID_FINI_IOCTL(osh, map)  dhd_pktid_map_fini_ioctl((osh), (map))
1702 
1703 #ifdef MACOSX_DHD
1704 #undef DHD_PCIE_PKTID
1705 #define DHD_PCIE_PKTID 1
1706 #endif /* MACOSX_DHD */
1707 
1708 #if defined(DHD_PCIE_PKTID)
1709 #if defined(MACOSX_DHD)
1710 #define IOCTLRESP_USE_CONSTMEM
1711 static void free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
1712 static int  alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
1713 #endif // endif
1714 
1715 /* Determine number of pktids that are available */
1716 static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle);
1717 
1718 /* Allocate a unique pktid against which a pkt and some metadata is saved */
1719 static INLINE uint32 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
1720 	void *pkt, dhd_pkttype_t pkttype);
1721 static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
1722 	void *pkt, uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dma,
1723 	void *dmah, void *secdma, dhd_pkttype_t pkttype);
1724 static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
1725 	void *pkt, dmaaddr_t pa, uint32 len, uint8 dma,
1726 	void *dmah, void *secdma, dhd_pkttype_t pkttype);
1727 /* Return an allocated pktid, retrieving previously saved pkt and metadata */
1728 static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
1729 	uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah,
1730 	void **secdma, dhd_pkttype_t pkttype, bool rsv_locker);
1731 
1732 /*
1733  * DHD_PKTID_AUDIT_ENABLED: Audit of PktIds in DHD for duplicate alloc and frees
1734  *
1735  * DHD_PKTID_AUDIT_MAP: Audit the LIFO or FIFO PktIdMap allocator
1736  * DHD_PKTID_AUDIT_RING: Audit the pktid during producer/consumer ring operation
1737  *
1738  * CAUTION: When DHD_PKTID_AUDIT_ENABLED is defined,
1739  *    either DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be selected.
1740  */
1741 #if defined(DHD_PKTID_AUDIT_ENABLED)
1742 #define USE_DHD_PKTID_AUDIT_LOCK 1
1743 /* Audit the pktidmap allocator */
1744 /* #define DHD_PKTID_AUDIT_MAP */
1745 
1746 /* Audit the pktid during production/consumption of workitems */
1747 #define DHD_PKTID_AUDIT_RING
1748 
1749 #if defined(DHD_PKTID_AUDIT_MAP) && defined(DHD_PKTID_AUDIT_RING)
1750 #error "May only enabled audit of MAP or RING, at a time."
1751 #endif /* DHD_PKTID_AUDIT_MAP && DHD_PKTID_AUDIT_RING */
1752 
1753 #define DHD_DUPLICATE_ALLOC     1
1754 #define DHD_DUPLICATE_FREE      2
1755 #define DHD_TEST_IS_ALLOC       3
1756 #define DHD_TEST_IS_FREE        4
1757 
1758 typedef enum dhd_pktid_map_type {
1759 	DHD_PKTID_MAP_TYPE_CTRL = 1,
1760 	DHD_PKTID_MAP_TYPE_TX,
1761 	DHD_PKTID_MAP_TYPE_RX,
1762 	DHD_PKTID_MAP_TYPE_UNKNOWN
1763 } dhd_pktid_map_type_t;
1764 
1765 #ifdef USE_DHD_PKTID_AUDIT_LOCK
1766 #define DHD_PKTID_AUDIT_LOCK_INIT(osh)          dhd_os_spin_lock_init(osh)
1767 #define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock)  dhd_os_spin_lock_deinit(osh, lock)
1768 #define DHD_PKTID_AUDIT_LOCK(lock)              dhd_os_spin_lock(lock)
1769 #define DHD_PKTID_AUDIT_UNLOCK(lock, flags)     dhd_os_spin_unlock(lock, flags)
1770 #else
1771 #define DHD_PKTID_AUDIT_LOCK_INIT(osh)          (void *)(1)
1772 #define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock)  do { /* noop */ } while (0)
1773 #define DHD_PKTID_AUDIT_LOCK(lock)              0
1774 #define DHD_PKTID_AUDIT_UNLOCK(lock, flags)     do { /* noop */ } while (0)
1775 #endif /* !USE_DHD_PKTID_AUDIT_LOCK */
1776 
1777 #endif /* DHD_PKTID_AUDIT_ENABLED */
1778 
1779 #define USE_DHD_PKTID_LOCK   1
1780 
1781 #ifdef USE_DHD_PKTID_LOCK
1782 #define DHD_PKTID_LOCK_INIT(osh)                dhd_os_spin_lock_init(osh)
1783 #define DHD_PKTID_LOCK_DEINIT(osh, lock)        dhd_os_spin_lock_deinit(osh, lock)
1784 #define DHD_PKTID_LOCK(lock, flags)             (flags) = dhd_os_spin_lock(lock)
1785 #define DHD_PKTID_UNLOCK(lock, flags)           dhd_os_spin_unlock(lock, flags)
1786 #else
1787 #define DHD_PKTID_LOCK_INIT(osh)                (void *)(1)
1788 #define DHD_PKTID_LOCK_DEINIT(osh, lock)	\
1789 	do { \
1790 		BCM_REFERENCE(osh); \
1791 		BCM_REFERENCE(lock); \
1792 	} while (0)
1793 #define DHD_PKTID_LOCK(lock)                    0
1794 #define DHD_PKTID_UNLOCK(lock, flags)           \
1795 	do { \
1796 		BCM_REFERENCE(lock); \
1797 		BCM_REFERENCE(flags); \
1798 	} while (0)
1799 #endif /* !USE_DHD_PKTID_LOCK */
1800 
1801 typedef enum dhd_locker_state {
1802 	LOCKER_IS_FREE,
1803 	LOCKER_IS_BUSY,
1804 	LOCKER_IS_RSVD
1805 } dhd_locker_state_t;
1806 
1807 /* Packet metadata saved in packet id mapper */
1808 
1809 typedef struct dhd_pktid_item {
1810 	dhd_locker_state_t state;  /* tag a locker to be free, busy or reserved */
1811 	uint8       dir;      /* dma map direction (Tx=flush or Rx=invalidate) */
1812 	dhd_pkttype_t pkttype; /* pktlists are maintained based on pkttype */
1813 	uint16      len;      /* length of mapped packet's buffer */
1814 	void        *pkt;     /* opaque native pointer to a packet */
1815 	dmaaddr_t   pa;       /* physical address of mapped packet's buffer */
1816 	void        *dmah;    /* handle to OS specific DMA map */
1817 	void		*secdma;
1818 } dhd_pktid_item_t;
1819 
1820 typedef uint32 dhd_pktid_key_t;
1821 
1822 typedef struct dhd_pktid_map {
1823 	uint32      items;    /* total items in map */
1824 	uint32      avail;    /* total available items */
1825 	int         failures; /* lockers unavailable count */
1826 	/* Spinlock to protect dhd_pktid_map in process/tasklet context */
1827 	void        *pktid_lock; /* Used when USE_DHD_PKTID_LOCK is defined */
1828 
1829 #if defined(DHD_PKTID_AUDIT_ENABLED)
1830 	void		*pktid_audit_lock;
1831 	struct bcm_mwbmap *pktid_audit; /* multi word bitmap based audit */
1832 #endif /* DHD_PKTID_AUDIT_ENABLED */
1833 	dhd_pktid_key_t	*keys; /* map_items +1 unique pkt ids */
1834 	dhd_pktid_item_t lockers[0];           /* metadata storage */
1835 } dhd_pktid_map_t;
1836 
1837 /*
1838  * PktId (Locker) #0 is never allocated and is considered invalid.
1839  *
1840  * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a
1841  * depleted pktid pool and must not be used by the caller.
1842  *
1843  * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID.
1844  */
1845 
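/*
 * Editor's note, an uncompiled sketch of the contract above (variables are
 * placeholders; DHD_NATIVE_TO_PKTID_RSV is defined just below):
 *
 *	pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, PKTTYPE_DATA_TX);
 *	if (pktid == DHD_PKTID_INVALID)
 *		return BCME_NORESOURCE;	// depleted pool: do not post, never free this id
 */
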
1846 #define DHD_PKTID_FREE_LOCKER           (FALSE)
1847 #define DHD_PKTID_RSV_LOCKER            (TRUE)
1848 
1849 #define DHD_PKTID_ITEM_SZ               (sizeof(dhd_pktid_item_t))
1850 #define DHD_PKIDMAP_ITEMS(items)        (items)
1851 #define DHD_PKTID_MAP_SZ(items)         (sizeof(dhd_pktid_map_t) + \
1852 	                                     (DHD_PKTID_ITEM_SZ * ((items) + 1)))
1853 #define DHD_PKTIDMAP_KEYS_SZ(items)     (sizeof(dhd_pktid_key_t) * ((items) + 1))
1854 
1855 #define DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, map)  dhd_pktid_map_reset_ioctl((dhd), (map))
1856 
1857 /* Convert a packet to a pktid, and save pkt pointer in busy locker */
1858 #define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype)    \
1859 	dhd_pktid_map_reserve((dhd), (map), (pkt), (pkttype))
1860 /* Reuse a previously reserved locker to save packet params */
1861 #define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dir, dmah, secdma, pkttype) \
1862 	dhd_pktid_map_save((dhd), (map), (void *)(pkt), (nkey), (pa), (uint32)(len), \
1863 		(uint8)(dir), (void *)(dmah), (void *)(secdma), \
1864 		(dhd_pkttype_t)(pkttype))
1865 /* Convert a packet to a pktid, and save packet params in locker */
1866 #define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir, dmah, secdma, pkttype) \
1867 	dhd_pktid_map_alloc((dhd), (map), (void *)(pkt), (pa), (uint32)(len), \
1868 		(uint8)(dir), (void *)(dmah), (void *)(secdma), \
1869 		(dhd_pkttype_t)(pkttype))
1870 
1871 /* Convert pktid to a packet, and free the locker */
1872 #define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
1873 	dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
1874 		(dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
1875 		(void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER)
1876 
1877 /* Convert the pktid to a packet, empty locker, but keep it reserved */
1878 #define DHD_PKTID_TO_NATIVE_RSV(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
1879 	dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
1880 	                   (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
1881 	                   (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER)
1882 
1883 #define DHD_PKTID_AVAIL(map)                 dhd_pktid_map_avail_cnt(map)
1884 
1885 #if defined(DHD_PKTID_AUDIT_ENABLED)
1886 
1887 static int
1888 dhd_get_pktid_map_type(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map)
1889 {
1890 	dhd_prot_t *prot = dhd->prot;
1891 	int pktid_map_type;
1892 
1893 	if (pktid_map == prot->pktid_ctrl_map) {
1894 		pktid_map_type = DHD_PKTID_MAP_TYPE_CTRL;
1895 	} else if (pktid_map == prot->pktid_tx_map) {
1896 		pktid_map_type = DHD_PKTID_MAP_TYPE_TX;
1897 	} else if (pktid_map == prot->pktid_rx_map) {
1898 		pktid_map_type = DHD_PKTID_MAP_TYPE_RX;
1899 	} else {
1900 		pktid_map_type = DHD_PKTID_MAP_TYPE_UNKNOWN;
1901 	}
1902 
1903 	return pktid_map_type;
1904 }
1905 
1906 /**
1907 * __dhd_pktid_audit - Use the mwbmap to audit validity of a pktid.
1908 */
1909 static int
1910 __dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
1911 	const int test_for, const char *errmsg)
1912 {
1913 #define DHD_PKT_AUDIT_STR "ERROR: %16s Host PktId Audit: "
1914 	struct bcm_mwbmap *handle;
1915 	uint32	flags;
1916 	bool ignore_audit;
1917 	int error = BCME_OK;
1918 
1919 	if (pktid_map == (dhd_pktid_map_t *)NULL) {
1920 		DHD_ERROR((DHD_PKT_AUDIT_STR "Pkt id map NULL\n", errmsg));
1921 		return BCME_OK;
1922 	}
1923 
1924 	flags = DHD_PKTID_AUDIT_LOCK(pktid_map->pktid_audit_lock);
1925 
1926 	handle = pktid_map->pktid_audit;
1927 	if (handle == (struct bcm_mwbmap *)NULL) {
1928 		DHD_ERROR((DHD_PKT_AUDIT_STR "Handle NULL\n", errmsg));
1929 		goto out;
1930 	}
1931 
1932 	/* Exclude special pktids from audit */
1933 	ignore_audit = (pktid == DHD_IOCTL_REQ_PKTID) | (pktid == DHD_FAKE_PKTID);
1934 	if (ignore_audit) {
1935 		goto out;
1936 	}
1937 
1938 	if ((pktid == DHD_PKTID_INVALID) || (pktid > pktid_map->items)) {
1939 		DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> invalid\n", errmsg, pktid));
1940 		error = BCME_ERROR;
1941 		goto out;
1942 	}
1943 
1944 	/* Perform audit */
1945 	switch (test_for) {
1946 		case DHD_DUPLICATE_ALLOC:
1947 			if (!bcm_mwbmap_isfree(handle, pktid)) {
1948 				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> alloc duplicate\n",
1949 				           errmsg, pktid));
1950 				error = BCME_ERROR;
1951 			} else {
1952 				bcm_mwbmap_force(handle, pktid);
1953 			}
1954 			break;
1955 
1956 		case DHD_DUPLICATE_FREE:
1957 			if (bcm_mwbmap_isfree(handle, pktid)) {
1958 				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> free duplicate\n",
1959 				           errmsg, pktid));
1960 				error = BCME_ERROR;
1961 			} else {
1962 				bcm_mwbmap_free(handle, pktid);
1963 			}
1964 			break;
1965 
1966 		case DHD_TEST_IS_ALLOC:
1967 			if (bcm_mwbmap_isfree(handle, pktid)) {
1968 				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not allocated\n",
1969 				           errmsg, pktid));
1970 				error = BCME_ERROR;
1971 			}
1972 			break;
1973 
1974 		case DHD_TEST_IS_FREE:
1975 			if (!bcm_mwbmap_isfree(handle, pktid)) {
1976 				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not free",
1977 				           errmsg, pktid));
1978 				error = BCME_ERROR;
1979 			}
1980 			break;
1981 
1982 		default:
1983 			DHD_ERROR(("%s: Invalid test case: %d\n", __FUNCTION__, test_for));
1984 			error = BCME_ERROR;
1985 			break;
1986 	}
1987 
1988 out:
1989 	DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
1990 
1991 	if (error != BCME_OK) {
1992 		dhd->pktid_audit_failed = TRUE;
1993 	}
1994 
1995 	return error;
1996 }
1997 
1998 static int
1999 dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
2000 	const int test_for, const char *errmsg)
2001 {
2002 	int ret = BCME_OK;
2003 	ret = __dhd_pktid_audit(dhd, pktid_map, pktid, test_for, errmsg);
2004 	if (ret == BCME_ERROR) {
2005 		DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
2006 			__FUNCTION__, pktid, dhd_get_pktid_map_type(dhd, pktid_map)));
2007 		dhd_pktid_error_handler(dhd);
2008 	}
2009 
2010 	return ret;
2011 }
2012 
2013 #define DHD_PKTID_AUDIT(dhdp, map, pktid, test_for) \
2014 	dhd_pktid_audit((dhdp), (dhd_pktid_map_t *)(map), (pktid), (test_for), __FUNCTION__)
2015 
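/*
 * Editor's note: an uncompiled sketch of the audit call pattern used on the
 * producer/consumer paths below (see dhd_pktid_map_reset()); a double alloc or
 * double free of the same id trips the corresponding audit.
 */
#if 0 /* illustrative sketch */
	DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_ALLOC); /* when a pktid is produced */
	DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE);  /* when a pktid is consumed */
#endif /* illustrative sketch */
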
2016 static int
2017 dhd_pktid_audit_ring_debug(dhd_pub_t *dhdp, dhd_pktid_map_t *map, uint32 pktid,
2018 	const int test_for, void *msg, uint32 msg_len, const char *func)
2019 {
2020 	int ret = BCME_OK;
2021 
2022 	if (dhd_query_bus_erros(dhdp)) {
2023 		return BCME_ERROR;
2024 	}
2025 
2026 	ret = __dhd_pktid_audit(dhdp, map, pktid, test_for, func);
2027 	if (ret == BCME_ERROR) {
2028 		DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
2029 			__FUNCTION__, pktid, dhd_get_pktid_map_type(dhdp, map)));
2030 		prhex(func, (uchar *)msg, msg_len);
2031 		dhd_pktid_error_handler(dhdp);
2032 	}
2033 	return ret;
2034 }
2035 #define DHD_PKTID_AUDIT_RING_DEBUG(dhdp, map, pktid, test_for, msg, msg_len) \
2036 	dhd_pktid_audit_ring_debug((dhdp), (dhd_pktid_map_t *)(map), \
2037 		(pktid), (test_for), msg, msg_len, __FUNCTION__)
2038 
2039 #endif /* DHD_PKTID_AUDIT_ENABLED */
2040 
2041 /**
2042  * +---------------------------------------------------------------------------+
2043  * Packet to Packet Id mapper using a <numbered_key, locker> paradigm.
2044  *
2045  * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_xxx_PKTID].
2046  *
2047  * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique
2048  * packet id is returned. This unique packet id may be used to retrieve the
2049  * previously saved packet metadata, using dhd_pktid_map_free(). On invocation
2050  * of dhd_pktid_map_free(), the unique packet id is essentially freed. A
2051  * subsequent call to dhd_pktid_map_alloc() may reuse this packet id.
2052  *
2053  * Implementation Note:
2054  * Convert this into a <key,locker> abstraction and place into bcmutils !
2055  * Locker abstraction should treat contents as opaque storage, and a
2056  * callback should be registered to handle busy lockers on destructor.
2057  *
2058  * +---------------------------------------------------------------------------+
2059  */
2060 
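/*
 * Editor's note: an uncompiled save/retrieve round trip through the mapper
 * using the macros defined above. All variables are placeholders and DMA_TX is
 * assumed to be the OSL map direction used elsewhere in this codebase.
 */
#if 0 /* illustrative sketch */
	pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_tx_map, pkt,
		pa, len, DMA_TX, dmah, secdma, PKTTYPE_DATA_TX);
	if (pktid == DHD_PKTID_INVALID)
		return BCME_NORESOURCE; /* depleted pool; caller must not post the workitem */

	/* ... post the workitem carrying pktid; later, on completion ... */

	pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
		pa, len, dmah, secdma, PKTTYPE_DATA_TX);
#endif /* illustrative sketch */
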
2061 /** Allocate and initialize a mapper of num_items <numbered_key, locker> */
2062 
2063 static dhd_pktid_map_handle_t *
2064 dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
2065 {
2066 	void* osh;
2067 	uint32 nkey;
2068 	dhd_pktid_map_t *map;
2069 	uint32 dhd_pktid_map_sz;
2070 	uint32 map_items;
2071 	uint32 map_keys_sz;
2072 	osh = dhd->osh;
2073 
2074 	dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items);
2075 
2076 	map = (dhd_pktid_map_t *)VMALLOCZ(osh, dhd_pktid_map_sz);
2077 	if (map == NULL) {
2078 		DHD_ERROR(("%s:%d: MALLOC failed for size %d\n",
2079 			__FUNCTION__, __LINE__, dhd_pktid_map_sz));
2080 		return (dhd_pktid_map_handle_t *)NULL;
2081 	}
2082 
2083 	map->items = num_items;
2084 	map->avail = num_items;
2085 
2086 	map_items = DHD_PKIDMAP_ITEMS(map->items);
2087 
2088 	map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
2089 
2090 	/* Initialize the lock that protects this structure */
2091 	map->pktid_lock = DHD_PKTID_LOCK_INIT(osh);
2092 	if (map->pktid_lock == NULL) {
2093 		DHD_ERROR(("%s:%d: Lock init failed \r\n", __FUNCTION__, __LINE__));
2094 		goto error;
2095 	}
2096 
2097 	map->keys = (dhd_pktid_key_t *)MALLOC(osh, map_keys_sz);
2098 	if (map->keys == NULL) {
2099 		DHD_ERROR(("%s:%d: MALLOC failed for map->keys size %d\n",
2100 			__FUNCTION__, __LINE__, map_keys_sz));
2101 		goto error;
2102 	}
2103 
2104 #if defined(DHD_PKTID_AUDIT_ENABLED)
2105 		/* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */
2106 		map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1);
2107 		if (map->pktid_audit == (struct bcm_mwbmap *)NULL) {
2108 			DHD_ERROR(("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__));
2109 			goto error;
2110 		} else {
2111 			DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n",
2112 				__FUNCTION__, __LINE__, map_items + 1));
2113 		}
2114 		map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh);
2115 #endif /* DHD_PKTID_AUDIT_ENABLED */
2116 
2117 	for (nkey = 1; nkey <= map_items; nkey++) { /* locker #0 is reserved */
2118 		map->keys[nkey] = nkey; /* populate with unique keys */
2119 		map->lockers[nkey].state = LOCKER_IS_FREE;
2120 		map->lockers[nkey].pkt   = NULL; /* bzero: redundant */
2121 		map->lockers[nkey].len   = 0;
2122 	}
2123 
2124 	/* Reserve pktid #0, i.e. DHD_PKTID_INVALID to be inuse */
2125 	map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY; /* tag locker #0 as inuse */
2126 	map->lockers[DHD_PKTID_INVALID].pkt   = NULL; /* bzero: redundant */
2127 	map->lockers[DHD_PKTID_INVALID].len   = 0;
2128 
2129 #if defined(DHD_PKTID_AUDIT_ENABLED)
2130 	/* do not use dhd_pktid_audit() here, use bcm_mwbmap_force directly */
2131 	bcm_mwbmap_force(map->pktid_audit, DHD_PKTID_INVALID);
2132 #endif /* DHD_PKTID_AUDIT_ENABLED */
2133 
2134 	return (dhd_pktid_map_handle_t *)map; /* opaque handle */
2135 
2136 error:
2137 	if (map) {
2138 #if defined(DHD_PKTID_AUDIT_ENABLED)
2139 		if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
2140 			bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
2141 			map->pktid_audit = (struct bcm_mwbmap *)NULL;
2142 			if (map->pktid_audit_lock)
2143 				DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
2144 		}
2145 #endif /* DHD_PKTID_AUDIT_ENABLED */
2146 
2147 		if (map->keys) {
2148 			MFREE(osh, map->keys, map_keys_sz);
2149 		}
2150 
2151 		if (map->pktid_lock) {
2152 			DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
2153 		}
2154 
2155 		VMFREE(osh, map, dhd_pktid_map_sz);
2156 	}
2157 	return (dhd_pktid_map_handle_t *)NULL;
2158 }
2159 
2160 /**
2161  * Retrieve all allocated keys and free all <numbered_key, locker>.
2162  * Freeing implies: unmapping the buffers and freeing the native packet
2163  * This could have been a callback registered with the pktid mapper.
2164  */
2165 static void
2166 dhd_pktid_map_reset(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
2167 {
2168 	void *osh;
2169 	uint32 nkey;
2170 	dhd_pktid_map_t *map;
2171 	dhd_pktid_item_t *locker;
2172 	uint32 map_items;
2173 	unsigned long flags;
2174 	bool data_tx = FALSE;
2175 
2176 	map = (dhd_pktid_map_t *)handle;
2177 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2178 	osh = dhd->osh;
2179 
2180 	map_items = DHD_PKIDMAP_ITEMS(map->items);
2181 	/* skip reserved KEY #0, and start from 1 */
2182 
2183 	for (nkey = 1; nkey <= map_items; nkey++) {
2184 		if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
2185 			locker = &map->lockers[nkey];
2186 			locker->state = LOCKER_IS_FREE;
2187 			data_tx = (locker->pkttype == PKTTYPE_DATA_TX);
2188 			if (data_tx) {
2189 				OSL_ATOMIC_DEC(dhd->osh, &dhd->prot->active_tx_count);
2190 			}
2191 
2192 #ifdef DHD_PKTID_AUDIT_RING
2193 			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
2194 #endif /* DHD_PKTID_AUDIT_RING */
2195 #ifdef DHD_MAP_PKTID_LOGGING
2196 			DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap,
2197 				locker->pa, nkey, locker->len,
2198 				locker->pkttype);
2199 #endif /* DHD_MAP_PKTID_LOGGING */
2200 
2201 			{
2202 				if (SECURE_DMA_ENAB(dhd->osh))
2203 					SECURE_DMA_UNMAP(osh, locker->pa,
2204 						locker->len, locker->dir, 0,
2205 						locker->dmah, locker->secdma, 0);
2206 				else
2207 					DMA_UNMAP(osh, locker->pa, locker->len,
2208 						locker->dir, 0, locker->dmah);
2209 			}
2210 			dhd_prot_packet_free(dhd, (ulong*)locker->pkt,
2211 				locker->pkttype, data_tx);
2212 		}
2213 		else {
2214 #ifdef DHD_PKTID_AUDIT_RING
2215 			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
2216 #endif /* DHD_PKTID_AUDIT_RING */
2217 		}
2218 		map->keys[nkey] = nkey; /* populate with unique keys */
2219 	}
2220 
2221 	map->avail = map_items;
2222 	memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
2223 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2224 }
2225 
2226 #ifdef IOCTLRESP_USE_CONSTMEM
2227 /** Called in detach scenario. Releasing IOCTL buffers. */
2228 static void
2229 dhd_pktid_map_reset_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
2230 {
2231 	uint32 nkey;
2232 	dhd_pktid_map_t *map;
2233 	dhd_pktid_item_t *locker;
2234 	uint32 map_items;
2235 	unsigned long flags;
2236 
2237 	map = (dhd_pktid_map_t *)handle;
2238 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2239 
2240 	map_items = DHD_PKIDMAP_ITEMS(map->items);
2241 	/* skip reserved KEY #0, and start from 1 */
2242 	for (nkey = 1; nkey <= map_items; nkey++) {
2243 		if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
2244 			dhd_dma_buf_t retbuf;
2245 
2246 #ifdef DHD_PKTID_AUDIT_RING
2247 			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
2248 #endif /* DHD_PKTID_AUDIT_RING */
2249 
2250 			locker = &map->lockers[nkey];
2251 			retbuf.va = locker->pkt;
2252 			retbuf.len = locker->len;
2253 			retbuf.pa = locker->pa;
2254 			retbuf.dmah = locker->dmah;
2255 			retbuf.secdma = locker->secdma;
2256 
2257 			free_ioctl_return_buffer(dhd, &retbuf);
2258 		}
2259 		else {
2260 #ifdef DHD_PKTID_AUDIT_RING
2261 			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
2262 #endif /* DHD_PKTID_AUDIT_RING */
2263 		}
2264 		map->keys[nkey] = nkey; /* populate with unique keys */
2265 	}
2266 
2267 	map->avail = map_items;
2268 	memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
2269 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2270 }
2271 #endif /* IOCTLRESP_USE_CONSTMEM */
2272 
2273 /**
2274  * Free the pktid map.
2275  */
2276 static void
2277 dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
2278 {
2279 	dhd_pktid_map_t *map;
2280 	uint32 dhd_pktid_map_sz;
2281 	uint32 map_keys_sz;
2282 
2283 	if (handle == NULL)
2284 		return;
2285 
2286 	/* Free any pending packets */
2287 	dhd_pktid_map_reset(dhd, handle);
2288 
2289 	map = (dhd_pktid_map_t *)handle;
2290 	dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
2291 	map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
2292 
2293 	DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock);
2294 
2295 #if defined(DHD_PKTID_AUDIT_ENABLED)
2296 	if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
2297 		bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
2298 		map->pktid_audit = (struct bcm_mwbmap *)NULL;
2299 		if (map->pktid_audit_lock) {
2300 			DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
2301 		}
2302 	}
2303 #endif /* DHD_PKTID_AUDIT_ENABLED */
2304 	MFREE(dhd->osh, map->keys, map_keys_sz);
2305 	VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
2306 }
2307 #ifdef IOCTLRESP_USE_CONSTMEM
2308 static void
2309 dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
2310 {
2311 	dhd_pktid_map_t *map;
2312 	uint32 dhd_pktid_map_sz;
2313 	uint32 map_keys_sz;
2314 
2315 	if (handle == NULL)
2316 		return;
2317 
2318 	/* Free any pending packets */
2319 	dhd_pktid_map_reset_ioctl(dhd, handle);
2320 
2321 	map = (dhd_pktid_map_t *)handle;
2322 	dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
2323 	map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
2324 
2325 	DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock);
2326 
2327 #if defined(DHD_PKTID_AUDIT_ENABLED)
2328 	if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
2329 		bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
2330 		map->pktid_audit = (struct bcm_mwbmap *)NULL;
2331 		if (map->pktid_audit_lock) {
2332 			DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
2333 		}
2334 	}
2335 #endif /* DHD_PKTID_AUDIT_ENABLED */
2336 
2337 	MFREE(dhd->osh, map->keys, map_keys_sz);
2338 	VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
2339 }
2340 #endif /* IOCTLRESP_USE_CONSTMEM */
2341 
2342 /** Get the pktid free count */
2343 static INLINE uint32 BCMFASTPATH
2344 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle)
2345 {
2346 	dhd_pktid_map_t *map;
2347 	uint32	avail;
2348 	unsigned long flags;
2349 
2350 	ASSERT(handle != NULL);
2351 	map = (dhd_pktid_map_t *)handle;
2352 
2353 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2354 	avail = map->avail;
2355 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2356 
2357 	return avail;
2358 }
2359 
2360 /**
2361  * dhd_pktid_map_reserve - reserve a unique numbered key. Reserved locker is not
2362  * yet populated. Invoke the pktid save api to populate the packet parameters
2363  * into the locker. This function is not reentrant; serializing calls is the
2364  * caller's responsibility. Caller must treat a returned value DHD_PKTID_INVALID as
2365  * a failure case, implying a depleted pool of pktids.
2366  */
2367 static INLINE uint32
2368 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
2369 	void *pkt, dhd_pkttype_t pkttype)
2370 {
2371 	uint32 nkey;
2372 	dhd_pktid_map_t *map;
2373 	dhd_pktid_item_t *locker;
2374 	unsigned long flags;
2375 
2376 	ASSERT(handle != NULL);
2377 	map = (dhd_pktid_map_t *)handle;
2378 
2379 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2380 
2381 	if ((int)(map->avail) <= 0) { /* no more pktids to allocate */
2382 		map->failures++;
2383 		DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__));
2384 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2385 		return DHD_PKTID_INVALID; /* failed alloc request */
2386 	}
2387 
2388 	ASSERT(map->avail <= map->items);
2389 	nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */
2390 
2391 	if ((map->avail > map->items) || (nkey > map->items)) {
2392 		map->failures++;
2393 		DHD_ERROR(("%s:%d: failed to allocate a new pktid,"
2394 			" map->avail<%u>, nkey<%u>, pkttype<%u>\n",
2395 			__FUNCTION__, __LINE__, map->avail, nkey,
2396 			pkttype));
2397 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2398 		return DHD_PKTID_INVALID; /* failed alloc request */
2399 	}
2400 
2401 	locker = &map->lockers[nkey]; /* save packet metadata in locker */
2402 	map->avail--;
2403 	locker->pkt = pkt; /* pkt is saved, other params not yet saved. */
2404 	locker->len = 0;
2405 	locker->state = LOCKER_IS_BUSY; /* reserve this locker */
2406 
2407 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2408 
2409 	ASSERT(nkey != DHD_PKTID_INVALID);
2410 
2411 	return nkey; /* return locker's numbered key */
2412 }
2413 
2414 /*
2415  * dhd_pktid_map_save - Save a packet's parameters into a locker
2416  * corresponding to a previously reserved unique numbered key.
2417  */
2418 static INLINE void
2419 dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
2420 	uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
2421 	dhd_pkttype_t pkttype)
2422 {
2423 	dhd_pktid_map_t *map;
2424 	dhd_pktid_item_t *locker;
2425 	unsigned long flags;
2426 
2427 	ASSERT(handle != NULL);
2428 	map = (dhd_pktid_map_t *)handle;
2429 
2430 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2431 
2432 	if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
2433 		DHD_ERROR(("%s:%d: Error! saving invalid pktid<%u> pkttype<%u>\n",
2434 			__FUNCTION__, __LINE__, nkey, pkttype));
2435 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2436 #ifdef DHD_FW_COREDUMP
2437 		if (dhd->memdump_enabled) {
2438 			/* collect core dump */
2439 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2440 			dhd_bus_mem_dump(dhd);
2441 		}
2442 #else
2443 		ASSERT(0);
2444 #endif /* DHD_FW_COREDUMP */
2445 		return;
2446 	}
2447 
2448 	locker = &map->lockers[nkey];
2449 
2450 	ASSERT(((locker->state == LOCKER_IS_BUSY) && (locker->pkt == pkt)) ||
2451 		((locker->state == LOCKER_IS_RSVD) && (locker->pkt == NULL)));
2452 
2453 	/* store contents in locker */
2454 	locker->dir = dir;
2455 	locker->pa = pa;
2456 	locker->len = (uint16)len; /* 16bit len */
2457 	locker->dmah = dmah; /* DMA map handle */
2458 	locker->secdma = secdma;
2459 	locker->pkttype = pkttype;
2460 	locker->pkt = pkt;
2461 	locker->state = LOCKER_IS_BUSY; /* make this locker busy */
2462 #ifdef DHD_MAP_PKTID_LOGGING
2463 	DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_map, pa, nkey, len, pkttype);
2464 #endif /* DHD_MAP_PKTID_LOGGING */
2465 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2466 }
2467 
2468 /**
2469  * dhd_pktid_map_alloc - Allocate a unique numbered key and save the packet
2470  * contents into the corresponding locker. Return the numbered key.
2471  */
2472 static uint32 BCMFASTPATH
2473 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
2474 	dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
2475 	dhd_pkttype_t pkttype)
2476 {
2477 	uint32 nkey;
2478 
2479 	nkey = dhd_pktid_map_reserve(dhd, handle, pkt, pkttype);
2480 	if (nkey != DHD_PKTID_INVALID) {
2481 		dhd_pktid_map_save(dhd, handle, pkt, nkey, pa,
2482 			len, dir, dmah, secdma, pkttype);
2483 	}
2484 
2485 	return nkey;
2486 }
2487 
2488 /**
2489  * dhd_pktid_map_free - Given a numbered key, return the locker contents.
2490  * dhd_pktid_map_free() is not reentrant; serializing calls is the caller's responsibility.
2491  * Caller may not free a pktid value DHD_PKTID_INVALID or an arbitrary pktid
2492  * value. Only a previously allocated pktid may be freed.
2493  */
2494 static void * BCMFASTPATH
2495 dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey,
2496 	dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma, dhd_pkttype_t pkttype,
2497 	bool rsv_locker)
2498 {
2499 	dhd_pktid_map_t *map;
2500 	dhd_pktid_item_t *locker;
2501 	void * pkt;
2502 	unsigned long long locker_addr;
2503 	unsigned long flags;
2504 
2505 	ASSERT(handle != NULL);
2506 
2507 	map = (dhd_pktid_map_t *)handle;
2508 
2509 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2510 
2511 	if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
2512 		DHD_ERROR(("%s:%d: Error! Try to free invalid pktid<%u>, pkttype<%d>\n",
2513 		           __FUNCTION__, __LINE__, nkey, pkttype));
2514 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2515 #ifdef DHD_FW_COREDUMP
2516 		if (dhd->memdump_enabled) {
2517 			/* collect core dump */
2518 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2519 			dhd_bus_mem_dump(dhd);
2520 		}
2521 #else
2522 		ASSERT(0);
2523 #endif /* DHD_FW_COREDUMP */
2524 		return NULL;
2525 	}
2526 
2527 	locker = &map->lockers[nkey];
2528 
2529 #if defined(DHD_PKTID_AUDIT_MAP)
2530 	DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* Audit duplicate FREE */
2531 #endif /* DHD_PKTID_AUDIT_MAP */
2532 
2533 	/* Debug check for cloned numbered key */
2534 	if (locker->state == LOCKER_IS_FREE) {
2535 		DHD_ERROR(("%s:%d: Error! freeing already freed invalid pktid<%u>\n",
2536 		           __FUNCTION__, __LINE__, nkey));
2537 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2538 #ifdef DHD_FW_COREDUMP
2539 		if (dhd->memdump_enabled) {
2540 			/* collect core dump */
2541 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2542 			dhd_bus_mem_dump(dhd);
2543 		}
2544 #else
2545 		ASSERT(0);
2546 #endif /* DHD_FW_COREDUMP */
2547 		return NULL;
2548 	}
2549 
2550 	/* Check the colour of the buffer, i.e. a buffer posted for TX
2551 	 * should be freed on TX completion. Similarly, a buffer posted for
2552 	 * IOCTL should be freed on IOCTL completion, etc.
2553 	 */
2554 	if ((pkttype != PKTTYPE_NO_CHECK) && (locker->pkttype != pkttype)) {
2555 
2556 		DHD_ERROR(("%s:%d: Error! Invalid Buffer Free for pktid<%u> \n",
2557 			__FUNCTION__, __LINE__, nkey));
2558 #ifdef BCMDMA64OSL
2559 		PHYSADDRTOULONG(locker->pa, locker_addr);
2560 #else
2561 		locker_addr = PHYSADDRLO(locker->pa);
2562 #endif /* BCMDMA64OSL */
2563 		DHD_ERROR(("%s:%d: locker->state <%d>, locker->pkttype <%d>,"
2564 			"pkttype <%d> locker->pa <0x%llx> \n",
2565 			__FUNCTION__, __LINE__, locker->state, locker->pkttype,
2566 			pkttype, locker_addr));
2567 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2568 #ifdef DHD_FW_COREDUMP
2569 		if (dhd->memdump_enabled) {
2570 			/* collect core dump */
2571 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2572 			dhd_bus_mem_dump(dhd);
2573 		}
2574 #else
2575 		ASSERT(0);
2576 #endif /* DHD_FW_COREDUMP */
2577 		return NULL;
2578 	}
2579 
2580 	if (rsv_locker == DHD_PKTID_FREE_LOCKER) {
2581 		map->avail++;
2582 		map->keys[map->avail] = nkey; /* make this numbered key available */
2583 		locker->state = LOCKER_IS_FREE; /* open and free Locker */
2584 	} else {
2585 		/* pktid will be reused, but the locker does not have a valid pkt */
2586 		locker->state = LOCKER_IS_RSVD;
2587 	}
2588 
2589 #if defined(DHD_PKTID_AUDIT_MAP)
2590 	DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
2591 #endif /* DHD_PKTID_AUDIT_MAP */
2592 #ifdef DHD_MAP_PKTID_LOGGING
2593 	DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap, locker->pa, nkey,
2594 		(uint32)locker->len, pkttype);
2595 #endif /* DHD_MAP_PKTID_LOGGING */
2596 
2597 	*pa = locker->pa; /* return contents of locker */
2598 	*len = (uint32)locker->len;
2599 	*dmah = locker->dmah;
2600 	*secdma = locker->secdma;
2601 
2602 	pkt = locker->pkt;
2603 	locker->pkt = NULL; /* Clear pkt */
2604 	locker->len = 0;
2605 
2606 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2607 
2608 	return pkt;
2609 }
2610 
2611 #else /* ! DHD_PCIE_PKTID */
2612 
2613 typedef struct pktlist {
2614 	PKT_LIST *tx_pkt_list;		/* list for tx packets */
2615 	PKT_LIST *rx_pkt_list;		/* list for rx packets */
2616 	PKT_LIST *ctrl_pkt_list;	/* list for ioctl/event buf post */
2617 } pktlists_t;
2618 
2619 /*
2620  * Given that each workitem only uses a 32bit pktid, only 32bit hosts may avail
2621  * of a one-to-one mapping between a 32bit pktptr and a 32bit pktid.
2622  *
2623  * - When PKTIDMAP is not used, DHD_NATIVE_TO_PKTID variants will never fail.
2624  * - Neither DHD_NATIVE_TO_PKTID nor DHD_PKTID_TO_NATIVE need to be protected by
2625  *   a lock.
2626  * - Hence DHD_PKTID_INVALID is not defined when DHD_PCIE_PKTID is undefined.
2627  */
2628 #define DHD_PKTID32(pktptr32)	((uint32)(pktptr32))
2629 #define DHD_PKTPTR32(pktid32)	((void *)(pktid32))
2630 
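/*
 * Editor's note: with the identity mapping above, the round trip is trivial
 * (placeholder variables):
 *
 *	uint32 pktid32 = DHD_PKTID32(pktptr32);
 *	void *ptr = DHD_PKTPTR32(pktid32);	// ptr == pktptr32
 */
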
2631 static INLINE uint32 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
2632 	dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
2633 	dhd_pkttype_t pkttype);
2634 static INLINE void * dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
2635 	dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
2636 	dhd_pkttype_t pkttype);
2637 
2638 static dhd_pktid_map_handle_t *
2639 dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
2640 {
2641 	osl_t *osh = dhd->osh;
2642 	pktlists_t *handle = NULL;
2643 
2644 	if ((handle = (pktlists_t *) MALLOCZ(osh, sizeof(pktlists_t))) == NULL) {
2645 		DHD_ERROR(("%s:%d: MALLOC failed for lists allocation, size=%d\n",
2646 		           __FUNCTION__, __LINE__, sizeof(pktlists_t)));
2647 		goto error_done;
2648 	}
2649 
2650 	if ((handle->tx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
2651 		DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
2652 		           __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
2653 		goto error;
2654 	}
2655 
2656 	if ((handle->rx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
2657 		DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
2658 		           __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
2659 		goto error;
2660 	}
2661 
2662 	if ((handle->ctrl_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
2663 		DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
2664 		           __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
2665 		goto error;
2666 	}
2667 
2668 	PKTLIST_INIT(handle->tx_pkt_list);
2669 	PKTLIST_INIT(handle->rx_pkt_list);
2670 	PKTLIST_INIT(handle->ctrl_pkt_list);
2671 
2672 	return (dhd_pktid_map_handle_t *) handle;
2673 
2674 error:
2675 	if (handle->ctrl_pkt_list) {
2676 		MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
2677 	}
2678 
2679 	if (handle->rx_pkt_list) {
2680 		MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
2681 	}
2682 
2683 	if (handle->tx_pkt_list) {
2684 		MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
2685 	}
2686 
2687 	if (handle) {
2688 		MFREE(osh, handle, sizeof(pktlists_t));
2689 	}
2690 
2691 error_done:
2692 	return (dhd_pktid_map_handle_t *)NULL;
2693 }
2694 
2695 static void
2696 dhd_pktid_map_reset(dhd_pub_t *dhd, pktlists_t *handle)
2697 {
2698 	osl_t *osh = dhd->osh;
2699 
2700 	if (handle->ctrl_pkt_list) {
2701 		PKTLIST_FINI(handle->ctrl_pkt_list);
2702 		MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
2703 	}
2704 
2705 	if (handle->rx_pkt_list) {
2706 		PKTLIST_FINI(handle->rx_pkt_list);
2707 		MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
2708 	}
2709 
2710 	if (handle->tx_pkt_list) {
2711 		PKTLIST_FINI(handle->tx_pkt_list);
2712 		MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
2713 	}
2714 }
2715 
2716 static void
2717 dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map)
2718 {
2719 	osl_t *osh = dhd->osh;
2720 	pktlists_t *handle = (pktlists_t *) map;
2721 
2722 	ASSERT(handle != NULL);
2723 	if (handle == (pktlists_t *)NULL) {
2724 		return;
2725 	}
2726 
2727 	dhd_pktid_map_reset(dhd, handle);
2728 
2729 	if (handle) {
2730 		MFREE(osh, handle, sizeof(pktlists_t));
2731 	}
2732 }
2733 
2734 /** Save dma parameters into the packet's pkttag and convert a pktptr to pktid */
2735 static INLINE uint32
2736 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
2737 	dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
2738 	dhd_pkttype_t pkttype)
2739 {
2740 	pktlists_t *handle = (pktlists_t *) map;
2741 	ASSERT(pktptr32 != NULL);
2742 	DHD_PKT_SET_DMA_LEN(pktptr32, dma_len);
2743 	DHD_PKT_SET_DMAH(pktptr32, dmah);
2744 	DHD_PKT_SET_PA(pktptr32, pa);
2745 	DHD_PKT_SET_SECDMA(pktptr32, secdma);
2746 
2747 	if (pkttype == PKTTYPE_DATA_TX) {
2748 		PKTLIST_ENQ(handle->tx_pkt_list,  pktptr32);
2749 	} else if (pkttype == PKTTYPE_DATA_RX) {
2750 		PKTLIST_ENQ(handle->rx_pkt_list,  pktptr32);
2751 	} else {
2752 		PKTLIST_ENQ(handle->ctrl_pkt_list,  pktptr32);
2753 	}
2754 
2755 	return DHD_PKTID32(pktptr32);
2756 }
2757 
2758 /** Convert a pktid to pktptr and retrieve saved dma parameters from packet */
2759 static INLINE void *
2760 dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
2761 	dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
2762 	dhd_pkttype_t pkttype)
2763 {
2764 	pktlists_t *handle = (pktlists_t *) map;
2765 	void *pktptr32;
2766 
2767 	ASSERT(pktid32 != 0U);
2768 	pktptr32 = DHD_PKTPTR32(pktid32);
2769 	*dma_len = DHD_PKT_GET_DMA_LEN(pktptr32);
2770 	*dmah = DHD_PKT_GET_DMAH(pktptr32);
2771 	*pa = DHD_PKT_GET_PA(pktptr32);
2772 	*secdma = DHD_PKT_GET_SECDMA(pktptr32);
2773 
2774 	if (pkttype == PKTTYPE_DATA_TX) {
2775 		PKTLIST_UNLINK(handle->tx_pkt_list,  pktptr32);
2776 	} else if (pkttype == PKTTYPE_DATA_RX) {
2777 		PKTLIST_UNLINK(handle->rx_pkt_list,  pktptr32);
2778 	} else {
2779 		PKTLIST_UNLINK(handle->ctrl_pkt_list,  pktptr32);
2780 	}
2781 
2782 	return pktptr32;
2783 }
2784 
2785 #define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype)  DHD_PKTID32(pkt)
2786 
2787 #define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dma_dir, dmah, secdma, pkttype) \
2788 	({ BCM_REFERENCE(dhd); BCM_REFERENCE(nkey); BCM_REFERENCE(dma_dir); \
2789 	   dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
2790 			   (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
2791 	})
2792 
2793 #define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dma_dir, dmah, secdma, pkttype) \
2794 	({ BCM_REFERENCE(dhd); BCM_REFERENCE(dma_dir); \
2795 	   dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
2796 			   (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
2797 	})
2798 
2799 #define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
2800 	({ BCM_REFERENCE(dhd); BCM_REFERENCE(pkttype);	\
2801 		dhd_pktid_to_native((dhd_pktid_map_handle_t *) map, (uint32)(pktid), \
2802 				(dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
2803 				(void **)&secdma, (dhd_pkttype_t)(pkttype)); \
2804 	})
2805 
2806 #define DHD_PKTID_AVAIL(map)  (~0)
2807 
2808 #endif /* ! DHD_PCIE_PKTID */
2809 
2810 /* +------------------ End of PCIE DHD PKTID MAPPER  -----------------------+ */
2811 
2812 /**
2813  * The PCIE FD protocol layer is constructed in two phases:
2814  *    Phase 1. dhd_prot_attach()
2815  *    Phase 2. dhd_prot_init()
2816  *
2817  * dhd_prot_attach() - Allocates a dhd_prot_t object and resets all its fields.
2818  * All common rings are also attached (msgbuf_ring_t objects are allocated
2819  * with DMA-able buffers).
2820  * All dhd_dma_buf_t objects are also allocated here.
2821  *
2822  * As dhd_prot_attach is invoked before the pcie_shared object is read, any
2823  * initialization of objects that requires information advertized by the dongle
2824  * may not be performed here.
2825  * E.g. the number of TxPost flowrings is not known at this point, nor do
2826  * we know which form of D2H DMA sync mechanism is advertized by the dongle, or
2827  * whether the dongle supports DMA-ing of WR/RD indices for the H2D and/or D2H
2828  * rings (common + flow).
2829  *
2830  * dhd_prot_init() is invoked after the bus layer has fetched the information
2831  * advertized by the dongle in the pcie_shared_t.
2832  */
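/*
 * A minimal sketch of the resulting bring-up order (assuming the PCIe bus
 * layer drives these calls; exact caller names vary):
 *
 *   dhd_prot_attach(dhd);   // phase 1: allocate common rings + dma_bufs
 *   ...download firmware, fetch pcie_shared_t from dongle memory...
 *   dhd_prot_init(dhd);     // phase 2: apply dongle-advertized features
 */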
2833 int
2834 dhd_prot_attach(dhd_pub_t *dhd)
2835 {
2836 	osl_t *osh = dhd->osh;
2837 	dhd_prot_t *prot;
2838 
2839 	/* FW is going to DMA extended trap data, so allocate a buffer
2840 	 * for the maximum extended trap data length.
2841 	 */
2842 	uint32 trap_buf_len = BCMPCIE_EXT_TRAP_DATA_MAXLEN;
2843 
2844 	/* Allocate prot structure */
2845 	if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT,
2846 		sizeof(dhd_prot_t)))) {
2847 		DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
2848 		goto fail;
2849 	}
2850 	memset(prot, 0, sizeof(*prot));
2851 
2852 	prot->osh = osh;
2853 	dhd->prot = prot;
2854 
2855 	/* Is DMA'ing of ring WR/RD index updates supported? FALSE by default */
2856 	dhd->dma_d2h_ring_upd_support = FALSE;
2857 	dhd->dma_h2d_ring_upd_support = FALSE;
2858 	dhd->dma_ring_upd_overwrite = FALSE;
2859 
2860 	dhd->hwa_inited = 0;
2861 	dhd->idma_inited = 0;
2862 	dhd->ifrm_inited = 0;
2863 	dhd->dar_inited = 0;
2864 
2865 	/* Common Ring Allocations */
2866 
2867 	/* Ring  0: H2D Control Submission */
2868 	if (dhd_prot_ring_attach(dhd, &prot->h2dring_ctrl_subn, "h2dctrl",
2869 	        H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE,
2870 	        BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) != BCME_OK) {
2871 		DHD_ERROR(("%s: dhd_prot_ring_attach H2D Ctrl Submission failed\n",
2872 			__FUNCTION__));
2873 		goto fail;
2874 	}
2875 
2876 	/* Ring  1: H2D Receive Buffer Post */
2877 	if (dhd_prot_ring_attach(dhd, &prot->h2dring_rxp_subn, "h2drxp",
2878 	        H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE,
2879 	        BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT) != BCME_OK) {
2880 		DHD_ERROR(("%s: dhd_prot_ring_attach H2D RxPost failed\n",
2881 			__FUNCTION__));
2882 		goto fail;
2883 	}
2884 
2885 	/* Ring  2: D2H Control Completion */
2886 	if (dhd_prot_ring_attach(dhd, &prot->d2hring_ctrl_cpln, "d2hctrl",
2887 	        D2HRING_CTRL_CMPLT_MAX_ITEM, D2HRING_CTRL_CMPLT_ITEMSIZE,
2888 	        BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE) != BCME_OK) {
2889 		DHD_ERROR(("%s: dhd_prot_ring_attach D2H Ctrl Completion failed\n",
2890 			__FUNCTION__));
2891 		goto fail;
2892 	}
2893 
2894 	/* Ring  3: D2H Transmit Complete */
2895 	if (dhd_prot_ring_attach(dhd, &prot->d2hring_tx_cpln, "d2htxcpl",
2896 	        D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE,
2897 	        BCMPCIE_D2H_MSGRING_TX_COMPLETE) != BCME_OK) {
2898 		DHD_ERROR(("%s: dhd_prot_ring_attach D2H Tx Completion failed\n",
2899 			__FUNCTION__));
2900 		goto fail;
2901 
2902 	}
2903 
2904 	/* Ring  4: D2H Receive Complete */
2905 	if (dhd_prot_ring_attach(dhd, &prot->d2hring_rx_cpln, "d2hrxcpl",
2906 	        D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE,
2907 	        BCMPCIE_D2H_MSGRING_RX_COMPLETE) != BCME_OK) {
2908 		DHD_ERROR(("%s: dhd_prot_ring_attach D2H Rx Completion failed\n",
2909 			__FUNCTION__));
2910 		goto fail;
2911 
2912 	}
2913 
2914 	/*
2915 	 * Max number of flowrings is not yet known. msgbuf_ring_t with DMA-able
2916 	 * buffers for flowrings will be instantiated in dhd_prot_init().
2917 	 * See dhd_prot_flowrings_pool_attach()
2918 	 */
2919 	/* ioctl response buffer */
2920 	if (dhd_dma_buf_alloc(dhd, &prot->retbuf, IOCT_RETBUF_SIZE)) {
2921 		goto fail;
2922 	}
2923 
2924 	/* IOCTL request buffer */
2925 	if (dhd_dma_buf_alloc(dhd, &prot->ioctbuf, IOCT_RETBUF_SIZE)) {
2926 		goto fail;
2927 	}
2928 
2929 	/* Host TS request buffer; one buffer for now */
2930 	if (dhd_dma_buf_alloc(dhd, &prot->hostts_req_buf, CTRLSUB_HOSTTS_MEESAGE_SIZE)) {
2931 		goto fail;
2932 	}
2933 	prot->hostts_req_buf_inuse = FALSE;
2934 
2935 	/* Scratch buffer for dma rx offset */
2936 #ifdef BCM_HOST_BUF
2937 	if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf,
2938 		ROUNDUP(DMA_D2H_SCRATCH_BUF_LEN, 16) + DMA_HOST_BUFFER_LEN)) {
2939 #else
2940 	if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN)) {
2941 
2942 #endif /* BCM_HOST_BUF */
2943 
2944 		goto fail;
2945 	}
2946 
2947 	/* scratch buffer bus throughput measurement */
2948 	if (dhd_dma_buf_alloc(dhd, &prot->host_bus_throughput_buf, DHD_BUS_TPUT_BUF_LEN)) {
2949 		goto fail;
2950 	}
2951 
2952 #ifdef DHD_RX_CHAINING
2953 	dhd_rxchain_reset(&prot->rxchain);
2954 #endif // endif
2955 
2956 	prot->pktid_ctrl_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_CTRL_PKTID);
2957 	if (prot->pktid_ctrl_map == NULL) {
2958 		goto fail;
2959 	}
2960 
2961 	prot->pktid_rx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_RX_PKTID);
2962 	if (prot->pktid_rx_map == NULL)
2963 		goto fail;
2964 
2965 	prot->pktid_tx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_TX_PKTID);
2966 	if (prot->pktid_tx_map == NULL)
2967 		goto fail;
2968 
2969 #ifdef IOCTLRESP_USE_CONSTMEM
2970 	prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd,
2971 		DHD_FLOWRING_MAX_IOCTLRESPBUF_POST);
2972 	if (prot->pktid_map_handle_ioctl == NULL) {
2973 		goto fail;
2974 	}
2975 #endif /* IOCTLRESP_USE_CONSTMEM */
2976 
2977 #ifdef DHD_MAP_PKTID_LOGGING
2978 	prot->pktid_dma_map = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
2979 	if (prot->pktid_dma_map == NULL) {
2980 		DHD_ERROR(("%s: failed to allocate pktid_dma_map\n",
2981 			__FUNCTION__));
2982 	}
2983 
2984 	prot->pktid_dma_unmap = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
2985 	if (prot->pktid_dma_unmap == NULL) {
2986 		DHD_ERROR(("%s: failed to allocate pktid_dma_unmap\n",
2987 			__FUNCTION__));
2988 	}
2989 #endif /* DHD_MAP_PKTID_LOGGING */
2990 
2991 	/* Initialize the work queues to be used by the Load Balancing logic */
2992 #if defined(DHD_LB_TXC)
2993 	{
2994 		void *buffer;
2995 		buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
2996 		if (buffer == NULL) {
2997 			DHD_ERROR(("%s: failed to allocate TXC work buffer\n", __FUNCTION__));
2998 			goto fail;
2999 		}
3000 		bcm_workq_init(&prot->tx_compl_prod, &prot->tx_compl_cons,
3001 			buffer, DHD_LB_WORKQ_SZ);
3002 		prot->tx_compl_prod_sync = 0;
3003 		DHD_INFO(("%s: created tx_compl_workq <%p,%d>\n",
3004 			__FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
3005 	   }
3006 #endif /* DHD_LB_TXC */
3007 
3008 #if defined(DHD_LB_RXC)
3009 	   {
3010 		void *buffer;
3011 		buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
3012 		if (buffer == NULL) {
3013 			DHD_ERROR(("%s: failed to allocate RXC work buffer\n", __FUNCTION__));
3014 			goto fail;
3015 		}
3016 		bcm_workq_init(&prot->rx_compl_prod, &prot->rx_compl_cons,
3017 			buffer, DHD_LB_WORKQ_SZ);
3018 		prot->rx_compl_prod_sync = 0;
3019 		DHD_INFO(("%s: created rx_compl_workq <%p,%d>\n",
3020 			__FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
3021 	   }
3022 #endif /* DHD_LB_RXC */
3023 
3024 	/* Initialize trap buffer */
3025 	if (dhd_dma_buf_alloc(dhd, &dhd->prot->fw_trap_buf, trap_buf_len)) {
3026 		DHD_ERROR(("%s: dhd_init_trap_buffer failed\n", __FUNCTION__));
3027 		goto fail;
3028 	}
3029 
3030 	return BCME_OK;
3031 
3032 fail:
3033 
3034 	if (prot) {
3035 		/* Free up all allocated memories */
3036 		dhd_prot_detach(dhd);
3037 	}
3038 
3039 	return BCME_NOMEM;
3040 } /* dhd_prot_attach */
3041 
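/**
 * dhd_alloc_host_scbs - allocate the host SCB block for the dongle.
 * The handshake, as implemented below: read the required size from the
 * HOST_SCB_ADDR shared location, allocate a DMA-able buffer of that size,
 * then write the buffer's physical address back to the same location.
 */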
3042 static int
3043 dhd_alloc_host_scbs(dhd_pub_t *dhd)
3044 {
3045 	int ret = BCME_OK;
3046 	sh_addr_t base_addr;
3047 	dhd_prot_t *prot = dhd->prot;
3048 	uint32 host_scb_size = 0;
3049 
3050 	if (dhd->hscb_enable) {
3051 		/* read number of bytes to allocate from F/W */
3052 		dhd_bus_cmn_readshared(dhd->bus, &host_scb_size, HOST_SCB_ADDR, 0);
3053 		if (host_scb_size) {
3054 			/* alloc array of host scbs */
3055 			ret = dhd_dma_buf_alloc(dhd, &prot->host_scb_buf, host_scb_size);
3056 			/* write host scb address to F/W */
3057 			if (ret == BCME_OK) {
3058 				dhd_base_addr_htolpa(&base_addr, prot->host_scb_buf.pa);
3059 				dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3060 					HOST_SCB_ADDR, 0);
3061 			} else {
3062 				DHD_TRACE(("dhd_alloc_host_scbs: dhd_dma_buf_alloc error\n"));
3063 			}
3064 		} else {
3065 			DHD_TRACE(("dhd_alloc_host_scbs: host_scb_size is 0.\n"));
3066 		}
3067 	} else {
3068 		DHD_TRACE(("dhd_alloc_host_scbs: Host scb not supported in F/W.\n"));
3069 	}
3070 
3071 	return ret;
3072 }
3073 
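/**
 * dhd_set_host_cap - assemble and publish the 32-bit host capability word.
 * Capability bits (HOSTCAP_*) are OR-ed into 'data' together with the
 * negotiated IPC version, then written to the dongle via the
 * HOST_API_VERSION shared-area entry, along with the trap buffer address.
 */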
3074 void
3075 dhd_set_host_cap(dhd_pub_t *dhd)
3076 {
3077 	uint32 data = 0;
3078 	dhd_prot_t *prot = dhd->prot;
3079 
3080 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
3081 		if (dhd->h2d_phase_supported) {
3082 			data |= HOSTCAP_H2D_VALID_PHASE;
3083 			if (dhd->force_dongletrap_on_bad_h2d_phase)
3084 				data |= HOSTCAP_H2D_ENABLE_TRAP_ON_BADPHASE;
3085 		}
3086 		if (prot->host_ipc_version > prot->device_ipc_version)
3087 			prot->active_ipc_version = prot->device_ipc_version;
3088 		else
3089 			prot->active_ipc_version = prot->host_ipc_version;
3090 
3091 		data |= prot->active_ipc_version;
3092 
3093 		if (dhdpcie_bus_get_pcie_hostready_supported(dhd->bus)) {
3094 			DHD_INFO(("Advertise Hostready Capability\n"));
3095 			data |= HOSTCAP_H2D_ENABLE_HOSTRDY;
3096 		}
3097 		{
3098 			/* Disable DS altogether */
3099 			data |= HOSTCAP_DS_NO_OOB_DW;
3100 			dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_NONE);
3101 		}
3102 
3103 		/* Indicate support for extended trap data */
3104 		data |= HOSTCAP_EXTENDED_TRAP_DATA;
3105 
3106 		/* Indicate support for TX status metadata */
3107 		if (dhd->pcie_txs_metadata_enable != 0)
3108 			data |= HOSTCAP_TXSTATUS_METADATA;
3109 
3110 		/* Enable fast delete ring in firmware if supported */
3111 		if (dhd->fast_delete_ring_support) {
3112 			data |= HOSTCAP_FAST_DELETE_RING;
3113 		}
3114 
3115 		if (dhdpcie_bus_get_pcie_hwa_supported(dhd->bus)) {
3116 			DHD_ERROR(("HWA inited\n"));
3117 			/* TODO: Is hostcap needed? */
3118 			dhd->hwa_inited = TRUE;
3119 		}
3120 
3121 		if (dhdpcie_bus_get_pcie_idma_supported(dhd->bus)) {
3122 			DHD_ERROR(("IDMA inited\n"));
3123 			data |= HOSTCAP_H2D_IDMA;
3124 			dhd->idma_inited = TRUE;
3125 		}
3126 
3127 		if (dhdpcie_bus_get_pcie_ifrm_supported(dhd->bus)) {
3128 			DHD_ERROR(("IFRM Inited\n"));
3129 			data |= HOSTCAP_H2D_IFRM;
3130 			dhd->ifrm_inited = TRUE;
3131 			dhd->dma_h2d_ring_upd_support = FALSE;
3132 			dhd_prot_dma_indx_free(dhd);
3133 		}
3134 
3135 		if (dhdpcie_bus_get_pcie_dar_supported(dhd->bus)) {
3136 			DHD_ERROR(("DAR doorbell Use\n"));
3137 			data |= HOSTCAP_H2D_DAR;
3138 			dhd->dar_inited = TRUE;
3139 		}
3140 
3141 		data |= HOSTCAP_UR_FW_NO_TRAP;
3142 
3143 		if (dhd->hscb_enable) {
3144 			data |= HOSTCAP_HSCB;
3145 		}
3146 
3147 #ifdef EWP_EDL
3148 		if (dhd->dongle_edl_support) {
3149 			data |= HOSTCAP_EDL_RING;
3150 			DHD_ERROR(("Enable EDL host cap\n"));
3151 		} else {
3152 			DHD_ERROR(("DO NOT SET EDL host cap\n"));
3153 		}
3154 #endif /* EWP_EDL */
3155 
3156 #ifdef DHD_HP2P
3157 		if (dhd->hp2p_capable) {
3158 			data |= HOSTCAP_PKT_TIMESTAMP;
3159 			data |= HOSTCAP_PKT_HP2P;
3160 			DHD_ERROR(("Enable HP2P in host cap\n"));
3161 		} else {
3162 			DHD_ERROR(("HP2P not enabled in host cap\n"));
3163 		}
3164 #endif // endif
3165 
3166 #ifdef DHD_DB0TS
3167 		if (dhd->db0ts_capable) {
3168 			data |= HOSTCAP_DB0_TIMESTAMP;
3169 			DHD_ERROR(("Enable DB0 TS in host cap\n"));
3170 		} else {
3171 			DHD_ERROR(("DB0 TS not enabled in host cap\n"));
3172 		}
3173 #endif /* DHD_DB0TS */
3174 		if (dhd->extdtxs_in_txcpl) {
3175 			DHD_ERROR(("Enable hostcap: EXTD TXS in txcpl\n"));
3176 			data |= HOSTCAP_PKT_TXSTATUS;
3177 		}
3178 		else {
3179 			DHD_ERROR(("Not enabling hostcap: EXTD TXS in txcpl\n"));
3180 		}
3181 
3182 		DHD_INFO(("%s:Active Ver:%d, Host Ver:%d, FW Ver:%d\n",
3183 			__FUNCTION__,
3184 			prot->active_ipc_version, prot->host_ipc_version,
3185 			prot->device_ipc_version));
3186 
3187 		dhd_bus_cmn_writeshared(dhd->bus, &data, sizeof(uint32), HOST_API_VERSION, 0);
3188 		dhd_bus_cmn_writeshared(dhd->bus, &prot->fw_trap_buf.pa,
3189 			sizeof(prot->fw_trap_buf.pa), DNGL_TO_HOST_TRAP_ADDR, 0);
3190 	}
3191 
3192 }
3193 
3194 /**
3195  * dhd_prot_init - second stage of dhd_prot_attach. Now that the dongle has
3196  * completed its initialization of the pcie_shared structure, we may now fetch
3197  * the dongle advertized features and adjust the protocol layer accordingly.
3198  *
3199  * dhd_prot_init() may be invoked again after a dhd_prot_reset().
3200  */
3201 int
3202 dhd_prot_init(dhd_pub_t *dhd)
3203 {
3204 	sh_addr_t base_addr;
3205 	dhd_prot_t *prot = dhd->prot;
3206 	int ret = 0;
3207 	uint32 idmacontrol;
3208 	uint32 waitcount = 0;
3209 
3210 #ifdef WL_MONITOR
3211 	dhd->monitor_enable = FALSE;
3212 #endif /* WL_MONITOR */
3213 
3214 	/**
3215 	 * A user defined value can be assigned to the global variable h2d_max_txpost via
3216 	 * 1. the DHD IOVAR h2d_max_txpost, before firmware download, or
3217 	 * 2. the module parameter h2d_max_txpost.
3218 	 * If the user has not set a value by either method, prot->h2d_max_txpost
3219 	 * defaults to H2DRING_TXPOST_MAX_ITEM.
3220 	 */
3221 	prot->h2d_max_txpost = (uint16)h2d_max_txpost;
3222 
3223 	DHD_ERROR(("%s:%d: h2d_max_txpost = %d\n", __FUNCTION__, __LINE__, prot->h2d_max_txpost));
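	/*
	 * Example of setting it (illustrative only; the module file name and
	 * the use of the dhd utility are assumptions):
	 *     insmod bcmdhd.ko h2d_max_txpost=512
	 * or, via the IOVAR before firmware download:
	 *     dhd -i wlan0 h2d_max_txpost 512
	 */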
3224 
3225 	/* Read max rx packets supported by dongle */
3226 	dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0);
3227 	if (prot->max_rxbufpost == 0) {
3228 		/* This would happen if the dongle firmware is not
3229 		 * using the latest shared structure template */
3230 		prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST;
3231 	}
3232 	DHD_ERROR(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));
3233 
3234 	/* Initialize.  bzero() would blow away the dma pointers. */
3235 	prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST;
3236 	prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST;
3237 	prot->max_infobufpost = DHD_H2D_INFORING_MAX_BUF_POST;
3238 	prot->max_tsbufpost = DHD_MAX_TSBUF_POST;
3239 
3240 	prot->cur_ioctlresp_bufs_posted = 0;
3241 	OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
3242 	prot->data_seq_no = 0;
3243 	prot->ioctl_seq_no = 0;
3244 	prot->rxbufpost = 0;
3245 	prot->cur_event_bufs_posted = 0;
3246 	prot->ioctl_state = 0;
3247 	prot->curr_ioctl_cmd = 0;
3248 	prot->cur_ts_bufs_posted = 0;
3249 	prot->infobufpost = 0;
3250 
3251 	prot->dmaxfer.srcmem.va = NULL;
3252 	prot->dmaxfer.dstmem.va = NULL;
3253 	prot->dmaxfer.in_progress = FALSE;
3254 
3255 	prot->metadata_dbg = FALSE;
3256 	prot->rx_metadata_offset = 0;
3257 	prot->tx_metadata_offset = 0;
3258 	prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT;
3259 
3260 	/* To catch any rollover issues fast, starting with higher ioctl_trans_id */
3261 	prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
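	/*
	 * E.g. if ioctl_trans_id is 16 bits wide this starts the counter at
	 * 0xFFFF - BUFFER_BEFORE_ROLLOVER (illustrative width; MAXBITVAL is
	 * assumed to yield the all-ones value for the field's bit width), so
	 * a rollover bug would surface within the first few ioctls.
	 */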
3262 	prot->ioctl_state = 0;
3263 	prot->ioctl_status = 0;
3264 	prot->ioctl_resplen = 0;
3265 	prot->ioctl_received = IOCTL_WAIT;
3266 
3267 	/* Initialize Common MsgBuf Rings */
3268 
3269 	prot->device_ipc_version = dhd->bus->api.fw_rev;
3270 	prot->host_ipc_version = PCIE_SHARED_VERSION;
3271 	prot->no_tx_resource = FALSE;
3272 
3273 	/* Init the host API version */
3274 	dhd_set_host_cap(dhd);
3275 
3276 	/* alloc and configure scb host address for dongle */
3277 	if ((ret = dhd_alloc_host_scbs(dhd))) {
3278 		return ret;
3279 	}
3280 
3281 	/* Register the interrupt function upfront */
3282 	/* remove corerev checks in data path */
3283 	/* do this after host/fw negotiation for DAR */
3284 	prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus);
3285 	prot->mb_2_ring_fn = dhd_bus_get_mbintr_2_fn(dhd->bus);
3286 
3287 	dhd->bus->_dar_war = (dhd->bus->sih->buscorerev < 64) ? TRUE : FALSE;
3288 
3289 	dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn);
3290 	dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn);
3291 	dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln);
3292 
3293 	/* Make it compatible with pre-rev7 firmware */
3294 	if (prot->active_ipc_version < PCIE_SHARED_VERSION_7) {
3295 		prot->d2hring_tx_cpln.item_len =
3296 			D2HRING_TXCMPLT_ITEMSIZE_PREREV7;
3297 		prot->d2hring_rx_cpln.item_len =
3298 			D2HRING_RXCMPLT_ITEMSIZE_PREREV7;
3299 	}
3300 	dhd_prot_ring_init(dhd, &prot->d2hring_tx_cpln);
3301 	dhd_prot_ring_init(dhd, &prot->d2hring_rx_cpln);
3302 
3303 	dhd_prot_d2h_sync_init(dhd);
3304 
3305 	dhd_prot_h2d_sync_init(dhd);
3306 
3307 	/* init the scratch buffer */
3308 	dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_scratch_buf.pa);
3309 	dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3310 		D2H_DMA_SCRATCH_BUF, 0);
3311 	dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.len,
3312 		sizeof(prot->d2h_dma_scratch_buf.len), D2H_DMA_SCRATCH_BUF_LEN, 0);
3313 
3314 	/* If supported by the host, indicate the memory block
3315 	 * for completion writes / submission reads to shared space
3316 	 */
3317 	if (dhd->dma_d2h_ring_upd_support) {
3318 		dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_wr_buf.pa);
3319 		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3320 			D2H_DMA_INDX_WR_BUF, 0);
3321 		dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_rd_buf.pa);
3322 		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3323 			H2D_DMA_INDX_RD_BUF, 0);
3324 	}
3325 
3326 	if (dhd->dma_h2d_ring_upd_support || IDMA_ENAB(dhd)) {
3327 		dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_wr_buf.pa);
3328 		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3329 			H2D_DMA_INDX_WR_BUF, 0);
3330 		dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_rd_buf.pa);
3331 		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3332 			D2H_DMA_INDX_RD_BUF, 0);
3333 	}
3334 	/* Signal to the dongle that common ring init is complete */
3335 	if (dhd->hostrdy_after_init)
3336 		dhd_bus_hostready(dhd->bus);
3337 
3338 	/*
3339 	 * If the DMA-able buffers for flowring needs to come from a specific
3340 	 * contiguous memory region, then setup prot->flowrings_dma_buf here.
3341 	 * dhd_prot_flowrings_pool_attach() will carve out DMA-able buffers from
3342 	 * this contiguous memory region, for each of the flowrings.
3343 	 */
3344 
3345 	/* Pre-allocate pool of msgbuf_ring for flowrings */
3346 	if (dhd_prot_flowrings_pool_attach(dhd) != BCME_OK) {
3347 		return BCME_ERROR;
3348 	}
3349 
3350 	/* If IFRM is enabled, wait for FW to setup the DMA channel */
3351 	if (IFRM_ENAB(dhd)) {
3352 		dhd_base_addr_htolpa(&base_addr, prot->h2d_ifrm_indx_wr_buf.pa);
3353 		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3354 			H2D_IFRM_INDX_WR_BUF, 0);
3355 	}
3356 
3357 	/* If IDMA is enabled and initialized, wait for FW to setup the IDMA descriptors.
3358 	 * Wait just before configuring the doorbell.
3359 	 */
3360 #ifdef BCMQT
3361 #define	IDMA_ENABLE_WAIT  100
3362 #else
3363 #define	IDMA_ENABLE_WAIT  10
3364 #endif // endif
3365 	if (IDMA_ACTIVE(dhd)) {
3366 		/* Poll until the idma_en bit in the IDMAControl register is set, */
3367 		/* bounded by IDMA_ENABLE_WAIT iterations */
3368 		uint buscorerev = dhd->bus->sih->buscorerev;
3369 		idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
3370 			IDMAControl(buscorerev), 0, 0);
3371 		while (!(idmacontrol & PCIE_IDMA_MODE_EN(buscorerev)) &&
3372 			(waitcount++ < IDMA_ENABLE_WAIT)) {
3373 
3374 			DHD_ERROR(("iDMA not enabled yet, waiting 1 ms c=%d IDMAControl = %08x\n",
3375 				waitcount, idmacontrol));
3376 #ifdef BCMQT
3377 			OSL_DELAY(200000); /* 200msec for BCMQT  */
3378 #else
3379 			OSL_DELAY(1000); /* 1ms as its onetime only */
3380 #endif // endif
3381 			idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
3382 				IDMAControl(buscorerev), 0, 0);
3383 		}
3384 
3385 		if (waitcount < IDMA_ENABLE_WAIT) {
3386 			DHD_ERROR(("iDMA enabled PCIEControl = %08x\n", idmacontrol));
3387 		} else {
3388 			DHD_ERROR(("Error: wait for iDMA timed out wait=%d IDMAControl = %08x\n",
3389 				waitcount, idmacontrol));
3390 			return BCME_ERROR;
3391 		}
3392 	}
3393 
3394 	/* Host should configure soft doorbells if needed ... here */
3395 
3396 	/* Post to dongle host configured soft doorbells */
3397 	dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd);
3398 
3399 	dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
3400 	dhd_msgbuf_rxbuf_post_event_bufs(dhd);
3401 
3402 	prot->no_retry = FALSE;
3403 	prot->no_aggr = FALSE;
3404 	prot->fixed_rate = FALSE;
3405 
3406 	/*
3407 	 * Note that any communication with the Dongle should be added
3408 	 * below this point. Any other host data structure initialization that
3409 	 * needs to be done before the DPC starts executing should be done
3410 	 * above this point.
3411 	 * Once we start sending H2D requests to the Dongle, the Dongle may
3412 	 * respond immediately, so the DPC context handling the D2H
3413 	 * response could preempt the context in which dhd_prot_init is running.
3414 	 * We want to ensure that the host-side part of dhd_prot_init is
3415 	 * complete before that can happen.
3416 	 */
3417 
3418 	/* See if info rings can be created; info rings should be created
3419 	* only if the dongle does not support EDL
3420 	*/
3421 #ifdef EWP_EDL
3422 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support)
3423 #else
3424 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
3425 #endif /* EWP_EDL */
3426 	{
3427 		if ((ret = dhd_prot_init_info_rings(dhd)) != BCME_OK) {
3428 			/* For now log and proceed, further clean up action maybe necessary
3429 			 * when we have more clarity.
3430 			 */
3431 			DHD_ERROR(("%s Info rings couldn't be created: Err Code %d",
3432 				__FUNCTION__, ret));
3433 		}
3434 	}
3435 
3436 #ifdef EWP_EDL
3437 	/* Create Enhanced Debug Lane rings (EDL) if dongle supports it */
3438 	if (dhd->dongle_edl_support) {
3439 		if ((ret = dhd_prot_init_edl_rings(dhd)) != BCME_OK) {
3440 			DHD_ERROR(("%s EDL rings couldn't be created: Err Code %d",
3441 				__FUNCTION__, ret));
3442 		}
3443 	}
3444 #endif /* EWP_EDL */
3445 
3446 #ifdef DHD_HP2P
3447 	/* create HPP txcmpl/rxcmpl rings */
3448 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_7 && dhd->hp2p_capable) {
3449 		if ((ret = dhd_prot_init_hp2p_rings(dhd)) != BCME_OK) {
3450 			/* For now log and proceed, further clean up action maybe necessary
3451 			 * when we have more clarity.
3452 			 */
3453 			DHD_ERROR(("%s HP2P rings couldn't be created: Err Code %d",
3454 				__FUNCTION__, ret));
3455 		}
3456 	}
3457 #endif /* DHD_HP2P */
3458 
3459 	return BCME_OK;
3460 } /* dhd_prot_init */
3461 
3462 /**
3463  * dhd_prot_detach - PCIE FD protocol layer destructor.
3464  * Unlink, frees allocated protocol memory (including dhd_prot)
3465  */
3466 void dhd_prot_detach(dhd_pub_t *dhd)
3467 {
3468 	dhd_prot_t *prot = dhd->prot;
3469 
3470 	/* Stop the protocol module */
3471 	if (prot) {
3472 
3473 		/* free up all DMA-able buffers allocated during prot attach/init */
3474 
3475 		dhd_dma_buf_free(dhd, &prot->d2h_dma_scratch_buf);
3476 		dhd_dma_buf_free(dhd, &prot->retbuf);
3477 		dhd_dma_buf_free(dhd, &prot->ioctbuf);
3478 		dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf);
3479 		dhd_dma_buf_free(dhd, &prot->hostts_req_buf);
3480 		dhd_dma_buf_free(dhd, &prot->fw_trap_buf);
3481 		dhd_dma_buf_free(dhd, &prot->host_scb_buf);
3482 
3483 		/* DMA-able buffers for DMAing H2D/D2H WR/RD indices */
3484 		dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
3485 		dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_rd_buf);
3486 		dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_wr_buf);
3487 		dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
3488 
3489 		dhd_dma_buf_free(dhd, &prot->h2d_ifrm_indx_wr_buf);
3490 
3491 		/* Common MsgBuf Rings */
3492 		dhd_prot_ring_detach(dhd, &prot->h2dring_ctrl_subn);
3493 		dhd_prot_ring_detach(dhd, &prot->h2dring_rxp_subn);
3494 		dhd_prot_ring_detach(dhd, &prot->d2hring_ctrl_cpln);
3495 		dhd_prot_ring_detach(dhd, &prot->d2hring_tx_cpln);
3496 		dhd_prot_ring_detach(dhd, &prot->d2hring_rx_cpln);
3497 
3498 		/* Detach each DMA-able buffer and free the pool of msgbuf_ring_t */
3499 		dhd_prot_flowrings_pool_detach(dhd);
3500 
3501 		/* detach info rings */
3502 		dhd_prot_detach_info_rings(dhd);
3503 
3504 #ifdef EWP_EDL
3505 		dhd_prot_detach_edl_rings(dhd);
3506 #endif // endif
3507 #ifdef DHD_HP2P
3508 		/* detach HPP rings */
3509 		dhd_prot_detach_hp2p_rings(dhd);
3510 #endif /* DHD_HP2P */
3511 
3512 		/* If IOCTLRESP_USE_CONSTMEM is defined, IOCTL PKTs use the
3513 		 * pktid_map_handle_ioctl handler and PKT memory is allocated using
3514 		 * alloc_ioctl_return_buffer(); such PKTs are freed using
3515 		 * DHD_NATIVE_TO_PKTID_FINI_IOCTL, which calls free_ioctl_return_buffer().
3516 		 * Otherwise IOCTL PKTs are part of the pktid_ctrl_map handler and PKT
3517 		 * memory is allocated using PKTGET_STATIC (if DHD_USE_STATIC_CTRLBUF is
3518 		 * defined) or PKTGET, and freed via DHD_NATIVE_TO_PKTID_FINI, which calls
3519 		 * PKTFREE_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) or PKTFREE.
3520 		 */
3521 		DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_ctrl_map);
3522 		DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_rx_map);
3523 		DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_tx_map);
3524 #ifdef IOCTLRESP_USE_CONSTMEM
3525 		DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl);
3526 #endif // endif
3527 #ifdef DHD_MAP_PKTID_LOGGING
3528 		DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_map);
3529 		DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_unmap);
3530 #endif /* DHD_MAP_PKTID_LOGGING */
3531 
3532 #if defined(DHD_LB_TXC)
3533 		if (prot->tx_compl_prod.buffer)
3534 			MFREE(dhd->osh, prot->tx_compl_prod.buffer,
3535 			      sizeof(void*) * DHD_LB_WORKQ_SZ);
3536 #endif /* DHD_LB_TXC */
3537 #if defined(DHD_LB_RXC)
3538 		if (prot->rx_compl_prod.buffer)
3539 			MFREE(dhd->osh, prot->rx_compl_prod.buffer,
3540 			      sizeof(void*) * DHD_LB_WORKQ_SZ);
3541 #endif /* DHD_LB_RXC */
3542 
3543 		DHD_OS_PREFREE(dhd, dhd->prot, sizeof(dhd_prot_t));
3544 
3545 		dhd->prot = NULL;
3546 	}
3547 } /* dhd_prot_detach */
3548 
3549 /**
3550  * dhd_prot_reset - Reset the protocol layer without freeing any objects.
3551  * This may be invoked to soft reboot the dongle, without having to
3552  * detach and attach the entire protocol layer.
3553  *
3554  * After dhd_prot_reset(), dhd_prot_init() may be invoked
3555  * without going through a dhd_prot_attach() phase.
3556  */
3557 void
3558 dhd_prot_reset(dhd_pub_t *dhd)
3559 {
3560 	struct dhd_prot *prot = dhd->prot;
3561 
3562 	DHD_TRACE(("%s\n", __FUNCTION__));
3563 
3564 	if (prot == NULL) {
3565 		return;
3566 	}
3567 
3568 	dhd_prot_flowrings_pool_reset(dhd);
3569 
3570 	/* Reset Common MsgBuf Rings */
3571 	dhd_prot_ring_reset(dhd, &prot->h2dring_ctrl_subn);
3572 	dhd_prot_ring_reset(dhd, &prot->h2dring_rxp_subn);
3573 	dhd_prot_ring_reset(dhd, &prot->d2hring_ctrl_cpln);
3574 	dhd_prot_ring_reset(dhd, &prot->d2hring_tx_cpln);
3575 	dhd_prot_ring_reset(dhd, &prot->d2hring_rx_cpln);
3576 
3577 	/* Reset info rings */
3578 	if (prot->h2dring_info_subn) {
3579 		dhd_prot_ring_reset(dhd, prot->h2dring_info_subn);
3580 	}
3581 
3582 	if (prot->d2hring_info_cpln) {
3583 		dhd_prot_ring_reset(dhd, prot->d2hring_info_cpln);
3584 	}
3585 #ifdef EWP_EDL
3586 	if (prot->d2hring_edl) {
3587 		dhd_prot_ring_reset(dhd, prot->d2hring_edl);
3588 	}
3589 #endif /* EWP_EDL */
3590 
3591 	/* Reset all DMA-able buffers allocated during prot attach */
3592 	dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf);
3593 	dhd_dma_buf_reset(dhd, &prot->retbuf);
3594 	dhd_dma_buf_reset(dhd, &prot->ioctbuf);
3595 	dhd_dma_buf_reset(dhd, &prot->host_bus_throughput_buf);
3596 	dhd_dma_buf_reset(dhd, &prot->hostts_req_buf);
3597 	dhd_dma_buf_reset(dhd, &prot->fw_trap_buf);
3598 	dhd_dma_buf_reset(dhd, &prot->host_scb_buf);
3599 
3600 	dhd_dma_buf_reset(dhd, &prot->h2d_ifrm_indx_wr_buf);
3601 
3602 	/* Reset all DMA-able buffers for DMAing H2D/D2H WR/RD indices */
3603 	dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf);
3604 	dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf);
3605 	dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf);
3606 	dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_wr_buf);
3607 
3608 	prot->rx_metadata_offset = 0;
3609 	prot->tx_metadata_offset = 0;
3610 
3611 	prot->rxbufpost = 0;
3612 	prot->cur_event_bufs_posted = 0;
3613 	prot->cur_ioctlresp_bufs_posted = 0;
3614 
3615 	OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
3616 	prot->data_seq_no = 0;
3617 	prot->ioctl_seq_no = 0;
3618 	prot->ioctl_state = 0;
3619 	prot->curr_ioctl_cmd = 0;
3620 	prot->ioctl_received = IOCTL_WAIT;
3621 	/* To catch any rollover issues fast, starting with higher ioctl_trans_id */
3622 	prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
3623 
3624 	/* dhd_flow_rings_init is located at dhd_bus_start,
3625 	 * so when stopping bus, flowrings shall be deleted
3626 	 */
3627 	if (dhd->flow_rings_inited) {
3628 		dhd_flow_rings_deinit(dhd);
3629 	}
3630 
3631 #ifdef DHD_HP2P
3632 	if (prot->d2hring_hp2p_txcpl) {
3633 		dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_txcpl);
3634 	}
3635 	if (prot->d2hring_hp2p_rxcpl) {
3636 		dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_rxcpl);
3637 	}
3638 #endif /* DHD_HP2P */
3639 
3640 	/* Reset PKTID map */
3641 	DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_ctrl_map);
3642 	DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_rx_map);
3643 	DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_tx_map);
3644 #ifdef IOCTLRESP_USE_CONSTMEM
3645 	DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, prot->pktid_map_handle_ioctl);
3646 #endif /* IOCTLRESP_USE_CONSTMEM */
3647 #ifdef DMAMAP_STATS
3648 	dhd->dma_stats.txdata = dhd->dma_stats.txdata_sz = 0;
3649 	dhd->dma_stats.rxdata = dhd->dma_stats.rxdata_sz = 0;
3650 #ifndef IOCTLRESP_USE_CONSTMEM
3651 	dhd->dma_stats.ioctl_rx = dhd->dma_stats.ioctl_rx_sz = 0;
3652 #endif /* IOCTLRESP_USE_CONSTMEM */
3653 	dhd->dma_stats.event_rx = dhd->dma_stats.event_rx_sz = 0;
3654 	dhd->dma_stats.info_rx = dhd->dma_stats.info_rx_sz = 0;
3655 	dhd->dma_stats.tsbuf_rx = dhd->dma_stats.tsbuf_rx_sz = 0;
3656 #endif /* DMAMAP_STATS */
3657 } /* dhd_prot_reset */
3658 
3659 #if defined(DHD_LB_RXP)
3660 #define DHD_LB_DISPATCH_RX_PROCESS(dhdp)	dhd_lb_dispatch_rx_process(dhdp)
3661 #else /* !DHD_LB_RXP */
3662 #define DHD_LB_DISPATCH_RX_PROCESS(dhdp)	do { /* noop */ } while (0)
3663 #endif /* !DHD_LB_RXP */
3664 
3665 #if defined(DHD_LB_RXC)
3666 #define DHD_LB_DISPATCH_RX_COMPL(dhdp)	dhd_lb_dispatch_rx_compl(dhdp)
3667 #else /* !DHD_LB_RXC */
3668 #define DHD_LB_DISPATCH_RX_COMPL(dhdp)	do { /* noop */ } while (0)
3669 #endif /* !DHD_LB_RXC */
3670 
3671 #if defined(DHD_LB_TXC)
3672 #define DHD_LB_DISPATCH_TX_COMPL(dhdp)	dhd_lb_dispatch_tx_compl(dhdp)
3673 #else /* !DHD_LB_TXC */
3674 #define DHD_LB_DISPATCH_TX_COMPL(dhdp)	do { /* noop */ } while (0)
3675 #endif /* !DHD_LB_TXC */
3676 
3677 #if defined(DHD_LB)
3678 /* DHD load balancing: deferral of work to another online CPU */
3679 /* DHD_LB_TXC DHD_LB_RXC DHD_LB_RXP dispatchers, in dhd_linux.c */
3680 extern void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp);
3681 extern void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp);
3682 extern void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
3683 extern void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
3684 
3685 #if defined(DHD_LB_RXP)
3686 /**
3687  * dhd_lb_dispatch_rx_process - load balance by dispatching Rx processing work
3688  * to other CPU cores
3689  */
3690 static INLINE void
3691 dhd_lb_dispatch_rx_process(dhd_pub_t *dhdp)
3692 {
3693 	dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */
3694 }
3695 #endif /* DHD_LB_RXP */
3696 
3697 #if defined(DHD_LB_TXC)
3698 /**
3699  * dhd_lb_dispatch_tx_compl - load balance by dispatching Tx completion work
3700  * to other CPU cores
3701  */
3702 static INLINE void
3703 dhd_lb_dispatch_tx_compl(dhd_pub_t *dhdp, uint16 ring_idx)
3704 {
3705 	bcm_workq_prod_sync(&dhdp->prot->tx_compl_prod); /* flush WR index */
3706 	dhd_lb_tx_compl_dispatch(dhdp); /* dispatch tx_compl_tasklet */
3707 }
3708 
3709 /**
3710  * DHD load balanced tx completion tasklet handler, that will perform the
3711  * freeing of packets on the selected CPU. Packet pointers are delivered to
3712  * this tasklet via the tx complete workq.
3713  */
3714 void
3715 dhd_lb_tx_compl_handler(unsigned long data)
3716 {
3717 	int elem_ix;
3718 	void *pkt, **elem;
3719 	dmaaddr_t pa;
3720 	uint32 pa_len;
3721 	dhd_pub_t *dhd = (dhd_pub_t *)data;
3722 	dhd_prot_t *prot = dhd->prot;
3723 	bcm_workq_t *workq = &prot->tx_compl_cons;
3724 	uint32 count = 0;
3725 
3726 	int curr_cpu;
3727 	curr_cpu = get_cpu();
3728 	put_cpu();
3729 
3730 	DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhd);
3731 
3732 	while (1) {
3733 		elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
3734 
3735 		if (elem_ix == BCM_RING_EMPTY) {
3736 			break;
3737 		}
3738 
3739 		elem = WORKQ_ELEMENT(void *, workq, elem_ix);
3740 		pkt = *elem;
3741 
3742 		DHD_INFO(("%s: tx_compl_cons pkt<%p>\n", __FUNCTION__, pkt));
3743 
3744 		OSL_PREFETCH(PKTTAG(pkt));
3745 		OSL_PREFETCH(pkt);
3746 
3747 		pa = DHD_PKTTAG_PA((dhd_pkttag_fr_t *)PKTTAG(pkt));
3748 		pa_len = DHD_PKTTAG_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt));
3749 
3750 		DMA_UNMAP(dhd->osh, pa, pa_len, DMA_RX, 0, 0);
3751 #if defined(BCMPCIE)
3752 		dhd_txcomplete(dhd, pkt, true);
3753 #ifdef DHD_4WAYM4_FAIL_DISCONNECT
3754 		dhd_eap_txcomplete(dhd, pkt, TRUE, txstatus->cmn_hdr.if_id);
3755 #endif /* DHD_4WAYM4_FAIL_DISCONNECT */
3756 #endif // endif
3757 
3758 		PKTFREE(dhd->osh, pkt, TRUE);
3759 		count++;
3760 	}
3761 
3762 	/* smp_wmb(); */
3763 	bcm_workq_cons_sync(workq);
3764 	DHD_LB_STATS_UPDATE_TXC_HISTO(dhd, count);
3765 }
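
/*
 * Outline of the workq consumer protocol used above (a sketch, not a full
 * bcm_ring API description):
 *
 *   while ((elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ))
 *           != BCM_RING_EMPTY) {
 *           elem = WORKQ_ELEMENT(void *, workq, elem_ix); // fetch work item
 *           ...process *elem...
 *   }
 *   bcm_workq_cons_sync(workq); // publish consumer index to the producer
 */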
3766 #endif /* DHD_LB_TXC */
3767 
3768 #if defined(DHD_LB_RXC)
3769 
3770 /**
3771  * dhd_lb_dispatch_rx_compl - load balance by dispatching Rx completion work
3772  * to other CPU cores
3773  */
3774 static INLINE void
3775 dhd_lb_dispatch_rx_compl(dhd_pub_t *dhdp)
3776 {
3777 	dhd_prot_t *prot = dhdp->prot;
3778 	/* Schedule the tasklet only if we have to */
3779 	if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
3780 		/* flush WR index */
3781 		bcm_workq_prod_sync(&dhdp->prot->rx_compl_prod);
3782 		dhd_lb_rx_compl_dispatch(dhdp); /* dispatch rx_compl_tasklet */
3783 	}
3784 }
3785 
3786 void
3787 dhd_lb_rx_compl_handler(unsigned long data)
3788 {
3789 	dhd_pub_t *dhd = (dhd_pub_t *)data;
3790 	bcm_workq_t *workq = &dhd->prot->rx_compl_cons;
3791 
3792 	DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhd);
3793 
3794 	dhd_msgbuf_rxbuf_post(dhd, TRUE); /* re-use pktids */
3795 	bcm_workq_cons_sync(workq);
3796 }
3797 #endif /* DHD_LB_RXC */
3798 #endif /* DHD_LB */
3799 
3800 void
3801 dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset)
3802 {
3803 	dhd_prot_t *prot = dhd->prot;
3804 	prot->rx_dataoffset = rx_offset;
3805 }
3806 
3807 static int
3808 dhd_check_create_info_rings(dhd_pub_t *dhd)
3809 {
3810 	dhd_prot_t *prot = dhd->prot;
3811 	int ret = BCME_ERROR;
3812 	uint16 ringid;
3813 
3814 	{
3815 		/* dongle may increase max_submission_rings so keep
3816 		 * ringid at end of dynamic rings
3817 		 */
3818 		ringid = dhd->bus->max_tx_flowrings +
3819 			(dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
3820 			BCMPCIE_H2D_COMMON_MSGRINGS;
3821 	}
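	/*
	 * Note: the expression above reduces to
	 *     ringid = max_submission_rings + BCMPCIE_H2D_COMMON_MSGRINGS;
	 * the expanded form presumably keeps the flowring / dynamic-ring
	 * split explicit.
	 */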
3822 
3823 	if (prot->d2hring_info_cpln) {
3824 		/* for d2hring re-entry case, clear inited flag */
3825 		prot->d2hring_info_cpln->inited = FALSE;
3826 	}
3827 
3828 	if (prot->h2dring_info_subn && prot->d2hring_info_cpln) {
3829 		return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
3830 	}
3831 
3832 	if (prot->h2dring_info_subn == NULL) {
3833 		prot->h2dring_info_subn = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
3834 
3835 		if (prot->h2dring_info_subn == NULL) {
3836 			DHD_ERROR(("%s: couldn't alloc memory for h2dring_info_subn\n",
3837 				__FUNCTION__));
3838 			return BCME_NOMEM;
3839 		}
3840 
3841 		DHD_INFO(("%s: about to create debug submit ring\n", __FUNCTION__));
3842 		ret = dhd_prot_ring_attach(dhd, prot->h2dring_info_subn, "h2dinfo",
3843 			H2DRING_DYNAMIC_INFO_MAX_ITEM, H2DRING_INFO_BUFPOST_ITEMSIZE,
3844 			ringid);
3845 		if (ret != BCME_OK) {
3846 			DHD_ERROR(("%s: couldn't alloc resources for dbg submit ring\n",
3847 				__FUNCTION__));
3848 			goto err;
3849 		}
3850 	}
3851 
3852 	if (prot->d2hring_info_cpln == NULL) {
3853 		prot->d2hring_info_cpln = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
3854 
3855 		if (prot->d2hring_info_cpln == NULL) {
3856 			DHD_ERROR(("%s: couldn't alloc memory for d2hring_info_cpln\n",
3857 				__FUNCTION__));
3858 			return BCME_NOMEM;
3859 		}
3860 
3861 		/* create the debug info completion ring next to debug info submit ring
3862 		* ringid = id next to debug info submit ring
3863 		*/
3864 		ringid = ringid + 1;
3865 
3866 		DHD_INFO(("%s: about to create debug cpl ring\n", __FUNCTION__));
3867 		ret = dhd_prot_ring_attach(dhd, prot->d2hring_info_cpln, "d2hinfo",
3868 			D2HRING_DYNAMIC_INFO_MAX_ITEM, D2HRING_INFO_BUFCMPLT_ITEMSIZE,
3869 			ringid);
3870 		if (ret != BCME_OK) {
3871 			DHD_ERROR(("%s: couldn't alloc resources for dbg cpl ring\n",
3872 				__FUNCTION__));
3873 			dhd_prot_ring_detach(dhd, prot->h2dring_info_subn);
3874 			goto err;
3875 		}
3876 	}
3877 
3878 	return ret;
3879 err:
3880 	MFREE(prot->osh, prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
3881 	prot->h2dring_info_subn = NULL;
3882 
3883 	if (prot->d2hring_info_cpln) {
3884 		MFREE(prot->osh, prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
3885 		prot->d2hring_info_cpln = NULL;
3886 	}
3887 	return ret;
3888 } /* dhd_check_create_info_rings */
3889 
3890 int
3891 dhd_prot_init_info_rings(dhd_pub_t *dhd)
3892 {
3893 	dhd_prot_t *prot = dhd->prot;
3894 	int ret = BCME_OK;
3895 
3896 	if ((ret = dhd_check_create_info_rings(dhd)) != BCME_OK) {
3897 		DHD_ERROR(("%s: info rings aren't created! \n",
3898 			__FUNCTION__));
3899 		return ret;
3900 	}
3901 
3902 	if ((prot->d2hring_info_cpln->inited) || (prot->d2hring_info_cpln->create_pending)) {
3903 		DHD_INFO(("Info completion ring already created\n"));
3904 		return ret;
3905 	}
3906 
3907 	DHD_TRACE(("trying to send create d2h info ring: id %d\n", prot->d2hring_info_cpln->idx));
3908 	ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_info_cpln,
3909 		BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL, DHD_D2H_DBGRING_REQ_PKTID);
3910 	if (ret != BCME_OK)
3911 		return ret;
3912 
3913 	prot->h2dring_info_subn->seqnum = H2D_EPOCH_INIT_VAL;
3914 	prot->h2dring_info_subn->current_phase = 0;
3915 	prot->d2hring_info_cpln->seqnum = D2H_EPOCH_INIT_VAL;
3916 	prot->d2hring_info_cpln->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
3917 
3918 	DHD_TRACE(("trying to send create h2d info ring id %d\n", prot->h2dring_info_subn->idx));
3919 	prot->h2dring_info_subn->n_completion_ids = 1;
3920 	prot->h2dring_info_subn->compeltion_ring_ids[0] = prot->d2hring_info_cpln->idx;
3921 
3922 	ret = dhd_send_h2d_ringcreate(dhd, prot->h2dring_info_subn,
3923 		BCMPCIE_H2D_RING_TYPE_DBGBUF_SUBMIT, DHD_H2D_DBGRING_REQ_PKTID);
3924 
3925 	/* Note that there is no way to delete a d2h or h2d ring once created,
3926 	 * so we cannot clean up if one ring was created while the other failed
3927 	 */
3928 	return ret;
3929 } /* dhd_prot_init_info_rings */
3930 
3931 static void
3932 dhd_prot_detach_info_rings(dhd_pub_t *dhd)
3933 {
3934 	if (dhd->prot->h2dring_info_subn) {
3935 		dhd_prot_ring_detach(dhd, dhd->prot->h2dring_info_subn);
3936 		MFREE(dhd->prot->osh, dhd->prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
3937 		dhd->prot->h2dring_info_subn = NULL;
3938 	}
3939 	if (dhd->prot->d2hring_info_cpln) {
3940 		dhd_prot_ring_detach(dhd, dhd->prot->d2hring_info_cpln);
3941 		MFREE(dhd->prot->osh, dhd->prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
3942 		dhd->prot->d2hring_info_cpln = NULL;
3943 	}
3944 }
3945 
3946 #ifdef DHD_HP2P
3947 static int
3948 dhd_check_create_hp2p_rings(dhd_pub_t *dhd)
3949 {
3950 	dhd_prot_t *prot = dhd->prot;
3951 	int ret = BCME_ERROR;
3952 	uint16 ringid;
3953 
3954 	/* Last 2 dynamic ring indices are used by hp2p rings */
3955 	ringid = dhd->bus->max_submission_rings + dhd->bus->max_completion_rings - 2;
3956 
3957 	if (prot->d2hring_hp2p_txcpl == NULL) {
3958 		prot->d2hring_hp2p_txcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
3959 
3960 		if (prot->d2hring_hp2p_txcpl == NULL) {
3961 			DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_txcpl\n",
3962 				__FUNCTION__));
3963 			return BCME_NOMEM;
3964 		}
3965 
3966 		DHD_INFO(("%s: about to create hp2p txcpl ring\n", __FUNCTION__));
3967 		ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_txcpl, "d2hhp2p_txcpl",
3968 			dhd_bus_get_hp2p_ring_max_size(dhd->bus, TRUE), D2HRING_TXCMPLT_ITEMSIZE,
3969 			ringid);
3970 		if (ret != BCME_OK) {
3971 			DHD_ERROR(("%s: couldn't alloc resources for hp2p txcpl ring\n",
3972 				__FUNCTION__));
3973 			goto err2;
3974 		}
3975 	} else {
3976 		/* for re-entry case, clear inited flag */
3977 		prot->d2hring_hp2p_txcpl->inited = FALSE;
3978 	}
3979 	if (prot->d2hring_hp2p_rxcpl == NULL) {
3980 		prot->d2hring_hp2p_rxcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
3981 
3982 		if (prot->d2hring_hp2p_rxcpl == NULL) {
3983 			DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_rxcpl\n",
3984 				__FUNCTION__));
3985 			return BCME_NOMEM;
3986 		}
3987 
3988 		/* create the hp2p rx completion ring next to hp2p tx compl ring
3989 		* ringid = id next to hp2p tx compl ring
3990 		*/
3991 		ringid = ringid + 1;
3992 
3993 		DHD_INFO(("%s: about to create hp2p rxcpl ring\n", __FUNCTION__));
3994 		ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_rxcpl, "d2hhp2p_rxcpl",
3995 			dhd_bus_get_hp2p_ring_max_size(dhd->bus, FALSE), D2HRING_RXCMPLT_ITEMSIZE,
3996 			ringid);
3997 		if (ret != BCME_OK) {
3998 			DHD_ERROR(("%s: couldn't alloc resources for hp2p rxcpl ring\n",
3999 				__FUNCTION__));
4000 			goto err1;
4001 		}
4002 	} else {
4003 		/* for re-entry case, clear inited flag */
4004 		prot->d2hring_hp2p_rxcpl->inited = FALSE;
4005 	}
4006 
4007 	return ret;
4008 err1:
4009 	MFREE(prot->osh, prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t));
4010 	prot->d2hring_hp2p_rxcpl = NULL;
4011 
4012 err2:
4013 	MFREE(prot->osh, prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t));
4014 	prot->d2hring_hp2p_txcpl = NULL;
4015 	return ret;
4016 } /* dhd_check_create_hp2p_rings */
4017 
4018 int
4019 dhd_prot_init_hp2p_rings(dhd_pub_t *dhd)
4020 {
4021 	dhd_prot_t *prot = dhd->prot;
4022 	int ret = BCME_OK;
4023 
4024 	dhd->hp2p_ring_active = FALSE;
4025 
4026 	if ((ret = dhd_check_create_hp2p_rings(dhd)) != BCME_OK) {
4027 		DHD_ERROR(("%s: hp2p rings aren't created! \n",
4028 			__FUNCTION__));
4029 		return ret;
4030 	}
4031 
4032 	if ((prot->d2hring_hp2p_txcpl->inited) || (prot->d2hring_hp2p_txcpl->create_pending)) {
4033 		DHD_INFO(("hp2p tx completion ring already created\n"));
4034 		return ret;
4035 	}
4036 
4037 	DHD_TRACE(("trying to send create d2h hp2p txcpl ring: id %d\n",
4038 		prot->d2hring_hp2p_txcpl->idx));
4039 	ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_txcpl,
4040 		BCMPCIE_D2H_RING_TYPE_HPP_TX_CPL, DHD_D2H_HPPRING_TXREQ_PKTID);
4041 	if (ret != BCME_OK)
4042 		return ret;
4043 
4044 	prot->d2hring_hp2p_txcpl->seqnum = D2H_EPOCH_INIT_VAL;
4045 	prot->d2hring_hp2p_txcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4046 
4047 	if ((prot->d2hring_hp2p_rxcpl->inited) || (prot->d2hring_hp2p_rxcpl->create_pending)) {
4048 		DHD_INFO(("hp2p rx completion ring already created\n"));
4049 		return ret;
4050 	}
4051 
4052 	DHD_TRACE(("trying to send create d2h hp2p rxcpl ring: id %d\n",
4053 		prot->d2hring_hp2p_rxcpl->idx));
4054 	ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_rxcpl,
4055 		BCMPCIE_D2H_RING_TYPE_HPP_RX_CPL, DHD_D2H_HPPRING_RXREQ_PKTID);
4056 	if (ret != BCME_OK)
4057 		return ret;
4058 
4059 	prot->d2hring_hp2p_rxcpl->seqnum = D2H_EPOCH_INIT_VAL;
4060 	prot->d2hring_hp2p_rxcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4061 
4062 	/* Note that there is no way to delete a d2h or h2d ring once created,
4063 	 * so we cannot clean up if one ring was created while the other failed
4064 	 */
4065 	return BCME_OK;
4066 } /* dhd_prot_init_hp2p_rings */
4067 
4068 static void
4069 dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd)
4070 {
4071 	if (dhd->prot->d2hring_hp2p_txcpl) {
4072 		dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_txcpl);
4073 		MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t));
4074 		dhd->prot->d2hring_hp2p_txcpl = NULL;
4075 	}
4076 	if (dhd->prot->d2hring_hp2p_rxcpl) {
4077 		dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_rxcpl);
4078 		MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t));
4079 		dhd->prot->d2hring_hp2p_rxcpl = NULL;
4080 	}
4081 }
4082 #endif /* DHD_HP2P */
4083 
4084 #ifdef EWP_EDL
4085 static int
4086 dhd_check_create_edl_rings(dhd_pub_t *dhd)
4087 {
4088 	dhd_prot_t *prot = dhd->prot;
4089 	int ret = BCME_ERROR;
4090 	uint16 ringid;
4091 
4092 	{
4093 		/* dongle may increase max_submission_rings so keep
4094 		 * ringid at end of dynamic rings (re-use info ring cpl ring id)
4095 		 */
4096 		ringid = dhd->bus->max_tx_flowrings +
4097 			(dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
4098 			BCMPCIE_H2D_COMMON_MSGRINGS + 1;
4099 	}
4100 
4101 	if (prot->d2hring_edl) {
4102 		prot->d2hring_edl->inited = FALSE;
4103 		return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
4104 	}
4105 
4106 	if (prot->d2hring_edl == NULL) {
4107 		prot->d2hring_edl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
4108 
4109 		if (prot->d2hring_edl == NULL) {
4110 			DHD_ERROR(("%s: couldn't alloc memory for d2hring_edl\n",
4111 				__FUNCTION__));
4112 			return BCME_NOMEM;
4113 		}
4114 
4115 		DHD_ERROR(("%s: about to create EDL ring, ringid: %u \n", __FUNCTION__,
4116 			ringid));
4117 		ret = dhd_prot_ring_attach(dhd, prot->d2hring_edl, "d2hring_edl",
4118 			D2HRING_EDL_MAX_ITEM, D2HRING_EDL_ITEMSIZE,
4119 			ringid);
4120 		if (ret != BCME_OK) {
4121 			DHD_ERROR(("%s: couldn't alloc resources for EDL ring\n",
4122 				__FUNCTION__));
4123 			goto err;
4124 		}
4125 	}
4126 
4127 	return ret;
4128 err:
4129 	MFREE(prot->osh, prot->d2hring_edl, sizeof(msgbuf_ring_t));
4130 	prot->d2hring_edl = NULL;
4131 
4132 	return ret;
4133 } /* dhd_check_create_edl_rings */
4134 
4135 int
4136 dhd_prot_init_edl_rings(dhd_pub_t *dhd)
4137 {
4138 	dhd_prot_t *prot = dhd->prot;
4139 	int ret = BCME_ERROR;
4140 
4141 	if ((ret = dhd_check_create_edl_rings(dhd)) != BCME_OK) {
4142 		DHD_ERROR(("%s: EDL rings aren't created! \n",
4143 			__FUNCTION__));
4144 		return ret;
4145 	}
4146 
4147 	if ((prot->d2hring_edl->inited) || (prot->d2hring_edl->create_pending)) {
4148 		DHD_INFO(("EDL completion ring already created\n"));
4149 		return ret;
4150 	}
4151 
4152 	DHD_ERROR(("trying to send create d2h edl ring: idx %d\n", prot->d2hring_edl->idx));
4153 	ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_edl,
4154 		BCMPCIE_D2H_RING_TYPE_EDL, DHD_D2H_DBGRING_REQ_PKTID);
4155 	if (ret != BCME_OK)
4156 		return ret;
4157 
4158 	prot->d2hring_edl->seqnum = D2H_EPOCH_INIT_VAL;
4159 	prot->d2hring_edl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4160 
4161 	return BCME_OK;
4162 } /* dhd_prot_init_edl_rings */
4163 
4164 static void
4165 dhd_prot_detach_edl_rings(dhd_pub_t *dhd)
4166 {
4167 	if (dhd->prot->d2hring_edl) {
4168 		dhd_prot_ring_detach(dhd, dhd->prot->d2hring_edl);
4169 		MFREE(dhd->prot->osh, dhd->prot->d2hring_edl, sizeof(msgbuf_ring_t));
4170 		dhd->prot->d2hring_edl = NULL;
4171 	}
4172 }
4173 #endif	/* EWP_EDL */
4174 
4175 /**
4176  * Initialize protocol: sync w/dongle state.
4177  * Sets dongle media info (iswl, drv_version, mac address).
4178  */
4179 int dhd_sync_with_dongle(dhd_pub_t *dhd)
4180 {
4181 	int ret = 0;
4182 	wlc_rev_info_t revinfo;
4183 	char buf[128];
4184 	dhd_prot_t *prot = dhd->prot;
4185 
4186 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4187 
4188 	dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
4189 
4190 	/* Post ts buffer after shim layer is attached */
4191 	ret = dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
4192 
4193 #ifndef OEM_ANDROID
4194 	/* Get the device MAC address */
4195 	memset(buf, 0, sizeof(buf));
4196 	strncpy(buf, "cur_etheraddr", sizeof(buf) - 1);
4197 	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
4198 	if (ret < 0) {
4199 		DHD_ERROR(("%s: GET iovar cur_etheraddr FAILED\n", __FUNCTION__));
4200 		goto done;
4201 	}
4202 	memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
4203 	if (dhd_msg_level & DHD_INFO_VAL) {
4204 		bcm_print_bytes("CUR_ETHERADDR ", (uchar *)buf, ETHER_ADDR_LEN);
4205 	}
4206 #endif /* OEM_ANDROID */
4207 
4208 #ifdef DHD_FW_COREDUMP
4209 	/* Check the memdump capability */
4210 	dhd_get_memdump_info(dhd);
4211 #endif /* DHD_FW_COREDUMP */
4212 #ifdef BCMASSERT_LOG
4213 	dhd_get_assert_info(dhd);
4214 #endif /* BCMASSERT_LOG */
4215 
4216 	/* Get the device rev info */
4217 	memset(&revinfo, 0, sizeof(revinfo));
4218 	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
4219 	if (ret < 0) {
4220 		DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__));
4221 		goto done;
4222 	}
4223 	DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
4224 		revinfo.deviceid, revinfo.vendorid, revinfo.chipnum));
4225 
4226 	/* Get the RxBuf post size */
4227 	memset(buf, 0, sizeof(buf));
4228 	bcm_mkiovar("rxbufpost_sz", NULL, 0, buf, sizeof(buf));
4229 	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
4230 	if (ret < 0) {
4231 		DHD_ERROR(("%s: GET RxBuf post FAILED, default to %d\n",
4232 			__FUNCTION__, DHD_FLOWRING_RX_BUFPOST_PKTSZ));
4233 		prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
4234 	} else {
4235 		memcpy_s(&(prot->rxbufpost_sz), sizeof(prot->rxbufpost_sz), buf, sizeof(uint16));
4236 		if (prot->rxbufpost_sz > DHD_FLOWRING_RX_BUFPOST_PKTSZ_MAX) {
4237 			DHD_ERROR(("%s: Invalid RxBuf post size : %d, default to %d\n",
4238 				__FUNCTION__, prot->rxbufpost_sz, DHD_FLOWRING_RX_BUFPOST_PKTSZ));
4239 			prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
4240 		} else {
4241 			DHD_ERROR(("%s: RxBuf Post : %d\n", __FUNCTION__, prot->rxbufpost_sz));
4242 		}
4243 	}
4244 
4245 	/* Post buffers for packet reception */
4246 	dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
4247 
4248 	DHD_SSSR_DUMP_INIT(dhd);
4249 
4250 	dhd_process_cid_mac(dhd, TRUE);
4251 	ret = dhd_preinit_ioctls(dhd);
4252 	dhd_process_cid_mac(dhd, FALSE);
4253 
4254 #if defined(DHD_H2D_LOG_TIME_SYNC)
4255 #ifdef DHD_HP2P
4256 	if (FW_SUPPORTED(dhd, h2dlogts) || dhd->hp2p_capable) {
4257 		if (dhd->hp2p_enable) {
4258 			dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH / 40;
4259 		} else {
4260 			dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH;
4261 		}
4262 #else
4263 	if (FW_SUPPORTED(dhd, h2dlogts)) {
4264 		dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH;
4265 #endif // endif
4266 		dhd->bus->dhd_rte_time_sync_count = OSL_SYSUPTIME_US();
4267 		/* This is during initialization. */
4268 		dhd_h2d_log_time_sync(dhd);
4269 	} else {
4270 		dhd->dhd_rte_time_sync_ms = 0;
4271 	}
4272 #endif /* DHD_H2D_LOG_TIME_SYNC || DHD_HP2P */
4273 	/* Always assumes wl for now */
4274 	dhd->iswl = TRUE;
4275 done:
4276 	return ret;
4277 } /* dhd_sync_with_dongle */
4278 
4279 #define DHD_DBG_SHOW_METADATA	0
4280 
4281 #if DHD_DBG_SHOW_METADATA
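/*
 * Metadata is a BCMPCIE_D2H_METADATA_HDRLEN header followed by a sequence
 * of TLVs. Each TLV, as parsed below, is laid out as:
 *
 *     byte 0         byte 1           bytes 2 .. (1 + len)
 *     +--------------+----------------+---------------------+
 *     | type (tlv_t) | length (tlv_l) | value (tlv_v ...)   |
 *     +--------------+----------------+---------------------+
 */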
4282 static void BCMFASTPATH
4283 dhd_prot_print_metadata(dhd_pub_t *dhd, void *ptr, int len)
4284 {
4285 	uint8 tlv_t;
4286 	uint8 tlv_l;
4287 	uint8 *tlv_v = (uint8 *)ptr;
4288 
4289 	if (len <= BCMPCIE_D2H_METADATA_HDRLEN)
4290 		return;
4291 
4292 	len -= BCMPCIE_D2H_METADATA_HDRLEN;
4293 	tlv_v += BCMPCIE_D2H_METADATA_HDRLEN;
4294 
4295 	while (len > TLV_HDR_LEN) {
4296 		tlv_t = tlv_v[TLV_TAG_OFF];
4297 		tlv_l = tlv_v[TLV_LEN_OFF];
4298 
4299 		len -= TLV_HDR_LEN;
4300 		tlv_v += TLV_HDR_LEN;
4301 		if (len < tlv_l)
4302 			break;
4303 		if ((tlv_t == 0) || (tlv_t == WLFC_CTL_TYPE_FILLER))
4304 			break;
4305 
4306 		switch (tlv_t) {
4307 		case WLFC_CTL_TYPE_TXSTATUS: {
4308 			uint32 txs;
4309 			memcpy(&txs, tlv_v, sizeof(uint32));
4310 			if (tlv_l < (sizeof(wl_txstatus_additional_info_t) + sizeof(uint32))) {
4311 				printf("METADATA TX_STATUS: %08x\n", txs);
4312 			} else {
4313 				wl_txstatus_additional_info_t tx_add_info;
4314 				memcpy(&tx_add_info, tlv_v + sizeof(uint32),
4315 					sizeof(wl_txstatus_additional_info_t));
4316 				printf("METADATA TX_STATUS: %08x WLFCTS[%04x | %08x - %08x - %08x]"
4317 					" rate = %08x tries = %d - %d\n", txs,
4318 					tx_add_info.seq, tx_add_info.entry_ts,
4319 					tx_add_info.enq_ts, tx_add_info.last_ts,
4320 					tx_add_info.rspec, tx_add_info.rts_cnt,
4321 					tx_add_info.tx_cnt);
4322 			}
4323 			} break;
4324 
4325 		case WLFC_CTL_TYPE_RSSI: {
4326 			if (tlv_l == 1)
4327 				printf("METADATA RX_RSSI: rssi = %d\n", *tlv_v);
4328 			else
4329 				printf("METADATA RX_RSSI[%04x]: rssi = %d snr = %d\n",
4330 					(*(tlv_v + 3) << 8) | *(tlv_v + 2),
4331 					(int8)(*tlv_v), *(tlv_v + 1));
4332 			} break;
4333 
4334 		case WLFC_CTL_TYPE_FIFO_CREDITBACK:
4335 			bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l);
4336 			break;
4337 
4338 		case WLFC_CTL_TYPE_TX_ENTRY_STAMP:
4339 			bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l);
4340 			break;
4341 
4342 		case WLFC_CTL_TYPE_RX_STAMP: {
4343 			struct {
4344 				uint32 rspec;
4345 				uint32 bus_time;
4346 				uint32 wlan_time;
4347 			} rx_tmstamp;
4348 			memcpy(&rx_tmstamp, tlv_v, sizeof(rx_tmstamp));
4349 			printf("METADATA RX TIMESTAMP: WLFCTS[%08x - %08x] rate = %08x\n",
4350 				rx_tmstamp.wlan_time, rx_tmstamp.bus_time, rx_tmstamp.rspec);
4351 			} break;
4352 
4353 		case WLFC_CTL_TYPE_TRANS_ID:
4354 			bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l);
4355 			break;
4356 
4357 		case WLFC_CTL_TYPE_COMP_TXSTATUS:
4358 			bcm_print_bytes("METADATA COMP_TXSTATUS", tlv_v, tlv_l);
4359 			break;
4360 
4361 		default:
4362 			bcm_print_bytes("METADATA UNKNOWN", tlv_v, tlv_l);
4363 			break;
4364 		}
4365 
4366 		len -= tlv_l;
4367 		tlv_v += tlv_l;
4368 	}
4369 }
4370 #endif /* DHD_DBG_SHOW_METADATA */
4371 
4372 static INLINE void BCMFASTPATH
4373 dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send)
4374 {
4375 	if (pkt) {
4376 		if (pkttype == PKTTYPE_IOCTL_RX ||
4377 			pkttype == PKTTYPE_EVENT_RX ||
4378 			pkttype == PKTTYPE_INFO_RX ||
4379 			pkttype == PKTTYPE_TSBUF_RX) {
4380 #ifdef DHD_USE_STATIC_CTRLBUF
4381 			PKTFREE_STATIC(dhd->osh, pkt, send);
4382 #else
4383 			PKTFREE(dhd->osh, pkt, send);
4384 #endif /* DHD_USE_STATIC_CTRLBUF */
4385 		} else {
4386 			PKTFREE(dhd->osh, pkt, send);
4387 		}
4388 	}
4389 }
4390 
4391 /**
4392  * dhd_prot_packet_get should be called only for items that have a pktid_ctrl_map
4393  * handle. The bottom-most functions, such as dhd_pktid_map_free, take a separate
4394  * DHD_PKTID_LOCK to ensure thread safety, so this function needs no locks of its own.
4395  */
4396 static INLINE void * BCMFASTPATH
4397 dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, bool free_pktid)
4398 {
4399 	void *PKTBUF;
4400 	dmaaddr_t pa;
4401 	uint32 len;
4402 	void *dmah;
4403 	void *secdma;
4404 
4405 #ifdef DHD_PCIE_PKTID
4406 	if (free_pktid) {
4407 		PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map,
4408 			pktid, pa, len, dmah, secdma, pkttype);
4409 	} else {
4410 		PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_ctrl_map,
4411 			pktid, pa, len, dmah, secdma, pkttype);
4412 	}
4413 #else
4414 	PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map, pktid, pa,
4415 		len, dmah, secdma, pkttype);
4416 #endif /* DHD_PCIE_PKTID */
4417 	if (PKTBUF) {
4418 		{
4419 			if (SECURE_DMA_ENAB(dhd->osh))
4420 				SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah,
4421 					secdma, 0);
4422 			else
4423 				DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
4424 #ifdef DMAMAP_STATS
4425 			switch (pkttype) {
4426 #ifndef IOCTLRESP_USE_CONSTMEM
4427 				case PKTTYPE_IOCTL_RX:
4428 					dhd->dma_stats.ioctl_rx--;
4429 					dhd->dma_stats.ioctl_rx_sz -= len;
4430 					break;
4431 #endif /* IOCTLRESP_USE_CONSTMEM */
4432 				case PKTTYPE_EVENT_RX:
4433 					dhd->dma_stats.event_rx--;
4434 					dhd->dma_stats.event_rx_sz -= len;
4435 					break;
4436 				case PKTTYPE_INFO_RX:
4437 					dhd->dma_stats.info_rx--;
4438 					dhd->dma_stats.info_rx_sz -= len;
4439 					break;
4440 				case PKTTYPE_TSBUF_RX:
4441 					dhd->dma_stats.tsbuf_rx--;
4442 					dhd->dma_stats.tsbuf_rx_sz -= len;
4443 					break;
4444 			}
4445 #endif /* DMAMAP_STATS */
4446 		}
4447 	}
4448 
4449 	return PKTBUF;
4450 }
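/*
 * Editorial sketch (not driver code): a typical caller resolves the pktid
 * carried in a completion work item and unmaps/frees it in one step;
 * 'resp' here is a hypothetical completion record:
 *
 *   pkt = dhd_prot_packet_get(dhd, ltoh32(resp->cmn_hdr.request_id),
 *           PKTTYPE_IOCTL_RX, TRUE);
 *
 * free_pktid == TRUE releases the pktid locker back to the map, while
 * FALSE (the DHD_PKTID_TO_NATIVE_RSV path) keeps the locker reserved for
 * reuse on a later post.
 */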
4451 
4452 #ifdef IOCTLRESP_USE_CONSTMEM
4453 static INLINE void BCMFASTPATH
4454 dhd_prot_ioctl_ret_buffer_get(dhd_pub_t *dhd, uint32 pktid, dhd_dma_buf_t *retbuf)
4455 {
4456 	memset(retbuf, 0, sizeof(dhd_dma_buf_t));
4457 	retbuf->va = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
4458 		retbuf->pa, retbuf->len, retbuf->dmah, retbuf->secdma, PKTTYPE_IOCTL_RX);
4459 
4460 	return;
4461 }
4462 #endif // endif
4463 
4464 static void BCMFASTPATH
4465 dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid)
4466 {
4467 	dhd_prot_t *prot = dhd->prot;
4468 	int16 fillbufs;
4469 	uint16 cnt = 256;
4470 	int retcount = 0;
4471 
4472 	fillbufs = prot->max_rxbufpost - prot->rxbufpost;
4473 	while (fillbufs >= RX_BUF_BURST) {
4474 		cnt--;
4475 		if (cnt == 0) {
4476 			/* find a better way to reschedule rx buf post if space not available */
4477 			DHD_ERROR(("h2d rx post ring not available to post host buffers \n"));
4478 			DHD_ERROR(("Current posted host buf count %d \n", prot->rxbufpost));
4479 			break;
4480 		}
4481 
4482 		/* Post in bursts of at most RX_BUF_BURST buffers at a time */
4483 		fillbufs = MIN(fillbufs, RX_BUF_BURST);
4484 
4485 		/* Post buffers */
4486 		retcount = dhd_prot_rxbuf_post(dhd, fillbufs, use_rsv_pktid);
4487 
4488 		if (retcount >= 0) {
4489 			prot->rxbufpost += (uint16)retcount;
4490 #ifdef DHD_LB_RXC
4491 			/* dhd_prot_rxbuf_post returns the number of buffers posted */
4492 			DHD_LB_STATS_UPDATE_RXC_HISTO(dhd, retcount);
4493 #endif /* DHD_LB_RXC */
4494 			/* how many more to post */
4495 			fillbufs = prot->max_rxbufpost - prot->rxbufpost;
4496 		} else {
4497 			/* Make sure we don't run loop any further */
4498 			fillbufs = 0;
4499 		}
4500 	}
4501 }
4502 
4503 /** Post 'count' rx buffers to the dongle */
4504 static int BCMFASTPATH
4505 dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid)
4506 {
4507 	void *p, **pktbuf;
4508 	uint8 *rxbuf_post_tmp;
4509 	host_rxbuf_post_t *rxbuf_post;
4510 	void *msg_start;
4511 	dmaaddr_t pa, *pktbuf_pa;
4512 	uint32 *pktlen;
4513 	uint16 i = 0, alloced = 0;
4514 	unsigned long flags;
4515 	uint32 pktid;
4516 	dhd_prot_t *prot = dhd->prot;
4517 	msgbuf_ring_t *ring = &prot->h2dring_rxp_subn;
4518 	void *lcl_buf;
4519 	uint16 lcl_buf_size;
4520 	uint16 pktsz = prot->rxbufpost_sz;
4521 
4522 	/* allocate a local buffer to store pkt buffer va, pa and length */
4523 	lcl_buf_size = (sizeof(void *) + sizeof(dmaaddr_t) + sizeof(uint32)) *
4524 		RX_BUF_BURST;
4525 	lcl_buf = MALLOC(dhd->osh, lcl_buf_size);
4526 	if (!lcl_buf) {
4527 		DHD_ERROR(("%s: local scratch buffer allocation failed\n", __FUNCTION__));
4528 		return 0;
4529 	}
4530 	pktbuf = lcl_buf;
4531 	pktbuf_pa = (dmaaddr_t *)((uint8 *)pktbuf + sizeof(void *) * RX_BUF_BURST);
4532 	pktlen = (uint32 *)((uint8 *)pktbuf_pa + sizeof(dmaaddr_t) * RX_BUF_BURST);
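	/*
	 * Layout note (RX_BUF_BURST == 32 is assumed here for illustration):
	 * the single MALLOC above is carved into three parallel arrays,
	 *
	 *   lcl_buf: [ void *pktbuf[32] | dmaaddr_t pktbuf_pa[32] | uint32 pktlen[32] ]
	 *
	 * so entry i in each array describes the same rx packet.
	 */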
4533 
4534 	for (i = 0; i < count; i++) {
4535 		if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
4536 			DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__));
4537 			dhd->rx_pktgetfail++;
4538 			break;
4539 		}
4540 
4541 		pktlen[i] = PKTLEN(dhd->osh, p);
4542 		if (SECURE_DMA_ENAB(dhd->osh)) {
4543 			pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i],
4544 				DMA_RX, p, 0, ring->dma_buf.secdma, 0);
4545 		}
4546 #ifndef BCM_SECURE_DMA
4547 		else
4548 			pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i], DMA_RX, p, 0);
4549 #endif /* #ifndef BCM_SECURE_DMA */
4550 
4551 		if (PHYSADDRISZERO(pa)) {
4552 			PKTFREE(dhd->osh, p, FALSE);
4553 			DHD_ERROR(("Invalid physaddr 0\n"));
4554 			ASSERT(0);
4555 			break;
4556 		}
4557 #ifdef DMAMAP_STATS
4558 		dhd->dma_stats.rxdata++;
4559 		dhd->dma_stats.rxdata_sz += pktlen[i];
4560 #endif /* DMAMAP_STATS */
4561 
4562 		PKTPULL(dhd->osh, p, prot->rx_metadata_offset);
4563 		pktlen[i] = PKTLEN(dhd->osh, p);
4564 		pktbuf[i] = p;
4565 		pktbuf_pa[i] = pa;
4566 	}
4567 
4568 	/* only post what we have */
4569 	count = i;
4570 
4571 	/* grab the ring lock to allocate pktid and post on ring */
4572 	DHD_RING_LOCK(ring->ring_lock, flags);
4573 
4574 	/* Claim space for exactly 'count' messages, for mitigation purposes */
4575 	msg_start = (void *)
4576 		dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE);
4577 	if (msg_start == NULL) {
4578 		DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
4579 		DHD_RING_UNLOCK(ring->ring_lock, flags);
4580 		goto cleanup;
4581 	}
4582 	/* if msg_start != NULL, we should have alloced space for at least 1 item */
4583 	ASSERT(alloced > 0);
4584 
4585 	rxbuf_post_tmp = (uint8*)msg_start;
4586 
4587 	for (i = 0; i < alloced; i++) {
4588 		rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp;
4589 		p = pktbuf[i];
4590 		pa = pktbuf_pa[i];
4591 
4592 #if defined(DHD_LB_RXC)
4593 		if (use_rsv_pktid == TRUE) {
4594 			bcm_workq_t *workq = &prot->rx_compl_cons;
4595 			int elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
4596 
4597 			if (elem_ix == BCM_RING_EMPTY) {
4598 				DHD_INFO(("%s rx_compl_cons ring is empty\n", __FUNCTION__));
4599 				pktid = DHD_PKTID_INVALID;
4600 				goto alloc_pkt_id;
4601 			} else {
4602 				uint32 *elem = WORKQ_ELEMENT(uint32, workq, elem_ix);
4603 				pktid = *elem;
4604 			}
4605 
4606 			rxbuf_post->cmn_hdr.request_id = htol32(pktid);
4607 
4608 			/* Now populate the previous locker with valid information */
4609 			if (pktid != DHD_PKTID_INVALID) {
4610 				DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_rx_map,
4611 					p, pktid, pa, pktlen[i], DMA_RX, NULL, NULL,
4612 					PKTTYPE_DATA_RX);
4613 			}
4614 		} else
4615 #endif /* DHD_LB_RXC */
4616 		{
4617 #if defined(DHD_LB_RXC)
4618 alloc_pkt_id:
4619 #endif /* DHD_LB_RXC */
4620 		pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_rx_map, p, pa,
4621 			pktlen[i], DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX);
4622 #if defined(DHD_PCIE_PKTID)
4623 		if (pktid == DHD_PKTID_INVALID) {
4624 			break;
4625 		}
4626 #endif /* DHD_PCIE_PKTID */
4627 		}
4628 
4629 		/* Common msg header */
4630 		rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST;
4631 		rxbuf_post->cmn_hdr.if_id = 0;
4632 		rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
4633 		rxbuf_post->cmn_hdr.flags = ring->current_phase;
4634 		ring->seqnum++;
4635 		rxbuf_post->data_buf_len = htol16((uint16)pktlen[i]);
4636 		rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
4637 		rxbuf_post->data_buf_addr.low_addr =
4638 			htol32(PHYSADDRLO(pa) + prot->rx_metadata_offset);
4639 
4640 		if (prot->rx_metadata_offset) {
4641 			rxbuf_post->metadata_buf_len = prot->rx_metadata_offset;
4642 			rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
4643 			rxbuf_post->metadata_buf_addr.low_addr  = htol32(PHYSADDRLO(pa));
4644 		} else {
4645 			rxbuf_post->metadata_buf_len = 0;
4646 			rxbuf_post->metadata_buf_addr.high_addr = 0;
4647 			rxbuf_post->metadata_buf_addr.low_addr  = 0;
4648 		}
4649 
4650 #ifdef DHD_PKTID_AUDIT_RING
4651 		DHD_PKTID_AUDIT(dhd, prot->pktid_rx_map, pktid, DHD_DUPLICATE_ALLOC);
4652 #endif /* DHD_PKTID_AUDIT_RING */
4653 
4654 		rxbuf_post->cmn_hdr.request_id = htol32(pktid);
4655 
4656 		/* Move rxbuf_post_tmp to next item */
4657 		rxbuf_post_tmp = rxbuf_post_tmp + ring->item_len;
4658 
4659 #ifdef DHD_LBUF_AUDIT
4660 		PKTAUDIT(dhd->osh, p);
4661 #endif // endif
4662 	}
4663 
4664 	if (i < alloced) {
4665 		if (ring->wr < (alloced - i))
4666 			ring->wr = ring->max_items - (alloced - i);
4667 		else
4668 			ring->wr -= (alloced - i);
4669 
4670 		if (ring->wr == 0) {
4671 			DHD_INFO(("%s: flipping the phase now\n", ring->name));
4672 			ring->current_phase = ring->current_phase ?
4673 				0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4674 		}
4675 
4676 		alloced = i;
4677 	}
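	/*
	 * Worked example (numbers hypothetical): with max_items = 512, say 10
	 * slots were claimed (alloced = 10) but only i = 4 got a valid pktid.
	 * wr had already advanced by 10, so it is rolled back by 6, wrapping
	 * through the ring end if needed. If the rollback lands wr exactly on
	 * 0, the earlier advance had wrapped the ring, so current_phase is
	 * toggled back between 0 and BCMPCIE_CMNHDR_PHASE_BIT_INIT.
	 */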
4678 
4679 	/* update ring's WR index and ring doorbell to dongle */
4680 	if (alloced > 0) {
4681 		dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
4682 	}
4683 
4684 	DHD_RING_UNLOCK(ring->ring_lock, flags);
4685 
4686 cleanup:
4687 	for (i = alloced; i < count; i++) {
4688 		p = pktbuf[i];
4689 		pa = pktbuf_pa[i];
4690 
4691 		if (SECURE_DMA_ENAB(dhd->osh))
4692 			SECURE_DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0,
4693 				DHD_DMAH_NULL, ring->dma_buf.secdma, 0);
4694 		else
4695 			DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0, DHD_DMAH_NULL);
4696 		PKTFREE(dhd->osh, p, FALSE);
4697 	}
4698 
4699 	MFREE(dhd->osh, lcl_buf, lcl_buf_size);
4700 
4701 	return alloced;
4702 } /* dhd_prot_rxbuf_post */
4703 
4704 static int
4705 dhd_prot_infobufpost(dhd_pub_t *dhd, msgbuf_ring_t *ring)
4706 {
4707 	unsigned long flags;
4708 	uint32 pktid;
4709 	dhd_prot_t *prot = dhd->prot;
4710 	uint16 alloced = 0;
4711 	uint16 pktsz = DHD_INFOBUF_RX_BUFPOST_PKTSZ;
4712 	uint32 pktlen;
4713 	info_buf_post_msg_t *infobuf_post;
4714 	uint8 *infobuf_post_tmp;
4715 	void *p;
4716 	void* msg_start;
4717 	uint8 i = 0;
4718 	dmaaddr_t pa;
4719 	int16 count = 0;
4720 
4721 	if (ring == NULL)
4722 		return 0;
4723 
4724 	if (ring->inited != TRUE)
4725 		return 0;
4726 	if (ring == dhd->prot->h2dring_info_subn) {
4727 		if (prot->max_infobufpost == 0)
4728 			return 0;
4729 
4730 		count = prot->max_infobufpost - prot->infobufpost;
4731 	}
4732 	else {
4733 		DHD_ERROR(("Unknown ring\n"));
4734 		return 0;
4735 	}
4736 
4737 	if (count <= 0) {
4738 		DHD_INFO(("%s: Cannot post more than max info resp buffers\n",
4739 			__FUNCTION__));
4740 		return 0;
4741 	}
4742 
4743 	/* grab the ring lock to allocate pktid and post on ring */
4744 	DHD_RING_LOCK(ring->ring_lock, flags);
4745 
4746 	/* Claim space for exactly 'count' messages, for mitigation purposes */
4747 	msg_start = (void *) dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, FALSE);
4748 
4749 	if (msg_start == NULL) {
4750 		DHD_INFO(("%s:%d: infobufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
4751 		DHD_RING_UNLOCK(ring->ring_lock, flags);
4752 		return -1;
4753 	}
4754 
4755 	/* if msg_start != NULL, we should have alloced space for at least 1 item */
4756 	ASSERT(alloced > 0);
4757 
4758 	infobuf_post_tmp = (uint8*) msg_start;
4759 
4760 	/* loop through each allocated message in the host ring */
4761 	for (i = 0; i < alloced; i++) {
4762 		infobuf_post = (info_buf_post_msg_t *) infobuf_post_tmp;
4763 		/* Create a rx buffer */
4764 #ifdef DHD_USE_STATIC_CTRLBUF
4765 		p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
4766 #else
4767 		p = PKTGET(dhd->osh, pktsz, FALSE);
4768 #endif /* DHD_USE_STATIC_CTRLBUF */
4769 		if (p == NULL) {
4770 			DHD_ERROR(("%s:%d: PKTGET for infobuf failed\n", __FUNCTION__, __LINE__));
4771 			dhd->rx_pktgetfail++;
4772 			break;
4773 		}
4774 		pktlen = PKTLEN(dhd->osh, p);
4775 		if (SECURE_DMA_ENAB(dhd->osh)) {
4776 			pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
4777 				DMA_RX, p, 0, ring->dma_buf.secdma, 0);
4778 		}
4779 #ifndef BCM_SECURE_DMA
4780 		else
4781 			pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
4782 #endif /* #ifndef BCM_SECURE_DMA */
4783 		if (PHYSADDRISZERO(pa)) {
4784 			if (SECURE_DMA_ENAB(dhd->osh)) {
4785 				SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
4786 					ring->dma_buf.secdma, 0);
4787 			}
4788 			else
4789 				DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
4790 #ifdef DHD_USE_STATIC_CTRLBUF
4791 			PKTFREE_STATIC(dhd->osh, p, FALSE);
4792 #else
4793 			PKTFREE(dhd->osh, p, FALSE);
4794 #endif /* DHD_USE_STATIC_CTRLBUF */
4795 			DHD_ERROR(("Invalid physaddr 0\n"));
4796 			ASSERT(0);
4797 			break;
4798 		}
4799 #ifdef DMAMAP_STATS
4800 		dhd->dma_stats.info_rx++;
4801 		dhd->dma_stats.info_rx_sz += pktlen;
4802 #endif /* DMAMAP_STATS */
4803 		pktlen = PKTLEN(dhd->osh, p);
4804 
4805 		/* Common msg header */
4806 		infobuf_post->cmn_hdr.msg_type = MSG_TYPE_INFO_BUF_POST;
4807 		infobuf_post->cmn_hdr.if_id = 0;
4808 		infobuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
4809 		infobuf_post->cmn_hdr.flags = ring->current_phase;
4810 		ring->seqnum++;
4811 
4812 		pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_ctrl_map, p, pa,
4813 			pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_INFO_RX);
4814 
4815 #if defined(DHD_PCIE_PKTID)
4816 		if (pktid == DHD_PKTID_INVALID) {
4817 			if (SECURE_DMA_ENAB(dhd->osh)) {
4818 				SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0,
4819 					ring->dma_buf.secdma, 0);
4820 			} else
4821 				DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0);
4822 
4823 #ifdef DHD_USE_STATIC_CTRLBUF
4824 			PKTFREE_STATIC(dhd->osh, p, FALSE);
4825 #else
4826 			PKTFREE(dhd->osh, p, FALSE);
4827 #endif /* DHD_USE_STATIC_CTRLBUF */
4828 			DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
4829 			break;
4830 		}
4831 #endif /* DHD_PCIE_PKTID */
4832 
4833 		infobuf_post->host_buf_len = htol16((uint16)pktlen);
4834 		infobuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
4835 		infobuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
4836 
4837 #ifdef DHD_PKTID_AUDIT_RING
4838 		DHD_PKTID_AUDIT(dhd, prot->pktid_ctrl_map, pktid, DHD_DUPLICATE_ALLOC);
4839 #endif /* DHD_PKTID_AUDIT_RING */
4840 
4841 		infobuf_post->cmn_hdr.request_id = htol32(pktid);
4842 
4843 		DHD_INFO(("ID %d, low_addr 0x%08x, high_addr 0x%08x\n",
4844 			infobuf_post->cmn_hdr.request_id,  infobuf_post->host_buf_addr.low_addr,
4845 			infobuf_post->host_buf_addr.high_addr));
4846 		/* Move infobuf_post_tmp to next item */
4847 		infobuf_post_tmp = infobuf_post_tmp + ring->item_len;
4848 #ifdef DHD_LBUF_AUDIT
4849 		PKTAUDIT(dhd->osh, p);
4850 #endif // endif
4851 	}
4852 
4853 	if (i < alloced) {
4854 		if (ring->wr < (alloced - i))
4855 			ring->wr = ring->max_items - (alloced - i);
4856 		else
4857 			ring->wr -= (alloced - i);
4858 
4859 		alloced = i;
4860 		if (alloced && ring->wr == 0) {
4861 			DHD_INFO(("%s: flipping the phase now\n", ring->name));
4862 			ring->current_phase = ring->current_phase ?
4863 				0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4864 		}
4865 	}
4866 
4867 	/* Update the write pointer in TCM & ring bell */
4868 	if (alloced > 0) {
4869 		if (ring == dhd->prot->h2dring_info_subn) {
4870 			prot->infobufpost += alloced;
4871 		}
4872 		dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
4873 	}
4874 
4875 	DHD_RING_UNLOCK(ring->ring_lock, flags);
4876 
4877 	return alloced;
4878 } /* dhd_prot_infobufpost */
4879 
4880 #ifdef IOCTLRESP_USE_CONSTMEM
4881 static int
4882 alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
4883 {
4884 	int err;
4885 	memset(retbuf, 0, sizeof(dhd_dma_buf_t));
4886 
4887 	if ((err = dhd_dma_buf_alloc(dhd, retbuf, IOCT_RETBUF_SIZE)) != BCME_OK) {
4888 		DHD_ERROR(("%s: dhd_dma_buf_alloc err %d\n", __FUNCTION__, err));
4889 		ASSERT(0);
4890 		return BCME_NOMEM;
4891 	}
4892 
4893 	return BCME_OK;
4894 }
4895 
4896 static void
4897 free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
4898 {
4899 	/* retbuf (declared on stack) not fully populated ...  */
4900 	if (retbuf->va) {
4901 		uint32 dma_pad;
4902 		dma_pad = (IOCT_RETBUF_SIZE % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
4903 		retbuf->len = IOCT_RETBUF_SIZE;
4904 		retbuf->_alloced = retbuf->len + dma_pad;
4905 	}
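	/*
	 * Worked example (constant values hypothetical): with IOCT_RETBUF_SIZE
	 * = 2048 and DHD_DMA_PAD = 128, 2048 % 128 == 0, so dma_pad = 0 and
	 * _alloced stays 2048. Were the size 2050 instead, the remainder would
	 * be non-zero, a full 128-byte pad would be added, and _alloced would
	 * be 2178. This re-derives what dhd_dma_buf_alloc recorded, since the
	 * caller's stack retbuf was never fully populated.
	 */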
4906 
4907 	dhd_dma_buf_free(dhd, retbuf);
4908 	return;
4909 }
4910 #endif /* IOCTLRESP_USE_CONSTMEM */
4911 
4912 static int
4913 dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, uint8 msg_type)
4914 {
4915 	void *p;
4916 	uint16 pktsz;
4917 	ioctl_resp_evt_buf_post_msg_t *rxbuf_post;
4918 	dmaaddr_t pa;
4919 	uint32 pktlen;
4920 	dhd_prot_t *prot = dhd->prot;
4921 	uint16 alloced = 0;
4922 	unsigned long flags;
4923 	dhd_dma_buf_t retbuf;
4924 	void *dmah = NULL;
4925 	uint32 pktid;
4926 	void *map_handle;
4927 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
4928 	bool non_ioctl_resp_buf = 0;
4929 	dhd_pkttype_t buf_type;
4930 
4931 	if (dhd->busstate == DHD_BUS_DOWN) {
4932 		DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
4933 		return -1;
4934 	}
4935 	memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
4936 
4937 	if (msg_type == MSG_TYPE_IOCTLRESP_BUF_POST)
4938 		buf_type = PKTTYPE_IOCTL_RX;
4939 	else if (msg_type == MSG_TYPE_EVENT_BUF_POST)
4940 		buf_type = PKTTYPE_EVENT_RX;
4941 	else if (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST)
4942 		buf_type = PKTTYPE_TSBUF_RX;
4943 	else {
4944 		DHD_ERROR(("invalid message type to be posted to Ctrl ring %d\n", msg_type));
4945 		return -1;
4946 	}
4947 
4948 	if ((msg_type == MSG_TYPE_EVENT_BUF_POST) || (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST))
4949 		non_ioctl_resp_buf = TRUE;
4950 	else
4951 		non_ioctl_resp_buf = FALSE;
4952 
4953 	if (non_ioctl_resp_buf) {
4954 		/* Allocate packet for a non-ioctl-response buffer post */
4955 		pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
4956 	} else {
4957 		/* Allocate packet for ctrl/ioctl buffer post */
4958 		pktsz = DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ;
4959 	}
4960 
4961 #ifdef IOCTLRESP_USE_CONSTMEM
4962 	if (!non_ioctl_resp_buf) {
4963 		if (alloc_ioctl_return_buffer(dhd, &retbuf) != BCME_OK) {
4964 			DHD_ERROR(("Could not allocate IOCTL response buffer\n"));
4965 			return -1;
4966 		}
4967 		ASSERT(retbuf.len == IOCT_RETBUF_SIZE);
4968 		p = retbuf.va;
4969 		pktlen = retbuf.len;
4970 		pa = retbuf.pa;
4971 		dmah = retbuf.dmah;
4972 	} else
4973 #endif /* IOCTLRESP_USE_CONSTMEM */
4974 	{
4975 #ifdef DHD_USE_STATIC_CTRLBUF
4976 		p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
4977 #else
4978 		p = PKTGET(dhd->osh, pktsz, FALSE);
4979 #endif /* DHD_USE_STATIC_CTRLBUF */
4980 		if (p == NULL) {
4981 			DHD_ERROR(("%s:%d: PKTGET for %s buf failed\n",
4982 				__FUNCTION__, __LINE__, non_ioctl_resp_buf ?
4983 				"EVENT" : "IOCTL RESP"));
4984 			dhd->rx_pktgetfail++;
4985 			return -1;
4986 		}
4987 
4988 		pktlen = PKTLEN(dhd->osh, p);
4989 
4990 		if (SECURE_DMA_ENAB(dhd->osh)) {
4991 			pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
4992 				DMA_RX, p, 0, ring->dma_buf.secdma, 0);
4993 		}
4994 #ifndef BCM_SECURE_DMA
4995 		else
4996 			pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
4997 #endif /* #ifndef BCM_SECURE_DMA */
4998 
4999 		if (PHYSADDRISZERO(pa)) {
5000 			DHD_ERROR(("Invalid physaddr 0\n"));
5001 			ASSERT(0);
5002 			goto free_pkt_return;
5003 		}
5004 
5005 #ifdef DMAMAP_STATS
5006 		switch (buf_type) {
5007 #ifndef IOCTLRESP_USE_CONSTMEM
5008 			case PKTTYPE_IOCTL_RX:
5009 				dhd->dma_stats.ioctl_rx++;
5010 				dhd->dma_stats.ioctl_rx_sz += pktlen;
5011 				break;
5012 #endif /* !IOCTLRESP_USE_CONSTMEM */
5013 			case PKTTYPE_EVENT_RX:
5014 				dhd->dma_stats.event_rx++;
5015 				dhd->dma_stats.event_rx_sz += pktlen;
5016 				break;
5017 			case PKTTYPE_TSBUF_RX:
5018 				dhd->dma_stats.tsbuf_rx++;
5019 				dhd->dma_stats.tsbuf_rx_sz += pktlen;
5020 				break;
5021 			default:
5022 				break;
5023 		}
5024 #endif /* DMAMAP_STATS */
5025 
5026 	}
5027 
5028 	/* grab the ring lock to allocate pktid and post on ring */
5029 	DHD_RING_LOCK(ring->ring_lock, flags);
5030 
5031 	rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *)
5032 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
5033 
5034 	if (rxbuf_post == NULL) {
5035 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5036 		DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n",
5037 			__FUNCTION__, __LINE__));
5038 
5039 #ifdef IOCTLRESP_USE_CONSTMEM
5040 		if (non_ioctl_resp_buf)
5041 #endif /* IOCTLRESP_USE_CONSTMEM */
5042 		{
5043 			if (SECURE_DMA_ENAB(dhd->osh)) {
5044 				SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
5045 					ring->dma_buf.secdma, 0);
5046 			} else {
5047 				DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
5048 			}
5049 		}
5050 		goto free_pkt_return;
5051 	}
5052 
5053 	/* CMN msg header */
5054 	rxbuf_post->cmn_hdr.msg_type = msg_type;
5055 
5056 #ifdef IOCTLRESP_USE_CONSTMEM
5057 	if (!non_ioctl_resp_buf) {
5058 		map_handle = dhd->prot->pktid_map_handle_ioctl;
5059 		pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen, DMA_RX, dmah,
5060 			ring->dma_buf.secdma, buf_type);
5061 	} else
5062 #endif /* IOCTLRESP_USE_CONSTMEM */
5063 	{
5064 		map_handle = dhd->prot->pktid_ctrl_map;
5065 		pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle,
5066 			p, pa, pktlen, DMA_RX, dmah, ring->dma_buf.secdma,
5067 			buf_type);
5068 	}
5069 
5070 	if (pktid == DHD_PKTID_INVALID) {
5071 		if (ring->wr == 0) {
5072 			ring->wr = ring->max_items - 1;
5073 		} else {
5074 			ring->wr--;
5075 			if (ring->wr == 0) {
5076 				ring->current_phase = ring->current_phase ? 0 :
5077 					BCMPCIE_CMNHDR_PHASE_BIT_INIT;
5078 			}
5079 		}
5080 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5081 		DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
5082 		DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
5083 		goto free_pkt_return;
5084 	}
5085 
5086 #ifdef DHD_PKTID_AUDIT_RING
5087 	DHD_PKTID_AUDIT(dhd, map_handle, pktid, DHD_DUPLICATE_ALLOC);
5088 #endif /* DHD_PKTID_AUDIT_RING */
5089 
5090 	rxbuf_post->cmn_hdr.request_id = htol32(pktid);
5091 	rxbuf_post->cmn_hdr.if_id = 0;
5092 	rxbuf_post->cmn_hdr.epoch =  ring->seqnum % H2D_EPOCH_MODULO;
5093 	ring->seqnum++;
5094 	rxbuf_post->cmn_hdr.flags = ring->current_phase;
5095 
5096 #if defined(DHD_PCIE_PKTID)
5097 	if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
5098 		if (ring->wr == 0) {
5099 			ring->wr = ring->max_items - 1;
5100 		} else {
5101 			if (--ring->wr == 0) {
5102 				ring->current_phase = ring->current_phase ? 0 :
5103 					BCMPCIE_CMNHDR_PHASE_BIT_INIT;
5104 			}
5105 		}
5106 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5107 #ifdef IOCTLRESP_USE_CONSTMEM
5108 		if (non_ioctl_resp_buf)
5109 #endif /* IOCTLRESP_USE_CONSTMEM */
5110 		{
5111 			if (SECURE_DMA_ENAB(dhd->osh)) {
5112 				SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
5113 					ring->dma_buf.secdma, 0);
5114 			} else
5115 				DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
5116 		}
5117 		goto free_pkt_return;
5118 	}
5119 #endif /* DHD_PCIE_PKTID */
5120 
5121 #ifndef IOCTLRESP_USE_CONSTMEM
5122 	rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p));
5123 #else
5124 	rxbuf_post->host_buf_len = htol16((uint16)pktlen);
5125 #endif /* IOCTLRESP_USE_CONSTMEM */
5126 	rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
5127 	rxbuf_post->host_buf_addr.low_addr  = htol32(PHYSADDRLO(pa));
5128 
5129 #ifdef DHD_LBUF_AUDIT
5130 	if (non_ioctl_resp_buf)
5131 		PKTAUDIT(dhd->osh, p);
5132 #endif // endif
5133 
5134 	/* update ring's WR index and ring doorbell to dongle */
5135 	dhd_prot_ring_write_complete(dhd, ring, rxbuf_post, 1);
5136 
5137 	DHD_RING_UNLOCK(ring->ring_lock, flags);
5138 
5139 	return 1;
5140 
5141 free_pkt_return:
5142 	if (!non_ioctl_resp_buf) {
5143 #ifdef IOCTLRESP_USE_CONSTMEM
5144 		free_ioctl_return_buffer(dhd, &retbuf);
5145 #else
5146 		dhd_prot_packet_free(dhd, p, buf_type, FALSE);
5147 #endif /* IOCTLRESP_USE_CONSTMEM */
5148 	} else {
5149 		dhd_prot_packet_free(dhd, p, buf_type, FALSE);
5150 	}
5151 
5152 	return -1;
5153 } /* dhd_prot_rxbufpost_ctrl */
5154 
5155 static uint16
5156 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msg_type, uint32 max_to_post)
5157 {
5158 	uint32 i = 0;
5159 	int32 ret_val;
5160 
5161 	DHD_INFO(("max to post %d, msg type %d \n", max_to_post, msg_type));
5162 
5163 	if (dhd->busstate == DHD_BUS_DOWN) {
5164 		DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
5165 		return 0;
5166 	}
5167 
5168 	while (i < max_to_post) {
5169 		ret_val  = dhd_prot_rxbufpost_ctrl(dhd, msg_type);
5170 		if (ret_val < 0)
5171 			break;
5172 		i++;
5173 	}
5174 	DHD_INFO(("posted %d buffers of type %d\n", i, msg_type));
5175 	return (uint16)i;
5176 }
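/*
 * Editorial sketch: the callers below top a pool up to its per-type
 * maximum, e.g. for event buffers:
 *
 *   max_to_post = prot->max_eventbufpost - prot->cur_event_bufs_posted;
 *   prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
 *           MSG_TYPE_EVENT_BUF_POST, max_to_post);
 */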
5177 
5178 static void
5179 dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd)
5180 {
5181 	dhd_prot_t *prot = dhd->prot;
5182 	int max_to_post;
5183 
5184 	DHD_INFO(("ioctl resp buf post\n"));
5185 	max_to_post = prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted;
5186 	if (max_to_post <= 0) {
5187 		DHD_INFO(("%s: Cannot post more than max IOCTL resp buffers\n",
5188 			__FUNCTION__));
5189 		return;
5190 	}
5191 	prot->cur_ioctlresp_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
5192 		MSG_TYPE_IOCTLRESP_BUF_POST, max_to_post);
5193 }
5194 
5195 static void
5196 dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd)
5197 {
5198 	dhd_prot_t *prot = dhd->prot;
5199 	int max_to_post;
5200 
5201 	max_to_post = prot->max_eventbufpost - prot->cur_event_bufs_posted;
5202 	if (max_to_post <= 0) {
5203 		DHD_ERROR(("%s: Cannot post more than max event buffers\n",
5204 			__FUNCTION__));
5205 		return;
5206 	}
5207 	prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
5208 		MSG_TYPE_EVENT_BUF_POST, max_to_post);
5209 }
5210 
5211 static int
5212 dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *dhd)
5213 {
5214 	return 0;
5215 }
5216 
5217 bool BCMFASTPATH
5218 dhd_prot_process_msgbuf_infocpl(dhd_pub_t *dhd, uint bound)
5219 {
5220 	dhd_prot_t *prot = dhd->prot;
5221 	bool more = TRUE;
5222 	uint n = 0;
5223 	msgbuf_ring_t *ring = prot->d2hring_info_cpln;
5224 	unsigned long flags;
5225 
5226 	if (ring == NULL)
5227 		return FALSE;
5228 	if (ring->inited != TRUE)
5229 		return FALSE;
5230 
5231 	/* Process all the messages - DTOH direction */
5232 	while (!dhd_is_device_removed(dhd)) {
5233 		uint8 *msg_addr;
5234 		uint32 msg_len;
5235 
5236 		if (dhd_query_bus_erros(dhd)) {
5237 			more = FALSE;
5238 			break;
5239 		}
5240 
5241 		if (dhd->hang_was_sent) {
5242 			more = FALSE;
5243 			break;
5244 		}
5245 
5246 		if (dhd->smmu_fault_occurred) {
5247 			more = FALSE;
5248 			break;
5249 		}
5250 
5251 		DHD_RING_LOCK(ring->ring_lock, flags);
5252 		/* Get the message from ring */
5253 		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
5254 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5255 		if (msg_addr == NULL) {
5256 			more = FALSE;
5257 			break;
5258 		}
5259 
5260 		/* Prefetch data to populate the cache */
5261 		OSL_PREFETCH(msg_addr);
5262 
5263 		if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
5264 			DHD_ERROR(("%s: Error processing info cpl msgbuf of len %d\n",
5265 				__FUNCTION__, msg_len));
5266 		}
5267 
5268 		/* Update read pointer */
5269 		dhd_prot_upd_read_idx(dhd, ring);
5270 
5271 		/* After batch processing, check RX bound */
5272 		n += msg_len / ring->item_len;
5273 		if (n >= bound) {
5274 			break;
5275 		}
5276 	}
5277 
5278 	return more;
5279 }
5280 
5281 #ifdef EWP_EDL
5282 bool
5283 dhd_prot_process_msgbuf_edl(dhd_pub_t *dhd)
5284 {
5285 	dhd_prot_t *prot = dhd->prot;
5286 	msgbuf_ring_t *ring = prot->d2hring_edl;
5287 	unsigned long flags = 0;
5288 	uint32 items = 0;
5289 	uint16 rd = 0;
5290 	uint16 depth = 0;
5291 
5292 	if (ring == NULL)
5293 		return FALSE;
5294 	if (ring->inited != TRUE)
5295 		return FALSE;
5296 	if (ring->item_len == 0) {
5297 		DHD_ERROR(("%s: Bad ring ! ringidx %d, item_len %d \n",
5298 			__FUNCTION__, ring->idx, ring->item_len));
5299 		return FALSE;
5300 	}
5301 
5302 	if (dhd_query_bus_erros(dhd)) {
5303 		return FALSE;
5304 	}
5305 
5306 	if (dhd->hang_was_sent) {
5307 		return FALSE;
5308 	}
5309 
5310 	/* In this DPC context, just check whether the wr index has moved
5311 	 * and schedule a deferred context to actually process the
5312 	 * work items.
5313 	 */
5314 	/* update the write index */
5315 	DHD_RING_LOCK(ring->ring_lock, flags);
5316 	if (dhd->dma_d2h_ring_upd_support) {
5317 		/* DMAing write/read indices supported */
5318 		ring->wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
5319 	} else {
5320 		dhd_bus_cmn_readshared(dhd->bus, &ring->wr, RING_WR_UPD, ring->idx);
5321 	}
5322 	rd = ring->rd;
5323 	DHD_RING_UNLOCK(ring->ring_lock, flags);
5324 
5325 	depth = ring->max_items;
5326 	/* check for avail space, in number of ring items */
5327 	items = READ_AVAIL_SPACE(ring->wr, rd, depth);
5328 	if (items == 0) {
5329 		/* no work items in edl ring */
5330 		return FALSE;
5331 	}
5332 	if (items > ring->max_items) {
5333 		DHD_ERROR(("\r\n======================= \r\n"));
5334 		DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
5335 			__FUNCTION__, ring, ring->name, ring->max_items, items));
5336 		DHD_ERROR(("wr: %d,  rd: %d,  depth: %d  \r\n",
5337 			ring->wr, ring->rd, depth));
5338 		DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
5339 			dhd->busstate, dhd->bus->wait_for_d3_ack));
5340 		DHD_ERROR(("\r\n======================= \r\n"));
5341 #ifdef SUPPORT_LINKDOWN_RECOVERY
5342 		if (ring->wr >= ring->max_items) {
5343 			dhd->bus->read_shm_fail = TRUE;
5344 		}
5345 #else
5346 #ifdef DHD_FW_COREDUMP
5347 		if (dhd->memdump_enabled) {
5348 			/* collect core dump */
5349 			dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
5350 			dhd_bus_mem_dump(dhd);
5351 
5352 		}
5353 #endif /* DHD_FW_COREDUMP */
5354 #endif /* SUPPORT_LINKDOWN_RECOVERY */
5355 		dhd_schedule_reset(dhd);
5356 
5357 		return FALSE;
5358 	}
5359 
5360 	if (items > D2HRING_EDL_WATERMARK) {
5361 		DHD_ERROR_RLMT(("%s: WARNING! EDL watermark hit, num items=%u;"
5362 			" rd=%u; wr=%u; depth=%u;\n", __FUNCTION__, items,
5363 			ring->rd, ring->wr, depth));
5364 	}
5365 
5366 	dhd_schedule_logtrace(dhd->info);
5367 
5368 	return FALSE;
5369 }
5370 
5371 /* This is called either from work queue context of 'event_log_dispatcher_work' or
5372 * from the kthread context of dhd_logtrace_thread
5373 */
5374 int
5375 dhd_prot_process_edl_complete(dhd_pub_t *dhd, void *evt_decode_data)
5376 {
5377 	dhd_prot_t *prot = NULL;
5378 	msgbuf_ring_t *ring = NULL;
5379 	int err = 0;
5380 	unsigned long flags = 0;
5381 	cmn_msg_hdr_t *msg = NULL;
5382 	uint8 *msg_addr = NULL;
5383 	uint32 max_items_to_process = 0, n = 0;
5384 	uint32 num_items = 0, new_items = 0;
5385 	uint16 depth = 0;
5386 	volatile uint16 wr = 0;
5387 
5388 	if (!dhd || !dhd->prot)
5389 		return 0;
5390 
5391 	prot = dhd->prot;
5392 	ring = prot->d2hring_edl;
5393 	if (!ring || !evt_decode_data) {
5394 		return 0;
5395 	}
5396 
5397 	if (dhd->hang_was_sent) {
5398 		return 0;
5399 	}
5400 
5401 	DHD_RING_LOCK(ring->ring_lock, flags);
5402 	ring->curr_rd = ring->rd;
5403 	wr = ring->wr;
5404 	depth = ring->max_items;
5405 	/* check for avail space, in number of ring items
5406 	 * Note, that this will only give the # of items
5407 	 * from rd to wr if wr>=rd, or from rd to ring end
5408 	 * if wr < rd. So in the latter case strictly speaking
5409 	 * not all the items are read. But this is OK, because
5410 	 * these will be processed in the next doorbell as rd
5411 	 * would have wrapped around. Processing in the next
5412 	 * doorbell is acceptable since EDL only contains debug data
5413 	 */
5414 	num_items = READ_AVAIL_SPACE(wr, ring->rd, depth);
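	/*
	 * Editorial sketch of the assumed READ_AVAIL_SPACE semantics, matching
	 * the comment above (contiguous items only, no wrap):
	 *
	 *   static uint16 read_avail_space(uint16 wr, uint16 rd, uint16 depth)
	 *   {
	 *           return (wr >= rd) ? (wr - rd) : (depth - rd);
	 *   }
	 *
	 * e.g. depth = 8, rd = 6, wr = 2 yields 2 items (slots 6 and 7); the
	 * wrapped items in slots 0 and 1 are read on the next doorbell.
	 */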
5415 
5416 	if (num_items == 0) {
5417 		/* no work items in edl ring */
5418 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5419 		return 0;
5420 	}
5421 
5422 	DHD_INFO(("%s: EDL work items [%u] available \n",
5423 			__FUNCTION__, num_items));
5424 
5425 	/* if space is available, calculate address to be read */
5426 	msg_addr = (uint8 *)ring->dma_buf.va + (ring->rd * ring->item_len);
5427 
5428 	max_items_to_process = MIN(num_items, DHD_EVENT_LOGTRACE_BOUND);
5429 
5430 	DHD_RING_UNLOCK(ring->ring_lock, flags);
5431 
5432 	/* Prefetch data to populate the cache */
5433 	OSL_PREFETCH(msg_addr);
5434 
5435 	n = max_items_to_process;
5436 	while (n > 0) {
5437 		msg = (cmn_msg_hdr_t *)msg_addr;
5438 		/* wait for DMA of work item to complete */
5439 		if ((err = prot->d2h_edl_sync_cb(dhd, ring, msg)) != BCME_OK) {
5440 			DHD_ERROR(("%s: Error waiting for DMA to cmpl in EDL "
5441 				"ring; err = %d\n", __FUNCTION__, err));
5442 		}
5443 
5444 		/*
5445 		 * Update the curr_rd to the current index in the ring, from where
5446 		 * the work item is fetched. This way if the fetched work item
5447 		 * fails in LIVELOCK, we can print the exact read index in the ring
5448 		 * where the corrupted work item shows up.
5449 		 */
5450 		if ((ring->curr_rd + 1) >= ring->max_items) {
5451 			ring->curr_rd = 0;
5452 		} else {
5453 			ring->curr_rd += 1;
5454 		}
5455 
5456 		if (err != BCME_OK) {
5457 			return 0;
5458 		}
5459 
5460 		/* process the edl work item, i.e, the event log */
5461 		err = dhd_event_logtrace_process_edl(dhd, msg_addr, evt_decode_data);
5462 
5463 		/* Dummy sleep so that scheduler kicks in after processing any logprints */
5464 		OSL_SLEEP(0);
5465 
5466 		/* Prefetch data to populate the cache */
5467 		OSL_PREFETCH(msg_addr + ring->item_len);
5468 
5469 		msg_addr += ring->item_len;
5470 		--n;
5471 	}
5472 
5473 	DHD_RING_LOCK(ring->ring_lock, flags);
5474 	/* update host ring read pointer */
5475 	if ((ring->rd + max_items_to_process) >= ring->max_items)
5476 		ring->rd = 0;
5477 	else
5478 		ring->rd += max_items_to_process;
5479 	DHD_RING_UNLOCK(ring->ring_lock, flags);
5480 
5481 	/* Now, after processing max_items_to_process items, update the dongle
5482 	 * rd index. The TCM rd index is updated only if the bus is not in D3;
5483 	 * otherwise the rd index is updated from the resume context, in
5484 	 * 'dhdpcie_bus_suspend'.
5485 	 */
5486 	DHD_GENERAL_LOCK(dhd, flags);
5487 	if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) {
5488 		DHD_INFO(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
5489 			__FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state));
5490 		DHD_GENERAL_UNLOCK(dhd, flags);
5491 	} else {
5492 		DHD_GENERAL_UNLOCK(dhd, flags);
5493 		DHD_EDL_RING_TCM_RD_UPDATE(dhd);
5494 	}
5495 
5496 	/* If num_items > bound we will reschedule anyway, and when this
5497 	 * function runs again, any wr index that the DPC updated in the
5498 	 * meantime is read afresh. But if num_items <= bound, and the DPC
5499 	 * executes and updates the wr index while the above while loop is
5500 	 * running, then the updated 'wr' index needs to be re-read here.
5501 	 * If we don't do so, the new event logs will not be processed
5502 	 * until the next time this function
5503 	 * happens to be scheduled.
5504 	 */
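	/*
	 * Worked scenario (numbers hypothetical): bound = 64, num_items = 40,
	 * so the loop above processed all 40 items. If the DPC meanwhile
	 * advanced wr by 10, the re-read below raises num_items to 50, and
	 * this function returns 10 so the deferred context is rescheduled.
	 */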
5505 	if (num_items <= DHD_EVENT_LOGTRACE_BOUND) {
5506 		/* read the updated wr index if reqd. and update num_items */
5507 		DHD_RING_LOCK(ring->ring_lock, flags);
5508 		if (wr != (volatile uint16)ring->wr) {
5509 			wr = (volatile uint16)ring->wr;
5510 			new_items = READ_AVAIL_SPACE(wr, ring->rd, depth);
5511 			DHD_INFO(("%s: new items [%u] avail in edl\n",
5512 				__FUNCTION__, new_items));
5513 			num_items += new_items;
5514 		}
5515 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5516 	}
5517 
5518 	/* if # of items processed is less than num_items, need to re-schedule
5519 	 * the deferred ctx
5520 	 */
5521 	if (max_items_to_process < num_items) {
5522 		DHD_INFO(("%s: EDL bound hit / new items found, "
5523 				"items processed=%u; remaining=%u, "
5524 				"resched deferred ctx...\n",
5525 				__FUNCTION__, max_items_to_process,
5526 				num_items - max_items_to_process));
5527 		return (num_items - max_items_to_process);
5528 	}
5529 
5530 	return 0;
5531 
5532 }
5533 
5534 void
5535 dhd_prot_edl_ring_tcm_rd_update(dhd_pub_t *dhd)
5536 {
5537 	dhd_prot_t *prot = NULL;
5538 	unsigned long flags = 0;
5539 	msgbuf_ring_t *ring = NULL;
5540 
5541 	if (!dhd)
5542 		return;
5543 
5544 	prot = dhd->prot;
5545 	if (!prot || !prot->d2hring_edl)
5546 		return;
5547 
5548 	ring = prot->d2hring_edl;
5549 	DHD_RING_LOCK(ring->ring_lock, flags);
5550 	dhd_prot_upd_read_idx(dhd, ring);
5551 	DHD_RING_UNLOCK(ring->ring_lock, flags);
5552 }
5553 #endif /* EWP_EDL */
5554 
5555 /* called when DHD needs to check for 'receive complete' messages from the dongle */
5556 bool BCMFASTPATH
5557 dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound, int ringtype)
5558 {
5559 	bool more = FALSE;
5560 	uint n = 0;
5561 	dhd_prot_t *prot = dhd->prot;
5562 	msgbuf_ring_t *ring;
5563 	uint16 item_len;
5564 	host_rxbuf_cmpl_t *msg = NULL;
5565 	uint8 *msg_addr;
5566 	uint32 msg_len;
5567 	uint16 pkt_cnt, pkt_cnt_newidx;
5568 	unsigned long flags;
5569 	dmaaddr_t pa;
5570 	uint32 len;
5571 	void *dmah;
5572 	void *secdma;
5573 	int ifidx = 0, if_newidx = 0;
5574 	void *pkt, *pktqhead = NULL, *prevpkt = NULL, *pkt_newidx, *nextpkt;
5575 	uint32 pktid;
5576 	int i;
5577 	uint8 sync;
5578 	ts_timestamp_t *ts;
5579 
5580 	BCM_REFERENCE(ts);
5581 #ifdef DHD_HP2P
5582 	if (ringtype == DHD_HP2P_RING && prot->d2hring_hp2p_rxcpl)
5583 		ring = prot->d2hring_hp2p_rxcpl;
5584 	else
5585 #endif /* DHD_HP2P */
5586 		ring = &prot->d2hring_rx_cpln;
5587 	item_len = ring->item_len;
5588 	while (1) {
5589 		if (dhd_is_device_removed(dhd))
5590 			break;
5591 
5592 		if (dhd_query_bus_erros(dhd))
5593 			break;
5594 
5595 		if (dhd->hang_was_sent)
5596 			break;
5597 
5598 		if (dhd->smmu_fault_occurred) {
5599 			break;
5600 		}
5601 
5602 		pkt_cnt = 0;
5603 		pktqhead = pkt_newidx = NULL;
5604 		pkt_cnt_newidx = 0;
5605 
5606 		DHD_RING_LOCK(ring->ring_lock, flags);
5607 
5608 		/* Get the address of the next message to be read from ring */
5609 		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
5610 		if (msg_addr == NULL) {
5611 			DHD_RING_UNLOCK(ring->ring_lock, flags);
5612 			break;
5613 		}
5614 
5615 		while (msg_len > 0) {
5616 			msg = (host_rxbuf_cmpl_t *)msg_addr;
5617 
5618 			/* Wait until DMA completes, then fetch msg_type */
5619 			sync = prot->d2h_sync_cb(dhd, ring, &msg->cmn_hdr, item_len);
5620 			/*
5621 			 * Update the curr_rd to the current index in the ring, from where
5622 			 * the work item is fetched. This way if the fetched work item
5623 			 * fails in LIVELOCK, we can print the exact read index in the ring
5624 			 * where the corrupted work item shows up.
5625 			 */
5626 			if ((ring->curr_rd + 1) >= ring->max_items) {
5627 				ring->curr_rd = 0;
5628 			} else {
5629 				ring->curr_rd += 1;
5630 			}
5631 
5632 			if (!sync) {
5633 				msg_len -= item_len;
5634 				msg_addr += item_len;
5635 				continue;
5636 			}
5637 
5638 			pktid = ltoh32(msg->cmn_hdr.request_id);
5639 
5640 #ifdef DHD_PKTID_AUDIT_RING
5641 			DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_rx_map, pktid,
5642 				DHD_DUPLICATE_FREE, msg, D2HRING_RXCMPLT_ITEMSIZE);
5643 #endif /* DHD_PKTID_AUDIT_RING */
5644 
5645 			pkt = DHD_PKTID_TO_NATIVE(dhd, prot->pktid_rx_map, pktid, pa,
5646 			        len, dmah, secdma, PKTTYPE_DATA_RX);
5647 			if (!pkt) {
5648 				msg_len -= item_len;
5649 				msg_addr += item_len;
5650 				continue;
5651 			}
5652 
5653 			if (SECURE_DMA_ENAB(dhd->osh))
5654 				SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0,
5655 				    dmah, secdma, 0);
5656 			else
5657 				DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
5658 
5659 #ifdef DMAMAP_STATS
5660 			dhd->dma_stats.rxdata--;
5661 			dhd->dma_stats.rxdata_sz -= len;
5662 #endif /* DMAMAP_STATS */
5663 			DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, "
5664 				"pktdata %p, metalen %d\n",
5665 				ltoh32(msg->cmn_hdr.request_id),
5666 				ltoh16(msg->data_offset),
5667 				ltoh16(msg->data_len), msg->cmn_hdr.if_id,
5668 				msg->cmn_hdr.flags, PKTDATA(dhd->osh, pkt),
5669 				ltoh16(msg->metadata_len)));
5670 
5671 			pkt_cnt++;
5672 			msg_len -= item_len;
5673 			msg_addr += item_len;
5674 
5675 #if DHD_DBG_SHOW_METADATA
5676 			if (prot->metadata_dbg && prot->rx_metadata_offset &&
5677 			        msg->metadata_len) {
5678 				uchar *ptr;
5679 				ptr = PKTDATA(dhd->osh, pkt) - (prot->rx_metadata_offset);
5680 				/* header followed by data */
5681 				bcm_print_bytes("rxmetadata", ptr, msg->metadata_len);
5682 				dhd_prot_print_metadata(dhd, ptr, msg->metadata_len);
5683 			}
5684 #endif /* DHD_DBG_SHOW_METADATA */
5685 
5686 			/* data_offset from buf start */
5687 			if (ltoh16(msg->data_offset)) {
5688 				/* data offset given from dongle after split rx */
5689 				PKTPULL(dhd->osh, pkt, ltoh16(msg->data_offset));
5690 			}
5691 			else if (prot->rx_dataoffset) {
5692 				/* DMA RX offset updated through shared area */
5693 				PKTPULL(dhd->osh, pkt, prot->rx_dataoffset);
5694 			}
5695 			/* Actual length of the packet */
5696 			PKTSETLEN(dhd->osh, pkt, ltoh16(msg->data_len));
5697 
5698 #if defined(WL_MONITOR)
5699 			if (dhd_monitor_enabled(dhd, ifidx)) {
5700 				if (msg->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11) {
5701 					dhd_rx_mon_pkt(dhd, msg, pkt, ifidx);
5702 					continue;
5703 				} else {
5704 					DHD_ERROR(("Received non-802.11 packet "
5705 						"while monitor mode is enabled\n"));
5706 				}
5707 			}
5708 #endif /* WL_MONITOR */
5709 
5710 			if (msg->flags & BCMPCIE_PKT_FLAGS_NO_FORWARD) {
5711 				DHD_PKT_FLAGS_SET_NO_FWD(pkt);
5712 			}
5713 
5714 			if (!pktqhead) {
5715 				pktqhead = prevpkt = pkt;
5716 				ifidx = msg->cmn_hdr.if_id;
5717 			} else {
5718 				if (ifidx != msg->cmn_hdr.if_id) {
5719 					pkt_newidx = pkt;
5720 					if_newidx = msg->cmn_hdr.if_id;
5721 					pkt_cnt--;
5722 					pkt_cnt_newidx = 1;
5723 					break;
5724 				} else {
5725 					PKTSETNEXT(dhd->osh, prevpkt, pkt);
5726 					prevpkt = pkt;
5727 				}
5728 			}
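			/*
			 * Note (editorial): completions are chained per interface.
			 * Packets for the first if_id seen are linked off pktqhead;
			 * the first completion for a different if_id is parked in
			 * pkt_newidx and the inner loop exits, so every chain handed
			 * to the rx path is homogeneous in ifidx.
			 */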
5729 
5730 #ifdef DHD_HP2P
5731 			if (dhd->hp2p_capable && ring == prot->d2hring_hp2p_rxcpl) {
5732 #ifdef DHD_HP2P_DEBUG
5733 				bcm_print_bytes("Rxcpl", (uchar *)msg,  sizeof(host_rxbuf_cmpl_t));
5734 #endif /* DHD_HP2P_DEBUG */
5735 				dhd_update_hp2p_rxstats(dhd, msg);
5736 			}
5737 #endif /* DHD_HP2P */
5738 
5739 #ifdef DHD_LBUF_AUDIT
5740 			PKTAUDIT(dhd->osh, pkt);
5741 #endif // endif
5742 		}
5743 
5744 		/* roll back read pointer for unprocessed message */
5745 		if (msg_len > 0) {
5746 			if (ring->rd < msg_len / item_len)
5747 				ring->rd = ring->max_items - msg_len / item_len;
5748 			else
5749 				ring->rd -= msg_len / item_len;
5750 		}
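		/*
		 * Worked example (numbers hypothetical): max_items = 1024 with 5
		 * items unprocessed. If rd had wrapped to 0 after the read, it
		 * rolls back to 1024 - 5 = 1019; otherwise it simply steps back
		 * by 5, and the unprocessed items are re-read on the next pass.
		 */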
5751 
5752 		/* Update read pointer */
5753 		dhd_prot_upd_read_idx(dhd, ring);
5754 
5755 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5756 
5757 		pkt = pktqhead;
5758 		for (i = 0; pkt && i < pkt_cnt; i++, pkt = nextpkt) {
5759 			nextpkt = PKTNEXT(dhd->osh, pkt);
5760 			PKTSETNEXT(dhd->osh, pkt, NULL);
5761 #ifdef DHD_LB_RXP
5762 			dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx);
5763 #elif defined(DHD_RX_CHAINING)
5764 			dhd_rxchain_frame(dhd, pkt, ifidx);
5765 #else
5766 			dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
5767 #endif /* DHD_LB_RXP */
5768 		}
5769 
5770 		if (pkt_newidx) {
5771 #ifdef DHD_LB_RXP
5772 			dhd_lb_rx_pkt_enqueue(dhd, pkt_newidx, if_newidx);
5773 #elif defined(DHD_RX_CHAINING)
5774 			dhd_rxchain_frame(dhd, pkt_newidx, if_newidx);
5775 #else
5776 			dhd_bus_rx_frame(dhd->bus, pkt_newidx, if_newidx, 1);
5777 #endif /* DHD_LB_RXP */
5778 		}
5779 
5780 		pkt_cnt += pkt_cnt_newidx;
5781 
5782 		/* Post another set of rxbufs to the device */
5783 		dhd_prot_return_rxbuf(dhd, 0, pkt_cnt);
5784 
5785 #ifdef DHD_RX_CHAINING
5786 		dhd_rxchain_commit(dhd);
5787 #endif // endif
5788 
5789 		/* After batch processing, check RX bound */
5790 		n += pkt_cnt;
5791 		if (n >= bound) {
5792 			more = TRUE;
5793 			break;
5794 		}
5795 	}
5796 
5797 	/* Call lb_dispatch only if packets are queued */
5798 	if (n &&
5799 #ifdef WL_MONITOR
5800 	!(dhd_monitor_enabled(dhd, ifidx)) &&
5801 #endif /* WL_MONITOR */
5802 	TRUE) {
5803 		DHD_LB_DISPATCH_RX_COMPL(dhd);
5804 		DHD_LB_DISPATCH_RX_PROCESS(dhd);
5805 	}
5806 
5807 	return more;
5808 
5809 }
5810 
5811 /**
5812  * Hands transmit packets (with a caller provided flow_id) over to dongle territory (the flow ring)
5813  */
5814 void
5815 dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flowid, void *msgring)
5816 {
5817 	msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring;
5818 
5819 	if (ring == NULL) {
5820 		DHD_ERROR(("%s: NULL txflowring. exiting...\n",  __FUNCTION__));
5821 		return;
5822 	}
5823 	/* Update read pointer */
5824 	if (dhd->dma_d2h_ring_upd_support) {
5825 		ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
5826 	}
5827 
5828 	DHD_TRACE(("ringid %d flowid %d write %d read %d \n\n",
5829 		ring->idx, flowid, ring->wr, ring->rd));
5830 
5831 	/* Need more logic here, but for now use it directly */
5832 	dhd_bus_schedule_queue(dhd->bus, flowid, TRUE); /* from queue to flowring */
5833 }
5834 
5835 /** called when DHD needs to check for 'transmit complete' messages from the dongle */
5836 bool BCMFASTPATH
5837 dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound, int ringtype)
5838 {
5839 	bool more = TRUE;
5840 	uint n = 0;
5841 	msgbuf_ring_t *ring;
5842 	unsigned long flags;
5843 
5844 #ifdef DHD_HP2P
5845 	if (ringtype == DHD_HP2P_RING && dhd->prot->d2hring_hp2p_txcpl)
5846 		ring = dhd->prot->d2hring_hp2p_txcpl;
5847 	else
5848 #endif /* DHD_HP2P */
5849 		ring = &dhd->prot->d2hring_tx_cpln;
5850 
5851 	/* Process all the messages - DTOH direction */
5852 	while (!dhd_is_device_removed(dhd)) {
5853 		uint8 *msg_addr;
5854 		uint32 msg_len;
5855 
5856 		if (dhd_query_bus_erros(dhd)) {
5857 			more = FALSE;
5858 			break;
5859 		}
5860 
5861 		if (dhd->hang_was_sent) {
5862 			more = FALSE;
5863 			break;
5864 		}
5865 
5866 		if (dhd->smmu_fault_occurred) {
5867 			more = FALSE;
5868 			break;
5869 		}
5870 
5871 		DHD_RING_LOCK(ring->ring_lock, flags);
5872 		/* Get the address of the next message to be read from ring */
5873 		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
5874 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5875 
5876 		if (msg_addr == NULL) {
5877 			more = FALSE;
5878 			break;
5879 		}
5880 
5881 		/* Prefetch data to populate the cache */
5882 		OSL_PREFETCH(msg_addr);
5883 
5884 		if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
5885 			DHD_ERROR(("%s: process %s msg addr %p len %d\n",
5886 				__FUNCTION__, ring->name, msg_addr, msg_len));
5887 		}
5888 
5889 		/* Write to dngl rd ptr */
5890 		dhd_prot_upd_read_idx(dhd, ring);
5891 
5892 		/* After batch processing, check bound */
5893 		n += msg_len / ring->item_len;
5894 		if (n >= bound) {
5895 			break;
5896 		}
5897 	}
5898 
5899 	DHD_LB_DISPATCH_TX_COMPL(dhd);
5900 
5901 	return more;
5902 }
5903 
5904 int BCMFASTPATH
5905 dhd_prot_process_trapbuf(dhd_pub_t *dhd)
5906 {
5907 	uint32 data;
5908 	dhd_dma_buf_t *trap_addr = &dhd->prot->fw_trap_buf;
5909 
5910 	/* Interrupts can come in before this struct
5911 	 *  has been initialized.
5912 	 */
5913 	if (trap_addr->va == NULL) {
5914 		DHD_ERROR(("%s: trap_addr->va is NULL\n", __FUNCTION__));
5915 		return 0;
5916 	}
5917 
5918 	OSL_CACHE_INV((void *)trap_addr->va, sizeof(uint32));
5919 	data = *(uint32 *)(trap_addr->va);
5920 
5921 	if (data & D2H_DEV_FWHALT) {
5922 		DHD_ERROR(("Firmware trapped and trap_data is 0x%08x\n", data));
5923 
5924 		if (data & D2H_DEV_EXT_TRAP_DATA)
5925 		{
5926 			if (dhd->extended_trap_data) {
5927 				OSL_CACHE_INV((void *)trap_addr->va,
5928 				       BCMPCIE_EXT_TRAP_DATA_MAXLEN);
5929 				memcpy(dhd->extended_trap_data, (uint32 *)trap_addr->va,
5930 				       BCMPCIE_EXT_TRAP_DATA_MAXLEN);
5931 			}
5932 			DHD_ERROR(("Extended trap data available\n"));
5933 		}
5934 		return data;
5935 	}
5936 	return 0;
5937 }
5938 
5939 /** called when DHD needs to check for 'ioctl complete' messages from the dongle */
5940 int BCMFASTPATH
5941 dhd_prot_process_ctrlbuf(dhd_pub_t *dhd)
5942 {
5943 	dhd_prot_t *prot = dhd->prot;
5944 	msgbuf_ring_t *ring = &prot->d2hring_ctrl_cpln;
5945 	unsigned long flags;
5946 
5947 	/* Process all the messages - DTOH direction */
5948 	while (!dhd_is_device_removed(dhd)) {
5949 		uint8 *msg_addr;
5950 		uint32 msg_len;
5951 
5952 		if (dhd_query_bus_erros(dhd)) {
5953 			break;
5954 		}
5955 
5956 		if (dhd->hang_was_sent) {
5957 			break;
5958 		}
5959 
5960 		if (dhd->smmu_fault_occurred) {
5961 			break;
5962 		}
5963 
5964 		DHD_RING_LOCK(ring->ring_lock, flags);
5965 		/* Get the address of the next message to be read from ring */
5966 		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
5967 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5968 
5969 		if (msg_addr == NULL) {
5970 			break;
5971 		}
5972 
5973 		/* Prefetch data to populate the cache */
5974 		OSL_PREFETCH(msg_addr);
5975 		if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
5976 			DHD_ERROR(("%s: process %s msg addr %p len %d\n",
5977 				__FUNCTION__, ring->name, msg_addr, msg_len));
5978 		}
5979 
5980 		/* Write to dngl rd ptr */
5981 		dhd_prot_upd_read_idx(dhd, ring);
5982 	}
5983 
5984 	return 0;
5985 }
5986 
5987 /**
5988  * Consume messages out of the D2H ring. Ensure that the message's DMA to host
5989  * memory has completed, before invoking the message handler via a table lookup
5990  * of the cmn_msg_hdr::msg_type.
5991  */
5992 static int BCMFASTPATH
5993 dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len)
5994 {
5995 	uint32 buf_len = len;
5996 	uint16 item_len;
5997 	uint8 msg_type;
5998 	cmn_msg_hdr_t *msg = NULL;
5999 	int ret = BCME_OK;
6000 
6001 	ASSERT(ring);
6002 	item_len = ring->item_len;
6003 	if (item_len == 0) {
6004 		DHD_ERROR(("%s: ringidx %d, item_len %d buf_len %d \n",
6005 			__FUNCTION__, ring->idx, item_len, buf_len));
6006 		return BCME_ERROR;
6007 	}
6008 
6009 	while (buf_len > 0) {
6010 		if (dhd->hang_was_sent) {
6011 			ret = BCME_ERROR;
6012 			goto done;
6013 		}
6014 
6015 		if (dhd->smmu_fault_occurred) {
6016 			ret = BCME_ERROR;
6017 			goto done;
6018 		}
6019 
6020 		msg = (cmn_msg_hdr_t *)buf;
6021 
6022 		/* Wait until DMA completes, then fetch msg_type */
6023 		msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, item_len);
6024 
6025 		/*
6026 		 * Update the curr_rd to the current index in the ring, from where
6027 		 * the work item is fetched. This way if the fetched work item
6028 		 * fails in LIVELOCK, we can print the exact read index in the ring
6029 		 * where the corrupted work item shows up.
6030 		 */
6031 		if ((ring->curr_rd + 1) >= ring->max_items) {
6032 			ring->curr_rd = 0;
6033 		} else {
6034 			ring->curr_rd += 1;
6035 		}
6036 
6037 		/* Prefetch data to populate the cache */
6038 		OSL_PREFETCH(buf + item_len);
6039 
6040 		DHD_INFO(("msg_type %d item_len %d buf_len %d\n",
6041 			msg_type, item_len, buf_len));
6042 
6043 		if (msg_type == MSG_TYPE_LOOPBACK) {
6044 			bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, item_len);
6045 			DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", item_len));
6046 		}
6047 
6048 		ASSERT(msg_type < DHD_PROT_FUNCS);
6049 		if (msg_type >= DHD_PROT_FUNCS) {
6050 			DHD_ERROR(("%s: msg_type %d, item_len %d buf_len %d\n",
6051 				__FUNCTION__, msg_type, item_len, buf_len));
6052 			ret = BCME_ERROR;
6053 			goto done;
6054 		}
6055 
6056 		if (msg_type == MSG_TYPE_INFO_BUF_CMPLT) {
6057 			if (ring == dhd->prot->d2hring_info_cpln) {
6058 				if (!dhd->prot->infobufpost) {
6059 					DHD_ERROR(("no infobufs posted, "
6060 						   "but there is a completion\n"));
6061 					goto done;
6062 				}
6063 				dhd->prot->infobufpost--;
6064 				dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
6065 				dhd_prot_process_infobuf_complete(dhd, buf);
6066 			}
6067 		} else
6068 		if (table_lookup[msg_type]) {
6069 			table_lookup[msg_type](dhd, buf);
6070 		}
6071 
6072 		if (buf_len < item_len) {
6073 			ret = BCME_ERROR;
6074 			goto done;
6075 		}
6076 		buf_len = buf_len - item_len;
6077 		buf = buf + item_len;
6078 	}
6079 
6080 done:
6081 
6082 #ifdef DHD_RX_CHAINING
6083 	dhd_rxchain_commit(dhd);
6084 #endif // endif
6085 
6086 	return ret;
6087 } /* dhd_prot_process_msgtype */
6088 
6089 static void
6090 dhd_prot_noop(dhd_pub_t *dhd, void *msg)
6091 {
6092 	return;
6093 }
6094 
6095 /** called on MSG_TYPE_RING_STATUS message received from dongle */
6096 static void
6097 dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg)
6098 {
6099 	pcie_ring_status_t *ring_status = (pcie_ring_status_t *) msg;
6100 	uint32 request_id = ltoh32(ring_status->cmn_hdr.request_id);
6101 	uint16 status = ltoh16(ring_status->compl_hdr.status);
6102 	uint16 ring_id = ltoh16(ring_status->compl_hdr.flow_ring_id);
6103 
6104 	DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, write_idx %d \n",
6105 		request_id, status, ring_id, ltoh16(ring_status->write_idx)));
6106 
6107 	if (ltoh16(ring_status->compl_hdr.ring_id) != BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT)
6108 		return;
6109 	if (status == BCMPCIE_BAD_PHASE) {
6110 		/* bad phase reported by the dongle */
6111 		DHD_ERROR(("Bad phase\n"));
6112 	}
6113 	if (status != BCMPCIE_BADOPTION)
6114 		return;
6115 
6116 	if (request_id == DHD_H2D_DBGRING_REQ_PKTID) {
6117 		if (dhd->prot->h2dring_info_subn != NULL) {
6118 			if (dhd->prot->h2dring_info_subn->create_pending == TRUE) {
6119 				DHD_ERROR(("H2D ring create failed for info ring\n"));
6120 				dhd->prot->h2dring_info_subn->create_pending = FALSE;
6121 			}
6122 			else
6123 				DHD_ERROR(("info submit ring create response, but create not pending\n"));
6124 		} else {
6125 			DHD_ERROR(("%s info submit ring doesn't exist\n", __FUNCTION__));
6126 		}
6127 	}
6128 	else if (request_id == DHD_D2H_DBGRING_REQ_PKTID) {
6129 		if (dhd->prot->d2hring_info_cpln != NULL) {
6130 			if (dhd->prot->d2hring_info_cpln->create_pending == TRUE) {
6131 				DHD_ERROR(("D2H ring create failed for info ring\n"));
6132 				dhd->prot->d2hring_info_cpln->create_pending = FALSE;
6133 			}
6134 			else
6135 				DHD_ERROR(("ring create ID for info ring, create not pending\n"));
6136 		} else {
6137 			DHD_ERROR(("%s info cpl ring doesn't exist\n", __FUNCTION__));
6138 		}
6139 	}
6140 #ifdef DHD_HP2P
6141 	else if (request_id == DHD_D2H_HPPRING_TXREQ_PKTID) {
6142 		if (dhd->prot->d2hring_hp2p_txcpl != NULL) {
6143 			if (dhd->prot->d2hring_hp2p_txcpl->create_pending == TRUE) {
6144 				DHD_ERROR(("D2H ring create failed for hp2p txcmpl ring\n"));
6145 				dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
6146 			}
6147 			else
6148 				DHD_ERROR(("hp2p txcmpl ring create response, but create not pending\n"));
6149 		} else {
6150 			DHD_ERROR(("%s hp2p txcmpl ring doesn't exist\n", __FUNCTION__));
6151 		}
6152 	}
6153 	else if (request_id == DHD_D2H_HPPRING_RXREQ_PKTID) {
6154 		if (dhd->prot->d2hring_hp2p_rxcpl != NULL) {
6155 			if (dhd->prot->d2hring_hp2p_rxcpl->create_pending == TRUE) {
6156 				DHD_ERROR(("D2H ring create failed for hp2p rxcmpl ring\n"));
6157 				dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
6158 			}
6159 			else
6160 				DHD_ERROR(("ring create ID for hp2p rxcmpl ring, not pending\n"));
6161 		} else {
6162 			DHD_ERROR(("%s hp2p rxcpl ring doesn't exist\n", __FUNCTION__));
6163 		}
6164 	}
6165 #endif /* DHD_HP2P */
6166 	else {
6167 		DHD_ERROR(("don't know how to pair this with the original request\n"));
6168 	}
6169 	/* How do we track this to pair it with ??? */
6170 	return;
6171 }
6172 
6173 /** called on MSG_TYPE_GEN_STATUS ('general status') message received from dongle */
6174 static void
6175 dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg)
6176 {
6177 	pcie_gen_status_t *gen_status = (pcie_gen_status_t *)msg;
6178 	DHD_ERROR(("ERROR: gen status: request_id %d, STATUS 0x%04x, flow ring %d \n",
6179 		gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status,
6180 		gen_status->compl_hdr.flow_ring_id));
6181 
6182 	/* TODO: track outstanding requests so this status can be paired with its originator */
6183 	return;
6184 }
6185 
6186 /**
6187  * Called on MSG_TYPE_IOCTLPTR_REQ_ACK ('ioctl ack') message received from dongle, meaning that the
6188  * dongle received the ioctl message in dongle memory.
6189  */
6190 static void
6191 dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg)
6192 {
6193 	ioctl_req_ack_msg_t *ioct_ack = (ioctl_req_ack_msg_t *)msg;
6194 	unsigned long flags;
6195 #if defined(DHD_PKTID_AUDIT_RING)
6196 	uint32 pktid = ltoh32(ioct_ack->cmn_hdr.request_id);
6197 #endif // endif
6198 
6199 #if defined(DHD_PKTID_AUDIT_RING)
6200 	/* Skip audit for DHD_IOCTL_REQ_PKTID = 0xFFFE */
6201 	if (pktid != DHD_IOCTL_REQ_PKTID) {
6202 #ifndef IOCTLRESP_USE_CONSTMEM
6203 		DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
6204 			DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
6205 #else
6206 		DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
6207 			DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
6208 #endif /* !IOCTLRESP_USE_CONSTMEM */
6209 	}
6210 #endif // endif
6211 
6212 	dhd->prot->ioctl_ack_time = OSL_LOCALTIME_NS();
6213 
6214 	DHD_GENERAL_LOCK(dhd, flags);
6215 	if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) &&
6216 		(dhd->prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
6217 		dhd->prot->ioctl_state &= ~MSGBUF_IOCTL_ACK_PENDING;
6218 	} else {
6219 		DHD_ERROR(("%s: received ioctl ACK with state %02x trans_id = %d\n",
6220 			__FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
6221 		prhex("dhd_prot_ioctack_process:",
6222 			(uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
6223 	}
6224 	DHD_GENERAL_UNLOCK(dhd, flags);
6225 
6226 	DHD_CTL(("ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n",
6227 		ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status,
6228 		ioct_ack->compl_hdr.flow_ring_id));
6229 	if (ioct_ack->compl_hdr.status != 0) {
6230 		DHD_ERROR(("ioctl req ack reported error status 0x%04x\n", ioct_ack->compl_hdr.status));
6231 	}
6232 }
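/*
 * State transitions assumed from the checks in this file (not an
 * authoritative spec): an ioctl submission sets both pending bits, the
 * ACK handler above clears MSGBUF_IOCTL_ACK_PENDING, and the completion
 * handler below clears MSGBUF_IOCTL_RESP_PENDING and wakes the waiter.
 *
 *	submit ioctl  : ioctl_state = ACK_PENDING | RESP_PENDING
 *	ACK received  : ioctl_state = RESP_PENDING     (this function)
 *	CMPLT received: ioctl_state = 0, waiter woken  (dhd_prot_ioctcmplt_process)
 *
 * Any other combination is treated as a protocol error and the raw
 * message is hex-dumped for debugging.
 */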
6233 
6234 /** called on MSG_TYPE_IOCTL_CMPLT message received from dongle */
6235 static void
6236 dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg)
6237 {
6238 	dhd_prot_t *prot = dhd->prot;
6239 	uint32 pkt_id, xt_id;
6240 	ioctl_comp_resp_msg_t *ioct_resp = (ioctl_comp_resp_msg_t *)msg;
6241 	void *pkt;
6242 	unsigned long flags;
6243 	dhd_dma_buf_t retbuf;
6244 
6245 	/* Check the ioctl-timeout induce flag, which is set via a dhd
6246 	 * iovar to simulate an IOCTL timeout. If the flag is set, return
6247 	 * early so the pending ioctl times out as intended.
6248 	 */
6249 	if (dhd->dhd_induce_error == DHD_INDUCE_IOCTL_TIMEOUT) {
6250 		DHD_ERROR(("%s: Inducing ioctl timeout\n", __FUNCTION__));
6251 		return;
6252 	}
6253 
6254 	memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
6255 
6256 	pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id);
6257 
6258 #if defined(DHD_PKTID_AUDIT_RING)
6259 #ifndef IOCTLRESP_USE_CONSTMEM
6260 	DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_ctrl_map, pkt_id,
6261 		DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
6262 #else
6263 	DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_map_handle_ioctl, pkt_id,
6264 		DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
6265 #endif /* !IOCTLRESP_USE_CONSTMEM */
6266 #endif // endif
6267 
6268 	DHD_GENERAL_LOCK(dhd, flags);
6269 	if ((prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) ||
6270 		!(prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
6271 		DHD_ERROR(("%s: received ioctl response with state %02x trans_id = %d\n",
6272 			__FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
6273 		prhex("dhd_prot_ioctcmplt_process:",
6274 			(uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
6275 		DHD_GENERAL_UNLOCK(dhd, flags);
6276 		return;
6277 	}
6278 
6279 	dhd->prot->ioctl_cmplt_time = OSL_LOCALTIME_NS();
6280 
6281 	/* Clear Response pending bit */
6282 	prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING;
6283 	DHD_GENERAL_UNLOCK(dhd, flags);
6284 
6285 #ifndef IOCTLRESP_USE_CONSTMEM
6286 	pkt = dhd_prot_packet_get(dhd, pkt_id, PKTTYPE_IOCTL_RX, TRUE);
6287 #else
6288 	dhd_prot_ioctl_ret_buffer_get(dhd, pkt_id, &retbuf);
6289 	pkt = retbuf.va;
6290 #endif /* !IOCTLRESP_USE_CONSTMEM */
6291 	if (!pkt) {
6292 		DHD_ERROR(("%s: received ioctl response with NULL pkt\n", __FUNCTION__));
6293 		prhex("dhd_prot_ioctcmplt_process:",
6294 			(uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
6295 		return;
6296 	}
6297 
6298 	prot->ioctl_resplen = ltoh16(ioct_resp->resp_len);
6299 	prot->ioctl_status = ltoh16(ioct_resp->compl_hdr.status);
6300 	xt_id = ltoh16(ioct_resp->trans_id);
6301 
6302 	if (xt_id != prot->ioctl_trans_id || prot->curr_ioctl_cmd != ioct_resp->cmd) {
6303 		DHD_ERROR(("%s: transaction id(%d %d) or cmd(%d %d) mismatch\n",
6304 			__FUNCTION__, xt_id, prot->ioctl_trans_id,
6305 			prot->curr_ioctl_cmd, ioct_resp->cmd));
6306 		dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_ERROR);
6307 		dhd_prot_debug_info_print(dhd);
6308 #ifdef DHD_FW_COREDUMP
6309 		if (dhd->memdump_enabled) {
6310 			/* collect core dump */
6311 			dhd->memdump_type = DUMP_TYPE_TRANS_ID_MISMATCH;
6312 			dhd_bus_mem_dump(dhd);
6313 		}
6314 #else
6315 		ASSERT(0);
6316 #endif /* DHD_FW_COREDUMP */
6317 		dhd_schedule_reset(dhd);
6318 		goto exit;
6319 	}
6320 	DHD_CTL(("IOCTL_COMPLETE: req_id %x transid %d status %x resplen %d\n",
6321 		pkt_id, xt_id, prot->ioctl_status, prot->ioctl_resplen));
6322 
6323 	if (prot->ioctl_resplen > 0) {
6324 #ifndef IOCTLRESP_USE_CONSTMEM
6325 		bcopy(PKTDATA(dhd->osh, pkt), prot->retbuf.va, prot->ioctl_resplen);
6326 #else
6327 		bcopy(pkt, prot->retbuf.va, prot->ioctl_resplen);
6328 #endif /* !IOCTLRESP_USE_CONSTMEM */
6329 	}
6330 
6331 	/* wake up any dhd_os_ioctl_resp_wait() */
6332 	dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_SUCCESS);
6333 
6334 exit:
6335 #ifndef IOCTLRESP_USE_CONSTMEM
6336 	dhd_prot_packet_free(dhd, pkt,
6337 		PKTTYPE_IOCTL_RX, FALSE);
6338 #else
6339 	free_ioctl_return_buffer(dhd, &retbuf);
6340 #endif /* !IOCTLRESP_USE_CONSTMEM */
6341 
6342 	/* Post another ioctl buf to the device */
6343 	if (prot->cur_ioctlresp_bufs_posted > 0) {
6344 		prot->cur_ioctlresp_bufs_posted--;
6345 	}
6346 
6347 	dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
6348 }
6349 
6350 int
6351 dhd_prot_check_tx_resource(dhd_pub_t *dhd)
6352 {
6353 	return dhd->prot->no_tx_resource;
6354 }
6355 
6356 void
6357 dhd_prot_update_pktid_txq_stop_cnt(dhd_pub_t *dhd)
6358 {
6359 	dhd->prot->pktid_txq_stop_cnt++;
6360 }
6361 
6362 void
6363 dhd_prot_update_pktid_txq_start_cnt(dhd_pub_t *dhd)
6364 {
6365 	dhd->prot->pktid_txq_start_cnt++;
6366 }
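/*
 * The two counters above pair with pktid-based tx flow control elsewhere
 * in this file: dhd_prot_txdata() stops the host queue after the tx pktid
 * pool has been depleted DHD_PKTID_DEPLETED_MAX_COUNT times in a row, and
 * dhd_prot_txstatus_process() restarts it once availability climbs back
 * to DHD_PKTID_MIN_AVAIL_COUNT. Callers outside this section are expected
 * to bump the matching counter on each stop/start event.
 */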
6367 
6368 /** called on MSG_TYPE_TX_STATUS message received from dongle */
6369 static void BCMFASTPATH
6370 dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg)
6371 {
6372 	dhd_prot_t *prot = dhd->prot;
6373 	host_txbuf_cmpl_t * txstatus;
6374 	unsigned long flags;
6375 	uint32 pktid;
6376 	void *pkt;
6377 	dmaaddr_t pa;
6378 	uint32 len;
6379 	void *dmah;
6380 	void *secdma;
6381 	bool pkt_fate;
6382 	msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;
6383 #if defined(TX_STATUS_LATENCY_STATS) || defined(DHD_HP2P)
6384 	flow_info_t *flow_info;
6385 	uint64 tx_status_latency;
6386 #endif /* TX_STATUS_LATENCY_STATS || DHD_HP2P */
6387 #if defined(TX_STATUS_LATENCY_STATS)
6388 	flow_ring_node_t *flow_ring_node;
6389 	uint16 flowid;
6390 #endif // endif
6391 	ts_timestamp_t *ts;
6392 
6393 	BCM_REFERENCE(ts);
6394 	txstatus = (host_txbuf_cmpl_t *)msg;
6395 #if defined(TX_STATUS_LATENCY_STATS)
6396 	flowid = txstatus->compl_hdr.flow_ring_id;
6397 	flow_ring_node = DHD_FLOW_RING(dhd, flowid);
6398 #endif // endif
6399 
6400 	/* locks required to protect circular buffer accesses */
6401 	DHD_RING_LOCK(ring->ring_lock, flags);
6402 	pktid = ltoh32(txstatus->cmn_hdr.request_id);
6403 	pkt_fate = TRUE;
6404 
6405 #if defined(DHD_PKTID_AUDIT_RING)
6406 	DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_tx_map, pktid,
6407 			DHD_DUPLICATE_FREE, msg, D2HRING_TXCMPLT_ITEMSIZE);
6408 #endif // endif
6409 
6410 	DHD_INFO(("txstatus for pktid 0x%04x\n", pktid));
6411 	if (OSL_ATOMIC_DEC_RETURN(dhd->osh, &prot->active_tx_count) < 0) {
6412 		DHD_ERROR(("Extra tx completions: active_tx_count went negative\n"));
6413 	}
6414 	ASSERT(pktid != 0);
6415 
6416 	pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
6417 		pa, len, dmah, secdma, PKTTYPE_DATA_TX);
6418 	if (!pkt) {
6419 		DHD_RING_UNLOCK(ring->ring_lock, flags);
6420 		DHD_ERROR(("%s: received txstatus with NULL pkt\n", __FUNCTION__));
6421 		prhex("dhd_prot_txstatus_process:", (uchar *)msg, D2HRING_TXCMPLT_ITEMSIZE);
6422 #ifdef DHD_FW_COREDUMP
6423 		if (dhd->memdump_enabled) {
6424 			/* collect core dump */
6425 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
6426 			dhd_bus_mem_dump(dhd);
6427 		}
6428 #else
6429 		ASSERT(0);
6430 #endif /* DHD_FW_COREDUMP */
6431 		return;
6432 	}
6433 
6434 	if (DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map) == DHD_PKTID_MIN_AVAIL_COUNT) {
6435 		dhd->prot->no_tx_resource = FALSE;
6436 		dhd_bus_start_queue(dhd->bus);
6437 	}
6438 
6439 	if (SECURE_DMA_ENAB(dhd->osh)) {
6440 		int offset = 0;
6441 		BCM_REFERENCE(offset);
6442 
6443 		if (dhd->prot->tx_metadata_offset)
6444 			offset = dhd->prot->tx_metadata_offset + ETHER_HDR_LEN;
6445 		SECURE_DMA_UNMAP(dhd->osh, (uint) pa,
6446 			(uint) dhd->prot->tx_metadata_offset, DMA_RX, 0, dmah,
6447 			secdma, offset);
6448 	} else {
6449 		DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
6450 	}
6451 
6452 #ifdef TX_STATUS_LATENCY_STATS
6453 	/* update the tx status latency for flowid */
6454 	flow_info = &flow_ring_node->flow_info;
6455 	tx_status_latency = OSL_SYSUPTIME_US() - DHD_PKT_GET_QTIME(pkt);
6456 	flow_info->cum_tx_status_latency += tx_status_latency;
6457 	flow_info->num_tx_status++;
6458 #endif /* TX_STATUS_LATENCY_STATS */
6459 #if defined(DHD_LB_TXC) && !defined(BCM_SECURE_DMA)
6460 	{
6461 		int elem_ix;
6462 		void **elem;
6463 		bcm_workq_t *workq;
6464 
6465 		workq = &prot->tx_compl_prod;
6466 		/*
6467 		 * Produce the packet into the tx_compl workq for the tx compl tasklet
6468 		 * to consume.
6469 		 */
6470 		OSL_PREFETCH(PKTTAG(pkt));
6471 
6472 		/* fetch next available slot in workq */
6473 		elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
6474 
6475 		DHD_PKTTAG_SET_PA((dhd_pkttag_fr_t *)PKTTAG(pkt), pa);
6476 		DHD_PKTTAG_SET_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt), len);
6477 
6478 		if (elem_ix == BCM_RING_FULL) {
6479 			DHD_ERROR(("tx_compl_prod BCM_RING_FULL\n"));
6480 			goto workq_ring_full;
6481 		}
6482 
6483 		elem = WORKQ_ELEMENT(void *, &prot->tx_compl_prod, elem_ix);
6484 		*elem = pkt;
6485 
6486 		smp_wmb();
6487 
6488 		/* Sync WR index to consumer if the SYNC threshold has been reached */
6489 		if (++prot->tx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) {
6490 			bcm_workq_prod_sync(workq);
6491 			prot->tx_compl_prod_sync = 0;
6492 		}
6493 
6494 		DHD_INFO(("%s: tx_compl_prod pkt<%p> sync<%d>\n",
6495 			__FUNCTION__, pkt, prot->tx_compl_prod_sync));
6496 
6497 		DHD_RING_UNLOCK(ring->ring_lock, flags);
6498 		return;
6499 	}
6500 
6501 workq_ring_full:
6502 
6503 #endif /* DHD_LB_TXC && !BCM_SECURE_DMA */
6504 
6505 #ifdef DMAMAP_STATS
6506 	dhd->dma_stats.txdata--;
6507 	dhd->dma_stats.txdata_sz -= len;
6508 #endif /* DMAMAP_STATS */
6509 	pkt_fate = dhd_dbg_process_tx_status(dhd, pkt, pktid,
6510 			ltoh16(txstatus->compl_hdr.status) & WLFC_CTL_PKTFLAG_MASK);
6511 #ifdef DHD_PKT_LOGGING
6512 	if (dhd->d11_tx_status) {
6513 		uint16 status = ltoh16(txstatus->compl_hdr.status) &
6514 			WLFC_CTL_PKTFLAG_MASK;
6515 		uint32 pkthash = __dhd_dbg_pkt_hash((uintptr_t)pkt, pktid);
6516 		DHD_PKTLOG_TXS(dhd, pkt, pktid, status);
6517 		dhd_dump_pkt(dhd, ltoh32(txstatus->cmn_hdr.if_id),
6518 			(uint8 *)PKTDATA(dhd->osh, pkt), len, TRUE,
6519 			&pkthash, &status);
6520 	}
6521 #endif /* DHD_PKT_LOGGING */
6522 
6523 #if defined(BCMPCIE)
6524 	dhd_txcomplete(dhd, pkt, pkt_fate);
6525 #ifdef DHD_4WAYM4_FAIL_DISCONNECT
6526 	dhd_eap_txcomplete(dhd, pkt, pkt_fate, txstatus->cmn_hdr.if_id);
6527 #endif /* DHD_4WAYM4_FAIL_DISCONNECT */
6528 #endif // endif
6529 
6530 #if DHD_DBG_SHOW_METADATA
6531 	if (dhd->prot->metadata_dbg &&
6532 		dhd->prot->tx_metadata_offset && txstatus->metadata_len) {
6533 		uchar *ptr;
6534 		/* The Ethernet header of TX frame was copied and removed.
6535 		 * Here, move the data pointer forward by Ethernet header size.
6536 		 */
6537 		PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN);
6538 		ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->tx_metadata_offset);
6539 		bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len);
6540 		dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len);
6541 	}
6542 #endif /* DHD_DBG_SHOW_METADATA */
6543 
6544 #ifdef DHD_HP2P
6545 	if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
6546 #ifdef DHD_HP2P_DEBUG
6547 		bcm_print_bytes("txcpl", (uint8 *)txstatus, sizeof(host_txbuf_cmpl_t));
6548 #endif /* DHD_HP2P_DEBUG */
6549 		dhd_update_hp2p_txstats(dhd, txstatus);
6550 	}
6551 #endif /* DHD_HP2P */
6552 
6553 #ifdef DHD_LBUF_AUDIT
6554 	PKTAUDIT(dhd->osh, pkt);
6555 #endif // endif
6556 
6557 	DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id,
6558 		txstatus->tx_status);
6559 	DHD_RING_UNLOCK(ring->ring_lock, flags);
6560 	PKTFREE(dhd->osh, pkt, TRUE);
6561 	return;
6562 } /* dhd_prot_txstatus_process */
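/*
 * The DHD_LB_TXC branch above is a single-producer ring/workq pattern:
 * reserve a slot, write the element, order the write with smp_wmb(), and
 * publish the write index (batched every DHD_LB_WORKQ_SYNC items here).
 * A minimal sketch of the same pattern using the existing workq macros
 * ('my_workq' and 'my_item' are made-up names):
 *
 *	int idx = bcm_ring_prod(WORKQ_RING(&my_workq), DHD_LB_WORKQ_SZ);
 *	if (idx != BCM_RING_FULL) {
 *		*WORKQ_ELEMENT(void *, &my_workq, idx) = my_item;
 *		smp_wmb();			// payload visible before index
 *		bcm_workq_prod_sync(&my_workq);	// publish to the consumer
 *	}
 */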
6563 
6564 /** called on MSG_TYPE_WL_EVENT message received from dongle */
6565 static void
6566 dhd_prot_event_process(dhd_pub_t *dhd, void *msg)
6567 {
6568 	wlevent_req_msg_t *evnt;
6569 	uint32 bufid;
6570 	uint16 buflen;
6571 	int ifidx = 0;
6572 	void* pkt;
6573 	dhd_prot_t *prot = dhd->prot;
6574 
6575 	/* Event complete header */
6576 	evnt = (wlevent_req_msg_t *)msg;
6577 	bufid = ltoh32(evnt->cmn_hdr.request_id);
6578 
6579 #if defined(DHD_PKTID_AUDIT_RING)
6580 	DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, bufid,
6581 			DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
6582 #endif // endif
6583 
6584 	buflen = ltoh16(evnt->event_data_len);
6585 
6586 	ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr);
6587 
6588 	/* Post another rxbuf to the device */
6589 	if (prot->cur_event_bufs_posted)
6590 		prot->cur_event_bufs_posted--;
6591 	dhd_msgbuf_rxbuf_post_event_bufs(dhd);
6592 
6593 	pkt = dhd_prot_packet_get(dhd, bufid, PKTTYPE_EVENT_RX, TRUE);
6594 
6595 	if (!pkt) {
6596 		DHD_ERROR(("%s: pkt is NULL for pktid %d\n", __FUNCTION__, bufid));
6597 		return;
6598 	}
6599 
6600 	/* DMA RX offset updated through shared area */
6601 	if (dhd->prot->rx_dataoffset)
6602 		PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
6603 
6604 	PKTSETLEN(dhd->osh, pkt, buflen);
6605 #ifdef DHD_LBUF_AUDIT
6606 	PKTAUDIT(dhd->osh, pkt);
6607 #endif // endif
6608 	dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
6609 }
6610 
6611 /** called on MSG_TYPE_INFO_BUF_CMPLT message received from dongle */
6612 static void BCMFASTPATH
6613 dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf)
6614 {
6615 	info_buf_resp_t *resp;
6616 	uint32 pktid;
6617 	uint16 buflen;
6618 	void * pkt;
6619 
6620 	resp = (info_buf_resp_t *)buf;
6621 	pktid = ltoh32(resp->cmn_hdr.request_id);
6622 	buflen = ltoh16(resp->info_data_len);
6623 
6624 #ifdef DHD_PKTID_AUDIT_RING
6625 	DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
6626 			DHD_DUPLICATE_FREE, buf, D2HRING_INFO_BUFCMPLT_ITEMSIZE);
6627 #endif /* DHD_PKTID_AUDIT_RING */
6628 
6629 	DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d, rx_dataoffset %d\n",
6630 		pktid, buflen, resp->cmn_hdr.flags, ltoh16(resp->seqnum),
6631 		dhd->prot->rx_dataoffset));
6632 
6633 	if (dhd->debug_buf_dest_support) {
6634 		if (resp->dest < DEBUG_BUF_DEST_MAX) {
6635 			dhd->debug_buf_dest_stat[resp->dest]++;
6636 		}
6637 	}
6638 
6639 	pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_INFO_RX, TRUE);
6640 	if (!pkt)
6641 		return;
6642 
6643 	/* DMA RX offset updated through shared area */
6644 	if (dhd->prot->rx_dataoffset)
6645 		PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
6646 
6647 	PKTSETLEN(dhd->osh, pkt, buflen);
6648 
6649 #ifdef DHD_LBUF_AUDIT
6650 	PKTAUDIT(dhd->osh, pkt);
6651 #endif // endif
6652 
6653 	/* info ring "debug" data, which is not an 802.3 frame, is sent/hacked with a
6654 	 * special ifidx of -1.  This is just internal to dhd to get the data to
6655 	 * dhd_linux.c:dhd_rx_frame() from here (dhd_prot_process_infobuf_complete).
6656 	 */
6657 	dhd_bus_rx_frame(dhd->bus, pkt, DHD_DUMMY_INFO_IF /* ifidx HACK */, 1);
6658 }
6659 
6660 /** called on MSG_TYPE_SNAPSHOT_CMPLT message received from dongle */
6661 static void BCMFASTPATH
6662 dhd_prot_process_snapshot_complete(dhd_pub_t *dhd, void *buf)
6663 {
6664 }
6665 
6666 /** Stop protocol: sync w/dongle state. */
6667 void dhd_prot_stop(dhd_pub_t *dhd)
6668 {
6669 	ASSERT(dhd);
6670 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
6671 
6672 }
6673 
6674 /* Add any protocol-specific data header.
6675  * Caller must reserve prot_hdrlen prepend space.
6676  */
6677 void BCMFASTPATH
6678 dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF)
6679 {
6680 	return;
6681 }
6682 
6683 uint
6684 dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF)
6685 {
6686 	return 0;
6687 }
6688 
6689 #define MAX_MTU_SZ (1600u)
6690 
6691 #define PKTBUF pktbuf
6692 
6693 /**
6694  * Called when a tx ethernet packet has been dequeued from a flow queue, and has to be inserted in
6695  * the corresponding flow ring.
6696  */
6697 int BCMFASTPATH
6698 dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx)
6699 {
6700 	unsigned long flags;
6701 	dhd_prot_t *prot = dhd->prot;
6702 	host_txbuf_post_t *txdesc = NULL;
6703 	dmaaddr_t pa, meta_pa;
6704 	uint8 *pktdata;
6705 	uint32 pktlen;
6706 	uint32 pktid;
6707 	uint8	prio;
6708 	uint16 flowid = 0;
6709 	uint16 alloced = 0;
6710 	uint16	headroom;
6711 	msgbuf_ring_t *ring;
6712 	flow_ring_table_t *flow_ring_table;
6713 	flow_ring_node_t *flow_ring_node;
6714 #ifdef DHD_PKT_LOGGING
6715 	uint32 pkthash;
6716 #endif /* DHD_PKT_LOGGING */
6717 
6718 	if (dhd->flow_ring_table == NULL) {
6719 		DHD_ERROR(("dhd flow_ring_table is NULL\n"));
6720 		return BCME_NORESOURCE;
6721 	}
6722 #ifdef DHD_PCIE_PKTID
6723 	if (!DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map)) {
6724 		if (dhd->prot->pktid_depleted_cnt == DHD_PKTID_DEPLETED_MAX_COUNT) {
6725 			dhd_bus_stop_queue(dhd->bus);
6726 			dhd->prot->no_tx_resource = TRUE;
6727 		}
6728 		dhd->prot->pktid_depleted_cnt++;
6729 		goto err_no_res;
6730 	} else {
6731 		dhd->prot->pktid_depleted_cnt = 0;
6732 	}
6733 #endif /* DHD_PCIE_PKTID */
6734 
6735 	flowid = DHD_PKT_GET_FLOWID(PKTBUF);
6736 	flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
6737 	flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
6738 
6739 	ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
6740 
6741 	DHD_RING_LOCK(ring->ring_lock, flags);
6742 
6743 	/* Create a unique 32-bit packet id */
6744 	pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_tx_map,
6745 		PKTBUF, PKTTYPE_DATA_TX);
6746 #if defined(DHD_PCIE_PKTID)
6747 	if (pktid == DHD_PKTID_INVALID) {
6748 		DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
6749 		/*
6750 		 * Returning an error here makes the caller requeue the
6751 		 * packet. The original skb has not been freed yet, so the
6752 		 * caller can safely resubmit the same packet once pktids
6753 		 * free up again.
6754 		 */
6755 		goto err_no_res_pktfree;
6756 	}
6757 #endif /* DHD_PCIE_PKTID */
6758 
6759 	/* Reserve space in the circular buffer */
6760 	txdesc = (host_txbuf_post_t *)
6761 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
6762 	if (txdesc == NULL) {
6763 		DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n",
6764 			__FUNCTION__, __LINE__, OSL_ATOMIC_READ(dhd->osh, &prot->active_tx_count)));
6765 		goto err_free_pktid;
6766 	}
6767 
6768 	/* Extract the data pointer and length information */
6769 	pktdata = PKTDATA(dhd->osh, PKTBUF);
6770 	pktlen  = PKTLEN(dhd->osh, PKTBUF);
6771 
6772 	DHD_DBG_PKT_MON_TX(dhd, PKTBUF, pktid);
6773 #ifdef DHD_PKT_LOGGING
6774 	DHD_PKTLOG_TX(dhd, PKTBUF, pktid);
6775 	/* Dump TX packet */
6776 	pkthash = __dhd_dbg_pkt_hash((uintptr_t)PKTBUF, pktid);
6777 	dhd_dump_pkt(dhd, ifidx, pktdata, pktlen, TRUE, &pkthash, NULL);
6778 #endif /* DHD_PKT_LOGGING */
6779 
6780 	/* Ethernet header: Copy before we cache flush packet using DMA_MAP */
6781 	bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN);
6782 
6783 	/* Extract the ethernet header and adjust the data pointer and length */
6784 	pktdata = PKTPULL(dhd->osh, PKTBUF, ETHER_HDR_LEN);
6785 	pktlen -= ETHER_HDR_LEN;
6786 
6787 	/* Map the data pointer to a DMA-able address */
6788 	if (SECURE_DMA_ENAB(dhd->osh)) {
6789 		int offset = 0;
6790 		BCM_REFERENCE(offset);
6791 
6792 		if (prot->tx_metadata_offset)
6793 			offset = prot->tx_metadata_offset + ETHER_HDR_LEN;
6794 
6795 		pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen,
6796 			DMA_TX, PKTBUF, 0, ring->dma_buf.secdma, offset);
6797 	}
6798 #ifndef BCM_SECURE_DMA
6799 	else
6800 		pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0);
6801 #endif /* #ifndef BCM_SECURE_DMA */
6802 
6803 	if (PHYSADDRISZERO(pa)) {
6804 		DHD_ERROR(("%s: DMA mapping failed, got a zero (invalid) "
6805 			"physical address for pa\n", __FUNCTION__));
6806 		ASSERT(0);
6807 		goto err_rollback_idx;
6808 	}
6809 
6810 #ifdef DMAMAP_STATS
6811 	dhd->dma_stats.txdata++;
6812 	dhd->dma_stats.txdata_sz += pktlen;
6813 #endif /* DMAMAP_STATS */
6814 	/* No need to lock. Save the rest of the packet's metadata */
6815 	DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_tx_map, PKTBUF, pktid,
6816 	    pa, pktlen, DMA_TX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_TX);
6817 
6818 #ifdef TXP_FLUSH_NITEMS
6819 	if (ring->pend_items_count == 0)
6820 		ring->start_addr = (void *)txdesc;
6821 	ring->pend_items_count++;
6822 #endif // endif
6823 
6824 	/* Form the Tx descriptor message buffer */
6825 
6826 	/* Common message hdr */
6827 	txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST;
6828 	txdesc->cmn_hdr.if_id = ifidx;
6829 	txdesc->cmn_hdr.flags = ring->current_phase;
6830 
6831 	txdesc->flags = BCMPCIE_PKT_FLAGS_FRAME_802_3;
6832 	prio = (uint8)PKTPRIO(PKTBUF);
6833 
6834 	txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT;
6835 	txdesc->seg_cnt = 1;
6836 
6837 	txdesc->data_len = htol16((uint16) pktlen);
6838 	txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
6839 	txdesc->data_buf_addr.low_addr  = htol32(PHYSADDRLO(pa));
6840 
6841 	/* Move data pointer to keep ether header in local PKTBUF for later reference */
6842 	PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN);
6843 
6844 	/* Handle Tx metadata */
6845 	headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF);
6846 	if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset))
6847 		DHD_ERROR(("No headroom for Metadata tx %d %d\n",
6848 		prot->tx_metadata_offset, headroom));
6849 
6850 	if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) {
6851 		DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset));
6852 
6853 		/* Adjust the data pointer to account for meta data in DMA_MAP */
6854 		PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset);
6855 
6856 		if (SECURE_DMA_ENAB(dhd->osh)) {
6857 			meta_pa = SECURE_DMA_MAP_TXMETA(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
6858 				prot->tx_metadata_offset + ETHER_HDR_LEN, DMA_RX, PKTBUF,
6859 				0, ring->dma_buf.secdma);
6860 		}
6861 #ifndef BCM_SECURE_DMA
6862 		else
6863 			meta_pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
6864 				prot->tx_metadata_offset, DMA_RX, PKTBUF, 0);
6865 #endif /* #ifndef BCM_SECURE_DMA */
6866 
6867 		if (PHYSADDRISZERO(meta_pa)) {
6868 			/* Unmap the data pointer to a DMA-able address */
6869 			if (SECURE_DMA_ENAB(dhd->osh)) {
6870 				int offset = 0;
6871 				BCM_REFERENCE(offset);
6872 
6873 				if (prot->tx_metadata_offset) {
6874 					offset = prot->tx_metadata_offset + ETHER_HDR_LEN;
6875 				}
6876 
6877 				SECURE_DMA_UNMAP(dhd->osh, pa, pktlen,
6878 					DMA_TX, 0, DHD_DMAH_NULL, ring->dma_buf.secdma, offset);
6879 			}
6880 #ifndef BCM_SECURE_DMA
6881 			else {
6882 				DMA_UNMAP(dhd->osh, pa, pktlen, DMA_TX, 0, DHD_DMAH_NULL);
6883 			}
6884 #endif /* #ifndef BCM_SECURE_DMA */
6885 #ifdef TXP_FLUSH_NITEMS
6886 			/* update pend_items_count */
6887 			ring->pend_items_count--;
6888 #endif /* TXP_FLUSH_NITEMS */
6889 
6890 			DHD_ERROR(("%s: DMA mapping failed, got a zero (invalid) "
6891 				"physical address for meta_pa\n", __FUNCTION__));
6892 			ASSERT(0);
6893 			goto err_rollback_idx;
6894 		}
6895 
6896 		/* Adjust the data pointer back to original value */
6897 		PKTPULL(dhd->osh, PKTBUF, prot->tx_metadata_offset);
6898 
6899 		txdesc->metadata_buf_len = prot->tx_metadata_offset;
6900 		txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa));
6901 		txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa));
6902 	} else {
6903 #ifdef DHD_HP2P
6904 		if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
6905 			dhd_update_hp2p_txdesc(dhd, txdesc);
6906 		} else
6907 #endif /* DHD_HP2P */
6908 		/* no metadata: a plain block keeps the #ifdef'd else above valid */
6909 		{
6910 			txdesc->metadata_buf_len = htol16(0);
6911 			txdesc->metadata_buf_addr.high_addr = 0;
6912 			txdesc->metadata_buf_addr.low_addr = 0;
6913 		}
6914 	}
6915 
6916 #ifdef DHD_PKTID_AUDIT_RING
6917 	DHD_PKTID_AUDIT(dhd, prot->pktid_tx_map, pktid, DHD_DUPLICATE_ALLOC);
6918 #endif /* DHD_PKTID_AUDIT_RING */
6919 
6920 	txdesc->cmn_hdr.request_id = htol32(pktid);
6921 
6922 	DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len,
6923 		txdesc->cmn_hdr.request_id));
6924 
6925 #ifdef DHD_LBUF_AUDIT
6926 	PKTAUDIT(dhd->osh, PKTBUF);
6927 #endif // endif
6928 
6929 	if (pktlen > MAX_MTU_SZ) {
6930 		DHD_ERROR(("%s: ######## pktlen(%d) > MAX_MTU_SZ(%d) #######\n",
6931 			__FUNCTION__, pktlen, MAX_MTU_SZ));
6932 		dhd_prhex("txringitem", (volatile uchar*)txdesc,
6933 			sizeof(host_txbuf_post_t), DHD_ERROR_VAL);
6934 	}
6935 
6936 	/* Update the write pointer in TCM & ring bell */
6937 #if defined(DHD_HP2P) && defined(TXP_FLUSH_NITEMS)
6938 	if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
6939 		dhd_calc_hp2p_burst(dhd, ring, flowid);
6940 	} else {
6941 		if ((ring->pend_items_count == prot->txp_threshold) ||
6942 			((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
6943 			dhd_prot_txdata_write_flush(dhd, flowid);
6944 		}
6945 	}
6946 #elif defined(TXP_FLUSH_NITEMS)
6947 	/* Flush if we have either hit the txp_threshold or if this msg is */
6948 	/* occupying the last slot in the flow_ring - before wrap around.  */
6949 	if ((ring->pend_items_count == prot->txp_threshold) ||
6950 		((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
6951 		dhd_prot_txdata_write_flush(dhd, flowid);
6952 	}
6953 #else
6954 	/* update ring's WR index and ring doorbell to dongle */
6955 	dhd_prot_ring_write_complete(dhd, ring, txdesc, 1);
6956 #endif /* DHD_HP2P && TXP_FLUSH_NITEMS */
6957 
6958 #if defined(TX_STATUS_LATENCY_STATS)
6959 	/* set the time when pkt is queued to flowring */
6960 	DHD_PKT_SET_QTIME(PKTBUF, OSL_SYSUPTIME_US());
6961 #endif // endif
6962 
6963 	OSL_ATOMIC_INC(dhd->osh, &prot->active_tx_count);
6964 	/*
6965 	 * Take a wake lock, do not sleep if we have at least one packet
6966 	 * to finish.
6967 	 */
6968 	DHD_TXFL_WAKE_LOCK_TIMEOUT(dhd, MAX_TX_TIMEOUT);
6969 
6970 	DHD_RING_UNLOCK(ring->ring_lock, flags);
6971 
6972 #ifdef TX_STATUS_LATENCY_STATS
6973 	flow_ring_node->flow_info.num_tx_pkts++;
6974 #endif /* TX_STATUS_LATENCY_STATS */
6975 	return BCME_OK;
6976 
6977 err_rollback_idx:
6978 	/* roll back write pointer for unprocessed message */
6979 	if (ring->wr == 0) {
6980 		ring->wr = ring->max_items - 1;
6981 	} else {
6982 		ring->wr--;
6983 		if (ring->wr == 0) {
6984 			DHD_INFO(("%s: flipping the phase now\n", ring->name));
6985 			ring->current_phase = ring->current_phase ?
6986 				0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
6987 		}
6988 	}
6989 
6990 err_free_pktid:
6991 #if defined(DHD_PCIE_PKTID)
6992 	{
6993 		void *dmah;
6994 		void *secdma;
6995 		/* Free up the PKTID. physaddr and pktlen will be garbage. */
6996 		DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
6997 			pa, pktlen, dmah, secdma, PKTTYPE_NO_CHECK);
6998 	}
6999 
7000 err_no_res_pktfree:
7001 #endif /* DHD_PCIE_PKTID */
7002 
7003 	DHD_RING_UNLOCK(ring->ring_lock, flags);
7004 err_no_res:
7005 	return BCME_NORESOURCE;
7006 } /* dhd_prot_txdata */
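/*
 * Pktid lifecycle used by the tx path above (all steps appear in this
 * file, summarized for reference):
 *
 *	1. DHD_NATIVE_TO_PKTID_RSV()  - reserve a 32-bit id for the packet
 *	2. DHD_NATIVE_TO_PKTID_SAVE() - attach pa/len/dmah/secdma metadata
 *	3. DHD_PKTID_TO_NATIVE()      - on tx status, map the id back to the
 *	   packet and release it (see dhd_prot_txstatus_process)
 *
 * The error paths unwind in reverse: err_free_pktid releases the
 * reservation and err_rollback_idx undoes the ring write pointer.
 */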
7007 
7008 /* called with a ring_lock */
7009 /** optimization to write "n" tx items at a time to ring */
7010 void BCMFASTPATH
7011 dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid)
7012 {
7013 #ifdef TXP_FLUSH_NITEMS
7014 	flow_ring_table_t *flow_ring_table;
7015 	flow_ring_node_t *flow_ring_node;
7016 	msgbuf_ring_t *ring;
7017 
7018 	if (dhd->flow_ring_table == NULL) {
7019 		return;
7020 	}
7021 
7022 	flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
7023 	flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
7024 	ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
7025 
7026 	if (ring->pend_items_count) {
7027 		/* update ring's WR index and ring doorbell to dongle */
7028 		dhd_prot_ring_write_complete(dhd, ring, ring->start_addr,
7029 			ring->pend_items_count);
7030 		ring->pend_items_count = 0;
7031 		ring->start_addr = NULL;
7032 	}
7033 #endif /* TXP_FLUSH_NITEMS */
7034 }
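/*
 * Design note: TXP_FLUSH_NITEMS batching trades a little latency for
 * fewer doorbell writes over PCIe. dhd_prot_txdata() defers the doorbell
 * until txp_threshold items are pending or the ring is about to wrap,
 * then this flush publishes all pending items with a single
 * dhd_prot_ring_write_complete() call.
 */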
7035 
7036 #undef PKTBUF	/* Only defined in the above routine */
7037 
7038 int BCMFASTPATH
7039 dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pkt, uchar *buf, uint *len)
7040 {
7041 	return 0;
7042 }
7043 
7044 /** post a set of receive buffers to the dongle */
7045 static void BCMFASTPATH
7046 dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt)
7047 {
7048 	dhd_prot_t *prot = dhd->prot;
7049 #if defined(DHD_LB_RXC)
7050 	int elem_ix;
7051 	uint32 *elem;
7052 	bcm_workq_t *workq;
7053 
7054 	workq = &prot->rx_compl_prod;
7055 
7056 	/* Produce the work item */
7057 	elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
7058 	if (elem_ix == BCM_RING_FULL) {
7059 		DHD_ERROR(("%s LB RxCompl workQ is full\n", __FUNCTION__));
7060 		ASSERT(0);
7061 		return;
7062 	}
7063 
7064 	elem = WORKQ_ELEMENT(uint32, workq, elem_ix);
7065 	*elem = pktid;
7066 
7067 	smp_wmb();
7068 
7069 	/* Sync WR index to consumer if the SYNC threshold has been reached */
7070 	if (++prot->rx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) {
7071 		bcm_workq_prod_sync(workq);
7072 		prot->rx_compl_prod_sync = 0;
7073 	}
7074 
7075 	DHD_INFO(("%s: rx_compl_prod pktid<%u> sync<%d>\n",
7076 		__FUNCTION__, pktid, prot->rx_compl_prod_sync));
7077 
7078 #endif /* DHD_LB_RXC */
7079 
7080 	if (prot->rxbufpost >= rxcnt) {
7081 		prot->rxbufpost -= (uint16)rxcnt;
7082 	} else {
7083 		/* ASSERT(0); */
7084 		prot->rxbufpost = 0;
7085 	}
7086 
7087 #if !defined(DHD_LB_RXC)
7088 	if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD))
7089 		dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
7090 #endif /* !DHD_LB_RXC */
7091 	return;
7092 }
7093 
7094 /* called before an ioctl is sent to the dongle */
7095 static void
7096 dhd_prot_wlioctl_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf)
7097 {
7098 	dhd_prot_t *prot = dhd->prot;
7099 	int slen = 0;
7100 
7101 	if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "pcie_bus_tput")) {
7102 		pcie_bus_tput_params_t *tput_params;
7103 
7104 		slen = strlen("pcie_bus_tput") + 1;
7105 		tput_params = (pcie_bus_tput_params_t*)((char *)buf + slen);
7106 		bcopy(&prot->host_bus_throughput_buf.pa, &tput_params->host_buf_addr,
7107 			sizeof(tput_params->host_buf_addr));
7108 		tput_params->host_buf_len = DHD_BUS_TPUT_BUF_LEN;
7109 	}
7110 
7111 }
7112 
7113 /* called after an ioctl returns from dongle */
7114 static void
7115 dhd_prot_wl_ioctl_ret_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf,
7116 	int ifidx, int ret, int len)
7117 {
7118 
7119 	if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) {
7120 		/* Intercept the wme_dp ioctl here */
7121 		if (!strcmp(buf, "wme_dp")) {
7122 			int slen, val = 0;
7123 
7124 			slen = strlen("wme_dp") + 1;
7125 			if (len >= (int)(slen + sizeof(int)))
7126 				bcopy(((char *)buf + slen), &val, sizeof(int));
7127 			dhd->wme_dp = (uint8) ltoh32(val);
7128 		}
7129 
7130 	}
7131 
7132 }
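/*
 * Both intercepts above rely on the standard iovar buffer layout: a
 * NUL-terminated name followed immediately by the parameter payload.
 * A minimal sketch of building such a buffer ('iovbuf' and 'val' are
 * illustrative local names, not driver state):
 *
 *	char iovbuf[32];
 *	uint32 val = htol32(1);
 *	int slen = strlen("wme_dp") + 1;	// name plus its NUL
 *	memcpy(iovbuf, "wme_dp", slen);
 *	memcpy(iovbuf + slen, &val, sizeof(val)); // payload after the NUL
 *	// total ioctl length is slen + sizeof(val)
 */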
7133 
7134 #ifdef DHD_PM_CONTROL_FROM_FILE
7135 extern bool g_pm_control;
7136 #endif /* DHD_PM_CONTROL_FROM_FILE */
7137 
7138 /** Use protocol to issue ioctl to dongle. Only one ioctl may be in transit. */
7139 int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
7140 {
7141 	int ret = -1;
7142 	uint8 action;
7143 
7144 	if (dhd->bus->is_linkdown) {
7145 		DHD_ERROR_RLMT(("%s : PCIe link is down. we have nothing to do\n", __FUNCTION__));
7146 		goto done;
7147 	}
7148 
7149 	if (dhd_query_bus_erros(dhd)) {
7150 		DHD_ERROR_RLMT(("%s : some BUS error. we have nothing to do\n", __FUNCTION__));
7151 		goto done;
7152 	}
7153 
7154 	if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
7155 		DHD_ERROR_RLMT(("%s : bus is down. we have nothing to do -"
7156 			" bus state: %d, sent hang: %d\n", __FUNCTION__,
7157 			dhd->busstate, dhd->hang_was_sent));
7158 		goto done;
7159 	}
7160 
7161 	if (dhd->busstate == DHD_BUS_SUSPEND) {
7162 		DHD_ERROR(("%s : bus is suspended\n", __FUNCTION__));
7163 		goto done;
7164 	}
7165 
7166 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7167 
7168 	if (ioc->cmd == WLC_SET_PM) {
7169 #ifdef DHD_PM_CONTROL_FROM_FILE
7170 		if (g_pm_control == TRUE) {
7171 			DHD_ERROR(("%s: SET PM ignored!(Requested:%d)\n",
7172 				__FUNCTION__, buf ? *(char *)buf : 0));
7173 			goto done;
7174 		}
7175 #endif /* DHD_PM_CONTROL_FROM_FILE */
7176 		DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, buf ? *(char *)buf : 0));
7177 	}
7178 
7179 	ASSERT(len <= WLC_IOCTL_MAXLEN);
7180 
7181 	if (len > WLC_IOCTL_MAXLEN)
7182 		goto done;
7183 
7184 	action = ioc->set;
7185 
7186 	dhd_prot_wlioctl_intercept(dhd, ioc, buf);
7187 
7188 	if (action & WL_IOCTL_ACTION_SET) {
7189 		ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
7190 	} else {
7191 		ret = dhd_msgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
7192 		if (ret > 0)
7193 			ioc->used = ret;
7194 	}
7195 
7196 	/* Too many programs assume ioctl() returns 0 on success */
7197 	if (ret >= 0) {
7198 		ret = 0;
7199 	} else {
7200 		DHD_INFO(("%s: status ret value is %d \n", __FUNCTION__, ret));
7201 		dhd->dongle_error = ret;
7202 	}
7203 
7204 	dhd_prot_wl_ioctl_ret_intercept(dhd, ioc, buf, ifidx, ret, len);
7205 
7206 done:
7207 	return ret;
7208 
7209 } /* dhd_prot_ioctl */
7210 
7211 /** test / loopback */
7212 
7213 int
7214 dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len)
7215 {
7216 	unsigned long flags;
7217 	dhd_prot_t *prot = dhd->prot;
7218 	uint16 alloced = 0;
7219 
7220 	ioct_reqst_hdr_t *ioct_rqst;
7221 
7222 	uint16 hdrlen = sizeof(ioct_reqst_hdr_t);
7223 	uint16 msglen = len + hdrlen;
7224 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
7225 
7226 	msglen = ALIGN_SIZE(msglen, DMA_ALIGN_LEN);
7227 	msglen = LIMIT_TO_MAX(msglen, MSGBUF_MAX_MSG_SIZE);
7228 
7229 	DHD_RING_LOCK(ring->ring_lock, flags);
7230 
7231 	ioct_rqst = (ioct_reqst_hdr_t *)
7232 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
7233 
7234 	if (ioct_rqst == NULL) {
7235 		DHD_RING_UNLOCK(ring->ring_lock, flags);
7236 		return 0;
7237 	}
7238 
7239 	{
7240 		uint8 *ptr;
7241 		uint16 i;
7242 
7243 		ptr = (uint8 *)ioct_rqst;
7244 		for (i = 0; i < msglen; i++) {
7245 			ptr[i] = i % 256;
7246 		}
7247 	}
7248 
7249 	/* Common msg buf hdr */
7250 	ioct_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
7251 	ring->seqnum++;
7252 
7253 	ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK;
7254 	ioct_rqst->msg.if_id = 0;
7255 	ioct_rqst->msg.flags = ring->current_phase;
7256 
7257 	bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen);
7258 
7259 	/* update ring's WR index and ring doorbell to dongle */
7260 	dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
7261 
7262 	DHD_RING_UNLOCK(ring->ring_lock, flags);
7263 
7264 	return 0;
7265 }
7266 
7267 /** test / loopback */
7268 void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dmaxfer)
7269 {
7270 	if (dmaxfer == NULL)
7271 		return;
7272 
7273 	dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
7274 	dhd_dma_buf_free(dhd, &dmaxfer->dstmem);
7275 }
7276 
7277 /** test / loopback */
7278 int
7279 dhd_prepare_schedule_dmaxfer_free(dhd_pub_t *dhdp)
7280 {
7281 	dhd_prot_t *prot = dhdp->prot;
7282 	dhd_dmaxfer_t *dmaxfer = &prot->dmaxfer;
7283 	dmaxref_mem_map_t *dmap = NULL;
7284 
7285 	dmap = MALLOCZ(dhdp->osh, sizeof(dmaxref_mem_map_t));
7286 	if (!dmap) {
7287 		DHD_ERROR(("%s: dmap alloc failed\n", __FUNCTION__));
7288 		goto mem_alloc_fail;
7289 	}
7290 	dmap->srcmem = &(dmaxfer->srcmem);
7291 	dmap->dstmem = &(dmaxfer->dstmem);
7292 
7293 	DMAXFER_FREE(dhdp, dmap);
7294 	return BCME_OK;
7295 
7296 mem_alloc_fail:
7297 	if (dmap) {
7298 		MFREE(dhdp->osh, dmap, sizeof(dmaxref_mem_map_t));
7299 		dmap = NULL;
7300 	}
7301 	return BCME_NOMEM;
7302 } /* dhd_prepare_schedule_dmaxfer_free */
7303 
7304 /** test / loopback */
7305 void
7306 dmaxfer_free_prev_dmaaddr(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
7307 {
7308 
7309 	dhd_dma_buf_free(dhdp, dmmap->srcmem);
7310 	dhd_dma_buf_free(dhdp, dmmap->dstmem);
7311 
7312 	MFREE(dhdp->osh, dmmap, sizeof(dmaxref_mem_map_t));
7313 
7314 	dhdp->bus->dmaxfer_complete = TRUE;
7315 	dhd_os_dmaxfer_wake(dhdp);
7316 
7317 	dmmap = NULL;
7318 
7319 } /* dmaxfer_free_prev_dmaaddr */
7320 
7321 /** test / loopback */
7322 int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len,
7323 	uint srcdelay, uint destdelay, dhd_dmaxfer_t *dmaxfer)
7324 {
7325 	uint i = 0, j = 0;
7326 	if (!dmaxfer)
7327 		return BCME_ERROR;
7328 
7329 	/* First free up existing buffers */
7330 	dmaxfer_free_dmaaddr(dhd, dmaxfer);
7331 
7332 	if (dhd_dma_buf_alloc(dhd, &dmaxfer->srcmem, len)) {
7333 		return BCME_NOMEM;
7334 	}
7335 
7336 	if (dhd_dma_buf_alloc(dhd, &dmaxfer->dstmem, len + 8)) {
7337 		dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
7338 		return BCME_NOMEM;
7339 	}
7340 
7341 	dmaxfer->len = len;
7342 
7343 	/* Populate source with a pattern like below
7344 	 * 0x00000000
7345 	 * 0x01010101
7346 	 * 0x02020202
7347 	 * 0x03030303
7348 	 * 0x04040404
7349 	 * 0x05050505
7350 	 * ...
7351 	 * 0xFFFFFFFF
7352 	 */
7353 	while (i < dmaxfer->len) {
7354 		((uint8*)dmaxfer->srcmem.va)[i] = j % 256;
7355 		i++;
7356 		if (i % 4 == 0) {
7357 			j++;
7358 		}
7359 	}
7360 
7361 	OSL_CACHE_FLUSH(dmaxfer->srcmem.va, dmaxfer->len);
7362 
7363 	dmaxfer->srcdelay = srcdelay;
7364 	dmaxfer->destdelay = destdelay;
7365 
7366 	return BCME_OK;
7367 } /* dmaxfer_prepare_dmaaddr */
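/*
 * Example of the generated source pattern for len = 12 (j advances after
 * every 4 bytes in the loop above):
 *
 *	offset: 00 01 02 03 04 05 06 07 08 09 0a 0b
 *	value : 00 00 00 00 01 01 01 01 02 02 02 02
 */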
7368 
7369 static void
7370 dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg)
7371 {
7372 	dhd_prot_t *prot = dhd->prot;
7373 	uint64 end_usec;
7374 	pcie_dmaxfer_cmplt_t *cmplt = (pcie_dmaxfer_cmplt_t *)msg;
7375 	int buf_free_scheduled;
7376 
7377 	BCM_REFERENCE(cmplt);
7378 	end_usec = OSL_SYSUPTIME_US();
7379 
7380 	DHD_ERROR(("DMA loopback status: %d\n", cmplt->compl_hdr.status));
7381 	prot->dmaxfer.status = cmplt->compl_hdr.status;
7382 	OSL_CACHE_INV(prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
7383 	if (prot->dmaxfer.srcmem.va && prot->dmaxfer.dstmem.va) {
7384 		if (memcmp(prot->dmaxfer.srcmem.va,
7385 		        prot->dmaxfer.dstmem.va, prot->dmaxfer.len) ||
7386 		        cmplt->compl_hdr.status != BCME_OK) {
7387 		        DHD_ERROR(("DMA loopback failed\n"));
7388 			/* It has been observed that the completion header
7389 			 * status is sometimes OK even though the memcmp
7390 			 * fails, so always explicitly set the dmaxfer
7391 			 * status to error in that case.
7392 			 */
7393 			prot->dmaxfer.status = BCME_ERROR;
7394 			prhex("XFER SRC: ",
7395 			    prot->dmaxfer.srcmem.va, prot->dmaxfer.len);
7396 			prhex("XFER DST: ",
7397 			    prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
7398 		}
7399 		else {
7400 			switch (prot->dmaxfer.d11_lpbk) {
7401 			case M2M_DMA_LPBK: {
7402 				DHD_ERROR(("DMA successful pcie m2m DMA loopback\n"));
7403 				} break;
7404 			case D11_LPBK: {
7405 				DHD_ERROR(("DMA successful with d11 loopback\n"));
7406 				} break;
7407 			case BMC_LPBK: {
7408 				DHD_ERROR(("DMA successful with bmc loopback\n"));
7409 				} break;
7410 			case M2M_NON_DMA_LPBK: {
7411 				DHD_ERROR(("DMA successful pcie m2m NON DMA loopback\n"));
7412 				} break;
7413 			case D11_HOST_MEM_LPBK: {
7414 				DHD_ERROR(("DMA successful d11 host mem loopback\n"));
7415 				} break;
7416 			case BMC_HOST_MEM_LPBK: {
7417 				DHD_ERROR(("DMA successful bmc host mem loopback\n"));
7418 				} break;
7419 			default: {
7420 				DHD_ERROR(("Invalid loopback option\n"));
7421 				} break;
7422 			}
7423 
7424 			if (DHD_LPBKDTDUMP_ON()) {
7425 				/* debug info print of the Tx and Rx buffers */
7426 				dhd_prhex("XFER SRC: ", prot->dmaxfer.srcmem.va,
7427 					prot->dmaxfer.len, DHD_INFO_VAL);
7428 				dhd_prhex("XFER DST: ", prot->dmaxfer.dstmem.va,
7429 					prot->dmaxfer.len, DHD_INFO_VAL);
7430 			}
7431 		}
7432 	}
7433 
7434 	buf_free_scheduled = dhd_prepare_schedule_dmaxfer_free(dhd);
7435 	end_usec -= prot->dmaxfer.start_usec;
7436 	if (end_usec) {
7437 		prot->dmaxfer.time_taken = end_usec;
7438 		DHD_ERROR(("DMA loopback %d bytes in %lu usec, %u kBps\n",
7439 			prot->dmaxfer.len, (unsigned long)end_usec,
7440 			(prot->dmaxfer.len * (1000 * 1000 / 1024) / (uint32)end_usec)));
7441 	}
7442 	dhd->prot->dmaxfer.in_progress = FALSE;
7443 
7444 	if (buf_free_scheduled != BCME_OK) {
7445 		dhd->bus->dmaxfer_complete = TRUE;
7446 		dhd_os_dmaxfer_wake(dhd);
7447 	}
7448 }
7449 
7450 /** Test functionality.
7451  * Transfers bytes from host to dongle and to host again using DMA
7452  * This function is not reentrant, as prot->dmaxfer.in_progress is not protected
7453  * by a spinlock.
7454  */
7455 int
7456 dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay,
7457 	uint d11_lpbk, uint core_num)
7458 {
7459 	unsigned long flags;
7460 	int ret = BCME_OK;
7461 	dhd_prot_t *prot = dhd->prot;
7462 	pcie_dma_xfer_params_t *dmap;
7463 	uint32 xferlen = LIMIT_TO_MAX(len, DMA_XFER_LEN_LIMIT);
7464 	uint16 alloced = 0;
7465 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
7466 
7467 	if (prot->dmaxfer.in_progress) {
7468 		DHD_ERROR(("DMA is in progress...\n"));
7469 		return BCME_ERROR;
7470 	}
7471 
7472 	if (d11_lpbk >= MAX_LPBK) {
7473 		DHD_ERROR(("loopback mode should be either"
7474 			" 0-PCIE_M2M_DMA, 1-D11, 2-BMC or 3-PCIE_M2M_NonDMA\n"));
7475 		return BCME_ERROR;
7476 	}
7477 
7478 	DHD_RING_LOCK(ring->ring_lock, flags);
7479 
7480 	prot->dmaxfer.in_progress = TRUE;
7481 	if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay,
7482 	        &prot->dmaxfer)) != BCME_OK) {
7483 		prot->dmaxfer.in_progress = FALSE;
7484 		DHD_RING_UNLOCK(ring->ring_lock, flags);
7485 		return ret;
7486 	}
7487 
7488 	dmap = (pcie_dma_xfer_params_t *)
7489 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
7490 
7491 	if (dmap == NULL) {
7492 		dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
7493 		prot->dmaxfer.in_progress = FALSE;
7494 		DHD_RING_UNLOCK(ring->ring_lock, flags);
7495 		return BCME_NOMEM;
7496 	}
7497 
7498 	/* Common msg buf hdr */
7499 	dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER;
7500 	dmap->cmn_hdr.request_id = htol32(DHD_FAKE_PKTID);
7501 	dmap->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
7502 	dmap->cmn_hdr.flags = ring->current_phase;
7503 	ring->seqnum++;
7504 
7505 	dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa));
7506 	dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa));
7507 	dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.dstmem.pa));
7508 	dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.dstmem.pa));
7509 	dmap->xfer_len = htol32(prot->dmaxfer.len);
7510 	dmap->srcdelay = htol32(prot->dmaxfer.srcdelay);
7511 	dmap->destdelay = htol32(prot->dmaxfer.destdelay);
7512 	prot->dmaxfer.d11_lpbk = d11_lpbk;
7513 	dmap->flags = (((core_num & PCIE_DMA_XFER_FLG_CORE_NUMBER_MASK)
7514 			<< PCIE_DMA_XFER_FLG_CORE_NUMBER_SHIFT) |
7515 			((prot->dmaxfer.d11_lpbk & PCIE_DMA_XFER_FLG_D11_LPBK_MASK)
7516 			 << PCIE_DMA_XFER_FLG_D11_LPBK_SHIFT));
7517 	prot->dmaxfer.start_usec = OSL_SYSUPTIME_US();
7518 
7519 	/* update ring's WR index and ring doorbell to dongle */
7520 	dhd_prot_ring_write_complete(dhd, ring, dmap, 1);
7521 
7522 	DHD_RING_UNLOCK(ring->ring_lock, flags);
7523 
7524 	DHD_ERROR(("DMA loopback Started...\n"));
7525 
7526 	return BCME_OK;
7527 } /* dhdmsgbuf_dmaxfer_req */
7528 
7529 int
7530 dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd, dma_xfer_info_t *result)
7531 {
7532 	dhd_prot_t *prot = dhd->prot;
7533 
7534 	if (prot->dmaxfer.in_progress)
7535 		result->status = DMA_XFER_IN_PROGRESS;
7536 	else if (prot->dmaxfer.status == 0)
7537 		result->status = DMA_XFER_SUCCESS;
7538 	else
7539 		result->status = DMA_XFER_FAILED;
7540 
7541 	result->type = prot->dmaxfer.d11_lpbk;
7542 	result->error_code = prot->dmaxfer.status;
7543 	result->num_bytes = prot->dmaxfer.len;
7544 	result->time_taken = prot->dmaxfer.time_taken;
7545 	if (prot->dmaxfer.time_taken) {
7546 		/* throughput in kBps */
7547 		result->tput =
7548 			(prot->dmaxfer.len * (1000 * 1000 / 1024)) /
7549 			(uint32)prot->dmaxfer.time_taken;
7550 	}
7551 
7552 	return BCME_OK;
7553 }
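/*
 * Worked example for the kBps formula above (integer arithmetic, where
 * 1000 * 1000 / 1024 == 976): a 65536-byte transfer completing in
 * 1000 usec reports 65536 * 976 / 1000 == 63963 kBps, slightly under the
 * exact 64000 KiB/s because the scale factor is truncated.
 */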
7554 
7555 /** Called in the process of submitting an ioctl to the dongle */
7556 static int
7557 dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
7558 {
7559 	int ret = 0;
7560 	uint copylen = 0;
7561 
7562 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7563 
7564 	if (dhd->bus->is_linkdown) {
7565 		DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n",
7566 			__FUNCTION__));
7567 		return -EIO;
7568 	}
7569 
7570 	if (dhd->busstate == DHD_BUS_DOWN) {
7571 		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
7572 		return -EIO;
7573 	}
7574 
7575 	/* don't talk to the dongle if fw is about to be reloaded */
7576 	if (dhd->hang_was_sent) {
7577 		DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
7578 			__FUNCTION__));
7579 		return -EIO;
7580 	}
7581 
7582 	if (cmd == WLC_GET_VAR && buf)
7583 	{
7584 		if (!len || !*(uint8 *)buf) {
7585 			DHD_ERROR(("%s(): Zero length bailing\n", __FUNCTION__));
7586 			ret = BCME_BADARG;
7587 			goto done;
7588 		}
7589 
7590 		/* Respond "bcmerror" and "bcmerrorstr" with local cache */
7591 		copylen = MIN(len, BCME_STRLEN);
7592 
7593 		if ((len >= strlen("bcmerrorstr")) &&
7594 			(!strcmp((char *)buf, "bcmerrorstr"))) {
7595 			strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), copylen);
7596 			*(uint8 *)((uint8 *)buf + (copylen - 1)) = '\0';
7597 			goto done;
7598 		} else if ((len >= strlen("bcmerror")) &&
7599 			!strcmp((char *)buf, "bcmerror")) {
7600 			*(uint32 *)buf = dhd->dongle_error;
7601 			goto done;
7602 		}
7603 	}
7604 
7605 	DHD_CTL(("query_ioctl: ACTION %d ifidx %d cmd %d len %d \n",
7606 	    action, ifidx, cmd, len));
7607 
7608 	ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
7609 
7610 	if (ret < 0) {
7611 		DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed\n", __FUNCTION__));
7612 		goto done;
7613 	}
7614 
7615 	/* wait for IOCTL completion message from dongle and get first fragment */
7616 	ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
7617 
7618 done:
7619 	return ret;
7620 }
7621 
7622 void
7623 dhd_msgbuf_iovar_timeout_dump(dhd_pub_t *dhd)
7624 {
7625 	uint32 intstatus;
7626 	dhd_prot_t *prot = dhd->prot;
7627 	dhd->rxcnt_timeout++;
7628 	dhd->rx_ctlerrs++;
7629 	dhd->iovar_timeout_occured = TRUE;
7630 	DHD_ERROR(("%s: resumed on timeout rxcnt_timeout%s %d ioctl_cmd %d "
7631 		"trans_id %d state %d busstate=%d ioctl_received=%d\n", __FUNCTION__,
7632 		dhd->is_sched_error ? " due to scheduling problem" : "",
7633 		dhd->rxcnt_timeout, prot->curr_ioctl_cmd, prot->ioctl_trans_id,
7634 		prot->ioctl_state, dhd->busstate, prot->ioctl_received));
7635 #if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
7636 	if (dhd->is_sched_error && dhd->memdump_enabled == DUMP_MEMFILE_BUGON) {
7637 		/* change g_assert_type to trigger Kernel panic */
7638 		g_assert_type = 2;
7639 		/* use ASSERT() to trigger panic */
7640 		ASSERT(0);
7641 	}
7642 #endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
7643 
7644 	if (prot->curr_ioctl_cmd == WLC_SET_VAR ||
7645 			prot->curr_ioctl_cmd == WLC_GET_VAR) {
7646 		char iovbuf[32];
7647 		int i;
7648 		int dump_size = 128;
7649 		uint8 *ioctl_buf = (uint8 *)prot->ioctbuf.va;
7650 		memset(iovbuf, 0, sizeof(iovbuf));
7651 		strncpy(iovbuf, ioctl_buf, sizeof(iovbuf) - 1);
7652 		iovbuf[sizeof(iovbuf) - 1] = '\0';
7653 		DHD_ERROR(("Current IOVAR (%s): %s\n",
7654 			prot->curr_ioctl_cmd == WLC_SET_VAR ?
7655 			"WLC_SET_VAR" : "WLC_GET_VAR", iovbuf));
7656 		DHD_ERROR(("========== START IOCTL REQBUF DUMP ==========\n"));
7657 		for (i = 0; i < dump_size; i++) {
7658 			DHD_ERROR(("%02X ", ioctl_buf[i]));
7659 			if ((i % 32) == 31) {
7660 				DHD_ERROR(("\n"));
7661 			}
7662 		}
7663 		DHD_ERROR(("\n========== END IOCTL REQBUF DUMP ==========\n"));
7664 	}
7665 
7666 	/* Check the PCIe link status by reading intstatus register */
7667 	intstatus = si_corereg(dhd->bus->sih,
7668 		dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
7669 	if (intstatus == (uint32)-1) {
7670 		DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
7671 		dhd->bus->is_linkdown = TRUE;
7672 	}
7673 
7674 	dhd_bus_dump_console_buffer(dhd->bus);
7675 	dhd_prot_debug_info_print(dhd);
7676 }
7677 
7678 /**
7679  * Waits for IOCTL completion message from the dongle, copies this into caller
7680  * provided parameter 'buf'.
7681  */
7682 static int
7683 dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf)
7684 {
7685 	dhd_prot_t *prot = dhd->prot;
7686 	int timeleft;
7687 	unsigned long flags;
7688 	int ret = 0;
7689 
7690 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7691 
7692 	if (dhd_query_bus_erros(dhd)) {
7693 		ret = -EIO;
7694 		goto out;
7695 	}
7696 
7697 	timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
7698 
7699 	if (prot->ioctl_received == 0) {
7700 		uint32 intstatus = si_corereg(dhd->bus->sih,
7701 			dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
7702 		int host_irq_disabled = dhdpcie_irq_disabled(dhd->bus);
7703 		if ((intstatus) && (intstatus != (uint32)-1) &&
7704 			(timeleft == 0) && (!dhd_query_bus_erros(dhd))) {
7705 			DHD_ERROR(("%s: iovar timeout trying again intstatus=%x"
7706 				" host_irq_disabled=%d\n",
7707 				__FUNCTION__, intstatus, host_irq_disabled));
7708 			dhd_pcie_intr_count_dump(dhd);
7709 			dhd_print_tasklet_status(dhd);
7710 			dhd_prot_process_ctrlbuf(dhd);
7711 			timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
7712 			/* Clear Interrupts */
7713 			dhdpcie_bus_clear_intstatus(dhd->bus);
7714 		}
7715 	}
7716 
7717 	if (timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
7718 		/* check whether the resume-on-timeout is related to a scheduling issue */
7719 		dhd->is_sched_error = FALSE;
7720 		if (dhd->bus->isr_entry_time > prot->ioctl_fillup_time) {
7721 			dhd->is_sched_error = dhd_bus_query_dpc_sched_errors(dhd);
7722 		}
7723 
7724 		dhd_msgbuf_iovar_timeout_dump(dhd);
7725 
7726 #ifdef DHD_FW_COREDUMP
7727 		/* Collect socram dump */
7728 		if (dhd->memdump_enabled) {
7729 			/* collect core dump */
7730 			dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT;
7731 			dhd_bus_mem_dump(dhd);
7732 		}
7733 #endif /* DHD_FW_COREDUMP */
7734 
7735 #ifdef SUPPORT_LINKDOWN_RECOVERY
7736 #ifdef CONFIG_ARCH_MSM
7737 		dhd->bus->no_cfg_restore = 1;
7738 #endif /* CONFIG_ARCH_MSM */
7739 #endif /* SUPPORT_LINKDOWN_RECOVERY */
7740 		ret = -ETIMEDOUT;
7741 		goto out;
7742 	} else {
7743 		if (prot->ioctl_received != IOCTL_RETURN_ON_SUCCESS) {
7744 			DHD_ERROR(("%s: IOCTL failure due to ioctl_received = %d\n",
7745 				__FUNCTION__, prot->ioctl_received));
7746 			ret = -EINVAL;
7747 			goto out;
7748 		}
7749 		dhd->rxcnt_timeout = 0;
7750 		dhd->rx_ctlpkts++;
7751 		DHD_CTL(("%s: ioctl resp resumed, got %d\n",
7752 			__FUNCTION__, prot->ioctl_resplen));
7753 	}
7754 
7755 	if (dhd->prot->ioctl_resplen > len)
7756 		dhd->prot->ioctl_resplen = (uint16)len;
7757 	if (buf)
7758 		bcopy(dhd->prot->retbuf.va, buf, dhd->prot->ioctl_resplen);
7759 
7760 	ret = (int)(dhd->prot->ioctl_status);
7761 
7762 out:
7763 	DHD_GENERAL_LOCK(dhd, flags);
7764 	dhd->prot->ioctl_state = 0;
7765 	dhd->prot->ioctl_resplen = 0;
7766 	dhd->prot->ioctl_received = IOCTL_WAIT;
7767 	dhd->prot->curr_ioctl_cmd = 0;
7768 	DHD_GENERAL_UNLOCK(dhd, flags);
7769 
7770 	return ret;
7771 } /* dhd_msgbuf_wait_ioctl_cmplt */
7772 
7773 static int
7774 dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
7775 {
7776 	int ret = 0;
7777 
7778 	DHD_TRACE(("%s: Enter \n", __FUNCTION__));
7779 
7780 	if (dhd->bus->is_linkdown) {
7781 		DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n",
7782 			__FUNCTION__));
7783 		return -EIO;
7784 	}
7785 
7786 	if (dhd->busstate == DHD_BUS_DOWN) {
7787 		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
7788 		return -EIO;
7789 	}
7790 
7791 	/* don't talk to the dongle if fw is about to be reloaded */
7792 	if (dhd->hang_was_sent) {
7793 		DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
7794 			__FUNCTION__));
7795 		return -EIO;
7796 	}
7797 
7798 	DHD_CTL(("ACTION %d ifidx %d cmd %d len %d \n",
7799 		action, ifidx, cmd, len));
7800 
7801 	/* Fill up msgbuf for ioctl req */
7802 	ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
7803 
7804 	if (ret < 0) {
7805 		DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed\n", __FUNCTION__));
7806 		goto done;
7807 	}
7808 
7809 	ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
7810 
7811 done:
7812 	return ret;
7813 }
7814 
7815 /** Called by upper DHD layer. Handles a protocol control response asynchronously. */
7816 int dhd_prot_ctl_complete(dhd_pub_t *dhd)
7817 {
7818 	return 0;
7819 }
7820 
7821 /** Called by upper DHD layer. Check for and handle local prot-specific iovar commands */
7822 int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name,
7823                              void *params, int plen, void *arg, int len, bool set)
7824 {
7825 	return BCME_UNSUPPORTED;
7826 }
7827 
7828 #ifdef DHD_DUMP_PCIE_RINGS
7829 int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, const void *user_buf,
7830 	unsigned long *file_posn, bool file_write)
7831 {
7832 	dhd_prot_t *prot;
7833 	msgbuf_ring_t *ring;
7834 	int ret = 0;
7835 	uint16 h2d_flowrings_total;
7836 	uint16 flowid;
7837 
7838 	if (!(dhd) || !(dhd->prot)) {
7839 		goto exit;
7840 	}
7841 	prot = dhd->prot;
7842 
7843 	/* Below is the same ring dump sequence followed in parser as well. */
7844 	ring = &prot->h2dring_ctrl_subn;
7845 	if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
7846 		goto exit;
7847 
7848 	ring = &prot->h2dring_rxp_subn;
7849 	if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
7850 		goto exit;
7851 
7852 	ring = &prot->d2hring_ctrl_cpln;
7853 	if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
7854 		goto exit;
7855 
7856 	ring = &prot->d2hring_tx_cpln;
7857 	if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
7858 		goto exit;
7859 
7860 	ring = &prot->d2hring_rx_cpln;
7861 	if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
7862 		goto exit;
7863 
7864 	h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
7865 	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
7866 		if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) {
7867 			goto exit;
7868 		}
7869 	}
7870 
7871 #ifdef EWP_EDL
7872 	if (dhd->dongle_edl_support) {
7873 		ring = prot->d2hring_edl;
7874 		if ((ret = dhd_edl_ring_hdr_write(dhd, ring, file, user_buf, file_posn)) < 0)
7875 			goto exit;
7876 	}
7877 	else if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support)
7878 #else
7879 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
7880 #endif /* EWP_EDL */
7881 	{
7882 		ring = prot->h2dring_info_subn;
7883 		if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
7884 			goto exit;
7885 
7886 		ring = prot->d2hring_info_cpln;
7887 		if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
7888 			goto exit;
7889 	}
7890 
7891 exit:
7892 	return ret;
7893 }
7894 
7895 /* Write to file */
7896 static
7897 int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
7898 	const void *user_buf, unsigned long *file_posn)
7899 {
7900 	int ret = 0;
7901 
7902 	if (ring == NULL) {
7903 		DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n",
7904 			__FUNCTION__));
7905 		return BCME_ERROR;
7906 	}
7907 	if (file) {
7908 		ret = dhd_os_write_file_posn(file, file_posn, (char *)(ring->dma_buf.va),
7909 				((unsigned long)(ring->max_items) * (ring->item_len)));
7910 		if (ret < 0) {
7911 			DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
7912 			ret = BCME_ERROR;
7913 		}
7914 	} else if (user_buf) {
7915 		ret = dhd_export_debug_data((char *)(ring->dma_buf.va), NULL, user_buf,
7916 			((unsigned long)(ring->max_items) * (ring->item_len)), (int *)file_posn);
7917 	}
7918 	return ret;
7919 }
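/*
 * Editor's note, an illustrative sizing example (values hypothetical): each
 * ring above is dumped as one contiguous block of max_items * item_len bytes
 * taken from ring->dma_buf.va, e.g. a completion ring with max_items = 1024
 * and item_len = 24 contributes 24576 bytes to the dump, and *file_posn
 * advances by the same amount.
 */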
7920 #endif /* DHD_DUMP_PCIE_RINGS */
7921 
7922 #ifdef EWP_EDL
7923 /* Write to file */
7924 static
7925 int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf,
7926 	unsigned long *file_posn)
7927 {
7928 	int ret = 0, nitems = 0;
7929 	char *buf = NULL, *ptr = NULL;
7930 	uint8 *msg_addr = NULL;
7931 	uint16	rd = 0;
7932 
7933 	if (ring == NULL) {
7934 		DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n",
7935 			__FUNCTION__));
7936 		ret = BCME_ERROR;
7937 		goto done;
7938 	}
7939 
7940 	buf = MALLOCZ(dhd->osh, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
7941 	if (buf == NULL) {
7942 		DHD_ERROR(("%s: buffer allocation failed\n", __FUNCTION__));
7943 		ret = BCME_ERROR;
7944 		goto done;
7945 	}
7946 	ptr = buf;
7947 
7948 	for (; nitems < D2HRING_EDL_MAX_ITEM; nitems++, rd++) {
7949 		msg_addr = (uint8 *)ring->dma_buf.va + (rd * ring->item_len);
7950 		memcpy(ptr, (char *)msg_addr, D2HRING_EDL_HDR_SIZE);
7951 		ptr += D2HRING_EDL_HDR_SIZE;
7952 	}
7953 	if (file) {
7954 		ret = dhd_os_write_file_posn(file, file_posn, buf,
7955 				(D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM));
7956 		if (ret < 0) {
7957 			DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
7958 			goto done;
7959 		}
7960 	}
7961 	else {
7962 		ret = dhd_export_debug_data(buf, NULL, user_buf,
7963 			(D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM), file_posn);
7964 	}
7965 
7966 done:
7967 	if (buf) {
7968 		MFREE(dhd->osh, buf, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
7969 	}
7970 	return ret;
7971 }
7972 #endif /* EWP_EDL */
7973 
7974 /** Add prot dump output to a buffer */
7975 void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
7976 {
7977 
7978 	if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM)
7979 		bcm_bprintf(b, "\nd2h_sync: SEQNUM:");
7980 	else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM)
7981 		bcm_bprintf(b, "\nd2h_sync: XORCSUM:");
7982 	else
7983 		bcm_bprintf(b, "\nd2h_sync: NONE:");
7984 	bcm_bprintf(b, " d2h_sync_wait max<%lu> tot<%lu>\n",
7985 		dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot);
7986 
7987 	bcm_bprintf(b, "\nDongle DMA Indices: h2d %d  d2h %d index size %d bytes\n",
7988 		dhd->dma_h2d_ring_upd_support,
7989 		dhd->dma_d2h_ring_upd_support,
7990 		dhd->prot->rw_index_sz);
7991 	bcm_bprintf(b, "h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
7992 		h2d_max_txpost, dhd->prot->h2d_max_txpost);
7993 	bcm_bprintf(b, "pktid_txq_start_cnt: %d\n", dhd->prot->pktid_txq_start_cnt);
7994 	bcm_bprintf(b, "pktid_txq_stop_cnt: %d\n", dhd->prot->pktid_txq_stop_cnt);
7995 	bcm_bprintf(b, "pktid_depleted_cnt: %d\n", dhd->prot->pktid_depleted_cnt);
7996 }
7997 
7998 /* Update local copy of dongle statistics */
7999 void dhd_prot_dstats(dhd_pub_t *dhd)
8000 {
8001 	return;
8002 }
8003 
8004 /** Called by upper DHD layer */
8005 int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
8006 	uint reorder_info_len, void **pkt, uint32 *free_buf_count)
8007 {
8008 	return 0;
8009 }
8010 
8011 /** Debug related, post a dummy message to interrupt dongle. Used to process cons commands. */
8012 int
8013 dhd_post_dummy_msg(dhd_pub_t *dhd)
8014 {
8015 	unsigned long flags;
8016 	hostevent_hdr_t *hevent = NULL;
8017 	uint16 alloced = 0;
8018 
8019 	dhd_prot_t *prot = dhd->prot;
8020 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
8021 
8022 	DHD_RING_LOCK(ring->ring_lock, flags);
8023 
8024 	hevent = (hostevent_hdr_t *)
8025 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
8026 
8027 	if (hevent == NULL) {
8028 		DHD_RING_UNLOCK(ring->ring_lock, flags);
8029 		return -1;
8030 	}
8031 
8032 	/* CMN msg header */
8033 	hevent->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
8034 	ring->seqnum++;
8035 	hevent->msg.msg_type = MSG_TYPE_HOST_EVNT;
8036 	hevent->msg.if_id = 0;
8037 	hevent->msg.flags = ring->current_phase;
8038 
8039 	/* Event payload */
8040 	hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD);
8041 
8042 	/* Since we are filling the data directly into the bufptr obtained
8043 	 * from the msgbuf, we can call write_complete directly.
8044 	 */
8045 	dhd_prot_ring_write_complete(dhd, ring, hevent, 1);
8046 
8047 	DHD_RING_UNLOCK(ring->ring_lock, flags);
8048 
8049 	return 0;
8050 }
8051 
8052 /**
8053  * If exactly_nitems is true, this function will allocate space for nitems or fail
8054  * If exactly_nitems is false, this function will allocate space for nitems or less
8055  */
8056 static void * BCMFASTPATH
8057 dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
8058 	uint16 nitems, uint16 * alloced, bool exactly_nitems)
8059 {
8060 	void * ret_buf;
8061 
8062 	/* Alloc space for nitems in the ring */
8063 	ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
8064 
8065 	if (ret_buf == NULL) {
8066 		/* HWA TODO, need to get RD pointer from different array
8067 		 * which HWA will directly write into host memory
8068 		 */
8069 		/* if alloc failed , invalidate cached read ptr */
8070 		if (dhd->dma_d2h_ring_upd_support) {
8071 			ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
8072 		} else {
8073 			dhd_bus_cmn_readshared(dhd->bus, &(ring->rd), RING_RD_UPD, ring->idx);
8074 #ifdef SUPPORT_LINKDOWN_RECOVERY
8075 			/* Check if ring->rd is valid */
8076 			if (ring->rd >= ring->max_items) {
8077 				DHD_ERROR(("%s: Invalid rd idx=%d\n", ring->name, ring->rd));
8078 				dhd->bus->read_shm_fail = TRUE;
8079 				return NULL;
8080 			}
8081 #endif /* SUPPORT_LINKDOWN_RECOVERY */
8082 		}
8083 
8084 		/* Try allocating once more */
8085 		ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
8086 
8087 		if (ret_buf == NULL) {
8088 			DHD_INFO(("%s: Ring space not available\n", ring->name));
8089 			return NULL;
8090 		}
8091 	}
8092 
8093 	if (ret_buf == HOST_RING_BASE(ring)) {
8094 		DHD_INFO(("%s: setting the phase now\n", ring->name));
8095 		ring->current_phase = ring->current_phase ? 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
8096 	}
8097 
8098 	/* Return alloced space */
8099 	return ret_buf;
8100 }
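/*
 * Editor's sketch of the caller-side pattern assumed above (illustrative,
 * with ring->ring_lock held): if the cached ring->rd makes the ring look
 * full, the RD index is refreshed (from the DMA index array or shared TCM)
 * and the allocation is retried exactly once.
 *
 *	msg = dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
 *	if (msg == NULL)
 *		return BCME_NOMEM;	// still full after the RD refresh
 *	// ... fill *msg in place, then publish it:
 *	dhd_prot_ring_write_complete(dhd, ring, msg, alloced);
 */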
8101 
8102 /**
8103  * Non-inline ioctl request.
8104  * First, form an ioctl request in the circular buffer, as per the ioctptr_reqst_hdr_t header.
8105  * Then form a separate request buffer with a 4-byte common header added at the front;
8106  * the buf contents from the parent function are copied into the remaining section of this buffer.
8107  */
8108 static int
8109 dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx)
8110 {
8111 	dhd_prot_t *prot = dhd->prot;
8112 	ioctl_req_msg_t *ioct_rqst;
8113 	void * ioct_buf;	/* For ioctl payload */
8114 	uint16  rqstlen, resplen;
8115 	unsigned long flags;
8116 	uint16 alloced = 0;
8117 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
8118 
8119 	if (dhd_query_bus_erros(dhd)) {
8120 		return -EIO;
8121 	}
8122 
8123 	rqstlen = len;
8124 	resplen = len;
8125 
8126 	/* Limit the ioctl request to MSGBUF_MAX_MSG_SIZE bytes including hdrs.
8127 	 * An 8K allocation of the dongle buffer fails, and dhd doesn't give separate
8128 	 * input & output buf lens, so assume the input length is never more than 2K.
8129 	 */
8130 	rqstlen = MIN(rqstlen, MSGBUF_IOCTL_MAX_RQSTLEN);
8131 
8132 	DHD_RING_LOCK(ring->ring_lock, flags);
8133 
8134 	if (prot->ioctl_state) {
8135 		DHD_ERROR(("%s: pending ioctl %02x\n", __FUNCTION__, prot->ioctl_state));
8136 		DHD_RING_UNLOCK(ring->ring_lock, flags);
8137 		return BCME_BUSY;
8138 	} else {
8139 		prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING;
8140 	}
8141 
8142 	/* Request for cbuf space */
8143 	ioct_rqst = (ioctl_req_msg_t*)
8144 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
8145 	if (ioct_rqst == NULL) {
8146 		DHD_ERROR(("couldn't allocate space on msgring to send ioctl request\n"));
8147 		prot->ioctl_state = 0;
8148 		prot->curr_ioctl_cmd = 0;
8149 		prot->ioctl_received = IOCTL_WAIT;
8150 		DHD_RING_UNLOCK(ring->ring_lock, flags);
8151 		return -1;
8152 	}
8153 
8154 	/* Common msg buf hdr */
8155 	ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ;
8156 	ioct_rqst->cmn_hdr.if_id = (uint8)ifidx;
8157 	ioct_rqst->cmn_hdr.flags = ring->current_phase;
8158 	ioct_rqst->cmn_hdr.request_id = htol32(DHD_IOCTL_REQ_PKTID);
8159 	ioct_rqst->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
8160 	ring->seqnum++;
8161 
8162 	ioct_rqst->cmd = htol32(cmd);
8163 	prot->curr_ioctl_cmd = cmd;
8164 	ioct_rqst->output_buf_len = htol16(resplen);
8165 	prot->ioctl_trans_id++;
8166 	ioct_rqst->trans_id = prot->ioctl_trans_id;
8167 
8168 	/* populate ioctl buffer info */
8169 	ioct_rqst->input_buf_len = htol16(rqstlen);
8170 	ioct_rqst->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->ioctbuf.pa));
8171 	ioct_rqst->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->ioctbuf.pa));
8172 	/* copy ioct payload */
8173 	ioct_buf = (void *) prot->ioctbuf.va;
8174 
8175 	prot->ioctl_fillup_time = OSL_LOCALTIME_NS();
8176 
8177 	if (buf)
8178 		memcpy(ioct_buf, buf, len);
8179 
8180 	OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len);
8181 
8182 	if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN))
8183 		DHD_ERROR(("host ioctl address unaligned!\n"));
8184 
8185 	DHD_CTL(("submitted IOCTL request request_id %d, cmd %d, output_buf_len %d, tx_id %d\n",
8186 		ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len,
8187 		ioct_rqst->trans_id));
8188 
8189 	/* update ring's WR index and ring doorbell to dongle */
8190 	dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
8191 
8192 	DHD_RING_UNLOCK(ring->ring_lock, flags);
8193 
8194 	return 0;
8195 } /* dhd_fillup_ioct_reqst */
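/*
 * Editor's illustration of the resulting layout (field widths schematic):
 * the fixed-size ioctl_req_msg_t occupies one slot of the H2D control
 * submission ring, while the payload (clamped to MSGBUF_IOCTL_MAX_RQSTLEN
 * bytes) is copied into the separate prot->ioctbuf DMA buffer, whose
 * physical address travels in host_input_buf_addr:
 *
 *	ctrl ring slot: [ cmn_hdr | cmd | trans_id | buf lens | ioctbuf PA ]
 *	prot->ioctbuf:  [ ioctl payload, len bytes, cache-flushed          ]
 */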
8196 
8197 /**
8198  * dhd_prot_ring_attach - Initialize the msgbuf_ring object and attach a
8199  * DMA-able buffer to it. The ring is NOT tagged as inited until all the ring
8200  * information is posted to the dongle.
8201  *
8202  * Invoked in dhd_prot_attach for the common rings, and in dhd_prot_init for
8203  * each flowring in pool of flowrings.
8204  *
8205  * returns BCME_OK=0 on success
8206  * returns non-zero negative error value on failure.
8207  */
8208 static int
8209 dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, const char *name,
8210 	uint16 max_items, uint16 item_len, uint16 ringid)
8211 {
8212 	int dma_buf_alloced = BCME_NOMEM;
8213 	uint32 dma_buf_len = max_items * item_len;
8214 	dhd_prot_t *prot = dhd->prot;
8215 	uint16 max_flowrings = dhd->bus->max_tx_flowrings;
8216 	dhd_dma_buf_t *dma_buf = NULL;
8217 
8218 	ASSERT(ring);
8219 	ASSERT(name);
8220 	ASSERT((max_items < 0xFFFF) && (item_len < 0xFFFF) && (ringid < 0xFFFF));
8221 
8222 	/* Init name */
8223 	strncpy(ring->name, name, RING_NAME_MAX_LENGTH);
8224 	ring->name[RING_NAME_MAX_LENGTH - 1] = '\0';
8225 
8226 	ring->idx = ringid;
8227 
8228 	ring->max_items = max_items;
8229 	ring->item_len = item_len;
8230 
8231 	/* A contiguous space may be reserved for all flowrings */
8232 	if (DHD_IS_FLOWRING(ringid, max_flowrings) && (prot->flowrings_dma_buf.va)) {
8233 		/* Carve out from the contiguous DMA-able flowring buffer */
8234 		uint16 flowid;
8235 		uint32 base_offset;
8236 
8237 		dhd_dma_buf_t *rsv_buf = &prot->flowrings_dma_buf;
8238 		dma_buf = &ring->dma_buf;
8239 
8240 		flowid = DHD_RINGID_TO_FLOWID(ringid);
8241 		base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len;
8242 
8243 		ASSERT(base_offset + dma_buf_len <= rsv_buf->len);
8244 
8245 		dma_buf->len = dma_buf_len;
8246 		dma_buf->va = (void *)((uintptr)rsv_buf->va + base_offset);
8247 		PHYSADDRHISET(dma_buf->pa, PHYSADDRHI(rsv_buf->pa));
8248 		PHYSADDRLOSET(dma_buf->pa, PHYSADDRLO(rsv_buf->pa) + base_offset);
8249 
8250 		/* On 64bit, contiguous space may not span across 0x00000000FFFFFFFF */
8251 		ASSERT(PHYSADDRLO(dma_buf->pa) >= PHYSADDRLO(rsv_buf->pa));
8252 
8253 		dma_buf->dmah   = rsv_buf->dmah;
8254 		dma_buf->secdma = rsv_buf->secdma;
8255 
8256 		(void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
8257 	} else {
8258 #ifdef EWP_EDL
8259 		if (ring == dhd->prot->d2hring_edl) {
8260 			/* For EDL ring, memory is alloced during attach,
8261 			* so just need to copy the dma_buf to the ring's dma_buf
8262 			*/
8263 			memcpy(&ring->dma_buf, &dhd->edl_ring_mem, sizeof(ring->dma_buf));
8264 			dma_buf = &ring->dma_buf;
8265 			if (dma_buf->va == NULL) {
8266 				return BCME_NOMEM;
8267 			}
8268 		} else
8269 #endif /* EWP_EDL */
8270 		{
8271 			/* Allocate a dhd_dma_buf */
8272 			dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len);
8273 			if (dma_buf_alloced != BCME_OK) {
8274 				return BCME_NOMEM;
8275 			}
8276 		}
8277 	}
8278 
8279 	/* CAUTION: Save ring::base_addr in little endian format! */
8280 	dhd_base_addr_htolpa(&ring->base_addr, ring->dma_buf.pa);
8281 
8282 #ifdef BCM_SECURE_DMA
8283 	if (SECURE_DMA_ENAB(prot->osh)) {
8284 		ring->dma_buf.secdma = MALLOCZ(prot->osh, sizeof(sec_cma_info_t));
8285 		if (ring->dma_buf.secdma == NULL) {
8286 			goto free_dma_buf;
8287 		}
8288 	}
8289 #endif /* BCM_SECURE_DMA */
8290 
8291 	ring->ring_lock = dhd_os_spin_lock_init(dhd->osh);
8292 
8293 	DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d "
8294 		"ring start %p buf phys addr  %x:%x \n",
8295 		ring->name, ring->max_items, ring->item_len,
8296 		dma_buf_len, ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
8297 		ltoh32(ring->base_addr.low_addr)));
8298 
8299 	return BCME_OK;
8300 
8301 #ifdef BCM_SECURE_DMA
8302 free_dma_buf:
8303 	if (dma_buf_alloced == BCME_OK) {
8304 		dhd_dma_buf_free(dhd, &ring->dma_buf);
8305 	}
8306 #endif /* BCM_SECURE_DMA */
8307 
8308 	return BCME_NOMEM;
8309 
8310 } /* dhd_prot_ring_attach */
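/*
 * Editor's worked example of the flowring carve-out above (numbers are
 * hypothetical): with flowid = 5, BCMPCIE_H2D_COMMON_MSGRINGS = 2,
 * max_items = 512 and item_len = 48:
 *
 *	dma_buf_len = 512 * 48;			// 24576 bytes
 *	base_offset = (5 - 2) * dma_buf_len;	// 73728 bytes
 *	dma_buf->va = (uint8 *)rsv_buf->va + base_offset;
 *
 * i.e. each flowring occupies a fixed-size slice of the single contiguous
 * reservation, so no per-ring allocation is needed.
 */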
8311 
8312 /**
8313  * dhd_prot_ring_init - Post the common ring information to dongle.
8314  *
8315  * Used only for common rings.
8316  *
8317  * The flowrings information is passed via the create flowring control message
8318  * (tx_flowring_create_request_t) sent over the H2D control submission common
8319  * ring.
8320  */
8321 static void
8322 dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring)
8323 {
8324 	ring->wr = 0;
8325 	ring->rd = 0;
8326 	ring->curr_rd = 0;
8327 	/* Reset hwa_db_type for all rings,
8328 	 * for data path rings, it will be assigned separately post init
8329 	 * from dhd_prot_d2h_sync_init and dhd_prot_h2d_sync_init
8330 	 */
8331 	ring->hwa_db_type = 0;
8332 
8333 	/* CAUTION: ring::base_addr already in Little Endian */
8334 	dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr,
8335 		sizeof(sh_addr_t), RING_BUF_ADDR, ring->idx);
8336 	dhd_bus_cmn_writeshared(dhd->bus, &ring->max_items,
8337 		sizeof(uint16), RING_MAX_ITEMS, ring->idx);
8338 	dhd_bus_cmn_writeshared(dhd->bus, &ring->item_len,
8339 		sizeof(uint16), RING_ITEM_LEN, ring->idx);
8340 
8341 	dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
8342 		sizeof(uint16), RING_WR_UPD, ring->idx);
8343 	dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
8344 		sizeof(uint16), RING_RD_UPD, ring->idx);
8345 
8346 	/* ring inited */
8347 	ring->inited = TRUE;
8348 
8349 } /* dhd_prot_ring_init */
8350 
8351 /**
8352  * dhd_prot_ring_reset - bzero a ring's DMA-ble buffer and cache flush
8353  * Reset WR and RD indices to 0.
8354  */
8355 static void
8356 dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring)
8357 {
8358 	DHD_TRACE(("%s\n", __FUNCTION__));
8359 
8360 	dhd_dma_buf_reset(dhd, &ring->dma_buf);
8361 
8362 	ring->rd = ring->wr = 0;
8363 	ring->curr_rd = 0;
8364 	ring->inited = FALSE;
8365 	ring->create_pending = FALSE;
8366 }
8367 
8368 /**
8369  * dhd_prot_ring_detach - Detach the DMA-able buffer and any other objects
8370  * hanging off the msgbuf_ring.
8371  */
8372 static void
8373 dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring)
8374 {
8375 	dhd_prot_t *prot = dhd->prot;
8376 	uint16 max_flowrings = dhd->bus->max_tx_flowrings;
8377 	ASSERT(ring);
8378 
8379 	ring->inited = FALSE;
8380 	/* rd = ~0, wr = ring->rd - 1, max_items = 0, len_item = ~0 */
8381 
8382 #ifdef BCM_SECURE_DMA
8383 	if (SECURE_DMA_ENAB(prot->osh)) {
8384 		if (ring->dma_buf.secdma) {
8385 			SECURE_DMA_UNMAP_ALL(prot->osh, ring->dma_buf.secdma);
8386 			MFREE(prot->osh, ring->dma_buf.secdma, sizeof(sec_cma_info_t));
8387 			ring->dma_buf.secdma = NULL;
8388 		}
8389 	}
8390 #endif /* BCM_SECURE_DMA */
8391 
8392 	/* If the DMA-able buffer was carved out of a pre-reserved contiguous
8393 	 * memory, then simply stop using it.
8394 	 */
8395 	if (DHD_IS_FLOWRING(ring->idx, max_flowrings) && (prot->flowrings_dma_buf.va)) {
8396 		(void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
8397 		memset(&ring->dma_buf, 0, sizeof(dhd_dma_buf_t));
8398 	} else {
8399 		dhd_dma_buf_free(dhd, &ring->dma_buf);
8400 	}
8401 
8402 	dhd_os_spin_lock_deinit(dhd->osh, ring->ring_lock);
8403 
8404 } /* dhd_prot_ring_detach */
8405 
8406 /* Fetch number of H2D flowrings given the total number of h2d rings */
8407 uint16
8408 dhd_get_max_flow_rings(dhd_pub_t *dhd)
8409 {
8410 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
8411 		return dhd->bus->max_tx_flowrings;
8412 	else
8413 		return (dhd->bus->max_tx_flowrings - BCMPCIE_H2D_COMMON_MSGRINGS);
8414 }
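/*
 * Editor's example (hypothetical numbers): on a fw_rev older than
 * PCIE_SHARED_VERSION_6 that advertises max_tx_flowrings = 42, only
 * 42 - BCMPCIE_H2D_COMMON_MSGRINGS = 40 are real flowrings, because the
 * advertised count still includes the H2D common (ctrl/rxpost) rings.
 */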
8415 
8416 /**
8417  * dhd_prot_flowrings_pool_attach - Initialize a pool of flowring msgbuf_ring_t.
8418  *
8419  * Allocate a pool of msgbuf_ring along with DMA-able buffers for flowrings.
8420  * The dongle includes the common rings when it advertises the number of H2D rings.
8421  * Allocates a pool of msgbuf_ring_t and invokes dhd_prot_ring_attach to
8422  * allocate the DMA-able buffer and initialize each msgbuf_ring_t object.
8423  *
8424  * dhd_prot_ring_attach is invoked to perform the actual initialization and
8425  * attaching the DMA-able buffer.
8426  *
8427  * Later dhd_prot_flowrings_pool_fetch() may be used to fetch a preallocated and
8428  * initialized msgbuf_ring_t object.
8429  *
8430  * returns BCME_OK=0 on success
8431  * returns non-zero negative error value on failure.
8432  */
8433 static int
8434 dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd)
8435 {
8436 	uint16 flowid;
8437 	msgbuf_ring_t *ring;
8438 	uint16 h2d_flowrings_total; /* exclude H2D common rings */
8439 	dhd_prot_t *prot = dhd->prot;
8440 	char ring_name[RING_NAME_MAX_LENGTH];
8441 
8442 	if (prot->h2d_flowrings_pool != NULL)
8443 		return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
8444 
8445 	ASSERT(prot->h2d_rings_total == 0);
8446 
8447 	/* h2d_rings_total includes H2D common rings: ctrl and rxbuf subn */
8448 	prot->h2d_rings_total = (uint16)dhd_bus_max_h2d_queues(dhd->bus);
8449 
8450 	if (prot->h2d_rings_total < BCMPCIE_H2D_COMMON_MSGRINGS) {
8451 		DHD_ERROR(("%s: h2d_rings_total advertised as %u\n",
8452 			__FUNCTION__, prot->h2d_rings_total));
8453 		return BCME_ERROR;
8454 	}
8455 
8456 	/* Subtract number of H2D common rings, to determine number of flowrings */
8457 	h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
8458 
8459 	DHD_ERROR(("Attach flowrings pool for %d rings\n", h2d_flowrings_total));
8460 
8461 	/* Allocate pool of msgbuf_ring_t objects for all flowrings */
8462 	prot->h2d_flowrings_pool = (msgbuf_ring_t *)MALLOCZ(prot->osh,
8463 		(h2d_flowrings_total * sizeof(msgbuf_ring_t)));
8464 
8465 	if (prot->h2d_flowrings_pool == NULL) {
8466 		DHD_ERROR(("%s: flowrings pool for %d flowrings, alloc failure\n",
8467 			__FUNCTION__, h2d_flowrings_total));
8468 		goto fail;
8469 	}
8470 
8471 	/* Setup & Attach a DMA-able buffer to each flowring in the flowring pool */
8472 	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
8473 		snprintf(ring_name, sizeof(ring_name), "h2dflr_%03u", flowid);
8474 		if (dhd_prot_ring_attach(dhd, ring, ring_name,
8475 		        prot->h2d_max_txpost, H2DRING_TXPOST_ITEMSIZE,
8476 		        DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) {
8477 			goto attach_fail;
8478 		}
8479 		/*
8480 		 * TODO - Currently flowring HWA is disabled; it can be enabled as below:
8481 		 * (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_TXPOSTS) ? HWA_DB_TYPE_TXPOSTS : 0;
8482 		 */
8483 		ring->hwa_db_type = 0;
8484 	}
8485 
8486 	return BCME_OK;
8487 
8488 attach_fail:
8489 	dhd_prot_flowrings_pool_detach(dhd); /* Free entire pool of flowrings */
8490 
8491 fail:
8492 	prot->h2d_rings_total = 0;
8493 	return BCME_NOMEM;
8494 
8495 } /* dhd_prot_flowrings_pool_attach */
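/*
 * Editor's note on the pool indexing assumed above (illustrative): the pool
 * is addressed by flowid relative to the H2D common rings, so with
 * BCMPCIE_H2D_COMMON_MSGRINGS = 2, flowid 2 is pool entry 0 ("h2dflr_002")
 * and maps to ringid DHD_FLOWID_TO_RINGID(2); its DMA buffer is attached
 * once here and afterwards only fetched/reset, never reallocated.
 */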
8496 
8497 /**
8498  * dhd_prot_flowrings_pool_reset - Reset all msgbuf_ring_t objects in the pool.
8499  * Invokes dhd_prot_ring_reset to perform the actual reset.
8500  *
8501  * The DMA-able buffer is not freed during reset and neither is the flowring
8502  * pool freed.
8503  *
8504  * dhd_prot_flowrings_pool_reset will be invoked in dhd_prot_reset. Following
8505  * the dhd_prot_reset, dhd_prot_init will be re-invoked, and the flowring pool
8506  * from a previous flowring pool instantiation will be reused.
8507  *
8508  * This will avoid a fragmented DMA-able memory condition, if multiple
8509  * dhd_prot_reset were invoked to reboot the dongle without a full detach/attach
8510  * cycle.
8511  */
8512 static void
8513 dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd)
8514 {
8515 	uint16 flowid, h2d_flowrings_total;
8516 	msgbuf_ring_t *ring;
8517 	dhd_prot_t *prot = dhd->prot;
8518 
8519 	if (prot->h2d_flowrings_pool == NULL) {
8520 		ASSERT(prot->h2d_rings_total == 0);
8521 		return;
8522 	}
8523 	h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
8524 	/* Reset each flowring in the flowring pool */
8525 	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
8526 		dhd_prot_ring_reset(dhd, ring);
8527 		ring->inited = FALSE;
8528 	}
8529 
8530 	/* Flowring pool state must be as-if dhd_prot_flowrings_pool_attach */
8531 }
8532 
8533 /**
8534  * dhd_prot_flowrings_pool_detach - Free pool of msgbuf_ring along with
8535  * DMA-able buffers for flowrings.
8536  * dhd_prot_ring_detach is invoked to free the DMA-able buffer and perform any
8537  * de-initialization of each msgbuf_ring_t.
8538  */
8539 static void
8540 dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd)
8541 {
8542 	int flowid;
8543 	msgbuf_ring_t *ring;
8544 	uint16 h2d_flowrings_total; /* exclude H2D common rings */
8545 	dhd_prot_t *prot = dhd->prot;
8546 
8547 	if (prot->h2d_flowrings_pool == NULL) {
8548 		ASSERT(prot->h2d_rings_total == 0);
8549 		return;
8550 	}
8551 
8552 	h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
8553 	/* Detach the DMA-able buffer for each flowring in the flowring pool */
8554 	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
8555 		dhd_prot_ring_detach(dhd, ring);
8556 	}
8557 
8558 	MFREE(prot->osh, prot->h2d_flowrings_pool,
8559 		(h2d_flowrings_total * sizeof(msgbuf_ring_t)));
8560 
8561 	prot->h2d_flowrings_pool = (msgbuf_ring_t*)NULL;
8562 	prot->h2d_rings_total = 0;
8563 
8564 } /* dhd_prot_flowrings_pool_detach */
8565 
8566 /**
8567  * dhd_prot_flowrings_pool_fetch - Fetch a preallocated and initialized
8568  * msgbuf_ring from the flowring pool, and assign it.
8569  *
8570  * Unlike common rings, which uses a dhd_prot_ring_init() to pass the common
8571  * ring information to the dongle, a flowring's information is passed via a
8572  * flowring create control message.
8573  *
8574  * Only the ring state (WR, RD) index are initialized.
8575  */
8576 static msgbuf_ring_t *
8577 dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd, uint16 flowid)
8578 {
8579 	msgbuf_ring_t *ring;
8580 	dhd_prot_t *prot = dhd->prot;
8581 
8582 	ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
8583 	ASSERT(flowid < prot->h2d_rings_total);
8584 	ASSERT(prot->h2d_flowrings_pool != NULL);
8585 
8586 	ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
8587 
8588 	/* ASSERT flow_ring->inited == FALSE */
8589 
8590 	ring->wr = 0;
8591 	ring->rd = 0;
8592 	ring->curr_rd = 0;
8593 	ring->inited = TRUE;
8594 	/**
8595 	 * Every time a flowring starts dynamically, initialize current_phase with 0
8596 	 * then flip to BCMPCIE_CMNHDR_PHASE_BIT_INIT
8597 	 */
8598 	ring->current_phase = 0;
8599 	return ring;
8600 }
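/*
 * Editor's note on the phase handling (illustrative): current_phase is
 * carried in each message's cmn_hdr flags and is toggled whenever an
 * allocation lands back at the ring base (see dhd_prot_alloc_ring_space),
 * which lets the consumer tell freshly written items from stale ones after
 * every wrap of a dynamically restarted flowring.
 */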
8601 
8602 /**
8603  * dhd_prot_flowrings_pool_release - release a previously fetched flowring's
8604  * msgbuf_ring back to the flow_ring pool.
8605  */
8606 void
8607 dhd_prot_flowrings_pool_release(dhd_pub_t *dhd, uint16 flowid, void *flow_ring)
8608 {
8609 	msgbuf_ring_t *ring;
8610 	dhd_prot_t *prot = dhd->prot;
8611 
8612 	ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
8613 	ASSERT(flowid < prot->h2d_rings_total);
8614 	ASSERT(prot->h2d_flowrings_pool != NULL);
8615 
8616 	ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
8617 
8618 	ASSERT(ring == (msgbuf_ring_t*)flow_ring);
8619 	/* ASSERT flow_ring->inited == TRUE */
8620 
8621 	(void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
8622 
8623 	ring->wr = 0;
8624 	ring->rd = 0;
8625 	ring->inited = FALSE;
8626 
8627 	ring->curr_rd = 0;
8628 }
8629 
8630 /* Assumes only one index is updated at a time */
8631 /* If exactly_nitems is true, this function will allocate space for nitems or fail */
8632 /*    Exception: when wrap-around is encountered (the last nitems of the ring buffer), to prevent a hang */
8633 /* If exactly_nitems is false, this function will allocate space for nitems or less */
8634 static void *BCMFASTPATH
8635 dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced,
8636 	bool exactly_nitems)
8637 {
8638 	void *ret_ptr = NULL;
8639 	uint16 ring_avail_cnt;
8640 
8641 	ASSERT(nitems <= ring->max_items);
8642 
8643 	ring_avail_cnt = CHECK_WRITE_SPACE(ring->rd, ring->wr, ring->max_items);
8644 
8645 	if ((ring_avail_cnt == 0) ||
8646 	       (exactly_nitems && (ring_avail_cnt < nitems) &&
8647 	       ((ring->max_items - ring->wr) >= nitems))) {
8648 		DHD_INFO(("Space not available: ring %s items %d write %d read %d\n",
8649 			ring->name, nitems, ring->wr, ring->rd));
8650 		return NULL;
8651 	}
8652 	*alloced = MIN(nitems, ring_avail_cnt);
8653 
8654 	/* Return next available space */
8655 	ret_ptr = (char *)DHD_RING_BGN_VA(ring) + (ring->wr * ring->item_len);
8656 
8657 	/* Update write index */
8658 	if ((ring->wr + *alloced) == ring->max_items)
8659 		ring->wr = 0;
8660 	else if ((ring->wr + *alloced) < ring->max_items)
8661 		ring->wr += *alloced;
8662 	else {
8663 		/* Should never hit this */
8664 		ASSERT(0);
8665 		return NULL;
8666 	}
8667 
8668 	return ret_ptr;
8669 } /* dhd_prot_get_ring_space */
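/*
 * Editor's worked example of the space check above (hypothetical values,
 * using CHECK_WRITE_SPACE as defined for this file): with max_items = 8,
 * rd = 5 and wr = 1, the available count is rd - wr - 1 = 3. A request for
 * nitems = 4 with exactly_nitems = FALSE grants 3 items (*alloced = 3, wr
 * advances to 4); the same request with exactly_nitems = TRUE returns NULL,
 * since 4 contiguous items would fit before the wrap but only 3 are free.
 */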
8670 
8671 /**
8672  * dhd_prot_ring_write_complete - Host updates the new WR index on producing
8673  * new messages in a H2D ring. The messages are flushed from cache prior to
8674  * posting the new WR index. The new WR index will be updated in the DMA index
8675  * array or directly in the dongle's ring state memory.
8676  * A PCIE doorbell will be generated to wake up the dongle.
8677  * This is a non-atomic function, make sure the callers
8678  * always hold appropriate locks.
8679  */
8680 static void BCMFASTPATH
8681 __dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
8682 	uint16 nitems)
8683 {
8684 	dhd_prot_t *prot = dhd->prot;
8685 	uint32 db_index;
8686 	uint16 max_flowrings = dhd->bus->max_tx_flowrings;
8687 	uint corerev;
8688 
8689 	/* cache flush */
8690 	OSL_CACHE_FLUSH(p, ring->item_len * nitems);
8691 
8692 	/* For HWA, update db_index and ring mb2 DB and return */
8693 	if (HWA_ACTIVE(dhd) && ring->hwa_db_type) {
8694 		db_index = HWA_DB_INDEX_VALUE(ring->wr) | ring->hwa_db_type;
8695 		DHD_TRACE(("%s: ring(%s) wr(%d) hwa_db_type(0x%x) db_index(0x%x)\n",
8696 			__FUNCTION__, ring->name, ring->wr, ring->hwa_db_type, db_index));
8697 		prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
8698 		return;
8699 	}
8700 
8701 	if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
8702 			dhd_prot_dma_indx_set(dhd, ring->wr,
8703 			                      H2D_DMA_INDX_WR_UPD, ring->idx);
8704 	} else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings)) {
8705 			dhd_prot_dma_indx_set(dhd, ring->wr,
8706 			H2D_IFRM_INDX_WR_UPD, ring->idx);
8707 	} else {
8708 			dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
8709 				sizeof(uint16), RING_WR_UPD, ring->idx);
8710 	}
8711 
8712 	/* raise h2d interrupt */
8713 	if (IDMA_ACTIVE(dhd) ||
8714 		(IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings))) {
8715 		db_index = IDMA_IDX0;
8716 		/* this api is called in wl down path..in that case sih is freed already */
8717 		if (dhd->bus->sih) {
8718 			corerev = dhd->bus->sih->buscorerev;
8719 			/* We need to explicitly configure the type of DMA for core rev >= 24 */
8720 			if (corerev >= 24) {
8721 				db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
8722 			}
8723 		}
8724 		prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
8725 	} else {
8726 		prot->mb_ring_fn(dhd->bus, ring->wr);
8727 	}
8728 }
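/*
 * Editor's illustration of the doorbell composition above (corerev value
 * hypothetical): on a bus core rev >= 24 with IDMA active, the DMA type is
 * encoded into the doorbell word alongside the index selector:
 *
 *	db_index = IDMA_IDX0 | (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
 *	prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
 *
 * whereas the legacy path simply rings the doorbell with the new WR index
 * via prot->mb_ring_fn(dhd->bus, ring->wr).
 */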
8729 
8730 static void BCMFASTPATH
8731 dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
8732 	uint16 nitems)
8733 {
8734 	unsigned long flags_bus;
8735 	DHD_BUS_LOCK(dhd->bus->bus_lock, flags_bus);
8736 	__dhd_prot_ring_write_complete(dhd, ring, p, nitems);
8737 	DHD_BUS_UNLOCK(dhd->bus->bus_lock, flags_bus);
8738 }
8739 
8740 /**
8741  * dhd_prot_ring_write_complete_mbdata - will be called from dhd_prot_h2d_mbdata_send_ctrlmsg,
8742  * which will hold DHD_BUS_LOCK to update WR pointer, Ring DB and also update bus_low_power_state
8743  * to indicate D3_INFORM sent in the same BUS_LOCK.
8744  */
8745 static void BCMFASTPATH
8746 dhd_prot_ring_write_complete_mbdata(dhd_pub_t *dhd, msgbuf_ring_t * ring, void *p,
8747 	uint16 nitems, uint32 mb_data)
8748 {
8749 	unsigned long flags_bus;
8750 
8751 	DHD_BUS_LOCK(dhd->bus->bus_lock, flags_bus);
8752 
8753 	__dhd_prot_ring_write_complete(dhd, ring, p, nitems);
8754 
8755 	/* Mark D3_INFORM in the same context to skip ringing H2D DB after D3_INFORM */
8756 	if (mb_data == H2D_HOST_D3_INFORM) {
8757 		dhd->bus->bus_low_power_state = DHD_BUS_D3_INFORM_SENT;
8758 	}
8759 
8760 	DHD_BUS_UNLOCK(dhd->bus->bus_lock, flags_bus);
8761 }
8762 
8763 /**
8764  * dhd_prot_upd_read_idx - Host updates the new RD index on consuming messages
8765  * from a D2H ring. The new RD index will be updated in the DMA Index array or
8766  * directly in dongle's ring state memory.
8767  */
8768 static void
8769 dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
8770 {
8771 	dhd_prot_t *prot = dhd->prot;
8772 	uint32 db_index;
8773 	uint corerev;
8774 
8775 	/* For HWA, update db_index and ring mb2 DB and return */
8776 	if (HWA_ACTIVE(dhd) && ring->hwa_db_type) {
8777 		db_index = HWA_DB_INDEX_VALUE(ring->rd) | ring->hwa_db_type;
8778 		DHD_TRACE(("%s: ring(%s) rd(0x%x) hwa_db_type(0x%x) db_index(0x%x)\n",
8779 			__FUNCTION__, ring->name, ring->rd, ring->hwa_db_type, db_index));
8780 		prot->mb_2_ring_fn(dhd->bus, db_index, FALSE);
8781 		return;
8782 	}
8783 
8784 	/* update read index */
8785 	/* If DMA'ing of the indices is supported,
8786 	 * update the RD index in host memory;
8787 	 * otherwise update it in TCM.
8788 	 */
8789 	if (IDMA_ACTIVE(dhd)) {
8790 		dhd_prot_dma_indx_set(dhd, ring->rd,
8791 			D2H_DMA_INDX_RD_UPD, ring->idx);
8792 		db_index = IDMA_IDX1;
8793 		if (dhd->bus->sih) {
8794 			corerev = dhd->bus->sih->buscorerev;
8795 			/* We need to explicitly configure the type of DMA for core rev >= 24 */
8796 			if (corerev >= 24) {
8797 				db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
8798 			}
8799 		}
8800 		prot->mb_2_ring_fn(dhd->bus, db_index, FALSE);
8801 	} else if (dhd->dma_h2d_ring_upd_support) {
8802 		dhd_prot_dma_indx_set(dhd, ring->rd,
8803 		                      D2H_DMA_INDX_RD_UPD, ring->idx);
8804 	} else {
8805 		dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
8806 			sizeof(uint16), RING_RD_UPD, ring->idx);
8807 	}
8808 }
8809 
8810 static int
8811 dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
8812 	uint16 ring_type, uint32 req_id)
8813 {
8814 	unsigned long flags;
8815 	d2h_ring_create_req_t  *d2h_ring;
8816 	uint16 alloced = 0;
8817 	int ret = BCME_OK;
8818 	uint16 max_h2d_rings = dhd->bus->max_submission_rings;
8819 	msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
8820 
8821 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
8822 
8823 	DHD_TRACE(("%s trying to send D2H ring create Req\n", __FUNCTION__));
8824 
8825 	if (ring_to_create == NULL) {
8826 		DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
8827 		ret = BCME_ERROR;
8828 		goto err;
8829 	}
8830 
8831 	/* Request for ring buffer space */
8832 	d2h_ring = (d2h_ring_create_req_t *) dhd_prot_alloc_ring_space(dhd,
8833 		ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
8834 		&alloced, FALSE);
8835 
8836 	if (d2h_ring == NULL) {
8837 		DHD_ERROR(("%s: FATAL: No space in control ring to send D2H ring create\n",
8838 			__FUNCTION__));
8839 		ret = BCME_NOMEM;
8840 		goto err;
8841 	}
8842 	ring_to_create->create_req_id = (uint16)req_id;
8843 	ring_to_create->create_pending = TRUE;
8844 
8845 	/* Common msg buf hdr */
8846 	d2h_ring->msg.msg_type = MSG_TYPE_D2H_RING_CREATE;
8847 	d2h_ring->msg.if_id = 0;
8848 	d2h_ring->msg.flags = ctrl_ring->current_phase;
8849 	d2h_ring->msg.request_id = htol32(ring_to_create->create_req_id);
8850 	d2h_ring->ring_id = htol16(DHD_D2H_RING_OFFSET(ring_to_create->idx, max_h2d_rings));
8851 	DHD_ERROR(("%s ringid: %d idx: %d max_h2d: %d\n", __FUNCTION__, d2h_ring->ring_id,
8852 			ring_to_create->idx, max_h2d_rings));
8853 
8854 	d2h_ring->ring_type = ring_type;
8855 	d2h_ring->max_items = htol16(ring_to_create->max_items);
8856 	d2h_ring->len_item = htol16(ring_to_create->item_len);
8857 	d2h_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
8858 	d2h_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
8859 
8860 	d2h_ring->flags = 0;
8861 	d2h_ring->msg.epoch =
8862 		ctrl_ring->seqnum % H2D_EPOCH_MODULO;
8863 	ctrl_ring->seqnum++;
8864 #ifdef EWP_EDL
8865 	if (ring_type == BCMPCIE_D2H_RING_TYPE_EDL) {
8866 		DHD_ERROR(("%s: sending d2h EDL ring create: "
8867 			"\n max items=%u; len_item=%u; ring_id=%u; low_addr=0x%x; high_addr=0x%x\n",
8868 			__FUNCTION__, ltoh16(d2h_ring->max_items),
8869 			ltoh16(d2h_ring->len_item),
8870 			ltoh16(d2h_ring->ring_id),
8871 			d2h_ring->ring_ptr.low_addr,
8872 			d2h_ring->ring_ptr.high_addr));
8873 	}
8874 #endif /* EWP_EDL */
8875 
8876 	/* Update the flow_ring's WRITE index */
8877 	dhd_prot_ring_write_complete(dhd, ctrl_ring, d2h_ring,
8878 		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
8879 
8880 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
8881 
8882 	return ret;
8883 err:
8884 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
8885 
8886 	return ret;
8887 }
8888 
8889 static int
8890 dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create, uint8 ring_type, uint32 id)
8891 {
8892 	unsigned long flags;
8893 	h2d_ring_create_req_t  *h2d_ring;
8894 	uint16 alloced = 0;
8895 	uint8 i = 0;
8896 	int ret = BCME_OK;
8897 	msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
8898 
8899 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
8900 
8901 	DHD_TRACE(("%s trying to send H2D ring create Req\n", __FUNCTION__));
8902 
8903 	if (ring_to_create == NULL) {
8904 		DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
8905 		ret = BCME_ERROR;
8906 		goto err;
8907 	}
8908 
8909 	/* Request for ring buffer space */
8910 	h2d_ring = (h2d_ring_create_req_t *)dhd_prot_alloc_ring_space(dhd,
8911 		ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
8912 		&alloced, FALSE);
8913 
8914 	if (h2d_ring == NULL) {
8915 		DHD_ERROR(("%s: FATAL: No space in control ring to send H2D ring create\n",
8916 			__FUNCTION__));
8917 		ret = BCME_NOMEM;
8918 		goto err;
8919 	}
8920 	ring_to_create->create_req_id = (uint16)id;
8921 	ring_to_create->create_pending = TRUE;
8922 
8923 	/* Common msg buf hdr */
8924 	h2d_ring->msg.msg_type = MSG_TYPE_H2D_RING_CREATE;
8925 	h2d_ring->msg.if_id = 0;
8926 	h2d_ring->msg.request_id = htol32(ring_to_create->create_req_id);
8927 	h2d_ring->msg.flags = ctrl_ring->current_phase;
8928 	h2d_ring->ring_id = htol16(DHD_H2D_RING_OFFSET(ring_to_create->idx));
8929 	h2d_ring->ring_type = ring_type;
8930 	h2d_ring->max_items = htol16(H2DRING_DYNAMIC_INFO_MAX_ITEM);
8931 	h2d_ring->n_completion_ids = ring_to_create->n_completion_ids;
8932 	h2d_ring->len_item = htol16(H2DRING_INFO_BUFPOST_ITEMSIZE);
8933 	h2d_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
8934 	h2d_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
8935 
8936 	for (i = 0; i < ring_to_create->n_completion_ids; i++) {
8937 		h2d_ring->completion_ring_ids[i] = htol16(ring_to_create->compeltion_ring_ids[i]);
8938 	}
8939 
8940 	h2d_ring->flags = 0;
8941 	h2d_ring->msg.epoch =
8942 		ctrl_ring->seqnum % H2D_EPOCH_MODULO;
8943 	ctrl_ring->seqnum++;
8944 
8945 	/* Update the flow_ring's WRITE index */
8946 	dhd_prot_ring_write_complete(dhd, ctrl_ring, h2d_ring,
8947 		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
8948 
8949 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
8950 
8951 	return ret;
8952 err:
8953 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
8954 
8955 	return ret;
8956 }
8957 
8958 /**
8959  * dhd_prot_dma_indx_set - set a new WR or RD index in the DMA index array.
8960  * Dongle will DMA the entire array (if DMA_INDX feature is enabled).
8961  * See dhd_prot_dma_indx_init()
8962  */
8963 void
8964 dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, uint16 ringid)
8965 {
8966 	uint8 *ptr;
8967 	uint16 offset;
8968 	dhd_prot_t *prot = dhd->prot;
8969 	uint16 max_h2d_rings = dhd->bus->max_submission_rings;
8970 
8971 	switch (type) {
8972 		case H2D_DMA_INDX_WR_UPD:
8973 			ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
8974 			offset = DHD_H2D_RING_OFFSET(ringid);
8975 			break;
8976 
8977 		case D2H_DMA_INDX_RD_UPD:
8978 			ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
8979 			offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
8980 			break;
8981 
8982 		case H2D_IFRM_INDX_WR_UPD:
8983 			ptr = (uint8 *)(prot->h2d_ifrm_indx_wr_buf.va);
8984 			offset = DHD_H2D_FRM_FLOW_RING_OFFSET(ringid);
8985 			break;
8986 
8987 		default:
8988 			DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
8989 				__FUNCTION__));
8990 			return;
8991 	}
8992 
8993 	ASSERT(prot->rw_index_sz != 0);
8994 	ptr += offset * prot->rw_index_sz;
8995 
8996 	*(uint16*)ptr = htol16(new_index);
8997 
8998 	OSL_CACHE_FLUSH((void *)ptr, prot->rw_index_sz);
8999 
9000 	DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
9001 		__FUNCTION__, new_index, type, ringid, ptr, offset));
9002 
9003 } /* dhd_prot_dma_indx_set */
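/*
 * Editor's worked example of the index-array addressing above (values
 * hypothetical): with rw_index_sz = 2 and DHD_H2D_RING_OFFSET(ringid) = 3,
 * the new WR index is stored at
 *
 *	ptr = (uint8 *)prot->h2d_dma_indx_wr_buf.va + 3 * 2;
 *
 * i.e. entry 3 of a packed little-endian uint16 array that the dongle
 * DMAes in its entirety.
 */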
9004 
9005 /**
9006  * dhd_prot_dma_indx_get - Fetch a WR or RD index from the dongle DMA-ed index
9007  * array.
9008  * Dongle DMAes an entire array to host memory (if the feature is enabled).
9009  * See dhd_prot_dma_indx_init()
9010  */
9011 static uint16
9012 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid)
9013 {
9014 	uint8 *ptr;
9015 	uint16 data;
9016 	uint16 offset;
9017 	dhd_prot_t *prot = dhd->prot;
9018 	uint16 max_h2d_rings = dhd->bus->max_submission_rings;
9019 
9020 	switch (type) {
9021 		case H2D_DMA_INDX_WR_UPD:
9022 			ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
9023 			offset = DHD_H2D_RING_OFFSET(ringid);
9024 			break;
9025 
9026 		case H2D_DMA_INDX_RD_UPD:
9027 			ptr = (uint8 *)(prot->h2d_dma_indx_rd_buf.va);
9028 			offset = DHD_H2D_RING_OFFSET(ringid);
9029 			break;
9030 
9031 		case D2H_DMA_INDX_WR_UPD:
9032 			ptr = (uint8 *)(prot->d2h_dma_indx_wr_buf.va);
9033 			offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
9034 			break;
9035 
9036 		case D2H_DMA_INDX_RD_UPD:
9037 			ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
9038 			offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
9039 			break;
9040 
9041 		default:
9042 			DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
9043 				__FUNCTION__));
9044 			return 0;
9045 	}
9046 
9047 	ASSERT(prot->rw_index_sz != 0);
9048 	ptr += offset * prot->rw_index_sz;
9049 
9050 	OSL_CACHE_INV((void *)ptr, prot->rw_index_sz);
9051 
9052 	data = LTOH16(*((uint16*)ptr));
9053 
9054 	DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
9055 		__FUNCTION__, data, type, ringid, ptr, offset));
9056 
9057 	return (data);
9058 
9059 } /* dhd_prot_dma_indx_get */
9060 
9061 /**
9062  * An array of DMA read/write indices, containing information about host rings, can be maintained
9063  * either in host memory or in device memory, depending on preprocessor options. Depending on these
9064  * options, this function is called during driver initialization. It reserves and initializes
9065  * blocks of DMA'able host memory containing an array of DMA read or DMA write indices. The physical
9066  * addresses of these host memory blocks are communicated to the dongle later on. By reading this
9067  * host memory, the dongle learns about the state of the host rings.
9068  */
9069 
9070 static INLINE int
9071 dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
9072 	dhd_dma_buf_t *dma_buf, uint32 bufsz)
9073 {
9074 	int rc;
9075 
9076 	if ((dma_buf->len == bufsz) || (dma_buf->va != NULL))
9077 		return BCME_OK;
9078 
9079 	rc = dhd_dma_buf_alloc(dhd, dma_buf, bufsz);
9080 
9081 	return rc;
9082 }
9083 
9084 int
9085 dhd_prot_dma_indx_init(dhd_pub_t *dhd, uint32 rw_index_sz, uint8 type, uint32 length)
9086 {
9087 	uint32 bufsz;
9088 	dhd_prot_t *prot = dhd->prot;
9089 	dhd_dma_buf_t *dma_buf;
9090 
9091 	if (prot == NULL) {
9092 		DHD_ERROR(("prot is not inited\n"));
9093 		return BCME_ERROR;
9094 	}
9095 
9096 	/* Dongle advertises 2B or 4B RW index size */
9097 	ASSERT(rw_index_sz != 0);
9098 	prot->rw_index_sz = rw_index_sz;
9099 
9100 	bufsz = rw_index_sz * length;
9101 
9102 	switch (type) {
9103 		case H2D_DMA_INDX_WR_BUF:
9104 			dma_buf = &prot->h2d_dma_indx_wr_buf;
9105 			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
9106 				goto ret_no_mem;
9107 			DHD_ERROR(("H2D DMA WR INDX : array size %d = %d * %d\n",
9108 				dma_buf->len, rw_index_sz, length));
9109 			break;
9110 
9111 		case H2D_DMA_INDX_RD_BUF:
9112 			dma_buf = &prot->h2d_dma_indx_rd_buf;
9113 			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
9114 				goto ret_no_mem;
9115 			DHD_ERROR(("H2D DMA RD INDX : array size %d = %d * %d\n",
9116 				dma_buf->len, rw_index_sz, length));
9117 			break;
9118 
9119 		case D2H_DMA_INDX_WR_BUF:
9120 			dma_buf = &prot->d2h_dma_indx_wr_buf;
9121 			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
9122 				goto ret_no_mem;
9123 			DHD_ERROR(("D2H DMA WR INDX : array size %d = %d * %d\n",
9124 				dma_buf->len, rw_index_sz, length));
9125 			break;
9126 
9127 		case D2H_DMA_INDX_RD_BUF:
9128 			dma_buf = &prot->d2h_dma_indx_rd_buf;
9129 			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
9130 				goto ret_no_mem;
9131 			DHD_ERROR(("D2H DMA RD INDX : array size %d = %d * %d\n",
9132 				dma_buf->len, rw_index_sz, length));
9133 			break;
9134 
9135 		case H2D_IFRM_INDX_WR_BUF:
9136 			dma_buf = &prot->h2d_ifrm_indx_wr_buf;
9137 			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
9138 				goto ret_no_mem;
9139 			DHD_ERROR(("H2D IFRM WR INDX : array size %d = %d * %d\n",
9140 				dma_buf->len, rw_index_sz, length));
9141 			break;
9142 
9143 		default:
9144 			DHD_ERROR(("%s: Unexpected option\n", __FUNCTION__));
9145 			return BCME_BADOPTION;
9146 	}
9147 
9148 	return BCME_OK;
9149 
9150 ret_no_mem:
9151 	DHD_ERROR(("%s: dhd_prot_dma_indx_alloc type %d buf_sz %d failure\n",
9152 		__FUNCTION__, type, bufsz));
9153 	return BCME_NOMEM;
9154 
9155 } /* dhd_prot_dma_indx_init */
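/*
 * Editor's sizing example for the above (hypothetical numbers): a dongle
 * advertising rw_index_sz = 2 with length = 40 rings gets one 80-byte
 * DMA-able block per index array; the physical addresses of these blocks
 * are communicated to the dongle later on.
 */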
9156 
9157 /**
9158  * Called on checking for 'completion' messages from the dongle. Returns next host buffer to read
9159  * from, or NULL if there are no more messages to read.
9160  */
9161 static uint8*
9162 dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 *available_len)
9163 {
9164 	uint16 wr;
9165 	uint16 rd;
9166 	uint16 depth;
9167 	uint16 items;
9168 	void  *read_addr = NULL; /* address of next msg to be read in ring */
9169 	uint16 d2h_wr = 0;
9170 
9171 	DHD_TRACE(("%s: d2h_dma_indx_rd_buf %p, d2h_dma_indx_wr_buf %p\n",
9172 		__FUNCTION__, (uint32 *)(dhd->prot->d2h_dma_indx_rd_buf.va),
9173 		(uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va)));
9174 
9175 	/* Remember the read index in a variable.
9176 	 * This is because ring->rd gets updated at the end of this function,
9177 	 * so the exact read index from which a message was read would
9178 	 * otherwise be lost for printing.
9179 	 */
9180 	ring->curr_rd = ring->rd;
9181 
9182 	/* update write pointer */
9183 	if (dhd->dma_d2h_ring_upd_support) {
9184 		/* DMAing write/read indices supported */
9185 		d2h_wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
9186 		ring->wr = d2h_wr;
9187 	} else {
9188 		dhd_bus_cmn_readshared(dhd->bus, &(ring->wr), RING_WR_UPD, ring->idx);
9189 	}
9190 
9191 	wr = ring->wr;
9192 	rd = ring->rd;
9193 	depth = ring->max_items;
9194 
9195 	/* check for avail space, in number of ring items */
9196 	items = READ_AVAIL_SPACE(wr, rd, depth);
9197 	if (items == 0)
9198 		return NULL;
9199 
9200 	/*
9201 	 * Note that there are builds where ASSERT translates to just a printk,
9202 	 * so even if we hit this condition we would never halt, and
9203 	 * dhd_prot_process_msgtype could then get into a big loop if this
9204 	 * happens.
9205 	 */
9206 	if (items > ring->max_items) {
9207 		DHD_ERROR(("\r\n======================= \r\n"));
9208 		DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
9209 			__FUNCTION__, ring, ring->name, ring->max_items, items));
9210 		DHD_ERROR(("wr: %d,  rd: %d,  depth: %d  \r\n", wr, rd, depth));
9211 		DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
9212 			dhd->busstate, dhd->bus->wait_for_d3_ack));
9213 		DHD_ERROR(("\r\n======================= \r\n"));
9214 #ifdef SUPPORT_LINKDOWN_RECOVERY
9215 		if (wr >= ring->max_items) {
9216 			dhd->bus->read_shm_fail = TRUE;
9217 		}
9218 #else
9219 #ifdef DHD_FW_COREDUMP
9220 		if (dhd->memdump_enabled) {
9221 			/* collect core dump */
9222 			dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
9223 			dhd_bus_mem_dump(dhd);
9224 
9225 		}
9226 #endif /* DHD_FW_COREDUMP */
9227 #endif /* SUPPORT_LINKDOWN_RECOVERY */
9228 
9229 		*available_len = 0;
9230 		dhd_schedule_reset(dhd);
9231 
9232 		return NULL;
9233 	}
9234 
9235 	/* if space is available, calculate address to be read */
9236 	read_addr = (char*)ring->dma_buf.va + (rd * ring->item_len);
9237 
9238 	/* update read pointer */
9239 	if ((ring->rd + items) >= ring->max_items)
9240 		ring->rd = 0;
9241 	else
9242 		ring->rd += items;
9243 
9244 	ASSERT(ring->rd < ring->max_items);
9245 
9246 	/* convert items to bytes : available_len must be 32bits */
9247 	*available_len = (uint32)(items * ring->item_len);
9248 
9249 	OSL_CACHE_INV(read_addr, *available_len);
9250 
9251 	/* return read address */
9252 	return read_addr;
9253 
9254 } /* dhd_prot_get_read_addr */
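/*
 * Editor's worked example of the read-side arithmetic above (hypothetical
 * values): with depth = 16, rd = 14 and a freshly fetched wr = 3,
 * READ_AVAIL_SPACE yields the 2 items at slots 14..15 (a single call never
 * reads across the wrap), *available_len = 2 * item_len, and ring->rd wraps
 * to 0 so the remaining 3 items are picked up on the next pass.
 */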
9255 
9256 /**
9257  * dhd_prot_h2d_mbdata_send_ctrlmsg is a non-atomic function,
9258  * make sure the callers always hold appropriate locks.
9259  */
9260 int dhd_prot_h2d_mbdata_send_ctrlmsg(dhd_pub_t *dhd, uint32 mb_data)
9261 {
9262 	h2d_mailbox_data_t *h2d_mb_data;
9263 	uint16 alloced = 0;
9264 	msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
9265 	unsigned long flags;
9266 	int num_post = 1;
9267 	int i;
9268 
9269 	DHD_INFO(("%s Sending H2D MB data Req data 0x%04x\n",
9270 		__FUNCTION__, mb_data));
9271 	if (!ctrl_ring->inited) {
9272 		DHD_ERROR(("%s: Ctrl Submit Ring: not inited\n", __FUNCTION__));
9273 		return BCME_ERROR;
9274 	}
9275 
9276 	for (i = 0; i < num_post; i++) {
9277 		DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
9278 		/* Request for ring buffer space */
9279 		h2d_mb_data = (h2d_mailbox_data_t *)dhd_prot_alloc_ring_space(dhd,
9280 			ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
9281 			&alloced, FALSE);
9282 
9283 		if (h2d_mb_data == NULL) {
9284 			DHD_ERROR(("%s: FATAL: No space in control ring to send H2D Mb data\n",
9285 				__FUNCTION__));
9286 			DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9287 			return BCME_NOMEM;
9288 		}
9289 
9290 		memset(h2d_mb_data, 0, sizeof(h2d_mailbox_data_t));
9291 		/* Common msg buf hdr */
9292 		h2d_mb_data->msg.msg_type = MSG_TYPE_H2D_MAILBOX_DATA;
9293 		h2d_mb_data->msg.flags = ctrl_ring->current_phase;
9294 
9295 		h2d_mb_data->msg.epoch =
9296 			ctrl_ring->seqnum % H2D_EPOCH_MODULO;
9297 		ctrl_ring->seqnum++;
9298 
9299 		/* Update the mailbox data */
9300 		h2d_mb_data->mail_box_data = htol32(mb_data);
9304 
9305 		DHD_INFO(("%s Send H2D MB data Req data 0x%04x\n", __FUNCTION__, mb_data));
9306 
9307 		/* upd wrt ptr and raise interrupt */
9308 		dhd_prot_ring_write_complete_mbdata(dhd, ctrl_ring, h2d_mb_data,
9309 			DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, mb_data);
9310 
9311 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9312 
9313 	}
9314 	return 0;
9315 }
9316 
9317 /** Creates a flow ring and informs dongle of this event */
9318 int
9319 dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
9320 {
9321 	tx_flowring_create_request_t *flow_create_rqst;
9322 	msgbuf_ring_t *flow_ring;
9323 	dhd_prot_t *prot = dhd->prot;
9324 	unsigned long flags;
9325 	uint16 alloced = 0;
9326 	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
9327 	uint16 max_flowrings = dhd->bus->max_tx_flowrings;
9328 
9329 	/* Fetch a pre-initialized msgbuf_ring from the flowring pool */
9330 	flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
9331 	if (flow_ring == NULL) {
9332 		DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
9333 			__FUNCTION__, flow_ring_node->flowid));
9334 		return BCME_NOMEM;
9335 	}
9336 
9337 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
9338 
9339 	/* Request for ctrl_ring buffer space */
9340 	flow_create_rqst = (tx_flowring_create_request_t *)
9341 		dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
9342 
9343 	if (flow_create_rqst == NULL) {
9344 		dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
9345 		DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n",
9346 			__FUNCTION__, flow_ring_node->flowid));
9347 		DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
9348 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9349 	}
9350 
9351 	flow_ring_node->prot_info = (void *)flow_ring;
9352 
9353 	/* Common msg buf hdr */
9354 	flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE;
9355 	flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
9356 	flow_create_rqst->msg.request_id = htol32(0); /* TBD */
9357 	flow_create_rqst->msg.flags = ctrl_ring->current_phase;
9358 
9359 	flow_create_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
9360 	ctrl_ring->seqnum++;
9361 
9362 	/* Update flow create message */
9363 	flow_create_rqst->tid = flow_ring_node->flow_info.tid;
9364 	flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
9365 	memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa, sizeof(flow_create_rqst->sa));
9366 	memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da, sizeof(flow_create_rqst->da));
9367 	/* CAUTION: ring::base_addr already in Little Endian */
9368 	flow_create_rqst->flow_ring_ptr.low_addr = flow_ring->base_addr.low_addr;
9369 	flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr;
9370 	flow_create_rqst->max_items = htol16(prot->h2d_max_txpost);
9371 	flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE);
9372 	flow_create_rqst->if_flags = 0;
9373 
9374 #ifdef DHD_HP2P
9375 	/* Create an HPP flow ring if HP2P is enabled, TID == 7, the interface is
9376 	 * AWDL, and the traffic is not multicast. Allow an infra interface only if
9377 	 * the user enabled hp2p_infra_enable through an iovar; allow only one HP2P
9378 	 * flow to be active at a time. */
9379 	if (dhd->hp2p_capable && !dhd->hp2p_ring_active &&
9380 		flow_ring_node->flow_info.tid == HP2P_PRIO &&
9381 		(dhd->hp2p_infra_enable || flow_create_rqst->msg.if_id) &&
9382 		!ETHER_ISMULTI(flow_create_rqst->da)) {
9383 		flow_create_rqst->if_flags |= BCMPCIE_FLOW_RING_INTF_HP2P;
9384 		flow_ring_node->hp2p_ring = TRUE;
9385 		dhd->hp2p_ring_active = TRUE;
9386 
9387 		DHD_ERROR(("%s: flow ring for HP2P tid = %d flowid = %d\n",
9388 				__FUNCTION__, flow_ring_node->flow_info.tid,
9389 				flow_ring_node->flowid));
9390 	}
9391 #endif /* DHD_HP2P */
9392 
9393 	/* definition for ifrm mask : bit0:d11ac core, bit1:d11ad core
9394 	 * currently it is not used for priority. so uses solely for ifrm mask
9395 	 */
9396 	if (IFRM_ACTIVE(dhd))
9397 		flow_create_rqst->priority_ifrmmask = (1 << IFRM_DEV_0);
9398 
9399 	DHD_ERROR(("%s: Send Flow Create Req flow ID %d for peer " MACDBG
9400 		" prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
9401 		MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
9402 		flow_ring_node->flow_info.ifindex));
9403 
9404 	/* Update the flow_ring's WRITE index */
9405 	if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
9406 		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
9407 		                      H2D_DMA_INDX_WR_UPD, flow_ring->idx);
9408 	} else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(flow_ring->idx, max_flowrings)) {
9409 		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
9410 			H2D_IFRM_INDX_WR_UPD, flow_ring->idx);
9411 	} else {
9412 		dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
9413 			sizeof(uint16), RING_WR_UPD, flow_ring->idx);
9414 	}
9415 
9416 	/* update control subn ring's WR index and ring doorbell to dongle */
9417 	dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_create_rqst, 1);
9418 
9419 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9420 
9421 	return BCME_OK;
9422 } /* dhd_prot_flow_ring_create */
9423 
9424 /** called on receiving MSG_TYPE_FLOW_RING_CREATE_CMPLT message from dongle */
9425 static void
9426 dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg)
9427 {
9428 	tx_flowring_create_response_t *flow_create_resp = (tx_flowring_create_response_t *)msg;
9429 
9430 	DHD_ERROR(("%s: Flow Create Response status = %d Flow %d\n", __FUNCTION__,
9431 		ltoh16(flow_create_resp->cmplt.status),
9432 		ltoh16(flow_create_resp->cmplt.flow_ring_id)));
9433 
9434 	dhd_bus_flow_ring_create_response(dhd->bus,
9435 		ltoh16(flow_create_resp->cmplt.flow_ring_id),
9436 		ltoh16(flow_create_resp->cmplt.status));
9437 }
9438 
9439 static void
9440 dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf)
9441 {
9442 	h2d_ring_create_response_t *resp = (h2d_ring_create_response_t *)buf;
9443 	DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
9444 		ltoh16(resp->cmplt.status),
9445 		ltoh16(resp->cmplt.ring_id),
9446 		ltoh32(resp->cmn_hdr.request_id)));
9447 	if ((ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_DBGRING_REQ_PKTID) &&
9448 		(ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_BTLOGRING_REQ_PKTID)) {
9449 		DHD_ERROR(("invalid request ID with h2d ring create complete\n"));
9450 		return;
9451 	}
9452 	if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id) &&
9453 		!dhd->prot->h2dring_info_subn->create_pending) {
9454 		DHD_ERROR(("info ring create status for not pending submit ring\n"));
9455 	}
9456 
9457 	if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
9458 		DHD_ERROR(("info/btlog ring create failed with status %d\n",
9459 			ltoh16(resp->cmplt.status)));
9460 		return;
9461 	}
9462 	if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id)) {
9463 		dhd->prot->h2dring_info_subn->create_pending = FALSE;
9464 		dhd->prot->h2dring_info_subn->inited = TRUE;
9465 		DHD_ERROR(("info buffer post after ring create\n"));
9466 		dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
9467 	}
9468 }
9469 
9470 static void
9471 dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf)
9472 {
9473 	d2h_ring_create_response_t *resp = (d2h_ring_create_response_t *)buf;
9474 	DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
9475 		ltoh16(resp->cmplt.status),
9476 		ltoh16(resp->cmplt.ring_id),
9477 		ltoh32(resp->cmn_hdr.request_id)));
9478 	if ((ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_DBGRING_REQ_PKTID) &&
9479 		(ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_BTLOGRING_REQ_PKTID) &&
9480 #ifdef DHD_HP2P
9481 		(ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_TXREQ_PKTID) &&
9482 		(ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_RXREQ_PKTID) &&
9483 #endif /* DHD_HP2P */
9484 		TRUE) {
9485 		DHD_ERROR(("invalid request ID with d2h ring create complete\n"));
9486 		return;
9487 	}
9488 	if (ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_DBGRING_REQ_PKTID) {
9489 #ifdef EWP_EDL
9490 		if (!dhd->dongle_edl_support)
9491 #endif // endif
9492 		{
9493 			if (!dhd->prot->d2hring_info_cpln->create_pending) {
9494 				DHD_ERROR(("info ring create status for not pending cpl ring\n"));
9495 				return;
9496 			}
9497 
9498 			if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
9499 				DHD_ERROR(("info cpl ring create failed with status %d\n",
9500 					ltoh16(resp->cmplt.status)));
9501 				return;
9502 			}
9503 			dhd->prot->d2hring_info_cpln->create_pending = FALSE;
9504 			dhd->prot->d2hring_info_cpln->inited = TRUE;
9505 		}
9506 #ifdef EWP_EDL
9507 		else {
9508 			if (!dhd->prot->d2hring_edl->create_pending) {
9509 				DHD_ERROR(("edl ring create status for not pending cpl ring\n"));
9510 				return;
9511 			}
9512 
9513 			if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
9514 				DHD_ERROR(("edl cpl ring create failed with status %d\n",
9515 					ltoh16(resp->cmplt.status)));
9516 				return;
9517 			}
9518 			dhd->prot->d2hring_edl->create_pending = FALSE;
9519 			dhd->prot->d2hring_edl->inited = TRUE;
9520 		}
9521 #endif /* EWP_EDL */
9522 	}
9523 
9524 #ifdef DHD_HP2P
9525 	if (dhd->prot->d2hring_hp2p_txcpl &&
9526 		ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_TXREQ_PKTID) {
9527 		if (!dhd->prot->d2hring_hp2p_txcpl->create_pending) {
9528 			DHD_ERROR(("HPP tx ring create status for not pending cpl ring\n"));
9529 			return;
9530 		}
9531 
9532 		if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
9533 			DHD_ERROR(("HPP tx cpl ring create failed with status %d\n",
9534 				ltoh16(resp->cmplt.status)));
9535 			return;
9536 		}
9537 		dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
9538 		dhd->prot->d2hring_hp2p_txcpl->inited = TRUE;
9539 	}
9540 	if (dhd->prot->d2hring_hp2p_rxcpl &&
9541 		ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_RXREQ_PKTID) {
9542 		if (!dhd->prot->d2hring_hp2p_rxcpl->create_pending) {
9543 			DHD_ERROR(("HPP rx ring create status for not pending cpl ring\n"));
9544 			return;
9545 		}
9546 
9547 		if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
9548 			DHD_ERROR(("HPP rx cpl ring create failed with status %d\n",
9549 				ltoh16(resp->cmplt.status)));
9550 			return;
9551 		}
9552 		dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
9553 		dhd->prot->d2hring_hp2p_rxcpl->inited = TRUE;
9554 	}
9555 #endif /* DHD_HP2P */
9556 }
9557 
9558 static void
9559 dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf)
9560 {
9561 	d2h_mailbox_data_t *d2h_data;
9562 
9563 	d2h_data = (d2h_mailbox_data_t *)buf;
9564 	DHD_INFO(("%s dhd_prot_process_d2h_mb_data, 0x%04x\n", __FUNCTION__,
9565 		d2h_data->d2h_mailbox_data));
9566 	dhd_bus_handle_mb_data(dhd->bus, d2h_data->d2h_mailbox_data);
9567 }
9568 
9569 static void
9570 dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf)
9571 {
9572 	DHD_ERROR(("Timesunc feature not compiled in but GOT HOST_TS_COMPLETE\n"));
9573 
9574 }
9575 
9576 /** called on e.g. flow ring delete */
9577 void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info)
9578 {
9579 	msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
9580 	dhd_prot_ring_detach(dhd, flow_ring);
9581 	DHD_INFO(("%s Cleaning up Flow \n", __FUNCTION__));
9582 }
9583 
9584 void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info,
9585 	struct bcmstrbuf *strbuf, const char * fmt)
9586 {
9587 	const char *default_fmt =
9588 		"RD %d WR %d BASE(VA) %p BASE(PA) %x:%x SIZE %d "
9589 		"WORK_ITEM_SIZE %d MAX_WORK_ITEMS %d TOTAL_SIZE %d\n";
9590 	msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
9591 	uint16 rd, wr;
9592 	uint32 dma_buf_len = flow_ring->max_items * flow_ring->item_len;
9593 
9594 	if (fmt == NULL) {
9595 		fmt = default_fmt;
9596 	}
9597 
9598 	if (dhd->bus->is_linkdown) {
9599 		DHD_ERROR(("%s: Skip dumping flowring due to Link down\n", __FUNCTION__));
9600 		return;
9601 	}
9602 
9603 	dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
9604 	dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
9605 	bcm_bprintf(strbuf, fmt, rd, wr, flow_ring->dma_buf.va,
9606 		ltoh32(flow_ring->base_addr.high_addr),
9607 		ltoh32(flow_ring->base_addr.low_addr),
9608 		flow_ring->item_len, flow_ring->max_items,
9609 		dma_buf_len);
9610 }
9611 
9612 void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
9613 {
9614 	dhd_prot_t *prot = dhd->prot;
9615 	bcm_bprintf(strbuf, "IPCrevs: Dev %d, \t Host %d, \tactive %d\n",
9616 		dhd->prot->device_ipc_version,
9617 		dhd->prot->host_ipc_version,
9618 		dhd->prot->active_ipc_version);
9619 
9620 	bcm_bprintf(strbuf, "max Host TS bufs to post: %d, \t posted %d \n",
9621 		dhd->prot->max_tsbufpost, dhd->prot->cur_ts_bufs_posted);
9622 	bcm_bprintf(strbuf, "max INFO bufs to post: %d, \t posted %d \n",
9623 		dhd->prot->max_infobufpost, dhd->prot->infobufpost);
9624 	bcm_bprintf(strbuf, "max event bufs to post: %d, \t posted %d \n",
9625 		dhd->prot->max_eventbufpost, dhd->prot->cur_event_bufs_posted);
9626 	bcm_bprintf(strbuf, "max ioctlresp bufs to post: %d, \t posted %d \n",
9627 		dhd->prot->max_ioctlrespbufpost, dhd->prot->cur_ioctlresp_bufs_posted);
9628 	bcm_bprintf(strbuf, "max RX bufs to post: %d, \t posted %d \n",
9629 		dhd->prot->max_rxbufpost, dhd->prot->rxbufpost);
9630 
9631 	bcm_bprintf(strbuf,
9632 		"%14s %5s %5s %17s %17s %14s %14s %10s\n",
9633 		"Type", "RD", "WR", "BASE(VA)", "BASE(PA)",
9634 		"WORK_ITEM_SIZE", "MAX_WORK_ITEMS", "TOTAL_SIZE");
9635 	bcm_bprintf(strbuf, "%14s", "H2DCtrlPost");
9636 	dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, strbuf,
9637 		" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9638 	bcm_bprintf(strbuf, "%14s", "D2HCtrlCpl");
9639 	dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, strbuf,
9640 		" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9641 	bcm_bprintf(strbuf, "%14s", "H2DRxPost", prot->rxbufpost);
9642 	dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, strbuf,
9643 		" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9644 	bcm_bprintf(strbuf, "%14s", "D2HRxCpl");
9645 	dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, strbuf,
9646 		" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9647 	bcm_bprintf(strbuf, "%14s", "D2HTxCpl");
9648 	dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, strbuf,
9649 		" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9650 	if (dhd->prot->h2dring_info_subn != NULL && dhd->prot->d2hring_info_cpln != NULL) {
9651 		bcm_bprintf(strbuf, "%14s", "H2DRingInfoSub");
9652 		dhd_prot_print_flow_ring(dhd, prot->h2dring_info_subn, strbuf,
9653 			" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9654 		bcm_bprintf(strbuf, "%14s", "D2HRingInfoCpl");
9655 		dhd_prot_print_flow_ring(dhd, prot->d2hring_info_cpln, strbuf,
9656 			" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9657 	}
9658 	if (dhd->prot->d2hring_edl != NULL) {
9659 		bcm_bprintf(strbuf, "%14s", "D2HRingEDL");
9660 		dhd_prot_print_flow_ring(dhd, prot->d2hring_edl, strbuf,
9661 			" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9662 	}
9663 
9664 	bcm_bprintf(strbuf, "active_tx_count %d	 pktidmap_avail(ctrl/rx/tx) %d %d %d\n",
9665 		OSL_ATOMIC_READ(dhd->osh, &dhd->prot->active_tx_count),
9666 		DHD_PKTID_AVAIL(dhd->prot->pktid_ctrl_map),
9667 		DHD_PKTID_AVAIL(dhd->prot->pktid_rx_map),
9668 		DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map));
9669 
9670 }
9671 
9672 int
9673 dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
9674 {
9675 	tx_flowring_delete_request_t *flow_delete_rqst;
9676 	dhd_prot_t *prot = dhd->prot;
9677 	unsigned long flags;
9678 	uint16 alloced = 0;
9679 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
9680 
9681 	DHD_RING_LOCK(ring->ring_lock, flags);
9682 
9683 	/* Request for ring buffer space */
9684 	flow_delete_rqst = (tx_flowring_delete_request_t *)
9685 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
9686 
9687 	if (flow_delete_rqst == NULL) {
9688 		DHD_RING_UNLOCK(ring->ring_lock, flags);
9689 		DHD_ERROR(("%s: Flow Delete Req - failure ring space\n", __FUNCTION__));
9690 		return BCME_NOMEM;
9691 	}
9692 
9693 	/* Common msg buf hdr */
9694 	flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE;
9695 	flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
9696 	flow_delete_rqst->msg.request_id = htol32(0); /* TBD */
9697 	flow_delete_rqst->msg.flags = ring->current_phase;
9698 
9699 	flow_delete_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
9700 	ring->seqnum++;
9701 
9702 	/* Update Delete info */
9703 	flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
9704 	flow_delete_rqst->reason = htol16(BCME_OK);
9705 
9706 	DHD_ERROR(("%s: Send Flow Delete Req RING ID %d for peer " MACDBG
9707 		" prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
9708 		MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
9709 		flow_ring_node->flow_info.ifindex));
9710 
9711 	/* update ring's WR index and ring doorbell to dongle */
9712 	dhd_prot_ring_write_complete(dhd, ring, flow_delete_rqst, 1);
9713 
9714 	DHD_RING_UNLOCK(ring->ring_lock, flags);
9715 
9716 	return BCME_OK;
9717 }
9718 
9719 static void BCMFASTPATH
9720 dhd_prot_flow_ring_fastdelete(dhd_pub_t *dhd, uint16 flowid, uint16 rd_idx)
9721 {
9722 	flow_ring_node_t *flow_ring_node = DHD_FLOW_RING(dhd, flowid);
9723 	msgbuf_ring_t *ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
9724 	host_txbuf_cmpl_t txstatus;
9725 	host_txbuf_post_t *txdesc;
9726 	uint16 wr_idx;
9727 
9728 	DHD_INFO(("%s: FAST delete ring, flowid=%d, rd_idx=%d, wr_idx=%d\n",
9729 		__FUNCTION__, flowid, rd_idx, ring->wr));
9730 
9731 	memset(&txstatus, 0, sizeof(txstatus));
9732 	txstatus.compl_hdr.flow_ring_id = flowid;
9733 	txstatus.cmn_hdr.if_id = flow_ring_node->flow_info.ifindex;
9734 	wr_idx = ring->wr;
9735 
9736 	while (wr_idx != rd_idx) {
9737 		if (wr_idx)
9738 			wr_idx--;
9739 		else
9740 			wr_idx = ring->max_items - 1;
9741 		txdesc = (host_txbuf_post_t *)((char *)DHD_RING_BGN_VA(ring) +
9742 			(wr_idx * ring->item_len));
9743 		txstatus.cmn_hdr.request_id = txdesc->cmn_hdr.request_id;
9744 		dhd_prot_txstatus_process(dhd, &txstatus);
9745 	}
9746 }
9747 
9748 static void
9749 dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg)
9750 {
9751 	tx_flowring_delete_response_t *flow_delete_resp = (tx_flowring_delete_response_t *)msg;
9752 
9753 	DHD_ERROR(("%s: Flow Delete Response status = %d Flow %d\n", __FUNCTION__,
9754 		flow_delete_resp->cmplt.status, flow_delete_resp->cmplt.flow_ring_id));
9755 
9756 	if (dhd->fast_delete_ring_support) {
9757 		dhd_prot_flow_ring_fastdelete(dhd, flow_delete_resp->cmplt.flow_ring_id,
9758 			flow_delete_resp->read_idx);
9759 	}
9760 	dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id,
9761 		flow_delete_resp->cmplt.status);
9762 }
9763 
9764 static void
9765 dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg)
9766 {
9767 #ifdef IDLE_TX_FLOW_MGMT
9768 	tx_idle_flowring_resume_response_t	*flow_resume_resp =
9769 		(tx_idle_flowring_resume_response_t *)msg;
9770 
9771 	DHD_ERROR(("%s Flow resume Response status = %d Flow %d\n", __FUNCTION__,
9772 		flow_resume_resp->cmplt.status, flow_resume_resp->cmplt.flow_ring_id));
9773 
9774 	dhd_bus_flow_ring_resume_response(dhd->bus, flow_resume_resp->cmplt.flow_ring_id,
9775 		flow_resume_resp->cmplt.status);
9776 #endif /* IDLE_TX_FLOW_MGMT */
9777 }
9778 
9779 static void
9780 dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg)
9781 {
9782 #ifdef IDLE_TX_FLOW_MGMT
9783 	int16 status;
9784 	tx_idle_flowring_suspend_response_t	*flow_suspend_resp =
9785 		(tx_idle_flowring_suspend_response_t *)msg;
9786 	status = flow_suspend_resp->cmplt.status;
9787 
9788 	DHD_ERROR(("%s Flow id %d suspend Response status = %d\n",
9789 		__FUNCTION__, flow_suspend_resp->cmplt.flow_ring_id,
9790 		status));
9791 
	if (status != BCME_OK) {
		DHD_ERROR(("%s: Error in suspending flow rings!!"
			" Dongle will still be polling idle rings!! Status = %d\n",
			__FUNCTION__, status));
	}
9798 #endif /* IDLE_TX_FLOW_MGMT */
9799 }
9800 
9801 int
9802 dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
9803 {
9804 	tx_flowring_flush_request_t *flow_flush_rqst;
9805 	dhd_prot_t *prot = dhd->prot;
9806 	unsigned long flags;
9807 	uint16 alloced = 0;
9808 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
9809 
9810 	DHD_RING_LOCK(ring->ring_lock, flags);
9811 
9812 	/* Request for ring buffer space */
9813 	flow_flush_rqst = (tx_flowring_flush_request_t *)
9814 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
9815 	if (flow_flush_rqst == NULL) {
9816 		DHD_RING_UNLOCK(ring->ring_lock, flags);
9817 		DHD_ERROR(("%s: Flow Flush Req - failure ring space\n", __FUNCTION__));
9818 		return BCME_NOMEM;
9819 	}
9820 
9821 	/* Common msg buf hdr */
9822 	flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH;
9823 	flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
9824 	flow_flush_rqst->msg.request_id = htol32(0); /* TBD */
9825 	flow_flush_rqst->msg.flags = ring->current_phase;
9826 	flow_flush_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
9827 	ring->seqnum++;
9828 
9829 	flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
9830 	flow_flush_rqst->reason = htol16(BCME_OK);
9831 
9832 	DHD_INFO(("%s: Send Flow Flush Req\n", __FUNCTION__));
9833 
9834 	/* update ring's WR index and ring doorbell to dongle */
9835 	dhd_prot_ring_write_complete(dhd, ring, flow_flush_rqst, 1);
9836 
9837 	DHD_RING_UNLOCK(ring->ring_lock, flags);
9838 
9839 	return BCME_OK;
9840 } /* dhd_prot_flow_ring_flush */
9841 
9842 static void
9843 dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg)
9844 {
9845 	tx_flowring_flush_response_t *flow_flush_resp = (tx_flowring_flush_response_t *)msg;
9846 
9847 	DHD_INFO(("%s: Flow Flush Response status = %d\n", __FUNCTION__,
9848 		flow_flush_resp->cmplt.status));
9849 
9850 	dhd_bus_flow_ring_flush_response(dhd->bus, flow_flush_resp->cmplt.flow_ring_id,
9851 		flow_flush_resp->cmplt.status);
9852 }
9853 
9854 /**
9855  * Request dongle to configure soft doorbells for D2H rings. Host populated soft
9856  * doorbell information is transferred to dongle via the d2h ring config control
9857  * message.
9858  */
9859 void
9860 dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd)
9861 {
9862 #if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
9863 	uint16 ring_idx;
9864 	uint8 *msg_next;
9865 	void *msg_start;
9866 	uint16 alloced = 0;
9867 	unsigned long flags;
9868 	dhd_prot_t *prot = dhd->prot;
9869 	ring_config_req_t *ring_config_req;
9870 	bcmpcie_soft_doorbell_t *soft_doorbell;
9871 	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
9872 	const uint16 d2h_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
9873 
9874 	/* Claim space for d2h_ring number of d2h_ring_config_req_t messages */
9875 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
9876 	msg_start = dhd_prot_alloc_ring_space(dhd, ctrl_ring, d2h_rings, &alloced, TRUE);
9877 
9878 	if (msg_start == NULL) {
9879 		DHD_ERROR(("%s Msgbuf no space for %d D2H ring config soft doorbells\n",
9880 			__FUNCTION__, d2h_rings));
9881 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9882 		return;
9883 	}
9884 
9885 	msg_next = (uint8*)msg_start;
9886 
9887 	for (ring_idx = 0; ring_idx < d2h_rings; ring_idx++) {
9888 
9889 		/* position the ring_config_req into the ctrl subm ring */
9890 		ring_config_req = (ring_config_req_t *)msg_next;
9891 
9892 		/* Common msg header */
9893 		ring_config_req->msg.msg_type = MSG_TYPE_D2H_RING_CONFIG;
9894 		ring_config_req->msg.if_id = 0;
9895 		ring_config_req->msg.flags = 0;
9896 
9897 		ring_config_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
9898 		ctrl_ring->seqnum++;
9899 
9900 		ring_config_req->msg.request_id = htol32(DHD_FAKE_PKTID); /* unused */
9901 
9902 		/* Ring Config subtype and d2h ring_id */
9903 		ring_config_req->subtype = htol16(D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL);
9904 		ring_config_req->ring_id = htol16(DHD_D2H_RINGID(ring_idx));
9905 
9906 		/* Host soft doorbell configuration */
9907 		soft_doorbell = &prot->soft_doorbell[ring_idx];
9908 
9909 		ring_config_req->soft_doorbell.value = htol32(soft_doorbell->value);
9910 		ring_config_req->soft_doorbell.haddr.high =
9911 			htol32(soft_doorbell->haddr.high);
9912 		ring_config_req->soft_doorbell.haddr.low =
9913 			htol32(soft_doorbell->haddr.low);
9914 		ring_config_req->soft_doorbell.items = htol16(soft_doorbell->items);
9915 		ring_config_req->soft_doorbell.msecs = htol16(soft_doorbell->msecs);
9916 
9917 		DHD_INFO(("%s: Soft doorbell haddr 0x%08x 0x%08x value 0x%08x\n",
9918 			__FUNCTION__, ring_config_req->soft_doorbell.haddr.high,
9919 			ring_config_req->soft_doorbell.haddr.low,
9920 			ring_config_req->soft_doorbell.value));
9921 
9922 		msg_next = msg_next + ctrl_ring->item_len;
9923 	}
9924 
9925 	/* update control subn ring's WR index and ring doorbell to dongle */
9926 	dhd_prot_ring_write_complete(dhd, ctrl_ring, msg_start, d2h_rings);
9927 
9928 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9929 
9930 #endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
9931 }
9932 
9933 static void
9934 dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg)
9935 {
9936 	DHD_INFO(("%s: Ring Config Response - status %d ringid %d\n",
9937 		__FUNCTION__, ltoh16(((ring_config_resp_t *)msg)->compl_hdr.status),
9938 		ltoh16(((ring_config_resp_t *)msg)->compl_hdr.flow_ring_id)));
9939 }
9940 
9941 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
9942 void
9943 copy_ext_trap_sig(dhd_pub_t *dhd, trap_t *tr)
9944 {
9945 	uint32 *ext_data = dhd->extended_trap_data;
9946 	hnd_ext_trap_hdr_t *hdr;
9947 	const bcm_tlv_t *tlv;
9948 
9949 	if (ext_data == NULL) {
9950 		return;
9951 	}
9952 	/* First word is original trap_data */
9953 	ext_data++;
9954 
9955 	/* Followed by the extended trap data header */
9956 	hdr = (hnd_ext_trap_hdr_t *)ext_data;
9957 
9958 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
9959 	if (tlv) {
9960 		memcpy(tr, &tlv->data, sizeof(struct _trap_struct));
9961 	}
9962 }
9963 #define TRAP_T_NAME_OFFSET(var) {#var, OFFSETOF(trap_t, var)}
9964 
9965 typedef struct {
9966 	char name[HANG_INFO_TRAP_T_NAME_MAX];
9967 	uint32 offset;
9968 } hang_info_trap_t;
9969 
9970 #ifdef DHD_EWPR_VER2
9971 static hang_info_trap_t hang_info_trap_tbl[] = {
9972 	{"reason", 0},
9973 	{"ver", VENDOR_SEND_HANG_EXT_INFO_VER},
9974 	{"stype", 0},
9975 	TRAP_T_NAME_OFFSET(type),
9976 	TRAP_T_NAME_OFFSET(epc),
9977 	{"resrvd", 0},
9978 	{"resrvd", 0},
9979 	{"resrvd", 0},
9980 	{"resrvd", 0},
9981 	{"", 0}
9982 };
9983 #else
9984 static hang_info_trap_t hang_info_trap_tbl[] = {
9985 	{"reason", 0},
9986 	{"ver", VENDOR_SEND_HANG_EXT_INFO_VER},
9987 	{"stype", 0},
9988 	TRAP_T_NAME_OFFSET(type),
9989 	TRAP_T_NAME_OFFSET(epc),
9990 	TRAP_T_NAME_OFFSET(cpsr),
9991 	TRAP_T_NAME_OFFSET(spsr),
9992 	TRAP_T_NAME_OFFSET(r0),
9993 	TRAP_T_NAME_OFFSET(r1),
9994 	TRAP_T_NAME_OFFSET(r2),
9995 	TRAP_T_NAME_OFFSET(r3),
9996 	TRAP_T_NAME_OFFSET(r4),
9997 	TRAP_T_NAME_OFFSET(r5),
9998 	TRAP_T_NAME_OFFSET(r6),
9999 	TRAP_T_NAME_OFFSET(r7),
10000 	TRAP_T_NAME_OFFSET(r8),
10001 	TRAP_T_NAME_OFFSET(r9),
10002 	TRAP_T_NAME_OFFSET(r10),
10003 	TRAP_T_NAME_OFFSET(r11),
10004 	TRAP_T_NAME_OFFSET(r12),
10005 	TRAP_T_NAME_OFFSET(r13),
10006 	TRAP_T_NAME_OFFSET(r14),
10007 	TRAP_T_NAME_OFFSET(pc),
10008 	{"", 0}
10009 };
10010 #endif /* DHD_EWPR_VER2 */
10011 
10012 #define TAG_TRAP_IS_STATE(tag) \
10013 	((tag == TAG_TRAP_MEMORY) || (tag == TAG_TRAP_PCIE_Q) || \
10014 	(tag == TAG_TRAP_WLC_STATE) || (tag == TAG_TRAP_LOG_DATA) || \
10015 	(tag == TAG_TRAP_CODE))
10016 
10017 static void
10018 copy_hang_info_head(char *dest, trap_t *src, int len, int field_name,
10019 		int *bytes_written, int *cnt, char *cookie)
10020 {
10021 	uint8 *ptr;
10022 	int remain_len;
10023 	int i;
10024 
10025 	ptr = (uint8 *)src;
10026 
10027 	memset(dest, 0, len);
10028 	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10029 
10030 	/* hang reason, hang info ver */
10031 	for (i = 0; (i < HANG_INFO_TRAP_T_SUBTYPE_IDX) && (*cnt < HANG_FIELD_CNT_MAX);
10032 			i++, (*cnt)++) {
10033 		if (field_name) {
10034 			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10035 			*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
10036 					hang_info_trap_tbl[i].name, HANG_KEY_DEL);
10037 		}
10038 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10039 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%d%c",
10040 				hang_info_trap_tbl[i].offset, HANG_KEY_DEL);
10041 
10042 	}
10043 
10044 	if (*cnt < HANG_FIELD_CNT_MAX) {
10045 		if (field_name) {
10046 			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10047 			*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
10048 					"cookie", HANG_KEY_DEL);
10049 		}
10050 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10051 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s%c",
10052 				cookie, HANG_KEY_DEL);
10053 		(*cnt)++;
10054 	}
10055 
10056 	if (*cnt < HANG_FIELD_CNT_MAX) {
10057 		if (field_name) {
10058 			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10059 			*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
10060 					hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].name,
10061 					HANG_KEY_DEL);
10062 		}
10063 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10064 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%08x%c",
10065 				hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].offset,
10066 				HANG_KEY_DEL);
10067 		(*cnt)++;
10068 	}
10069 
10070 	if (*cnt < HANG_FIELD_CNT_MAX) {
10071 		if (field_name) {
10072 			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10073 			*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
10074 					hang_info_trap_tbl[HANG_INFO_TRAP_T_EPC_IDX].name,
10075 					HANG_KEY_DEL);
10076 		}
10077 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10078 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%08x%c",
10079 				*(uint32 *)
10080 				(ptr + hang_info_trap_tbl[HANG_INFO_TRAP_T_EPC_IDX].offset),
10081 				HANG_KEY_DEL);
10082 		(*cnt)++;
10083 	}
10084 #ifdef DHD_EWPR_VER2
10085 	/* put 0 for HG03 ~ HG06 (reserved for future use) */
10086 	for (i = 0; (i < HANG_INFO_BIGDATA_EXTRA_KEY) && (*cnt < HANG_FIELD_CNT_MAX);
10087 			i++, (*cnt)++) {
10088 		if (field_name) {
10089 			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10090 			*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
10091 				hang_info_trap_tbl[HANG_INFO_TRAP_T_EXTRA_KEY_IDX+i].name,
10092 				HANG_KEY_DEL);
10093 		}
10094 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10095 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%d%c",
10096 			hang_info_trap_tbl[HANG_INFO_TRAP_T_EXTRA_KEY_IDX+i].offset,
10097 			HANG_KEY_DEL);
10098 	}
10099 #endif /* DHD_EWPR_VER2 */
10100 }
10101 #ifndef DHD_EWPR_VER2
10102 static void
10103 copy_hang_info_trap_t(char *dest, trap_t *src, int len, int field_name,
10104 		int *bytes_written, int *cnt, char *cookie)
10105 {
10106 	uint8 *ptr;
10107 	int remain_len;
10108 	int i;
10109 
10110 	ptr = (uint8 *)src;
10111 
10112 	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10113 
10114 	for (i = HANG_INFO_TRAP_T_OFFSET_IDX;
10115 			(hang_info_trap_tbl[i].name[0] != 0) && (*cnt < HANG_FIELD_CNT_MAX);
10116 			i++, (*cnt)++) {
10117 		if (field_name) {
10118 			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10119 			*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%s:",
10120 					HANG_RAW_DEL, hang_info_trap_tbl[i].name);
10121 		}
10122 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10123 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
10124 				HANG_RAW_DEL, *(uint32 *)(ptr + hang_info_trap_tbl[i].offset));
10125 	}
10126 }
10127 
10128 /* Ignore compiler warnings due to -Werror=cast-qual */
10129 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
10130 #pragma GCC diagnostic push
10131 #pragma GCC diagnostic ignored "-Wcast-qual"
10132 #endif // endif
10133 
10134 static void
10135 copy_hang_info_stack(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
10136 {
10137 	int remain_len;
10138 	int i = 0;
10139 	const uint32 *stack;
10140 	uint32 *ext_data = dhd->extended_trap_data;
10141 	hnd_ext_trap_hdr_t *hdr;
10142 	const bcm_tlv_t *tlv;
10143 	int remain_stack_cnt = 0;
10144 	uint32 dummy_data = 0;
10145 	int bigdata_key_stack_cnt = 0;
10146 
10147 	if (ext_data == NULL) {
10148 		return;
10149 	}
10150 	/* First word is original trap_data */
10151 	ext_data++;
10152 
10153 	/* Followed by the extended trap data header */
10154 	hdr = (hnd_ext_trap_hdr_t *)ext_data;
10155 
10156 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
10157 
10158 	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10159 
10160 	if (tlv) {
10161 		stack = (const uint32 *)tlv->data;
10162 
10163 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len,
10164 				"%08x", *(uint32 *)(stack++));
10165 		(*cnt)++;
10166 		if (*cnt >= HANG_FIELD_CNT_MAX) {
10167 			return;
10168 		}
10169 		for (i = 1; i < (uint32)(tlv->len / sizeof(uint32)); i++, bigdata_key_stack_cnt++) {
10170 			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
			/* raw data for bigdata uses '_'; key data for bigdata uses a space */
10172 			*bytes_written += scnprintf(&dest[*bytes_written], remain_len,
10173 				"%c%08x",
10174 				i <= HANG_INFO_BIGDATA_KEY_STACK_CNT ? HANG_KEY_DEL : HANG_RAW_DEL,
10175 				*(uint32 *)(stack++));
10176 
10177 			(*cnt)++;
10178 			if ((*cnt >= HANG_FIELD_CNT_MAX) ||
10179 					(i >= HANG_FIELD_TRAP_T_STACK_CNT_MAX)) {
10180 				return;
10181 			}
10182 		}
10183 	}
10184 
10185 	remain_stack_cnt = HANG_FIELD_TRAP_T_STACK_CNT_MAX - i;
10186 
10187 	for (i = 0; i < remain_stack_cnt; i++) {
10188 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10189 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
10190 				HANG_RAW_DEL, dummy_data);
10191 		(*cnt)++;
10192 		if (*cnt >= HANG_FIELD_CNT_MAX) {
10193 			return;
10194 		}
10195 	}
10196 
10197 }
10198 
10199 static void
10200 copy_hang_info_specific(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
10201 {
10202 	int remain_len;
10203 	int i;
10204 	const uint32 *data;
10205 	uint32 *ext_data = dhd->extended_trap_data;
10206 	hnd_ext_trap_hdr_t *hdr;
10207 	const bcm_tlv_t *tlv;
10208 	int remain_trap_data = 0;
10209 	uint8 buf_u8[sizeof(uint32)] = { 0, };
10210 	const uint8 *p_u8;
10211 
10212 	if (ext_data == NULL) {
10213 		return;
10214 	}
10215 	/* First word is original trap_data */
10216 	ext_data++;
10217 
10218 	/* Followed by the extended trap data header */
10219 	hdr = (hnd_ext_trap_hdr_t *)ext_data;
10220 
10221 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
10222 	if (tlv) {
		/* hdr->len includes the TLV header */
10224 		remain_trap_data = (hdr->len - tlv->len - sizeof(uint16));
10225 	}
10226 
10227 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
10228 	if (tlv) {
		/* hdr->len includes the TLV header */
10230 		remain_trap_data -= (tlv->len + sizeof(uint16));
10231 	}
10232 
10233 	data = (const uint32 *)(hdr->data + (hdr->len  - remain_trap_data));
10234 
10235 	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10236 
10237 	for (i = 0; i < (uint32)(remain_trap_data / sizeof(uint32)) && *cnt < HANG_FIELD_CNT_MAX;
10238 			i++, (*cnt)++) {
10239 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10240 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
10241 				HANG_RAW_DEL, *(uint32 *)(data++));
10242 	}
10243 
10244 	if (*cnt >= HANG_FIELD_CNT_MAX) {
10245 		return;
10246 	}
10247 
10248 	remain_trap_data -= (sizeof(uint32) * i);
10249 
10250 	if (remain_trap_data > sizeof(buf_u8)) {
10251 		DHD_ERROR(("%s: resize remain_trap_data\n", __FUNCTION__));
10252 		remain_trap_data =  sizeof(buf_u8);
10253 	}
10254 
10255 	if (remain_trap_data) {
10256 		p_u8 = (const uint8 *)data;
10257 		for (i = 0; i < remain_trap_data; i++) {
10258 			buf_u8[i] = *(const uint8 *)(p_u8++);
10259 		}
10260 
10261 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10262 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
10263 				HANG_RAW_DEL, ltoh32_ua(buf_u8));
10264 		(*cnt)++;
10265 	}
10266 }
10267 #endif /* DHD_EWPR_VER2 */
10268 
10269 static void
10270 get_hang_info_trap_subtype(dhd_pub_t *dhd, uint32 *subtype)
10271 {
10272 	uint32 i;
10273 	uint32 *ext_data = dhd->extended_trap_data;
10274 	hnd_ext_trap_hdr_t *hdr;
10275 	const bcm_tlv_t *tlv;
10276 
10277 	/* First word is original trap_data */
10278 	ext_data++;
10279 
10280 	/* Followed by the extended trap data header */
10281 	hdr = (hnd_ext_trap_hdr_t *)ext_data;
10282 
	/* Scan the tags and report the first non-state tag as the trap subtype */
10284 	for (i = TAG_TRAP_DEEPSLEEP; i < TAG_TRAP_LAST; i++) {
10285 		tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
10286 		if (tlv) {
10287 			if (!TAG_TRAP_IS_STATE(i)) {
10288 				*subtype = i;
10289 				return;
10290 			}
10291 		}
10292 	}
10293 }
10294 #ifdef DHD_EWPR_VER2
10295 static void
10296 copy_hang_info_etd_base64(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
10297 {
10298 	int remain_len;
10299 	uint32 *ext_data = dhd->extended_trap_data;
10300 	hnd_ext_trap_hdr_t *hdr;
10301 	char *base64_out = NULL;
10302 	int base64_cnt;
10303 	int max_base64_len = HANG_INFO_BASE64_BUFFER_SIZE;
10304 
10305 	if (ext_data == NULL) {
10306 		return;
10307 	}
10308 	/* First word is original trap_data */
10309 	ext_data++;
10310 
10311 	/* Followed by the extended trap data header */
10312 	hdr = (hnd_ext_trap_hdr_t *)ext_data;
10313 
10314 	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10315 
10316 	if (remain_len <= 0) {
10317 		DHD_ERROR(("%s: no space to put etd\n", __FUNCTION__));
10318 		return;
10319 	}
10320 
10321 	if (remain_len < max_base64_len) {
10322 		DHD_ERROR(("%s: change max base64 length to remain length %d\n", __FUNCTION__,
10323 			remain_len));
10324 		max_base64_len = remain_len;
10325 	}
10326 
10327 	base64_out = MALLOCZ(dhd->osh, HANG_INFO_BASE64_BUFFER_SIZE);
10328 	if (base64_out == NULL) {
10329 		DHD_ERROR(("%s: MALLOC failed for size %d\n",
10330 			__FUNCTION__, HANG_INFO_BASE64_BUFFER_SIZE));
10331 		return;
10332 	}
10333 
10334 	if (hdr->len > 0) {
10335 		base64_cnt = dhd_base64_encode(hdr->data, hdr->len, base64_out, max_base64_len);
10336 		if (base64_cnt == 0) {
10337 			DHD_ERROR(("%s: base64 encoding error\n", __FUNCTION__));
10338 		}
10339 	}
10340 
10341 	*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s",
10342 			base64_out);
10343 	(*cnt)++;
10344 	MFREE(dhd->osh, base64_out, HANG_INFO_BASE64_BUFFER_SIZE);
10345 }
10346 #endif /* DHD_EWPR_VER2 */
10347 
10348 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
10349 #pragma GCC diagnostic pop
10350 #endif // endif
10351 
10352 void
10353 copy_hang_info_trap(dhd_pub_t *dhd)
10354 {
10355 	trap_t tr;
10356 	int bytes_written;
10357 	int trap_subtype = 0;
10358 
10359 	if (!dhd || !dhd->hang_info) {
10360 		DHD_ERROR(("%s dhd=%p hang_info=%p\n", __FUNCTION__,
10361 			dhd, (dhd ? dhd->hang_info : NULL)));
10362 		return;
10363 	}
10364 
10365 	if (!dhd->dongle_trap_occured) {
10366 		DHD_ERROR(("%s: dongle_trap_occured is FALSE\n", __FUNCTION__));
10367 		return;
10368 	}
10369 
10370 	memset(&tr, 0x00, sizeof(struct _trap_struct));
10371 
10372 	copy_ext_trap_sig(dhd, &tr);
10373 	get_hang_info_trap_subtype(dhd, &trap_subtype);
10374 
10375 	hang_info_trap_tbl[HANG_INFO_TRAP_T_REASON_IDX].offset = HANG_REASON_DONGLE_TRAP;
10376 	hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].offset = trap_subtype;
10377 
10378 	bytes_written = 0;
10379 	dhd->hang_info_cnt = 0;
10380 	get_debug_dump_time(dhd->debug_dump_time_hang_str);
10381 	copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str);
10382 
10383 	copy_hang_info_head(dhd->hang_info, &tr, VENDOR_SEND_HANG_EXT_INFO_LEN, FALSE,
10384 			&bytes_written, &dhd->hang_info_cnt, dhd->debug_dump_time_hang_str);
10385 
10386 	DHD_INFO(("hang info haed cnt: %d len: %d data: %s\n",
10387 		dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
10388 
10389 	clear_debug_dump_time(dhd->debug_dump_time_hang_str);
10390 
10391 #ifdef DHD_EWPR_VER2
10392 	/* stack info & trap info are included in etd data */
10393 
10394 	/* extended trap data dump */
10395 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
10396 		copy_hang_info_etd_base64(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
10397 		DHD_INFO(("hang info specific cnt: %d len: %d data: %s\n",
10398 			dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
10399 	}
10400 #else
10401 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
10402 		copy_hang_info_stack(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
10403 		DHD_INFO(("hang info stack cnt: %d len: %d data: %s\n",
10404 			dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
10405 	}
10406 
10407 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
10408 		copy_hang_info_trap_t(dhd->hang_info, &tr, VENDOR_SEND_HANG_EXT_INFO_LEN, FALSE,
10409 				&bytes_written, &dhd->hang_info_cnt, dhd->debug_dump_time_hang_str);
10410 		DHD_INFO(("hang info trap_t cnt: %d len: %d data: %s\n",
10411 			dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
10412 	}
10413 
10414 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
10415 		copy_hang_info_specific(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
10416 		DHD_INFO(("hang info specific cnt: %d len: %d data: %s\n",
10417 			dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
10418 	}
10419 #endif /* DHD_EWPR_VER2 */
10420 }
10421 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
10422 
10423 int
10424 dhd_prot_debug_info_print(dhd_pub_t *dhd)
10425 {
10426 	dhd_prot_t *prot = dhd->prot;
10427 	msgbuf_ring_t *ring;
10428 	uint16 rd, wr;
10429 	uint32 dma_buf_len;
10430 	uint64 current_time;
10431 	ulong ring_tcm_rd_addr; /* dongle address */
10432 	ulong ring_tcm_wr_addr; /* dongle address */
10433 
10434 	DHD_ERROR(("\n ------- DUMPING VERSION INFORMATION ------- \r\n"));
10435 	DHD_ERROR(("DHD: %s\n", dhd_version));
10436 	DHD_ERROR(("Firmware: %s\n", fw_version));
10437 
10438 	DHD_ERROR(("\n ------- DUMPING PROTOCOL INFORMATION ------- \r\n"));
10439 	DHD_ERROR(("ICPrevs: Dev %d, Host %d, active %d\n",
10440 		prot->device_ipc_version,
10441 		prot->host_ipc_version,
10442 		prot->active_ipc_version));
10443 	DHD_ERROR(("d2h_intr_method -> %s\n",
10444 			dhd->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX"));
10445 	DHD_ERROR(("max Host TS bufs to post: %d, posted %d\n",
10446 		prot->max_tsbufpost, prot->cur_ts_bufs_posted));
10447 	DHD_ERROR(("max INFO bufs to post: %d, posted %d\n",
10448 		prot->max_infobufpost, prot->infobufpost));
10449 	DHD_ERROR(("max event bufs to post: %d, posted %d\n",
10450 		prot->max_eventbufpost, prot->cur_event_bufs_posted));
10451 	DHD_ERROR(("max ioctlresp bufs to post: %d, posted %d\n",
10452 		prot->max_ioctlrespbufpost, prot->cur_ioctlresp_bufs_posted));
10453 	DHD_ERROR(("max RX bufs to post: %d, posted %d\n",
10454 		prot->max_rxbufpost, prot->rxbufpost));
10455 	DHD_ERROR(("h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
10456 		h2d_max_txpost, prot->h2d_max_txpost));
10457 
10458 	current_time = OSL_LOCALTIME_NS();
10459 	DHD_ERROR(("current_time="SEC_USEC_FMT"\n", GET_SEC_USEC(current_time)));
10460 	DHD_ERROR(("ioctl_fillup_time="SEC_USEC_FMT
10461 		" ioctl_ack_time="SEC_USEC_FMT
10462 		" ioctl_cmplt_time="SEC_USEC_FMT"\n",
10463 		GET_SEC_USEC(prot->ioctl_fillup_time),
10464 		GET_SEC_USEC(prot->ioctl_ack_time),
10465 		GET_SEC_USEC(prot->ioctl_cmplt_time)));
10466 
10467 	/* Check PCIe INT registers */
10468 	if (!dhd_pcie_dump_int_regs(dhd)) {
10469 		DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
10470 		dhd->bus->is_linkdown = TRUE;
10471 	}
10472 
10473 	DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n"));
10474 
10475 	ring = &prot->h2dring_ctrl_subn;
10476 	dma_buf_len = ring->max_items * ring->item_len;
10477 	ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10478 	ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10479 	DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
10480 		"SIZE %d \r\n",
10481 		ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10482 		ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len));
10483 	DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10484 	if (dhd->bus->is_linkdown) {
10485 		DHD_ERROR(("CtrlPost: From Shared Mem: RD and WR are invalid"
10486 			" due to PCIe link down\r\n"));
10487 	} else {
10488 		dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10489 		dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10490 		DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10491 	}
10492 	DHD_ERROR(("CtrlPost: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
10493 
10494 	ring = &prot->d2hring_ctrl_cpln;
10495 	dma_buf_len = ring->max_items * ring->item_len;
10496 	ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10497 	ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10498 	DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
10499 		"SIZE %d \r\n",
10500 		ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10501 		ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len));
10502 	DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10503 	if (dhd->bus->is_linkdown) {
10504 		DHD_ERROR(("CtrlCpl: From Shared Mem: RD and WR are invalid"
10505 			" due to PCIe link down\r\n"));
10506 	} else {
10507 		dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10508 		dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10509 		DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10510 	}
10511 	DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
10512 
10513 	ring = prot->h2dring_info_subn;
10514 	if (ring) {
10515 		dma_buf_len = ring->max_items * ring->item_len;
10516 		ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10517 		ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10518 		DHD_ERROR(("InfoSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
10519 			"SIZE %d \r\n",
10520 			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10521 			ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
10522 			dma_buf_len));
10523 		DHD_ERROR(("InfoSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10524 		if (dhd->bus->is_linkdown) {
10525 			DHD_ERROR(("InfoSub: From Shared Mem: RD and WR are invalid"
10526 				" due to PCIe link down\r\n"));
10527 		} else {
10528 			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10529 			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10530 			DHD_ERROR(("InfoSub: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10531 		}
10532 		DHD_ERROR(("InfoSub: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
10533 	}
10534 	ring = prot->d2hring_info_cpln;
10535 	if (ring) {
10536 		dma_buf_len = ring->max_items * ring->item_len;
10537 		ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10538 		ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10539 		DHD_ERROR(("InfoCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
10540 			"SIZE %d \r\n",
10541 			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10542 			ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
10543 			dma_buf_len));
10544 		DHD_ERROR(("InfoCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10545 		if (dhd->bus->is_linkdown) {
10546 			DHD_ERROR(("InfoCpl: From Shared Mem: RD and WR are invalid"
10547 				" due to PCIe link down\r\n"));
10548 		} else {
10549 			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10550 			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10551 			DHD_ERROR(("InfoCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10552 		}
10553 		DHD_ERROR(("InfoCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
10554 	}
10555 
10556 	ring = &prot->d2hring_tx_cpln;
10557 	if (ring) {
10558 		ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10559 		ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10560 		dma_buf_len = ring->max_items * ring->item_len;
10561 		DHD_ERROR(("TxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
10562 			"SIZE %d \r\n",
10563 			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10564 			ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
10565 			dma_buf_len));
10566 		DHD_ERROR(("TxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10567 		if (dhd->bus->is_linkdown) {
10568 			DHD_ERROR(("TxCpl: From Shared Mem: RD and WR are invalid"
10569 				" due to PCIe link down\r\n"));
10570 		} else {
10571 			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10572 			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10573 			DHD_ERROR(("TxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10574 		}
10575 		DHD_ERROR(("TxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
10576 	}
10577 
10578 	ring = &prot->d2hring_rx_cpln;
10579 	if (ring) {
10580 		ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10581 		ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10582 		dma_buf_len = ring->max_items * ring->item_len;
10583 		DHD_ERROR(("RxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
10584 			"SIZE %d \r\n",
10585 			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10586 			ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
10587 			dma_buf_len));
10588 		DHD_ERROR(("RxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10589 		if (dhd->bus->is_linkdown) {
10590 			DHD_ERROR(("RxCpl: From Shared Mem: RD and WR are invalid"
10591 				" due to PCIe link down\r\n"));
10592 		} else {
10593 			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10594 			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10595 			DHD_ERROR(("RxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10596 		}
10597 		DHD_ERROR(("RxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
10598 	}
10599 #ifdef EWP_EDL
10600 	ring = prot->d2hring_edl;
10601 	if (ring) {
10602 		ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10603 		ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10604 		dma_buf_len = ring->max_items * ring->item_len;
10605 		DHD_ERROR(("EdlRing: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
10606 			"SIZE %d \r\n",
10607 			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10608 			ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
10609 			dma_buf_len));
10610 		DHD_ERROR(("EdlRing: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10611 		if (dhd->bus->is_linkdown) {
10612 			DHD_ERROR(("EdlRing: From Shared Mem: RD and WR are invalid"
10613 				" due to PCIe link down\r\n"));
10614 		} else {
10615 			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10616 			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10617 			DHD_ERROR(("EdlRing: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10618 		}
10619 		DHD_ERROR(("EdlRing: Expected seq num: %d \r\n",
10620 			ring->seqnum % D2H_EPOCH_MODULO));
10621 	}
10622 #endif /* EWP_EDL */
10623 
10624 	DHD_ERROR(("%s: cur_ioctlresp_bufs_posted %d cur_event_bufs_posted %d\n",
10625 		__FUNCTION__, prot->cur_ioctlresp_bufs_posted, prot->cur_event_bufs_posted));
10626 #ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
10627 	DHD_ERROR(("%s: multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n",
10628 		__FUNCTION__, dhd->multi_client_flow_rings, dhd->max_multi_client_flow_rings));
10629 #endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
10630 
10631 	DHD_ERROR(("pktid_txq_start_cnt: %d\n", prot->pktid_txq_start_cnt));
10632 	DHD_ERROR(("pktid_txq_stop_cnt: %d\n", prot->pktid_txq_stop_cnt));
10633 	DHD_ERROR(("pktid_depleted_cnt: %d\n", prot->pktid_depleted_cnt));
10634 
10635 	dhd_pcie_debug_info_dump(dhd);
10636 
10637 	return 0;
10638 }
10639 
10640 int
10641 dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
10642 {
10643 	uint32 *ptr;
10644 	uint32 value;
10645 
10646 	if (dhd->prot->d2h_dma_indx_wr_buf.va) {
10647 		uint32 i;
10648 		uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus);
10649 
10650 		OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va,
10651 			dhd->prot->d2h_dma_indx_wr_buf.len);
10652 
10653 		ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va);
10654 
10655 		bcm_bprintf(b, "\n max_tx_queues %d\n", max_h2d_queues);
10656 
10657 		bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%04x\n", ptr);
10658 		value = ltoh32(*ptr);
10659 		bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value);
10660 		ptr++;
10661 		value = ltoh32(*ptr);
10662 		bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value);
10663 
10664 		ptr++;
10665 		bcm_bprintf(b, "RPTR block Flow rings , 0x%04x\n", ptr);
10666 		for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
10667 			value = ltoh32(*ptr);
10668 			bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
10669 			ptr++;
10670 		}
10671 	}
10672 
10673 	if (dhd->prot->h2d_dma_indx_rd_buf.va) {
10674 		OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va,
10675 			dhd->prot->h2d_dma_indx_rd_buf.len);
10676 
10677 		ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va);
10678 
10679 		bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%04x\n", ptr);
10680 		value = ltoh32(*ptr);
10681 		bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value);
10682 		ptr++;
10683 		value = ltoh32(*ptr);
10684 		bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value);
10685 		ptr++;
10686 		value = ltoh32(*ptr);
10687 		bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value);
10688 	}
10689 
10690 	return 0;
10691 }
10692 
10693 uint32
10694 dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val)
10695 {
10696 	dhd_prot_t *prot = dhd->prot;
10697 #if DHD_DBG_SHOW_METADATA
10698 	prot->metadata_dbg = val;
10699 #endif // endif
10700 	return (uint32)prot->metadata_dbg;
10701 }
10702 
10703 uint32
10704 dhd_prot_metadata_dbg_get(dhd_pub_t *dhd)
10705 {
10706 	dhd_prot_t *prot = dhd->prot;
10707 	return (uint32)prot->metadata_dbg;
10708 }
10709 
10710 uint32
10711 dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx)
10712 {
10713 	dhd_prot_t *prot = dhd->prot;
10714 	if (rx)
10715 		prot->rx_metadata_offset = (uint16)val;
10716 	else
10717 		prot->tx_metadata_offset = (uint16)val;
10718 	return dhd_prot_metadatalen_get(dhd, rx);
10719 }
10720 
10721 uint32
10722 dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx)
10723 {
10724 	dhd_prot_t *prot = dhd->prot;
10725 	if (rx)
10726 		return prot->rx_metadata_offset;
10727 	else
10728 		return prot->tx_metadata_offset;
10729 }
10730 
10731 /** optimization to write "n" tx items at a time to ring */
10732 uint32
10733 dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val)
10734 {
10735 	dhd_prot_t *prot = dhd->prot;
10736 	if (set)
10737 		prot->txp_threshold = (uint16)val;
10738 	val = prot->txp_threshold;
10739 	return val;
10740 }
10741 
10742 #ifdef DHD_RX_CHAINING
10743 
10744 static INLINE void BCMFASTPATH
10745 dhd_rxchain_reset(rxchain_info_t *rxchain)
10746 {
10747 	rxchain->pkt_count = 0;
10748 }
10749 
10750 static void BCMFASTPATH
10751 dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx)
10752 {
10753 	uint8 *eh;
10754 	uint8 prio;
10755 	dhd_prot_t *prot = dhd->prot;
10756 	rxchain_info_t *rxchain = &prot->rxchain;
10757 
10758 	ASSERT(!PKTISCHAINED(pkt));
10759 	ASSERT(PKTCLINK(pkt) == NULL);
10760 	ASSERT(PKTCGETATTR(pkt) == 0);
10761 
10762 	eh = PKTDATA(dhd->osh, pkt);
10763 	prio = IP_TOS46(eh + ETHER_HDR_LEN) >> IPV4_TOS_PREC_SHIFT;
10764 
10765 	if (rxchain->pkt_count && !(PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa,
10766 		rxchain->h_da, rxchain->h_prio))) {
10767 		/* Different flow - First release the existing chain */
10768 		dhd_rxchain_commit(dhd);
10769 	}
10770 
10771 	/* For routers, with HNDCTF, link the packets using PKTSETCLINK, */
10772 	/* so that the chain can be handed off to CTF bridge as is. */
10773 	if (rxchain->pkt_count == 0) {
10774 		/* First packet in chain */
10775 		rxchain->pkthead = rxchain->pkttail = pkt;
10776 
10777 		/* Keep a copy of ptr to ether_da, ether_sa and prio */
10778 		rxchain->h_da = ((struct ether_header *)eh)->ether_dhost;
10779 		rxchain->h_sa = ((struct ether_header *)eh)->ether_shost;
10780 		rxchain->h_prio = prio;
10781 		rxchain->ifidx = ifidx;
10782 		rxchain->pkt_count++;
10783 	} else {
10784 		/* Same flow - keep chaining */
10785 		PKTSETCLINK(rxchain->pkttail, pkt);
10786 		rxchain->pkttail = pkt;
10787 		rxchain->pkt_count++;
10788 	}
10789 
10790 	if ((dhd_rx_pkt_chainable(dhd, ifidx)) && (!ETHER_ISMULTI(rxchain->h_da)) &&
10791 		((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) ||
10792 		(((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IPV6)))) {
10793 		PKTSETCHAINED(dhd->osh, pkt);
10794 		PKTCINCRCNT(rxchain->pkthead);
10795 		PKTCADDLEN(rxchain->pkthead, PKTLEN(dhd->osh, pkt));
10796 	} else {
10797 		dhd_rxchain_commit(dhd);
10798 		return;
10799 	}
10800 
10801 	/* If we have hit the max chain length, dispatch the chain and reset */
10802 	if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN) {
10803 		dhd_rxchain_commit(dhd);
10804 	}
10805 }
10806 
10807 static void BCMFASTPATH
10808 dhd_rxchain_commit(dhd_pub_t *dhd)
10809 {
10810 	dhd_prot_t *prot = dhd->prot;
10811 	rxchain_info_t *rxchain = &prot->rxchain;
10812 
10813 	if (rxchain->pkt_count == 0)
10814 		return;
10815 
10816 	/* Release the packets to dhd_linux */
10817 	dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx, rxchain->pkt_count);
10818 
10819 	/* Reset the chain */
10820 	dhd_rxchain_reset(rxchain);
10821 }
10822 
10823 #endif /* DHD_RX_CHAINING */
10824 
10825 #ifdef IDLE_TX_FLOW_MGMT
10826 int
10827 dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
10828 {
10829 	tx_idle_flowring_resume_request_t *flow_resume_rqst;
10830 	msgbuf_ring_t *flow_ring;
10831 	dhd_prot_t *prot = dhd->prot;
10832 	unsigned long flags;
10833 	uint16 alloced = 0;
10834 	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
10835 
10836 	/* Fetch a pre-initialized msgbuf_ring from the flowring pool */
10837 	flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
10838 	if (flow_ring == NULL) {
10839 		DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
10840 			__FUNCTION__, flow_ring_node->flowid));
10841 		return BCME_NOMEM;
10842 	}
10843 
10844 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
10845 
10846 	/* Request for ctrl_ring buffer space */
10847 	flow_resume_rqst = (tx_idle_flowring_resume_request_t *)
10848 		dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
10849 
10850 	if (flow_resume_rqst == NULL) {
10851 		dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
10852 		DHD_ERROR(("%s: Flow resume Req flowid %d - failure ring space\n",
10853 			__FUNCTION__, flow_ring_node->flowid));
10854 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
10855 		return BCME_NOMEM;
10856 	}
10857 
10858 	flow_ring_node->prot_info = (void *)flow_ring;
10859 
10860 	/* Common msg buf hdr */
10861 	flow_resume_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_RESUME;
10862 	flow_resume_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
10863 	flow_resume_rqst->msg.request_id = htol32(0); /* TBD */
10864 
10865 	flow_resume_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
10866 	ctrl_ring->seqnum++;
10867 
10868 	flow_resume_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
10869 	DHD_ERROR(("%s Send Flow resume Req flow ID %d\n",
10870 		__FUNCTION__, flow_ring_node->flowid));
10871 
10872 	/* Update the flow_ring's WRITE index */
10873 	if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
10874 		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
10875 		                      H2D_DMA_INDX_WR_UPD, flow_ring->idx);
10876 	} else if (IFRM_ACTIVE(dhd) && (flow_ring->idx >= BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START)) {
10877 		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
10878 			H2D_IFRM_INDX_WR_UPD,
10879 			(flow_ring->idx - BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START));
10880 	} else {
10881 		dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
10882 			sizeof(uint16), RING_WR_UPD, flow_ring->idx);
10883 	}
10884 
10885 	/* update control subn ring's WR index and ring doorbell to dongle */
10886 	dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_resume_rqst, 1);
10887 
10888 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
10889 
10890 	return BCME_OK;
} /* dhd_prot_flow_ring_resume */
10892 
10893 int
10894 dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t *dhd, uint16 *ringid, uint16 count)
10895 {
10896 	tx_idle_flowring_suspend_request_t *flow_suspend_rqst;
10897 	dhd_prot_t *prot = dhd->prot;
10898 	unsigned long flags;
10899 	uint16 index;
10900 	uint16 alloced = 0;
10901 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
10902 
10903 	DHD_RING_LOCK(ring->ring_lock, flags);
10904 
10905 	/* Request for ring buffer space */
10906 	flow_suspend_rqst = (tx_idle_flowring_suspend_request_t *)
10907 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
10908 
10909 	if (flow_suspend_rqst == NULL) {
10910 		DHD_RING_UNLOCK(ring->ring_lock, flags);
10911 		DHD_ERROR(("%s: Flow suspend Req - failure ring space\n", __FUNCTION__));
10912 		return BCME_NOMEM;
10913 	}
10914 
10915 	/* Common msg buf hdr */
10916 	flow_suspend_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_SUSPEND;
10917 	/* flow_suspend_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; */
10918 	flow_suspend_rqst->msg.request_id = htol32(0); /* TBD */
10919 
10920 	flow_suspend_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
10921 	ring->seqnum++;
10922 
10923 	/* Update flow id  info */
10924 	for (index = 0; index < count; index++)
10925 	{
10926 		flow_suspend_rqst->ring_id[index] = ringid[index];
10927 	}
10928 	flow_suspend_rqst->num = count;
10929 
10930 	DHD_ERROR(("%s sending batch suspend!! count is %d\n", __FUNCTION__, count));
10931 
10932 	/* update ring's WR index and ring doorbell to dongle */
10933 	dhd_prot_ring_write_complete(dhd, ring, flow_suspend_rqst, 1);
10934 
10935 	DHD_RING_UNLOCK(ring->ring_lock, flags);
10936 
10937 	return BCME_OK;
10938 }
10939 #endif /* IDLE_TX_FLOW_MGMT */
10940 
10941 static const char* etd_trap_name(hnd_ext_tag_trap_t tag)
10942 {
10943 	switch (tag)
10944 	{
10945 	case TAG_TRAP_SIGNATURE: return "TAG_TRAP_SIGNATURE";
10946 	case TAG_TRAP_STACK: return "TAG_TRAP_STACK";
10947 	case TAG_TRAP_MEMORY: return "TAG_TRAP_MEMORY";
10948 	case TAG_TRAP_DEEPSLEEP: return "TAG_TRAP_DEEPSLEEP";
10949 	case TAG_TRAP_PSM_WD: return "TAG_TRAP_PSM_WD";
10950 	case TAG_TRAP_PHY: return "TAG_TRAP_PHY";
10951 	case TAG_TRAP_BUS: return "TAG_TRAP_BUS";
10952 	case TAG_TRAP_MAC_SUSP: return "TAG_TRAP_MAC_SUSP";
10953 	case TAG_TRAP_BACKPLANE: return "TAG_TRAP_BACKPLANE";
10954 	case TAG_TRAP_PCIE_Q: return "TAG_TRAP_PCIE_Q";
10955 	case TAG_TRAP_WLC_STATE: return "TAG_TRAP_WLC_STATE";
10956 	case TAG_TRAP_MAC_WAKE: return "TAG_TRAP_MAC_WAKE";
10957 	case TAG_TRAP_HMAP: return "TAG_TRAP_HMAP";
10958 	case TAG_TRAP_PHYTXERR_THRESH: return "TAG_TRAP_PHYTXERR_THRESH";
10959 	case TAG_TRAP_HC_DATA: return "TAG_TRAP_HC_DATA";
10960 	case TAG_TRAP_LOG_DATA: return "TAG_TRAP_LOG_DATA";
10961 	case TAG_TRAP_CODE: return "TAG_TRAP_CODE";
10962 	case TAG_TRAP_LAST:
10963 	default:
10964 		return "Unknown";
10965 	}
10967 }
10968 
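/**
 * Pretty-print the dongle's extended trap data. The buffer layout, as parsed
 * below, is: one 32-bit word holding the original trap_data, then an
 * hnd_ext_trap_hdr_t, then a sequence of TLVs. When 'raw' is set the buffer
 * is dumped as 32-bit words instead of being decoded per tag.
 */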
10969 int dhd_prot_dump_extended_trap(dhd_pub_t *dhdp, struct bcmstrbuf *b, bool raw)
10970 {
10971 	uint32 i;
10972 	uint32 *ext_data;
10973 	hnd_ext_trap_hdr_t *hdr;
10974 	const bcm_tlv_t *tlv;
10975 	const trap_t *tr;
10976 	const uint32 *stack;
10977 	const hnd_ext_trap_bp_err_t *bpe;
10978 	uint32 raw_len;
10979 
10980 	ext_data = dhdp->extended_trap_data;
10981 
10982 	/* return if there is no extended trap data */
10983 	if (!ext_data || !(dhdp->dongle_trap_data & D2H_DEV_EXT_TRAP_DATA))
10984 	{
10985 		bcm_bprintf(b, "%d (0x%x)", dhdp->dongle_trap_data, dhdp->dongle_trap_data);
10986 		return BCME_OK;
10987 	}
10988 
10989 	bcm_bprintf(b, "Extended trap data\n");
10990 
10991 	/* First word is original trap_data */
10992 	bcm_bprintf(b, "trap_data = 0x%08x\n", *ext_data);
10993 	ext_data++;
10994 
10995 	/* Followed by the extended trap data header */
10996 	hdr = (hnd_ext_trap_hdr_t *)ext_data;
10997 	bcm_bprintf(b, "version: %d, len: %d\n", hdr->version, hdr->len);
10998 
10999 	/* Dump a list of all tags found  before parsing data */
11000 	bcm_bprintf(b, "\nTags Found:\n");
11001 	for (i = 0; i < TAG_TRAP_LAST; i++) {
11002 		tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
11003 		if (tlv)
11004 			bcm_bprintf(b, "Tag: %d (%s), Length: %d\n", i, etd_trap_name(i), tlv->len);
11005 	}
11006 
11007 	if (raw)
11008 	{
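		/* raw_len is used as a count of 32-bit words to dump. Note that the
		 * first term adds sizeof(hnd_ext_trap_hdr_t) in bytes to a count of
		 * words, so the raw dump may emit a few more words than the TLV area
		 * strictly holds.
		 */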
11009 		raw_len = sizeof(hnd_ext_trap_hdr_t) + (hdr->len / 4) + (hdr->len % 4 ? 1 : 0);
11010 		for (i = 0; i < raw_len; i++)
11011 		{
11012 			bcm_bprintf(b, "0x%08x ", ext_data[i]);
11013 			if (i % 4 == 3)
11014 				bcm_bprintf(b, "\n");
11015 		}
11016 		return BCME_OK;
11017 	}
11018 
11019 	/* Extract the various supported TLVs from the extended trap data */
11020 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_CODE);
11021 	if (tlv)
11022 	{
11023 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_CODE), tlv->len);
11024 		bcm_bprintf(b, "ETD TYPE: %d\n", tlv->data[0]);
11025 	}
11026 
11027 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
11028 	if (tlv)
11029 	{
11030 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_SIGNATURE), tlv->len);
11031 		tr = (const trap_t *)tlv->data;
11032 
11033 		bcm_bprintf(b, "TRAP %x: pc %x, lr %x, sp %x, cpsr %x, spsr %x\n",
11034 		       tr->type, tr->pc, tr->r14, tr->r13, tr->cpsr, tr->spsr);
11035 		bcm_bprintf(b, "  r0 %x, r1 %x, r2 %x, r3 %x, r4 %x, r5 %x, r6 %x\n",
11036 		       tr->r0, tr->r1, tr->r2, tr->r3, tr->r4, tr->r5, tr->r6);
11037 		bcm_bprintf(b, "  r7 %x, r8 %x, r9 %x, r10 %x, r11 %x, r12 %x\n",
11038 		       tr->r7, tr->r8, tr->r9, tr->r10, tr->r11, tr->r12);
11039 	}
11040 
11041 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
11042 	if (tlv)
11043 	{
11044 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_STACK), tlv->len);
11045 		stack = (const uint32 *)tlv->data;
11046 		for (i = 0; i < (uint32)(tlv->len / 4); i++)
11047 		{
11048 			bcm_bprintf(b, "  0x%08x\n", *stack);
11049 			stack++;
11050 		}
11051 	}
11052 
11053 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BACKPLANE);
11054 	if (tlv)
11055 	{
11056 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BACKPLANE), tlv->len);
11057 		bpe = (const hnd_ext_trap_bp_err_t *)tlv->data;
11058 		bcm_bprintf(b, " error: %x\n", bpe->error);
11059 		bcm_bprintf(b, " coreid: %x\n", bpe->coreid);
11060 		bcm_bprintf(b, " baseaddr: %x\n", bpe->baseaddr);
11061 		bcm_bprintf(b, " ioctrl: %x\n", bpe->ioctrl);
11062 		bcm_bprintf(b, " iostatus: %x\n", bpe->iostatus);
11063 		bcm_bprintf(b, " resetctrl: %x\n", bpe->resetctrl);
11064 		bcm_bprintf(b, " resetstatus: %x\n", bpe->resetstatus);
11065 		bcm_bprintf(b, " errlogctrl: %x\n", bpe->errlogctrl);
11066 		bcm_bprintf(b, " errlogdone: %x\n", bpe->errlogdone);
11067 		bcm_bprintf(b, " errlogstatus: %x\n", bpe->errlogstatus);
11068 		bcm_bprintf(b, " errlogaddrlo: %x\n", bpe->errlogaddrlo);
11069 		bcm_bprintf(b, " errlogaddrhi: %x\n", bpe->errlogaddrhi);
11070 		bcm_bprintf(b, " errlogid: %x\n", bpe->errlogid);
11071 		bcm_bprintf(b, " errloguser: %x\n", bpe->errloguser);
11072 		bcm_bprintf(b, " errlogflags: %x\n", bpe->errlogflags);
11073 	}
11074 
11075 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MEMORY);
11076 	if (tlv)
11077 	{
11078 		const hnd_ext_trap_heap_err_t* hme;
11079 
11080 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MEMORY), tlv->len);
11081 		hme = (const hnd_ext_trap_heap_err_t *)tlv->data;
11082 		bcm_bprintf(b, " arena total: %d\n", hme->arena_total);
11083 		bcm_bprintf(b, " heap free: %d\n", hme->heap_free);
11084 		bcm_bprintf(b, " heap in use: %d\n", hme->heap_inuse);
11085 		bcm_bprintf(b, " mf count: %d\n", hme->mf_count);
11086 		bcm_bprintf(b, " stack LWM: %x\n", hme->stack_lwm);
11087 
11088 		bcm_bprintf(b, " Histogram:\n");
11089 		for (i = 0; i < (HEAP_HISTOGRAM_DUMP_LEN * 2); i += 2) {
11090 			if (hme->heap_histogm[i] == 0xfffe)
11091 				bcm_bprintf(b, " Others\t%d\t?\n", hme->heap_histogm[i + 1]);
11092 			else if (hme->heap_histogm[i] == 0xffff)
11093 				bcm_bprintf(b, " >= 256K\t%d\t?\n", hme->heap_histogm[i + 1]);
11094 			else
11095 				bcm_bprintf(b, " %d\t%d\t%d\n", hme->heap_histogm[i] << 2,
11096 					hme->heap_histogm[i + 1], (hme->heap_histogm[i] << 2)
11097 					* hme->heap_histogm[i + 1]);
11098 		}
11099 
11100 		bcm_bprintf(b, " Max free block: %d\n", hme->max_sz_free_blk[0] << 2);
11101 		for (i = 1; i < HEAP_MAX_SZ_BLKS_LEN; i++) {
11102 			bcm_bprintf(b, " Next lgst free block: %d\n", hme->max_sz_free_blk[i] << 2);
11103 		}
11104 	}
11105 
11106 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PCIE_Q);
11107 	if (tlv)
11108 	{
11109 		const hnd_ext_trap_pcie_mem_err_t* pqme;
11110 
11111 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PCIE_Q), tlv->len);
11112 		pqme = (const hnd_ext_trap_pcie_mem_err_t *)tlv->data;
11113 		bcm_bprintf(b, " d2h queue len: %x\n", pqme->d2h_queue_len);
11114 		bcm_bprintf(b, " d2h req queue len: %x\n", pqme->d2h_req_queue_len);
11115 	}
11116 
11117 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_WLC_STATE);
11118 	if (tlv)
11119 	{
11120 		const hnd_ext_trap_wlc_mem_err_t* wsme;
11121 
11122 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_WLC_STATE), tlv->len);
11123 		wsme = (const hnd_ext_trap_wlc_mem_err_t *)tlv->data;
11124 		bcm_bprintf(b, " instance: %d\n", wsme->instance);
11125 		bcm_bprintf(b, " associated: %d\n", wsme->associated);
11126 		bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
11127 		bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
11128 		bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
11129 		bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
11130 		bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]);
11131 		bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]);
11132 
11133 		if (tlv->len >= (sizeof(*wsme) * 2)) {
11134 			wsme++;
11135 			bcm_bprintf(b, "\n instance: %d\n", wsme->instance);
11136 			bcm_bprintf(b, " associated: %d\n", wsme->associated);
11137 			bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
11138 			bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
11139 			bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
11140 			bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
11141 			bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]);
11142 			bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]);
11143 		}
11144 	}
11145 
11146 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PHY);
11147 	if (tlv)
11148 	{
11149 		const hnd_ext_trap_phydbg_t* phydbg;
11150 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PHY), tlv->len);
11151 		phydbg = (const hnd_ext_trap_phydbg_t *)tlv->data;
11152 		bcm_bprintf(b, " err: 0x%x\n", phydbg->err);
11153 		bcm_bprintf(b, " RxFeStatus: 0x%x\n", phydbg->RxFeStatus);
11154 		bcm_bprintf(b, " TxFIFOStatus0: 0x%x\n", phydbg->TxFIFOStatus0);
11155 		bcm_bprintf(b, " TxFIFOStatus1: 0x%x\n", phydbg->TxFIFOStatus1);
11156 		bcm_bprintf(b, " RfseqMode: 0x%x\n", phydbg->RfseqMode);
11157 		bcm_bprintf(b, " RfseqStatus0: 0x%x\n", phydbg->RfseqStatus0);
11158 		bcm_bprintf(b, " RfseqStatus1: 0x%x\n", phydbg->RfseqStatus1);
11159 		bcm_bprintf(b, " RfseqStatus_Ocl: 0x%x\n", phydbg->RfseqStatus_Ocl);
11160 		bcm_bprintf(b, " RfseqStatus_Ocl1: 0x%x\n", phydbg->RfseqStatus_Ocl1);
11161 		bcm_bprintf(b, " OCLControl1: 0x%x\n", phydbg->OCLControl1);
11162 		bcm_bprintf(b, " TxError: 0x%x\n", phydbg->TxError);
11163 		bcm_bprintf(b, " bphyTxError: 0x%x\n", phydbg->bphyTxError);
11164 		bcm_bprintf(b, " TxCCKError: 0x%x\n", phydbg->TxCCKError);
11165 		bcm_bprintf(b, " TxCtrlWrd0: 0x%x\n", phydbg->TxCtrlWrd0);
11166 		bcm_bprintf(b, " TxCtrlWrd1: 0x%x\n", phydbg->TxCtrlWrd1);
11167 		bcm_bprintf(b, " TxCtrlWrd2: 0x%x\n", phydbg->TxCtrlWrd2);
11168 		bcm_bprintf(b, " TxLsig0: 0x%x\n", phydbg->TxLsig0);
11169 		bcm_bprintf(b, " TxLsig1: 0x%x\n", phydbg->TxLsig1);
11170 		bcm_bprintf(b, " TxVhtSigA10: 0x%x\n", phydbg->TxVhtSigA10);
11171 		bcm_bprintf(b, " TxVhtSigA11: 0x%x\n", phydbg->TxVhtSigA11);
11172 		bcm_bprintf(b, " TxVhtSigA20: 0x%x\n", phydbg->TxVhtSigA20);
11173 		bcm_bprintf(b, " TxVhtSigA21: 0x%x\n", phydbg->TxVhtSigA21);
11174 		bcm_bprintf(b, " txPktLength: 0x%x\n", phydbg->txPktLength);
11175 		bcm_bprintf(b, " txPsdulengthCtr: 0x%x\n", phydbg->txPsdulengthCtr);
11176 		bcm_bprintf(b, " gpioClkControl: 0x%x\n", phydbg->gpioClkControl);
11177 		bcm_bprintf(b, " gpioSel: 0x%x\n", phydbg->gpioSel);
11178 		bcm_bprintf(b, " pktprocdebug: 0x%x\n", phydbg->pktprocdebug);
11179 		for (i = 0; i < 3; i++)
11180 			bcm_bprintf(b, " gpioOut[%d]: 0x%x\n", i, phydbg->gpioOut[i]);
11181 	}
11182 
11183 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PSM_WD);
11184 	if (tlv)
11185 	{
11186 		const hnd_ext_trap_psmwd_t* psmwd;
11187 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PSM_WD), tlv->len);
11188 		psmwd = (const hnd_ext_trap_psmwd_t *)tlv;
11189 		bcm_bprintf(b, " version: 0x%x\n", psmwd->version);
11190 		bcm_bprintf(b, " maccontrol: 0x%x\n", psmwd->i32_maccontrol);
11191 		bcm_bprintf(b, " maccommand: 0x%x\n", psmwd->i32_maccommand);
11192 		bcm_bprintf(b, " macintstatus: 0x%x\n", psmwd->i32_macintstatus);
11193 		bcm_bprintf(b, " phydebug: 0x%x\n", psmwd->i32_phydebug);
11194 		bcm_bprintf(b, " clk_ctl_st: 0x%x\n", psmwd->i32_clk_ctl_st);
11195 		for (i = 0; i < 3; i++)
11196 			bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, psmwd->i32_psmdebug[i]);
11197 		bcm_bprintf(b, " gated clock en: 0x%x\n", psmwd->i16_0x1a8);
11198 		bcm_bprintf(b, " Rcv Fifo Ctrl: 0x%x\n", psmwd->i16_0x406);
11199 		bcm_bprintf(b, " Rx ctrl 1: 0x%x\n", psmwd->i16_0x408);
11200 		bcm_bprintf(b, " Rxe Status 1: 0x%x\n", psmwd->i16_0x41a);
11201 		bcm_bprintf(b, " Rxe Status 2: 0x%x\n", psmwd->i16_0x41c);
11202 		bcm_bprintf(b, " rcv wrd count 0: 0x%x\n", psmwd->i16_0x424);
11203 		bcm_bprintf(b, " rcv wrd count 1: 0x%x\n", psmwd->i16_0x426);
11204 		bcm_bprintf(b, " RCV_LFIFO_STS: 0x%x\n", psmwd->i16_0x456);
11205 		bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", psmwd->i16_0x480);
11206 		bcm_bprintf(b, " PSM BRC: 0x%x\n", psmwd->i16_0x490);
11207 		bcm_bprintf(b, " TXE CTRL: 0x%x\n", psmwd->i16_0x500);
11208 		bcm_bprintf(b, " TXE Status: 0x%x\n", psmwd->i16_0x50e);
11209 		bcm_bprintf(b, " TXE_xmtdmabusy: 0x%x\n", psmwd->i16_0x55e);
11210 		bcm_bprintf(b, " TXE_XMTfifosuspflush: 0x%x\n", psmwd->i16_0x566);
11211 		bcm_bprintf(b, " IFS Stat: 0x%x\n", psmwd->i16_0x690);
11212 		bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", psmwd->i16_0x692);
11213 		bcm_bprintf(b, " IFS_TX_DUR: 0x%x\n", psmwd->i16_0x694);
11214 		bcm_bprintf(b, " SLow_CTL: 0x%x\n", psmwd->i16_0x6a0);
11215 		bcm_bprintf(b, " TXE_AQM fifo Ready: 0x%x\n", psmwd->i16_0x838);
11216 		bcm_bprintf(b, " Dagg ctrl: 0x%x\n", psmwd->i16_0x8c0);
11217 		bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", psmwd->shm_prewds_cnt);
11218 		bcm_bprintf(b, " shm_txtplufl_cnt: 0x%x\n", psmwd->shm_txtplufl_cnt);
11219 		bcm_bprintf(b, " shm_txphyerr_cnt: 0x%x\n", psmwd->shm_txphyerr_cnt);
11220 	}
11221 
11222 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_SUSP);
11223 	if (tlv)
11224 	{
11225 		const hnd_ext_trap_macsusp_t* macsusp;
11226 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_SUSP), tlv->len);
11227 		macsusp = (const hnd_ext_trap_macsusp_t *)tlv;
11228 		bcm_bprintf(b, " version: %d\n", macsusp->version);
11229 		bcm_bprintf(b, " trap_reason: %d\n", macsusp->trap_reason);
11230 		bcm_bprintf(b, " maccontrol: 0x%x\n", macsusp->i32_maccontrol);
11231 		bcm_bprintf(b, " maccommand: 0x%x\n", macsusp->i32_maccommand);
11232 		bcm_bprintf(b, " macintstatus: 0x%x\n", macsusp->i32_macintstatus);
11233 		for (i = 0; i < 4; i++)
11234 			bcm_bprintf(b, " phydebug[%d]: 0x%x\n", i, macsusp->i32_phydebug[i]);
11235 		for (i = 0; i < 8; i++)
11236 			bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macsusp->i32_psmdebug[i]);
11237 		bcm_bprintf(b, " Rxe Status_1: 0x%x\n", macsusp->i16_0x41a);
11238 		bcm_bprintf(b, " Rxe Status_2: 0x%x\n", macsusp->i16_0x41c);
11239 		bcm_bprintf(b, " PSM BRC: 0x%x\n", macsusp->i16_0x490);
11240 		bcm_bprintf(b, " TXE Status: 0x%x\n", macsusp->i16_0x50e);
11241 		bcm_bprintf(b, " TXE xmtdmabusy: 0x%x\n", macsusp->i16_0x55e);
11242 		bcm_bprintf(b, " TXE XMTfifosuspflush: 0x%x\n", macsusp->i16_0x566);
11243 		bcm_bprintf(b, " IFS Stat: 0x%x\n", macsusp->i16_0x690);
11244 		bcm_bprintf(b, " IFS MEDBUSY CTR: 0x%x\n", macsusp->i16_0x692);
11245 		bcm_bprintf(b, " IFS TX DUR: 0x%x\n", macsusp->i16_0x694);
11246 		bcm_bprintf(b, " WEP CTL: 0x%x\n", macsusp->i16_0x7c0);
11247 		bcm_bprintf(b, " TXE AQM fifo Ready: 0x%x\n", macsusp->i16_0x838);
11248 		bcm_bprintf(b, " MHP status: 0x%x\n", macsusp->i16_0x880);
11249 		bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", macsusp->shm_prewds_cnt);
11250 		bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macsusp->shm_ucode_dbgst);
11251 	}
11252 
11253 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_WAKE);
11254 	if (tlv)
11255 	{
11256 		const hnd_ext_trap_macenab_t* macwake;
11257 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_WAKE), tlv->len);
11258 		macwake = (const hnd_ext_trap_macenab_t *)tlv;
11259 		bcm_bprintf(b, " version: 0x%x\n", macwake->version);
11260 		bcm_bprintf(b, " trap_reason: 0x%x\n", macwake->trap_reason);
11261 		bcm_bprintf(b, " maccontrol: 0x%x\n", macwake->i32_maccontrol);
11262 		bcm_bprintf(b, " maccommand: 0x%x\n", macwake->i32_maccommand);
11263 		bcm_bprintf(b, " macintstatus: 0x%x\n", macwake->i32_macintstatus);
11264 		for (i = 0; i < 8; i++)
11265 			bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macwake->i32_psmdebug[i]);
11266 		bcm_bprintf(b, " clk_ctl_st: 0x%x\n", macwake->i32_clk_ctl_st);
11267 		bcm_bprintf(b, " powerctl: 0x%x\n", macwake->i32_powerctl);
11268 		bcm_bprintf(b, " gated clock en: 0x%x\n", macwake->i16_0x1a8);
11269 		bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", macwake->i16_0x480);
11270 		bcm_bprintf(b, " PSM BRC: 0x%x\n", macwake->i16_0x490);
11271 		bcm_bprintf(b, " TSF CTL: 0x%x\n", macwake->i16_0x600);
11272 		bcm_bprintf(b, " IFS Stat: 0x%x\n", macwake->i16_0x690);
11273 		bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", macwake->i16_0x692);
11274 		bcm_bprintf(b, " Slow_CTL: 0x%x\n", macwake->i16_0x6a0);
11275 		bcm_bprintf(b, " Slow_FRAC: 0x%x\n", macwake->i16_0x6a6);
11276 		bcm_bprintf(b, " fast power up delay: 0x%x\n", macwake->i16_0x6a8);
11277 		bcm_bprintf(b, " Slow_PER: 0x%x\n", macwake->i16_0x6aa);
11278 		bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macwake->shm_ucode_dbgst);
11279 	}
11280 
11281 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BUS);
11282 	if (tlv)
11283 	{
11284 		const bcm_dngl_pcie_hc_t* hc;
11285 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BUS), tlv->len);
11286 		hc = (const bcm_dngl_pcie_hc_t *)tlv->data;
11287 		bcm_bprintf(b, " version: 0x%x\n", hc->version);
11288 		bcm_bprintf(b, " reserved: 0x%x\n", hc->reserved);
11289 		bcm_bprintf(b, " pcie_err_ind_type: 0x%x\n", hc->pcie_err_ind_type);
11290 		bcm_bprintf(b, " pcie_flag: 0x%x\n", hc->pcie_flag);
11291 		bcm_bprintf(b, " pcie_control_reg: 0x%x\n", hc->pcie_control_reg);
11292 		for (i = 0; i < HC_PCIEDEV_CONFIG_REGLIST_MAX; i++)
11293 			bcm_bprintf(b, " pcie_config_regs[%d]: 0x%x\n", i, hc->pcie_config_regs[i]);
11294 	}
11295 
11296 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_HMAP);
11297 	if (tlv)
11298 	{
11299 		const pcie_hmapviolation_t* hmap;
11300 		hmap = (const pcie_hmapviolation_t *)tlv->data;
11301 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_HMAP), tlv->len);
11302 		bcm_bprintf(b, " HMAP Vio Addr Low: 0x%x\n", hmap->hmap_violationaddr_lo);
11303 		bcm_bprintf(b, " HMAP Vio Addr Hi: 0x%x\n", hmap->hmap_violationaddr_hi);
11304 		bcm_bprintf(b, " HMAP Vio Info: 0x%x\n", hmap->hmap_violation_info);
11305 	}
11306 
11307 	return BCME_OK;
11308 }
11309 
11310 #ifdef BCMPCIE
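/**
 * Post a host timestamp request to the dongle. The timestamp TLVs are copied
 * into a preallocated DMA-able buffer (prot->hostts_req_buf) whose address is
 * carried in the request; only one such request may be outstanding at a time,
 * tracked via prot->hostts_req_buf_inuse.
 */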
11311 int
11312 dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlvs, uint16 tlv_len,
11313 	uint16 seqnum, uint16 xt_id)
11314 {
11315 	dhd_prot_t *prot = dhdp->prot;
11316 	host_timestamp_msg_t *ts_req;
11317 	unsigned long flags;
11318 	uint16 alloced = 0;
11319 	uchar *ts_tlv_buf;
11320 	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
11321 
11322 	if ((tlvs == NULL) || (tlv_len == 0)) {
11323 		DHD_ERROR(("%s: argument error tlv: %p, tlv_len %d\n",
11324 			__FUNCTION__, tlvs, tlv_len));
11325 		return -1;
11326 	}
11327 
11328 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
11329 
11330 	/* bail out if a host TS request is already pending */
11331 	if (prot->hostts_req_buf_inuse == TRUE) {
11332 		DHD_ERROR(("one host TS request already pending at device\n"));
11333 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
11334 		return -1;
11335 	}
11336 
11337 	/* Request for ctrl ring buffer space */
11338 	ts_req = (host_timestamp_msg_t*)dhd_prot_alloc_ring_space(dhdp, ctrl_ring,
11339 		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,	&alloced, FALSE);
11340 	if (ts_req == NULL) {
11341 		DHD_ERROR(("couldn't allocate space on msgring to send host TS request\n"));
11342 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
11343 		return -1;
11344 	}
11345 
11346 	/* Common msg buf hdr */
11347 	ts_req->msg.msg_type = MSG_TYPE_HOSTTIMSTAMP;
11348 	ts_req->msg.if_id = 0;
11349 	ts_req->msg.flags = ctrl_ring->current_phase;
11350 	ts_req->msg.request_id = DHD_H2D_HOSTTS_REQ_PKTID;
11351 
11352 	ts_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
11353 	ctrl_ring->seqnum++;
11354 
11355 	ts_req->xt_id = xt_id;
11356 	ts_req->seqnum = seqnum;
11357 	/* populate TS req buffer info */
11358 	ts_req->input_data_len = htol16(tlv_len);
11359 	ts_req->host_buf_addr.high = htol32(PHYSADDRHI(prot->hostts_req_buf.pa));
11360 	ts_req->host_buf_addr.low = htol32(PHYSADDRLO(prot->hostts_req_buf.pa));
11361 	/* copy the timestamp TLV payload into the preallocated DMA buffer */
11362 	ts_tlv_buf = (void *) prot->hostts_req_buf.va;
11363 	prot->hostts_req_buf_inuse = TRUE;
11364 	memcpy(ts_tlv_buf, tlvs, tlv_len);
11365 
11366 	OSL_CACHE_FLUSH((void *) prot->hostts_req_buf.va, tlv_len);
11367 
11368 	if (ISALIGNED(ts_tlv_buf, DMA_ALIGN_LEN) == FALSE) {
11369 		DHD_ERROR(("host TS req buffer address unaligned !!!!! \n"));
11370 	}
11371 
11372 	DHD_CTL(("submitted Host TS request request_id %d, data_len %d, tx_id %d, seq %d\n",
11373 		ts_req->msg.request_id, ts_req->input_data_len,
11374 		ts_req->xt_id, ts_req->seqnum));
11375 
11376 	/* update the ring's write pointer and raise the doorbell interrupt */
11377 	dhd_prot_ring_write_complete(dhdp, ctrl_ring, ts_req,
11378 		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
11379 
11380 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
11381 
11382 	return 0;
11383 } /* dhd_prot_send_host_timestamp */
11384 
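/* The accessors below share one get/set pattern: when 'set' is TRUE the new
 * value is stored; the current value is always returned.
 */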
11385 bool
11386 dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t *dhd,  bool enable, bool set)
11387 {
11388 	if (set)
11389 		dhd->prot->tx_ts_log_enabled = enable;
11390 
11391 	return dhd->prot->tx_ts_log_enabled;
11392 }
11393 
11394 bool
11395 dhd_prot_data_path_rx_timestamp_logging(dhd_pub_t *dhd,  bool enable, bool set)
11396 {
11397 	if (set)
11398 		dhd->prot->rx_ts_log_enabled = enable;
11399 
11400 	return dhd->prot->rx_ts_log_enabled;
11401 }
11402 
11403 bool
11404 dhd_prot_pkt_noretry(dhd_pub_t *dhd, bool enable, bool set)
11405 {
11406 	if (set)
11407 		dhd->prot->no_retry = enable;
11408 
11409 	return dhd->prot->no_retry;
11410 }
11411 
11412 bool
11413 dhd_prot_pkt_noaggr(dhd_pub_t *dhd, bool enable, bool set)
11414 {
11415 	if (set)
11416 		dhd->prot->no_aggr = enable;
11417 
11418 	return dhd->prot->no_aggr;
11419 }
11420 
11421 bool
11422 dhd_prot_pkt_fixed_rate(dhd_pub_t *dhd, bool enable, bool set)
11423 {
11424 	if (set)
11425 		dhd->prot->fixed_rate = enable;
11426 
11427 	return dhd->prot->fixed_rate;
11428 }
11429 #endif /* BCMPCIE */
11430 
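/**
 * Free the host-memory DMA buffers backing the H2D write-index and D2H
 * read-index arrays.
 */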
11431 void
11432 dhd_prot_dma_indx_free(dhd_pub_t *dhd)
11433 {
11434 	dhd_prot_t *prot = dhd->prot;
11435 
11436 	dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
11437 	dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
11438 }
11439 
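/**
 * Repost timestamp rx buffers, but only if TS buffer posting is configured
 * (max_tsbufpost > 0).
 */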
11440 void
11441 dhd_msgbuf_delay_post_ts_bufs(dhd_pub_t *dhd)
11442 {
11443 	if (dhd->prot->max_tsbufpost > 0)
11444 		dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
11445 }
11446 
11447 static void BCMFASTPATH
11448 dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf)
11449 {
11450 	DHD_ERROR(("Timesunc feature not compiled in but GOT FW TS message\n"));
11451 
11452 }
11453 
11454 uint16
11455 dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp)
11456 {
11457 	return dhdp->prot->ioctl_trans_id;
11458 }
11459 
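/**
 * Report the host SCB buffer's virtual address and length. When HSCB is
 * disabled, a zero length is reported (rather than an error) so the caller's
 * iovar path stays quiet.
 */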
11460 int dhd_get_hscb_info(dhd_pub_t *dhd, void ** va, uint32 *len)
11461 {
11462 	if (!dhd->hscb_enable) {
11463 		if (len) {
11464 			/* prevent "Operation not supported" dhd message */
11465 			*len = 0;
11466 			return BCME_OK;
11467 		}
11468 		return BCME_UNSUPPORTED;
11469 	}
11470 
11471 	if (va) {
11472 		*va = dhd->prot->host_scb_buf.va;
11473 	}
11474 	if (len) {
11475 		*len = dhd->prot->host_scb_buf.len;
11476 	}
11477 
11478 	return BCME_OK;
11479 }
11480 
11481 #ifdef DHD_BUS_MEM_ACCESS
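/**
 * Copy 'length' bytes from the host SCB buffer at 'offset' into 'buff'. The
 * offset + length bound is computed in 64-bit arithmetic so the check cannot
 * wrap on 32-bit inputs.
 *
 * Usage sketch (hypothetical values):
 *   char tmp[64];
 *   if (dhd_get_hscb_buff(dhd, 0, sizeof(tmp), tmp) == BCME_OK) { ... }
 */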
11482 int dhd_get_hscb_buff(dhd_pub_t *dhd, uint32 offset, uint32 length, void * buff)
11483 {
11484 	if (!dhd->hscb_enable) {
11485 		return BCME_UNSUPPORTED;
11486 	}
11487 
11488 	if (dhd->prot->host_scb_buf.va == NULL ||
11489 		((uint64)offset + length > (uint64)dhd->prot->host_scb_buf.len)) {
11490 		return BCME_BADADDR;
11491 	}
11492 
11493 	memcpy(buff, (char*)dhd->prot->host_scb_buf.va + offset, length);
11494 
11495 	return BCME_OK;
11496 }
11497 #endif /* DHD_BUS_MEM_ACCESS */
11498 
11499 #ifdef DHD_HP2P
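/* The three accessors below hold uint16 HP2P tuning values (packet and time
 * thresholds, packet expiry); values are truncated to 16 bits on set.
 */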
11500 uint32
11501 dhd_prot_pkt_threshold(dhd_pub_t *dhd, bool set, uint32 val)
11502 {
11503 	if (set)
11504 		dhd->pkt_thresh = (uint16)val;
11505 
11506 	val = dhd->pkt_thresh;
11507 
11508 	return val;
11509 }
11510 
11511 uint32
11512 dhd_prot_time_threshold(dhd_pub_t *dhd, bool set, uint32 val)
11513 {
11514 	if (set)
11515 		dhd->time_thresh = (uint16)val;
11516 
11517 	val = dhd->time_thresh;
11518 
11519 	return val;
11520 }
11521 
11522 uint32
11523 dhd_prot_pkt_expiry(dhd_pub_t *dhd, bool set, uint32 val)
11524 {
11525 	if (set)
11526 		dhd->pkt_expiry = (uint16)val;
11527 
11528 	val = dhd->pkt_expiry;
11529 
11530 	return val;
11531 }
11532 
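/**
 * Get/set the HP2P enable state. The low nibble of 'enable' controls p2p
 * traffic and the next nibble infra traffic; enabling either switches the
 * flow priority map to TID-based, while disabling reverts to AC-based.
 * Returns the current state packed in the same nibble layout, so e.g. a
 * hypothetical dhd_prot_hp2p_enable(dhd, TRUE, 0x11) enables both and
 * returns 0x11.
 */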
11533 uint8
11534 dhd_prot_hp2p_enable(dhd_pub_t *dhd, bool set, int enable)
11535 {
11536 	uint8 ret = 0;
11537 	if (set) {
11538 		dhd->hp2p_enable = (enable & 0xf) ? TRUE : FALSE;
11539 		dhd->hp2p_infra_enable = ((enable >> 4) & 0xf) ? TRUE : FALSE;
11540 
11541 		if (enable) {
11542 			dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_TID_MAP);
11543 		} else {
11544 			dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_AC_MAP);
11545 		}
11546 	}
11547 	ret = dhd->hp2p_infra_enable ? 0x1:0x0;
11548 	ret <<= 4;
11549 	ret |= dhd->hp2p_enable ? 0x1:0x0;
11550 
11551 	return ret;
11552 }
11553 
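/**
 * Bin an rx completion latency sample into the HP2P rx histogram. The low 10
 * bits of ts->high carry the duration, scaled by HP2P_TIME_SCALE (note the
 * rx path divides by 100 where the tx path below divides by 1000);
 * out-of-range samples land in the last bin and are logged.
 */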
11554 static void
11555 dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus)
11556 {
11557 	ts_timestamp_t *ts = (ts_timestamp_t *)&rxstatus->ts;
11558 	hp2p_info_t *hp2p_info;
11559 	uint32 dur1;
11560 
11561 	hp2p_info = &dhd->hp2p_info[0];
11562 	dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 100;
11563 
11564 	if (dur1 > (MAX_RX_HIST_BIN - 1)) {
11565 		dur1 = MAX_RX_HIST_BIN - 1;
11566 		DHD_ERROR(("%s: 0x%x 0x%x\n",
11567 			__FUNCTION__, ts->low, ts->high));
11568 	}
11569 
11570 	hp2p_info->rx_t0[dur1 % MAX_RX_HIST_BIN]++;
11571 	return;
11572 }
11573 
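/**
 * Bin a tx completion's two 10-bit duration fields (bits 0-9 and 10-19 of
 * ts->high) into the tx_t0/tx_t1 histograms of the flow's hp2p_info slot.
 */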
11574 static void
11575 dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus)
11576 {
11577 	ts_timestamp_t *ts = (ts_timestamp_t *)&txstatus->ts;
11578 	uint16 flowid = txstatus->compl_hdr.flow_ring_id;
11579 	uint32 hp2p_flowid, dur1, dur2;
11580 	hp2p_info_t *hp2p_info;
11581 
11582 	hp2p_flowid = dhd->bus->max_submission_rings -
11583 		dhd->bus->max_cmn_rings - flowid + 1;
11584 	hp2p_info = &dhd->hp2p_info[hp2p_flowid];
11586 
11587 	dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 1000;
11588 	if (dur1 > (MAX_TX_HIST_BIN - 1)) {
11589 		dur1 = MAX_TX_HIST_BIN - 1;
11590 		DHD_ERROR(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
11591 	}
11592 	hp2p_info->tx_t0[dur1 % MAX_TX_HIST_BIN]++;
11593 
11594 	dur2 = (((ts->high >> 10) & 0x3FF) * HP2P_TIME_SCALE) / 1000;
11595 	if (dur2 > (MAX_TX_HIST_BIN - 1)) {
11596 		dur2 = MAX_TX_HIST_BIN - 1;
11597 		DHD_ERROR(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
11598 	}
11599 
11600 	hp2p_info->tx_t1[dur2 % MAX_TX_HIST_BIN]++;
11601 	return;
11602 }
11603 
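/**
 * hrtimer callback for HP2P burst aggregation: when the time threshold
 * expires before the packet threshold is reached, flush the flow ring's
 * pending tx descriptors to the dongle.
 */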
11604 enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer)
11605 {
11606 	hp2p_info_t *hp2p_info;
11607 	unsigned long flags;
11608 	dhd_pub_t *dhdp;
11609 
11610 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
11611 #pragma GCC diagnostic push
11612 #pragma GCC diagnostic ignored "-Wcast-qual"
11613 #endif // endif
11614 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
11615 	hp2p_info = container_of(timer, hp2p_info_t, timer.timer);
11616 #else
11617 	hp2p_info = container_of(timer, hp2p_info_t, timer);
11618 #endif // endif
11619 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
11620 #pragma GCC diagnostic pop
11621 #endif // endif
11622 	dhdp = hp2p_info->dhd_pub;
11623 	if (!dhdp) {
11624 		goto done;
11625 	}
11626 
11627 	DHD_INFO(("%s: pend_item = %d flowid = %d\n",
11628 		__FUNCTION__, ((msgbuf_ring_t *)hp2p_info->ring)->pend_items_count,
11629 		hp2p_info->flowid));
11630 
11631 	flags = dhd_os_hp2plock(dhdp);
11632 
11633 	dhd_prot_txdata_write_flush(dhdp, hp2p_info->flowid);
11634 	hp2p_info->hrtimer_init = FALSE;
11635 	hp2p_info->num_timer_limit++;
11636 
11637 	dhd_os_hp2punlock(dhdp, flags);
11638 done:
11639 	return HRTIMER_NORESTART;
11640 }
11641 
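/**
 * HP2P burst aggregation: once pkt_thresh descriptors are pending, flush
 * immediately and cancel the timer; otherwise arm the hrtimer (once per
 * burst). ktime_set(0, dhd->time_thresh * 1000) is in nanoseconds, so
 * time_thresh is assumed to be in microseconds.
 */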
11642 static void
11643 dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid)
11644 {
11645 	hp2p_info_t *hp2p_info;
11646 	uint16 hp2p_flowid;
11647 
11648 	hp2p_flowid = dhd->bus->max_submission_rings -
11649 		dhd->bus->max_cmn_rings - flowid + 1;
11650 	hp2p_info = &dhd->hp2p_info[hp2p_flowid];
11651 
11652 	if (ring->pend_items_count == dhd->pkt_thresh) {
11653 		dhd_prot_txdata_write_flush(dhd, flowid);
11654 
11655 		hp2p_info->hrtimer_init = FALSE;
11656 		hp2p_info->ring = NULL;
11657 		hp2p_info->num_pkt_limit++;
11658 #if LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21)
11659 		tasklet_hrtimer_cancel(&hp2p_info->timer);
11660 #else
11661 		hrtimer_cancel(&hp2p_info->timer);
11662 #endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21) */
11663 		DHD_INFO(("%s: cancel hrtimer for flowid = %d \n"
11664 			"hp2p_flowid = %d pkt_thresh = %d\n",
11665 			__FUNCTION__, flowid, hp2p_flowid, dhd->pkt_thresh));
11666 	} else {
11667 		if (hp2p_info->hrtimer_init == FALSE) {
11668 			hp2p_info->hrtimer_init = TRUE;
11669 			hp2p_info->flowid = flowid;
11670 			hp2p_info->dhd_pub = dhd;
11671 			hp2p_info->ring = ring;
11672 			hp2p_info->num_timer_start++;
11673 #if LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21)
11674 			tasklet_hrtimer_start(&hp2p_info->timer,
11675 				ktime_set(0, dhd->time_thresh * 1000), HRTIMER_MODE_REL);
11676 #else
11677 			hrtimer_start(&hp2p_info->timer,
11678 				ktime_set(0, dhd->time_thresh * 1000), HRTIMER_MODE_REL_SOFT);
11679 #endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21) */
11680 
11681 			DHD_INFO(("%s: start hrtimer for flowid = %d hp2_flowid = %d\n",
11682 					__FUNCTION__, flowid, hp2p_flowid));
11683 		}
11684 	}
11685 	return;
11686 }
11687 
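/**
 * Stamp an HP2P tx descriptor: the (otherwise unused) metadata buffer address
 * fields are repurposed to carry a microsecond-resolution local_clock()
 * timestamp, and the packet expiry time is set from dhd->pkt_expiry.
 */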
11688 static void
11689 dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc)
11690 {
11691 	uint64 ts;
11692 
11693 	ts = local_clock();
11694 	do_div(ts, 1000);
11695 
11696 	txdesc->metadata_buf_len = 0;
11697 	txdesc->metadata_buf_addr.high_addr = htol32((ts >> 32) & 0xFFFFFFFF);
11698 	txdesc->metadata_buf_addr.low_addr = htol32(ts & 0xFFFFFFFF);
11699 	txdesc->exp_time = dhd->pkt_expiry;
11700 
11701 	DHD_INFO(("%s: metadata_high = 0x%x metadata_low = 0x%x exp_time = %x\n",
11702 		__FUNCTION__, txdesc->metadata_buf_addr.high_addr,
11703 		txdesc->metadata_buf_addr.low_addr,
11704 		txdesc->exp_time));
11705 
11706 	return;
11707 }
11708 #endif /* DHD_HP2P */
11709 
11710 #ifdef DHD_MAP_LOGGING
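/**
 * On an SMMU fault, dump protocol debug state and the OSL DMA mapping log,
 * then, when firmware core dumps are enabled, trigger a memory dump tagged
 * DUMP_TYPE_SMMU_FAULT.
 */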
11711 void
11712 dhd_prot_smmu_fault_dump(dhd_pub_t *dhdp)
11713 {
11714 	dhd_prot_debug_info_print(dhdp);
11715 	OSL_DMA_MAP_DUMP(dhdp->osh);
11716 #ifdef DHD_MAP_PKTID_LOGGING
11717 	dhd_pktid_logging_dump(dhdp);
11718 #endif /* DHD_MAP_PKTID_LOGGING */
11719 #ifdef DHD_FW_COREDUMP
11720 	dhdp->memdump_type = DUMP_TYPE_SMMU_FAULT;
11721 #ifdef DNGL_AXI_ERROR_LOGGING
11722 	dhdp->memdump_enabled = DUMP_MEMFILE;
11723 	dhd_bus_get_mem_dump(dhdp);
11724 #else
11725 	dhdp->memdump_enabled = DUMP_MEMONLY;
11726 	dhd_bus_mem_dump(dhdp);
11727 #endif /* DNGL_AXI_ERROR_LOGGING */
11728 #endif /* DHD_FW_COREDUMP */
11729 }
11730 #endif /* DHD_MAP_LOGGING */
11731