xref: /OK3568_Linux_fs/kernel/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_msgbuf.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /**
2  * @file definition of host message ring functionality
3  * Provides type definitions and function prototypes used to link the
4  * DHD OS, bus, and protocol modules.
5  *
6  * Copyright (C) 2020, Broadcom.
7  *
8  *      Unless you and Broadcom execute a separate written software license
9  * agreement governing use of this software, this software is licensed to you
10  * under the terms of the GNU General Public License version 2 (the "GPL"),
11  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
12  * following added to such license:
13  *
14  *      As a special exception, the copyright holders of this software give you
15  * permission to link this software with independent modules, and to copy and
16  * distribute the resulting executable under terms of your choice, provided that
17  * you also meet, for each linked independent module, the terms and conditions of
18  * the license of that module.  An independent module is a module which is not
19  * derived from this software.  The special exception does not apply to any
20  * modifications of the software.
21  *
22  *
23  * <<Broadcom-WL-IPTag/Open:>>
24  *
25  * $Id$
26  */
27 
28 /** XXX Twiki: [PCIeFullDongleArchitecture] */
29 
30 #include <typedefs.h>
31 #include <osl.h>
32 
33 #include <bcmutils.h>
34 #include <bcmmsgbuf.h>
35 #include <bcmendian.h>
36 #include <bcmstdlib_s.h>
37 
38 #include <dngl_stats.h>
39 #include <dhd.h>
40 #include <dhd_proto.h>
41 
42 #ifdef BCMDBUS
43 #include <dbus.h>
44 #else
45 #include <dhd_bus.h>
46 #endif /* BCMDBUS */
47 
48 #include <dhd_dbg.h>
49 #include <siutils.h>
50 #include <dhd_debug.h>
51 #ifdef EXT_STA
52 #include <wlc_cfg.h>
53 #include <wlc_pub.h>
54 #include <wl_port_if.h>
55 #endif /* EXT_STA */
56 
57 #include <dhd_flowring.h>
58 
59 #include <pcie_core.h>
60 #include <bcmpcie.h>
61 #include <dhd_pcie.h>
62 #ifdef DHD_TIMESYNC
63 #include <dhd_timesync.h>
64 #endif /* DHD_TIMESYNC */
65 #ifdef DHD_PKTTS
66 #include <bcmudp.h>
67 #include <bcmtcp.h>
68 #endif /* DHD_PKTTS */
69 #include <dhd_config.h>
70 
71 #if defined(DHD_LB)
72 #if !defined(LINUX) && !defined(linux) && !defined(OEM_ANDROID)
73 #error "DHD Loadbalancing only supported on LINUX | OEM_ANDROID"
74 #endif /* !LINUX && !OEM_ANDROID */
75 #include <linux/cpu.h>
76 #include <bcm_ring.h>
77 #define DHD_LB_WORKQ_SZ			    (8192)
78 #define DHD_LB_WORKQ_SYNC           (16)
79 #define DHD_LB_WORK_SCHED           (DHD_LB_WORKQ_SYNC * 2)
80 #endif /* DHD_LB */
81 
82 #include <etd.h>
83 #include <hnd_debug.h>
84 #include <bcmtlv.h>
85 #include <hnd_armtrap.h>
86 #include <dnglevent.h>
87 
88 #ifdef DHD_PKT_LOGGING
89 #include <dhd_pktlog.h>
90 #include <dhd_linux_pktdump.h>
91 #endif /* DHD_PKT_LOGGING */
92 #ifdef DHD_EWPR_VER2
93 #include <dhd_bitpack.h>
94 #endif /* DHD_EWPR_VER2 */
95 
96 extern char dhd_version[];
97 extern char fw_version[];
98 
99 /**
100  * Host configures a soft doorbell for d2h rings, by specifying a 32bit host
101  * address where a value must be written. Host may also request interrupt
102  * coalescing on this soft doorbell.
103  * Use Case: Hosts with network processors may register with the dongle the
104  * network processor's thread wakeup register and a value corresponding to the
105  * core/thread context. Dongle will issue a write transaction <address,value>
106  * to the PCIE RC, which the host will need to route to the mapped register
107  * space.
108  */
109 /* #define DHD_D2H_SOFT_DOORBELL_SUPPORT */
110 
111 /* Dependency Check */
112 #if defined(IOCTLRESP_USE_CONSTMEM) && defined(DHD_USE_STATIC_CTRLBUF)
113 #error "DHD_USE_STATIC_CTRLBUF is NOT working with DHD_USE_OSLPKT_FOR_RESPBUF"
114 #endif /* IOCTLRESP_USE_CONSTMEM && DHD_USE_STATIC_CTRLBUF */
115 
116 #define RETRIES 2		/* # of retries to retrieve matching ioctl response */
117 
118 #if defined(DHD_HTPUT_TUNABLES)
119 #define DEFAULT_RX_BUFFERS_TO_POST		1024
120 #define RX_BUF_BURST				64 /* Rx buffers for MSDU Data */
121 #define RXBUFPOST_THRESHOLD			64 /* Rxbuf post threshold */
122 #else
123 #define DEFAULT_RX_BUFFERS_TO_POST		256
124 #define RX_BUF_BURST				32 /* Rx buffers for MSDU Data */
125 #define RXBUFPOST_THRESHOLD			32 /* Rxbuf post threshold */
126 #endif /* DHD_HTPUT_TUNABLES */
127 
128 /* Read index update Magic sequence */
129 #define DHD_DMA_INDX_SEQ_H2D_DB_MAGIC	0xDDDDDDDAu
130 #define DHD_RDPTR_UPDATE_H2D_DB_MAGIC(ring)	(0xDD000000 | (ring->idx << 16u) | ring->rd)
131 /* Write index update Magic sequence */
132 #define DHD_WRPTR_UPDATE_H2D_DB_MAGIC(ring)	(0xFF000000 | (ring->idx << 16u) | ring->wr)
133 #define DHD_AGGR_H2D_DB_MAGIC	0xFFFFFFFAu
134 
135 #define DHD_STOP_QUEUE_THRESHOLD	200
136 #define DHD_START_QUEUE_THRESHOLD	100
137 
138 #define RX_DMA_OFFSET		8 /* Mem2mem DMA inserts an extra 8 bytes */
139 #define IOCT_RETBUF_SIZE	(RX_DMA_OFFSET + WLC_IOCTL_MAXLEN)
140 
141 /* flags for ioctl pending status */
142 #define MSGBUF_IOCTL_ACK_PENDING	(1<<0)
143 #define MSGBUF_IOCTL_RESP_PENDING	(1<<1)
144 
145 #define DHD_IOCTL_REQ_PKTBUFSZ		2048
146 #define MSGBUF_IOCTL_MAX_RQSTLEN	(DHD_IOCTL_REQ_PKTBUFSZ - H2DRING_CTRL_SUB_ITEMSIZE)
147 
148 /**
149  * XXX: DMA_ALIGN_LEN use is overloaded:
150  * - as align bits: in DMA_ALLOC_CONSISTENT 1 << 4
151  * - in ensuring that a buffer's va is 4 Byte aligned
152  * - in rounding up a buffer length to 4 Bytes.
153  */
154 #define DMA_ALIGN_LEN		4
155 
156 #define DMA_D2H_SCRATCH_BUF_LEN	8
157 #define DMA_XFER_LEN_LIMIT	0x400000
158 
159 #ifdef BCM_HOST_BUF
160 #ifndef DMA_HOST_BUFFER_LEN
161 #define DMA_HOST_BUFFER_LEN	0x200000
162 #endif
163 #endif /* BCM_HOST_BUF */
164 
165 #define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ		8192
166 
167 #define DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D		1
168 #define DHD_FLOWRING_MAX_EVENTBUF_POST			32
169 #define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST		8
170 #define DHD_H2D_INFORING_MAX_BUF_POST			32
171 #ifdef BTLOG
172 #define DHD_H2D_BTLOGRING_MAX_BUF_POST			32
173 #endif	/* BTLOG */
174 #define DHD_MAX_TSBUF_POST			8
175 
176 #define DHD_PROT_FUNCS	43
177 
178 /* Length of buffer in host for bus throughput measurement */
179 #define DHD_BUS_TPUT_BUF_LEN 2048
180 
181 #define TXP_FLUSH_NITEMS
182 
183 /* optimization to write "n" tx items at a time to ring */
184 #define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT	48
185 
186 #define RING_NAME_MAX_LENGTH		24
187 #define CTRLSUB_HOSTTS_MEESAGE_SIZE		1024
188 /* Giving room before ioctl_trans_id rollsover. */
189 #define BUFFER_BEFORE_ROLLOVER 300
190 
191 /* 512K memory + 32K registers */
192 #define SNAPSHOT_UPLOAD_BUF_SIZE	((512 + 32) * 1024)
193 
194 struct msgbuf_ring; /* ring context for common and flow rings */
195 
196 #ifdef DHD_HMAPTEST
197 /* 5 * DMA_CONSISTENT_ALIGN as different tests use up to the 4th page */
198 #define HMAP_SANDBOX_BUFFER_LEN	(DMA_CONSISTENT_ALIGN * 5) /* for a 4k page this is 20K */
199 /**
200  * for D11 DMA HMAPTEST the states are as follows
201  * iovar sets ACTIVE state
202  * next TXPOST / RXPOST sets POSTED state
203  * on TXCPL / RXCPL POSTED + pktid match does buffer free and state changes to INACTIVE
204  * This ensures that on an iovar only one buffer is replaced from sandbox area
205  */
206 #define HMAPTEST_D11_TX_INACTIVE 0
207 #define HMAPTEST_D11_TX_ACTIVE 1
208 #define HMAPTEST_D11_TX_POSTED 2
209 
210 #define HMAPTEST_D11_RX_INACTIVE 0
211 #define HMAPTEST_D11_RX_ACTIVE 1
212 #define HMAPTEST_D11_RX_POSTED 2
213 #endif /* DHD_HMAPTEST */
214 
215 #define PCIE_DMA_LOOPBACK	0
216 #define D11_DMA_LOOPBACK	1
217 #define BMC_DMA_LOOPBACK	2
218 
219 /**
220  * PCIE D2H DMA Complete Sync Modes
221  *
222  * Firmware may interrupt the host, prior to the D2H Mem2Mem DMA completes into
223  * Host system memory. A WAR using one of 4 approaches is needed:
224  * 1. Dongle places a modulo-253 seqnum in last word of each D2H message
225  * 2. XOR Checksum, with epoch# in each work item. Dongle builds an XOR checksum
226  *    and writes it in the last word of each work item. Each work item carries a
227  *    seqnum = sequence number % 253.
228  *
229  * 3. Read Barrier: Dongle does a host memory read access prior to posting an
230  *    interrupt, ensuring that D2H data transfer indeed completed.
231  * 4. Dongle DMA's all indices after producing items in the D2H ring, flushing
232  *    ring contents before the indices.
233  *
234  * Host does not sync for DMA to complete with option #3 or #4, and a noop sync
235  * callback (see dhd_prot_d2h_sync_none) may be bound.
236  *
237  * Dongle advertizes host side sync mechanism requirements.
238  */
239 
240 #define PCIE_D2H_SYNC_WAIT_TRIES    (512U)
241 #define PCIE_D2H_SYNC_NUM_OF_STEPS  (5U)
242 #define PCIE_D2H_SYNC_DELAY         (100UL)	/* in terms of usecs */
243 
244 #ifdef DHD_REPLACE_LOG_INFO_TO_TRACE
245 #define DHD_MSGBUF_INFO DHD_TRACE
246 #else
247 #define DHD_MSGBUF_INFO DHD_INFO
248 #endif /* DHD_REPLACE_LOG_INFO_TO_TRACE */
249 
250 /**
251  * Custom callback attached based upon D2H DMA Sync mode advertized by dongle.
252  *
253  * On success: return cmn_msg_hdr_t::msg_type
254  * On failure: return 0 (invalid msg_type)
255  */
256 typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
257                                 volatile cmn_msg_hdr_t *msg, int msglen);
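/*
 * Illustrative consumer-side sketch (an assumption for documentation; the
 * actual dispatch code lives elsewhere in this file): before handling a
 * freshly read work item, the bound sync callback validates that the
 * mem2mem DMA really completed.
 *
 *	uint8 msg_type = dhd->prot->d2h_sync_cb(dhd, ring,
 *		(volatile cmn_msg_hdr_t *)msg, ring->item_len);
 *	if (msg_type == MSG_TYPE_INVALID)
 *		return;		(DMA not yet complete; livelock was logged)
 */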
258 
259 /**
260  * Custom callback attached based upon D2H DMA Sync mode advertized by dongle.
261  * For EDL messages.
262  *
263  * On success: return cmn_msg_hdr_t::msg_type
264  * On failure: return 0 (invalid msg_type)
265  */
266 #ifdef EWP_EDL
267 typedef int (* d2h_edl_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
268                                 volatile cmn_msg_hdr_t *msg);
269 #endif /* EWP_EDL */
270 
271 /*
272  * +----------------------------------------------------------------------------
273  *
274  * RingIds and FlowId are not equivalent as ringids include D2H rings whereas
275  * flowids do not.
276  *
277  * Dongle advertizes the max H2D rings, as max_sub_queues = 'N' which includes
278  * the H2D common rings as well as the (N-BCMPCIE_H2D_COMMON_MSGRINGS) flowrings
279  *
280  * Here is a sample mapping (based on PCIE Full Dongle Rev5) where,
281  *  BCMPCIE_H2D_COMMON_MSGRINGS = 2, i.e. 2 H2D common rings,
282  *  BCMPCIE_COMMON_MSGRINGS     = 5, i.e. include 3 D2H common rings.
283  *
284  *  H2D Control  Submit   RingId = 0        FlowId = 0 reserved never allocated
285  *  H2D RxPost   Submit   RingId = 1        FlowId = 1 reserved never allocated
286  *
287  *  D2H Control  Complete RingId = 2
288  *  D2H Transmit Complete RingId = 3
289  *  D2H Receive  Complete RingId = 4
290  *
291  *  H2D TxPost   FLOWRING RingId = 5         FlowId = 2     (1st flowring)
292  *  H2D TxPost   FLOWRING RingId = 6         FlowId = 3     (2nd flowring)
293  *  H2D TxPost   FLOWRING RingId = 5 + (N-1) FlowId = (N-1) (Nth flowring)
294  *
295  * When TxPost FlowId(s) are allocated, the FlowIds [0..FLOWID_RESERVED) are
296  * unused, where FLOWID_RESERVED is BCMPCIE_H2D_COMMON_MSGRINGS.
297  *
298  * Example: when a system supports 4 bc/mc and 128 uc flowrings, with
299  * BCMPCIE_H2D_COMMON_MSGRINGS = 2 and BCMPCIE_COMMON_MSGRINGS = 5, the
300  * FlowId values would be in the range [2..133] and the corresponding
301  * RingId values would be in the range [5..136].
302  *
303  * The flowId allocator may choose to allocate FlowIds:
304  *   bc/mc (per virtual interface) in one consecutive range [2..(2+VIFS))
305  *   X# of uc flowids in consecutive ranges (per station Id), where X is the
306  *   packet's access category (e.g. 4 uc flowids per station).
307  *
308  * CAUTION:
309  * When DMA indices array feature is used, RingId=5, corresponding to the 0th
310  * FLOWRING, will actually use the FlowId as index into the H2D DMA index,
311  * since the FlowId truly represents the index in the H2D DMA indices array.
312  *
313  * Likewise, in the D2H direction, the RingId - BCMPCIE_H2D_COMMON_MSGRINGS,
314  * will represent the index in the D2H DMA indices array.
315  *
316  * +----------------------------------------------------------------------------
317  */
318 
319 /* First TxPost Flowring Id */
320 #define DHD_FLOWRING_START_FLOWID   BCMPCIE_H2D_COMMON_MSGRINGS
321 
322 /* Determine whether a ringid belongs to a TxPost flowring */
323 #define DHD_IS_FLOWRING(ringid, max_flow_rings) \
324 	((ringid) >= BCMPCIE_COMMON_MSGRINGS && \
325 	(ringid) < ((max_flow_rings) + BCMPCIE_COMMON_MSGRINGS))
326 
327 /* Convert a H2D TxPost FlowId to a MsgBuf RingId */
328 #define DHD_FLOWID_TO_RINGID(flowid) \
329 	(BCMPCIE_COMMON_MSGRINGS + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS))
330 
331 /* Convert a MsgBuf RingId to a H2D TxPost FlowId */
332 #define DHD_RINGID_TO_FLOWID(ringid) \
333 	(BCMPCIE_H2D_COMMON_MSGRINGS + ((ringid) - BCMPCIE_COMMON_MSGRINGS))
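/*
 * Worked example (illustrative, using the Rev5 sample values above where
 * BCMPCIE_H2D_COMMON_MSGRINGS = 2 and BCMPCIE_COMMON_MSGRINGS = 5):
 *	DHD_FLOWID_TO_RINGID(2) = 5 + (2 - 2) = 5   (1st flowring)
 *	DHD_FLOWID_TO_RINGID(3) = 5 + (3 - 2) = 6   (2nd flowring)
 *	DHD_RINGID_TO_FLOWID(6) = 2 + (6 - 5) = 3
 */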
334 
335 /* Convert a H2D MsgBuf RingId to an offset index into the H2D DMA indices array
336  * This may be used for the H2D DMA WR index array or H2D DMA RD index array or
337  * any array of H2D rings.
338  */
339 #define DHD_H2D_RING_OFFSET(ringid) \
340 	(((ringid) >= BCMPCIE_COMMON_MSGRINGS) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid))
341 
342 /* Convert a H2D MsgBuf Flowring Id to an offset index into the H2D DMA indices array
343  * This may be used for IFRM.
344  */
345 #define DHD_H2D_FRM_FLOW_RING_OFFSET(ringid) \
346 	((ringid) - BCMPCIE_COMMON_MSGRINGS)
347 
348 /* Convert a D2H MsgBuf RingId to an offset index into the D2H DMA indices array
349  * This may be used for the D2H DMA WR index array or D2H DMA RD index array or
350  * any array of D2H rings.
351  * d2h debug ring is located at the end, i.e. after all the tx flow rings and h2d debug ring
352  * max_h2d_rings: total number of h2d rings
353  */
354 #define DHD_D2H_RING_OFFSET(ringid, max_h2d_rings) \
355 	((ringid) > (max_h2d_rings) ? \
356 		((ringid) - max_h2d_rings) : \
357 		((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS))
358 
359 /* Convert a D2H DMA Indices Offset to a RingId */
360 #define DHD_D2H_RINGID(offset) \
361 	((offset) + BCMPCIE_H2D_COMMON_MSGRINGS)
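/*
 * Worked example (illustrative, again with 2 H2D and 3 D2H common rings and
 * ringid <= max_h2d_rings): the D2H common completion rings with RingIds
 * 2, 3 and 4 map to D2H DMA index array offsets 0, 1 and 2, e.g.
 *	DHD_D2H_RING_OFFSET(3, max_h2d_rings) = 3 - BCMPCIE_H2D_COMMON_MSGRINGS = 1
 * and DHD_D2H_RINGID(1) = 1 + BCMPCIE_H2D_COMMON_MSGRINGS = 3 converts it back.
 */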
362 
363 /* XXX: The ringid and flowid and dma indices array index idiosyncrasy is error
364  * prone. While a simplification is possible, the backward compatibility
365  * requirement (DHD should operate with any PCIE rev version of firmware),
366  * limits what may be accomplished.
367  *
368  * At the minimum, implementation should use macros for any conversions
369  * facilitating introduction of future PCIE FD revs that need more "common" or
370  * other dynamic rings.
371  */
372 
373 /* XXX: Presently there is no need for maintaining both a dmah and a secdmah */
374 #define DHD_DMAH_NULL      ((void*)NULL)
375 
376 /*
377  * Pad a DMA-able buffer by an additional cacheline. If the end of the DMA-able
378  * buffer does not occupy the entire cacheline, and another object is placed
379  * following the DMA-able buffer, data corruption may occur if the DMA-able
380  * buffer is DMA'd into (e.g. D2H direction) when HW cache coherency
381  * is not available.
382  */
383 #if defined(L1_CACHE_BYTES)
384 #define DHD_DMA_PAD        (L1_CACHE_BYTES)
385 #else
386 #define DHD_DMA_PAD        (128)
387 #endif
388 
389 /*
390  * +----------------------------------------------------------------------------
391  * Flowring Pool
392  *
393  * Unlike common rings, which are attached very early on (dhd_prot_attach),
394  * flowrings are dynamically instantiated. Moreover, flowrings may require a
395  * larger DMA-able buffer. To avoid issues with fragmented cache coherent
396  * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
397  * The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
398  *
399  * Each DMA-able buffer may be allocated independently, or may be carved out
400  * of a single large contiguous region that is registered with the protocol
401  * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
402  * may not cross the 0x00000000FFFFFFFF boundary (avoids dongle side 64bit ptr arithmetic).
403  *
404  * No flowring pool action is performed in dhd_prot_attach(), as the number
405  * of h2d rings is not yet known.
406  *
407  * In dhd_prot_init(), the dongle advertized number of h2d rings is used to
408  * determine the number of flowrings required, and a pool of msgbuf_rings are
409  * allocated and a DMA-able buffer (carved or allocated) is attached.
410  * See: dhd_prot_flowrings_pool_attach()
411  *
412  * A flowring msgbuf_ring object may be fetched from this pool during flowring
413  * creation, using the flowid. Likewise, flowrings may be freed back into the
414  * pool on flowring deletion.
415  * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
416  *
417  * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
418  * are detached (returned back to the carved region or freed), and the pool of
419  * msgbuf_ring and any objects allocated against it are freed.
420  * See: dhd_prot_flowrings_pool_detach()
421  *
422  * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a
423  * state as-if upon an attach. All DMA-able buffers are retained.
424  * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
425  * pool attach will notice that the pool persists and continue to use it. This
426  * will avoid the case of a fragmented DMA-able region.
427  *
428  * +----------------------------------------------------------------------------
429  */
430 
431 /* Conversion of a flowid to a flowring pool index */
432 #define DHD_FLOWRINGS_POOL_OFFSET(flowid) \
433 	((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)
434 
435 /* Fetch the msgbuf_ring_t from the flowring pool given a flowid */
436 #define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \
437 	(msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + \
438 	    DHD_FLOWRINGS_POOL_OFFSET(flowid)
439 
440 /* Traverse each flowring in the flowring pool, assigning ring and flowid */
441 #define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) \
442 	for ((flowid) = DHD_FLOWRING_START_FLOWID, \
443 		(ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \
444 		 (flowid) < ((total_flowrings) + DHD_FLOWRING_START_FLOWID); \
445 		 (ring)++, (flowid)++)
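/*
 * Illustrative usage sketch (an assumption for documentation, not code used
 * verbatim by the driver): walking every preallocated flowring in the pool,
 * e.g. to reset it. 'dhd', 'prot', 'ring' and 'flowid' are assumed locals/args.
 *
 *	uint16 flowid;
 *	msgbuf_ring_t *ring;
 *	uint16 total_flowrings =
 *		prot->h2d_rings_total - BCMPCIE_H2D_COMMON_MSGRINGS;
 *	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) {
 *		dhd_prot_ring_reset(dhd, ring);
 *	}
 */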
446 
447 /* Used in loopback tests */
448 typedef struct dhd_dmaxfer {
449 	dhd_dma_buf_t srcmem;
450 	dhd_dma_buf_t dstmem;
451 	uint32        srcdelay;
452 	uint32        destdelay;
453 	uint32        len;
454 	bool          in_progress;
455 	uint64        start_usec;
456 	uint64        time_taken;
457 	uint32        d11_lpbk;
458 	int           status;
459 } dhd_dmaxfer_t;
460 
461 #ifdef DHD_HMAPTEST
462 /* Used in HMAP test */
463 typedef struct dhd_hmaptest {
464 	dhd_dma_buf_t	mem;
465 	uint32		len;
466 	bool	in_progress;
467 	uint32	is_write;
468 	uint32	accesstype;
469 	uint64  start_usec;
470 	uint32	offset;
471 } dhd_hmaptest_t;
472 #endif /* DHD_HMAPTEST */
473 /**
474  * msgbuf_ring : This object manages the host side ring that includes a DMA-able
475  * buffer, the WR and RD indices, ring parameters such as max number of items
476  * an length of each items, and other miscellaneous runtime state.
477  * A msgbuf_ring may be used to represent a H2D or D2H common ring or a
478  * H2D TxPost ring as specified in the PCIE FullDongle Spec.
479  * Ring parameters are conveyed to the dongle, which maintains its own peer end
480  * ring state. Depending on whether the DMA Indices feature is supported, the
481  * host will update the WR/RD index in the DMA indices array in host memory or
482  * directly in dongle memory.
483  */
484 typedef struct msgbuf_ring {
485 	bool           inited;
486 	uint16         idx;       /* ring id */
487 	uint16         rd;        /* read index */
488 	uint16         curr_rd;   /* read index for debug */
489 	uint16         wr;        /* write index */
490 	uint16         max_items; /* maximum number of items in ring */
491 	uint16         item_len;  /* length of each item in the ring */
492 	sh_addr_t      base_addr; /* LITTLE ENDIAN formatted: base address */
493 	dhd_dma_buf_t  dma_buf;   /* DMA-able buffer: pa, va, len, dmah, secdma */
494 	uint32         seqnum;    /* next expected item's sequence number */
495 #ifdef TXP_FLUSH_NITEMS
496 	void           *start_addr;
497 	/* # of messages on ring not yet announced to dongle */
498 	uint16         pend_items_count;
499 #ifdef AGG_H2D_DB
500 	osl_atomic_t	inflight;
501 #endif /* AGG_H2D_DB */
502 #endif /* TXP_FLUSH_NITEMS */
503 
504 	uint8   ring_type;
505 	uint8   n_completion_ids;
506 	bool    create_pending;
507 	uint16  create_req_id;
508 	uint8   current_phase;
509 	uint16	compeltion_ring_ids[MAX_COMPLETION_RING_IDS_ASSOCIATED];
510 	uchar		name[RING_NAME_MAX_LENGTH];
511 	uint32		ring_mem_allocated;
512 	void	*ring_lock;
513 } msgbuf_ring_t;
514 
515 #define DHD_RING_BGN_VA(ring)           ((ring)->dma_buf.va)
516 #define DHD_RING_END_VA(ring) \
517 	((uint8 *)(DHD_RING_BGN_VA((ring))) + \
518 	 (((ring)->max_items - 1) * (ring)->item_len))
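/* Note (illustrative numbers): DHD_RING_END_VA() yields the VA of the last
 * item slot, not one past the end; e.g. with max_items = 256 and item_len = 16
 * it points at DHD_RING_BGN_VA(ring) + 255 * 16.
 */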
519 
520 #if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
521 #define MAX_IOCTL_TRACE_SIZE    50
522 #define MAX_IOCTL_BUF_SIZE		64
523 typedef struct _dhd_ioctl_trace_t {
524 	uint32	cmd;
525 	uint16	transid;
526 	char	ioctl_buf[MAX_IOCTL_BUF_SIZE];
527 	uint64	timestamp;
528 } dhd_ioctl_trace_t;
529 #endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */
530 
531 #ifdef DHD_PKTTS
532 struct pktts_fwtx_v1 {
533 	uint32 ts[PKTTS_MAX_FWTX];
534 };
535 
536 struct pktts_fwtx_v2 {
537 	uint32 ts[PKTTS_MAX_FWTX];
538 	uint32 ut[PKTTS_MAX_UCTX];
539 	uint32 uc[PKTTS_MAX_UCCNT];
540 };
541 
542 static void dhd_msgbuf_send_msg_tx_ts(dhd_pub_t *dhd, void *pkt,
543 	void *fw_ts, uint16 version);
544 static void dhd_msgbuf_send_msg_rx_ts(dhd_pub_t *dhd, void *pkt,
545 	uint fwr1, uint fwr2);
546 #endif /* DHD_PKTTS */
547 
548 #if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
549 /** D2H WLAN Rx Packet Chaining context */
550 typedef struct rxchain_info {
551 	uint		pkt_count;
552 	uint		ifidx;
553 	void		*pkthead;
554 	void		*pkttail;
555 	uint8		*h_da;	/* pointer to da of chain head */
556 	uint8		*h_sa;	/* pointer to sa of chain head */
557 	uint8		h_prio; /* prio of chain head */
558 } rxchain_info_t;
559 #endif /* BCM_ROUTER_DHD && HNDCTF */
560 
561 /* This can be overwritten by module parameter defined in dhd_linux.c
562  * or by dhd iovar h2d_max_txpost.
563  */
564 int h2d_max_txpost = H2DRING_TXPOST_MAX_ITEM;
565 #if defined(DHD_HTPUT_TUNABLES)
566 int h2d_htput_max_txpost = H2DRING_HTPUT_TXPOST_MAX_ITEM;
567 #endif /* DHD_HTPUT_TUNABLES */
568 
569 #ifdef AGG_H2D_DB
570 bool agg_h2d_db_enab = TRUE;
571 
572 #define AGG_H2D_DB_TIMEOUT_USEC		(1000u)	/* 1 msec */
573 uint32 agg_h2d_db_timeout = AGG_H2D_DB_TIMEOUT_USEC;
574 
575 #ifndef AGG_H2D_DB_INFLIGHT_THRESH
576 /* Keep inflight threshold same as txp_threshold */
577 #define AGG_H2D_DB_INFLIGHT_THRESH TXP_FLUSH_MAX_ITEMS_FLUSH_CNT
578 #endif /* !AGG_H2D_DB_INFLIGHT_THRESH */
579 
580 uint32 agg_h2d_db_inflight_thresh = AGG_H2D_DB_INFLIGHT_THRESH;
581 
582 #define DHD_NUM_INFLIGHT_HISTO_ROWS (14u)
583 #define DHD_INFLIGHT_HISTO_SIZE (sizeof(uint64) * DHD_NUM_INFLIGHT_HISTO_ROWS)
584 
585 typedef struct _agg_h2d_db_info {
586 	void *dhd;
587 	struct hrtimer timer;
588 	bool init;
589 	uint32 direct_db_cnt;
590 	uint32 timer_db_cnt;
591 	uint64  *inflight_histo;
592 } agg_h2d_db_info_t;
593 #endif /* AGG_H2D_DB */
594 
595 /** DHD protocol handle. Is an opaque type to other DHD software layers. */
596 typedef struct dhd_prot {
597 	osl_t *osh;		/* OSL handle */
598 	uint16 rxbufpost_sz;
599 	uint16 rxbufpost;
600 	uint16 max_rxbufpost;
601 	uint32 tot_rxbufpost;
602 	uint32 tot_rxcpl;
603 	uint16 max_eventbufpost;
604 	uint16 max_ioctlrespbufpost;
605 	uint16 max_tsbufpost;
606 	uint16 max_infobufpost;
607 	uint16 infobufpost;
608 	uint16 cur_event_bufs_posted;
609 	uint16 cur_ioctlresp_bufs_posted;
610 	uint16 cur_ts_bufs_posted;
611 
612 	/* Flow control mechanism based on active transmits pending */
613 	osl_atomic_t active_tx_count; /* increments/decrements on every packet tx/tx_status */
614 	uint16 h2d_max_txpost;
615 #if defined(DHD_HTPUT_TUNABLES)
616 	uint16 h2d_htput_max_txpost;
617 #endif /* DHD_HTPUT_TUNABLES */
618 	uint16 txp_threshold;  /* optimization to write "n" tx items at a time to ring */
619 
620 	/* MsgBuf Ring info: has a dhd_dma_buf that is dynamically allocated */
621 	msgbuf_ring_t h2dring_ctrl_subn; /* H2D ctrl message submission ring */
622 	msgbuf_ring_t h2dring_rxp_subn; /* H2D RxBuf post ring */
623 	msgbuf_ring_t d2hring_ctrl_cpln; /* D2H ctrl completion ring */
624 	msgbuf_ring_t d2hring_tx_cpln; /* D2H Tx complete message ring */
625 	msgbuf_ring_t d2hring_rx_cpln; /* D2H Rx complete message ring */
626 	msgbuf_ring_t *h2dring_info_subn; /* H2D info submission ring */
627 	msgbuf_ring_t *d2hring_info_cpln; /* D2H info completion ring */
628 	msgbuf_ring_t *d2hring_edl; /* D2H Enhanced Debug Lane (EDL) ring */
629 
630 	msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowrings */
631 	dhd_dma_buf_t flowrings_dma_buf; /* Contiguous DMA buffer for flowrings */
632 	uint16        h2d_rings_total; /* total H2D (common rings + flowrings) */
633 
634 	uint32		rx_dataoffset;
635 
636 	dhd_mb_ring_t	mb_ring_fn;	/* called when dongle needs to be notified of new msg */
637 	dhd_mb_ring_2_t	mb_2_ring_fn;	/* called when dongle needs to be notified of new msg */
638 
639 	/* ioctl related resources */
640 	uint8 ioctl_state;
641 	int16 ioctl_status;		/* status returned from dongle */
642 	uint16 ioctl_resplen;
643 	dhd_ioctl_recieved_status_t ioctl_received;
644 	uint curr_ioctl_cmd;
645 	dhd_dma_buf_t	retbuf;		/* For holding ioctl response */
646 	dhd_dma_buf_t	ioctbuf;	/* For holding ioctl request */
647 
648 	dhd_dma_buf_t	d2h_dma_scratch_buf;	/* For holding d2h scratch */
649 
650 	/* DMA-able arrays for holding WR and RD indices */
651 	uint32          rw_index_sz; /* Size of a RD or WR index in dongle */
652 	dhd_dma_buf_t   h2d_dma_indx_wr_buf;	/* Array of H2D WR indices */
653 	dhd_dma_buf_t	h2d_dma_indx_rd_buf;	/* Array of H2D RD indices */
654 	dhd_dma_buf_t	d2h_dma_indx_wr_buf;	/* Array of D2H WR indices */
655 	dhd_dma_buf_t	d2h_dma_indx_rd_buf;	/* Array of D2H RD indices */
656 	dhd_dma_buf_t h2d_ifrm_indx_wr_buf;	/* Array of H2D WR indices for ifrm */
657 
658 	dhd_dma_buf_t	host_bus_throughput_buf; /* bus throughput measure buffer */
659 
660 	dhd_dma_buf_t   *flowring_buf;    /* pool of flow ring buf */
661 #ifdef DHD_DMA_INDICES_SEQNUM
662 	char *h2d_dma_indx_rd_copy_buf; /* Local copy of H2D WR indices array */
663 	char *d2h_dma_indx_wr_copy_buf; /* Local copy of D2H WR indices array */
664 	uint32 h2d_dma_indx_rd_copy_bufsz; /* H2D WR indices array size */
665 	uint32 d2h_dma_indx_wr_copy_bufsz; /* D2H WR indices array size */
666 	uint32 host_seqnum;	/* Sequence number for D2H DMA Indices sync */
667 #endif /* DHD_DMA_INDICES_SEQNUM */
668 	uint32			flowring_num;
669 
670 	d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */
671 #ifdef EWP_EDL
672 	d2h_edl_sync_cb_t d2h_edl_sync_cb; /* Sync on EDL D2H DMA done: SEQNUM or XORCSUM */
673 #endif /* EWP_EDL */
674 	ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */
675 	ulong d2h_sync_wait_tot; /* total wait loops */
676 
677 	dhd_dmaxfer_t	dmaxfer; /* for test/DMA loopback */
678 
679 	uint16		ioctl_seq_no;
680 	uint16		data_seq_no;  /* XXX this field is obsolete */
681 	uint16		ioctl_trans_id;
682 	void		*pktid_ctrl_map; /* a pktid maps to a packet and its metadata */
683 	void		*pktid_rx_map;	/* pktid map for rx path */
684 	void		*pktid_tx_map;	/* pktid map for tx path */
685 	bool		metadata_dbg;
686 	void		*pktid_map_handle_ioctl;
687 #ifdef DHD_MAP_PKTID_LOGGING
688 	void		*pktid_dma_map;	/* pktid map for DMA MAP */
689 	void		*pktid_dma_unmap; /* pktid map for DMA UNMAP */
690 #endif /* DHD_MAP_PKTID_LOGGING */
691 	uint32		pktid_depleted_cnt;	/* pktid depleted count */
692 	/* netif tx queue stop count */
693 	uint8		pktid_txq_stop_cnt;
694 	/* netif tx queue start count */
695 	uint8		pktid_txq_start_cnt;
696 	uint64		ioctl_fillup_time;	/* timestamp for ioctl fillup */
697 	uint64		ioctl_ack_time;		/* timestamp for ioctl ack */
698 	uint64		ioctl_cmplt_time;	/* timestamp for ioctl completion */
699 
700 	/* Applications/utilities can read tx and rx metadata using IOVARs */
701 	uint16		rx_metadata_offset;
702 	uint16		tx_metadata_offset;
703 
704 #if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
705 	rxchain_info_t	rxchain;	/* chain of rx packets */
706 #endif
707 
708 #if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
709 	/* Host's soft doorbell configuration */
710 	bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS];
711 #endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
712 
713 	/* Work Queues to be used by the producer and the consumer, and threshold
714 	 * when the WRITE index must be synced to consumer's workq
715 	 */
716 	dhd_dma_buf_t	fw_trap_buf; /* firmware trap buffer */
717 
718 	uint32  host_ipc_version; /* Host supported IPC rev */
719 	uint32  device_ipc_version; /* FW supported IPC rev */
720 	uint32  active_ipc_version; /* Host advertised IPC rev */
721 #if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
722 	dhd_ioctl_trace_t	ioctl_trace[MAX_IOCTL_TRACE_SIZE];
723 	uint32				ioctl_trace_count;
724 #endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */
725 	dhd_dma_buf_t   hostts_req_buf; /* For holding host timestamp request buf */
726 	bool    hostts_req_buf_inuse;
727 	bool    rx_ts_log_enabled;
728 	bool    tx_ts_log_enabled;
729 #ifdef BTLOG
730 	msgbuf_ring_t *h2dring_btlog_subn; /* H2D btlog submission ring */
731 	msgbuf_ring_t *d2hring_btlog_cpln; /* D2H btlog completion ring */
732 	uint16 btlogbufpost;
733 	uint16 max_btlogbufpost;
734 #endif	/* BTLOG */
735 #ifdef DHD_HMAPTEST
736 	uint32 hmaptest_rx_active;
737 	uint32 hmaptest_rx_pktid;
738 	char *hmap_rx_buf_va;
739 	dmaaddr_t hmap_rx_buf_pa;
740 	uint32 hmap_rx_buf_len;
741 
742 	uint32 hmaptest_tx_active;
743 	uint32 hmaptest_tx_pktid;
744 	char *hmap_tx_buf_va;
745 	dmaaddr_t hmap_tx_buf_pa;
746 	uint32	  hmap_tx_buf_len;
747 	dhd_hmaptest_t	hmaptest; /* for hmaptest */
748 	bool hmap_enabled; /* TRUE = hmap is enabled */
749 #endif /* DHD_HMAPTEST */
750 #ifdef SNAPSHOT_UPLOAD
751 	dhd_dma_buf_t snapshot_upload_buf;	/* snapshot upload buffer */
752 	uint32 snapshot_upload_len;		/* snapshot uploaded len */
753 	uint8 snapshot_type;			/* snapshot uploaded type */
754 	bool snapshot_cmpl_pending;		/* snapshot completion pending */
755 #endif	/* SNAPSHOT_UPLOAD */
756 	bool no_retry;
757 	bool no_aggr;
758 	bool fixed_rate;
759 	dhd_dma_buf_t	host_scb_buf; /* scb host offload buffer */
760 #ifdef DHD_HP2P
761 	msgbuf_ring_t *d2hring_hp2p_txcpl; /* D2H HP2P Tx completion ring */
762 	msgbuf_ring_t *d2hring_hp2p_rxcpl; /* D2H HP2P Rx completion ring */
763 #endif /* DHD_HP2P */
764 	bool no_tx_resource;
765 	uint32 txcpl_db_cnt;
766 #ifdef AGG_H2D_DB
767 	agg_h2d_db_info_t agg_h2d_db_info;
768 #endif /* AGG_H2D_DB */
769 	uint64 tx_h2d_db_cnt;
770 } dhd_prot_t;
771 
772 #ifdef DHD_EWPR_VER2
773 #define HANG_INFO_BASE64_BUFFER_SIZE 640
774 #endif
775 
776 #ifdef DHD_DUMP_PCIE_RINGS
777 static
778 int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
779 	const void *user_buf, unsigned long *file_posn);
780 #ifdef EWP_EDL
781 static
782 int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf,
783 	unsigned long *file_posn);
784 #endif /* EWP_EDL */
785 #endif /* DHD_DUMP_PCIE_RINGS */
786 extern bool dhd_timesync_delay_post_bufs(dhd_pub_t *dhdp);
787 extern void dhd_schedule_dmaxfer_free(dhd_pub_t* dhdp, dmaxref_mem_map_t *dmmap);
788 /* Convert a dmaaddr_t to a base_addr with htol operations */
789 static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa);
790 
791 /* APIs for managing a DMA-able buffer */
792 static int  dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
793 static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
794 
795 /* msgbuf ring management */
796 static int dhd_prot_allocate_bufs(dhd_pub_t *dhd, dhd_prot_t *prot);
797 static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring,
798 	const char *name, uint16 max_items, uint16 len_item, uint16 ringid);
799 static void dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring);
800 static void dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring);
801 static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring);
802 static void dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf);
803 
804 /* Pool of pre-allocated msgbuf_ring_t with DMA-able buffers for Flowrings */
805 static int  dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd);
806 static void dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd);
807 static void dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd);
808 
809 /* Fetch and Release a flowring msgbuf_ring from flowring  pool */
810 static msgbuf_ring_t *dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd,
811 	uint16 flowid);
812 /* see also dhd_prot_flowrings_pool_release() in dhd_prot.h */
813 
814 /* Producer: Allocate space in a msgbuf ring */
815 static void* dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
816 	uint16 nitems, uint16 *alloced, bool exactly_nitems);
817 static void* dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems,
818 	uint16 *alloced, bool exactly_nitems);
819 
820 /* Consumer: Determine the location where the next message may be consumed */
821 static uint8* dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring,
822 	uint32 *available_len);
823 
824 /* Producer (WR index update) or Consumer (RD index update) indication */
825 static void dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t *ring,
826 	void *p, uint16 len);
827 
828 #ifdef AGG_H2D_DB
829 static void dhd_prot_agg_db_ring_write(dhd_pub_t *dhd, msgbuf_ring_t * ring,
830 		void* p, uint16 len);
831 static void dhd_prot_aggregate_db_ring_door_bell(dhd_pub_t *dhd, uint16 flowid, bool ring_db);
832 static void dhd_prot_txdata_aggr_db_write_flush(dhd_pub_t *dhd, uint16 flowid);
833 #endif /* AGG_H2D_DB */
834 static void dhd_prot_ring_doorbell(dhd_pub_t *dhd, uint32 value);
835 static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring);
836 
837 static INLINE int dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
838 	dhd_dma_buf_t *dma_buf, uint32 bufsz);
839 
840 /* Set/Get a RD or WR index in the array of indices */
841 /* See also: dhd_prot_dma_indx_init() */
842 void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type,
843 	uint16 ringid);
844 static uint16 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid);
845 
846 /* Locate a packet given a pktid */
847 static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype,
848 	bool free_pktid);
849 /* Locate a packet given a PktId and free it. */
850 static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send);
851 
852 static int dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
853 	void *buf, uint len, uint8 action);
854 static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
855 	void *buf, uint len, uint8 action);
856 static int dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf);
857 static int dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd,
858 	void *buf, int ifidx);
859 
860 /* Post buffers for Rx, control ioctl response and events */
861 static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msgid, uint32 max_to_post);
862 static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub);
863 static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub);
864 static void dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid);
865 static int dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid);
866 static int dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *pub);
867 
868 static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 pktid, uint32 rxcnt);
869 
870 #if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
871 static void dhd_prot_ioctl_trace(dhd_pub_t *dhd, ioctl_req_msg_t *ioct_rqst, uchar *buf, int len);
872 static void dhd_prot_ioctl_dump(dhd_prot_t *prot, struct bcmstrbuf *strbuf);
873 #endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */
874 
875 /* D2H Message handling */
876 static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len);
877 
878 /* D2H Message handlers */
879 static void dhd_prot_noop(dhd_pub_t *dhd, void *msg);
880 static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg);
881 static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg);
882 static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg);
883 static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg);
884 static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg);
885 static void dhd_prot_event_process(dhd_pub_t *dhd, void *msg);
886 
887 /* Loopback test with dongle */
888 static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma);
889 static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay,
890 	uint destdelay, dhd_dmaxfer_t *dma);
891 static void dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg);
892 
893 /* Flowring management communication with dongle */
894 static void dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg);
895 static void dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg);
896 static void dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg);
897 static void dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg);
898 static void dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg);
899 
900 /* Monitor Mode */
901 #ifdef WL_MONITOR
902 extern bool dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx);
903 extern void dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx);
904 #endif /* WL_MONITOR */
905 
906 /* Configure a soft doorbell per D2H ring */
907 static void dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd);
908 static void dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg);
909 static void dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf);
910 #if !defined(BCM_ROUTER_DHD)
911 static void dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf);
912 static void dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf);
913 #endif /* !BCM_ROUTER_DHD */
914 static void dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf);
915 static void dhd_prot_detach_info_rings(dhd_pub_t *dhd);
916 #ifdef BTLOG
917 static void dhd_prot_process_btlog_complete(dhd_pub_t *dhd, void* buf);
918 static void dhd_prot_detach_btlog_rings(dhd_pub_t *dhd);
919 #endif	/* BTLOG */
920 #ifdef DHD_HP2P
921 static void dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd);
922 #endif /* DHD_HP2P */
923 #ifdef EWP_EDL
924 static void dhd_prot_detach_edl_rings(dhd_pub_t *dhd);
925 #endif
926 static void dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf);
927 static void dhd_prot_process_snapshot_complete(dhd_pub_t *dhd, void *buf);
928 
929 #ifdef DHD_TIMESYNC
930 extern void dhd_parse_proto(uint8 *pktdata, dhd_pkt_parse_t *parse);
931 #endif
932 
933 #ifdef DHD_FLOW_RING_STATUS_TRACE
934 void dhd_dump_bus_flow_ring_status_isr_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf);
935 void dhd_dump_bus_flow_ring_status_dpc_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf);
936 #endif /* DHD_FLOW_RING_STATUS_TRACE */
937 
938 #ifdef DHD_TX_PROFILE
939 extern bool dhd_protocol_matches_profile(uint8 *p, int plen, const
940 		dhd_tx_profile_protocol_t *proto, bool is_host_sfhllc);
941 #endif /* defined(DHD_TX_PROFILE) */
942 
943 #ifdef DHD_HP2P
944 static void dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus);
945 static void dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus);
946 static void dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid);
947 static void dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc);
948 #endif
949 typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg);
950 
951 /** callback functions for messages generated by the dongle */
952 #define MSG_TYPE_INVALID 0
953 
954 static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = {
955 	dhd_prot_noop, /* 0 is MSG_TYPE_INVALID */
956 	dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */
957 	dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */
958 	NULL,
959 	dhd_prot_flow_ring_create_response_process, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */
960 	NULL,
961 	dhd_prot_flow_ring_delete_response_process, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */
962 	NULL,
963 	dhd_prot_flow_ring_flush_response_process, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */
964 	NULL,
965 	dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */
966 	NULL,
967 	dhd_prot_ioctcmplt_process, /* MSG_TYPE_IOCTL_CMPLT */
968 	NULL,
969 	dhd_prot_event_process, /* MSG_TYPE_WL_EVENT */
970 	NULL,
971 	dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */
972 	NULL,
973 	NULL,	/* MSG_TYPE_RX_CMPLT use dedicated handler */
974 	NULL,
975 	dhd_msgbuf_dmaxfer_process, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */
976 	NULL, /* MSG_TYPE_FLOW_RING_RESUME */
977 	dhd_prot_process_flow_ring_resume_response, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */
978 	NULL, /* MSG_TYPE_FLOW_RING_SUSPEND */
979 	dhd_prot_process_flow_ring_suspend_response, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */
980 	NULL, /* MSG_TYPE_INFO_BUF_POST */
981 #if defined(BCM_ROUTER_DHD)
982 	NULL, /* MSG_TYPE_INFO_BUF_CMPLT */
983 #else
984 	dhd_prot_process_infobuf_complete, /* MSG_TYPE_INFO_BUF_CMPLT */
985 #endif /* BCM_ROUTER_DHD */
986 	NULL, /* MSG_TYPE_H2D_RING_CREATE */
987 	NULL, /* MSG_TYPE_D2H_RING_CREATE */
988 #if defined(BCM_ROUTER_DHD)
989 	NULL, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */
990 #else
991 	dhd_prot_process_h2d_ring_create_complete, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */
992 #endif /* BCM_ROUTER_DHD */
993 	dhd_prot_process_d2h_ring_create_complete, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */
994 	NULL, /* MSG_TYPE_H2D_RING_CONFIG */
995 	NULL, /* MSG_TYPE_D2H_RING_CONFIG */
996 	NULL, /* MSG_TYPE_H2D_RING_CONFIG_CMPLT */
997 	dhd_prot_process_d2h_ring_config_complete, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT */
998 	NULL, /* MSG_TYPE_H2D_MAILBOX_DATA */
999 	dhd_prot_process_d2h_mb_data, /* MSG_TYPE_D2H_MAILBOX_DATA */
1000 	NULL,	/* MSG_TYPE_TIMSTAMP_BUFPOST */
1001 	NULL,	/* MSG_TYPE_HOSTTIMSTAMP */
1002 	dhd_prot_process_d2h_host_ts_complete,	/* MSG_TYPE_HOSTTIMSTAMP_CMPLT */
1003 	dhd_prot_process_fw_timestamp,	/* MSG_TYPE_FIRMWARE_TIMESTAMP */
1004 	NULL,	/* MSG_TYPE_SNAPSHOT_UPLOAD */
1005 	dhd_prot_process_snapshot_complete,	/* MSG_TYPE_SNAPSHOT_CMPLT */
1006 };
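/*
 * Dispatch sketch (an assumption for documentation, simplified from the real
 * handler): dhd_prot_process_msgtype() indexes this table with the work item's
 * cmn_msg_hdr_t::msg_type once the D2H sync callback has validated the item:
 *
 *	if (msg_type < DHD_PROT_FUNCS && table_lookup[msg_type] != NULL)
 *		table_lookup[msg_type](dhd, (void *)msg);
 */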
1007 
1008 #if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
1009 /* Related to router CPU mapping per radio core */
1010 #define DHD_RX_CHAINING
1011 #endif /* BCM_ROUTER_DHD && HNDCTF */
1012 
1013 #ifdef DHD_RX_CHAINING
1014 
1015 #define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \
1016 	(dhd_wet_chainable(dhd) && \
1017 	dhd_rx_pkt_chainable((dhd), (ifidx)) && \
1018 	!ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \
1019 	!ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \
1020 	!eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \
1021 	!eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \
1022 	((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \
1023 	((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \
1024 	(((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6))))
1025 
1026 static INLINE void dhd_rxchain_reset(rxchain_info_t *rxchain);
1027 static void dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx);
1028 static void dhd_rxchain_commit(dhd_pub_t *dhd);
1029 
1030 #define DHD_PKT_CTF_MAX_CHAIN_LEN	64
1031 
1032 #endif /* DHD_RX_CHAINING */
1033 
1034 #ifdef DHD_EFI
1035 #define DHD_LPBKDTDUMP_ON()	(1)
1036 #else
1037 #define DHD_LPBKDTDUMP_ON()	(dhd_msg_level & DHD_LPBKDTDUMP_VAL)
1038 #endif
1039 
1040 static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd);
1041 
1042 #ifdef D2H_MINIDUMP
1043 dhd_dma_buf_t *
1044 dhd_prot_get_minidump_buf(dhd_pub_t *dhd)
1045 {
1046 	return &dhd->prot->fw_trap_buf;
1047 }
1048 #endif /* D2H_MINIDUMP */
1049 
1050 uint16
1051 dhd_prot_get_rxbufpost_sz(dhd_pub_t *dhd)
1052 {
1053 	return dhd->prot->rxbufpost_sz;
1054 }
1055 
1056 uint16
1057 dhd_prot_get_h2d_rx_post_active(dhd_pub_t *dhd)
1058 {
1059 	dhd_prot_t *prot = dhd->prot;
1060 	msgbuf_ring_t *flow_ring = &prot->h2dring_rxp_subn;
1061 	uint16 rd, wr;
1062 
1063 	/* Since wr is owned by host in h2d direction, directly read wr */
1064 	wr = flow_ring->wr;
1065 
1066 	if (dhd->dma_d2h_ring_upd_support) {
1067 		rd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, flow_ring->idx);
1068 	} else {
1069 		dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
1070 	}
1071 	return NTXPACTIVE(rd, wr, flow_ring->max_items);
1072 }
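/* Illustrative note: NTXPACTIVE() (defined elsewhere) used by the rx post/cpln
 * helpers is modular ring occupancy; e.g. with max_items = 512, rd = 500 and
 * wr = 10 there are (512 - 500) + 10 = 22 buffers posted but not yet completed.
 */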
1073 
1074 uint16
1075 dhd_prot_get_d2h_rx_cpln_active(dhd_pub_t *dhd)
1076 {
1077 	dhd_prot_t *prot = dhd->prot;
1078 	msgbuf_ring_t *flow_ring = &prot->d2hring_rx_cpln;
1079 	uint16 rd, wr;
1080 
1081 	if (dhd->dma_d2h_ring_upd_support) {
1082 		wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, flow_ring->idx);
1083 	} else {
1084 		dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
1085 	}
1086 
1087 	/* Since rd is owned by host in d2h direction, directly read rd */
1088 	rd = flow_ring->rd;
1089 
1090 	return NTXPACTIVE(rd, wr, flow_ring->max_items);
1091 }
1092 
1093 bool
1094 dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info)
1095 {
1096 	msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)prot_info;
1097 	uint16 rd, wr;
1098 	bool ret;
1099 
1100 	if (dhd->dma_d2h_ring_upd_support) {
1101 		wr = flow_ring->wr;
1102 	} else {
1103 		dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
1104 	}
1105 	if (dhd->dma_h2d_ring_upd_support) {
1106 		rd = flow_ring->rd;
1107 	} else {
1108 		dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
1109 	}
1110 	ret = (wr == rd) ? TRUE : FALSE;
1111 	return ret;
1112 }
1113 
1114 void
1115 dhd_prot_dump_ring_ptrs(void *prot_info)
1116 {
1117 	msgbuf_ring_t *ring = (msgbuf_ring_t *)prot_info;
1118 	DHD_ERROR(("%s curr_rd: %d rd: %d wr: %d \n", __FUNCTION__,
1119 		ring->curr_rd, ring->rd, ring->wr));
1120 }
1121 
1122 uint16
1123 dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd)
1124 {
1125 	return (uint16)h2d_max_txpost;
1126 }
1127 void
1128 dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost)
1129 {
1130 	h2d_max_txpost = max_txpost;
1131 }
1132 #if defined(DHD_HTPUT_TUNABLES)
1133 uint16
1134 dhd_prot_get_h2d_htput_max_txpost(dhd_pub_t *dhd)
1135 {
1136 	return (uint16)h2d_htput_max_txpost;
1137 }
1138 void
1139 dhd_prot_set_h2d_htput_max_txpost(dhd_pub_t *dhd, uint16 htput_max_txpost)
1140 {
1141 	h2d_htput_max_txpost = htput_max_txpost;
1142 }
1143 
1144 #endif /* DHD_HTPUT_TUNABLES */
1145 /**
1146  * D2H DMA completion callback handlers. Based on the mode advertised by the
1147  * dongle through the PCIE shared region, the appropriate callback will be
1148  * registered in the proto layer to be invoked prior to processing any message
1149  * from a D2H DMA ring. If the dongle uses a read barrier or another mode that
1150  * does not require host participation, then a noop callback handler will be
1151  * bound that simply returns the msg_type.
1152  */
1153 static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring,
1154                                        uint32 tries, volatile uchar *msg, int msglen);
1155 static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1156                                       volatile cmn_msg_hdr_t *msg, int msglen);
1157 static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1158                                        volatile cmn_msg_hdr_t *msg, int msglen);
1159 static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1160                                     volatile cmn_msg_hdr_t *msg, int msglen);
1161 static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd);
1162 static int dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
1163 	uint16 ring_type, uint32 id);
1164 static int dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
1165 	uint8 type, uint32 id);
1166 
1167 /**
1168  * dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has
1169  * not completed, a livelock condition occurs. Host will avert this livelock by
1170  * dropping this message and moving to the next. This dropped message can lead
1171  * to a packet leak, or even something disastrous in the case the dropped
1172  * message happens to be a control response.
1173  * Here we will log this condition. One may choose to reboot the dongle.
1174  *
1175  */
1176 static void
1177 dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring, uint32 tries,
1178                            volatile uchar *msg, int msglen)
1179 {
1180 	uint32 ring_seqnum = ring->seqnum;
1181 
1182 	if (dhd_query_bus_erros(dhd)) {
1183 		return;
1184 	}
1185 
1186 	DHD_ERROR((
1187 		"LIVELOCK DHD<%p> ring<%s> msg_seqnum<%u> ring_seqnum<%u:%u> tries<%u> max<%lu>"
1188 		" tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d> rd<%d> wr<%d>\n",
1189 		dhd, ring->name, msg_seqnum, ring_seqnum, ring_seqnum% D2H_EPOCH_MODULO, tries,
1190 		dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot,
1191 		ring->dma_buf.va, msg, ring->curr_rd, ring->rd, ring->wr));
1192 
1193 	dhd_prhex("D2H MsgBuf Failure", msg, msglen, DHD_ERROR_VAL);
1194 
1195 	/* Try to resume if already suspended or suspend in progress */
1196 #ifdef DHD_PCIE_RUNTIMEPM
1197 	dhdpcie_runtime_bus_wake(dhd, CAN_SLEEP(), __builtin_return_address(0));
1198 #endif /* DHD_PCIE_RUNTIMEPM */
1199 
1200 	/* Skip if still in suspended or suspend in progress */
1201 	if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) {
1202 		DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
1203 			__FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state));
1204 		goto exit;
1205 	}
1206 
1207 	dhd_bus_dump_console_buffer(dhd->bus);
1208 	dhd_prot_debug_info_print(dhd);
1209 
1210 #ifdef DHD_FW_COREDUMP
1211 	if (dhd->memdump_enabled) {
1212 		/* collect core dump */
1213 		dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK;
1214 		dhd_bus_mem_dump(dhd);
1215 	}
1216 #endif /* DHD_FW_COREDUMP */
1217 
1218 exit:
1219 	dhd_schedule_reset(dhd);
1220 
1221 #ifdef OEM_ANDROID
1222 #ifdef SUPPORT_LINKDOWN_RECOVERY
1223 #ifdef CONFIG_ARCH_MSM
1224 	dhd->bus->no_cfg_restore = 1;
1225 #endif /* CONFIG_ARCH_MSM */
1226 	/* XXX Trigger HANG event for recovery */
1227 	dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK;
1228 	dhd_os_send_hang_message(dhd);
1229 #endif /* SUPPORT_LINKDOWN_RECOVERY */
1230 #endif /* OEM_ANDROID */
1231 	dhd->livelock_occured = TRUE;
1232 }
1233 
1234 /**
1235  * dhd_prot_d2h_sync_seqnum - Sync on a D2H DMA completion using the SEQNUM
1236  * mode. Sequence number is always in the last word of a message.
1237  */
1238 static uint8
1239 BCMFASTPATH(dhd_prot_d2h_sync_seqnum)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1240                          volatile cmn_msg_hdr_t *msg, int msglen)
1241 {
1242 	uint32 tries;
1243 	uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
1244 	int num_words = msglen / sizeof(uint32); /* num of 32bit words */
1245 	volatile uint32 *marker = (volatile uint32 *)msg + (num_words - 1); /* last word */
1246 	dhd_prot_t *prot = dhd->prot;
1247 	uint32 msg_seqnum;
1248 	uint32 step = 0;
1249 	uint32 delay = PCIE_D2H_SYNC_DELAY;
1250 	uint32 total_tries = 0;
1251 
1252 	ASSERT(msglen == ring->item_len);
1253 
1254 	BCM_REFERENCE(delay);
1255 	/*
1256 	 * For retries we use a stepper (incremental back-off) algorithm.
1257 	 * We see that every time the Dongle comes out of the D3
1258 	 * Cold state, the first D2H mem2mem DMA takes more time to
1259 	 * complete, leading to livelock issues.
1260 	 *
1261 	 * Case 1 - Apart from Host CPU some other bus master is
1262 	 * accessing the DDR port, probably a page close to the ring,
1263 	 * so PCIE does not get a chance to update the memory.
1264 	 * Solution - Increase the number of tries.
1265 	 *
1266 	 * Case 2 - The 50usec delay given by the Host CPU is not
1267 	 * sufficient for the PCIe RC to start its work.
1268 	 * In this case the breathing time of 50usec given by
1269 	 * the Host CPU is not sufficient.
1270 	 * Solution: Increase the delay in a stepper fashion.
1271 	 * This is done to ensure that there is no
1272 	 * unwanted extra delay introduced in normal conditions.
1273 	 */
1274 	for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
1275 		for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
1276 			msg_seqnum = *marker;
1277 			if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma upto last word done */
1278 				ring->seqnum++; /* next expected sequence number */
1279 				/* Check for LIVELOCK induce flag, which is set by firing
1280 				 * dhd iovar to induce LIVELOCK error. If flag is set,
1281 				 * MSG_TYPE_INVALID is returned, which results in to LIVELOCK error.
1282 				 */
1283 				if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
1284 					goto dma_completed;
1285 				}
1286 			}
1287 
1288 			total_tries = (uint32)(((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries);
1289 
1290 			if (total_tries > prot->d2h_sync_wait_max)
1291 				prot->d2h_sync_wait_max = total_tries;
1292 
1293 			OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
1294 			OSL_CPU_RELAX(); /* CPU relax for msg_seqnum  value to update */
1295 			OSL_DELAY(delay * step); /* Add stepper delay */
1296 
1297 		} /* for PCIE_D2H_SYNC_WAIT_TRIES */
1298 	} /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
1299 
1300 	dhd_prot_d2h_sync_livelock(dhd, msg_seqnum, ring, total_tries,
1301 		(volatile uchar *) msg, msglen);
1302 
1303 	ring->seqnum++; /* skip this message ... leak of a pktid */
1304 	return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
1305 
1306 dma_completed:
1307 
1308 	prot->d2h_sync_wait_tot += tries;
1309 	return msg->msg_type;
1310 }
1311 
1312 /**
1313  * dhd_prot_d2h_sync_xorcsum - Sync on a D2H DMA completion using the XORCSUM
1314  * mode. The xorcsum is placed in the last word of a message. Dongle will also
1315  * place a seqnum in the epoch field of the cmn_msg_hdr.
1316  */
1317 static uint8
1318 BCMFASTPATH(dhd_prot_d2h_sync_xorcsum)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1319                           volatile cmn_msg_hdr_t *msg, int msglen)
1320 {
1321 	uint32 tries;
1322 	uint32 prot_checksum = 0; /* computed checksum */
1323 	int num_words = msglen / sizeof(uint32); /* num of 32bit words */
1324 	uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
1325 	dhd_prot_t *prot = dhd->prot;
1326 	uint32 step = 0;
1327 	uint32 delay = PCIE_D2H_SYNC_DELAY;
1328 	uint32 total_tries = 0;
1329 
1330 	ASSERT(msglen == ring->item_len);
1331 
1332 	BCM_REFERENCE(delay);
1333 	/*
1334 	 * For retries we use a stepper (incremental back-off) algorithm.
1335 	 * We see that every time the Dongle comes out of the D3
1336 	 * Cold state, the first D2H mem2mem DMA takes more time to
1337 	 * complete, leading to livelock issues.
1338 	 *
1339 	 * Case 1 - Apart from Host CPU some other bus master is
1340 	 * accessing the DDR port, probably a page close to the ring,
1341 	 * so PCIE does not get a chance to update the memory.
1342 	 * Solution - Increase the number of tries.
1343 	 *
1344 	 * Case 2 - The 50usec delay given by the Host CPU is not
1345 	 * sufficient for the PCIe RC to start its work.
1346 	 * In this case the breathing time of 50usec given by
1347 	 * the Host CPU is not sufficient.
1348 	 * Solution: Increase the delay in a stepper fashion.
1349 	 * This is done to ensure that there are no
1350 	 * unwanted extra delay introdcued in normal conditions.
1351 	 */
1352 	for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
1353 		for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
1354 			/* First verify whether the seqnum has been updated;
1355 			 * only then check the xorcsum.
1356 			 * Once both the seqnum and xorcsum are correct, the
1357 			 * complete message has arrived.
1358 			 */
1359 			if (msg->epoch == ring_seqnum) {
1360 				prot_checksum = bcm_compute_xor32((volatile uint32 *)msg,
1361 					num_words);
1362 				if (prot_checksum == 0U) { /* checksum is OK */
1363 					ring->seqnum++; /* next expected sequence number */
1364 					/* Check for the LIVELOCK induce flag, which is set by firing
1365 					 * a dhd iovar to induce a LIVELOCK error. If the flag is set,
1366 					 * MSG_TYPE_INVALID is returned, which results in a
1367 					 * LIVELOCK error.
1368 					 */
1369 					if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
1370 						goto dma_completed;
1371 					}
1372 				}
1373 			}
1374 
1375 			total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
1376 
1377 			if (total_tries > prot->d2h_sync_wait_max)
1378 				prot->d2h_sync_wait_max = total_tries;
1379 
1380 			OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
1381 			OSL_CPU_RELAX(); /* CPU relax for msg_seqnum  value to update */
1382 			OSL_DELAY(delay * step); /* Add stepper delay */
1383 
1384 		} /* for PCIE_D2H_SYNC_WAIT_TRIES */
1385 	} /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
1386 
1387 	DHD_ERROR(("%s: prot_checksum = 0x%x\n", __FUNCTION__, prot_checksum));
1388 	dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
1389 		(volatile uchar *) msg, msglen);
1390 
1391 	ring->seqnum++; /* skip this message ... leak of a pktid */
1392 	return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
1393 
1394 dma_completed:
1395 
1396 	prot->d2h_sync_wait_tot += tries;
1397 	return msg->msg_type;
1398 }
1399 
1400 /**
1401  * dhd_prot_d2h_sync_none - Dongle ensures that the DMA will complete and the
1402  * host need not try to sync. This noop sync handler will be bound when the dongle
1403  * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required.
1404  */
1405 static uint8
1406 BCMFASTPATH(dhd_prot_d2h_sync_none)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1407                        volatile cmn_msg_hdr_t *msg, int msglen)
1408 {
1409 	/* Check for the LIVELOCK induce flag, which is set by firing
1410 	 * a dhd iovar to induce a LIVELOCK error. If the flag is set,
1411 	 * MSG_TYPE_INVALID is returned, which results in a LIVELOCK error.
1412 	 */
1413 	if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
1414 		DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
1415 		return MSG_TYPE_INVALID;
1416 	} else {
1417 		return msg->msg_type;
1418 	}
1419 }
1420 
1421 #ifdef EWP_EDL
1422 /**
1423  * dhd_prot_d2h_sync_edl - Sync on a D2H DMA completion by validating the cmn_msg_hdr_t
1424  * header values at both the beginning and end of the payload.
1425  * The cmn_msg_hdr_t is placed at the start and end of the payload
1426  * in each work item in the EDL ring.
1427  * Dongle will place a seqnum inside the cmn_msg_hdr_t 'epoch' field
1428  * and the length of the payload in the 'request_id' field.
1429  * Structure of each work item in the EDL ring:
1430  * | cmn_msg_hdr_t | payload (var len) | cmn_msg_hdr_t |
1431  * NOTE: calculating the xorcsum over the entire payload (max length of 1648 bytes)
1432  * was felt to be too costly on the dongle side (too many ARM cycles),
1433  * hence the xorcsum sync method is not used for the EDL ring.
1434  */
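/*
 * Trailer location as validated in the loop below (derived from the code,
 * shown here for reference):
 *   trailer = (volatile cmn_msg_hdr_t *)((volatile uint8 *)msg +
 *              sizeof(cmn_msg_hdr_t) + msg->request_id)
 * so a valid work item consumes 2 * sizeof(cmn_msg_hdr_t) plus the payload
 * length (carried in msg->request_id) out of ring->item_len bytes.
 */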
1435 static int
1436 BCMFASTPATH(dhd_prot_d2h_sync_edl)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1437                           volatile cmn_msg_hdr_t *msg)
1438 {
1439 	uint32 tries;
1440 	int msglen = 0, len = 0;
1441 	uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
1442 	dhd_prot_t *prot = dhd->prot;
1443 	uint32 step = 0;
1444 	uint32 delay = PCIE_D2H_SYNC_DELAY;
1445 	uint32 total_tries = 0;
1446 	volatile cmn_msg_hdr_t *trailer = NULL;
1447 	volatile uint8 *buf = NULL;
1448 	bool valid_msg = FALSE;
1449 
1450 	BCM_REFERENCE(delay);
1451 	/*
1452 	 * For retries we use a stepper algorithm.
1453 	 * We see that every time the Dongle comes out of the D3
1454 	 * Cold state, the first D2H mem2mem DMA takes more time to
1455 	 * complete, leading to livelock issues.
1456 	 *
1457 	 * Case 1 - Apart from the Host CPU, some other bus master is
1458 	 * accessing the DDR port (probably a page close to the ring),
1459 	 * so PCIe does not get a chance to update the memory.
1460 	 * Solution - Increase the number of tries.
1461 	 *
1462 	 * Case 2 - The 50usec delay given by the Host CPU is not
1463 	 * sufficient for the PCIe RC to start its work, i.e. the
1464 	 * breathing time of 50usec allowed by the Host CPU is
1465 	 * too short.
1466 	 * Solution - Increase the delay in a stepper fashion.
1467 	 * This is done to ensure that no unwanted extra delay is
1468 	 * introduced under normal conditions.
1469 	 */
1470 	for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
1471 		for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
1472 			/* First verify if the seqnumber has been updated,
1473 			 * if yes, only then validate the header and trailer.
1474 			 * Once seqnum, header and trailer have been validated, it means
1475 			 * that the complete message has arrived.
1476 			 */
1477 			valid_msg = FALSE;
1478 			if (msg->epoch == ring_seqnum &&
1479 				msg->msg_type == MSG_TYPE_INFO_PYLD &&
1480 				msg->request_id > 0 &&
1481 				msg->request_id <= ring->item_len) {
1482 				/* proceed to check trailer only if header is valid */
1483 				buf = (volatile uint8 *)msg;
1484 				msglen = sizeof(cmn_msg_hdr_t) + msg->request_id;
1485 				buf += msglen;
1486 				if (msglen + sizeof(cmn_msg_hdr_t) <= ring->item_len) {
1487 					trailer = (volatile cmn_msg_hdr_t *)buf;
1488 					valid_msg = (trailer->epoch == ring_seqnum) &&
1489 						(trailer->msg_type == msg->msg_type) &&
1490 						(trailer->request_id == msg->request_id);
1491 					if (!valid_msg) {
1492 						DHD_TRACE(("%s:invalid trailer! seqnum=%u;reqid=%u"
1493 						" expected, seqnum=%u; reqid=%u. Retrying... \n",
1494 						__FUNCTION__, trailer->epoch, trailer->request_id,
1495 						msg->epoch, msg->request_id));
1496 					}
1497 				} else {
1498 					DHD_TRACE(("%s: invalid payload length (%u)! Retrying.. \n",
1499 						__FUNCTION__, msg->request_id));
1500 				}
1501 
1502 				if (valid_msg) {
1503 					/* data is OK */
1504 					ring->seqnum++; /* next expected sequence number */
1505 					if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
1506 						goto dma_completed;
1507 					}
1508 				}
1509 			} else {
1510 				DHD_TRACE(("%s: wrong hdr, seqnum expected %u, got %u."
1511 					" msg_type=0x%x, request_id=%u."
1512 					" Retrying...\n",
1513 					__FUNCTION__, ring_seqnum, msg->epoch,
1514 					msg->msg_type, msg->request_id));
1515 			}
1516 
1517 			total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
1518 
1519 			if (total_tries > prot->d2h_sync_wait_max)
1520 				prot->d2h_sync_wait_max = total_tries;
1521 
1522 			OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
1523 #if !(defined(BCM_ROUTER_DHD) && defined(BCM_GMAC3))
1524 			OSL_CPU_RELAX(); /* CPU relax for msg_seqnum  value to update */
1525 			OSL_DELAY(delay * step); /* Add stepper delay */
1526 #endif /* !(defined(BCM_ROUTER_DHD) && defined(BCM_GMAC3)) */
1527 
1528 		} /* for PCIE_D2H_SYNC_WAIT_TRIES */
1529 	} /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
1530 
1531 	DHD_ERROR(("%s: EDL header check fails !\n", __FUNCTION__));
1532 	DHD_ERROR(("%s: header: seqnum=%u; expected-seqnum=%u"
1533 		" msgtype=0x%x; expected-msgtype=0x%x"
1534 		" length=%u; expected-max-length=%u", __FUNCTION__,
1535 		msg->epoch, ring_seqnum, msg->msg_type, MSG_TYPE_INFO_PYLD,
1536 		msg->request_id, ring->item_len));
1537 	dhd_prhex("msg header bytes: ", (volatile uchar *)msg, sizeof(*msg), DHD_ERROR_VAL);
1538 	if (trailer && msglen > 0 &&
1539 			(msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len) {
1540 		DHD_ERROR(("%s: trailer: seqnum=%u; expected-seqnum=%u"
1541 			" msgtype=0x%x; expected-msgtype=0x%x"
1542 			" length=%u; expected-length=%u", __FUNCTION__,
1543 			trailer->epoch, ring_seqnum, trailer->msg_type, MSG_TYPE_INFO_PYLD,
1544 			trailer->request_id, msg->request_id));
1545 		dhd_prhex("msg trailer bytes: ", (volatile uchar *)trailer,
1546 			sizeof(*trailer), DHD_ERROR_VAL);
1547 	}
1548 
1549 	if ((msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len)
1550 		len = msglen + sizeof(cmn_msg_hdr_t);
1551 	else
1552 		len = ring->item_len;
1553 
1554 	dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
1555 		(volatile uchar *) msg, len);
1556 
1557 	ring->seqnum++; /* skip this message */
1558 	return BCME_ERROR; /* invalid msg_type 0 -> noop callback */
1559 
1560 dma_completed:
1561 	DHD_TRACE(("%s: EDL header check pass, seqnum=%u; reqid=%u\n", __FUNCTION__,
1562 		msg->epoch, msg->request_id));
1563 
1564 	prot->d2h_sync_wait_tot += tries;
1565 	return BCME_OK;
1566 }
1567 
1568 /**
1569  * dhd_prot_d2h_sync_edl_none - Dongle ensures that the DMA will complete and the
1570  * host need not try to sync. This noop sync handler will be bound when the dongle
1571  * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required.
1572  */
1573 static int BCMFASTPATH
1574 (dhd_prot_d2h_sync_edl_none)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1575                        volatile cmn_msg_hdr_t *msg)
1576 {
1577 	/* Check for the LIVELOCK induce flag, which is set by firing
1578 	 * a dhd iovar to induce a LIVELOCK error. If the flag is set,
1579 	 * MSG_TYPE_INVALID is returned, which results in a LIVELOCK error.
1580 	 */
1581 	if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
1582 		DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
1583 		return BCME_ERROR;
1584 	} else {
1585 		if (msg->msg_type == MSG_TYPE_INFO_PYLD)
1586 			return BCME_OK;
1587 		else
1588 			return msg->msg_type;
1589 	}
1590 }
1591 #endif /* EWP_EDL */
1592 
1593 INLINE void
1594 dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason)
1595 {
1596 	/* To synchronize with the previous memory operations, call wmb() */
1597 	OSL_SMP_WMB();
1598 	dhd->prot->ioctl_received = reason;
1599 	/* Call another wmb() to make sure the event value is updated before waking up the waiter */
1600 	OSL_SMP_WMB();
1601 	dhd_os_ioctl_resp_wake(dhd);
1602 }
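/*
 * Note: the two write barriers above publish the ioctl completion data and
 * the 'reason' code before dhd_os_ioctl_resp_wake() runs; the waiter on the
 * ioctl response path re-reads prot->ioctl_received after being woken, so it
 * cannot observe the wake-up without also observing the updated reason.
 */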
1603 
1604 /**
1605  * dhd_prot_d2h_sync_init - Setup the host side DMA sync mode based on what
1606  * the dongle advertises.
1607  */
1608 static void
1609 dhd_prot_d2h_sync_init(dhd_pub_t *dhd)
1610 {
1611 	dhd_prot_t *prot = dhd->prot;
1612 	prot->d2h_sync_wait_max = 0UL;
1613 	prot->d2h_sync_wait_tot = 0UL;
1614 
1615 	prot->d2hring_ctrl_cpln.seqnum = D2H_EPOCH_INIT_VAL;
1616 	prot->d2hring_ctrl_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
1617 
1618 	prot->d2hring_tx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
1619 	prot->d2hring_tx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
1620 
1621 	prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
1622 	prot->d2hring_rx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
1623 
1624 	if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) {
1625 		prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum;
1626 #ifdef EWP_EDL
1627 		prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
1628 #endif /* EWP_EDL */
1629 		DHD_ERROR(("%s(): D2H sync mechanism is SEQNUM \r\n", __FUNCTION__));
1630 	} else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) {
1631 		prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
1632 #ifdef EWP_EDL
1633 		prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
1634 #endif /* EWP_EDL */
1635 		DHD_ERROR(("%s(): D2H sync mechanism is XORCSUM \r\n", __FUNCTION__));
1636 	} else {
1637 		prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
1638 #ifdef EWP_EDL
1639 		prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl_none;
1640 #endif /* EWP_EDL */
1641 		DHD_ERROR(("%s(): D2H sync mechanism is NONE \r\n", __FUNCTION__));
1642 	}
1643 }
1644 
1645 /**
1646  * dhd_prot_h2d_sync_init - Per H2D common ring, setup the msgbuf ring seqnum
1647  */
1648 static void
1649 dhd_prot_h2d_sync_init(dhd_pub_t *dhd)
1650 {
1651 	dhd_prot_t *prot = dhd->prot;
1652 	prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL;
1653 
1654 	prot->h2dring_rxp_subn.current_phase = 0;
1655 
1656 	prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL;
1657 	prot->h2dring_ctrl_subn.current_phase = 0;
1658 }
1659 
1660 /* +-----------------  End of PCIE DHD H2D DMA SYNC ------------------------+ */
1661 
1662 /*
1663  * +---------------------------------------------------------------------------+
1664  * PCIE DMA-able buffer. Sets up a dhd_dma_buf_t object, which includes the
1665  * virtual and physical addresses, the buffer length and the DMA handle.
1666  * A secdma handler is also included in the dhd_dma_buf object.
1667  * +---------------------------------------------------------------------------+
1668  */
1669 
1670 static INLINE void
1671 dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa)
1672 {
1673 	base_addr->low_addr = htol32(PHYSADDRLO(pa));
1674 	base_addr->high_addr = htol32(PHYSADDRHI(pa));
1675 }
1676 
1677 /**
1678  * dhd_dma_buf_audit - Any audits on a DHD DMA Buffer.
1679  * dhd_dma_buf_audit - Perform sanity audits on a DHD DMA buffer.
1680 static int
1681 dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
1682 {
1683 	uint32 pa_lowaddr, end; /* dongle uses 32bit ptr arithmetic */
1684 	ASSERT(dma_buf);
1685 	pa_lowaddr = PHYSADDRLO(dma_buf->pa);
1686 	ASSERT(PHYSADDRLO(dma_buf->pa) || PHYSADDRHI(dma_buf->pa));
1687 	ASSERT(ISALIGNED(pa_lowaddr, DMA_ALIGN_LEN));
1688 	ASSERT(dma_buf->len != 0);
1689 
1690 	/* test 32bit offset arithmetic over dma buffer for loss of carry-over */
1691 	end = (pa_lowaddr + dma_buf->len); /* end address */
1692 
1693 	if ((end & 0xFFFFFFFF) < (pa_lowaddr & 0xFFFFFFFF)) { /* exclude carryover */
1694 		DHD_ERROR(("%s: dma_buf %x len %d spans dongle 32bit ptr arithmetic\n",
1695 			__FUNCTION__, pa_lowaddr, dma_buf->len));
1696 		return BCME_ERROR;
1697 	}
1698 
1699 	return BCME_OK;
1700 }
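/*
 * Example of the carry-over test above: PHYSADDRLO(pa) = 0xFFFFF000 with
 * len = 0x2000 gives end = 0x1000 after 32bit wrap, which is smaller than
 * pa_lowaddr, so the audit rejects the buffer; the dongle's 32bit pointer
 * arithmetic could not span it without losing the carry.
 */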
1701 
1702 /**
1703  * dhd_dma_buf_alloc - Allocate a cache coherent DMA-able buffer.
1704  * returns BCME_OK=0 on success
1705  * returns non-zero negative error value on failure.
1706  */
1707 int
1708 dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len)
1709 {
1710 	uint32 dma_pad = 0;
1711 	osl_t *osh = dhd->osh;
1712 	uint16 dma_align = DMA_ALIGN_LEN;
1713 	uint32 rem = 0;
1714 
1715 	ASSERT(dma_buf != NULL);
1716 	ASSERT(dma_buf->va == NULL);
1717 	ASSERT(dma_buf->len == 0);
1718 
1719 	/* Pad the buffer length to align to cacheline size. */
1720 	rem = (buf_len % DHD_DMA_PAD);
1721 	dma_pad = rem ? (DHD_DMA_PAD - rem) : 0;
1722 
1723 	dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad,
1724 		dma_align, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah);
1725 
1726 	if (dma_buf->va == NULL) {
1727 		DHD_ERROR(("%s: buf_len %d, no memory available\n",
1728 			__FUNCTION__, buf_len));
1729 		return BCME_NOMEM;
1730 	}
1731 
1732 	dma_buf->len = buf_len; /* not including padded len */
1733 
1734 	if (dhd_dma_buf_audit(dhd, dma_buf) != BCME_OK) { /* audit dma buf */
1735 		dhd_dma_buf_free(dhd, dma_buf);
1736 		return BCME_ERROR;
1737 	}
1738 
1739 	dhd_dma_buf_reset(dhd, dma_buf); /* zero out and cache flush */
1740 
1741 	return BCME_OK;
1742 }
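/*
 * Padding example for dhd_dma_buf_alloc() (illustrative DHD_DMA_PAD value):
 * with DHD_DMA_PAD = 64 and buf_len = 100, rem = 36 and dma_pad = 28, so the
 * allocation is 128 bytes while dma_buf->len still reports the requested 100.
 */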
1743 
1744 /**
1745  * dhd_dma_buf_reset - Reset a cache coherent DMA-able buffer.
1746  */
1747 static void
1748 dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
1749 {
1750 	if ((dma_buf == NULL) || (dma_buf->va == NULL))
1751 		return;
1752 
1753 	(void)dhd_dma_buf_audit(dhd, dma_buf);
1754 
1755 	/* Zero out the entire buffer and cache flush */
1756 	memset((void*)dma_buf->va, 0, dma_buf->len);
1757 	OSL_CACHE_FLUSH((void *)dma_buf->va, dma_buf->len);
1758 }
1759 
1760 void
1761 dhd_local_buf_reset(char *buf, uint32 len)
1762 {
1763 	/* Zero out the entire buffer and cache flush */
1764 	memset((void*)buf, 0, len);
1765 	OSL_CACHE_FLUSH((void *)buf, len);
1766 }
1767 
1768 /**
1769  * dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated using
1770  * dhd_dma_buf_alloc().
1771  */
1772 void
1773 dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
1774 {
1775 	osl_t *osh = dhd->osh;
1776 
1777 	ASSERT(dma_buf);
1778 
1779 	if (dma_buf->va == NULL)
1780 		return; /* Allow for free invocation, when alloc failed */
1781 
1782 	/* DEBUG: dhd_dma_buf_reset(dhd, dma_buf) */
1783 	(void)dhd_dma_buf_audit(dhd, dma_buf);
1784 
1785 	/* dma buffer may have been padded at allocation */
1786 	DMA_FREE_CONSISTENT(osh, dma_buf->va, dma_buf->_alloced,
1787 		dma_buf->pa, dma_buf->dmah);
1788 
1789 	memset(dma_buf, 0, sizeof(dhd_dma_buf_t));
1790 }
1791 
1792 /**
1793  * dhd_dma_buf_init - Initialize a dhd_dma_buf with specified values.
1794  * Do not use dhd_dma_buf_init to zero out a dhd_dma_buf_t object. Use memset 0.
1795  */
1796 void
1797 dhd_dma_buf_init(dhd_pub_t *dhd, void *dhd_dma_buf,
1798 	void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma)
1799 {
1800 	dhd_dma_buf_t *dma_buf;
1801 	ASSERT(dhd_dma_buf);
1802 	dma_buf = (dhd_dma_buf_t *)dhd_dma_buf;
1803 	dma_buf->va = va;
1804 	dma_buf->len = len;
1805 	dma_buf->pa = pa;
1806 	dma_buf->dmah = dmah;
1807 	dma_buf->secdma = secdma;
1808 
1809 	/* Audit user defined configuration */
1810 	(void)dhd_dma_buf_audit(dhd, dma_buf);
1811 }
1812 
1813 /* +------------------  End of PCIE DHD DMA BUF ADT ------------------------+ */
1814 
1815 /*
1816  * +---------------------------------------------------------------------------+
1817  * DHD_MAP_PKTID_LOGGING
1818  * Logs the PKTID and DMA map/unmap information, for debugging SMMU fault
1819  * issues on customer platforms.
1820  * +---------------------------------------------------------------------------+
1821  */
1822 
1823 #ifdef DHD_MAP_PKTID_LOGGING
1824 typedef struct dhd_pktid_log_item {
1825 	dmaaddr_t pa;		/* DMA bus address */
1826 	uint64 ts_nsec;		/* Timestamp: nsec */
1827 	uint32 size;		/* DMA map/unmap size */
1828 	uint32 pktid;		/* Packet ID */
1829 	uint8 pkttype;		/* Packet Type */
1830 	uint8 rsvd[7];		/* Reserved for future use */
1831 } dhd_pktid_log_item_t;
1832 
1833 typedef struct dhd_pktid_log {
1834 	uint32 items;		/* number of total items */
1835 	uint32 index;		/* index of pktid_log_item */
1836 	dhd_pktid_log_item_t map[0];	/* metadata storage */
1837 } dhd_pktid_log_t;
1838 
1839 typedef void * dhd_pktid_log_handle_t; /* opaque handle to pktid log */
1840 
1841 #define	MAX_PKTID_LOG				(2048)
1842 #define DHD_PKTID_LOG_ITEM_SZ			(sizeof(dhd_pktid_log_item_t))
1843 #define DHD_PKTID_LOG_SZ(items)			(uint32)((sizeof(dhd_pktid_log_t)) + \
1844 					((DHD_PKTID_LOG_ITEM_SZ) * (items)))
1845 
1846 #define DHD_PKTID_LOG_INIT(dhd, hdl)		dhd_pktid_logging_init((dhd), (hdl))
1847 #define DHD_PKTID_LOG_FINI(dhd, hdl)		dhd_pktid_logging_fini((dhd), (hdl))
1848 #define DHD_PKTID_LOG(dhd, hdl, pa, pktid, len, pkttype)	\
1849 	dhd_pktid_logging((dhd), (hdl), (pa), (pktid), (len), (pkttype))
1850 #define DHD_PKTID_LOG_DUMP(dhd)			dhd_pktid_logging_dump((dhd))
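/*
 * The PKTID log behaves as a circular buffer: DHD_PKTID_LOG_SZ() covers the
 * map[] flexible array member, and dhd_pktid_logging() below advances
 * log->index modulo log->items, so only the most recent log->items map or
 * unmap records are retained in each log.
 */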
1851 
1852 static dhd_pktid_log_handle_t *
1853 dhd_pktid_logging_init(dhd_pub_t *dhd, uint32 num_items)
1854 {
1855 	dhd_pktid_log_t *log;
1856 	uint32 log_size;
1857 
1858 	log_size = DHD_PKTID_LOG_SZ(num_items);
1859 	log = (dhd_pktid_log_t *)MALLOCZ(dhd->osh, log_size);
1860 	if (log == NULL) {
1861 		DHD_ERROR(("%s: MALLOC failed for size %d\n",
1862 			__FUNCTION__, log_size));
1863 		return (dhd_pktid_log_handle_t *)NULL;
1864 	}
1865 
1866 	log->items = num_items;
1867 	log->index = 0;
1868 
1869 	return (dhd_pktid_log_handle_t *)log; /* opaque handle */
1870 }
1871 
1872 static void
1873 dhd_pktid_logging_fini(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle)
1874 {
1875 	dhd_pktid_log_t *log;
1876 	uint32 log_size;
1877 
1878 	if (handle == NULL) {
1879 		DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
1880 		return;
1881 	}
1882 
1883 	log = (dhd_pktid_log_t *)handle;
1884 	log_size = DHD_PKTID_LOG_SZ(log->items);
1885 	MFREE(dhd->osh, handle, log_size);
1886 }
1887 
1888 static void
1889 dhd_pktid_logging(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle, dmaaddr_t pa,
1890 	uint32 pktid, uint32 len, uint8 pkttype)
1891 {
1892 	dhd_pktid_log_t *log;
1893 	uint32 idx;
1894 
1895 	if (handle == NULL) {
1896 		DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
1897 		return;
1898 	}
1899 
1900 	log = (dhd_pktid_log_t *)handle;
1901 	idx = log->index;
1902 	log->map[idx].ts_nsec = OSL_LOCALTIME_NS();
1903 	log->map[idx].pa = pa;
1904 	log->map[idx].pktid = pktid;
1905 	log->map[idx].size = len;
1906 	log->map[idx].pkttype = pkttype;
1907 	log->index = (idx + 1) % (log->items);	/* update index */
1908 }
1909 
1910 void
1911 dhd_pktid_logging_dump(dhd_pub_t *dhd)
1912 {
1913 	dhd_prot_t *prot = dhd->prot;
1914 	dhd_pktid_log_t *map_log, *unmap_log;
1915 	uint64 ts_sec, ts_usec;
1916 
1917 	if (prot == NULL) {
1918 		DHD_ERROR(("%s: prot is NULL\n", __FUNCTION__));
1919 		return;
1920 	}
1921 
1922 	map_log = (dhd_pktid_log_t *)(prot->pktid_dma_map);
1923 	unmap_log = (dhd_pktid_log_t *)(prot->pktid_dma_unmap);
1924 	OSL_GET_LOCALTIME(&ts_sec, &ts_usec);
1925 	if (map_log && unmap_log) {
1926 		DHD_ERROR(("%s: map_idx=%d unmap_idx=%d "
1927 			"current time=[%5lu.%06lu]\n", __FUNCTION__,
1928 			map_log->index, unmap_log->index,
1929 			(unsigned long)ts_sec, (unsigned long)ts_usec));
1930 		DHD_ERROR(("%s: pktid_map_log(pa)=0x%llx size=%d, "
1931 			"pktid_unmap_log(pa)=0x%llx size=%d\n", __FUNCTION__,
1932 			(uint64)__virt_to_phys((ulong)(map_log->map)),
1933 			(uint32)(DHD_PKTID_LOG_ITEM_SZ * map_log->items),
1934 			(uint64)__virt_to_phys((ulong)(unmap_log->map)),
1935 			(uint32)(DHD_PKTID_LOG_ITEM_SZ * unmap_log->items)));
1936 	}
1937 }
1938 #endif /* DHD_MAP_PKTID_LOGGING */
1939 
1940 /* +-----------------  End of DHD_MAP_PKTID_LOGGING -----------------------+ */
1941 
1942 /*
1943  * +---------------------------------------------------------------------------+
1944  * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
1945  * Its main purpose is to save memory on the dongle; it has other uses as well.
1946  * The packet id map, also includes storage for some packet parameters that
1947  * may be saved. A native packet pointer along with the parameters may be saved
1948  * and a unique 32bit pkt id will be returned. Later, the saved packet pointer
1949  * and the metadata may be retrieved using the previously allocated packet id.
1950  * +---------------------------------------------------------------------------+
1951  */
1952 #define DHD_PCIE_PKTID
1953 
1954 /* On Router, the pktptr serves as a pktid. */
1955 #if defined(BCM_ROUTER_DHD) && !defined(BCA_HNDROUTER)
1956 #undef DHD_PCIE_PKTID		/* Comment this undef, to reenable PKTIDMAP */
1957 #endif /* BCM_ROUTER_DHD && !BCA_HNDROUTER */
1958 
1959 #if defined(BCM_ROUTER_DHD) && defined(DHD_PCIE_PKTID)
1960 #undef MAX_TX_PKTID
1961 #define MAX_TX_PKTID     ((36 * 1024) - 1) /* Extend for 64 clients support. */
1962 #endif /* BCM_ROUTER_DHD && DHD_PCIE_PKTID */
1963 
1964 /* XXX: PROP_TXSTATUS: WLFC defines a private pkttag layout.
1965  * Hence cannot store the dma parameters in the pkttag and the pktidmap locker
1966  * is required.
1967  */
1968 #if defined(PROP_TXSTATUS) && !defined(DHD_PCIE_PKTID)
1969 #error "PKTIDMAP must be supported with PROP_TXSTATUS/WLFC"
1970 #endif
1971 
1972 /* Enum for marking the buffer color based on usage */
1973 typedef enum dhd_pkttype {
1974 	PKTTYPE_DATA_TX = 0,
1975 	PKTTYPE_DATA_RX,
1976 	PKTTYPE_IOCTL_RX,
1977 	PKTTYPE_EVENT_RX,
1978 	PKTTYPE_INFO_RX,
1979 	/* dhd_prot_pkt_free no check, if pktid reserved and no space avail case */
1980 	PKTTYPE_NO_CHECK,
1981 	PKTTYPE_TSBUF_RX
1982 } dhd_pkttype_t;
1983 
1984 #define DHD_PKTID_MIN_AVAIL_COUNT		512U
1985 #define DHD_PKTID_DEPLETED_MAX_COUNT		(DHD_PKTID_MIN_AVAIL_COUNT * 2U)
1986 #define DHD_PKTID_INVALID			(0U)
1987 #define DHD_IOCTL_REQ_PKTID			(0xFFFE)
1988 #define DHD_FAKE_PKTID				(0xFACE)
1989 #define DHD_H2D_DBGRING_REQ_PKTID		0xFFFD
1990 #define DHD_D2H_DBGRING_REQ_PKTID		0xFFFC
1991 #define DHD_H2D_HOSTTS_REQ_PKTID		0xFFFB
1992 #define DHD_H2D_BTLOGRING_REQ_PKTID		0xFFFA
1993 #define DHD_D2H_BTLOGRING_REQ_PKTID		0xFFF9
1994 #define DHD_H2D_SNAPSHOT_UPLOAD_REQ_PKTID	0xFFF8
1995 #ifdef DHD_HP2P
1996 #define DHD_D2H_HPPRING_TXREQ_PKTID		0xFFF7
1997 #define DHD_D2H_HPPRING_RXREQ_PKTID		0xFFF6
1998 #endif /* DHD_HP2P */
1999 
2000 #define IS_FLOWRING(ring) \
2001 	((strncmp(ring->name, "h2dflr", sizeof("h2dflr"))) == (0))
2002 
2003 typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */
2004 
2005 /* Construct a packet id mapping table, returning an opaque map handle */
2006 static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items);
2007 
2008 /* Destroy a packet id mapping table, freeing all packets active in the table */
2009 static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map);
2010 
2011 #define DHD_NATIVE_TO_PKTID_INIT(dhd, items) dhd_pktid_map_init((dhd), (items))
2012 #define DHD_NATIVE_TO_PKTID_RESET(dhd, map)  dhd_pktid_map_reset((dhd), (map))
2013 #define DHD_NATIVE_TO_PKTID_FINI(dhd, map)   dhd_pktid_map_fini((dhd), (map))
2014 #define DHD_NATIVE_TO_PKTID_FINI_IOCTL(osh, map)  dhd_pktid_map_fini_ioctl((osh), (map))
2015 
2016 #if defined(DHD_PCIE_PKTID)
2017 #if defined(NDIS) || defined(DHD_EFI)
2018 /* XXX: for NDIS, using consistent memory instead of buffer from PKTGET for
2019  * up to 8K ioctl response
2020  */
2021 #define IOCTLRESP_USE_CONSTMEM
2022 static void free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
2023 static int  alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
2024 #endif /* NDIS || DHD_EFI */
2025 
2026 /* Determine number of pktids that are available */
2027 static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle);
2028 
2029 /* Allocate a unique pktid against which a pkt and some metadata is saved */
2030 static INLINE uint32 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
2031 	void *pkt, dhd_pkttype_t pkttype);
2032 static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
2033 	void *pkt, uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dma,
2034 	void *dmah, void *secdma, dhd_pkttype_t pkttype);
2035 static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
2036 	void *pkt, dmaaddr_t pa, uint32 len, uint8 dma,
2037 	void *dmah, void *secdma, dhd_pkttype_t pkttype);
2038 
2039 /* Return an allocated pktid, retrieving previously saved pkt and metadata */
2040 static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
2041 	uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah,
2042 	void **secdma, dhd_pkttype_t pkttype, bool rsv_locker);
2043 
2044 #ifdef DHD_PKTTS
2045 /* Store the Metadata buffer to the locker */
2046 static INLINE void
2047 dhd_pktid_map_save_metadata(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *mpkt,
2048 	dmaaddr_t mpkt_pa,
2049 	uint16	mpkt_len,
2050 	void *dmah,
2051 	uint32 nkey);
2052 
2053 /* Return the Metadata buffer from the locker */
2054 static void * dhd_pktid_map_retreive_metadata(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
2055 	dmaaddr_t *pmpkt_pa, uint32 *pmpkt_len, void **pdmah, uint32 nkey);
2056 #endif /* DHD_PKTTS */
2057 
2058 /*
2059  * DHD_PKTID_AUDIT_ENABLED: Audit of PktIds in DHD for duplicate alloc and frees
2060  *
2061  * DHD_PKTID_AUDIT_MAP: Audit the LIFO or FIFO PktIdMap allocator
2062  * DHD_PKTID_AUDIT_RING: Audit the pktid during producer/consumer ring operation
2063  *
2064  * CAUTION: When DHD_PKTID_AUDIT_ENABLED is defined,
2065  *    either DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be selected.
2066  */
2067 #if defined(DHD_PKTID_AUDIT_ENABLED)
2068 #define USE_DHD_PKTID_AUDIT_LOCK 1
2069 /* Audit the pktidmap allocator */
2070 /* #define DHD_PKTID_AUDIT_MAP */
2071 
2072 /* Audit the pktid during production/consumption of workitems */
2073 #define DHD_PKTID_AUDIT_RING
2074 
2075 #if defined(DHD_PKTID_AUDIT_MAP) && defined(DHD_PKTID_AUDIT_RING)
2076 #error "Only one of MAP or RING pktid audit may be enabled at a time."
2077 #endif /* DHD_PKTID_AUDIT_MAP && DHD_PKTID_AUDIT_RING */
2078 
2079 #define DHD_DUPLICATE_ALLOC     1
2080 #define DHD_DUPLICATE_FREE      2
2081 #define DHD_TEST_IS_ALLOC       3
2082 #define DHD_TEST_IS_FREE        4
2083 
2084 typedef enum dhd_pktid_map_type {
2085 	DHD_PKTID_MAP_TYPE_CTRL = 1,
2086 	DHD_PKTID_MAP_TYPE_TX,
2087 	DHD_PKTID_MAP_TYPE_RX,
2088 	DHD_PKTID_MAP_TYPE_UNKNOWN
2089 } dhd_pktid_map_type_t;
2090 
2091 #ifdef USE_DHD_PKTID_AUDIT_LOCK
2092 #define DHD_PKTID_AUDIT_LOCK_INIT(osh)          osl_spin_lock_init(osh)
2093 #define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock)  osl_spin_lock_deinit(osh, lock)
2094 #define DHD_PKTID_AUDIT_LOCK(lock)              osl_spin_lock(lock)
2095 #define DHD_PKTID_AUDIT_UNLOCK(lock, flags)     osl_spin_unlock(lock, flags)
2096 #else
2097 #define DHD_PKTID_AUDIT_LOCK_INIT(osh)          (void *)(1)
2098 #define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock)  do { /* noop */ } while (0)
2099 #define DHD_PKTID_AUDIT_LOCK(lock)              0
2100 #define DHD_PKTID_AUDIT_UNLOCK(lock, flags)     do { /* noop */ } while (0)
2101 #endif /* !USE_DHD_PKTID_AUDIT_LOCK */
2102 
2103 #endif /* DHD_PKTID_AUDIT_ENABLED */
2104 
2105 #define USE_DHD_PKTID_LOCK   1
2106 
2107 #ifdef USE_DHD_PKTID_LOCK
2108 #define DHD_PKTID_LOCK_INIT(osh)                osl_spin_lock_init(osh)
2109 #define DHD_PKTID_LOCK_DEINIT(osh, lock)        osl_spin_lock_deinit(osh, lock)
2110 #define DHD_PKTID_LOCK(lock, flags)             (flags) = osl_spin_lock(lock)
2111 #define DHD_PKTID_UNLOCK(lock, flags)           osl_spin_unlock(lock, flags)
2112 #else
2113 #define DHD_PKTID_LOCK_INIT(osh)                (void *)(1)
2114 #define DHD_PKTID_LOCK_DEINIT(osh, lock)	\
2115 	do { \
2116 		BCM_REFERENCE(osh); \
2117 		BCM_REFERENCE(lock); \
2118 	} while (0)
2119 #define DHD_PKTID_LOCK(lock)                    0
2120 #define DHD_PKTID_UNLOCK(lock, flags)           \
2121 	do { \
2122 		BCM_REFERENCE(lock); \
2123 		BCM_REFERENCE(flags); \
2124 	} while (0)
2125 #endif /* !USE_DHD_PKTID_LOCK */
2126 
2127 typedef enum dhd_locker_state {
2128 	LOCKER_IS_FREE,
2129 	LOCKER_IS_BUSY,
2130 	LOCKER_IS_RSVD
2131 } dhd_locker_state_t;
2132 
2133 /* Packet metadata saved in packet id mapper */
2134 
2135 typedef struct dhd_pktid_item {
2136 	dhd_locker_state_t state;  /* tag a locker to be free, busy or reserved */
2137 	uint8       dir;      /* dma map direction (Tx=flush or Rx=invalidate) */
2138 	dhd_pkttype_t pkttype; /* pktlists are maintained based on pkttype */
2139 	uint16      len;      /* length of mapped packet's buffer */
2140 	void        *pkt;     /* opaque native pointer to a packet */
2141 	dmaaddr_t   pa;       /* physical address of mapped packet's buffer */
2142 	void        *dmah;    /* handle to OS specific DMA map */
2143 	void		*secdma;
2144 #ifdef DHD_PKTTS
2145 	void		*mpkt;    /* VA of Metadata */
2146 	dmaaddr_t	mpkt_pa;  /* PA of Metadata */
2147 	uint16		mpkt_len; /* Length of Metadata */
2148 #endif /* DHD_PKTTS */
2149 } dhd_pktid_item_t;
2150 
2151 typedef uint32 dhd_pktid_key_t;
2152 
2153 typedef struct dhd_pktid_map {
2154 	uint32      items;    /* total items in map */
2155 	uint32      avail;    /* total available items */
2156 	int         failures; /* lockers unavailable count */
2157 	/* Spinlock to protect dhd_pktid_map in process/tasklet context */
2158 	void        *pktid_lock; /* Used when USE_DHD_PKTID_LOCK is defined */
2159 
2160 #if defined(DHD_PKTID_AUDIT_ENABLED)
2161 	void		*pktid_audit_lock;
2162 	struct bcm_mwbmap *pktid_audit; /* multi word bitmap based audit */
2163 #endif /* DHD_PKTID_AUDIT_ENABLED */
2164 	dhd_pktid_key_t	*keys; /* map_items +1 unique pkt ids */
2165 	dhd_pktid_item_t lockers[0];           /* metadata storage */
2166 } dhd_pktid_map_t;
2167 
2168 /*
2169  * PktId (Locker) #0 is never allocated and is considered invalid.
2170  *
2171  * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a
2172  * depleted pktid pool and must not be used by the caller.
2173  *
2174  * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID.
2175  */
2176 
2177 #define DHD_PKTID_FREE_LOCKER           (FALSE)
2178 #define DHD_PKTID_RSV_LOCKER            (TRUE)
2179 
2180 #define DHD_PKTID_ITEM_SZ               (sizeof(dhd_pktid_item_t))
2181 #define DHD_PKIDMAP_ITEMS(items)        (items)
2182 #define DHD_PKTID_MAP_SZ(items)         (sizeof(dhd_pktid_map_t) + \
2183 	                                     (DHD_PKTID_ITEM_SZ * ((items) + 1)))
2184 #define DHD_PKTIDMAP_KEYS_SZ(items)     (sizeof(dhd_pktid_key_t) * ((items) + 1))
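/*
 * Allocation note (see dhd_pktid_map_init() below): the map itself is sized
 * by DHD_PKTID_MAP_SZ(items) and VMALLOC'ed, with the lockers[] flexible
 * array covering items + 1 entries so that locker #0 can stay reserved as
 * DHD_PKTID_INVALID, while the keys[] stack is a separate MALLOC of
 * DHD_PKTIDMAP_KEYS_SZ(items) bytes.
 */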
2185 
2186 #define DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, map)  dhd_pktid_map_reset_ioctl((dhd), (map))
2187 
2188 /* Convert a packet to a pktid, and save pkt pointer in busy locker */
2189 #define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype)    \
2190 	dhd_pktid_map_reserve((dhd), (map), (pkt), (pkttype))
2191 /* Reuse a previously reserved locker to save packet params */
2192 #define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dir, dmah, secdma, pkttype) \
2193 	dhd_pktid_map_save((dhd), (map), (void *)(pkt), (nkey), (pa), (uint32)(len), \
2194 		(uint8)(dir), (void *)(dmah), (void *)(secdma), \
2195 		(dhd_pkttype_t)(pkttype))
2196 /* Convert a packet to a pktid, and save packet params in locker */
2197 #define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir, dmah, secdma, pkttype) \
2198 	dhd_pktid_map_alloc((dhd), (map), (void *)(pkt), (pa), (uint32)(len), \
2199 		(uint8)(dir), (void *)(dmah), (void *)(secdma), \
2200 		(dhd_pkttype_t)(pkttype))
2201 
2202 /* Convert pktid to a packet, and free the locker */
2203 #define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
2204 	dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
2205 		(dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
2206 		(void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER)
2207 
2208 /* Convert the pktid to a packet, empty locker, but keep it reserved */
2209 #define DHD_PKTID_TO_NATIVE_RSV(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
2210 	dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
2211 	                   (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
2212 	                   (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER)
2213 
2214 #ifdef DHD_PKTTS
2215 #define DHD_PKTID_SAVE_METADATA(dhd, map, mpkt, mpkt_pa, mpkt_len, dmah, nkey) \
2216 	dhd_pktid_map_save_metadata(dhd, map, mpkt, mpkt_pa, mpkt_len, dmah, nkey)
2217 
2218 #define DHD_PKTID_RETREIVE_METADATA(dhd, map, mpkt_pa, mpkt_len, dmah, nkey) \
2219 	dhd_pktid_map_retreive_metadata(dhd, map, (dmaaddr_t *)&mpkt_pa, (uint32 *)&mpkt_len, \
2220 		(void **) &dmah, nkey)
2221 #endif /* DHD_PKTTS */
2222 
2223 #define DHD_PKTID_AVAIL(map)                 dhd_pktid_map_avail_cnt(map)
2224 
2225 #if defined(DHD_PKTID_AUDIT_ENABLED)
2226 
2227 static int
2228 dhd_get_pktid_map_type(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map)
2229 {
2230 	dhd_prot_t *prot = dhd->prot;
2231 	int pktid_map_type;
2232 
2233 	if (pktid_map == prot->pktid_ctrl_map) {
2234 		pktid_map_type = DHD_PKTID_MAP_TYPE_CTRL;
2235 	} else if (pktid_map == prot->pktid_tx_map) {
2236 		pktid_map_type = DHD_PKTID_MAP_TYPE_TX;
2237 	} else if (pktid_map == prot->pktid_rx_map) {
2238 		pktid_map_type = DHD_PKTID_MAP_TYPE_RX;
2239 	} else {
2240 		pktid_map_type = DHD_PKTID_MAP_TYPE_UNKNOWN;
2241 	}
2242 
2243 	return pktid_map_type;
2244 }
2245 
2246 /**
2247 * __dhd_pktid_audit - Use the mwbmap to audit validity of a pktid.
2248 */
2249 static int
2250 __dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
2251 	const int test_for, const char *errmsg)
2252 {
2253 #define DHD_PKT_AUDIT_STR "ERROR: %16s Host PktId Audit: "
2254 	struct bcm_mwbmap *handle;
2255 	uint32	flags;
2256 	bool ignore_audit;
2257 	int error = BCME_OK;
2258 
2259 	if (pktid_map == (dhd_pktid_map_t *)NULL) {
2260 		DHD_ERROR((DHD_PKT_AUDIT_STR "Pkt id map NULL\n", errmsg));
2261 		return BCME_OK;
2262 	}
2263 
2264 	flags = DHD_PKTID_AUDIT_LOCK(pktid_map->pktid_audit_lock);
2265 
2266 	handle = pktid_map->pktid_audit;
2267 	if (handle == (struct bcm_mwbmap *)NULL) {
2268 		DHD_ERROR((DHD_PKT_AUDIT_STR "Handle NULL\n", errmsg));
2269 		goto out;
2270 	}
2271 
2272 	/* Exclude special pktids from audit */
2273 	ignore_audit = (pktid == DHD_IOCTL_REQ_PKTID) | (pktid == DHD_FAKE_PKTID);
2274 	if (ignore_audit) {
2275 		goto out;
2276 	}
2277 
2278 	if ((pktid == DHD_PKTID_INVALID) || (pktid > pktid_map->items)) {
2279 		DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> invalid\n", errmsg, pktid));
2280 		error = BCME_ERROR;
2281 		goto out;
2282 	}
2283 
2284 	/* Perform audit */
2285 	switch (test_for) {
2286 		case DHD_DUPLICATE_ALLOC:
2287 			if (!bcm_mwbmap_isfree(handle, pktid)) {
2288 				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> alloc duplicate\n",
2289 				           errmsg, pktid));
2290 				error = BCME_ERROR;
2291 			} else {
2292 				bcm_mwbmap_force(handle, pktid);
2293 			}
2294 			break;
2295 
2296 		case DHD_DUPLICATE_FREE:
2297 			if (bcm_mwbmap_isfree(handle, pktid)) {
2298 				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> free duplicate\n",
2299 				           errmsg, pktid));
2300 				error = BCME_ERROR;
2301 			} else {
2302 				bcm_mwbmap_free(handle, pktid);
2303 			}
2304 			break;
2305 
2306 		case DHD_TEST_IS_ALLOC:
2307 			if (bcm_mwbmap_isfree(handle, pktid)) {
2308 				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not allocated\n",
2309 				           errmsg, pktid));
2310 				error = BCME_ERROR;
2311 			}
2312 			break;
2313 
2314 		case DHD_TEST_IS_FREE:
2315 			if (!bcm_mwbmap_isfree(handle, pktid)) {
2316 				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not free",
2317 				           errmsg, pktid));
2318 				error = BCME_ERROR;
2319 			}
2320 			break;
2321 
2322 		default:
2323 			DHD_ERROR(("%s: Invalid test case: %d\n", __FUNCTION__, test_for));
2324 			error = BCME_ERROR;
2325 			break;
2326 	}
2327 
2328 out:
2329 	DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
2330 
2331 	if (error != BCME_OK) {
2332 		dhd->pktid_audit_failed = TRUE;
2333 	}
2334 
2335 	return error;
2336 }
2337 
2338 static int
2339 dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
2340 	const int test_for, const char *errmsg)
2341 {
2342 	int ret = BCME_OK;
2343 	ret = __dhd_pktid_audit(dhd, pktid_map, pktid, test_for, errmsg);
2344 	if (ret == BCME_ERROR) {
2345 		DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
2346 			__FUNCTION__, pktid, dhd_get_pktid_map_type(dhd, pktid_map)));
2347 		dhd_pktid_error_handler(dhd);
2348 #ifdef DHD_MAP_PKTID_LOGGING
2349 		DHD_PKTID_LOG_DUMP(dhd);
2350 #endif /* DHD_MAP_PKTID_LOGGING */
2351 	}
2352 
2353 	return ret;
2354 }
2355 
2356 #define DHD_PKTID_AUDIT(dhdp, map, pktid, test_for) \
2357 	dhd_pktid_audit((dhdp), (dhd_pktid_map_t *)(map), (pktid), (test_for), __FUNCTION__)
2358 
2359 static int
2360 dhd_pktid_audit_ring_debug(dhd_pub_t *dhdp, dhd_pktid_map_t *map, uint32 pktid,
2361 	const int test_for, void *msg, uint32 msg_len, const char *func)
2362 {
2363 	int ret = BCME_OK;
2364 
2365 	if (dhd_query_bus_erros(dhdp)) {
2366 		return BCME_ERROR;
2367 	}
2368 
2369 	ret = __dhd_pktid_audit(dhdp, map, pktid, test_for, func);
2370 	if (ret == BCME_ERROR) {
2371 		DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
2372 			__FUNCTION__, pktid, dhd_get_pktid_map_type(dhdp, map)));
2373 		prhex(func, (uchar *)msg, msg_len);
2374 		dhd_pktid_error_handler(dhdp);
2375 #ifdef DHD_MAP_PKTID_LOGGING
2376 		DHD_PKTID_LOG_DUMP(dhdp);
2377 #endif /* DHD_MAP_PKTID_LOGGING */
2378 	}
2379 	return ret;
2380 }
2381 #define DHD_PKTID_AUDIT_RING_DEBUG(dhdp, map, pktid, test_for, msg, msg_len) \
2382 	dhd_pktid_audit_ring_debug((dhdp), (dhd_pktid_map_t *)(map), \
2383 		(pktid), (test_for), msg, msg_len, __FUNCTION__)
2384 
2385 #endif /* DHD_PKTID_AUDIT_ENABLED */
2386 
2387 /**
2388  * +---------------------------------------------------------------------------+
2389  * Packet to Packet Id mapper using a <numbered_key, locker> paradigm.
2390  *
2391  * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_xxx_PKTID].
2392  *
2393  * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique
2394  * packet id is returned. This unique packet id may be used to retrieve the
2395  * previously saved packet metadata, using dhd_pktid_map_free(). On invocation
2396  * of dhd_pktid_map_free(), the unique packet id is essentially freed. A
2397  * subsequent call to dhd_pktid_map_alloc() may reuse this packet id.
2398  *
2399  * Implementation Note:
2400  * Convert this into a <key,locker> abstraction and place into bcmutils !
2401  * Locker abstraction should treat contents as opaque storage, and a
2402  * callback should be registered to handle busy lockers on destructor.
2403  *
2404  * +---------------------------------------------------------------------------+
2405  */
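/*
 * Usage sketch (illustrative only; the real callers are the Tx/Rx/ctrl fast
 * paths in this file, and the dir/secdma arguments depend on the platform):
 *
 *	pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_tx_map, pkt, pa, len,
 *		DMA_TX, dmah, secdma, PKTTYPE_DATA_TX);
 *	if (pktid == DHD_PKTID_INVALID)
 *		return BCME_NORESOURCE;	(pool depleted; the caller must back off)
 *	...
 *	pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid, pa, len,
 *		dmah, secdma, PKTTYPE_DATA_TX);
 */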
2406 
2407 /** Allocate and initialize a mapper of num_items <numbered_key, locker> */
2408 
2409 static dhd_pktid_map_handle_t *
2410 dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
2411 {
2412 	void* osh;
2413 	uint32 nkey;
2414 	dhd_pktid_map_t *map;
2415 	uint32 dhd_pktid_map_sz;
2416 	uint32 map_items;
2417 	uint32 map_keys_sz;
2418 	osh = dhd->osh;
2419 
2420 	dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items);
2421 
2422 	map = (dhd_pktid_map_t *)VMALLOC(osh, dhd_pktid_map_sz);
2423 	if (map == NULL) {
2424 		DHD_ERROR(("%s:%d: MALLOC failed for size %d\n",
2425 			__FUNCTION__, __LINE__, dhd_pktid_map_sz));
2426 		return (dhd_pktid_map_handle_t *)NULL;
2427 	}
2428 
2429 	map->items = num_items;
2430 	map->avail = num_items;
2431 
2432 	map_items = DHD_PKIDMAP_ITEMS(map->items);
2433 
2434 	map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
2435 
2436 	/* Initialize the lock that protects this structure */
2437 	map->pktid_lock = DHD_PKTID_LOCK_INIT(osh);
2438 	if (map->pktid_lock == NULL) {
2439 		DHD_ERROR(("%s:%d: Lock init failed \r\n", __FUNCTION__, __LINE__));
2440 		goto error;
2441 	}
2442 
2443 	map->keys = (dhd_pktid_key_t *)MALLOC(osh, map_keys_sz);
2444 	if (map->keys == NULL) {
2445 		DHD_ERROR(("%s:%d: MALLOC failed for map->keys size %d\n",
2446 			__FUNCTION__, __LINE__, map_keys_sz));
2447 		goto error;
2448 	}
2449 
2450 #if defined(DHD_PKTID_AUDIT_ENABLED)
2451 		/* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */
2452 		map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1);
2453 		if (map->pktid_audit == (struct bcm_mwbmap *)NULL) {
2454 			DHD_ERROR(("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__));
2455 			goto error;
2456 		} else {
2457 			DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n",
2458 				__FUNCTION__, __LINE__, map_items + 1));
2459 		}
2460 		map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh);
2461 #endif /* DHD_PKTID_AUDIT_ENABLED */
2462 
2463 	for (nkey = 1; nkey <= map_items; nkey++) { /* locker #0 is reserved */
2464 		map->keys[nkey] = nkey; /* populate with unique keys */
2465 		map->lockers[nkey].state = LOCKER_IS_FREE;
2466 		map->lockers[nkey].pkt   = NULL; /* bzero: redundant */
2467 		map->lockers[nkey].len   = 0;
2468 	}
2469 
2470 	/* Reserve pktid #0, i.e. DHD_PKTID_INVALID to be inuse */
2471 	map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY; /* tag locker #0 as inuse */
2472 	map->lockers[DHD_PKTID_INVALID].pkt   = NULL; /* bzero: redundant */
2473 	map->lockers[DHD_PKTID_INVALID].len   = 0;
2474 
2475 #if defined(DHD_PKTID_AUDIT_ENABLED)
2476 	/* do not use dhd_pktid_audit() here, use bcm_mwbmap_force directly */
2477 	bcm_mwbmap_force(map->pktid_audit, DHD_PKTID_INVALID);
2478 #endif /* DHD_PKTID_AUDIT_ENABLED */
2479 
2480 	return (dhd_pktid_map_handle_t *)map; /* opaque handle */
2481 
2482 error:
2483 	if (map) {
2484 #if defined(DHD_PKTID_AUDIT_ENABLED)
2485 		if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
2486 			bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
2487 			map->pktid_audit = (struct bcm_mwbmap *)NULL;
2488 			if (map->pktid_audit_lock)
2489 				DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
2490 		}
2491 #endif /* DHD_PKTID_AUDIT_ENABLED */
2492 
2493 		if (map->keys) {
2494 			MFREE(osh, map->keys, map_keys_sz);
2495 		}
2496 
2497 		if (map->pktid_lock) {
2498 			DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
2499 		}
2500 
2501 		VMFREE(osh, map, dhd_pktid_map_sz);
2502 	}
2503 	return (dhd_pktid_map_handle_t *)NULL;
2504 }
2505 
2506 /**
2507  * Retrieve all allocated keys and free all <numbered_key, locker>.
2508  * Freeing implies: unmapping the buffers and freeing the native packet
2509  * Freeing implies: unmapping the buffers and freeing the native packet.
2510  */
2511 static void
2512 dhd_pktid_map_reset(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
2513 {
2514 	void *osh;
2515 	uint32 nkey;
2516 	dhd_pktid_map_t *map;
2517 	dhd_pktid_item_t *locker;
2518 	uint32 map_items;
2519 	unsigned long flags;
2520 	bool data_tx = FALSE;
2521 
2522 	map = (dhd_pktid_map_t *)handle;
2523 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2524 	osh = dhd->osh;
2525 
2526 	map_items = DHD_PKIDMAP_ITEMS(map->items);
2527 	/* skip reserved KEY #0, and start from 1 */
2528 
2529 	for (nkey = 1; nkey <= map_items; nkey++) {
2530 		if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
2531 			locker = &map->lockers[nkey];
2532 			locker->state = LOCKER_IS_FREE;
2533 			data_tx = (locker->pkttype == PKTTYPE_DATA_TX);
2534 			if (data_tx) {
2535 				OSL_ATOMIC_DEC(dhd->osh, &dhd->prot->active_tx_count);
2536 			}
2537 
2538 #ifdef DHD_PKTID_AUDIT_RING
2539 			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
2540 #endif /* DHD_PKTID_AUDIT_RING */
2541 #ifdef DHD_MAP_PKTID_LOGGING
2542 			DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap,
2543 				locker->pa, nkey, locker->len,
2544 				locker->pkttype);
2545 #endif /* DHD_MAP_PKTID_LOGGING */
2546 
2547 			DMA_UNMAP(osh, locker->pa, locker->len, locker->dir, 0, locker->dmah);
2548 			dhd_prot_packet_free(dhd, (ulong*)locker->pkt,
2549 				locker->pkttype, data_tx);
2550 		}
2551 		else {
2552 #ifdef DHD_PKTID_AUDIT_RING
2553 			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
2554 #endif /* DHD_PKTID_AUDIT_RING */
2555 		}
2556 		map->keys[nkey] = nkey; /* populate with unique keys */
2557 	}
2558 
2559 	map->avail = map_items;
2560 	memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
2561 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2562 }
2563 
2564 #ifdef IOCTLRESP_USE_CONSTMEM
2565 /** Called in detach scenario. Releasing IOCTL buffers. */
2566 static void
2567 dhd_pktid_map_reset_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
2568 {
2569 	uint32 nkey;
2570 	dhd_pktid_map_t *map;
2571 	dhd_pktid_item_t *locker;
2572 	uint32 map_items;
2573 	unsigned long flags;
2574 
2575 	map = (dhd_pktid_map_t *)handle;
2576 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2577 
2578 	map_items = DHD_PKIDMAP_ITEMS(map->items);
2579 	/* skip reserved KEY #0, and start from 1 */
2580 	for (nkey = 1; nkey <= map_items; nkey++) {
2581 		if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
2582 			dhd_dma_buf_t retbuf;
2583 
2584 #ifdef DHD_PKTID_AUDIT_RING
2585 			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
2586 #endif /* DHD_PKTID_AUDIT_RING */
2587 
2588 			locker = &map->lockers[nkey];
2589 			retbuf.va = locker->pkt;
2590 			retbuf.len = locker->len;
2591 			retbuf.pa = locker->pa;
2592 			retbuf.dmah = locker->dmah;
2593 			retbuf.secdma = locker->secdma;
2594 
2595 			free_ioctl_return_buffer(dhd, &retbuf);
2596 		}
2597 		else {
2598 #ifdef DHD_PKTID_AUDIT_RING
2599 			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
2600 #endif /* DHD_PKTID_AUDIT_RING */
2601 		}
2602 		map->keys[nkey] = nkey; /* populate with unique keys */
2603 	}
2604 
2605 	map->avail = map_items;
2606 	memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
2607 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2608 }
2609 #endif /* IOCTLRESP_USE_CONSTMEM */
2610 
2611 /**
2612  * Free the pktid map.
2613  */
2614 static void
2615 dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
2616 {
2617 	dhd_pktid_map_t *map;
2618 	uint32 dhd_pktid_map_sz;
2619 	uint32 map_keys_sz;
2620 
2621 	if (handle == NULL)
2622 		return;
2623 
2624 	/* Free any pending packets */
2625 	dhd_pktid_map_reset(dhd, handle);
2626 
2627 	map = (dhd_pktid_map_t *)handle;
2628 	dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
2629 	map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
2630 
2631 	DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock);
2632 
2633 #if defined(DHD_PKTID_AUDIT_ENABLED)
2634 	if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
2635 		bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
2636 		map->pktid_audit = (struct bcm_mwbmap *)NULL;
2637 		if (map->pktid_audit_lock) {
2638 			DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
2639 		}
2640 	}
2641 #endif /* DHD_PKTID_AUDIT_ENABLED */
2642 	MFREE(dhd->osh, map->keys, map_keys_sz);
2643 	VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
2644 }
2645 
2646 #ifdef IOCTLRESP_USE_CONSTMEM
2647 static void
2648 dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
2649 {
2650 	dhd_pktid_map_t *map;
2651 	uint32 dhd_pktid_map_sz;
2652 	uint32 map_keys_sz;
2653 
2654 	if (handle == NULL)
2655 		return;
2656 
2657 	/* Free any pending packets */
2658 	dhd_pktid_map_reset_ioctl(dhd, handle);
2659 
2660 	map = (dhd_pktid_map_t *)handle;
2661 	dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
2662 	map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
2663 
2664 	DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock);
2665 
2666 #if defined(DHD_PKTID_AUDIT_ENABLED)
2667 	if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
2668 		bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
2669 		map->pktid_audit = (struct bcm_mwbmap *)NULL;
2670 		if (map->pktid_audit_lock) {
2671 			DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
2672 		}
2673 	}
2674 #endif /* DHD_PKTID_AUDIT_ENABLED */
2675 
2676 	MFREE(dhd->osh, map->keys, map_keys_sz);
2677 	VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
2678 }
2679 #endif /* IOCTLRESP_USE_CONSTMEM */
2680 
2681 /** Get the pktid free count */
2682 static INLINE uint32
2683 BCMFASTPATH(dhd_pktid_map_avail_cnt)(dhd_pktid_map_handle_t *handle)
2684 {
2685 	dhd_pktid_map_t *map;
2686 	uint32	avail;
2687 	unsigned long flags;
2688 
2689 	ASSERT(handle != NULL);
2690 	map = (dhd_pktid_map_t *)handle;
2691 
2692 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2693 	avail = map->avail;
2694 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2695 
2696 	return avail;
2697 }
2698 
2699 /**
2700  * dhd_pktid_map_reserve - reserve a unique numbered key. Reserved locker is not
2701  * yet populated. Invoke the pktid save api to populate the packet parameters
2702  * into the locker. This function is not reentrant; serialization is the
2703  * caller's responsibility. The caller must treat a returned value of
2704  * DHD_PKTID_INVALID as a failure case, implying a depleted pool of pktids.
2705  */
2706 static INLINE uint32
2707 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
2708 	void *pkt, dhd_pkttype_t pkttype)
2709 {
2710 	uint32 nkey;
2711 	dhd_pktid_map_t *map;
2712 	dhd_pktid_item_t *locker;
2713 	unsigned long flags;
2714 
2715 	ASSERT(handle != NULL);
2716 	map = (dhd_pktid_map_t *)handle;
2717 
2718 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2719 
2720 	if ((int)(map->avail) <= 0) { /* no more pktids to allocate */
2721 		map->failures++;
2722 		DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__));
2723 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2724 		return DHD_PKTID_INVALID; /* failed alloc request */
2725 	}
2726 
2727 	ASSERT(map->avail <= map->items);
2728 	nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */
2729 
2730 	if ((map->avail > map->items) || (nkey > map->items)) {
2731 		map->failures++;
2732 		DHD_ERROR(("%s:%d: failed to allocate a new pktid,"
2733 			" map->avail<%u>, nkey<%u>, pkttype<%u>\n",
2734 			__FUNCTION__, __LINE__, map->avail, nkey,
2735 			pkttype));
2736 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2737 		return DHD_PKTID_INVALID; /* failed alloc request */
2738 	}
2739 
2740 	locker = &map->lockers[nkey]; /* save packet metadata in locker */
2741 	map->avail--;
2742 	locker->pkt = pkt; /* pkt is saved, other params not yet saved. */
2743 	locker->len = 0;
2744 	locker->state = LOCKER_IS_BUSY; /* reserve this locker */
2745 
2746 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2747 
2748 	ASSERT(nkey != DHD_PKTID_INVALID);
2749 
2750 	return nkey; /* return locker's numbered key */
2751 }
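/*
 * map->keys[] acts as a stack of free pktids: the reserve path above pops
 * keys[map->avail] and then decrements map->avail, so map->avail serves both
 * as the stack pointer and as the free count reported by
 * dhd_pktid_map_avail_cnt(); dhd_pktid_map_reset() refills the stack with the
 * identity keys 1..items.
 */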
2752 
2753 #ifdef DHD_PKTTS
2754 /*
2755  * dhd_pktid_map_save_metadata - Save metadata information in a locker
2756  * that has a reserved unique numbered key.
2757  */
2758 static INLINE void
2759 dhd_pktid_map_save_metadata(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *mpkt,
2760 	dmaaddr_t mpkt_pa,
2761 	uint16	mpkt_len,
2762 	void *dmah,
2763 	uint32 nkey)
2764 {
2765 	dhd_pktid_map_t *map;
2766 	dhd_pktid_item_t *locker;
2767 	unsigned long flags;
2768 
2769 	ASSERT(handle != NULL);
2770 	map = (dhd_pktid_map_t *)handle;
2771 
2772 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2773 
2774 	if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
2775 		DHD_ERROR(("%s:%d: Error! saving invalid pktid<%u>",
2776 			__FUNCTION__, __LINE__, nkey));
2777 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2778 #ifdef DHD_FW_COREDUMP
2779 		if (dhd->memdump_enabled) {
2780 			/* collect core dump */
2781 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2782 			dhd_bus_mem_dump(dhd);
2783 		}
2784 #else
2785 		ASSERT(0);
2786 #endif /* DHD_FW_COREDUMP */
2787 		return;
2788 	}
2789 
2790 	locker = &map->lockers[nkey];
2791 
2792 	/*
2793 	 * TODO: checking the locker state for BUSY will prevent
2794 	 * us from storing meta data on an already allocated
2795 	 * Locker. But not checking may lead to overwriting
2796 	 * existing data.
2797 	 */
2798 	locker->mpkt = mpkt;
2799 	locker->mpkt_pa = mpkt_pa;
2800 	locker->mpkt_len = mpkt_len;
2801 	locker->dmah = dmah;
2802 
2803 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2804 }
2805 #endif /* DHD_PKTTS */
2806 
2807 /*
2808  * dhd_pktid_map_save - Save a packet's parameters into a locker
2809  * corresponding to a previously reserved unique numbered key.
2810  */
2811 static INLINE void
2812 dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
2813 	uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
2814 	dhd_pkttype_t pkttype)
2815 {
2816 	dhd_pktid_map_t *map;
2817 	dhd_pktid_item_t *locker;
2818 	unsigned long flags;
2819 
2820 	ASSERT(handle != NULL);
2821 	map = (dhd_pktid_map_t *)handle;
2822 
2823 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2824 
2825 	if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
2826 		DHD_ERROR(("%s:%d: Error! saving invalid pktid<%u> pkttype<%u>\n",
2827 			__FUNCTION__, __LINE__, nkey, pkttype));
2828 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2829 #ifdef DHD_FW_COREDUMP
2830 		if (dhd->memdump_enabled) {
2831 			/* collect core dump */
2832 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2833 			dhd_bus_mem_dump(dhd);
2834 		}
2835 #else
2836 		ASSERT(0);
2837 #endif /* DHD_FW_COREDUMP */
2838 		return;
2839 	}
2840 
2841 	locker = &map->lockers[nkey];
2842 
2843 	ASSERT(((locker->state == LOCKER_IS_BUSY) && (locker->pkt == pkt)) ||
2844 		((locker->state == LOCKER_IS_RSVD) && (locker->pkt == NULL)));
2845 
2846 	/* store contents in locker */
2847 	locker->dir = dir;
2848 	locker->pa = pa;
2849 	locker->len = (uint16)len; /* 16bit len */
2850 	locker->dmah = dmah; /* DMA handle */
2851 	locker->secdma = secdma;
2852 	locker->pkttype = pkttype;
2853 	locker->pkt = pkt;
2854 	locker->state = LOCKER_IS_BUSY; /* make this locker busy */
2855 #ifdef DHD_MAP_PKTID_LOGGING
2856 	DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_map, pa, nkey, len, pkttype);
2857 #endif /* DHD_MAP_PKTID_LOGGING */
2858 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2859 }
2860 
2861 /**
2862  * dhd_pktid_map_alloc - Allocate a unique numbered key and save the packet
2863  * contents into the corresponding locker. Return the numbered key.
2864  */
2865 static uint32
2866 BCMFASTPATH(dhd_pktid_map_alloc)(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
2867 	dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
2868 	dhd_pkttype_t pkttype)
2869 {
2870 	uint32 nkey;
2871 
2872 	nkey = dhd_pktid_map_reserve(dhd, handle, pkt, pkttype);
2873 	if (nkey != DHD_PKTID_INVALID) {
2874 		dhd_pktid_map_save(dhd, handle, pkt, nkey, pa,
2875 			len, dir, dmah, secdma, pkttype);
2876 	}
2877 
2878 	return nkey;
2879 }
2880 
2881 #ifdef DHD_PKTTS
2882 static void *
2883 BCMFASTPATH(dhd_pktid_map_retreive_metadata)(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
2884 	dmaaddr_t *pmpkt_pa,
2885 	uint32	*pmpkt_len,
2886 	void **pdmah,
2887 	uint32 nkey)
2888 {
2889 	dhd_pktid_map_t *map;
2890 	dhd_pktid_item_t *locker;
2891 	void *mpkt;
2892 	unsigned long flags;
2893 
2894 	ASSERT(handle != NULL);
2895 
2896 	map = (dhd_pktid_map_t *)handle;
2897 
2898 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2899 
2900 	/* XXX PLEASE DO NOT remove this ASSERT, fix the bug in caller. */
2901 	if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
2902 		DHD_ERROR(("%s:%d: Error! Try to free invalid pktid<%u>\n",
2903 		           __FUNCTION__, __LINE__, nkey));
2904 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2905 #ifdef DHD_FW_COREDUMP
2906 		if (dhd->memdump_enabled) {
2907 			/* collect core dump */
2908 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2909 			dhd_bus_mem_dump(dhd);
2910 		}
2911 #else
2912 		ASSERT(0);
2913 #endif /* DHD_FW_COREDUMP */
2914 		return NULL;
2915 	}
2916 
2917 	locker = &map->lockers[nkey];
2918 	mpkt = locker->mpkt;
2919 	*pmpkt_pa = locker->mpkt_pa;
2920 	*pmpkt_len = locker->mpkt_len;
2921 	if (pdmah)
2922 		*pdmah = locker->dmah;
2923 	locker->mpkt = NULL;
2924 	locker->mpkt_len = 0;
2925 	locker->dmah = NULL;
2926 
2927 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2928 	return mpkt;
2929 }
2930 #endif /* DHD_PKTTS */
2931 
2932 /**
2933  * dhd_pktid_map_free - Given a numbered key, return the locker contents.
2934  * dhd_pktid_map_free() is not reentrant; serialization is the caller's responsibility.
2935  * Caller may not free a pktid value DHD_PKTID_INVALID or an arbitrary pktid
2936  * value. Only a previously allocated pktid may be freed.
2937  */
2938 static void *
2939 BCMFASTPATH(dhd_pktid_map_free)(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey,
2940 	dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma, dhd_pkttype_t pkttype,
2941 	bool rsv_locker)
2942 {
2943 	dhd_pktid_map_t *map;
2944 	dhd_pktid_item_t *locker;
2945 	void * pkt;
2946 	unsigned long long locker_addr;
2947 	unsigned long flags;
2948 
2949 	ASSERT(handle != NULL);
2950 
2951 	map = (dhd_pktid_map_t *)handle;
2952 
2953 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2954 
2955 	/* XXX PLEASE DO NOT remove this ASSERT, fix the bug in caller. */
2956 	if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
2957 		DHD_ERROR(("%s:%d: Error! Try to free invalid pktid<%u>, pkttype<%d>\n",
2958 		           __FUNCTION__, __LINE__, nkey, pkttype));
2959 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2960 #ifdef DHD_FW_COREDUMP
2961 		if (dhd->memdump_enabled) {
2962 			/* collect core dump */
2963 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2964 			dhd_bus_mem_dump(dhd);
2965 		}
2966 #else
2967 		ASSERT(0);
2968 #endif /* DHD_FW_COREDUMP */
2969 		return NULL;
2970 	}
2971 
2972 	locker = &map->lockers[nkey];
2973 
2974 #if defined(DHD_PKTID_AUDIT_MAP)
2975 	DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* Audit duplicate FREE */
2976 #endif /* DHD_PKTID_AUDIT_MAP */
2977 
2978 	/* Debug check for cloned numbered key */
2979 	if (locker->state == LOCKER_IS_FREE) {
2980 		DHD_ERROR(("%s:%d: Error! freeing already freed invalid pktid<%u>\n",
2981 		           __FUNCTION__, __LINE__, nkey));
2982 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2983 		/* XXX PLEASE DO NOT remove this ASSERT, fix the bug in caller. */
2984 #ifdef DHD_FW_COREDUMP
2985 		if (dhd->memdump_enabled) {
2986 			/* collect core dump */
2987 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2988 			dhd_bus_mem_dump(dhd);
2989 		}
2990 #else
2991 		ASSERT(0);
2992 #endif /* DHD_FW_COREDUMP */
2993 		return NULL;
2994 	}
2995 
2996 	/* Check the colour of the buffer, i.e. a buffer posted for TX
2997 	 * should be freed on TX completion. Similarly, a buffer posted for
2998 	 * IOCTL should be freed on IOCTL completion, etc.
2999 	 */
3000 	if ((pkttype != PKTTYPE_NO_CHECK) && (locker->pkttype != pkttype)) {
3001 
3002 		DHD_ERROR(("%s:%d: Error! Invalid Buffer Free for pktid<%u> \n",
3003 			__FUNCTION__, __LINE__, nkey));
3004 #ifdef BCMDMA64OSL
3005 		PHYSADDRTOULONG(locker->pa, locker_addr);
3006 #else
3007 		locker_addr = PHYSADDRLO(locker->pa);
3008 #endif /* BCMDMA64OSL */
3009 		DHD_ERROR(("%s:%d: locker->state <%d>, locker->pkttype <%d>,"
3010 			"pkttype <%d> locker->pa <0x%llx> \n",
3011 			__FUNCTION__, __LINE__, locker->state, locker->pkttype,
3012 			pkttype, locker_addr));
3013 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
3014 #ifdef DHD_FW_COREDUMP
3015 		if (dhd->memdump_enabled) {
3016 			/* collect core dump */
3017 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
3018 			dhd_bus_mem_dump(dhd);
3019 		}
3020 #else
3021 		ASSERT(0);
3022 #endif /* DHD_FW_COREDUMP */
3023 		return NULL;
3024 	}
3025 
3026 	if (rsv_locker == DHD_PKTID_FREE_LOCKER) {
3027 		map->avail++;
3028 		map->keys[map->avail] = nkey; /* make this numbered key available */
3029 		locker->state = LOCKER_IS_FREE; /* open and free Locker */
3030 	} else {
3031 		/* pktid will be reused, but the locker does not have a valid pkt */
3032 		locker->state = LOCKER_IS_RSVD;
3033 	}
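	/* Sketch of the two paths above: DHD_PKTID_FREE_LOCKER pushes the key
	 * back onto the free stack so it can be handed out again, while the
	 * reserved path keeps the key out of circulation so the caller can
	 * re-save into the same locker without another reserve.
	 */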
3034 
3035 #if defined(DHD_PKTID_AUDIT_MAP)
3036 	DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
3037 #endif /* DHD_PKTID_AUDIT_MAP */
3038 #ifdef DHD_MAP_PKTID_LOGGING
3039 	DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap, locker->pa, nkey,
3040 		(uint32)locker->len, pkttype);
3041 #endif /* DHD_MAP_PKTID_LOGGING */
3042 
3043 	*pa = locker->pa; /* return contents of locker */
3044 	*len = (uint32)locker->len;
3045 	*dmah = locker->dmah;
3046 	*secdma = locker->secdma;
3047 
3048 	pkt = locker->pkt;
3049 	locker->pkt = NULL; /* Clear pkt */
3050 	locker->len = 0;
3051 
3052 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
3053 
3054 	return pkt;
3055 }
3056 
3057 #else /* ! DHD_PCIE_PKTID */
3058 
3059 #ifndef linux
3060 #error "DHD_PCIE_PKTID has to be defined for non-linux/android platforms"
3061 #endif
3062 
3063 typedef struct pktlist {
3064 	PKT_LIST *tx_pkt_list;		/* list for tx packets */
3065 	PKT_LIST *rx_pkt_list;		/* list for rx packets */
3066 	PKT_LIST *ctrl_pkt_list;	/* list for ioctl/event buf post */
3067 } pktlists_t;
3068 
3069 /*
3070  * Given that each workitem only uses a 32bit pktid, only 32bit hosts can use
3071  * a one-to-one mapping between a 32bit pktptr and a 32bit pktid.
3072  *
3073  * - When PKTIDMAP is not used, DHD_NATIVE_TO_PKTID variants will never fail.
3074  * - Neither DHD_NATIVE_TO_PKTID nor DHD_PKTID_TO_NATIVE need to be protected by
3075  *   a lock.
3076  * - Hence DHD_PKTID_INVALID is not defined when DHD_PCIE_PKTID is undefined.
3077  */
3078 #define DHD_PKTID32(pktptr32)	((uint32)(pktptr32))
3079 #define DHD_PKTPTR32(pktid32)	((void *)(pktid32))
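
/*
 * Identity-mapping sketch for this non-PKTIDMAP build: the pktid is the 32bit
 * packet pointer itself, so DHD_PKTPTR32(DHD_PKTID32(pkt)) == pkt on a 32bit
 * host, and the conversion needs no lookup table, no lock, and cannot fail.
 */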
3080 
3081 static INLINE uint32 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
3082 	dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
3083 	dhd_pkttype_t pkttype);
3084 static INLINE void * dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
3085 	dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
3086 	dhd_pkttype_t pkttype);
3087 
3088 static dhd_pktid_map_handle_t *
3089 dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
3090 {
3091 	osl_t *osh = dhd->osh;
3092 	pktlists_t *handle = NULL;
3093 
3094 	if ((handle = (pktlists_t *) MALLOCZ(osh, sizeof(pktlists_t))) == NULL) {
3095 		DHD_ERROR(("%s:%d: MALLOC failed for lists allocation, size=%d\n",
3096 		           __FUNCTION__, __LINE__, sizeof(pktlists_t)));
3097 		goto error_done;
3098 	}
3099 
3100 	if ((handle->tx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
3101 		DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
3102 		           __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
3103 		goto error;
3104 	}
3105 
3106 	if ((handle->rx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
3107 		DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
3108 		           __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
3109 		goto error;
3110 	}
3111 
3112 	if ((handle->ctrl_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
3113 		DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
3114 		           __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
3115 		goto error;
3116 	}
3117 
3118 	PKTLIST_INIT(handle->tx_pkt_list);
3119 	PKTLIST_INIT(handle->rx_pkt_list);
3120 	PKTLIST_INIT(handle->ctrl_pkt_list);
3121 
3122 	return (dhd_pktid_map_handle_t *) handle;
3123 
3124 error:
3125 	if (handle->ctrl_pkt_list) {
3126 		MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
3127 	}
3128 
3129 	if (handle->rx_pkt_list) {
3130 		MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
3131 	}
3132 
3133 	if (handle->tx_pkt_list) {
3134 		MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
3135 	}
3136 
3137 	if (handle) {
3138 		MFREE(osh, handle, sizeof(pktlists_t));
3139 	}
3140 
3141 error_done:
3142 	return (dhd_pktid_map_handle_t *)NULL;
3143 }
3144 
3145 static void
3146 dhd_pktid_map_reset(dhd_pub_t *dhd, pktlists_t *handle)
3147 {
3148 	osl_t *osh = dhd->osh;
3149 
3150 	if (handle->ctrl_pkt_list) {
3151 		PKTLIST_FINI(handle->ctrl_pkt_list);
3152 		MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
3153 	}
3154 
3155 	if (handle->rx_pkt_list) {
3156 		PKTLIST_FINI(handle->rx_pkt_list);
3157 		MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
3158 	}
3159 
3160 	if (handle->tx_pkt_list) {
3161 		PKTLIST_FINI(handle->tx_pkt_list);
3162 		MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
3163 	}
3164 }
3165 
3166 static void
3167 dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map)
3168 {
3169 	osl_t *osh = dhd->osh;
3170 	pktlists_t *handle = (pktlists_t *) map;
3171 
3172 	ASSERT(handle != NULL);
3173 	if (handle == (pktlists_t *)NULL) {
3174 		return;
3175 	}
3176 
3177 	dhd_pktid_map_reset(dhd, handle);
3178 
3179 	if (handle) {
3180 		MFREE(osh, handle, sizeof(pktlists_t));
3181 	}
3182 }
3183 
3184 /** Save dma parameters into the packet's pkttag and convert a pktptr to pktid */
3185 static INLINE uint32
3186 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
3187 	dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
3188 	dhd_pkttype_t pkttype)
3189 {
3190 	pktlists_t *handle = (pktlists_t *) map;
3191 	ASSERT(pktptr32 != NULL);
3192 	DHD_PKT_SET_DMA_LEN(pktptr32, dma_len);
3193 	DHD_PKT_SET_DMAH(pktptr32, dmah);
3194 	DHD_PKT_SET_PA(pktptr32, pa);
3195 	DHD_PKT_SET_SECDMA(pktptr32, secdma);
3196 
3197 	/* XXX optimize these branch conditionals */
3198 	if (pkttype == PKTTYPE_DATA_TX) {
3199 		PKTLIST_ENQ(handle->tx_pkt_list,  pktptr32);
3200 	} else if (pkttype == PKTTYPE_DATA_RX) {
3201 		PKTLIST_ENQ(handle->rx_pkt_list,  pktptr32);
3202 	} else {
3203 		PKTLIST_ENQ(handle->ctrl_pkt_list,  pktptr32);
3204 	}
3205 
3206 	return DHD_PKTID32(pktptr32);
3207 }
3208 
3209 /** Convert a pktid to pktptr and retrieve saved dma parameters from packet */
3210 static INLINE void *
3211 dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
3212 	dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
3213 	dhd_pkttype_t pkttype)
3214 {
3215 	pktlists_t *handle = (pktlists_t *) map;
3216 	void *pktptr32;
3217 
3218 	ASSERT(pktid32 != 0U);
3219 	pktptr32 = DHD_PKTPTR32(pktid32);
3220 	*dma_len = DHD_PKT_GET_DMA_LEN(pktptr32);
3221 	*dmah = DHD_PKT_GET_DMAH(pktptr32);
3222 	*pa = DHD_PKT_GET_PA(pktptr32);
3223 	*secdma = DHD_PKT_GET_SECDMA(pktptr32);
3224 
3225 	/* XXX optimize these branch conditionals */
3226 	if (pkttype == PKTTYPE_DATA_TX) {
3227 		PKTLIST_UNLINK(handle->tx_pkt_list,  pktptr32);
3228 	} else if (pkttype == PKTTYPE_DATA_RX) {
3229 		PKTLIST_UNLINK(handle->rx_pkt_list,  pktptr32);
3230 	} else {
3231 		PKTLIST_UNLINK(handle->ctrl_pkt_list,  pktptr32);
3232 	}
3233 
3234 	return pktptr32;
3235 }
3236 
3237 #define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype)  DHD_PKTID32(pkt)
3238 
3239 #define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dma_dir, dmah, secdma, pkttype) \
3240 	({ BCM_REFERENCE(dhd); BCM_REFERENCE(nkey); BCM_REFERENCE(dma_dir); \
3241 	   dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
3242 			   (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
3243 	})
3244 
3245 #define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dma_dir, dmah, secdma, pkttype) \
3246 	({ BCM_REFERENCE(dhd); BCM_REFERENCE(dma_dir); \
3247 	   dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
3248 			   (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
3249 	})
3250 
3251 #define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
3252 	({ BCM_REFERENCE(dhd); BCM_REFERENCE(pkttype);	\
3253 		dhd_pktid_to_native((dhd_pktid_map_handle_t *) map, (uint32)(pktid), \
3254 				(dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
3255 				(void **)&secdma, (dhd_pkttype_t)(pkttype)); \
3256 	})
3257 
3258 #define DHD_PKTID_AVAIL(map)  (~0)
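/* With no pktid accounting in this build, availability is reported as ~0
 * (effectively unlimited), so callers never observe pktid depletion here.
 */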
3259 
3260 #endif /* ! DHD_PCIE_PKTID */
3261 
3262 /* +------------------ End of PCIE DHD PKTID MAPPER  -----------------------+ */
3263 
3264 /*
3265  * Allocating buffers for common rings.
3266  * also allocating Buffers for hmaptest, Scratch buffer for dma rx offset,
3267  * bus_throughput_measurement and snapshot upload
3268  */
3269 static int
3270 dhd_prot_allocate_bufs(dhd_pub_t *dhd, dhd_prot_t *prot)
3271 {
3272 
3273 	/* Common Ring Allocations */
3274 
3275 	/* Ring  0: H2D Control Submission */
3276 	if (dhd_prot_ring_attach(dhd, &prot->h2dring_ctrl_subn, "h2dctrl",
3277 	        H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE,
3278 	        BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) != BCME_OK) {
3279 		DHD_ERROR(("%s: dhd_prot_ring_attach H2D Ctrl Submission failed\n",
3280 			__FUNCTION__));
3281 		goto fail;
3282 	}
3283 
3284 	/* Ring  1: H2D Receive Buffer Post */
3285 	if (dhd_prot_ring_attach(dhd, &prot->h2dring_rxp_subn, "h2drxp",
3286 	        H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE,
3287 	        BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT) != BCME_OK) {
3288 		DHD_ERROR(("%s: dhd_prot_ring_attach H2D RxPost failed\n",
3289 			__FUNCTION__));
3290 		goto fail;
3291 	}
3292 
3293 	/* Ring  2: D2H Control Completion */
3294 	if (dhd_prot_ring_attach(dhd, &prot->d2hring_ctrl_cpln, "d2hctrl",
3295 	        D2HRING_CTRL_CMPLT_MAX_ITEM, D2HRING_CTRL_CMPLT_ITEMSIZE,
3296 	        BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE) != BCME_OK) {
3297 		DHD_ERROR(("%s: dhd_prot_ring_attach D2H Ctrl Completion failed\n",
3298 			__FUNCTION__));
3299 		goto fail;
3300 	}
3301 
3302 	/* Ring  3: D2H Transmit Complete */
3303 	if (dhd_prot_ring_attach(dhd, &prot->d2hring_tx_cpln, "d2htxcpl",
3304 	        D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE,
3305 	        BCMPCIE_D2H_MSGRING_TX_COMPLETE) != BCME_OK) {
3306 		DHD_ERROR(("%s: dhd_prot_ring_attach D2H Tx Completion failed\n",
3307 			__FUNCTION__));
3308 		goto fail;
3309 
3310 	}
3311 
3312 	/* Ring  4: D2H Receive Complete */
3313 	if (dhd_prot_ring_attach(dhd, &prot->d2hring_rx_cpln, "d2hrxcpl",
3314 	        D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE,
3315 	        BCMPCIE_D2H_MSGRING_RX_COMPLETE) != BCME_OK) {
3316 		DHD_ERROR(("%s: dhd_prot_ring_attach D2H Rx Completion failed\n",
3317 			__FUNCTION__));
3318 		goto fail;
3319 
3320 	}
3321 
3322 	/*
3323 	 * Max number of flowrings is not yet known. msgbuf_ring_t with DMA-able
3324 	 * buffers for flowrings will be instantiated in dhd_prot_init().
3325 	 * See dhd_prot_flowrings_pool_attach()
3326 	 */
3327 	/* ioctl response buffer */
3328 	if (dhd_dma_buf_alloc(dhd, &prot->retbuf, IOCT_RETBUF_SIZE)) {
3329 		goto fail;
3330 	}
3331 
3332 	/* IOCTL request buffer */
3333 	if (dhd_dma_buf_alloc(dhd, &prot->ioctbuf, IOCT_RETBUF_SIZE)) {
3334 		goto fail;
3335 	}
3336 
3337 	/* Host TS request buffer, one buffer for now */
3338 	if (dhd_dma_buf_alloc(dhd, &prot->hostts_req_buf, CTRLSUB_HOSTTS_MEESAGE_SIZE)) {
3339 		goto fail;
3340 	}
3341 	prot->hostts_req_buf_inuse = FALSE;
3342 
3343 	/* Scratch buffer for dma rx offset */
3344 #ifdef BCM_HOST_BUF
3345 	if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf,
3346 		ROUNDUP(DMA_D2H_SCRATCH_BUF_LEN, 16) + DMA_HOST_BUFFER_LEN))
3347 #else
3348 	if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN))
3349 
3350 #endif /* BCM_HOST_BUF */
3351 	{
3352 		goto fail;
3353 	}
3354 
3355 #ifdef DHD_HMAPTEST
3356 	/* Allocate buffer for hmaptest  */
3357 	DHD_ERROR(("allocating memory for hmaptest \n"));
3358 	if (dhd_dma_buf_alloc(dhd, &prot->hmaptest.mem, HMAP_SANDBOX_BUFFER_LEN)) {
3359 
3360 		goto fail;
3361 	} else {
3362 		uint32 scratch_len;
3363 		uint64 scratch_lin, w1_start;
3364 		dmaaddr_t scratch_pa;
3365 
3366 		scratch_pa = prot->hmaptest.mem.pa;
3367 		scratch_len = prot->hmaptest.mem.len;
3368 		scratch_lin  = (uint64)(PHYSADDRLO(scratch_pa) & 0xffffffff)
3369 			| (((uint64)PHYSADDRHI(scratch_pa)& 0xffffffff) << 32);
3370 		w1_start  = scratch_lin +  scratch_len;
3371 		DHD_ERROR(("hmap: NOTE Buffer alloc for HMAPTEST Start=0x%0llx len=0x%08x"
3372 			"End=0x%0llx\n", (uint64) scratch_lin, scratch_len, (uint64) w1_start));
3373 	}
3374 #endif /* DHD_HMAPTEST */
3375 
3376 	/* scratch buffer bus throughput measurement */
3377 	if (dhd_dma_buf_alloc(dhd, &prot->host_bus_throughput_buf, DHD_BUS_TPUT_BUF_LEN)) {
3378 		goto fail;
3379 	}
3380 
3381 #ifdef SNAPSHOT_UPLOAD
3382 	/* snapshot upload buffer */
3383 	if (dhd_dma_buf_alloc(dhd, &prot->snapshot_upload_buf, SNAPSHOT_UPLOAD_BUF_SIZE)) {
3384 		goto fail;
3385 	}
3386 #endif	/* SNAPSHOT_UPLOAD */
3387 
3388 	return BCME_OK;
3389 
3390 fail:
3391 	return BCME_NOMEM;
3392 }
3393 
3394 /**
3395  * The PCIE FD protocol layer is constructed in two phases:
3396  *    Phase 1. dhd_prot_attach()
3397  *    Phase 2. dhd_prot_init()
3398  *
3399  * dhd_prot_attach() - Allocates a dhd_prot_t object and resets all its fields.
3400  * All Common rings are also attached (msgbuf_ring_t objects are allocated
3401  * with DMA-able buffers).
3402  * All dhd_dma_buf_t objects are also allocated here.
3403  *
3404  * As dhd_prot_attach() is invoked before the pcie_shared object is read, any
3405  * initialization of objects that requires information advertised by the dongle
3406  * may not be performed here.
3407  * E.g. the number of TxPost flowrings is not known at this point, nor do
3408  * we know which form of D2H DMA sync mechanism is advertised by the dongle, or
3409  * whether the dongle supports DMA-ing of WR/RD indices for the H2D and/or D2H
3410  * rings (common + flow).
3411  *
3412  * dhd_prot_init() is invoked after the bus layer has fetched the information
3413  * advertised by the dongle in the pcie_shared_t.
3414  */
3415 int
3416 dhd_prot_attach(dhd_pub_t *dhd)
3417 {
3418 	osl_t *osh = dhd->osh;
3419 	dhd_prot_t *prot;
3420 	uint32 trap_buf_len;
3421 
3422 	/* Allocate prot structure */
3423 	if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT,
3424 		sizeof(dhd_prot_t)))) {
3425 		DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
3426 		goto fail;
3427 	}
3428 	memset(prot, 0, sizeof(*prot));
3429 
3430 	prot->osh = osh;
3431 	dhd->prot = prot;
3432 
3433 	/* Is DMAing of ring updates supported? FALSE by default */
3434 	dhd->dma_d2h_ring_upd_support = FALSE;
3435 	dhd->dma_h2d_ring_upd_support = FALSE;
3436 	dhd->dma_ring_upd_overwrite = FALSE;
3437 
3438 	dhd->idma_inited = 0;
3439 	dhd->ifrm_inited = 0;
3440 	dhd->dar_inited = 0;
3441 
3442 	if (dhd_prot_allocate_bufs(dhd, prot) != BCME_OK) {
3443 		goto fail;
3444 	}
3445 
3446 #ifdef DHD_RX_CHAINING
3447 	dhd_rxchain_reset(&prot->rxchain);
3448 #endif
3449 
3450 	prot->pktid_ctrl_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_CTRL);
3451 	if (prot->pktid_ctrl_map == NULL) {
3452 		goto fail;
3453 	}
3454 
3455 	prot->pktid_rx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_RX);
3456 	if (prot->pktid_rx_map == NULL)
3457 		goto fail;
3458 
3459 	prot->pktid_tx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_TX);
3460 	if (prot->pktid_tx_map == NULL)
3461 		goto fail;
3462 
3463 #ifdef IOCTLRESP_USE_CONSTMEM
3464 	prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd,
3465 		DHD_FLOWRING_MAX_IOCTLRESPBUF_POST);
3466 	if (prot->pktid_map_handle_ioctl == NULL) {
3467 		goto fail;
3468 	}
3469 #endif /* IOCTLRESP_USE_CONSTMEM */
3470 
3471 #ifdef DHD_MAP_PKTID_LOGGING
3472 	prot->pktid_dma_map = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
3473 	if (prot->pktid_dma_map == NULL) {
3474 		DHD_ERROR(("%s: failed to allocate pktid_dma_map\n",
3475 			__FUNCTION__));
3476 	}
3477 
3478 	prot->pktid_dma_unmap = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
3479 	if (prot->pktid_dma_unmap == NULL) {
3480 		DHD_ERROR(("%s: failed to allocate pktid_dma_unmap\n",
3481 			__FUNCTION__));
3482 	}
3483 #endif /* DHD_MAP_PKTID_LOGGING */
3484 
3485 #ifdef D2H_MINIDUMP
3486 	if (dhd->bus->sih->buscorerev < 71) {
3487 		trap_buf_len = BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN_MIN;
3488 	} else {
3489 		/* buscorerev >= 71, supports minidump of len 96KB */
3490 		trap_buf_len = BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN;
3491 	}
3492 #else
3493 	/* FW is going to DMA extended trap data,
3494 	 * allocate buffer for the maximum extended trap data.
3495 	 */
3496 	trap_buf_len = BCMPCIE_EXT_TRAP_DATA_MAXLEN;
3497 #endif /* D2H_MINIDUMP */
3498 
3499 	/* Initialize trap buffer */
3500 	if (dhd_dma_buf_alloc(dhd, &dhd->prot->fw_trap_buf, trap_buf_len)) {
3501 		DHD_ERROR(("%s: dhd_init_trap_buffer failed\n", __FUNCTION__));
3502 		goto fail;
3503 	}
3504 
3505 	return BCME_OK;
3506 
3507 fail:
3508 
3509 	if (prot) {
3510 		/* Free up all allocated memories */
3511 		dhd_prot_detach(dhd);
3512 	}
3513 
3514 	return BCME_NOMEM;
3515 } /* dhd_prot_attach */
3516 
3517 static int
3518 dhd_alloc_host_scbs(dhd_pub_t *dhd)
3519 {
3520 	int ret = BCME_OK;
3521 	sh_addr_t base_addr;
3522 	dhd_prot_t *prot = dhd->prot;
3523 	uint32 host_scb_size = 0;
3524 
3525 	if (dhd->hscb_enable) {
3526 		/* read number of bytes to allocate from F/W */
3527 		dhd_bus_cmn_readshared(dhd->bus, &host_scb_size, HOST_SCB_ADDR, 0);
3528 		if (host_scb_size) {
3529 			/* In a fw reload scenario the buffer could have been allocated for a
3530 			 * previous run. Check whether the existing buffer can accommodate
3531 			 * the new firmware requirement and reuse the buffer if possible.
3532 			 */
3533 			if (prot->host_scb_buf.va) {
3534 				if (prot->host_scb_buf.len >= host_scb_size) {
3535 					prot->host_scb_buf.len = host_scb_size;
3536 				} else {
3537 					dhd_dma_buf_free(dhd, &prot->host_scb_buf);
3538 				}
3539 			}
3540 			/* alloc array of host scbs */
3541 			if (prot->host_scb_buf.va == NULL) {
3542 				ret = dhd_dma_buf_alloc(dhd, &prot->host_scb_buf, host_scb_size);
3543 			}
3544 			/* write host scb address to F/W */
3545 			if (ret == BCME_OK) {
3546 				dhd_base_addr_htolpa(&base_addr, prot->host_scb_buf.pa);
3547 				dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3548 					HOST_SCB_ADDR, 0);
3549 			}
3550 		}
3551 	} else {
3552 		DHD_TRACE(("%s: Host scb not supported in F/W. \n", __FUNCTION__));
3553 	}
3554 
3555 	if (ret != BCME_OK) {
3556 		DHD_ERROR(("%s dhd_alloc_host_scbs, alloc failed: Err Code %d\n",
3557 			__FUNCTION__, ret));
3558 	}
3559 	return ret;
3560 }
3561 
3562 void
3563 dhd_set_host_cap(dhd_pub_t *dhd)
3564 {
3565 	uint32 data = 0;
3566 	dhd_prot_t *prot = dhd->prot;
3567 #ifdef D2H_MINIDUMP
3568 	uint16 host_trap_addr_len;
3569 #endif /* D2H_MINIDUMP */
3570 
3571 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
3572 		if (dhd->h2d_phase_supported) {
3573 			data |= HOSTCAP_H2D_VALID_PHASE;
3574 			if (dhd->force_dongletrap_on_bad_h2d_phase)
3575 				data |= HOSTCAP_H2D_ENABLE_TRAP_ON_BADPHASE;
3576 		}
3577 		if (prot->host_ipc_version > prot->device_ipc_version)
3578 			prot->active_ipc_version = prot->device_ipc_version;
3579 		else
3580 			prot->active_ipc_version = prot->host_ipc_version;
3581 
3582 		data |= prot->active_ipc_version;
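		/* Sketch of the layout of 'data': the low bits carry the negotiated
		 * IPC version (the lower of host and device versions); the HOSTCAP_*
		 * flag bits are OR-ed in below, and the whole word is written to the
		 * HOST_API_VERSION shared area at the end of this block.
		 */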
3583 
3584 		if (dhdpcie_bus_get_pcie_hostready_supported(dhd->bus)) {
3585 			DHD_INFO(("Advertise Hostready Capability\n"));
3586 			data |= HOSTCAP_H2D_ENABLE_HOSTRDY;
3587 		}
3588 #ifdef PCIE_INB_DW
3589 		if (dhdpcie_bus_get_pcie_inband_dw_supported(dhd->bus)) {
3590 			DHD_INFO(("Advertise Inband-DW Capability\n"));
3591 			data |= HOSTCAP_DS_INBAND_DW;
3592 			data |= HOSTCAP_DS_NO_OOB_DW;
3593 			dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_INB);
3594 			if (!dhd->dma_h2d_ring_upd_support || !dhd->dma_d2h_ring_upd_support) {
3595 				dhd_init_dongle_ds_lock(dhd->bus);
3596 				dhdpcie_set_dongle_deepsleep(dhd->bus, FALSE);
3597 			}
3598 		} else
3599 #endif /* PCIE_INB_DW */
3600 #ifdef PCIE_OOB
3601 		if (dhdpcie_bus_get_pcie_oob_dw_supported(dhd->bus)) {
3602 			dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_OOB);
3603 		} else
3604 #endif /* PCIE_OOB */
3605 		{
3606 			/* Disable DS altogether */
3607 			data |= HOSTCAP_DS_NO_OOB_DW;
3608 			dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_NONE);
3609 		}
3610 
3611 		/* Indicate support for extended trap data */
3612 		data |= HOSTCAP_EXTENDED_TRAP_DATA;
3613 
3614 		/* Indicate support for TX status metadata */
3615 		if (dhd->pcie_txs_metadata_enable != 0)
3616 			data |= HOSTCAP_TXSTATUS_METADATA;
3617 
3618 #ifdef BTLOG
3619 		/* Indicate support for BT logging */
3620 		if (dhd->bt_logging) {
3621 			if (dhd->bt_logging_enabled) {
3622 				data |= HOSTCAP_BT_LOGGING;
3623 				DHD_ERROR(("BT LOGGING  enabled\n"));
3624 			}
3625 			else {
3626 				DHD_ERROR(("BT logging supported in FW, BT LOGGING disabled\n"));
3627 			}
3628 		}
3629 		else {
3630 			DHD_ERROR(("BT LOGGING not enabled in FW !!\n"));
3631 		}
3632 #endif	/* BTLOG */
3633 
3634 		/* Enable fast delete ring in firmware if supported */
3635 		if (dhd->fast_delete_ring_support) {
3636 			data |= HOSTCAP_FAST_DELETE_RING;
3637 		}
3638 
3639 		if (dhdpcie_bus_get_pcie_idma_supported(dhd->bus)) {
3640 			DHD_ERROR(("IDMA inited\n"));
3641 			data |= HOSTCAP_H2D_IDMA;
3642 			dhd->idma_inited = TRUE;
3643 		} else {
3644 			DHD_ERROR(("IDMA not enabled in FW !!\n"));
3645 			dhd->idma_inited = FALSE;
3646 		}
3647 
3648 		if (dhdpcie_bus_get_pcie_ifrm_supported(dhd->bus)) {
3649 			DHD_ERROR(("IFRM Inited\n"));
3650 			data |= HOSTCAP_H2D_IFRM;
3651 			dhd->ifrm_inited = TRUE;
3652 			dhd->dma_h2d_ring_upd_support = FALSE;
3653 			dhd_prot_dma_indx_free(dhd);
3654 		} else {
3655 			DHD_ERROR(("IFRM not enabled in FW !!\n"));
3656 			dhd->ifrm_inited = FALSE;
3657 		}
3658 
3659 		if (dhdpcie_bus_get_pcie_dar_supported(dhd->bus)) {
3660 			DHD_ERROR(("DAR doorbell Use\n"));
3661 			data |= HOSTCAP_H2D_DAR;
3662 			dhd->dar_inited = TRUE;
3663 		} else {
3664 			DHD_ERROR(("DAR not enabled in FW !!\n"));
3665 			dhd->dar_inited = FALSE;
3666 		}
3667 
3668 		/* FW Checks for HOSTCAP_UR_FW_NO_TRAP and Does not TRAP if set
3669 		 * Radar 36403220 JIRA SWWLAN-182145
3670 		 */
3671 		data |= HOSTCAP_UR_FW_NO_TRAP;
3672 
3673 #ifdef SNAPSHOT_UPLOAD
3674 		/* Indicate support for snapshot upload */
3675 		if (dhd->snapshot_upload) {
3676 			data |= HOSTCAP_SNAPSHOT_UPLOAD;
3677 			DHD_ERROR(("ALLOW SNAPSHOT UPLOAD!!\n"));
3678 		}
3679 #endif	/* SNAPSHOT_UPLOAD */
3680 
3681 		if (dhd->hscb_enable) {
3682 			data |= HOSTCAP_HSCB;
3683 		}
3684 
3685 #ifdef EWP_EDL
3686 		if (dhd->dongle_edl_support) {
3687 			data |= HOSTCAP_EDL_RING;
3688 			DHD_ERROR(("Enable EDL host cap\n"));
3689 		} else {
3690 			DHD_ERROR(("DO NOT SET EDL host cap\n"));
3691 		}
3692 #endif /* EWP_EDL */
3693 
3694 #ifdef D2H_MINIDUMP
3695 		if (dhd_bus_is_minidump_enabled(dhd)) {
3696 			data |= HOSTCAP_EXT_TRAP_DBGBUF;
3697 			DHD_ERROR(("ALLOW D2H MINIDUMP!!\n"));
3698 		}
3699 #endif /* D2H_MINIDUMP */
3700 #ifdef DHD_HP2P
3701 		if (dhdpcie_bus_get_hp2p_supported(dhd->bus)) {
3702 			data |= HOSTCAP_PKT_TIMESTAMP;
3703 			data |= HOSTCAP_PKT_HP2P;
3704 			DHD_ERROR(("Enable HP2P in host cap\n"));
3705 		} else {
3706 			DHD_ERROR(("HP2P not enabled in host cap\n"));
3707 		}
3708 #endif /* DHD_HP2P */
3709 
3710 #ifdef DHD_DB0TS
3711 		if (dhd->db0ts_capable) {
3712 			data |= HOSTCAP_DB0_TIMESTAMP;
3713 			DHD_ERROR(("Enable DB0 TS in host cap\n"));
3714 		} else {
3715 			DHD_ERROR(("DB0 TS not enabled in host cap\n"));
3716 		}
3717 #endif /* DHD_DB0TS */
3718 		if (dhd->extdtxs_in_txcpl) {
3719 			DHD_ERROR(("Enable hostcap: EXTD TXS in txcpl\n"));
3720 			data |= HOSTCAP_PKT_TXSTATUS;
3721 		}
3722 		else {
3723 			DHD_ERROR(("Do not enable hostcap: EXTD TXS in txcpl\n"));
3724 		}
3725 
3726 		DHD_INFO(("%s:Active Ver:%d, Host Ver:%d, FW Ver:%d\n",
3727 			__FUNCTION__,
3728 			prot->active_ipc_version, prot->host_ipc_version,
3729 			prot->device_ipc_version));
3730 
3731 		dhd_bus_cmn_writeshared(dhd->bus, &data, sizeof(uint32), HOST_API_VERSION, 0);
3732 		dhd_bus_cmn_writeshared(dhd->bus, &prot->fw_trap_buf.pa,
3733 			sizeof(prot->fw_trap_buf.pa), DNGL_TO_HOST_TRAP_ADDR, 0);
3734 #ifdef D2H_MINIDUMP
3735 		if (dhd_bus_is_minidump_enabled(dhd)) {
3736 			/* Dongle expects the host_trap_addr_len in terms of words */
3737 			host_trap_addr_len = prot->fw_trap_buf.len / 4;
3738 			dhd_bus_cmn_writeshared(dhd->bus, &host_trap_addr_len,
3739 				sizeof(host_trap_addr_len), DNGL_TO_HOST_TRAP_ADDR_LEN, 0);
3740 		}
3741 #endif /* D2H_MINIDUMP */
3742 	}
3743 
3744 #ifdef DHD_TIMESYNC
3745 	dhd_timesync_notify_ipc_rev(dhd->ts, prot->active_ipc_version);
3746 #endif /* DHD_TIMESYNC */
3747 }
3748 
3749 #ifdef AGG_H2D_DB
3750 void dhd_agg_inflight_stats_dump(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
3751 {
3752 	uint64 *inflight_histo = dhd->prot->agg_h2d_db_info.inflight_histo;
3753 	uint32 i;
3754 	uint64 total_inflight_histo = 0;
3755 
3756 	bcm_bprintf(strbuf, "inflight: \t count\n");
3757 	for (i = 0; i < DHD_NUM_INFLIGHT_HISTO_ROWS; i++) {
3758 		bcm_bprintf(strbuf, "%16u: \t %llu\n", 1U<<i, inflight_histo[i]);
3759 		total_inflight_histo += inflight_histo[i];
3760 	}
3761 	bcm_bprintf(strbuf, "total_inflight_histo: %llu\n", total_inflight_histo);
3762 }
3763 
3764 void dhd_agg_inflights_stats_update(dhd_pub_t *dhd, uint32 inflight)
3765 {
3766 	uint64 *bin = dhd->prot->agg_h2d_db_info.inflight_histo;
3767 	uint64 *p;
3768 	uint32 bin_power;
3769 	bin_power = next_larger_power2(inflight);
3770 
3771 	switch (bin_power) {
3772 		case   1: p = bin + 0; break;
3773 		case   2: p = bin + 1; break;
3774 		case   4: p = bin + 2; break;
3775 		case   8: p = bin + 3; break;
3776 		case  16: p = bin + 4; break;
3777 		case  32: p = bin + 5; break;
3778 		case  64: p = bin + 6; break;
3779 		case 128: p = bin + 7; break;
3780 		case 256: p = bin + 8; break;
3781 		case 512: p = bin + 9; break;
3782 		case 1024: p = bin + 10; break;
3783 		case 2048: p = bin + 11; break;
3784 		case 4096: p = bin + 12; break;
3785 		case 8192: p = bin + 13; break;
3786 		default : p = bin + 13; break;
3787 	}
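	/* Worked example, assuming next_larger_power2() rounds up to the next
	 * power of two: inflight = 100 -> 128 -> bin index 7; values above 8192
	 * saturate into the last bin.
	 */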
3788 	ASSERT((p - bin) < DHD_NUM_INFLIGHT_HISTO_ROWS);
3789 	*p = *p + 1;
3790 	return;
3791 }
3792 
3793 /*
3794  * dhd_msgbuf_agg_h2d_db_timer_fn:
3795  * Timer callback function for ringing h2d DB.
3796  * This is run in isr context (HRTIMER_MODE_REL),
3797  * do not hold any spin_lock_bh().
3798  * Using HRTIMER_MODE_REL_SOFT causing TPUT regressions.
3799  * Using HRTIMER_MODE_REL_SOFT causes TPUT regressions.
3800 enum hrtimer_restart
3801 dhd_msgbuf_agg_h2d_db_timer_fn(struct hrtimer *timer)
3802 {
3803 	agg_h2d_db_info_t *agg_db_info;
3804 	dhd_pub_t *dhd;
3805 	dhd_prot_t *prot;
3806 	uint32 db_index;
3807 	uint corerev;
3808 
3809 	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
3810 	agg_db_info = container_of(timer, agg_h2d_db_info_t, timer);
3811 	GCC_DIAGNOSTIC_POP();
3812 
3813 	dhd = agg_db_info->dhd;
3814 	prot = dhd->prot;
3815 
3816 	prot->agg_h2d_db_info.timer_db_cnt++;
3817 	if (IDMA_ACTIVE(dhd)) {
3818 		db_index = IDMA_IDX0;
3819 		if (dhd->bus->sih) {
3820 			corerev = dhd->bus->sih->buscorerev;
3821 			if (corerev >= 24) {
3822 				db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
3823 			}
3824 		}
3825 		prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
3826 	} else {
3827 		prot->mb_ring_fn(dhd->bus, DHD_AGGR_H2D_DB_MAGIC);
3828 	}
3829 
3830 	return HRTIMER_NORESTART;
3831 }
3832 
3833 void
3834 dhd_msgbuf_agg_h2d_db_timer_start(dhd_prot_t *prot)
3835 {
3836 	agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info;
3837 
3838 	/* Queue the timer only when it is not in the queue */
3839 	if (!hrtimer_active(&agg_db_info->timer)) {
3840 		hrtimer_start(&agg_db_info->timer, ns_to_ktime(agg_h2d_db_timeout * NSEC_PER_USEC),
3841 				HRTIMER_MODE_REL);
3842 	}
3843 }
3844 
3845 static void
3846 dhd_msgbuf_agg_h2d_db_timer_init(dhd_pub_t *dhd)
3847 {
3848 	dhd_prot_t *prot = dhd->prot;
3849 	agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info;
3850 
3851 	agg_db_info->dhd = dhd;
3852 	hrtimer_init(&agg_db_info->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3853 	/* The timer function will run from ISR context, ensure no spin_lock_bh are used */
3854 	agg_db_info->timer.function = &dhd_msgbuf_agg_h2d_db_timer_fn;
3855 	agg_db_info->init = TRUE;
3856 	agg_db_info->timer_db_cnt = 0;
3857 	agg_db_info->direct_db_cnt = 0;
3858 	agg_db_info->inflight_histo = (uint64 *)MALLOCZ(dhd->osh, DHD_INFLIGHT_HISTO_SIZE);
3859 }
3860 
3861 static void
3862 dhd_msgbuf_agg_h2d_db_timer_reset(dhd_pub_t *dhd)
3863 {
3864 	dhd_prot_t *prot = dhd->prot;
3865 	agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info;
3866 	if (agg_db_info->init) {
3867 		if (agg_db_info->inflight_histo) {
3868 			MFREE(dhd->osh, agg_db_info->inflight_histo, DHD_INFLIGHT_HISTO_SIZE);
3869 		}
3870 		hrtimer_try_to_cancel(&agg_db_info->timer);
3871 		agg_db_info->init = FALSE;
3872 	}
3873 }
3874 
3875 static void
3876 dhd_msgbuf_agg_h2d_db_timer_cancel(dhd_pub_t *dhd)
3877 {
3878 	dhd_prot_t *prot = dhd->prot;
3879 	agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info;
3880 	hrtimer_try_to_cancel(&agg_db_info->timer);
3881 }
3882 #endif /* AGG_H2D_DB */
3883 
3884 void
3885 dhd_prot_clearcounts(dhd_pub_t *dhd)
3886 {
3887 	dhd_prot_t *prot = dhd->prot;
3888 #ifdef AGG_H2D_DB
3889 	agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info;
3890 	if (agg_db_info->inflight_histo) {
3891 		memset(agg_db_info->inflight_histo, 0, DHD_INFLIGHT_HISTO_SIZE);
3892 	}
3893 	agg_db_info->direct_db_cnt = 0;
3894 	agg_db_info->timer_db_cnt = 0;
3895 #endif /* AGG_H2D_DB */
3896 	prot->txcpl_db_cnt = 0;
3897 	prot->tx_h2d_db_cnt = 0;
3898 }
3899 
3900 /**
3901  * dhd_prot_init - second stage of dhd_prot_attach. Now that the dongle has
3902  * completed its initialization of the pcie_shared structure, we may now fetch
3903  * the dongle advertised features and adjust the protocol layer accordingly.
3904  *
3905  * dhd_prot_init() may be invoked again after a dhd_prot_reset().
3906  */
3907 int
3908 dhd_prot_init(dhd_pub_t *dhd)
3909 {
3910 	sh_addr_t base_addr;
3911 	dhd_prot_t *prot = dhd->prot;
3912 	int ret = 0;
3913 	uint32 idmacontrol;
3914 	uint32 waitcount = 0;
3915 	uint16 max_eventbufpost = 0;
3916 
3917 	/**
3918 	 * A user defined value can be assigned to global variable h2d_max_txpost via
3919 	 * 1. DHD IOVAR h2d_max_txpost, before firmware download
3920 	 * 2. module parameter h2d_max_txpost
3921 	 * prot->h2d_max_txpost is assigned with DHD_H2DRING_TXPOST_MAX_ITEM,
3922 	 * if user has not defined any buffers by one of the above methods.
3923 	 */
3924 	prot->h2d_max_txpost = (uint16)h2d_max_txpost;
3925 	DHD_ERROR(("%s:%d: h2d_max_txpost = %d\n", __FUNCTION__, __LINE__, prot->h2d_max_txpost));
3926 
3927 #if defined(DHD_HTPUT_TUNABLES)
3928 	prot->h2d_htput_max_txpost = (uint16)h2d_htput_max_txpost;
3929 	DHD_ERROR(("%s:%d: h2d_htput_max_txpost = %d\n",
3930 		__FUNCTION__, __LINE__, prot->h2d_htput_max_txpost));
3931 #endif /* DHD_HTPUT_TUNABLES */
3932 
3933 	/* Read max rx packets supported by dongle */
3934 	dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0);
3935 	if (prot->max_rxbufpost == 0) {
3936 		/* This would happen if the dongle firmware is not */
3937 		/* using the latest shared structure template */
3938 		prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST;
3939 	}
3940 	DHD_ERROR(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));
3941 
3942 	/* Initialize.  bzero() would blow away the dma pointers. */
3943 	max_eventbufpost = (uint16)dhdpcie_get_max_eventbufpost(dhd->bus);
3944 	prot->max_eventbufpost = (((max_eventbufpost + DHD_FLOWRING_MAX_IOCTLRESPBUF_POST)) >=
3945 		H2DRING_CTRL_SUB_MAX_ITEM) ? DHD_FLOWRING_MAX_EVENTBUF_POST : max_eventbufpost;
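	/* Intent sketch: if the requested event buf posts plus the ioctl response
	 * buf posts would overflow the H2D control submit ring
	 * (H2DRING_CTRL_SUB_MAX_ITEM), fall back to the default
	 * DHD_FLOWRING_MAX_EVENTBUF_POST instead of the supplied value.
	 */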
3946 	prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST;
3947 	prot->max_infobufpost = DHD_H2D_INFORING_MAX_BUF_POST;
3948 #ifdef BTLOG
3949 	prot->max_btlogbufpost = DHD_H2D_BTLOGRING_MAX_BUF_POST;
3950 #endif	/* BTLOG */
3951 	prot->max_tsbufpost = DHD_MAX_TSBUF_POST;
3952 
3953 	prot->cur_ioctlresp_bufs_posted = 0;
3954 	OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
3955 	prot->data_seq_no = 0;
3956 	prot->ioctl_seq_no = 0;
3957 	prot->rxbufpost = 0;
3958 	prot->tot_rxbufpost = 0;
3959 	prot->tot_rxcpl = 0;
3960 	prot->cur_event_bufs_posted = 0;
3961 	prot->ioctl_state = 0;
3962 	prot->curr_ioctl_cmd = 0;
3963 	prot->cur_ts_bufs_posted = 0;
3964 	prot->infobufpost = 0;
3965 #ifdef BTLOG
3966 	prot->btlogbufpost = 0;
3967 #endif	/* BTLOG */
3968 
3969 	prot->dmaxfer.srcmem.va = NULL;
3970 	prot->dmaxfer.dstmem.va = NULL;
3971 	prot->dmaxfer.in_progress = FALSE;
3972 
3973 #ifdef DHD_HMAPTEST
3974 	prot->hmaptest.in_progress = FALSE;
3975 #endif /* DHD_HMAPTEST */
3976 	prot->metadata_dbg = FALSE;
3977 	prot->rx_metadata_offset = 0;
3978 	prot->tx_metadata_offset = 0;
3979 	prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT;
3980 
3981 	/* To catch any rollover issues fast, starting with higher ioctl_trans_id */
3982 	prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
3983 	prot->ioctl_state = 0;
3984 	prot->ioctl_status = 0;
3985 	prot->ioctl_resplen = 0;
3986 	prot->ioctl_received = IOCTL_WAIT;
3987 
3988 	/* Initialize Common MsgBuf Rings */
3989 
3990 	prot->device_ipc_version = dhd->bus->api.fw_rev;
3991 	prot->host_ipc_version = PCIE_SHARED_VERSION;
3992 	prot->no_tx_resource = FALSE;
3993 
3994 	/* Init the host API version */
3995 	dhd_set_host_cap(dhd);
3996 
3997 	/* alloc and configure scb host address for dongle */
3998 	if ((ret = dhd_alloc_host_scbs(dhd))) {
3999 		return ret;
4000 	}
4001 
4002 	/* Register the interrupt function upfront */
4003 	/* remove corerev checks in data path */
4004 	/* do this after host/fw negotiation for DAR */
4005 	prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus);
4006 	prot->mb_2_ring_fn = dhd_bus_get_mbintr_2_fn(dhd->bus);
4007 
4008 	prot->tx_h2d_db_cnt = 0;
4009 #ifdef AGG_H2D_DB
4010 	dhd_msgbuf_agg_h2d_db_timer_init(dhd);
4011 #endif /* AGG_H2D_DB */
4012 
4013 	dhd->bus->_dar_war = (dhd->bus->sih->buscorerev < 64) ? TRUE : FALSE;
4014 
4015 	/* If supported by the host, indicate the memory block
4016 	 * for completion writes / submission reads to shared space
4017 	 */
4018 	if (dhd->dma_d2h_ring_upd_support) {
4019 		dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_wr_buf.pa);
4020 		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
4021 			D2H_DMA_INDX_WR_BUF, 0);
4022 		dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_rd_buf.pa);
4023 		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
4024 			H2D_DMA_INDX_RD_BUF, 0);
4025 	}
4026 
4027 	if (dhd->dma_h2d_ring_upd_support || IDMA_ENAB(dhd)) {
4028 		dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_wr_buf.pa);
4029 		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
4030 			H2D_DMA_INDX_WR_BUF, 0);
4031 		dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_rd_buf.pa);
4032 		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
4033 			D2H_DMA_INDX_RD_BUF, 0);
4034 	}
4035 
4036 	dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn);
4037 	dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn);
4038 	dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln);
4039 
4040 	/* Make it compatible with pre-rev7 Firmware */
4041 	if (prot->active_ipc_version < PCIE_SHARED_VERSION_7) {
4042 		prot->d2hring_tx_cpln.item_len =
4043 			D2HRING_TXCMPLT_ITEMSIZE_PREREV7;
4044 		prot->d2hring_rx_cpln.item_len =
4045 			D2HRING_RXCMPLT_ITEMSIZE_PREREV7;
4046 	}
4047 	dhd_prot_ring_init(dhd, &prot->d2hring_tx_cpln);
4048 	dhd_prot_ring_init(dhd, &prot->d2hring_rx_cpln);
4049 
4050 	dhd_prot_d2h_sync_init(dhd);
4051 
4052 	dhd_prot_h2d_sync_init(dhd);
4053 
4054 #ifdef PCIE_INB_DW
4055 	/* Set the initial DS state */
4056 	if (INBAND_DW_ENAB(dhd->bus)) {
4057 		dhdpcie_bus_set_pcie_inband_dw_state(dhd->bus,
4058 			DW_DEVICE_DS_ACTIVE);
4059 	}
4060 #endif /* PCIE_INB_DW */
4061 
4062 	/* init the scratch buffer */
4063 	dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_scratch_buf.pa);
4064 	dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
4065 		D2H_DMA_SCRATCH_BUF, 0);
4066 	dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.len,
4067 		sizeof(prot->d2h_dma_scratch_buf.len), D2H_DMA_SCRATCH_BUF_LEN, 0);
4068 #ifdef DHD_DMA_INDICES_SEQNUM
4069 	prot->host_seqnum = D2H_EPOCH_INIT_VAL % D2H_EPOCH_MODULO;
4070 #endif /* DHD_DMA_INDICES_SEQNUM */
4071 	/* Signal to the dongle that common ring init is complete */
4072 	if (dhd->hostrdy_after_init)
4073 		dhd_bus_hostready(dhd->bus);
4074 
4075 	/*
4076 	 * If the DMA-able buffers for flowring needs to come from a specific
4077 	 * contiguous memory region, then setup prot->flowrings_dma_buf here.
4078 	 * dhd_prot_flowrings_pool_attach() will carve out DMA-able buffers from
4079 	 * this contiguous memory region, for each of the flowrings.
4080 	 */
4081 
4082 	/* Pre-allocate pool of msgbuf_ring for flowrings */
4083 	if (dhd_prot_flowrings_pool_attach(dhd) != BCME_OK) {
4084 		return BCME_ERROR;
4085 	}
4086 
4087 	dhd->ring_attached = TRUE;
4088 
4089 	/* If IFRM is enabled, wait for FW to setup the DMA channel */
4090 	if (IFRM_ENAB(dhd)) {
4091 		dhd_base_addr_htolpa(&base_addr, prot->h2d_ifrm_indx_wr_buf.pa);
4092 		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
4093 			H2D_IFRM_INDX_WR_BUF, 0);
4094 	}
4095 
4096 	/* If IDMA is enabled and inited, wait for FW to set up the IDMA descriptors
4097 	 * Waiting just before configuring doorbell
4098 	 */
4099 #ifdef BCMQT
4100 #define	IDMA_ENABLE_WAIT  100
4101 #else
4102 #define	IDMA_ENABLE_WAIT  10
4103 #endif
4104 	if (IDMA_ACTIVE(dhd)) {
4105 		/* wait for idma_en bit in IDMAcontrol register to be set */
4106 		/* Loop while idma_en is not set */
4107 		uint buscorerev = dhd->bus->sih->buscorerev;
4108 		idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
4109 			IDMAControl(buscorerev), 0, 0);
4110 		while (!(idmacontrol & PCIE_IDMA_MODE_EN(buscorerev)) &&
4111 			(waitcount++ < IDMA_ENABLE_WAIT)) {
4112 
4113 			DHD_ERROR(("iDMA not enabled yet, waiting 1 ms c=%d IDMAControl = %08x\n",
4114 				waitcount, idmacontrol));
4115 			OSL_DELAY(1000); /* 1ms as its onetime only */
4116 			idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
4117 				IDMAControl(buscorerev), 0, 0);
4118 		}
4119 
4120 		if (waitcount < IDMA_ENABLE_WAIT) {
4121 			DHD_ERROR(("iDMA enabled PCIEControl = %08x\n", idmacontrol));
4122 		} else {
4123 			DHD_ERROR(("Error: wait for iDMA timed out wait=%d IDMAControl = %08x\n",
4124 				waitcount, idmacontrol));
4125 			return BCME_ERROR;
4126 		}
4127 		// add delay to fix bring up issue
4128 		OSL_SLEEP(1);
4129 	}
4130 
4131 	/* Host should configure soft doorbells if needed ... here */
4132 
4133 	/* Post to dongle host configured soft doorbells */
4134 	dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd);
4135 
4136 	dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
4137 	dhd_msgbuf_rxbuf_post_event_bufs(dhd);
4138 
4139 	prot->no_retry = FALSE;
4140 	prot->no_aggr = FALSE;
4141 	prot->fixed_rate = FALSE;
4142 
4143 	/*
4144 	 * below this point. Any other host data structure initialization that
4145 	 * needs to be done before the DPC starts executing should be done
4146 	 * before this point.
4147 	 * Once we start sending H2D requests to the Dongle, the Dongle may
4148 	 * respond immediately, so the DPC context that handles the
4149 	 * D2H response could preempt the context in which dhd_prot_init is running.
4150 	 * We want to ensure that all the host part of dhd_prot_init is
4151 	 * done before that.
4152 	 * done before that.
4153 	 */
4154 
4155 	/* See if info rings could be created, info rings should be created
4156 	 * only if dongle does not support EDL
4157 	 */
4158 #ifdef EWP_EDL
4159 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support)
4160 #else
4161 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
4162 #endif /* EWP_EDL */
4163 	{
4164 		if ((ret = dhd_prot_init_info_rings(dhd)) != BCME_OK) {
4165 			/* For now log and proceed, further clean-up action may be necessary
4166 			 * when we have more clarity.
4167 			 */
4168 			DHD_ERROR(("%s Info rings couldn't be created: Err Code%d",
4169 				__FUNCTION__, ret));
4170 		}
4171 	}
4172 
4173 #ifdef EWP_EDL
4174 	/* Create Enhanced Debug Lane rings (EDL) if dongle supports it */
4175 	if (dhd->dongle_edl_support) {
4176 		if ((ret = dhd_prot_init_edl_rings(dhd)) != BCME_OK) {
4177 			DHD_ERROR(("%s EDL rings couldn't be created: Err Code%d",
4178 				__FUNCTION__, ret));
4179 		}
4180 	}
4181 #endif /* EWP_EDL */
4182 
4183 #ifdef BTLOG
4184 	/* create BT log rings */
4185 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_7 && dhd->bt_logging) {
4186 		if ((ret = dhd_prot_init_btlog_rings(dhd)) != BCME_OK) {
4187 			/* For now log and proceed, further clean-up action may be necessary
4188 			 * when we have more clarity.
4189 			 */
4190 			DHD_ERROR(("%s BTLOG rings couldn't be created: Err Code%d",
4191 				__FUNCTION__, ret));
4192 		}
4193 	}
4194 #endif	/* BTLOG */
4195 
4196 #ifdef DHD_HP2P
4197 	/* create HPP txcmpl/rxcmpl rings */
4198 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_7 && dhd->hp2p_capable) {
4199 		if ((ret = dhd_prot_init_hp2p_rings(dhd)) != BCME_OK) {
4200 			/* For now log and proceed, further clean-up action may be necessary
4201 			 * when we have more clarity.
4202 			 */
4203 			DHD_ERROR(("%s HP2P rings couldn't be created: Err Code%d",
4204 				__FUNCTION__, ret));
4205 		}
4206 	}
4207 #endif /* DHD_HP2P */
4208 
4209 #ifdef DHD_LB_RXP
4210 	/* default rx flow ctrl thresholds. Can be changed at run time through sysfs */
4211 	dhd->lb_rxp_stop_thr = (D2HRING_RXCMPLT_MAX_ITEM * LB_RXP_STOP_THR);
4212 	dhd->lb_rxp_strt_thr = (D2HRING_RXCMPLT_MAX_ITEM * LB_RXP_STRT_THR);
4213 	atomic_set(&dhd->lb_rxp_flow_ctrl, FALSE);
4214 #endif /* DHD_LB_RXP */
4215 	return BCME_OK;
4216 } /* dhd_prot_init */
4217 
4218 /**
4219  * dhd_prot_detach - PCIE FD protocol layer destructor.
4220  * Unlink, frees allocated protocol memory (including dhd_prot)
4221  */
4222 void dhd_prot_detach(dhd_pub_t *dhd)
4223 {
4224 	dhd_prot_t *prot = dhd->prot;
4225 
4226 	/* Stop the protocol module */
4227 	if (prot) {
4228 		/* For non-android platforms, devreset will not be called,
4229 		 * so call prot_reset here. It is harmless if called twice.
4230 		 */
4231 		dhd_prot_reset(dhd);
4232 
4233 		/* free up all DMA-able buffers allocated during prot attach/init */
4234 
4235 		dhd_dma_buf_free(dhd, &prot->d2h_dma_scratch_buf);
4236 #ifdef DHD_HMAPTEST
4237 		dhd_dma_buf_free(dhd, &prot->hmaptest.mem);
4238 #endif /* DHD_HMAPTEST */
4239 		dhd_dma_buf_free(dhd, &prot->retbuf);
4240 		dhd_dma_buf_free(dhd, &prot->ioctbuf);
4241 		dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf);
4242 		dhd_dma_buf_free(dhd, &prot->hostts_req_buf);
4243 		dhd_dma_buf_free(dhd, &prot->fw_trap_buf);
4244 		dhd_dma_buf_free(dhd, &prot->host_scb_buf);
4245 #ifdef SNAPSHOT_UPLOAD
4246 		dhd_dma_buf_free(dhd, &prot->snapshot_upload_buf);
4247 #endif	/* SNAPSHOT_UPLOAD */
4248 
4249 		/* DMA-able buffers for DMAing H2D/D2H WR/RD indices */
4250 		dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
4251 		dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_rd_buf);
4252 		dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_wr_buf);
4253 		dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
4254 
4255 		dhd_dma_buf_free(dhd, &prot->h2d_ifrm_indx_wr_buf);
4256 
4257 		/* Common MsgBuf Rings */
4258 		dhd_prot_ring_detach(dhd, &prot->h2dring_ctrl_subn);
4259 		dhd_prot_ring_detach(dhd, &prot->h2dring_rxp_subn);
4260 		dhd_prot_ring_detach(dhd, &prot->d2hring_ctrl_cpln);
4261 		dhd_prot_ring_detach(dhd, &prot->d2hring_tx_cpln);
4262 		dhd_prot_ring_detach(dhd, &prot->d2hring_rx_cpln);
4263 
4264 		/* Detach each DMA-able buffer and free the pool of msgbuf_ring_t */
4265 		dhd_prot_flowrings_pool_detach(dhd);
4266 
4267 		/* detach info rings */
4268 		dhd_prot_detach_info_rings(dhd);
4269 
4270 #ifdef BTLOG
4271 		/* detach BT log rings */
4272 		dhd_prot_detach_btlog_rings(dhd);
4273 #endif	/* BTLOG */
4274 
4275 #ifdef EWP_EDL
4276 		dhd_prot_detach_edl_rings(dhd);
4277 #endif
4278 #ifdef DHD_HP2P
4279 		/* detach HPP rings */
4280 		dhd_prot_detach_hp2p_rings(dhd);
4281 #endif /* DHD_HP2P */
4282 
4283 		/* if IOCTLRESP_USE_CONSTMEM is defined IOCTL PKTs use pktid_map_handle_ioctl
4284 		 * handler and PKT memory is allocated using alloc_ioctl_return_buffer(), Otherwise
4285 		 * they will be part of pktid_ctrl_map handler and PKT memory is allocated using
4286 		 * PKTGET_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) OR PKTGET.
4287 		 * Similarly for freeing PKT buffers DHD_NATIVE_TO_PKTID_FINI will be used
4288 		 * which calls PKTFREE_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) OR PKTFREE.
4289 		 * Else if IOCTLRESP_USE_CONSTMEM is defined IOCTL PKTs will be freed using
4290 		 * DHD_NATIVE_TO_PKTID_FINI_IOCTL which calls free_ioctl_return_buffer.
4291 		 */
4292 		DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_ctrl_map);
4293 		DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_rx_map);
4294 		DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_tx_map);
4295 #ifdef IOCTLRESP_USE_CONSTMEM
4296 		DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl);
4297 #endif
4298 #ifdef DHD_MAP_PKTID_LOGGING
4299 		DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_map);
4300 		DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_unmap);
4301 #endif /* DHD_MAP_PKTID_LOGGING */
4302 #ifdef DHD_DMA_INDICES_SEQNUM
4303 		if (prot->h2d_dma_indx_rd_copy_buf) {
4304 			MFREE(dhd->osh, prot->h2d_dma_indx_rd_copy_buf,
4305 				prot->h2d_dma_indx_rd_copy_bufsz);
4306 		}
4307 		if (prot->d2h_dma_indx_wr_copy_buf) {
4308 			MFREE(dhd->osh, prot->d2h_dma_indx_wr_copy_buf,
4309 				prot->d2h_dma_indx_wr_copy_bufsz);
4310 		}
4311 #endif /* DHD_DMA_INDICES_SEQNUM */
4312 		DHD_OS_PREFREE(dhd, dhd->prot, sizeof(dhd_prot_t));
4313 
4314 		dhd->prot = NULL;
4315 	}
4316 } /* dhd_prot_detach */
4317 
4318 /**
4319  * dhd_prot_reset - Reset the protocol layer without freeing any objects.
4320  * This may be invoked to soft reboot the dongle, without having to
4321  * detach and attach the entire protocol layer.
4322  *
4323  * After dhd_prot_reset(), dhd_prot_init() may be invoked
4324  * without going through a dhd_prot_attach() phase.
4325  */
4326 void
4327 dhd_prot_reset(dhd_pub_t *dhd)
4328 {
4329 	struct dhd_prot *prot = dhd->prot;
4330 
4331 	DHD_TRACE(("%s\n", __FUNCTION__));
4332 
4333 	if (prot == NULL) {
4334 		return;
4335 	}
4336 
4337 	dhd->ring_attached = FALSE;
4338 
4339 	dhd_prot_flowrings_pool_reset(dhd);
4340 
4341 	/* Reset Common MsgBuf Rings */
4342 	dhd_prot_ring_reset(dhd, &prot->h2dring_ctrl_subn);
4343 	dhd_prot_ring_reset(dhd, &prot->h2dring_rxp_subn);
4344 	dhd_prot_ring_reset(dhd, &prot->d2hring_ctrl_cpln);
4345 	dhd_prot_ring_reset(dhd, &prot->d2hring_tx_cpln);
4346 	dhd_prot_ring_reset(dhd, &prot->d2hring_rx_cpln);
4347 
4348 	/* Reset info rings */
4349 	if (prot->h2dring_info_subn) {
4350 		dhd_prot_ring_reset(dhd, prot->h2dring_info_subn);
4351 	}
4352 
4353 	if (prot->d2hring_info_cpln) {
4354 		dhd_prot_ring_reset(dhd, prot->d2hring_info_cpln);
4355 	}
4356 
4357 #ifdef EWP_EDL
4358 	if (prot->d2hring_edl) {
4359 		dhd_prot_ring_reset(dhd, prot->d2hring_edl);
4360 	}
4361 #endif /* EWP_EDL */
4362 
4363 	/* Reset all DMA-able buffers allocated during prot attach */
4364 	dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf);
4365 #ifdef DHD_HMAPTEST
4366 	dhd_dma_buf_reset(dhd, &prot->hmaptest.mem);
4367 #endif /* DHD_HMAPTEST */
4368 	dhd_dma_buf_reset(dhd, &prot->retbuf);
4369 	dhd_dma_buf_reset(dhd, &prot->ioctbuf);
4370 	dhd_dma_buf_reset(dhd, &prot->host_bus_throughput_buf);
4371 	dhd_dma_buf_reset(dhd, &prot->hostts_req_buf);
4372 	dhd_dma_buf_reset(dhd, &prot->fw_trap_buf);
4373 	dhd_dma_buf_reset(dhd, &prot->host_scb_buf);
4374 #ifdef SNAPSHOT_UPLOAD
4375 	dhd_dma_buf_reset(dhd, &prot->snapshot_upload_buf);
4376 #endif /* SNAPSHOT_UPLOAD */
4377 
4378 	dhd_dma_buf_reset(dhd, &prot->h2d_ifrm_indx_wr_buf);
4379 
4380 	/* Reset all DMA-able buffers used for DMAing H2D/D2H WR/RD indices */
4381 	dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf);
4382 	dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf);
4383 	dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf);
4384 	dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_wr_buf);
4385 
4386 #ifdef DHD_DMA_INDICES_SEQNUM
4387 		if (prot->d2h_dma_indx_wr_copy_buf) {
4388 			dhd_local_buf_reset(prot->h2d_dma_indx_rd_copy_buf,
4389 				prot->h2d_dma_indx_rd_copy_bufsz);
4390 			dhd_local_buf_reset(prot->d2h_dma_indx_wr_copy_buf,
4391 				prot->d2h_dma_indx_wr_copy_bufsz);
4392 		}
4393 #endif /* DHD_DMA_INDICES_SEQNUM */
4394 
4395 	/* XXX: dmaxfer src and dst? */
4396 
4397 	prot->rx_metadata_offset = 0;
4398 	prot->tx_metadata_offset = 0;
4399 
4400 	prot->rxbufpost = 0;
4401 	prot->cur_event_bufs_posted = 0;
4402 	prot->cur_ioctlresp_bufs_posted = 0;
4403 
4404 	OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
4405 	prot->data_seq_no = 0;
4406 	prot->ioctl_seq_no = 0;
4407 	prot->ioctl_state = 0;
4408 	prot->curr_ioctl_cmd = 0;
4409 	prot->ioctl_received = IOCTL_WAIT;
4410 	/* To catch any rollover issues fast, starting with higher ioctl_trans_id */
4411 	prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
4412 	prot->txcpl_db_cnt = 0;
4413 
4414 	/* dhd_flow_rings_init is called from dhd_bus_start,
4415 	 * so when the bus is stopped, the flowrings must be deleted
4416 	 */
4417 	if (dhd->flow_rings_inited) {
4418 		dhd_flow_rings_deinit(dhd);
4419 	}
4420 
4421 #ifdef BTLOG
4422 	/* Reset BTlog rings */
4423 	if (prot->h2dring_btlog_subn) {
4424 		dhd_prot_ring_reset(dhd, prot->h2dring_btlog_subn);
4425 	}
4426 
4427 	if (prot->d2hring_btlog_cpln) {
4428 		dhd_prot_ring_reset(dhd, prot->d2hring_btlog_cpln);
4429 	}
4430 #endif	/* BTLOG */
4431 #ifdef DHD_HP2P
4432 	if (prot->d2hring_hp2p_txcpl) {
4433 		dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_txcpl);
4434 	}
4435 	if (prot->d2hring_hp2p_rxcpl) {
4436 		dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_rxcpl);
4437 	}
4438 #endif /* DHD_HP2P */
4439 
4440 	/* Reset PKTID map */
4441 	DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_ctrl_map);
4442 	DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_rx_map);
4443 	DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_tx_map);
4444 #ifdef IOCTLRESP_USE_CONSTMEM
4445 	DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, prot->pktid_map_handle_ioctl);
4446 #endif /* IOCTLRESP_USE_CONSTMEM */
4447 #ifdef DMAMAP_STATS
4448 	dhd->dma_stats.txdata = dhd->dma_stats.txdata_sz = 0;
4449 	dhd->dma_stats.rxdata = dhd->dma_stats.rxdata_sz = 0;
4450 #ifndef IOCTLRESP_USE_CONSTMEM
4451 	dhd->dma_stats.ioctl_rx = dhd->dma_stats.ioctl_rx_sz = 0;
4452 #endif /* IOCTLRESP_USE_CONSTMEM */
4453 	dhd->dma_stats.event_rx = dhd->dma_stats.event_rx_sz = 0;
4454 	dhd->dma_stats.info_rx = dhd->dma_stats.info_rx_sz = 0;
4455 	dhd->dma_stats.tsbuf_rx = dhd->dma_stats.tsbuf_rx_sz = 0;
4456 #endif /* DMAMAP_STATS */
4457 
4458 #ifdef AGG_H2D_DB
4459 	dhd_msgbuf_agg_h2d_db_timer_reset(dhd);
4460 #endif /* AGG_H2D_DB */
4461 
4462 } /* dhd_prot_reset */
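/*
 * Illustrative sketch (not part of the driver): a soft reboot of the dongle can
 * reuse the already-attached protocol layer by pairing dhd_prot_reset() with a
 * subsequent dhd_prot_init(), as described in the function comment above. The
 * helper name and the guarding macro below are hypothetical; error handling is
 * elided.
 */
#ifdef DHD_PROT_RESET_EXAMPLE
static int
dhd_prot_soft_reboot_example(dhd_pub_t *dhd)
{
	/* Clear rings, pktid maps and DMA-able buffers, keeping the allocations */
	dhd_prot_reset(dhd);

	/* Re-initialize the protocol layer; no dhd_prot_attach() is needed */
	return dhd_prot_init(dhd);
}
#endif /* DHD_PROT_RESET_EXAMPLE */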
4463 
4464 #if defined(DHD_LB_RXP)
4465 #define DHD_LB_DISPATCH_RX_PROCESS(dhdp)	dhd_lb_dispatch_rx_process(dhdp)
4466 #else /* !DHD_LB_RXP */
4467 #define DHD_LB_DISPATCH_RX_PROCESS(dhdp)	do { /* noop */ } while (0)
4468 #endif /* !DHD_LB_RXP */
4469 
4470 #if defined(DHD_LB)
4471 /* DHD load balancing: deferral of work to another online CPU */
4472 /* DHD_LB_RXP dispatchers, in dhd_linux.c */
4473 extern void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
4474 extern void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
4475 extern unsigned long dhd_read_lb_rxp(dhd_pub_t *dhdp);
4476 
4477 #if defined(DHD_LB_RXP)
4478 /**
4479  * dhd_lb_dispatch_rx_process - load balance by dispatch Rx processing work
4480  * to other CPU cores
4481  */
4482 static INLINE void
4483 dhd_lb_dispatch_rx_process(dhd_pub_t *dhdp)
4484 {
4485 	dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */
4486 }
4487 #endif /* DHD_LB_RXP */
4488 #endif /* DHD_LB */
4489 
4490 void
4491 dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset)
4492 {
4493 	dhd_prot_t *prot = dhd->prot;
4494 	prot->rx_dataoffset = rx_offset;
4495 }
4496 
4497 static int
4498 dhd_check_create_info_rings(dhd_pub_t *dhd)
4499 {
4500 	dhd_prot_t *prot = dhd->prot;
4501 	int ret = BCME_ERROR;
4502 	uint16 ringid;
4503 
4504 #ifdef BTLOG
4505 	if (dhd->submit_count_WAR) {
4506 		ringid = dhd->bus->max_tx_flowrings + BCMPCIE_COMMON_MSGRINGS;
4507 	} else
4508 #endif	/* BTLOG */
4509 	{
4510 		/* dongle may increase max_submission_rings so keep
4511 		 * ringid at end of dynamic rings
4512 		 */
4513 		ringid = dhd->bus->max_tx_flowrings +
4514 			(dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
4515 			BCMPCIE_H2D_COMMON_MSGRINGS;
4516 	}
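	/*
	 * Note (illustrative): the expression above simplifies to
	 * max_submission_rings + BCMPCIE_H2D_COMMON_MSGRINGS, i.e. the info
	 * submit ring is placed just past all dynamic submission rings. With
	 * hypothetical values max_tx_flowrings = 16, max_submission_rings = 18
	 * and BCMPCIE_H2D_COMMON_MSGRINGS = 2, ringid would be
	 * 16 + (18 - 16) + 2 = 20.
	 */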
4517 
4518 	if (prot->d2hring_info_cpln) {
4519 		/* for d2hring re-entry case, clear inited flag */
4520 		prot->d2hring_info_cpln->inited = FALSE;
4521 	}
4522 
4523 	if (prot->h2dring_info_subn && prot->d2hring_info_cpln) {
4524 		return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
4525 	}
4526 
4527 	if (prot->h2dring_info_subn == NULL) {
4528 		prot->h2dring_info_subn = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
4529 
4530 		if (prot->h2dring_info_subn == NULL) {
4531 			DHD_ERROR(("%s: couldn't alloc memory for h2dring_info_subn\n",
4532 				__FUNCTION__));
4533 			return BCME_NOMEM;
4534 		}
4535 
4536 		DHD_INFO(("%s: about to create debug submit ring\n", __FUNCTION__));
4537 		ret = dhd_prot_ring_attach(dhd, prot->h2dring_info_subn, "h2dinfo",
4538 			H2DRING_DYNAMIC_INFO_MAX_ITEM, H2DRING_INFO_BUFPOST_ITEMSIZE,
4539 			ringid);
4540 		if (ret != BCME_OK) {
4541 			DHD_ERROR(("%s: couldn't alloc resources for dbg submit ring\n",
4542 				__FUNCTION__));
4543 			goto err;
4544 		}
4545 	}
4546 
4547 	if (prot->d2hring_info_cpln == NULL) {
4548 		prot->d2hring_info_cpln = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
4549 
4550 		if (prot->d2hring_info_cpln == NULL) {
4551 			DHD_ERROR(("%s: couldn't alloc memory for d2hring_info_cpln\n",
4552 				__FUNCTION__));
4553 			return BCME_NOMEM;
4554 		}
4555 
4556 		/* create the debug info completion ring next to debug info submit ring
4557 		* ringid = id next to debug info submit ring
4558 		*/
4559 		ringid = ringid + 1;
4560 
4561 		DHD_INFO(("%s: about to create debug cpl ring\n", __FUNCTION__));
4562 		ret = dhd_prot_ring_attach(dhd, prot->d2hring_info_cpln, "d2hinfo",
4563 			D2HRING_DYNAMIC_INFO_MAX_ITEM, D2HRING_INFO_BUFCMPLT_ITEMSIZE,
4564 			ringid);
4565 		if (ret != BCME_OK) {
4566 			DHD_ERROR(("%s: couldn't alloc resources for dbg cpl ring\n",
4567 				__FUNCTION__));
4568 			dhd_prot_ring_detach(dhd, prot->h2dring_info_subn);
4569 			goto err;
4570 		}
4571 	}
4572 
4573 	return ret;
4574 err:
4575 	MFREE(prot->osh, prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
4576 
4577 	if (prot->d2hring_info_cpln) {
4578 		MFREE(prot->osh, prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
4579 	}
4580 	return ret;
4581 } /* dhd_check_create_info_rings */
4582 
4583 int
4584 dhd_prot_init_info_rings(dhd_pub_t *dhd)
4585 {
4586 	dhd_prot_t *prot = dhd->prot;
4587 	int ret = BCME_OK;
4588 
4589 	if ((ret = dhd_check_create_info_rings(dhd)) != BCME_OK) {
4590 		DHD_ERROR(("%s: info rings aren't created! \n",
4591 			__FUNCTION__));
4592 		return ret;
4593 	}
4594 
4595 	if ((prot->d2hring_info_cpln->inited) || (prot->d2hring_info_cpln->create_pending)) {
4596 		DHD_INFO(("Info completion ring was created!\n"));
4597 		return ret;
4598 	}
4599 
4600 	DHD_TRACE(("trying to send create d2h info ring: id %d\n", prot->d2hring_info_cpln->idx));
4601 	ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_info_cpln,
4602 		BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL, DHD_D2H_DBGRING_REQ_PKTID);
4603 	if (ret != BCME_OK)
4604 		return ret;
4605 
4606 	prot->h2dring_info_subn->seqnum = H2D_EPOCH_INIT_VAL;
4607 	prot->h2dring_info_subn->current_phase = 0;
4608 	prot->d2hring_info_cpln->seqnum = D2H_EPOCH_INIT_VAL;
4609 	prot->d2hring_info_cpln->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4610 
4611 	DHD_TRACE(("trying to send create h2d info ring id %d\n", prot->h2dring_info_subn->idx));
4612 	prot->h2dring_info_subn->n_completion_ids = 1;
4613 	prot->h2dring_info_subn->compeltion_ring_ids[0] = prot->d2hring_info_cpln->idx;
4614 
4615 	ret = dhd_send_h2d_ringcreate(dhd, prot->h2dring_info_subn,
4616 		BCMPCIE_H2D_RING_TYPE_DBGBUF_SUBMIT, DHD_H2D_DBGRING_REQ_PKTID);
4617 
4618 	/* Note that there is no way to delete a d2h or h2d ring once it has been created,
4619 	 * so we cannot clean up if one ring was created while the other failed
4620 	 */
4621 	return ret;
4622 } /* dhd_prot_init_info_rings */
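/*
 * Summary (illustrative): info ring bring-up first issues the D2H completion
 * ring create request, then resets both rings' sequence numbers and phase
 * bits, links the H2D submit ring to its completion ring id, and finally
 * issues the H2D submit ring create request. The BTLOG and HP2P ring init
 * routines below reuse the same pattern.
 */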
4623 
4624 static void
4625 dhd_prot_detach_info_rings(dhd_pub_t *dhd)
4626 {
4627 	if (dhd->prot->h2dring_info_subn) {
4628 		dhd_prot_ring_detach(dhd, dhd->prot->h2dring_info_subn);
4629 		MFREE(dhd->prot->osh, dhd->prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
4630 	}
4631 	if (dhd->prot->d2hring_info_cpln) {
4632 		dhd_prot_ring_detach(dhd, dhd->prot->d2hring_info_cpln);
4633 		MFREE(dhd->prot->osh, dhd->prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
4634 	}
4635 }
4636 
4637 #ifdef DHD_HP2P
4638 static int
4639 dhd_check_create_hp2p_rings(dhd_pub_t *dhd)
4640 {
4641 	dhd_prot_t *prot = dhd->prot;
4642 	int ret = BCME_ERROR;
4643 	uint16 ringid;
4644 
4645 	/* Last 2 dynamic ring indices are used by hp2p rings */
4646 	ringid = dhd->bus->max_submission_rings + dhd->bus->max_completion_rings - 2;
4647 
4648 	if (prot->d2hring_hp2p_txcpl == NULL) {
4649 		prot->d2hring_hp2p_txcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
4650 
4651 		if (prot->d2hring_hp2p_txcpl == NULL) {
4652 			DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_txcpl\n",
4653 				__FUNCTION__));
4654 			return BCME_NOMEM;
4655 		}
4656 
4657 		DHD_INFO(("%s: about to create hp2p txcpl ring\n", __FUNCTION__));
4658 		ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_txcpl, "d2hhp2p_txcpl",
4659 			dhd_bus_get_hp2p_ring_max_size(dhd->bus, TRUE), D2HRING_TXCMPLT_ITEMSIZE,
4660 			ringid);
4661 		if (ret != BCME_OK) {
4662 			DHD_ERROR(("%s: couldn't alloc resources for hp2p txcpl ring\n",
4663 				__FUNCTION__));
4664 			goto err2;
4665 		}
4666 	} else {
4667 		/* for re-entry case, clear inited flag */
4668 		prot->d2hring_hp2p_txcpl->inited = FALSE;
4669 	}
4670 	if (prot->d2hring_hp2p_rxcpl == NULL) {
4671 		prot->d2hring_hp2p_rxcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
4672 
4673 		if (prot->d2hring_hp2p_rxcpl == NULL) {
4674 			DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_rxcpl\n",
4675 				__FUNCTION__));
4676 			return BCME_NOMEM;
4677 		}
4678 
4679 		/* create the hp2p rx completion ring next to hp2p tx compl ring
4680 		* ringid = id next to hp2p tx compl ring
4681 		*/
4682 		ringid = ringid + 1;
4683 
4684 		DHD_INFO(("%s: about to create hp2p rxcpl ring\n", __FUNCTION__));
4685 		ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_rxcpl, "d2hhp2p_rxcpl",
4686 			dhd_bus_get_hp2p_ring_max_size(dhd->bus, FALSE), D2HRING_RXCMPLT_ITEMSIZE,
4687 			ringid);
4688 		if (ret != BCME_OK) {
4689 			DHD_ERROR(("%s: couldn't alloc resources for hp2p rxcpl ring\n",
4690 				__FUNCTION__));
4691 			goto err1;
4692 		}
4693 	} else {
4694 		/* for re-entry case, clear inited flag */
4695 		prot->d2hring_hp2p_rxcpl->inited = FALSE;
4696 	}
4697 
4698 	if (prot->d2hring_hp2p_rxcpl != NULL &&
4699 		prot->d2hring_hp2p_txcpl != NULL) {
4700 		/* dhd_prot_init re-entry after a dhd_prot_reset */
4701 		ret = BCME_OK;
4702 	}
4703 
4704 	return ret;
4705 err1:
4706 	MFREE(prot->osh, prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t));
4707 	prot->d2hring_hp2p_rxcpl = NULL;
4708 
4709 err2:
4710 	MFREE(prot->osh, prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t));
4711 	prot->d2hring_hp2p_txcpl = NULL;
4712 	return ret;
4713 } /* dhd_check_create_hp2p_rings */
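/*
 * Note (illustrative): with hypothetical values max_submission_rings = 18 and
 * max_completion_rings = 20, the HP2P tx completion ring takes ringid
 * 18 + 20 - 2 = 36 and the rx completion ring takes ringid 37, i.e. the last
 * two dynamic ring indices as stated above.
 */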
4714 
4715 int
4716 dhd_prot_init_hp2p_rings(dhd_pub_t *dhd)
4717 {
4718 	dhd_prot_t *prot = dhd->prot;
4719 	int ret = BCME_OK;
4720 
4721 	dhd->hp2p_ring_more = TRUE;
4722 	/* default multiflow not allowed */
4723 	dhd->hp2p_mf_enable = FALSE;
4724 
4725 	if ((ret = dhd_check_create_hp2p_rings(dhd)) != BCME_OK) {
4726 		DHD_ERROR(("%s: hp2p rings aren't created! \n",
4727 			__FUNCTION__));
4728 		return ret;
4729 	}
4730 
4731 	if ((prot->d2hring_hp2p_txcpl->inited) || (prot->d2hring_hp2p_txcpl->create_pending)) {
4732 		DHD_INFO(("hp2p tx completion ring was created!\n"));
4733 		return ret;
4734 	}
4735 
4736 	DHD_TRACE(("trying to send create d2h hp2p txcpl ring: id %d\n",
4737 		prot->d2hring_hp2p_txcpl->idx));
4738 	ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_txcpl,
4739 		BCMPCIE_D2H_RING_TYPE_HPP_TX_CPL, DHD_D2H_HPPRING_TXREQ_PKTID);
4740 	if (ret != BCME_OK)
4741 		return ret;
4742 
4743 	prot->d2hring_hp2p_txcpl->seqnum = D2H_EPOCH_INIT_VAL;
4744 	prot->d2hring_hp2p_txcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4745 
4746 	if ((prot->d2hring_hp2p_rxcpl->inited) || (prot->d2hring_hp2p_rxcpl->create_pending)) {
4747 		DHD_INFO(("hp2p rx completion ring was created!\n"));
4748 		return ret;
4749 	}
4750 
4751 	DHD_TRACE(("trying to send create d2h hp2p rxcpl ring: id %d\n",
4752 		prot->d2hring_hp2p_rxcpl->idx));
4753 	ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_rxcpl,
4754 		BCMPCIE_D2H_RING_TYPE_HPP_RX_CPL, DHD_D2H_HPPRING_RXREQ_PKTID);
4755 	if (ret != BCME_OK)
4756 		return ret;
4757 
4758 	prot->d2hring_hp2p_rxcpl->seqnum = D2H_EPOCH_INIT_VAL;
4759 	prot->d2hring_hp2p_rxcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4760 
4761 	/* Note that there is no way to delete a d2h or h2d ring once it has been created,
4762 	 * so we cannot clean up if one ring was created while the other failed
4763 	 */
4764 	return BCME_OK;
4765 } /* dhd_prot_init_hp2p_rings */
4766 
4767 static void
4768 dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd)
4769 {
4770 	if (dhd->prot->d2hring_hp2p_txcpl) {
4771 		dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_txcpl);
4772 		MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t));
4773 		dhd->prot->d2hring_hp2p_txcpl = NULL;
4774 	}
4775 	if (dhd->prot->d2hring_hp2p_rxcpl) {
4776 		dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_rxcpl);
4777 		MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t));
4778 		dhd->prot->d2hring_hp2p_rxcpl = NULL;
4779 	}
4780 }
4781 #endif /* DHD_HP2P */
4782 
4783 #ifdef BTLOG
4784 static int
4785 dhd_check_create_btlog_rings(dhd_pub_t *dhd)
4786 {
4787 	dhd_prot_t *prot = dhd->prot;
4788 	int ret = BCME_ERROR;
4789 	uint16 ringid;
4790 
4791 	if (dhd->submit_count_WAR) {
4792 		ringid = dhd->bus->max_tx_flowrings + BCMPCIE_COMMON_MSGRINGS + 2;
4793 	} else {
4794 		/* ringid is one less than the ringid assigned by dhd_check_create_info_rings */
4795 		ringid = dhd->bus->max_tx_flowrings +
4796 			(dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
4797 			BCMPCIE_H2D_COMMON_MSGRINGS - 1;
4798 	}
4799 
4800 	if (prot->d2hring_btlog_cpln) {
4801 		/* for re-entry case, clear inited flag */
4802 		prot->d2hring_btlog_cpln->inited = FALSE;
4803 	}
4804 
4805 	if (prot->h2dring_btlog_subn && prot->d2hring_btlog_cpln) {
4806 		return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
4807 	}
4808 
4809 	if (prot->h2dring_btlog_subn == NULL) {
4810 		prot->h2dring_btlog_subn = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
4811 
4812 		if (prot->h2dring_btlog_subn == NULL) {
4813 			DHD_ERROR(("%s: couldn't alloc memory for h2dring_btlog_subn\n",
4814 				__FUNCTION__));
4815 			return BCME_NOMEM;
4816 		}
4817 
4818 		DHD_INFO(("%s: about to create debug submit ring\n", __FUNCTION__));
4819 		ret = dhd_prot_ring_attach(dhd, prot->h2dring_btlog_subn, "h2dbtlog",
4820 			H2DRING_DYNAMIC_INFO_MAX_ITEM, H2DRING_INFO_BUFPOST_ITEMSIZE,
4821 			ringid);
4822 		if (ret != BCME_OK) {
4823 			DHD_ERROR(("%s: couldn't alloc resources for dbg submit ring\n",
4824 				__FUNCTION__));
4825 			goto err;
4826 		}
4827 	}
4828 
4829 	if (prot->d2hring_btlog_cpln == NULL) {
4830 		prot->d2hring_btlog_cpln = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
4831 
4832 		if (prot->d2hring_btlog_cpln == NULL) {
4833 			DHD_ERROR(("%s: couldn't alloc memory for d2hring_btlog_cpln\n",
4834 				__FUNCTION__));
4835 			return BCME_NOMEM;
4836 		}
4837 
4838 		if (dhd->submit_count_WAR) {
4839 			ringid = ringid + 1;
4840 		} else {
4841 			/* advance ringid past BTLOG submit ring and INFO submit and cmplt rings */
4842 			ringid = ringid + 3;
4843 		}
4844 
4845 		DHD_INFO(("%s: about to create debug cpl ring\n", __FUNCTION__));
4846 		ret = dhd_prot_ring_attach(dhd, prot->d2hring_btlog_cpln, "d2hbtlog",
4847 			D2HRING_DYNAMIC_INFO_MAX_ITEM, D2HRING_INFO_BUFCMPLT_ITEMSIZE,
4848 			ringid);
4849 		if (ret != BCME_OK) {
4850 			DHD_ERROR(("%s: couldn't alloc resources for dbg cpl ring\n",
4851 				__FUNCTION__));
4852 			dhd_prot_ring_detach(dhd, prot->h2dring_btlog_subn);
4853 			goto err;
4854 		}
4855 	}
4856 
4857 	return ret;
4858 err:
4859 	MFREE(prot->osh, prot->h2dring_btlog_subn, sizeof(msgbuf_ring_t));
4860 
4861 	if (prot->d2hring_btlog_cpln) {
4862 		MFREE(prot->osh, prot->d2hring_btlog_cpln, sizeof(msgbuf_ring_t));
4863 	}
4864 	return ret;
4865 } /* dhd_check_create_btlog_rings */
4866 
4867 int
4868 dhd_prot_init_btlog_rings(dhd_pub_t *dhd)
4869 {
4870 	dhd_prot_t *prot = dhd->prot;
4871 	int ret = BCME_OK;
4872 
4873 	if ((ret = dhd_check_create_btlog_rings(dhd)) != BCME_OK) {
4874 		DHD_ERROR(("%s: btlog rings aren't created! \n",
4875 			__FUNCTION__));
4876 		return ret;
4877 	}
4878 
4879 	if ((prot->d2hring_btlog_cpln->inited) || (prot->d2hring_btlog_cpln->create_pending)) {
4880 		DHD_INFO(("BTLOG completion ring was created!\n"));
4881 		return ret;
4882 	}
4883 
4884 	DHD_ERROR(("trying to send create d2h btlog ring: id %d\n", prot->d2hring_btlog_cpln->idx));
4885 	ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_btlog_cpln,
4886 		BCMPCIE_D2H_RING_TYPE_BTLOG_CPL, DHD_D2H_BTLOGRING_REQ_PKTID);
4887 	if (ret != BCME_OK)
4888 		return ret;
4889 
4890 	prot->h2dring_btlog_subn->seqnum = H2D_EPOCH_INIT_VAL;
4891 	prot->h2dring_btlog_subn->current_phase = 0;
4892 	prot->d2hring_btlog_cpln->seqnum = D2H_EPOCH_INIT_VAL;
4893 	prot->d2hring_btlog_cpln->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4894 
4895 	DHD_ERROR(("trying to send create h2d btlog ring id %d\n", prot->h2dring_btlog_subn->idx));
4896 	prot->h2dring_btlog_subn->n_completion_ids = 1;
4897 	prot->h2dring_btlog_subn->compeltion_ring_ids[0] = prot->d2hring_btlog_cpln->idx;
4898 
4899 	ret = dhd_send_h2d_ringcreate(dhd, prot->h2dring_btlog_subn,
4900 		BCMPCIE_H2D_RING_TYPE_BTLOG_SUBMIT, DHD_H2D_BTLOGRING_REQ_PKTID);
4901 
4902 	/* Note that there is no way to delete a d2h or h2d ring once it has been created,
4903 	 * so we cannot clean up if one ring was created while the other failed
4904 	 */
4905 	return ret;
4906 } /* dhd_prot_init_btlog_rings */
4907 
4908 static void
4909 dhd_prot_detach_btlog_rings(dhd_pub_t *dhd)
4910 {
4911 	if (dhd->prot->h2dring_btlog_subn) {
4912 		dhd_prot_ring_detach(dhd, dhd->prot->h2dring_btlog_subn);
4913 		MFREE(dhd->prot->osh, dhd->prot->h2dring_btlog_subn, sizeof(msgbuf_ring_t));
4914 	}
4915 	if (dhd->prot->d2hring_btlog_cpln) {
4916 		dhd_prot_ring_detach(dhd, dhd->prot->d2hring_btlog_cpln);
4917 		MFREE(dhd->prot->osh, dhd->prot->d2hring_btlog_cpln, sizeof(msgbuf_ring_t));
4918 	}
4919 }
4920 #endif	/* BTLOG */
4921 
4922 #ifdef EWP_EDL
4923 static int
4924 dhd_check_create_edl_rings(dhd_pub_t *dhd)
4925 {
4926 	dhd_prot_t *prot = dhd->prot;
4927 	int ret = BCME_ERROR;
4928 	uint16 ringid;
4929 
4930 #ifdef BTLOG
4931 	if (dhd->submit_count_WAR) {
4932 		ringid = dhd->bus->max_tx_flowrings + BCMPCIE_COMMON_MSGRINGS;
4933 	} else
4934 #endif	/* BTLOG */
4935 	{
4936 		/* dongle may increase max_submission_rings so keep
4937 		 * ringid at end of dynamic rings (re-use info ring cpl ring id)
4938 		 */
4939 		ringid = dhd->bus->max_tx_flowrings +
4940 			(dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
4941 			BCMPCIE_H2D_COMMON_MSGRINGS + 1;
4942 	}
4943 
4944 	if (prot->d2hring_edl) {
4945 		prot->d2hring_edl->inited = FALSE;
4946 		return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
4947 	}
4948 
4949 	if (prot->d2hring_edl == NULL) {
4950 		prot->d2hring_edl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
4951 
4952 		if (prot->d2hring_edl == NULL) {
4953 			DHD_ERROR(("%s: couldn't alloc memory for d2hring_edl\n",
4954 				__FUNCTION__));
4955 			return BCME_NOMEM;
4956 		}
4957 
4958 		DHD_ERROR(("%s: about to create EDL ring, ringid: %u \n", __FUNCTION__,
4959 			ringid));
4960 		ret = dhd_prot_ring_attach(dhd, prot->d2hring_edl, "d2hring_edl",
4961 			D2HRING_EDL_MAX_ITEM, D2HRING_EDL_ITEMSIZE,
4962 			ringid);
4963 		if (ret != BCME_OK) {
4964 			DHD_ERROR(("%s: couldn't alloc resources for EDL ring\n",
4965 				__FUNCTION__));
4966 			goto err;
4967 		}
4968 	}
4969 
4970 	return ret;
4971 err:
4972 	MFREE(prot->osh, prot->d2hring_edl, sizeof(msgbuf_ring_t));
4973 	prot->d2hring_edl = NULL;
4974 
4975 	return ret;
4976 } /* dhd_check_create_edl_rings */
4977 
4978 int
4979 dhd_prot_init_edl_rings(dhd_pub_t *dhd)
4980 {
4981 	dhd_prot_t *prot = dhd->prot;
4982 	int ret = BCME_ERROR;
4983 
4984 	if ((ret = dhd_check_create_edl_rings(dhd)) != BCME_OK) {
4985 		DHD_ERROR(("%s: EDL rings aren't created! \n",
4986 			__FUNCTION__));
4987 		return ret;
4988 	}
4989 
4990 	if ((prot->d2hring_edl->inited) || (prot->d2hring_edl->create_pending)) {
4991 		DHD_INFO(("EDL completion ring was created!\n"));
4992 		return ret;
4993 	}
4994 
4995 	DHD_ERROR(("trying to send create d2h edl ring: idx %d\n", prot->d2hring_edl->idx));
4996 	ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_edl,
4997 		BCMPCIE_D2H_RING_TYPE_EDL, DHD_D2H_DBGRING_REQ_PKTID);
4998 	if (ret != BCME_OK)
4999 		return ret;
5000 
5001 	prot->d2hring_edl->seqnum = D2H_EPOCH_INIT_VAL;
5002 	prot->d2hring_edl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
5003 
5004 	return BCME_OK;
5005 } /* dhd_prot_init_edl_rings */
5006 
5007 static void
5008 dhd_prot_detach_edl_rings(dhd_pub_t *dhd)
5009 {
5010 	if (dhd->prot->d2hring_edl) {
5011 		dhd_prot_ring_detach(dhd, dhd->prot->d2hring_edl);
5012 		MFREE(dhd->prot->osh, dhd->prot->d2hring_edl, sizeof(msgbuf_ring_t));
5013 		dhd->prot->d2hring_edl = NULL;
5014 	}
5015 }
5016 #endif	/* EWP_EDL */
5017 
5018 /**
5019  * Initialize protocol: sync w/dongle state.
5020  * Sets dongle media info (iswl, drv_version, mac address).
5021  */
5022 int dhd_sync_with_dongle(dhd_pub_t *dhd)
5023 {
5024 	int ret = 0;
5025 	uint len = 0;
5026 	wlc_rev_info_t revinfo;
5027 	char buf[128];
5028 	dhd_prot_t *prot = dhd->prot;
5029 
5030 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5031 
5032 	dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
5033 
5034 	/* Post ts buffer after shim layer is attached */
5035 	ret = dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
5036 
5037 	/* query for 'wlc_ver' to get version info from firmware */
5038 	/* memsetting to zero */
5039 	bzero(buf, sizeof(buf));
5040 	len = bcm_mkiovar("wlc_ver", NULL, 0, buf, sizeof(buf));
5041 	if (len == 0) {
5042 		DHD_ERROR(("%s failed in calling bcm_mkiovar %u\n", __FUNCTION__, len));
5043 		ret = BCME_ERROR;
5044 		goto done;
5045 	}
5046 	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
5047 	if (ret < 0) {
5048 		DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
5049 	} else {
5050 		dhd->wlc_ver_major = ((wl_wlc_version_t*)buf)->wlc_ver_major;
5051 		dhd->wlc_ver_minor = ((wl_wlc_version_t*)buf)->wlc_ver_minor;
5052 	}
5053 
5054 	DHD_ERROR(("wlc_ver_major %d, wlc_ver_minor %d\n", dhd->wlc_ver_major, dhd->wlc_ver_minor));
5055 #ifndef OEM_ANDROID
5056 	/* Get the device MAC address */
5057 	bzero(buf, sizeof(buf));
5058 	strlcpy(buf, "cur_etheraddr", sizeof(buf));
5059 	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
5060 	if (ret < 0) {
5061 		DHD_ERROR(("%s: GET iovar cur_etheraddr FAILED\n", __FUNCTION__));
5062 		goto done;
5063 	}
5064 	memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
5065 	if (dhd_msg_level & DHD_INFO_VAL) {
5066 		bcm_print_bytes("CUR_ETHERADDR ", (uchar *)buf, ETHER_ADDR_LEN);
5067 	}
5068 #endif /* OEM_ANDROID */
5069 
5070 #ifdef DHD_FW_COREDUMP
5071 	/* Check the memdump capability */
5072 	dhd_get_memdump_info(dhd);
5073 #endif /* DHD_FW_COREDUMP */
5074 #ifdef BCMASSERT_LOG
5075 	dhd_get_assert_info(dhd);
5076 #endif /* BCMASSERT_LOG */
5077 
5078 	/* Get the device rev info */
5079 	memset(&revinfo, 0, sizeof(revinfo));
5080 	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
5081 	if (ret < 0) {
5082 		DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__));
5083 		goto done;
5084 	}
5085 	DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
5086 		revinfo.deviceid, revinfo.vendorid, revinfo.chipnum));
5087 
5088 	/* Get the RxBuf post size */
5089 	/* Use default value in case of failure */
5090 	prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
5091 	memset(buf, 0, sizeof(buf));
5092 	len = bcm_mkiovar("rxbufpost_sz", NULL, 0, buf, sizeof(buf));
5093 	if (len == 0) {
5094 		DHD_ERROR(("%s failed in calling bcm_mkiovar %u\n", __FUNCTION__, len));
5095 	} else {
5096 		ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
5097 		if (ret < 0) {
5098 			DHD_ERROR(("%s: GET RxBuf post FAILED, use default %d\n",
5099 				__FUNCTION__, DHD_FLOWRING_RX_BUFPOST_PKTSZ));
5100 		} else {
5101 			if (memcpy_s(&(prot->rxbufpost_sz), sizeof(prot->rxbufpost_sz),
5102 					buf, sizeof(uint16)) != BCME_OK) {
5103 				DHD_ERROR(("%s: rxbufpost_sz memcpy failed\n", __FUNCTION__));
5104 			}
5105 
5106 			if (prot->rxbufpost_sz > DHD_FLOWRING_RX_BUFPOST_PKTSZ_MAX) {
5107 				DHD_ERROR(("%s: Invalid RxBuf post size : %d, default to %d\n",
5108 					__FUNCTION__, prot->rxbufpost_sz,
5109 					DHD_FLOWRING_RX_BUFPOST_PKTSZ));
5110 					prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
5111 			} else {
5112 				DHD_ERROR(("%s: RxBuf Post : %d\n",
5113 					__FUNCTION__, prot->rxbufpost_sz));
5114 			}
5115 		}
5116 	}
5117 
5118 	/* Post buffers for packet reception */
5119 	dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
5120 
5121 	DHD_SSSR_DUMP_INIT(dhd);
5122 
5123 	dhd_process_cid_mac(dhd, TRUE);
5124 	ret = dhd_preinit_ioctls(dhd);
5125 	dhd_process_cid_mac(dhd, FALSE);
5126 #if defined(DHD_SDTC_ETB_DUMP)
5127 	dhd_sdtc_etb_init(dhd);
5128 #endif /* DHD_SDTC_ETB_DUMP */
5129 #if defined(DHD_H2D_LOG_TIME_SYNC)
5130 #ifdef DHD_HP2P
5131 	if (FW_SUPPORTED(dhd, h2dlogts) || dhd->hp2p_capable)
5132 #else
5133 	if (FW_SUPPORTED(dhd, h2dlogts))
5134 #endif // endif
5135 	{
5136 #ifdef DHD_HP2P
5137 		if (dhd->hp2p_enable) {
5138 			dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH / 40;
5139 		} else {
5140 			dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH;
5141 		}
5142 #else
5143 		dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH;
5144 #endif /* DHD_HP2P */
5145 		dhd->bus->dhd_rte_time_sync_count = OSL_SYSUPTIME_US();
5146 		/* This is during initialization. */
5147 		dhd_h2d_log_time_sync(dhd);
5148 	} else {
5149 		dhd->dhd_rte_time_sync_ms = 0;
5150 	}
5151 #endif /* DHD_H2D_LOG_TIME_SYNC */
5152 
5153 #ifdef HOST_SFH_LLC
5154 	if (FW_SUPPORTED(dhd, host_sfhllc)) {
5155 		dhd->host_sfhllc_supported = TRUE;
5156 	} else {
5157 		dhd->host_sfhllc_supported = FALSE;
5158 	}
5159 #endif /* HOST_SFH_LLC */
5160 
5161 	/* Always assumes wl for now */
5162 	dhd->iswl = TRUE;
5163 done:
5164 	return ret;
5165 } /* dhd_sync_with_dongle */
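/*
 * Illustrative sketch (not part of the driver): the "wlc_ver" and
 * "rxbufpost_sz" queries above both follow the same GET-iovar pattern, shown
 * here as a hypothetical helper guarded by a hypothetical macro. The iovar
 * name is encoded with bcm_mkiovar() and the value is read back with a
 * WLC_GET_VAR ioctl; the out/out_len parameters are illustrative.
 */
#ifdef DHD_IOVAR_QUERY_EXAMPLE
static int
dhd_query_iovar_example(dhd_pub_t *dhd, const char *name, void *out, uint out_len)
{
	char buf[128];
	uint len;
	int ret;

	bzero(buf, sizeof(buf));
	len = bcm_mkiovar(name, NULL, 0, buf, sizeof(buf));
	if (len == 0) {
		return BCME_ERROR;
	}

	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
	if (ret < 0) {
		return ret;
	}

	/* The queried value is returned at the start of buf */
	return memcpy_s(out, out_len, buf, out_len);
}
#endif /* DHD_IOVAR_QUERY_EXAMPLE */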
5166 
5167 #define DHD_DBG_SHOW_METADATA	0
5168 
5169 #if DHD_DBG_SHOW_METADATA
5170 static void
5171 BCMFASTPATH(dhd_prot_print_metadata)(dhd_pub_t *dhd, void *ptr, int len)
5172 {
5173 	uint8 tlv_t;
5174 	uint8 tlv_l;
5175 	uint8 *tlv_v = (uint8 *)ptr;
5176 
5177 	if (len <= BCMPCIE_D2H_METADATA_HDRLEN)
5178 		return;
5179 
5180 	len -= BCMPCIE_D2H_METADATA_HDRLEN;
5181 	tlv_v += BCMPCIE_D2H_METADATA_HDRLEN;
5182 
5183 	while (len > TLV_HDR_LEN) {
5184 		tlv_t = tlv_v[TLV_TAG_OFF];
5185 		tlv_l = tlv_v[TLV_LEN_OFF];
5186 
5187 		len -= TLV_HDR_LEN;
5188 		tlv_v += TLV_HDR_LEN;
5189 		if (len < tlv_l)
5190 			break;
5191 		if ((tlv_t == 0) || (tlv_t == WLFC_CTL_TYPE_FILLER))
5192 			break;
5193 
5194 		switch (tlv_t) {
5195 		case WLFC_CTL_TYPE_TXSTATUS: {
5196 			uint32 txs;
5197 			memcpy(&txs, tlv_v, sizeof(uint32));
5198 			if (tlv_l < (sizeof(wl_txstatus_additional_info_t) + sizeof(uint32))) {
5199 				printf("METADATA TX_STATUS: %08x\n", txs);
5200 			} else {
5201 				wl_txstatus_additional_info_t tx_add_info;
5202 				memcpy(&tx_add_info, tlv_v + sizeof(uint32),
5203 					sizeof(wl_txstatus_additional_info_t));
5204 				printf("METADATA TX_STATUS: %08x WLFCTS[%04x | %08x - %08x - %08x]"
5205 					" rate = %08x tries = %d - %d\n", txs,
5206 					tx_add_info.seq, tx_add_info.entry_ts,
5207 					tx_add_info.enq_ts, tx_add_info.last_ts,
5208 					tx_add_info.rspec, tx_add_info.rts_cnt,
5209 					tx_add_info.tx_cnt);
5210 			}
5211 			} break;
5212 
5213 		case WLFC_CTL_TYPE_RSSI: {
5214 			if (tlv_l == 1)
5215 				printf("METADATA RX_RSSI: rssi = %d\n", *tlv_v);
5216 			else
5217 				printf("METADATA RX_RSSI[%04x]: rssi = %d snr = %d\n",
5218 					(*(tlv_v + 3) << 8) | *(tlv_v + 2),
5219 					(int8)(*tlv_v), *(tlv_v + 1));
5220 			} break;
5221 
5222 		case WLFC_CTL_TYPE_FIFO_CREDITBACK:
5223 			bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l);
5224 			break;
5225 
5226 		case WLFC_CTL_TYPE_TX_ENTRY_STAMP:
5227 			bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l);
5228 			break;
5229 
5230 		case WLFC_CTL_TYPE_RX_STAMP: {
5231 			struct {
5232 				uint32 rspec;
5233 				uint32 bus_time;
5234 				uint32 wlan_time;
5235 			} rx_tmstamp;
5236 			memcpy(&rx_tmstamp, tlv_v, sizeof(rx_tmstamp));
5237 			printf("METADATA RX TIMESTMAP: WLFCTS[%08x - %08x] rate = %08x\n",
5238 				rx_tmstamp.wlan_time, rx_tmstamp.bus_time, rx_tmstamp.rspec);
5239 			} break;
5240 
5241 		case WLFC_CTL_TYPE_TRANS_ID:
5242 			bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l);
5243 			break;
5244 
5245 		case WLFC_CTL_TYPE_COMP_TXSTATUS:
5246 			bcm_print_bytes("METADATA COMP_TXSTATUS", tlv_v, tlv_l);
5247 			break;
5248 
5249 		default:
5250 			bcm_print_bytes("METADATA UNKNOWN", tlv_v, tlv_l);
5251 			break;
5252 		}
5253 
5254 		len -= tlv_l;
5255 		tlv_v += tlv_l;
5256 	}
5257 }
5258 #endif /* DHD_DBG_SHOW_METADATA */
5259 
5260 static INLINE void
5261 BCMFASTPATH(dhd_prot_packet_free)(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send)
5262 {
5263 	if (pkt) {
5264 		if (pkttype == PKTTYPE_IOCTL_RX ||
5265 			pkttype == PKTTYPE_EVENT_RX ||
5266 			pkttype == PKTTYPE_INFO_RX ||
5267 			pkttype == PKTTYPE_TSBUF_RX) {
5268 #ifdef DHD_USE_STATIC_CTRLBUF
5269 			PKTFREE_STATIC(dhd->osh, pkt, send);
5270 #else
5271 			PKTFREE(dhd->osh, pkt, send);
5272 #endif /* DHD_USE_STATIC_CTRLBUF */
5273 		} else {
5274 			PKTFREE(dhd->osh, pkt, send);
5275 		}
5276 	}
5277 }
5278 
5279 /**
5280  * dhd_prot_packet_get should be called only for items having pktid_ctrl_map handle
5281  * and all the bottom most functions like dhd_pktid_map_free hold separate DHD_PKTID_LOCK
5282  * to ensure thread safety, so no need to hold any locks for this function
5283  */
5284 static INLINE void *
5285 BCMFASTPATH(dhd_prot_packet_get)(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, bool free_pktid)
5286 {
5287 	void *PKTBUF;
5288 	dmaaddr_t pa;
5289 	uint32 len;
5290 	void *dmah;
5291 	void *secdma;
5292 
5293 #ifdef DHD_PCIE_PKTID
5294 	if (free_pktid) {
5295 		PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map,
5296 			pktid, pa, len, dmah, secdma, pkttype);
5297 	} else {
5298 		PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_ctrl_map,
5299 			pktid, pa, len, dmah, secdma, pkttype);
5300 	}
5301 #else
5302 	PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map, pktid, pa,
5303 		len, dmah, secdma, pkttype);
5304 #endif /* DHD_PCIE_PKTID */
5305 	if (PKTBUF) {
5306 		DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
5307 #ifdef DMAMAP_STATS
5308 		switch (pkttype) {
5309 #ifndef IOCTLRESP_USE_CONSTMEM
5310 			case PKTTYPE_IOCTL_RX:
5311 				dhd->dma_stats.ioctl_rx--;
5312 				dhd->dma_stats.ioctl_rx_sz -= len;
5313 				break;
5314 #endif /* IOCTLRESP_USE_CONSTMEM */
5315 			case PKTTYPE_EVENT_RX:
5316 				dhd->dma_stats.event_rx--;
5317 				dhd->dma_stats.event_rx_sz -= len;
5318 				break;
5319 			case PKTTYPE_INFO_RX:
5320 				dhd->dma_stats.info_rx--;
5321 				dhd->dma_stats.info_rx_sz -= len;
5322 				break;
5323 			case PKTTYPE_TSBUF_RX:
5324 				dhd->dma_stats.tsbuf_rx--;
5325 				dhd->dma_stats.tsbuf_rx_sz -= len;
5326 				break;
5327 		}
5328 #endif /* DMAMAP_STATS */
5329 	}
5330 
5331 	return PKTBUF;
5332 }
5333 
5334 #ifdef IOCTLRESP_USE_CONSTMEM
5335 static INLINE void
5336 BCMFASTPATH(dhd_prot_ioctl_ret_buffer_get)(dhd_pub_t *dhd, uint32 pktid, dhd_dma_buf_t *retbuf)
5337 {
5338 	memset(retbuf, 0, sizeof(dhd_dma_buf_t));
5339 	retbuf->va = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
5340 		retbuf->pa, retbuf->len, retbuf->dmah, retbuf->secdma, PKTTYPE_IOCTL_RX);
5341 
5342 	return;
5343 }
5344 #endif
5345 
5346 #ifdef PCIE_INB_DW
5347 static int
5348 dhd_prot_inc_hostactive_devwake_assert(dhd_bus_t *bus)
5349 {
5350 	unsigned long flags = 0;
5351 
5352 	if (INBAND_DW_ENAB(bus)) {
5353 		DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
5354 		bus->host_active_cnt++;
5355 		DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
5356 		if (dhd_bus_set_device_wake(bus, TRUE) != BCME_OK) {
5357 			DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
5358 			bus->host_active_cnt--;
5359 			dhd_bus_inb_ack_pending_ds_req(bus);
5360 			DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
5361 			return BCME_ERROR;
5362 		}
5363 	}
5364 
5365 	return BCME_OK;
5366 }
5367 
5368 static void
5369 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd_bus_t *bus)
5370 {
5371 	unsigned long flags = 0;
5372 	if (INBAND_DW_ENAB(bus)) {
5373 		DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
5374 		bus->host_active_cnt--;
5375 		dhd_bus_inb_ack_pending_ds_req(bus);
5376 		DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
5377 	}
5378 }
5379 #endif /* PCIE_INB_DW */
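/*
 * Usage note (illustrative): callers bracket ring accesses with the two
 * helpers above, calling dhd_prot_inc_hostactive_devwake_assert() before
 * claiming ring space and dhd_prot_dec_hostactive_ack_pending_dsreq() on
 * every exit path, as seen in dhd_prot_rxbuf_post() and
 * dhd_prot_infobufpost() below.
 */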
5380 
5381 static void
5382 BCMFASTPATH(dhd_msgbuf_rxbuf_post)(dhd_pub_t *dhd, bool use_rsv_pktid)
5383 {
5384 	dhd_prot_t *prot = dhd->prot;
5385 	int16 fillbufs;
5386 	int retcount = 0;
5387 
5388 	fillbufs = prot->max_rxbufpost - prot->rxbufpost;
5389 	while (fillbufs >= RX_BUF_BURST) {
5390 		/* Post in a burst of 32 buffers at a time */
5391 		fillbufs = MIN(fillbufs, RX_BUF_BURST);
5392 
5393 		/* Post buffers */
5394 		retcount = dhd_prot_rxbuf_post(dhd, fillbufs, use_rsv_pktid);
5395 
5396 		if (retcount > 0) {
5397 			prot->rxbufpost += (uint16)retcount;
5398 			/* how many more to post */
5399 			fillbufs = prot->max_rxbufpost - prot->rxbufpost;
5400 		} else {
5401 			/* Make sure we don't run loop any further */
5402 			fillbufs = 0;
5403 		}
5404 	}
5405 }
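/*
 * Note (illustrative): with hypothetical values max_rxbufpost = 256 and
 * rxbufpost = 200, fillbufs starts at 56 and the loop above posts buffers in
 * chunks of RX_BUF_BURST until fewer than RX_BUF_BURST remain or a post
 * attempt returns no buffers.
 */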
5406 
5407 /** Post 'count' no of rx buffers to dongle */
5408 static int
5409 BCMFASTPATH(dhd_prot_rxbuf_post)(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid)
5410 {
5411 	void *p, **pktbuf;
5412 	uint8 *rxbuf_post_tmp;
5413 	host_rxbuf_post_t *rxbuf_post;
5414 	void *msg_start;
5415 	dmaaddr_t pa, *pktbuf_pa;
5416 	uint32 *pktlen;
5417 	uint16 i = 0, alloced = 0;
5418 	unsigned long flags;
5419 	uint32 pktid;
5420 	dhd_prot_t *prot = dhd->prot;
5421 	msgbuf_ring_t *ring = &prot->h2dring_rxp_subn;
5422 	void *lcl_buf;
5423 	uint16 lcl_buf_size;
5424 #ifdef BCM_ROUTER_DHD
5425 	uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ + BCMEXTRAHDROOM;
5426 #else
5427 	uint16 pktsz = prot->rxbufpost_sz;
5428 #endif /* BCM_ROUTER_DHD */
5429 
5430 #ifdef PCIE_INB_DW
5431 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
5432 		return BCME_ERROR;
5433 #endif /* PCIE_INB_DW */
5434 	/* allocate a local buffer to store pkt buffer va, pa and length */
5435 	lcl_buf_size = (sizeof(void *) + sizeof(dmaaddr_t) + sizeof(uint32)) *
5436 		RX_BUF_BURST;
5437 	lcl_buf = MALLOC(dhd->osh, lcl_buf_size);
5438 	if (!lcl_buf) {
5439 		DHD_ERROR(("%s: local scratch buffer allocation failed\n", __FUNCTION__));
5440 #ifdef PCIE_INB_DW
5441 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
5442 #endif
5443 		return 0;
5444 	}
5445 	pktbuf = lcl_buf;
5446 	pktbuf_pa = (dmaaddr_t *)((uint8 *)pktbuf + sizeof(void *) * RX_BUF_BURST);
5447 	pktlen = (uint32 *)((uint8 *)pktbuf_pa + sizeof(dmaaddr_t) * RX_BUF_BURST);
5448 
5449 	for (i = 0; i < count; i++) {
5450 		if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
5451 			DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__));
5452 			dhd->rx_pktgetfail++;
5453 			break;
5454 		}
5455 
5456 #ifdef BCM_ROUTER_DHD
5457 		/* Reserve extra headroom for router builds */
5458 		PKTPULL(dhd->osh, p, BCMEXTRAHDROOM);
5459 #endif /* BCM_ROUTER_DHD */
5460 		pktlen[i] = PKTLEN(dhd->osh, p);
5461 		pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i], DMA_RX, p, 0);
5462 
5463 		if (PHYSADDRISZERO(pa)) {
5464 			PKTFREE(dhd->osh, p, FALSE);
5465 			DHD_ERROR(("Invalid phyaddr 0\n"));
5466 			ASSERT(0);
5467 			break;
5468 		}
5469 #ifdef DMAMAP_STATS
5470 		dhd->dma_stats.rxdata++;
5471 		dhd->dma_stats.rxdata_sz += pktlen[i];
5472 #endif /* DMAMAP_STATS */
5473 
5474 		PKTPULL(dhd->osh, p, prot->rx_metadata_offset);
5475 		pktlen[i] = PKTLEN(dhd->osh, p);
5476 		pktbuf[i] = p;
5477 		pktbuf_pa[i] = pa;
5478 	}
5479 
5480 	/* only post what we have */
5481 	count = i;
5482 
5483 	/* grab the ring lock to allocate pktid and post on ring */
5484 	DHD_RING_LOCK(ring->ring_lock, flags);
5485 
5486 	/* Claim space for exactly 'count' no of messages, for mitigation purpose */
5487 	msg_start = (void *)
5488 		dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE);
5489 	if (msg_start == NULL) {
5490 		DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
5491 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5492 		goto cleanup;
5493 	}
5494 	/* if msg_start != NULL, we should have alloced space for at least 1 item */
5495 	ASSERT(alloced > 0);
5496 
5497 	rxbuf_post_tmp = (uint8*)msg_start;
5498 
5499 	for (i = 0; i < alloced; i++) {
5500 		rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp;
5501 		p = pktbuf[i];
5502 		pa = pktbuf_pa[i];
5503 
5504 		pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_rx_map, p, pa,
5505 			pktlen[i], DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX);
5506 #if defined(DHD_PCIE_PKTID)
5507 		if (pktid == DHD_PKTID_INVALID) {
5508 			break;
5509 		}
5510 #endif /* DHD_PCIE_PKTID */
5511 
5512 #ifdef DHD_HMAPTEST
5513 	if (dhd->prot->hmaptest_rx_active == HMAPTEST_D11_RX_ACTIVE) {
5514 		/* scratchbuf area */
5515 		dhd->prot->hmap_rx_buf_va = (char *)dhd->prot->hmaptest.mem.va
5516 			+ dhd->prot->hmaptest.offset;
5517 
5518 		dhd->prot->hmap_rx_buf_len = pktlen[i] + prot->rx_metadata_offset;
5519 		if ((dhd->prot->hmap_rx_buf_va +  dhd->prot->hmap_rx_buf_len) >
5520 			((char *)dhd->prot->hmaptest.mem.va + dhd->prot->hmaptest.mem.len)) {
5521 			DHD_ERROR(("hmaptest: ERROR Rxpost outside HMAPTEST buffer\n"));
5522 			DHD_ERROR(("hmaptest: NOT Replacing Rx Buffer\n"));
5523 			dhd->prot->hmaptest_rx_active = HMAPTEST_D11_RX_INACTIVE;
5524 			dhd->prot->hmaptest.in_progress = FALSE;
5525 		} else {
5526 			pa = DMA_MAP(dhd->osh, dhd->prot->hmap_rx_buf_va,
5527 				dhd->prot->hmap_rx_buf_len, DMA_RX, p, 0);
5528 
5529 			dhd->prot->hmap_rx_buf_pa = pa;
5530 			dhd->prot->hmaptest_rx_pktid = pktid;
5531 			dhd->prot->hmaptest_rx_active = HMAPTEST_D11_RX_POSTED;
5532 			DHD_ERROR(("hmaptest: d11write rxpost scratch rxbuf pktid=0x%08x\n",
5533 				pktid));
5534 			DHD_ERROR(("hmaptest: d11write rxpost scratch rxbuf va=0x%p pa.lo=0x%08x\n",
5535 				dhd->prot->hmap_rx_buf_va, (uint32)PHYSADDRLO(pa)));
5536 			DHD_ERROR(("hmaptest: d11write rxpost orig pktdata va=0x%p pa.lo=0x%08x\n",
5537 				PKTDATA(dhd->osh, p), (uint32)PHYSADDRLO(pktbuf_pa[i])));
5538 		}
5539 	}
5540 #endif /* DHD_HMAPTEST */
5541 		dhd->prot->tot_rxbufpost++;
5542 		/* Common msg header */
5543 		rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST;
5544 		rxbuf_post->cmn_hdr.if_id = 0;
5545 		rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
5546 		rxbuf_post->cmn_hdr.flags = ring->current_phase;
5547 		ring->seqnum++;
5548 		rxbuf_post->data_buf_len = htol16((uint16)pktlen[i]);
5549 		rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
5550 		rxbuf_post->data_buf_addr.low_addr =
5551 			htol32(PHYSADDRLO(pa) + prot->rx_metadata_offset);
5552 
5553 		if (prot->rx_metadata_offset) {
5554 			rxbuf_post->metadata_buf_len = prot->rx_metadata_offset;
5555 			rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
5556 			rxbuf_post->metadata_buf_addr.low_addr  = htol32(PHYSADDRLO(pa));
5557 		} else {
5558 			rxbuf_post->metadata_buf_len = 0;
5559 			rxbuf_post->metadata_buf_addr.high_addr = 0;
5560 			rxbuf_post->metadata_buf_addr.low_addr  = 0;
5561 		}
5562 
5563 #ifdef DHD_PKTID_AUDIT_RING
5564 		DHD_PKTID_AUDIT(dhd, prot->pktid_rx_map, pktid, DHD_DUPLICATE_ALLOC);
5565 #endif /* DHD_PKTID_AUDIT_RING */
5566 
5567 		rxbuf_post->cmn_hdr.request_id = htol32(pktid);
5568 
5569 		/* Move rxbuf_post_tmp to next item */
5570 		rxbuf_post_tmp = rxbuf_post_tmp + ring->item_len;
5571 #ifdef DHD_LBUF_AUDIT
5572 		PKTAUDIT(dhd->osh, p);
5573 #endif
5574 	}
5575 
5576 	if (i < alloced) {
5577 		if (ring->wr < (alloced - i))
5578 			ring->wr = ring->max_items - (alloced - i);
5579 		else
5580 			ring->wr -= (alloced - i);
5581 
5582 		if (ring->wr == 0) {
5583 			DHD_INFO(("%s: flipping the phase now\n", ring->name));
5584 				ring->current_phase = ring->current_phase ?
5585 				0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
5586 		}
5587 
5588 		alloced = i;
5589 	}
5590 
5591 	/* update ring's WR index and ring doorbell to dongle */
5592 	if (alloced > 0) {
5593 		dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
5594 	}
5595 
5596 	DHD_RING_UNLOCK(ring->ring_lock, flags);
5597 
5598 cleanup:
5599 	for (i = alloced; i < count; i++) {
5600 		p = pktbuf[i];
5601 		pa = pktbuf_pa[i];
5602 
5603 		DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0, DHD_DMAH_NULL);
5604 		PKTFREE(dhd->osh, p, FALSE);
5605 	}
5606 
5607 	MFREE(dhd->osh, lcl_buf, lcl_buf_size);
5608 #ifdef PCIE_INB_DW
5609 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
5610 #endif
5611 
5612 	return alloced;
5613 } /* dhd_prot_rxbuf_post */
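/*
 * Layout note (illustrative): the local scratch buffer allocated in
 * dhd_prot_rxbuf_post() packs three parallel arrays of RX_BUF_BURST entries
 * back to back:
 *
 *   void *pktbuf[RX_BUF_BURST];          packet virtual addresses
 *   dmaaddr_t pktbuf_pa[RX_BUF_BURST];   DMA-mapped physical addresses
 *   uint32 pktlen[RX_BUF_BURST];         mapped lengths
 *
 * so a single MALLOC/MFREE pair covers all of the per-burst bookkeeping.
 */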
5614 
5615 #if !defined(BCM_ROUTER_DHD)
5616 static int
5617 dhd_prot_infobufpost(dhd_pub_t *dhd, msgbuf_ring_t *ring)
5618 {
5619 	unsigned long flags;
5620 	uint32 pktid;
5621 	dhd_prot_t *prot = dhd->prot;
5622 	uint16 alloced = 0;
5623 	uint16 pktsz = DHD_INFOBUF_RX_BUFPOST_PKTSZ;
5624 	uint32 pktlen;
5625 	info_buf_post_msg_t *infobuf_post;
5626 	uint8 *infobuf_post_tmp;
5627 	void *p;
5628 	void* msg_start;
5629 	uint8 i = 0;
5630 	dmaaddr_t pa;
5631 	int16 count = 0;
5632 
5633 	if (ring == NULL)
5634 		return 0;
5635 
5636 	if (ring->inited != TRUE)
5637 		return 0;
5638 	if (ring == dhd->prot->h2dring_info_subn) {
5639 		if (prot->max_infobufpost == 0)
5640 			return 0;
5641 
5642 		count = prot->max_infobufpost - prot->infobufpost;
5643 	}
5644 #ifdef BTLOG
5645 	else if (ring == dhd->prot->h2dring_btlog_subn) {
5646 		if (prot->max_btlogbufpost == 0)
5647 			return 0;
5648 
5649 		pktsz = DHD_BTLOG_RX_BUFPOST_PKTSZ;
5650 		count = prot->max_btlogbufpost - prot->btlogbufpost;
5651 	}
5652 #endif	/* BTLOG */
5653 	else {
5654 		DHD_ERROR(("Unknown ring\n"));
5655 		return 0;
5656 	}
5657 
5658 	if (count <= 0) {
5659 		DHD_INFO(("%s: Cannot post more than max info resp buffers\n",
5660 			__FUNCTION__));
5661 		return 0;
5662 	}
5663 
5664 #ifdef PCIE_INB_DW
5665 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
5666 		return BCME_ERROR;
5667 #endif /* PCIE_INB_DW */
5668 
5669 	/* grab the ring lock to allocate pktid and post on ring */
5670 	DHD_RING_LOCK(ring->ring_lock, flags);
5671 
5672 	/* Claim space for exactly 'count' no of messages, for mitigation purpose */
5673 	msg_start = (void *) dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, FALSE);
5674 
5675 	if (msg_start == NULL) {
5676 		DHD_INFO(("%s:%d: infobufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
5677 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5678 #ifdef PCIE_INB_DW
5679 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
5680 #endif
5681 		return -1;
5682 	}
5683 
5684 	/* if msg_start != NULL, we should have alloced space for at least 1 item */
5685 	ASSERT(alloced > 0);
5686 
5687 	infobuf_post_tmp = (uint8*) msg_start;
5688 
5689 	/* loop through each allocated message in the host ring */
5690 	for (i = 0; i < alloced; i++) {
5691 		infobuf_post = (info_buf_post_msg_t *) infobuf_post_tmp;
5692 		/* Create a rx buffer */
5693 #ifdef DHD_USE_STATIC_CTRLBUF
5694 		p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
5695 #else
5696 		p = PKTGET(dhd->osh, pktsz, FALSE);
5697 #endif /* DHD_USE_STATIC_CTRLBUF */
5698 		if (p == NULL) {
5699 			DHD_ERROR(("%s:%d: PKTGET for infobuf failed\n", __FUNCTION__, __LINE__));
5700 			dhd->rx_pktgetfail++;
5701 			break;
5702 		}
5703 		pktlen = PKTLEN(dhd->osh, p);
5704 		pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
5705 		if (PHYSADDRISZERO(pa)) {
5706 			DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
5707 #ifdef DHD_USE_STATIC_CTRLBUF
5708 			PKTFREE_STATIC(dhd->osh, p, FALSE);
5709 #else
5710 			PKTFREE(dhd->osh, p, FALSE);
5711 #endif /* DHD_USE_STATIC_CTRLBUF */
5712 			DHD_ERROR(("Invalid phyaddr 0\n"));
5713 			ASSERT(0);
5714 			break;
5715 		}
5716 #ifdef DMAMAP_STATS
5717 		dhd->dma_stats.info_rx++;
5718 		dhd->dma_stats.info_rx_sz += pktlen;
5719 #endif /* DMAMAP_STATS */
5720 		pktlen = PKTLEN(dhd->osh, p);
5721 
5722 		/* Common msg header */
5723 		infobuf_post->cmn_hdr.msg_type = MSG_TYPE_INFO_BUF_POST;
5724 		infobuf_post->cmn_hdr.if_id = 0;
5725 		infobuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
5726 		infobuf_post->cmn_hdr.flags = ring->current_phase;
5727 		ring->seqnum++;
5728 
5729 		pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_ctrl_map, p, pa,
5730 			pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_INFO_RX);
5731 
5732 #if defined(DHD_PCIE_PKTID)
5733 		if (pktid == DHD_PKTID_INVALID) {
5734 			DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0);
5735 
5736 #ifdef DHD_USE_STATIC_CTRLBUF
5737 			PKTFREE_STATIC(dhd->osh, p, FALSE);
5738 #else
5739 			PKTFREE(dhd->osh, p, FALSE);
5740 #endif /* DHD_USE_STATIC_CTRLBUF */
5741 			DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
5742 			break;
5743 		}
5744 #endif /* DHD_PCIE_PKTID */
5745 
5746 		infobuf_post->host_buf_len = htol16((uint16)pktlen);
5747 		infobuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
5748 		infobuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
5749 
5750 #ifdef DHD_PKTID_AUDIT_RING
5751 		DHD_PKTID_AUDIT(dhd, prot->pktid_ctrl_map, pktid, DHD_DUPLICATE_ALLOC);
5752 #endif /* DHD_PKTID_AUDIT_RING */
5753 
5754 		DHD_MSGBUF_INFO(("ID %d, low_addr 0x%08x, high_addr 0x%08x\n",
5755 			infobuf_post->cmn_hdr.request_id,  infobuf_post->host_buf_addr.low_addr,
5756 			infobuf_post->host_buf_addr.high_addr));
5757 
5758 		infobuf_post->cmn_hdr.request_id = htol32(pktid);
5759 		/* Move infobuf_post_tmp to next item */
5760 		infobuf_post_tmp = infobuf_post_tmp + ring->item_len;
5761 #ifdef DHD_LBUF_AUDIT
5762 		PKTAUDIT(dhd->osh, p);
5763 #endif
5764 	}
5765 
5766 	if (i < alloced) {
5767 		if (ring->wr < (alloced - i))
5768 			ring->wr = ring->max_items - (alloced - i);
5769 		else
5770 			ring->wr -= (alloced - i);
5771 
5772 		alloced = i;
5773 		if (alloced && ring->wr == 0) {
5774 			DHD_INFO(("%s: flipping the phase now\n", ring->name));
5775 			ring->current_phase = ring->current_phase ?
5776 				0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
5777 		}
5778 	}
5779 
5780 	/* Update the write pointer in TCM & ring bell */
5781 	if (alloced > 0) {
5782 		if (ring == dhd->prot->h2dring_info_subn) {
5783 			prot->infobufpost += alloced;
5784 		}
5785 #ifdef BTLOG
5786 		if (ring == dhd->prot->h2dring_btlog_subn) {
5787 			prot->btlogbufpost += alloced;
5788 		}
5789 #endif	/* BTLOG */
5790 		dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
5791 	}
5792 
5793 	DHD_RING_UNLOCK(ring->ring_lock, flags);
5794 
5795 #ifdef PCIE_INB_DW
5796 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
5797 #endif
5798 	return alloced;
5799 } /* dhd_prot_infobufpost */
5800 #endif /* !BCM_ROUTER_DHD */
5801 
5802 #ifdef IOCTLRESP_USE_CONSTMEM
5803 static int
5804 alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
5805 {
5806 	int err;
5807 	memset(retbuf, 0, sizeof(dhd_dma_buf_t));
5808 
5809 	if ((err = dhd_dma_buf_alloc(dhd, retbuf, IOCT_RETBUF_SIZE)) != BCME_OK) {
5810 		DHD_ERROR(("%s: dhd_dma_buf_alloc err %d\n", __FUNCTION__, err));
5811 		ASSERT(0);
5812 		return BCME_NOMEM;
5813 	}
5814 
5815 	return BCME_OK;
5816 }
5817 
5818 static void
5819 free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
5820 {
5821 	/* retbuf (declared on stack) not fully populated ...  */
5822 	if (retbuf->va) {
5823 		uint32 dma_pad;
5824 		dma_pad = (IOCT_RETBUF_SIZE % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
5825 		retbuf->len = IOCT_RETBUF_SIZE;
5826 		retbuf->_alloced = retbuf->len + dma_pad;
5827 	}
5828 
5829 	dhd_dma_buf_free(dhd, retbuf);
5830 	return;
5831 }
5832 #endif /* IOCTLRESP_USE_CONSTMEM */
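/*
 * Usage note (illustrative): alloc_ioctl_return_buffer() and
 * free_ioctl_return_buffer() are used as a pair. dhd_prot_rxbufpost_ctrl()
 * below allocates the DMA-able ioctl response buffer before posting it and
 * frees it again on any failure path; completed responses are later looked up
 * with dhd_prot_ioctl_ret_buffer_get() and freed by the ioctl completion
 * handling.
 */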
5833 
5834 static int
5835 dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, uint8 msg_type)
5836 {
5837 	void *p;
5838 	uint16 pktsz;
5839 	ioctl_resp_evt_buf_post_msg_t *rxbuf_post;
5840 	dmaaddr_t pa;
5841 	uint32 pktlen;
5842 	dhd_prot_t *prot = dhd->prot;
5843 	uint16 alloced = 0;
5844 	unsigned long flags;
5845 	dhd_dma_buf_t retbuf;
5846 	void *dmah = NULL;
5847 	uint32 pktid;
5848 	void *map_handle;
5849 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
5850 	bool non_ioctl_resp_buf = 0;
5851 	dhd_pkttype_t buf_type;
5852 
5853 	if (dhd->busstate == DHD_BUS_DOWN) {
5854 		DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
5855 		return -1;
5856 	}
5857 	memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
5858 
5859 	if (msg_type == MSG_TYPE_IOCTLRESP_BUF_POST)
5860 		buf_type = PKTTYPE_IOCTL_RX;
5861 	else if (msg_type == MSG_TYPE_EVENT_BUF_POST)
5862 		buf_type = PKTTYPE_EVENT_RX;
5863 	else if (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST)
5864 		buf_type = PKTTYPE_TSBUF_RX;
5865 	else {
5866 		DHD_ERROR(("invalid message type to be posted to Ctrl ring %d\n", msg_type));
5867 		/* XXX: maybe add an assert */
5868 		return -1;
5869 	}
5870 #ifdef PCIE_INB_DW
5871 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) {
5872 		 return BCME_ERROR;
5873 	}
5874 #endif /* PCIE_INB_DW */
5875 
5876 	if ((msg_type == MSG_TYPE_EVENT_BUF_POST) || (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST))
5877 		non_ioctl_resp_buf = TRUE;
5878 	else
5879 		non_ioctl_resp_buf = FALSE;
5880 
5881 	if (non_ioctl_resp_buf) {
5882 		/* Allocate packet for non-ioctl-resp buffer post */
5883 		pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
5884 	} else {
5885 		/* Allocate packet for ctrl/ioctl buffer post */
5886 		pktsz = DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ;
5887 	}
5888 
5889 #ifdef IOCTLRESP_USE_CONSTMEM
5890 	if (!non_ioctl_resp_buf) {
5891 		if (alloc_ioctl_return_buffer(dhd, &retbuf) != BCME_OK) {
5892 			DHD_ERROR(("Could not allocate IOCTL response buffer\n"));
5893 			goto fail;
5894 		}
5895 		ASSERT(retbuf.len == IOCT_RETBUF_SIZE);
5896 		p = retbuf.va;
5897 		pktlen = retbuf.len;
5898 		pa = retbuf.pa;
5899 		dmah = retbuf.dmah;
5900 	} else
5901 #endif /* IOCTLRESP_USE_CONSTMEM */
5902 	{
5903 #ifdef DHD_USE_STATIC_CTRLBUF
5904 		p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
5905 #else
5906 		p = PKTGET(dhd->osh, pktsz, FALSE);
5907 #endif /* DHD_USE_STATIC_CTRLBUF */
5908 		if (p == NULL) {
5909 			DHD_ERROR(("%s:%d: PKTGET for %s buf failed\n",
5910 				__FUNCTION__, __LINE__, non_ioctl_resp_buf ?
5911 				"EVENT" : "IOCTL RESP"));
5912 			dhd->rx_pktgetfail++;
5913 			goto fail;
5914 		}
5915 
5916 		pktlen = PKTLEN(dhd->osh, p);
5917 		pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
5918 
5919 		if (PHYSADDRISZERO(pa)) {
5920 			DHD_ERROR(("Invalid physaddr 0\n"));
5921 			ASSERT(0);
5922 			goto free_pkt_return;
5923 		}
5924 
5925 #ifdef DMAMAP_STATS
5926 		switch (buf_type) {
5927 #ifndef IOCTLRESP_USE_CONSTMEM
5928 			case PKTTYPE_IOCTL_RX:
5929 				dhd->dma_stats.ioctl_rx++;
5930 				dhd->dma_stats.ioctl_rx_sz += pktlen;
5931 				break;
5932 #endif /* !IOCTLRESP_USE_CONSTMEM */
5933 			case PKTTYPE_EVENT_RX:
5934 				dhd->dma_stats.event_rx++;
5935 				dhd->dma_stats.event_rx_sz += pktlen;
5936 				break;
5937 			case PKTTYPE_TSBUF_RX:
5938 				dhd->dma_stats.tsbuf_rx++;
5939 				dhd->dma_stats.tsbuf_rx_sz += pktlen;
5940 				break;
5941 			default:
5942 				break;
5943 		}
5944 #endif /* DMAMAP_STATS */
5945 
5946 	}
5947 
5948 	/* grab the ring lock to allocate pktid and post on ring */
5949 	DHD_RING_LOCK(ring->ring_lock, flags);
5950 
5951 	rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *)
5952 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
5953 
5954 	if (rxbuf_post == NULL) {
5955 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5956 		DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n",
5957 			__FUNCTION__, __LINE__));
5958 
5959 #ifdef IOCTLRESP_USE_CONSTMEM
5960 		if (non_ioctl_resp_buf)
5961 #endif /* IOCTLRESP_USE_CONSTMEM */
5962 		{
5963 			DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
5964 		}
5965 		goto free_pkt_return;
5966 	}
5967 
5968 	/* CMN msg header */
5969 	rxbuf_post->cmn_hdr.msg_type = msg_type;
5970 
5971 #ifdef IOCTLRESP_USE_CONSTMEM
5972 	if (!non_ioctl_resp_buf) {
5973 		map_handle = dhd->prot->pktid_map_handle_ioctl;
5974 		pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen, DMA_RX, dmah,
5975 			ring->dma_buf.secdma, buf_type);
5976 	} else
5977 #endif /* IOCTLRESP_USE_CONSTMEM */
5978 	{
5979 		map_handle = dhd->prot->pktid_ctrl_map;
5980 		pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle,
5981 			p, pa, pktlen, DMA_RX, dmah, ring->dma_buf.secdma,
5982 			buf_type);
5983 	}
5984 
5985 	if (pktid == DHD_PKTID_INVALID) {
5986 		if (ring->wr == 0) {
5987 			ring->wr = ring->max_items - 1;
5988 		} else {
5989 			ring->wr--;
5990 			if (ring->wr == 0) {
5991 				ring->current_phase = ring->current_phase ? 0 :
5992 					BCMPCIE_CMNHDR_PHASE_BIT_INIT;
5993 			}
5994 		}
5995 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5996 		DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
5997 		DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
5998 		goto free_pkt_return;
5999 	}
6000 
6001 #ifdef DHD_PKTID_AUDIT_RING
6002 	DHD_PKTID_AUDIT(dhd, map_handle, pktid, DHD_DUPLICATE_ALLOC);
6003 #endif /* DHD_PKTID_AUDIT_RING */
6004 
6005 	rxbuf_post->cmn_hdr.request_id = htol32(pktid);
6006 	rxbuf_post->cmn_hdr.if_id = 0;
6007 	rxbuf_post->cmn_hdr.epoch =  ring->seqnum % H2D_EPOCH_MODULO;
6008 	ring->seqnum++;
6009 	rxbuf_post->cmn_hdr.flags = ring->current_phase;
6010 
6011 #if defined(DHD_PCIE_PKTID)
6012 	if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
6013 		if (ring->wr == 0) {
6014 			ring->wr = ring->max_items - 1;
6015 		} else {
6016 			if (ring->wr == 0) {
6017 				ring->current_phase = ring->current_phase ? 0 :
6018 					BCMPCIE_CMNHDR_PHASE_BIT_INIT;
6019 			}
6020 		}
6021 		DHD_RING_UNLOCK(ring->ring_lock, flags);
6022 #ifdef IOCTLRESP_USE_CONSTMEM
6023 		if (non_ioctl_resp_buf)
6024 #endif /* IOCTLRESP_USE_CONSTMEM */
6025 		{
6026 			DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
6027 		}
6028 		goto free_pkt_return;
6029 	}
6030 #endif /* DHD_PCIE_PKTID */
6031 
6032 #ifndef IOCTLRESP_USE_CONSTMEM
6033 	rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p));
6034 #else
6035 	rxbuf_post->host_buf_len = htol16((uint16)pktlen);
6036 #endif /* IOCTLRESP_USE_CONSTMEM */
6037 	rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
6038 	rxbuf_post->host_buf_addr.low_addr  = htol32(PHYSADDRLO(pa));
6039 #ifdef DHD_LBUF_AUDIT
6040 	if (non_ioctl_resp_buf)
6041 		PKTAUDIT(dhd->osh, p);
6042 #endif
6043 	/* update ring's WR index and ring doorbell to dongle */
6044 	dhd_prot_ring_write_complete(dhd, ring, rxbuf_post, 1);
6045 
6046 	DHD_RING_UNLOCK(ring->ring_lock, flags);
6047 
6048 #ifdef PCIE_INB_DW
6049 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
6050 #endif
6051 	return 1;
6052 
6053 free_pkt_return:
6054 	if (!non_ioctl_resp_buf) {
6055 #ifdef IOCTLRESP_USE_CONSTMEM
6056 		free_ioctl_return_buffer(dhd, &retbuf);
6057 #else
6058 		dhd_prot_packet_free(dhd, p, buf_type, FALSE);
6059 #endif /* IOCTLRESP_USE_CONSTMEM */
6060 	} else {
6061 		dhd_prot_packet_free(dhd, p, buf_type, FALSE);
6062 	}
6063 
6064 fail:
6065 #ifdef PCIE_INB_DW
6066 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
6067 #endif
6068 	return -1;
6069 } /* dhd_prot_rxbufpost_ctrl */
6070 
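/*
 * Editorial sketch (not part of the driver build): the write-index rollback
 * used above in dhd_prot_rxbufpost_ctrl() when pktid allocation fails after a
 * ring slot has already been claimed. The helper name and parameters are
 * hypothetical; BCMPCIE_CMNHDR_PHASE_BIT_INIT is the phase constant the real
 * code toggles. Conceptually this is a modular decrement of the write index.
 */
static uint16
example_rxpost_wr_rollback(uint16 wr, uint16 max_items, uint8 *current_phase)
{
	if (wr == 0) {
		/* the claimed slot was index 0; give back the last slot of the ring */
		wr = max_items - 1;
	} else {
		wr--;
		if (wr == 0) {
			/* the real code flips the phase bit whenever the rolled-back
			 * index lands on slot 0, mirroring the toggle applied on the
			 * forward wrap
			 */
			*current_phase = *current_phase ? 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
		}
	}
	return wr;
}
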
6071 static uint16
6072 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msg_type, uint32 max_to_post)
6073 {
6074 	uint32 i = 0;
6075 	int32 ret_val;
6076 
6077 	DHD_MSGBUF_INFO(("max to post %d, event %d \n", max_to_post, msg_type));
6078 
6079 	if (dhd->busstate == DHD_BUS_DOWN) {
6080 		DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
6081 		return 0;
6082 	}
6083 
6084 	while (i < max_to_post) {
6085 		ret_val  = dhd_prot_rxbufpost_ctrl(dhd, msg_type);
6086 		if (ret_val < 0)
6087 			break;
6088 		i++;
6089 	}
6090 	DHD_MSGBUF_INFO(("posted %d buffers of type %d\n", i, msg_type));
6091 	return (uint16)i;
6092 }
6093 
6094 static void
6095 dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd)
6096 {
6097 	dhd_prot_t *prot = dhd->prot;
6098 	int max_to_post;
6099 
6100 	DHD_MSGBUF_INFO(("ioctl resp buf post\n"));
6101 	max_to_post = prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted;
6102 	if (max_to_post <= 0) {
6103 		DHD_INFO(("%s: Cannot post more than max IOCTL resp buffers\n",
6104 			__FUNCTION__));
6105 		return;
6106 	}
6107 	prot->cur_ioctlresp_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
6108 		MSG_TYPE_IOCTLRESP_BUF_POST, max_to_post);
6109 }
6110 
6111 static void
6112 dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd)
6113 {
6114 	dhd_prot_t *prot = dhd->prot;
6115 	int max_to_post;
6116 
6117 	max_to_post = prot->max_eventbufpost - prot->cur_event_bufs_posted;
6118 	if (max_to_post <= 0) {
6119 		DHD_ERROR(("%s: Cannot post more than max event buffers\n",
6120 			__FUNCTION__));
6121 		return;
6122 	}
6123 	prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
6124 		MSG_TYPE_EVENT_BUF_POST, max_to_post);
6125 }
6126 
6127 static int
6128 dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *dhd)
6129 {
6130 #ifdef DHD_TIMESYNC
6131 	dhd_prot_t *prot = dhd->prot;
6132 	int max_to_post;
6133 
6134 	if (prot->active_ipc_version < 7) {
6135 		DHD_ERROR(("no ts buffers to device, ipc rev is %d, needs to be at least 7\n",
6136 			prot->active_ipc_version));
6137 		return 0;
6138 	}
6139 
6140 	max_to_post = prot->max_tsbufpost - prot->cur_ts_bufs_posted;
6141 	if (max_to_post <= 0) {
6142 		DHD_INFO(("%s: Cannot post more than max ts buffers\n",
6143 			__FUNCTION__));
6144 		return 0;
6145 	}
6146 
6147 	prot->cur_ts_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
6148 		MSG_TYPE_TIMSTAMP_BUFPOST, max_to_post);
6149 #endif /* DHD_TIMESYNC */
6150 	return 0;
6151 }
6152 
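/*
 * Editorial sketch (not part of the driver build): the "post up to the
 * configured ceiling" bookkeeping shared by the ioctl-response, event and
 * timestamp buffer posters above. 'example_post_bufs' and its 'post_fn'
 * callback are hypothetical stand-ins for dhd_msgbuf_rxbuf_post_ctrlpath();
 * only the buffers actually posted are added to the running count, since the
 * poster may stop early (e.g. when the submit ring is full).
 */
typedef uint16 (*example_post_fn_t)(void *ctx, uint32 max_to_post);

static void
example_post_bufs(void *ctx, uint16 max_post, uint16 *cur_posted,
	example_post_fn_t post_fn)
{
	int max_to_post = max_post - *cur_posted;

	if (max_to_post <= 0) {
		return; /* already at the ceiling; nothing to replenish */
	}
	*cur_posted += post_fn(ctx, (uint32)max_to_post);
}
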
6153 bool
6154 BCMFASTPATH(dhd_prot_process_msgbuf_infocpl)(dhd_pub_t *dhd, uint bound)
6155 {
6156 	dhd_prot_t *prot = dhd->prot;
6157 	bool more = TRUE;
6158 	uint n = 0;
6159 	msgbuf_ring_t *ring = prot->d2hring_info_cpln;
6160 	unsigned long flags;
6161 
6162 	if (ring == NULL)
6163 		return FALSE;
6164 	if (ring->inited != TRUE)
6165 		return FALSE;
6166 
6167 	/* Process all the messages - DTOH direction */
6168 	while (!dhd_is_device_removed(dhd)) {
6169 		uint8 *msg_addr;
6170 		uint32 msg_len;
6171 
6172 		if (dhd->hang_was_sent) {
6173 			more = FALSE;
6174 			break;
6175 		}
6176 
6177 		if (dhd->smmu_fault_occurred) {
6178 			more = FALSE;
6179 			break;
6180 		}
6181 
6182 		DHD_RING_LOCK(ring->ring_lock, flags);
6183 		/* Get the message from ring */
6184 		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
6185 		DHD_RING_UNLOCK(ring->ring_lock, flags);
6186 		if (msg_addr == NULL) {
6187 			more = FALSE;
6188 			break;
6189 		}
6190 
6191 		/* Prefetch data to populate the cache */
6192 		OSL_PREFETCH(msg_addr);
6193 
6194 		if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
6195 			DHD_ERROR(("%s: Error processing rxpl msgbuf of len %d\n",
6196 				__FUNCTION__, msg_len));
6197 		}
6198 
6199 		/* Update read pointer */
6200 		dhd_prot_upd_read_idx(dhd, ring);
6201 
6202 		/* After batch processing, check RX bound */
6203 		n += msg_len / ring->item_len;
6204 		if (n >= bound) {
6205 			break;
6206 		}
6207 	}
6208 
6209 	return more;
6210 }
6211 
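/*
 * Editorial sketch (not part of the driver build): how a bounded completion
 * processor such as dhd_prot_process_msgbuf_infocpl() above is typically
 * driven from dispatch context. 'example_dispatch_info_cpl' and
 * 'EXAMPLE_INFORING_BOUND' are hypothetical; the real bound value and call
 * site live in the bus layer.
 */
#define EXAMPLE_INFORING_BOUND	32u

static void
example_dispatch_info_cpl(dhd_pub_t *dhd)
{
	/* process at most EXAMPLE_INFORING_BOUND work items in this pass;
	 * a TRUE return means more items are queued, so the dispatcher
	 * should be re-armed instead of looping here indefinitely
	 */
	if (dhd_prot_process_msgbuf_infocpl(dhd, EXAMPLE_INFORING_BOUND)) {
		/* re-schedule the dispatcher / DPC here */
	}
}
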
6212 #ifdef BTLOG
6213 bool
6214 BCMFASTPATH(dhd_prot_process_msgbuf_btlogcpl)(dhd_pub_t *dhd, uint bound)
6215 {
6216 	dhd_prot_t *prot = dhd->prot;
6217 	bool more = TRUE;
6218 	uint n = 0;
6219 	msgbuf_ring_t *ring = prot->d2hring_btlog_cpln;
6220 
6221 	if (ring == NULL)
6222 		return FALSE;
6223 	if (ring->inited != TRUE)
6224 		return FALSE;
6225 
6226 	/* Process all the messages - DTOH direction */
6227 	while (!dhd_is_device_removed(dhd)) {
6228 		uint8 *msg_addr;
6229 		uint32 msg_len;
6230 
6231 		if (dhd_query_bus_erros(dhd)) {
6232 			more = FALSE;
6233 			break;
6234 		}
6235 
6236 		if (dhd->hang_was_sent) {
6237 			more = FALSE;
6238 			break;
6239 		}
6240 
6241 		if (dhd->smmu_fault_occurred) {
6242 			more = FALSE;
6243 			break;
6244 		}
6245 
6246 		/* Get the message from ring */
6247 		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
6248 		if (msg_addr == NULL) {
6249 			more = FALSE;
6250 			break;
6251 		}
6252 
6253 		/* Prefetch data to populate the cache */
6254 		OSL_PREFETCH(msg_addr);
6255 
6256 		if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
6257 			DHD_ERROR(("%s: Error processing rxpl msgbuf of len %d\n",
6258 				__FUNCTION__, msg_len));
6259 		}
6260 
6261 		/* Update read pointer */
6262 		dhd_prot_upd_read_idx(dhd, ring);
6263 
6264 		/* After batch processing, check RX bound */
6265 		n += msg_len / ring->item_len;
6266 		if (n >= bound) {
6267 			break;
6268 		}
6269 	}
6270 
6271 	return more;
6272 }
6273 #endif	/* BTLOG */
6274 
6275 #ifdef EWP_EDL
6276 bool
6277 dhd_prot_process_msgbuf_edl(dhd_pub_t *dhd)
6278 {
6279 	dhd_prot_t *prot = dhd->prot;
6280 	msgbuf_ring_t *ring = prot->d2hring_edl;
6281 	unsigned long flags = 0;
6282 	uint32 items = 0;
6283 	uint16 rd = 0;
6284 	uint16 depth = 0;
6285 
6286 	if (ring == NULL)
6287 		return FALSE;
6288 	if (ring->inited != TRUE)
6289 		return FALSE;
6290 	if (ring->item_len == 0) {
6291 		DHD_ERROR(("%s: Bad ring ! ringidx %d, item_len %d \n",
6292 			__FUNCTION__, ring->idx, ring->item_len));
6293 		return FALSE;
6294 	}
6295 
6296 	if (dhd_query_bus_erros(dhd)) {
6297 		return FALSE;
6298 	}
6299 
6300 	if (dhd->hang_was_sent) {
6301 		return FALSE;
6302 	}
6303 
6304 	/* in this DPC context just check if wr index has moved
6305 	 * and schedule deferred context to actually process the
6306 	 * work items.
6307 	*/
6308 
6309 	/* update the write index */
6310 	DHD_RING_LOCK(ring->ring_lock, flags);
6311 	if (dhd->dma_d2h_ring_upd_support) {
6312 		/* DMAing write/read indices supported */
6313 		ring->wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
6314 	} else {
6315 		dhd_bus_cmn_readshared(dhd->bus, &ring->wr, RING_WR_UPD, ring->idx);
6316 	}
6317 	rd = ring->rd;
6318 	DHD_RING_UNLOCK(ring->ring_lock, flags);
6319 
6320 	depth = ring->max_items;
6321 	/* check for avail space, in number of ring items */
6322 	items = READ_AVAIL_SPACE(ring->wr, rd, depth);
6323 	if (items == 0) {
6324 		/* no work items in edl ring */
6325 		return FALSE;
6326 	}
6327 	if (items > ring->max_items) {
6328 		DHD_ERROR(("\r\n======================= \r\n"));
6329 		DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
6330 			__FUNCTION__, ring, ring->name, ring->max_items, items));
6331 		DHD_ERROR(("wr: %d,  rd: %d,  depth: %d  \r\n",
6332 			ring->wr, ring->rd, depth));
6333 		DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
6334 			dhd->busstate, dhd->bus->wait_for_d3_ack));
6335 		DHD_ERROR(("\r\n======================= \r\n"));
6336 #ifdef SUPPORT_LINKDOWN_RECOVERY
6337 		if (ring->wr >= ring->max_items) {
6338 			dhd->bus->read_shm_fail = TRUE;
6339 		}
6340 #else
6341 #ifdef DHD_FW_COREDUMP
6342 		if (dhd->memdump_enabled) {
6343 			/* collect core dump */
6344 			dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
6345 			dhd_bus_mem_dump(dhd);
6346 
6347 		}
6348 #endif /* DHD_FW_COREDUMP */
6349 #endif /* SUPPORT_LINKDOWN_RECOVERY */
6350 		dhd_schedule_reset(dhd);
6351 
6352 		return FALSE;
6353 	}
6354 
6355 	if (items > D2HRING_EDL_WATERMARK) {
6356 		DHD_ERROR_RLMT(("%s: WARNING! EDL watermark hit, num items=%u;"
6357 			" rd=%u; wr=%u; depth=%u;\n", __FUNCTION__, items,
6358 			ring->rd, ring->wr, depth));
6359 	}
6360 
6361 	dhd_schedule_logtrace(dhd->info);
6362 
6363 	return FALSE;
6364 }
6365 
6366 /*
6367  * This is called either from work queue context of 'event_log_dispatcher_work' or
6368  * from the kthread context of dhd_logtrace_thread
6369  */
6370 int
6371 dhd_prot_process_edl_complete(dhd_pub_t *dhd, void *evt_decode_data)
6372 {
6373 	dhd_prot_t *prot = NULL;
6374 	msgbuf_ring_t *ring = NULL;
6375 	int err = 0;
6376 	unsigned long flags = 0;
6377 	cmn_msg_hdr_t *msg = NULL;
6378 	uint8 *msg_addr = NULL;
6379 	uint32 max_items_to_process = 0, n = 0;
6380 	uint32 num_items = 0, new_items = 0;
6381 	uint16 depth = 0;
6382 	volatile uint16 wr = 0;
6383 
6384 	if (!dhd || !dhd->prot)
6385 		return 0;
6386 
6387 	prot = dhd->prot;
6388 	ring = prot->d2hring_edl;
6389 
6390 	if (!ring || !evt_decode_data) {
6391 		return 0;
6392 	}
6393 
6394 	if (dhd->hang_was_sent) {
6395 		return FALSE;
6396 	}
6397 
6398 	DHD_RING_LOCK(ring->ring_lock, flags);
6399 	ring->curr_rd = ring->rd;
6400 	wr = ring->wr;
6401 	depth = ring->max_items;
6402 	/* check for avail space, in number of ring items.
6403 	 * Note that this only gives the # of items from rd to wr
6404 	 * when wr >= rd, or from rd to the ring end when wr < rd.
6405 	 * In the latter case, strictly speaking, not all items are
6406 	 * read in this pass; that is OK because the remainder is
6407 	 * processed on the next doorbell, once rd has wrapped around.
6408 	 * Deferring is acceptable since EDL only carries debug data
6409 	 * (see the illustrative sketch after this function).
6410 	 */
6411 	num_items = READ_AVAIL_SPACE(wr, ring->rd, depth);
6412 
6413 	if (num_items == 0) {
6414 		/* no work items in edl ring */
6415 		DHD_RING_UNLOCK(ring->ring_lock, flags);
6416 		return 0;
6417 	}
6418 
6419 	DHD_INFO(("%s: EDL work items [%u] available \n",
6420 			__FUNCTION__, num_items));
6421 
6422 	/* if space is available, calculate address to be read */
6423 	msg_addr = (char*)ring->dma_buf.va + (ring->rd * ring->item_len);
6424 
6425 	max_items_to_process = MIN(num_items, DHD_EVENT_LOGTRACE_BOUND);
6426 
6427 	DHD_RING_UNLOCK(ring->ring_lock, flags);
6428 
6429 	/* Prefetch data to populate the cache */
6430 	OSL_PREFETCH(msg_addr);
6431 
6432 	n = max_items_to_process;
6433 	while (n > 0) {
6434 		msg = (cmn_msg_hdr_t *)msg_addr;
6435 		/* wait for DMA of work item to complete */
6436 		if ((err = dhd->prot->d2h_edl_sync_cb(dhd, ring, msg)) != BCME_OK) {
6437 			DHD_ERROR(("%s: Error waiting for DMA to cmpl in EDL ring; err = %d\n",
6438 				__FUNCTION__, err));
6439 		}
6440 		/*
6441 		 * Update the curr_rd to the current index in the ring, from where
6442 		 * the work item is fetched. This way if the fetched work item
6443 		 * fails in LIVELOCK, we can print the exact read index in the ring
6444 		 * that shows up the corrupted work item.
6445 		 * that holds the corrupted work item.
6446 		if ((ring->curr_rd + 1) >= ring->max_items) {
6447 			ring->curr_rd = 0;
6448 		} else {
6449 			ring->curr_rd += 1;
6450 		}
6451 
6452 		if (err != BCME_OK) {
6453 			return 0;
6454 		}
6455 
6456 		/* process the edl work item, i.e, the event log */
6457 		err = dhd_event_logtrace_process_edl(dhd, msg_addr, evt_decode_data);
6458 
6459 		/* Dummy sleep so that scheduler kicks in after processing any logprints */
6460 		OSL_SLEEP(0);
6461 
6462 		/* Prefetch data to populate the cache */
6463 		OSL_PREFETCH(msg_addr + ring->item_len);
6464 
6465 		msg_addr += ring->item_len;
6466 		--n;
6467 	}
6468 
6469 	DHD_RING_LOCK(ring->ring_lock, flags);
6470 	/* update host ring read pointer */
6471 	if ((ring->rd + max_items_to_process) >= ring->max_items)
6472 		ring->rd = 0;
6473 	else
6474 		ring->rd += max_items_to_process;
6475 	DHD_RING_UNLOCK(ring->ring_lock, flags);
6476 
6477 	/* Now, after processing max_items_to_process, update the dongle rd index.
6478 	 * The TCM rd index is updated only if the bus is not
6479 	 * in D3. Otherwise, the rd index is updated from the resume
6480 	 * context in 'dhdpcie_bus_suspend'.
6481 	 */
6482 	DHD_GENERAL_LOCK(dhd, flags);
6483 	if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) {
6484 		DHD_INFO(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
6485 			__FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state));
6486 		DHD_GENERAL_UNLOCK(dhd, flags);
6487 	} else {
6488 		DHD_GENERAL_UNLOCK(dhd, flags);
6489 		DHD_EDL_RING_TCM_RD_UPDATE(dhd);
6490 	}
6491 
6492 	/* If num_items > bound, we will reschedule anyway and this
6493 	 * function runs again, so any wr index update made by the DPC
6494 	 * in the meantime is picked up then. But if num_items <= bound
6495 	 * and the DPC updates the wr index while the above while loop
6496 	 * is running, the updated 'wr' index needs to be re-read here.
6497 	 * If we don't do so, the newly arrived event logs will not be
6498 	 * processed until the next time this function happens to be
6499 	 * scheduled.
6500 	 */
6501 	if (num_items <= DHD_EVENT_LOGTRACE_BOUND) {
6502 		/* read the updated wr index if reqd. and update num_items */
6503 		DHD_RING_LOCK(ring->ring_lock, flags);
6504 		if (wr != (volatile uint16)ring->wr) {
6505 			wr = (volatile uint16)ring->wr;
6506 			new_items = READ_AVAIL_SPACE(wr, ring->rd, depth);
6507 			DHD_INFO(("%s: new items [%u] avail in edl\n",
6508 				__FUNCTION__, new_items));
6509 			num_items += new_items;
6510 		}
6511 		DHD_RING_UNLOCK(ring->ring_lock, flags);
6512 	}
6513 
6514 	/* if # of items processed is less than num_items, need to re-schedule
6515 	* the deferred ctx
6516 	*/
6517 	if (max_items_to_process < num_items) {
6518 		DHD_INFO(("%s: EDL bound hit / new items found, "
6519 				"items processed=%u; remaining=%u, "
6520 				"resched deferred ctx...\n",
6521 				__FUNCTION__, max_items_to_process,
6522 				num_items - max_items_to_process));
6523 		return (num_items - max_items_to_process);
6524 	}
6525 
6526 	return 0;
6527 
6528 }
6529 
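/*
 * Editorial sketch (not part of the driver build): the contiguous-read
 * calculation described in the comment inside dhd_prot_process_edl_complete()
 * above. 'example_read_avail' is a hypothetical stand-in for the
 * READ_AVAIL_SPACE() macro: when the write index has wrapped behind the read
 * index, only the items up to the end of the ring are reported, and the
 * wrapped-around remainder is picked up on the next doorbell.
 */
static uint16
example_read_avail(uint16 wr, uint16 rd, uint16 depth)
{
	return (wr >= rd) ? (wr - rd) : (depth - rd);
}
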
6530 void
6531 dhd_prot_edl_ring_tcm_rd_update(dhd_pub_t *dhd)
6532 {
6533 	dhd_prot_t *prot = NULL;
6534 	unsigned long flags = 0;
6535 	msgbuf_ring_t *ring = NULL;
6536 
6537 	if (!dhd)
6538 		return;
6539 
6540 	prot = dhd->prot;
6541 	if (!prot || !prot->d2hring_edl)
6542 		return;
6543 
6544 	ring = prot->d2hring_edl;
6545 	DHD_RING_LOCK(ring->ring_lock, flags);
6546 	dhd_prot_upd_read_idx(dhd, ring);
6547 	DHD_RING_UNLOCK(ring->ring_lock, flags);
6548 	if (dhd->dma_h2d_ring_upd_support &&
6549 		!IDMA_ACTIVE(dhd)) {
6550 		dhd_prot_ring_doorbell(dhd, DHD_RDPTR_UPDATE_H2D_DB_MAGIC(ring));
6551 	}
6552 }
6553 #endif /* EWP_EDL */
6554 
6555 static void
6556 dhd_prot_rx_frame(dhd_pub_t *dhd, void *pkt, int ifidx, uint pkt_count)
6557 {
6558 
6559 #ifdef DHD_LB_RXP
6560 	if (dhd_read_lb_rxp(dhd) == 1) {
6561 		dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx);
6562 		return;
6563 	}
6564 #endif /* DHD_LB_RXP */
6565 	dhd_bus_rx_frame(dhd->bus, pkt, ifidx, pkt_count);
6566 }
6567 
6568 #ifdef DHD_LB_RXP
6569 static int dhd_prot_lb_rxp_flow_ctrl(dhd_pub_t *dhd)
6570 {
6571 	if ((dhd->lb_rxp_stop_thr == 0) || (dhd->lb_rxp_strt_thr == 0)) {
6572 		/* when either the stop or the start threshold is zero, flow ctrl is not enabled */
6573 		return FALSE;
6574 	}
6575 
6576 	if ((dhd_lb_rxp_process_qlen(dhd) >= dhd->lb_rxp_stop_thr) &&
6577 			(!atomic_read(&dhd->lb_rxp_flow_ctrl))) {
6578 		atomic_set(&dhd->lb_rxp_flow_ctrl, TRUE);
6579 #ifdef DHD_LB_STATS
6580 		dhd->lb_rxp_stop_thr_hitcnt++;
6581 #endif /* DHD_LB_STATS */
6582 		DHD_INFO(("lb_rxp_process_qlen %d lb_rxp_stop_thr %d\n",
6583 			dhd_lb_rxp_process_qlen(dhd), dhd->lb_rxp_stop_thr));
6584 	} else if ((dhd_lb_rxp_process_qlen(dhd) <= dhd->lb_rxp_strt_thr) &&
6585 			(atomic_read(&dhd->lb_rxp_flow_ctrl))) {
6586 		atomic_set(&dhd->lb_rxp_flow_ctrl, FALSE);
6587 #ifdef DHD_LB_STATS
6588 		dhd->lb_rxp_strt_thr_hitcnt++;
6589 #endif /* DHD_LB_STATS */
6590 		DHD_INFO(("lb_rxp_process_qlen %d lb_rxp_strt_thr %d\n",
6591 			dhd_lb_rxp_process_qlen(dhd), dhd->lb_rxp_strt_thr));
6592 	}
6593 
6594 	return atomic_read(&dhd->lb_rxp_flow_ctrl);
6595 }
6596 #endif /* DHD_LB_RXP */
6597 
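/*
 * Editorial sketch (not part of the driver build): the stop/start threshold
 * hysteresis implemented by dhd_prot_lb_rxp_flow_ctrl() above. The helper
 * name and parameters are hypothetical; the point is that the flow-control
 * state only flips at the outer thresholds, so it does not oscillate while
 * the queue length sits between them.
 */
static bool
example_rxp_flow_ctrl(uint32 qlen, uint32 stop_thr, uint32 strt_thr, bool flow_on)
{
	if ((stop_thr == 0) || (strt_thr == 0)) {
		return FALSE;		/* disabled when either threshold is zero */
	}
	if (qlen >= stop_thr) {
		flow_on = TRUE;		/* backlog too deep: stop draining rxcpl */
	} else if (qlen <= strt_thr) {
		flow_on = FALSE;	/* backlog drained enough: resume */
	}
	return flow_on;			/* unchanged while strt_thr < qlen < stop_thr */
}
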
6598 /** called when DHD needs to check for 'receive complete' messages from the dongle */
6599 bool
6600 BCMFASTPATH(dhd_prot_process_msgbuf_rxcpl)(dhd_pub_t *dhd, uint bound, int ringtype)
6601 {
6602 	bool more = FALSE;
6603 	uint n = 0;
6604 	dhd_prot_t *prot = dhd->prot;
6605 	msgbuf_ring_t *ring;
6606 	uint16 item_len;
6607 	host_rxbuf_cmpl_t *msg = NULL;
6608 	uint8 *msg_addr;
6609 	uint32 msg_len;
6610 	uint16 pkt_cnt, pkt_cnt_newidx;
6611 	unsigned long flags;
6612 	dmaaddr_t pa;
6613 	uint32 len;
6614 	void *dmah;
6615 	void *secdma;
6616 	int ifidx = 0, if_newidx = 0;
6617 	void *pkt, *pktqhead = NULL, *prevpkt = NULL, *pkt_newidx, *nextpkt;
6618 	uint32 pktid;
6619 	int i;
6620 	uint8 sync;
6621 
6622 #ifdef DHD_LB_RXP
6623 	/* must be the first check in this function */
6624 	if (dhd_prot_lb_rxp_flow_ctrl(dhd)) {
6625 		/* DHD is holding a lot of RX packets.
6626 		 * Just give the network stack a chance to consume RX packets.
6627 		 */
6628 		return FALSE;
6629 	}
6630 #endif /* DHD_LB_RXP */
6631 #ifdef DHD_PCIE_RUNTIMEPM
6632 	/* Set rx_pending_due_to_rpm if device is not in resume state */
6633 	if (dhdpcie_runtime_bus_wake(dhd, FALSE, dhd_prot_process_msgbuf_rxcpl)) {
6634 		dhd->rx_pending_due_to_rpm = TRUE;
6635 		return more;
6636 	}
6637 	dhd->rx_pending_due_to_rpm = FALSE;
6638 #endif /* DHD_PCIE_RUNTIMEPM */
6639 
6640 #ifdef DHD_HP2P
6641 	if (ringtype == DHD_HP2P_RING && prot->d2hring_hp2p_rxcpl)
6642 		ring = prot->d2hring_hp2p_rxcpl;
6643 	else
6644 #endif /* DHD_HP2P */
6645 		ring = &prot->d2hring_rx_cpln;
6646 	item_len = ring->item_len;
6647 	while (1) {
6648 		if (dhd_is_device_removed(dhd))
6649 			break;
6650 
6651 		if (dhd_query_bus_erros(dhd))
6652 			break;
6653 
6654 		if (dhd->hang_was_sent)
6655 			break;
6656 
6657 		if (dhd->smmu_fault_occurred) {
6658 			break;
6659 		}
6660 
6661 		pkt_cnt = 0;
6662 		pktqhead = pkt_newidx = NULL;
6663 		pkt_cnt_newidx = 0;
6664 
6665 		DHD_RING_LOCK(ring->ring_lock, flags);
6666 
6667 		/* Get the address of the next message to be read from ring */
6668 		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
6669 		if (msg_addr == NULL) {
6670 			DHD_RING_UNLOCK(ring->ring_lock, flags);
6671 			break;
6672 		}
6673 
6674 		while (msg_len > 0) {
6675 			msg = (host_rxbuf_cmpl_t *)msg_addr;
6676 
6677 			/* Wait until DMA completes, then fetch msg_type */
6678 			sync = prot->d2h_sync_cb(dhd, ring, &msg->cmn_hdr, item_len);
6679 			/*
6680 			 * Update the curr_rd to the current index in the ring, from where
6681 			 * the work item is fetched. This way if the fetched work item
6682 			 * fails in LIVELOCK, we can print the exact read index in the ring
6683 			 * that shows up the corrupted work item.
6684 			 * that holds the corrupted work item.
6685 			if ((ring->curr_rd + 1) >= ring->max_items) {
6686 				ring->curr_rd = 0;
6687 			} else {
6688 				ring->curr_rd += 1;
6689 			}
6690 
6691 			if (!sync) {
6692 				msg_len -= item_len;
6693 				msg_addr += item_len;
6694 				continue;
6695 			}
6696 
6697 			pktid = ltoh32(msg->cmn_hdr.request_id);
6698 
6699 #ifdef DHD_PKTID_AUDIT_RING
6700 			DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_rx_map, pktid,
6701 				DHD_DUPLICATE_FREE, msg, D2HRING_RXCMPLT_ITEMSIZE);
6702 #endif /* DHD_PKTID_AUDIT_RING */
6703 
6704 			pkt = DHD_PKTID_TO_NATIVE(dhd, prot->pktid_rx_map, pktid, pa,
6705 			        len, dmah, secdma, PKTTYPE_DATA_RX);
6706 			/* Sanity check of shinfo nrfrags */
6707 			if (!pkt || (dhd_check_shinfo_nrfrags(dhd, pkt, &pa, pktid) != BCME_OK)) {
6708 				msg_len -= item_len;
6709 				msg_addr += item_len;
6710 				continue;
6711 			}
6712 			dhd->prot->tot_rxcpl++;
6713 
6714 			DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
6715 
6716 #ifdef DMAMAP_STATS
6717 			dhd->dma_stats.rxdata--;
6718 			dhd->dma_stats.rxdata_sz -= len;
6719 #endif /* DMAMAP_STATS */
6720 #ifdef DHD_HMAPTEST
6721 			if ((dhd->prot->hmaptest_rx_active == HMAPTEST_D11_RX_POSTED) &&
6722 				(pktid == dhd->prot->hmaptest_rx_pktid)) {
6723 
6724 				uchar *ptr;
6725 				ptr = PKTDATA(dhd->osh, pkt) - (prot->rx_metadata_offset);
6726 				DMA_UNMAP(dhd->osh, dhd->prot->hmap_rx_buf_pa,
6727 					(uint)dhd->prot->hmap_rx_buf_len, DMA_RX, 0, dmah);
6728 				DHD_ERROR(("hmaptest: d11write rxcpl rcvd sc rxbuf pktid=0x%08x\n",
6729 					pktid));
6730 				DHD_ERROR(("hmaptest: d11write rxcpl r0_st=0x%08x r1_stat=0x%08x\n",
6731 					msg->rx_status_0, msg->rx_status_1));
6732 				DHD_ERROR(("hmaptest: d11write rxcpl rxbuf va=0x%p pa=0x%08x\n",
6733 					dhd->prot->hmap_rx_buf_va,
6734 					(uint32)PHYSADDRLO(dhd->prot->hmap_rx_buf_pa)));
6735 				DHD_ERROR(("hmaptest: d11write rxcpl pktdata va=0x%p pa=0x%08x\n",
6736 					PKTDATA(dhd->osh, pkt), (uint32)PHYSADDRLO(pa)));
6737 				memcpy(ptr, dhd->prot->hmap_rx_buf_va, dhd->prot->hmap_rx_buf_len);
6738 				dhd->prot->hmaptest_rx_active = HMAPTEST_D11_RX_INACTIVE;
6739 				dhd->prot->hmap_rx_buf_va = NULL;
6740 				dhd->prot->hmap_rx_buf_len = 0;
6741 				PHYSADDRHISET(dhd->prot->hmap_rx_buf_pa, 0);
6742 				PHYSADDRLOSET(dhd->prot->hmap_rx_buf_pa, 0);
6743 				prot->hmaptest.in_progress = FALSE;
6744 			}
6745 #endif /* DHD_HMAPTEST */
6746 			DHD_MSGBUF_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, "
6747 				"pktdata %p, metalen %d\n",
6748 				ltoh32(msg->cmn_hdr.request_id),
6749 				ltoh16(msg->data_offset),
6750 				ltoh16(msg->data_len), msg->cmn_hdr.if_id,
6751 				msg->cmn_hdr.flags, PKTDATA(dhd->osh, pkt),
6752 				ltoh16(msg->metadata_len)));
6753 
6754 			pkt_cnt++;
6755 			msg_len -= item_len;
6756 			msg_addr += item_len;
6757 
6758 #if !defined(BCM_ROUTER_DHD)
6759 #if DHD_DBG_SHOW_METADATA
6760 			if (prot->metadata_dbg && prot->rx_metadata_offset &&
6761 			        msg->metadata_len) {
6762 				uchar *ptr;
6763 				ptr = PKTDATA(dhd->osh, pkt) - (prot->rx_metadata_offset);
6764 				/* header followed by data */
6765 				bcm_print_bytes("rxmetadata", ptr, msg->metadata_len);
6766 				dhd_prot_print_metadata(dhd, ptr, msg->metadata_len);
6767 			}
6768 #endif /* DHD_DBG_SHOW_METADATA */
6769 #endif /* !BCM_ROUTER_DHD */
6770 
6771 			/* data_offset from buf start */
6772 			if (ltoh16(msg->data_offset)) {
6773 				/* data offset given from dongle after split rx */
6774 				PKTPULL(dhd->osh, pkt, ltoh16(msg->data_offset));
6775 			}
6776 			else if (prot->rx_dataoffset) {
6777 				/* DMA RX offset updated through shared area */
6778 				PKTPULL(dhd->osh, pkt, prot->rx_dataoffset);
6779 			}
6780 			/* Actual length of the packet */
6781 			PKTSETLEN(dhd->osh, pkt, ltoh16(msg->data_len));
6782 #ifdef DHD_PKTTS
6783 			if (dhd_get_pktts_enab(dhd) == TRUE) {
6784 				uint fwr1 = 0, fwr2 = 0;
6785 
6786 				/* firmware mark rx_pktts.tref with 0xFFFFFFFF for errors */
6787 				/* firmware marks rx_pktts.tref with 0xFFFFFFFF for errors */
6788 					fwr1 = (uint)htonl(ltoh32(msg->rx_pktts.tref));
6789 					fwr2 = (uint)htonl(ltoh32(msg->rx_pktts.tref) +
6790 						ltoh16(msg->rx_pktts.d_t2));
6791 
6792 					/* check for overflow */
6793 					if (ntohl(fwr2) > ntohl(fwr1)) {
6794 						/* send rx timestamp to netlnik socket */
6795 						/* send rx timestamp to netlink socket */
6796 					}
6797 				}
6798 			}
6799 #endif /* DHD_PKTTS */
6800 
6801 #if defined(WL_MONITOR)
6802 			if (dhd_monitor_enabled(dhd, ifidx)) {
6803 				if (msg->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11) {
6804 					dhd_rx_mon_pkt(dhd, msg, pkt, ifidx);
6805 					continue;
6806 				} else {
6807 					DHD_ERROR(("Received non 802.11 packet, "
6808 						"when monitor mode is enabled\n"));
6809 				}
6810 			}
6811 #endif /* WL_MONITOR */
6812 
6813 			if (!pktqhead) {
6814 				pktqhead = prevpkt = pkt;
6815 				ifidx = msg->cmn_hdr.if_id;
6816 			} else {
6817 				if (ifidx != msg->cmn_hdr.if_id) {
6818 					pkt_newidx = pkt;
6819 					if_newidx = msg->cmn_hdr.if_id;
6820 					pkt_cnt--;
6821 					pkt_cnt_newidx = 1;
6822 					break;
6823 				} else {
6824 					PKTSETNEXT(dhd->osh, prevpkt, pkt);
6825 					prevpkt = pkt;
6826 				}
6827 			}
6828 
6829 #ifdef DHD_HP2P
6830 			if (dhd->hp2p_capable && ring == prot->d2hring_hp2p_rxcpl) {
6831 #ifdef DHD_HP2P_DEBUG
6832 				bcm_print_bytes("Rxcpl", (uchar *)msg,  sizeof(host_rxbuf_cmpl_t));
6833 #endif /* DHD_HP2P_DEBUG */
6834 				dhd_update_hp2p_rxstats(dhd, msg);
6835 			}
6836 #endif /* DHD_HP2P */
6837 
6838 #ifdef DHD_TIMESYNC
6839 			if (dhd->prot->rx_ts_log_enabled) {
6840 				dhd_pkt_parse_t parse;
6841 				ts_timestamp_t *ts = (ts_timestamp_t *)&msg->ts;
6842 
6843 				memset(&parse, 0, sizeof(dhd_pkt_parse_t));
6844 				dhd_parse_proto(PKTDATA(dhd->osh, pkt), &parse);
6845 
6846 				if (parse.proto == IP_PROT_ICMP)
6847 					dhd_timesync_log_rx_timestamp(dhd->ts, ifidx,
6848 							ts->low, ts->high, &parse);
6849 			}
6850 #endif /* DHD_TIMESYNC */
6851 
6852 #ifdef DHD_LBUF_AUDIT
6853 			PKTAUDIT(dhd->osh, pkt);
6854 #endif
6855 		}
6856 
6857 		/* roll back read pointer for unprocessed message */
6858 		if (msg_len > 0) {
6859 			if (ring->rd < msg_len / item_len)
6860 				ring->rd = ring->max_items - msg_len / item_len;
6861 			else
6862 				ring->rd -= msg_len / item_len;
6863 		}
6864 
6865 		/* Update read pointer */
6866 		dhd_prot_upd_read_idx(dhd, ring);
6867 
6868 		DHD_RING_UNLOCK(ring->ring_lock, flags);
6869 
6870 		pkt = pktqhead;
6871 		for (i = 0; pkt && i < pkt_cnt; i++, pkt = nextpkt) {
6872 			nextpkt = PKTNEXT(dhd->osh, pkt);
6873 			PKTSETNEXT(dhd->osh, pkt, NULL);
6874 #ifdef DHD_RX_CHAINING
6875 			dhd_rxchain_frame(dhd, pkt, ifidx);
6876 #else
6877 			dhd_prot_rx_frame(dhd, pkt, ifidx, 1);
6878 #endif /* DHD_RX_CHAINING */
6879 		}
6880 
6881 		if (pkt_newidx) {
6882 #ifdef DHD_RX_CHAINING
6883 			dhd_rxchain_frame(dhd, pkt_newidx, if_newidx);
6884 #else
6885 			dhd_prot_rx_frame(dhd, pkt_newidx, if_newidx, 1);
6886 #endif /* DHD_RX_CHAINING */
6887 		}
6888 
6889 		pkt_cnt += pkt_cnt_newidx;
6890 
6891 		/* Post another set of rxbufs to the device */
6892 		dhd_prot_return_rxbuf(dhd, ring, 0, pkt_cnt);
6893 
6894 #ifdef DHD_RX_CHAINING
6895 		dhd_rxchain_commit(dhd);
6896 #endif
6897 
6898 		/* After batch processing, check RX bound */
6899 		n += pkt_cnt;
6900 		if (n >= bound) {
6901 			more = TRUE;
6902 			break;
6903 		}
6904 	}
6905 
6906 	/* Call lb_dispatch only if packets are queued */
6907 	if (n &&
6908 #ifdef WL_MONITOR
6909 	!(dhd_monitor_enabled(dhd, ifidx)) &&
6910 #endif /* WL_MONITOR */
6911 	TRUE) {
6912 		DHD_LB_DISPATCH_RX_PROCESS(dhd);
6913 	}
6914 
6915 	return more;
6916 
6917 }
6918 
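/*
 * Editorial sketch (not part of the driver build): the per-interface batching
 * used by the rx completion loop above. Completions are linked into one chain
 * while they belong to the same interface; the first completion for a
 * different interface ends the batch, so each hand-off to the network stack
 * carries packets for a single ifidx only. The helper below is hypothetical
 * and just counts where those batch boundaries would fall.
 */
static int
example_count_ifidx_batches(const int *ifidx, int npkts)
{
	int i, batches = 0;

	for (i = 0; i < npkts; i++) {
		/* a new batch starts at the first completion and whenever the
		 * interface index differs from the previous completion
		 */
		if ((i == 0) || (ifidx[i] != ifidx[i - 1])) {
			batches++;
		}
	}
	return batches;
}
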
6919 /**
6920  * Hands transmit packets (with a caller provided flow_id) over to dongle territory (the flow ring)
6921  */
6922 void
6923 dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flowid, void *msgring)
6924 {
6925 	msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring;
6926 
6927 	if (ring == NULL) {
6928 		DHD_ERROR(("%s: NULL txflowring. exiting...\n",  __FUNCTION__));
6929 		return;
6930 	}
6931 	/* Update read pointer */
6932 	if (dhd->dma_d2h_ring_upd_support) {
6933 		ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
6934 	}
6935 
6936 	DHD_TRACE(("ringid %d flowid %d write %d read %d \n\n",
6937 		ring->idx, flowid, ring->wr, ring->rd));
6938 
6939 	/* Need more logic here, but for now use it directly */
6940 	dhd_bus_schedule_queue(dhd->bus, flowid, TRUE); /* from queue to flowring */
6941 }
6942 
6943 /** called when DHD needs to check for 'transmit complete' messages from the dongle */
6944 bool
6945 BCMFASTPATH(dhd_prot_process_msgbuf_txcpl)(dhd_pub_t *dhd, uint bound, int ringtype)
6946 {
6947 	bool more = TRUE;
6948 	uint n = 0;
6949 	msgbuf_ring_t *ring;
6950 	unsigned long flags;
6951 
6952 #ifdef DHD_HP2P
6953 	if (ringtype == DHD_HP2P_RING && dhd->prot->d2hring_hp2p_txcpl)
6954 		ring = dhd->prot->d2hring_hp2p_txcpl;
6955 	else
6956 #endif /* DHD_HP2P */
6957 		ring = &dhd->prot->d2hring_tx_cpln;
6958 
6959 	/* Process all the messages - DTOH direction */
6960 	while (!dhd_is_device_removed(dhd)) {
6961 		uint8 *msg_addr;
6962 		uint32 msg_len;
6963 
6964 		if (dhd_query_bus_erros(dhd)) {
6965 			more = FALSE;
6966 			break;
6967 		}
6968 
6969 		if (dhd->hang_was_sent) {
6970 			more = FALSE;
6971 			break;
6972 		}
6973 
6974 		if (dhd->smmu_fault_occurred) {
6975 			more = FALSE;
6976 			break;
6977 		}
6978 
6979 		DHD_RING_LOCK(ring->ring_lock, flags);
6980 		/* Get the address of the next message to be read from ring */
6981 		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
6982 		DHD_RING_UNLOCK(ring->ring_lock, flags);
6983 
6984 		if (msg_addr == NULL) {
6985 			more = FALSE;
6986 			break;
6987 		}
6988 
6989 		/* Prefetch data to populate the cache */
6990 		OSL_PREFETCH(msg_addr);
6991 
6992 		if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
6993 			DHD_ERROR(("%s: process %s msg addr %p len %d\n",
6994 				__FUNCTION__, ring->name, msg_addr, msg_len));
6995 		}
6996 
6997 		/* Write to dngl rd ptr */
6998 		dhd_prot_upd_read_idx(dhd, ring);
6999 
7000 		/* After batch processing, check bound */
7001 		n += msg_len / ring->item_len;
7002 		if (n >= bound) {
7003 			break;
7004 		}
7005 	}
7006 
7007 	if (n) {
7008 		/* For IDMA and HWA case, doorbell is sent along with read index update.
7009 		 * For DMA indices case ring doorbell once n items are read to sync with dongle.
7010 		 */
7011 		if (dhd->dma_h2d_ring_upd_support && !IDMA_ACTIVE(dhd)) {
7012 			dhd_prot_ring_doorbell(dhd, DHD_RDPTR_UPDATE_H2D_DB_MAGIC(ring));
7013 			dhd->prot->txcpl_db_cnt++;
7014 		}
7015 	}
7016 	return more;
7017 }
7018 
7019 int
7020 BCMFASTPATH(dhd_prot_process_trapbuf)(dhd_pub_t *dhd)
7021 {
7022 	uint32 data;
7023 	dhd_dma_buf_t *trap_addr = &dhd->prot->fw_trap_buf;
7024 
7025 	/* Interrupts can come in before this struct
7026 	 *  has been initialized.
7027 	 */
7028 	if (trap_addr->va == NULL) {
7029 		DHD_ERROR(("%s: trap_addr->va is NULL\n", __FUNCTION__));
7030 		return 0;
7031 	}
7032 
7033 	OSL_CACHE_INV((void *)trap_addr->va, sizeof(uint32));
7034 	data = *(uint32 *)(trap_addr->va);
7035 
7036 	if (data & D2H_DEV_FWHALT) {
7037 		if (dhd->db7_trap.fw_db7w_trap_inprogress) {
7038 			DHD_ERROR(("DB7 FW responded 0x%04x\n", data));
7039 		} else {
7040 			DHD_ERROR(("Firmware trapped and trap_data is 0x%04x\n", data));
7041 		}
7042 
7043 		if (data & D2H_DEV_EXT_TRAP_DATA)
7044 		{
7045 			if (dhd->extended_trap_data) {
7046 				OSL_CACHE_INV((void *)trap_addr->va,
7047 				       BCMPCIE_EXT_TRAP_DATA_MAXLEN);
7048 				memcpy(dhd->extended_trap_data, (uint32 *)trap_addr->va,
7049 				       BCMPCIE_EXT_TRAP_DATA_MAXLEN);
7050 			}
7051 			if (dhd->db7_trap.fw_db7w_trap_inprogress == FALSE) {
7052 				DHD_ERROR(("Extended trap data available\n"));
7053 			}
7054 		}
7055 #ifdef BT_OVER_PCIE
7056 		if (data & D2H_DEV_TRAP_DUE_TO_BT) {
7057 			DHD_ERROR(("WLAN Firmware trapped due to BT\n"));
7058 			dhd->dongle_trap_due_to_bt = TRUE;
7059 		}
7060 #endif /* BT_OVER_PCIE */
7061 		return data;
7062 	}
7063 	return 0;
7064 }
7065 
7066 /** called when DHD needs to check for 'ioctl complete' messages from the dongle */
7067 int
7068 BCMFASTPATH(dhd_prot_process_ctrlbuf)(dhd_pub_t *dhd)
7069 {
7070 	dhd_prot_t *prot = dhd->prot;
7071 	msgbuf_ring_t *ring = &prot->d2hring_ctrl_cpln;
7072 	unsigned long flags;
7073 
7074 	/* Process all the messages - DTOH direction */
7075 	while (!dhd_is_device_removed(dhd)) {
7076 		uint8 *msg_addr;
7077 		uint32 msg_len;
7078 
7079 		if (dhd_query_bus_erros(dhd)) {
7080 			break;
7081 		}
7082 
7083 		if (dhd->hang_was_sent) {
7084 			break;
7085 		}
7086 
7087 		if (dhd->smmu_fault_occurred) {
7088 			break;
7089 		}
7090 
7091 		DHD_RING_LOCK(ring->ring_lock, flags);
7092 		/* Get the address of the next message to be read from ring */
7093 		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
7094 		DHD_RING_UNLOCK(ring->ring_lock, flags);
7095 
7096 		if (msg_addr == NULL) {
7097 			break;
7098 		}
7099 
7100 		/* Prefetch data to populate the cache */
7101 		OSL_PREFETCH(msg_addr);
7102 		if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
7103 			DHD_ERROR(("%s: process %s msg addr %p len %d\n",
7104 				__FUNCTION__, ring->name, msg_addr, msg_len));
7105 		}
7106 
7107 		/* Write to dngl rd ptr */
7108 		dhd_prot_upd_read_idx(dhd, ring);
7109 	}
7110 
7111 	return 0;
7112 }
7113 
7114 /**
7115  * Consume messages out of the D2H ring. Ensure that the message's DMA to host
7116  * memory has completed, before invoking the message handler via a table lookup
7117  * of the cmn_msg_hdr::msg_type.
7118  */
7119 static int
7120 BCMFASTPATH(dhd_prot_process_msgtype)(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len)
7121 {
7122 	uint32 buf_len = len;
7123 	uint16 item_len;
7124 	uint8 msg_type;
7125 	cmn_msg_hdr_t *msg = NULL;
7126 	int ret = BCME_OK;
7127 
7128 	ASSERT(ring);
7129 	item_len = ring->item_len;
7130 	if (item_len == 0) {
7131 		DHD_ERROR(("%s: ringidx %d, item_len %d buf_len %d \n",
7132 			__FUNCTION__, ring->idx, item_len, buf_len));
7133 		return BCME_ERROR;
7134 	}
7135 
7136 	while (buf_len > 0) {
7137 		if (dhd->hang_was_sent) {
7138 			ret = BCME_ERROR;
7139 			goto done;
7140 		}
7141 
7142 		if (dhd->smmu_fault_occurred) {
7143 			ret = BCME_ERROR;
7144 			goto done;
7145 		}
7146 
7147 		msg = (cmn_msg_hdr_t *)buf;
7148 
7149 		/* Wait until DMA completes, then fetch msg_type */
7150 		msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, item_len);
7151 
7152 		/*
7153 		 * Update the curr_rd to the current index in the ring, from where
7154 		 * the work item is fetched. This way if the fetched work item
7155 		 * fails in LIVELOCK, we can print the exact read index in the ring
7156 		 * that shows up the corrupted work item.
7157 		 * that holds the corrupted work item.
7158 		if ((ring->curr_rd + 1) >= ring->max_items) {
7159 			ring->curr_rd = 0;
7160 		} else {
7161 			ring->curr_rd += 1;
7162 		}
7163 
7164 		/* Prefetch data to populate the cache */
7165 		OSL_PREFETCH(buf + item_len);
7166 
7167 		DHD_MSGBUF_INFO(("msg_type %d item_len %d buf_len %d\n",
7168 			msg_type, item_len, buf_len));
7169 
7170 		if (msg_type == MSG_TYPE_LOOPBACK) {
7171 			bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, item_len);
7172 			DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", item_len));
7173 		}
7174 
7175 		ASSERT(msg_type < DHD_PROT_FUNCS);
7176 		if (msg_type >= DHD_PROT_FUNCS) {
7177 			DHD_ERROR(("%s: msg_type %d, item_len %d buf_len %d\n",
7178 				__FUNCTION__, msg_type, item_len, buf_len));
7179 			ret = BCME_ERROR;
7180 			goto done;
7181 		}
7182 
7183 #if !defined(BCM_ROUTER_DHD)
7184 		if (msg_type == MSG_TYPE_INFO_BUF_CMPLT) {
7185 			if (ring == dhd->prot->d2hring_info_cpln) {
7186 				if (!dhd->prot->infobufpost) {
7187 					DHD_ERROR(("infobuf posted are zero,"
7188 						   " but there is a completion\n"));
7189 					goto done;
7190 				}
7191 				dhd->prot->infobufpost--;
7192 				dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
7193 				dhd_prot_process_infobuf_complete(dhd, buf);
7194 			}
7195 #ifdef BTLOG
7196 			else if (ring == dhd->prot->d2hring_btlog_cpln) {
7197 				info_buf_resp_t *resp = (info_buf_resp_t *)buf;
7198 
7199 				if (!dhd->prot->btlogbufpost) {
7200 					DHD_ERROR(("btlogbuf posted are zero,"
7201 						   " but there is a completion\n"));
7202 					goto done;
7203 				}
7204 
7205 				dhd->prot->btlogbufpost--;
7206 				if (resp->compl_hdr.status != BCMPCIE_PKT_FLUSH) {
7207 					dhd_prot_infobufpost(dhd, dhd->prot->h2dring_btlog_subn);
7208 				}
7209 				dhd_prot_process_btlog_complete(dhd, buf);
7210 			}
7211 #endif	/* BTLOG */
7212 		} else
7213 #endif	/* !defined(BCM_ROUTER_DHD) */
7214 		if (table_lookup[msg_type]) {
7215 			table_lookup[msg_type](dhd, buf);
7216 		}
7217 
7218 		if (buf_len < item_len) {
7219 			ret = BCME_ERROR;
7220 			goto done;
7221 		}
7222 		buf_len = buf_len - item_len;
7223 		buf = buf + item_len;
7224 	}
7225 
7226 done:
7227 
7228 #ifdef DHD_RX_CHAINING
7229 	dhd_rxchain_commit(dhd);
7230 #endif
7231 
7232 	return ret;
7233 } /* dhd_prot_process_msgtype */
7234 
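/*
 * Editorial sketch (not part of the driver build): the function-pointer
 * dispatch used by dhd_prot_process_msgtype() above. 'example_msg_fn_t' and
 * 'example_dispatch' are hypothetical; in the driver the table is
 * table_lookup[], indexed by cmn_msg_hdr_t::msg_type and bounded by
 * DHD_PROT_FUNCS, with NULL slots for unhandled message types.
 */
typedef void (*example_msg_fn_t)(dhd_pub_t *dhd, void *msg);

static void
example_dispatch(dhd_pub_t *dhd, void *msg, uint8 msg_type,
	example_msg_fn_t *table, uint8 table_size)
{
	/* range-check first: an out-of-range type indicates a corrupted work item */
	if (msg_type >= table_size) {
		return;
	}
	if (table[msg_type]) {
		table[msg_type](dhd, msg);
	}
}
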
7235 static void
7236 dhd_prot_noop(dhd_pub_t *dhd, void *msg)
7237 {
7238 	return;
7239 }
7240 
7241 /** called on MSG_TYPE_RING_STATUS message received from dongle */
7242 static void
7243 dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg)
7244 {
7245 	pcie_ring_status_t *ring_status = (pcie_ring_status_t *) msg;
7246 	uint32 request_id = ltoh32(ring_status->cmn_hdr.request_id);
7247 	uint16 status = ltoh16(ring_status->compl_hdr.status);
7248 	uint16 ring_id = ltoh16(ring_status->compl_hdr.flow_ring_id);
7249 
7250 	DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, write_idx %d \n",
7251 		request_id, status, ring_id, ltoh16(ring_status->write_idx)));
7252 
7253 	if (ltoh16(ring_status->compl_hdr.ring_id) != BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT)
7254 		return;
7255 	if (status == BCMPCIE_BAD_PHASE) {
7256 		/* bad phase reported by the dongle */
7257 		/* XXX: if the request is ioctl request finish the ioctl, rather than timing out */
7258 		DHD_ERROR(("Bad phase\n"));
7259 	}
7260 	if (status != BCMPCIE_BADOPTION)
7261 		return;
7262 
7263 	if (request_id == DHD_H2D_DBGRING_REQ_PKTID) {
7264 		/* XXX: see if the debug ring create is pending */
7265 		if (dhd->prot->h2dring_info_subn != NULL) {
7266 			if (dhd->prot->h2dring_info_subn->create_pending == TRUE) {
7267 				DHD_ERROR(("H2D ring create failed for info ring\n"));
7268 				dhd->prot->h2dring_info_subn->create_pending = FALSE;
7269 			}
7270 			else
7271 				DHD_ERROR(("ring create ID for a ring, create not pending\n"));
7272 		} else {
7273 			DHD_ERROR(("%s info submit ring doesn't exist\n", __FUNCTION__));
7274 		}
7275 	}
7276 	else if (request_id == DHD_D2H_DBGRING_REQ_PKTID) {
7277 		/* XXX: see if the debug ring create is pending */
7278 		if (dhd->prot->d2hring_info_cpln != NULL) {
7279 			if (dhd->prot->d2hring_info_cpln->create_pending == TRUE) {
7280 				DHD_ERROR(("D2H ring create failed for info ring\n"));
7281 				dhd->prot->d2hring_info_cpln->create_pending = FALSE;
7282 			}
7283 			else
7284 				DHD_ERROR(("ring create ID for info ring, create not pending\n"));
7285 		} else {
7286 			DHD_ERROR(("%s info cpl ring doesn't exist\n", __FUNCTION__));
7287 		}
7288 	}
7289 #ifdef BTLOG
7290 	else if (request_id == DHD_H2D_BTLOGRING_REQ_PKTID) {
7291 		/* XXX: see if the debug ring create is pending */
7292 		if (dhd->prot->h2dring_btlog_subn != NULL) {
7293 			if (dhd->prot->h2dring_btlog_subn->create_pending == TRUE) {
7294 				DHD_ERROR(("H2D ring create failed for btlog ring\n"));
7295 				dhd->prot->h2dring_btlog_subn->create_pending = FALSE;
7296 			}
7297 			else
7298 				DHD_ERROR(("ring create ID for a ring, create not pending\n"));
7299 		} else {
7300 			DHD_ERROR(("%s btlog submit ring doesn't exist\n", __FUNCTION__));
7301 		}
7302 	}
7303 	else if (request_id == DHD_D2H_BTLOGRING_REQ_PKTID) {
7304 		/* XXX: see if the debug ring create is pending */
7305 		if (dhd->prot->d2hring_btlog_cpln != NULL) {
7306 			if (dhd->prot->d2hring_btlog_cpln->create_pending == TRUE) {
7307 				DHD_ERROR(("D2H ring create failed for btlog ring\n"));
7308 				dhd->prot->d2hring_btlog_cpln->create_pending = FALSE;
7309 			}
7310 			else
7311 				DHD_ERROR(("ring create ID for btlog ring, create not pending\n"));
7312 		} else {
7313 			DHD_ERROR(("%s btlog cpl ring doesn't exist\n", __FUNCTION__));
7314 		}
7315 	}
7316 #endif	/* BTLOG */
7317 #ifdef DHD_HP2P
7318 	else if (request_id == DHD_D2H_HPPRING_TXREQ_PKTID) {
7319 		/* XXX: see if the HPP txcmpl ring create is pending */
7320 		if (dhd->prot->d2hring_hp2p_txcpl != NULL) {
7321 			if (dhd->prot->d2hring_hp2p_txcpl->create_pending == TRUE) {
7322 				DHD_ERROR(("H2D ring create failed for hp2p ring\n"));
7323 				dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
7324 			}
7325 			else
7326 				DHD_ERROR(("ring create ID for a ring, create not pending\n"));
7327 		} else {
7328 			DHD_ERROR(("%s hp2p txcmpl ring doesn't exist\n", __FUNCTION__));
7329 		}
7330 	}
7331 	else if (request_id == DHD_D2H_HPPRING_RXREQ_PKTID) {
7332 		/* XXX: see if the hp2p rxcmpl ring create is pending */
7333 		if (dhd->prot->d2hring_hp2p_rxcpl != NULL) {
7334 			if (dhd->prot->d2hring_hp2p_rxcpl->create_pending == TRUE) {
7335 				DHD_ERROR(("D2H ring create failed for hp2p rxcmpl ring\n"));
7336 				dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
7337 			}
7338 			else
7339 				DHD_ERROR(("ring create ID for hp2p rxcmpl ring, not pending\n"));
7340 		} else {
7341 			DHD_ERROR(("%s hp2p rxcpl ring doesn't exist\n", __FUNCTION__));
7342 		}
7343 	}
7344 #endif /* DHD_HP2P */
7345 	else {
7346 		DHD_ERROR(("don't know how to pair with original request\n"));
7347 	}
7348 	/* How do we track this to pair it with ??? */
7349 	return;
7350 }
7351 
7352 /** called on MSG_TYPE_GEN_STATUS ('general status') message received from dongle */
7353 static void
7354 dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg)
7355 {
7356 	pcie_gen_status_t *gen_status = (pcie_gen_status_t *)msg;
7357 	DHD_ERROR(("ERROR: gen status: request_id %d, STATUS 0x%04x, flow ring %d \n",
7358 		gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status,
7359 		gen_status->compl_hdr.flow_ring_id));
7360 
7361 	/* How do we track this to pair it with ??? */
7362 	return;
7363 }
7364 
7365 /**
7366  * Called on MSG_TYPE_IOCTLPTR_REQ_ACK ('ioctl ack') message received from dongle, meaning that the
7367  * dongle received the ioctl message in dongle memory.
7368  */
7369 static void
7370 dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg)
7371 {
7372 	ioctl_req_ack_msg_t *ioct_ack = (ioctl_req_ack_msg_t *)msg;
7373 	unsigned long flags;
7374 #if defined(DHD_PKTID_AUDIT_RING) && !defined(BCM_ROUTER_DHD)
7375 	uint32 pktid = ltoh32(ioct_ack->cmn_hdr.request_id);
7376 #endif /* DHD_PKTID_AUDIT_RING && !BCM_ROUTER_DHD */
7377 
7378 #if defined(DHD_PKTID_AUDIT_RING) && !defined(BCM_ROUTER_DHD)
7379 	/* Skip audit for DHD_IOCTL_REQ_PKTID = 0xFFFE */
7380 	if (pktid != DHD_IOCTL_REQ_PKTID) {
7381 #ifndef IOCTLRESP_USE_CONSTMEM
7382 		DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
7383 			DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
7384 #else
7385 		DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
7386 			DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
7387 #endif /* !IOCTLRESP_USE_CONSTMEM */
7388 	}
7389 #endif /* DHD_PKTID_AUDIT_RING && !BCM_ROUTER_DHD */
7390 
7391 	dhd->prot->ioctl_ack_time = OSL_LOCALTIME_NS();
7392 
7393 	DHD_GENERAL_LOCK(dhd, flags);
7394 	if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) &&
7395 		(dhd->prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
7396 		dhd->prot->ioctl_state &= ~MSGBUF_IOCTL_ACK_PENDING;
7397 	} else {
7398 		DHD_ERROR(("%s: received ioctl ACK with state %02x trans_id = %d\n",
7399 			__FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
7400 		prhex("dhd_prot_ioctack_process:",
7401 			(uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
7402 	}
7403 	DHD_GENERAL_UNLOCK(dhd, flags);
7404 
7405 	DHD_CTL(("ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n",
7406 		ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status,
7407 		ioct_ack->compl_hdr.flow_ring_id));
7408 	if (ioct_ack->compl_hdr.status != 0)  {
7409 		DHD_ERROR(("got an error status for the ioctl request...need to handle that\n"));
7410 		/* FIXME: should we fail the pending IOCTL completion wait process... */
7411 	}
7412 #ifdef REPORT_FATAL_TIMEOUTS
7413 	else {
7414 		dhd_stop_bus_timer(dhd);
7415 	}
7416 #endif /* REPORT_FATAL_TIMEOUTS */
7417 }
7418 
7419 /** called on MSG_TYPE_IOCTL_CMPLT message received from dongle */
7420 static void
7421 dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg)
7422 {
7423 	dhd_prot_t *prot = dhd->prot;
7424 	uint32 pkt_id, xt_id;
7425 	ioctl_comp_resp_msg_t *ioct_resp = (ioctl_comp_resp_msg_t *)msg;
7426 	void *pkt;
7427 	unsigned long flags;
7428 	dhd_dma_buf_t retbuf;
7429 #ifdef REPORT_FATAL_TIMEOUTS
7430 	uint16	dhd_xt_id;
7431 #endif
7432 
7433 	/* Check for ioctl timeout induce flag, which is set by firing
7434 	 * dhd iovar to induce IOCTL timeout. If flag is set,
7435 	 * return from here, which results in an IOCTL timeout.
7436 	 */
7437 	if (dhd->dhd_induce_error == DHD_INDUCE_IOCTL_TIMEOUT) {
7438 		DHD_ERROR(("%s: Inducing resumed on timeout\n", __FUNCTION__));
7439 		return;
7440 	}
7441 
7442 	memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
7443 
7444 	pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id);
7445 
7446 #if defined(DHD_PKTID_AUDIT_RING) && !defined(BCM_ROUTER_DHD)
7447 #ifndef IOCTLRESP_USE_CONSTMEM
7448 	DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_ctrl_map, pkt_id,
7449 		DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
7450 #else
7451 	DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_map_handle_ioctl, pkt_id,
7452 		DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
7453 #endif /* !IOCTLRESP_USE_CONSTMEM */
7454 #endif /* DHD_PKTID_AUDIT_RING && !BCM_ROUTER_DHD */
7455 
7456 	DHD_GENERAL_LOCK(dhd, flags);
7457 	if ((prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) ||
7458 		!(prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
7459 		DHD_ERROR(("%s: received ioctl response with state %02x trans_id = %d\n",
7460 			__FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
7461 		prhex("dhd_prot_ioctcmplt_process:",
7462 			(uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
7463 		DHD_GENERAL_UNLOCK(dhd, flags);
7464 		return;
7465 	}
7466 
7467 	dhd->prot->ioctl_cmplt_time = OSL_LOCALTIME_NS();
7468 
7469 	/* Clear Response pending bit */
7470 	prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING;
7471 	DHD_GENERAL_UNLOCK(dhd, flags);
7472 
7473 #ifndef IOCTLRESP_USE_CONSTMEM
7474 	pkt = dhd_prot_packet_get(dhd, pkt_id, PKTTYPE_IOCTL_RX, TRUE);
7475 #else
7476 	dhd_prot_ioctl_ret_buffer_get(dhd, pkt_id, &retbuf);
7477 	pkt = retbuf.va;
7478 #endif /* !IOCTLRESP_USE_CONSTMEM */
7479 	if (!pkt) {
7480 		DHD_ERROR(("%s: received ioctl response with NULL pkt\n", __FUNCTION__));
7481 		prhex("dhd_prot_ioctcmplt_process:",
7482 			(uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
7483 		return;
7484 	}
7485 
7486 	prot->ioctl_resplen = ltoh16(ioct_resp->resp_len);
7487 	prot->ioctl_status = ltoh16(ioct_resp->compl_hdr.status);
7488 	xt_id = ltoh16(ioct_resp->trans_id);
7489 
7490 	if (xt_id != prot->ioctl_trans_id || prot->curr_ioctl_cmd != ioct_resp->cmd) {
7491 		DHD_ERROR(("%s: transaction id(%d %d) or cmd(%d %d) mismatch\n",
7492 			__FUNCTION__, xt_id, prot->ioctl_trans_id,
7493 			prot->curr_ioctl_cmd, ioct_resp->cmd));
7494 #ifdef REPORT_FATAL_TIMEOUTS
7495 		dhd_stop_cmd_timer(dhd);
7496 #endif /* REPORT_FATAL_TIMEOUTS */
7497 		dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_ERROR);
7498 		dhd_prot_debug_info_print(dhd);
7499 #ifdef DHD_FW_COREDUMP
7500 		if (dhd->memdump_enabled) {
7501 			/* collect core dump */
7502 			dhd->memdump_type = DUMP_TYPE_TRANS_ID_MISMATCH;
7503 			dhd_bus_mem_dump(dhd);
7504 		}
7505 #else
7506 		ASSERT(0);
7507 #endif /* DHD_FW_COREDUMP */
7508 		dhd_schedule_reset(dhd);
7509 		goto exit;
7510 	}
7511 #ifdef REPORT_FATAL_TIMEOUTS
7512 	dhd_xt_id = dhd_get_request_id(dhd);
7513 	if (xt_id == dhd_xt_id) {
7514 		dhd_stop_cmd_timer(dhd);
7515 	} else {
7516 		DHD_ERROR(("%s: Cmd timer not stopped received xt_id %d stored xt_id %d",
7517 			__FUNCTION__, xt_id, dhd_xt_id));
7518 	}
7519 #endif /* REPORT_FATAL_TIMEOUTS */
7520 	DHD_CTL(("IOCTL_COMPLETE: req_id %x transid %d status %x resplen %d\n",
7521 		pkt_id, xt_id, prot->ioctl_status, prot->ioctl_resplen));
7522 
7523 	if (prot->ioctl_resplen > 0) {
7524 #ifndef IOCTLRESP_USE_CONSTMEM
7525 		bcopy(PKTDATA(dhd->osh, pkt), prot->retbuf.va, prot->ioctl_resplen);
7526 #else
7527 		bcopy(pkt, prot->retbuf.va, prot->ioctl_resplen);
7528 #endif /* !IOCTLRESP_USE_CONSTMEM */
7529 	}
7530 
7531 	/* wake up any dhd_os_ioctl_resp_wait() */
7532 	dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_SUCCESS);
7533 
7534 exit:
7535 #ifndef IOCTLRESP_USE_CONSTMEM
7536 	dhd_prot_packet_free(dhd, pkt,
7537 		PKTTYPE_IOCTL_RX, FALSE);
7538 #else
7539 	free_ioctl_return_buffer(dhd, &retbuf);
7540 #endif /* !IOCTLRESP_USE_CONSTMEM */
7541 
7542 	/* Post another ioctl buf to the device */
7543 	if (prot->cur_ioctlresp_bufs_posted > 0) {
7544 		prot->cur_ioctlresp_bufs_posted--;
7545 	}
7546 
7547 	dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
7548 }
7549 
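/*
 * Editorial sketch (not part of the driver build): the two-flag ioctl state
 * checked by dhd_prot_ioctack_process() and dhd_prot_ioctcmplt_process()
 * above. Both flags are typically set when an ioctl is submitted; the ACK
 * clears only MSGBUF_IOCTL_ACK_PENDING, and a completion is accepted only
 * while MSGBUF_IOCTL_RESP_PENDING is still outstanding. The helper name is
 * hypothetical.
 */
static bool
example_ioctl_cmplt_expected(uint16 ioctl_state)
{
	/* a completion is legitimate only once the ACK has been seen
	 * (ACK_PENDING cleared) and a response is still awaited
	 */
	return !(ioctl_state & MSGBUF_IOCTL_ACK_PENDING) &&
		(ioctl_state & MSGBUF_IOCTL_RESP_PENDING);
}
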
7550 int
7551 dhd_prot_check_tx_resource(dhd_pub_t *dhd)
7552 {
7553 	return dhd->prot->no_tx_resource;
7554 }
7555 
7556 #ifdef DHD_PKTTS
7557 /**
7558  * dhd_msgbuf_get_ip_info - this API finds the following (IPv4 and IPv6 are supported)
7559  * 1. pointer to data portion of pkt
7560  * 2. five tuple checksum of pkt
7561  *   = {scr_ip, dst_ip, src_port, dst_port, proto}
7562  * 3. ip_prec
7563  *
7564  * @dhdp: pointer to dhd_pub object
7565  * @pkt: packet pointer
7566  * @ptr: returns pointer to data portion of pkt
7567  * @chksum: returns five tuple checksum of pkt
7568  * @prec: returns ip precedence
7569  * @tcp_seqno: returns tcp sequence number
7570  *
7571  * returns packet length remaining after tcp/udp header or BCME_ERROR.
7572  */
7573 static int
7574 dhd_msgbuf_get_ip_info(dhd_pub_t *dhdp, void *pkt, void **ptr, uint32 *chksum,
7575 	uint32 *prec, uint32 *tcp_seqno, uint32 *tcp_ackno)
7576 {
7577 	char *pdata;
7578 	uint plen;
7579 	uint32 type, len;
7580 	uint32 checksum = 0;
7581 	uint8 dscp_prio = 0;
7582 	struct bcmtcp_hdr *tcp = NULL;
7583 
7584 	pdata = PKTDATA(dhdp->osh, pkt);
7585 	plen = PKTLEN(dhdp->osh, pkt);
7586 
7587 	/* Ethernet header */
7588 	if (plen < ETHER_HDR_LEN) {
7589 		return BCME_ERROR;
7590 	}
7591 	type = ntoh16(((struct ether_header *)pdata)->ether_type);
7592 	pdata += ETHER_HDR_LEN;
7593 	plen -= ETHER_HDR_LEN;
7594 
7595 	if ((type == ETHER_TYPE_IP) ||
7596 		(type == ETHER_TYPE_IPV6)) {
7597 		dscp_prio = (IP_TOS46(pdata) >> IPV4_TOS_PREC_SHIFT);
7598 	}
7599 
7600 	/* IP header (v4 or v6) */
7601 	if (type == ETHER_TYPE_IP) {
7602 		struct ipv4_hdr *iph = (struct ipv4_hdr *)pdata;
7603 		if (plen <= sizeof(*iph)) {
7604 			return BCME_ERROR;
7605 		}
7606 
7607 		len = IPV4_HLEN(iph);
7608 		if (plen <= len || IP_VER(iph) != IP_VER_4 || len < IPV4_MIN_HEADER_LEN) {
7609 			return BCME_ERROR;
7610 		}
7611 
7612 		type = IPV4_PROT(iph);
7613 		pdata += len;
7614 		plen -= len;
7615 
7616 		checksum ^= bcm_compute_xor32((volatile uint32 *)iph->src_ip,
7617 			sizeof(iph->src_ip) / sizeof(uint32));
7618 		checksum ^= bcm_compute_xor32((volatile uint32 *)iph->dst_ip,
7619 			sizeof(iph->dst_ip) / sizeof(uint32));
7620 	} else if (type == ETHER_TYPE_IPV6) {
7621 		struct ipv6_hdr *ip6h = (struct ipv6_hdr *)pdata;
7622 
7623 		if (plen <= IPV6_MIN_HLEN || IP_VER(ip6h) != IP_VER_6) {
7624 			return BCME_ERROR;
7625 		}
7626 
7627 		type = IPV6_PROT(ip6h);
7628 		pdata += IPV6_MIN_HLEN;
7629 		plen -= IPV6_MIN_HLEN;
7630 		if (IPV6_EXTHDR(type)) {
7631 			uint8 proto = 0;
7632 			int32 exth_len = ipv6_exthdr_len(pdata, &proto);
7633 			if (exth_len < 0 || plen <= (uint)exth_len) {
7634 				return BCME_ERROR;
7635 			}
7636 			type = proto;
7637 			pdata += exth_len;
7638 			plen -= exth_len;
7639 		}
7640 
7641 		checksum ^= bcm_compute_xor32((volatile uint32 *)&ip6h->saddr,
7642 			sizeof(ip6h->saddr) / sizeof(uint32));
7643 		checksum ^= bcm_compute_xor32((volatile uint32 *)&ip6h->daddr,
7644 			sizeof(ip6h->saddr) / sizeof(uint32));
7645 	}
7646 
7647 	/* return error if not TCP or UDP */
7648 	if ((type != IP_PROT_UDP) && (type != IP_PROT_TCP)) {
7649 		return BCME_ERROR;
7650 	}
7651 
7652 	/* src_port and dst_port (together 32bit) */
7653 	checksum ^= bcm_compute_xor32((volatile uint32 *)pdata, 1);
7654 	checksum ^= bcm_compute_xor32((volatile uint32 *)&type, 1);
7655 
7656 	if (type == IP_PROT_TCP) {
7657 		tcp = (struct bcmtcp_hdr *)pdata;
7658 		len = TCP_HDRLEN(pdata[TCP_HLEN_OFFSET]) << 2;
7659 	} else { /* IP_PROT_UDP */
7660 		len = sizeof(struct bcmudp_hdr);
7661 	}
7662 
7663 	/* length check */
7664 	if (plen < len) {
7665 		return BCME_ERROR;
7666 	}
7667 
7668 	pdata += len;
7669 	plen -= len;
7670 
7671 	/* update data[0] */
7672 	*ptr = (void *)pdata;
7673 
7674 	/* update fivetuple checksum */
7675 	*chksum = checksum;
7676 
7677 	/* update ip prec */
7678 	*prec = dscp_prio;
7679 
7680 	/* update tcp sequence number */
7681 	if (tcp != NULL) {
7682 		*tcp_seqno = tcp->seq_num;
7683 		*tcp_ackno = tcp->ack_num;
7684 	}
7685 
7686 	return plen;
7687 }
7688 
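/*
 * Editorial sketch (not part of the driver build): the five-tuple flow key
 * built by dhd_msgbuf_get_ip_info() above. The key is a 32-bit XOR fold of
 * {src_ip, dst_ip, src_port/dst_port, protocol}; for IPv4, and ignoring
 * byte-order details, it reduces to the expression below. The helper name is
 * hypothetical; bcm_compute_xor32() is the folding primitive the driver uses.
 */
static uint32
example_ipv4_flow_key(uint32 src_ip, uint32 dst_ip, uint16 src_port,
	uint16 dst_port, uint32 proto)
{
	uint32 key = 0;
	uint32 ports = ((uint32)src_port << 16) | dst_port;

	key ^= src_ip;
	key ^= dst_ip;
	key ^= ports;	/* src and dst port folded as one 32-bit word */
	key ^= proto;
	return key;
}
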
7689 /**
7690  * dhd_msgbuf_send_msg_tx_ts - send pktts tx timestamp to netlink socket
7691  *
7692  * @dhdp: pointer to dhd_pub object
7693  * @pkt: packet pointer
7694  * @fwts: firmware timestamp {fwt1..fwt4}
7695  * @version: pktlat version supported in firmware
7696  */
7697 static void
7698 dhd_msgbuf_send_msg_tx_ts(dhd_pub_t *dhdp, void *pkt, void *fw_ts, uint16 version)
7699 {
7700 	bcm_to_info_tx_ts_t to_tx_info;
7701 	void *ptr = NULL;
7702 	int dlen = 0;
7703 	uint32 checksum = 0;
7704 	uint32 prec = 0;
7705 	pktts_flow_t *flow = NULL;
7706 	uint32 flow_pkt_offset = 0;
7707 	uint32 num_config = 0;
7708 	uint32 tcp_seqno = 0;
7709 	uint32 tcp_ackno = 0;
7710 
7711 	dlen = dhd_msgbuf_get_ip_info(dhdp, pkt, &ptr, &checksum, &prec, &tcp_seqno, &tcp_ackno);
7712 
7713 	flow = dhd_match_pktts_flow(dhdp, checksum, NULL, &num_config);
7714 	if (flow) {
7715 		/* there is valid config for this chksum */
7716 		flow_pkt_offset = flow->pkt_offset;
7717 	} else if (num_config) {
7718 		/* there is valid config + no matching config for this chksum */
7719 		return;
7720 	} else {
7721 		/* there is no valid config. pass all to netlink */
7722 	}
7723 
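	/* build the netlink record: the 5-tuple XOR checksum is used as the flow id, prec is the IP precedence */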
7724 	memset(&to_tx_info, 0, sizeof(to_tx_info));
7725 	to_tx_info.hdr.type = BCM_TS_TX;
7726 	to_tx_info.hdr.flowid = checksum;
7727 	to_tx_info.hdr.prec = prec;
7728 
7729 	/* special case: if flow is not configured, copy tcp seqno and ackno in xbytes */
7730 	if (!flow && tcp_seqno) {
7731 		uint32 *xbytes = (uint32 *)to_tx_info.hdr.xbytes;
7732 
7733 		(void)memcpy_s(&xbytes[0], sizeof(xbytes[0]),
7734 			((uint8 *)&tcp_seqno), sizeof(tcp_seqno));
7735 		(void)memcpy_s(&xbytes[1], sizeof(xbytes[1]),
7736 			((uint8 *)&tcp_ackno), sizeof(tcp_ackno));
7737 	} else if ((dlen > flow_pkt_offset) &&
7738 		((dlen - flow_pkt_offset) >= sizeof(to_tx_info.hdr.xbytes))) {
7739 		(void)memcpy_s(to_tx_info.hdr.xbytes, sizeof(to_tx_info.hdr.xbytes),
7740 			((uint8 *)ptr + flow_pkt_offset), sizeof(to_tx_info.hdr.xbytes));
7741 	}
7742 
7743 	to_tx_info.dhdt0 = DHD_PKT_GET_QTIME(pkt);
7744 	to_tx_info.dhdt5 = OSL_SYSUPTIME_US();
7745 
7746 	if (version == METADATA_VER_1) {
7747 		struct pktts_fwtx_v1 *fwts = (struct pktts_fwtx_v1 *)fw_ts;
7748 
7749 		to_tx_info.hdr.magic = BCM_TS_MAGIC;
7750 
7751 		to_tx_info.fwts[0] = ntohl(fwts->ts[0]);
7752 		to_tx_info.fwts[1] = ntohl(fwts->ts[1]);
7753 		to_tx_info.fwts[2] = ntohl(fwts->ts[2]);
7754 		to_tx_info.fwts[3] = ntohl(fwts->ts[3]);
7755 
7756 		dhd_send_msg_to_ts(NULL, (void *)&to_tx_info, OFFSETOF(bcm_to_info_tx_ts_t, ucts));
7757 	} else if (version == METADATA_VER_2) {
7758 		struct pktts_fwtx_v2 *fwts = (struct pktts_fwtx_v2 *)fw_ts;
7759 
7760 		to_tx_info.hdr.magic = BCM_TS_MAGIC_V2;
7761 
7762 		to_tx_info.fwts[0] = ntohl(fwts->ts[0]);
7763 		to_tx_info.fwts[1] = ntohl(fwts->ts[1]);
7764 		to_tx_info.fwts[2] = ntohl(fwts->ts[2]);
7765 		to_tx_info.fwts[3] = ntohl(fwts->ts[3]);
7766 
7767 		to_tx_info.ucts[0] = ntohl(fwts->ut[0]);
7768 		to_tx_info.ucts[1] = ntohl(fwts->ut[1]);
7769 		to_tx_info.ucts[2] = ntohl(fwts->ut[2]);
7770 		to_tx_info.ucts[3] = ntohl(fwts->ut[3]);
7771 		to_tx_info.ucts[4] = ntohl(fwts->ut[4]);
7772 
7773 		to_tx_info.uccnt[0] = ntohl(fwts->uc[0]);
7774 		to_tx_info.uccnt[1] = ntohl(fwts->uc[1]);
7775 		to_tx_info.uccnt[2] = ntohl(fwts->uc[2]);
7776 		to_tx_info.uccnt[3] = ntohl(fwts->uc[3]);
7777 		to_tx_info.uccnt[4] = ntohl(fwts->uc[4]);
7778 		to_tx_info.uccnt[5] = ntohl(fwts->uc[5]);
7779 		to_tx_info.uccnt[6] = ntohl(fwts->uc[6]);
7780 		to_tx_info.uccnt[7] = ntohl(fwts->uc[7]);
7781 
7782 		dhd_send_msg_to_ts(NULL, (void *)&to_tx_info, sizeof(to_tx_info));
7783 	}
7784 	return;
7785 }
7786 
7787 /**
7788  * dhd_msgbuf_send_msg_rx_ts - send pktts rx timestamp to netlink socket
7789  *
7790  * @dhdp: pointer to dhd_pub object
7791  * @pkt: packet pointer
7792  * @fwr1: firmware timestamp at probe point 1
7793  * @fwr2: firmware timestamp at probe point 2
7794  */
7795 static void
7796 dhd_msgbuf_send_msg_rx_ts(dhd_pub_t *dhdp, void *pkt, uint fwr1, uint fwr2)
7797 {
7798 	bcm_to_info_rx_ts_t to_rx_info;
7799 	void *ptr = NULL;
7800 	int dlen = 0;
7801 	uint32 checksum = 0;
7802 	uint32 prec = 0;
7803 	pktts_flow_t *flow = NULL;
7804 	uint32 flow_pkt_offset = 0;
7805 	uint32 num_config = 0;
7806 	uint32 tcp_seqno = 0;
7807 	uint32 tcp_ackno = 0;
7808 
7809 	dlen = dhd_msgbuf_get_ip_info(dhdp, pkt, &ptr, &checksum, &prec, &tcp_seqno, &tcp_ackno);
7810 
7811 	flow = dhd_match_pktts_flow(dhdp, checksum, NULL, &num_config);
7812 	if (flow) {
7813 		/* there is valid config for this chksum */
7814 		flow_pkt_offset = flow->pkt_offset;
7815 	} else if (num_config) {
7816 		/* there is valid config + no matching config for this chksum */
7817 		return;
7818 	} else {
7819 		/* there is no valid config. pass all to netlink */
7820 	}
7821 
7822 	memset(&to_rx_info, 0, sizeof(to_rx_info));
7823 	to_rx_info.hdr.magic = BCM_TS_MAGIC;
7824 	to_rx_info.hdr.type = BCM_TS_RX;
7825 	to_rx_info.hdr.flowid = checksum;
7826 	to_rx_info.hdr.prec = prec;
7827 
7828 	/* special case: if flow is not configured, copy tcp seqno and ackno in xbytes */
7829 	if (!flow && tcp_seqno) {
7830 		uint32 *xbytes = (uint32 *)to_rx_info.hdr.xbytes;
7831 
7832 		(void)memcpy_s(&xbytes[0], sizeof(xbytes[0]),
7833 			((uint8 *)&tcp_seqno), sizeof(tcp_seqno));
7834 		(void)memcpy_s(&xbytes[1], sizeof(xbytes[1]),
7835 			((uint8 *)&tcp_ackno), sizeof(tcp_ackno));
7836 	} else if ((dlen > flow_pkt_offset) &&
7837 		((dlen - flow_pkt_offset) >= sizeof(to_rx_info.hdr.xbytes))) {
7838 		(void)memcpy_s(to_rx_info.hdr.xbytes, sizeof(to_rx_info.hdr.xbytes),
7839 			((uint8 *)ptr + flow_pkt_offset), sizeof(to_rx_info.hdr.xbytes));
7840 	}
7841 
7842 	to_rx_info.dhdr3 = OSL_SYSUPTIME_US();
7843 
7844 	to_rx_info.fwts[0] = ntohl(fwr1);
7845 	to_rx_info.fwts[1] = ntohl(fwr2);
7846 
7847 	dhd_send_msg_to_ts(NULL, (void *)&to_rx_info, sizeof(to_rx_info));
7848 	return;
7849 }
7850 #endif /* DHD_PKTTS */
7851 
7852 /** called on MSG_TYPE_TX_STATUS message received from dongle */
7853 static void
7854 BCMFASTPATH(dhd_prot_txstatus_process)(dhd_pub_t *dhd, void *msg)
7855 {
7856 	dhd_prot_t *prot = dhd->prot;
7857 	host_txbuf_cmpl_t * txstatus;
7858 	unsigned long flags;
7859 	uint32 pktid;
7860 	void *pkt;
7861 	dmaaddr_t pa;
7862 	uint32 len;
7863 	void *dmah;
7864 	void *secdma;
7865 	bool pkt_fate;
7866 	msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;
7867 #if defined(TX_STATUS_LATENCY_STATS)
7868 	flow_info_t *flow_info;
7869 	uint64 tx_status_latency;
7870 #endif /* TX_STATUS_LATENCY_STATS */
7871 #ifdef AGG_H2D_DB
7872 	msgbuf_ring_t *flow_ring;
7873 #endif /* AGG_H2D_DB */
7874 #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
7875 	dhd_awdl_stats_t *awdl_stats;
7876 	if_flow_lkup_t *if_flow_lkup;
7877 	unsigned long awdl_stats_lock_flags;
7878 	uint8 ifindex;
7879 	uint8 role;
7880 #endif /* DHD_AWDL && AWDL_SLOT_STATS */
7881 	flow_ring_node_t *flow_ring_node;
7882 	uint16 flowid;
7883 #ifdef DHD_PKTTS
7884 	struct metadata_txcmpl_v1 meta_ts_v1;
7885 	struct metadata_txcmpl_v2 meta_ts_v2;
7886 	dhd_dma_buf_t meta_data_buf;
7887 	uint64 addr = 0;
7888 
7889 	BCM_REFERENCE(meta_ts_v1);
7890 	BCM_REFERENCE(meta_ts_v2);
7891 	BCM_REFERENCE(meta_data_buf);
7892 	BCM_REFERENCE(addr);
7893 
7894 	if ((dhd->memdump_type == DUMP_TYPE_PKTID_AUDIT_FAILURE) ||
7895 		(dhd->memdump_type == DUMP_TYPE_PKTID_INVALID)) {
7896 		DHD_ERROR_RLMT(("%s: return as invalid pktid detected\n", __FUNCTION__));
7897 		return;
7898 	}
7899 
7900 	memset(&meta_ts_v1, 0, sizeof(meta_ts_v1));
7901 	memset(&meta_ts_v2, 0, sizeof(meta_ts_v2));
7902 	memset(&meta_data_buf, 0, sizeof(meta_data_buf));
7903 #endif /* DHD_PKTTS */
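	/* decode the tx completion work item and locate the flow ring it belongs to */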
7904 	txstatus = (host_txbuf_cmpl_t *)msg;
7905 
7906 	flowid = txstatus->compl_hdr.flow_ring_id;
7907 	flow_ring_node = DHD_FLOW_RING(dhd, flowid);
7908 #ifdef AGG_H2D_DB
7909 	flow_ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
7910 	OSL_ATOMIC_DEC(dhd->osh, &flow_ring->inflight);
7911 #endif /* AGG_H2D_DB */
7912 
7913 	BCM_REFERENCE(flow_ring_node);
7914 
7915 #ifdef DEVICE_TX_STUCK_DETECT
7916 	/**
7917 	 * Since we got a completion message on this flowid,
7918 	 * update tx_cmpl time stamp
7919 	 */
7920 	flow_ring_node->tx_cmpl = OSL_SYSUPTIME();
7921 	/* update host copy of rd pointer */
7922 #ifdef DHD_HP2P
7923 	if (dhd->prot->d2hring_hp2p_txcpl &&
7924 		flow_ring_node->flow_info.tid == HP2P_PRIO) {
7925 		ring = dhd->prot->d2hring_hp2p_txcpl;
7926 	}
7927 #endif /* DHD_HP2P */
7928 	ring->curr_rd++;
7929 	if (ring->curr_rd >= ring->max_items) {
7930 		ring->curr_rd = 0;
7931 	}
7932 #endif /* DEVICE_TX_STUCK_DETECT */
7933 
7934 	/* locks required to protect circular buffer accesses */
7935 	DHD_RING_LOCK(ring->ring_lock, flags);
7936 	pktid = ltoh32(txstatus->cmn_hdr.request_id);
7937 
7938 	if (dhd->pcie_txs_metadata_enable > 1) {
7939 		/* Return metadata format (little endian):
7940 		 * |<--- txstatus --->|<- metadatalen ->|
7941 		 * |____|____|________|________|________|
7942 		 * |    |    |        |        |> total delay from fetch to report (8-bit 1 = 4ms)
7943 		 * |    |    |        |> ucode delay from enqueue to completion (8-bit 1 = 4ms)
7944 		 * |    |    |> 8-bit reserved (pre-filled with original TX status by caller)
7945 		 * |    |> delay time first fetch to the last fetch (4-bit 1 = 32ms)
7946 		 * |> fetch count (4-bit)
7947 		 */
7948 		printf("TX status[%d] = %04x-%04x -> status = %d (%d/%dms + %d/%dms)\n", pktid,
7949 			ltoh16(txstatus->tx_status_ext), ltoh16(txstatus->tx_status),
7950 			(txstatus->tx_status & WLFC_CTL_PKTFLAG_MASK),
7951 			((txstatus->tx_status >> 12) & 0xf),
7952 			((txstatus->tx_status >> 8) & 0xf) * 32,
7953 			((txstatus->tx_status_ext & 0xff) * 4),
7954 			((txstatus->tx_status_ext >> 8) & 0xff) * 4);
7955 	}
7956 	pkt_fate = TRUE;
7957 
7958 #if defined(DHD_PKTID_AUDIT_RING) && !defined(BCM_ROUTER_DHD)
7959 	DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_tx_map, pktid,
7960 			DHD_DUPLICATE_FREE, msg, D2HRING_TXCMPLT_ITEMSIZE);
7961 #endif /* DHD_PKTID_AUDIT_RING && !BCM_ROUTER_DHD */
7962 
7963 	DHD_MSGBUF_INFO(("txstatus for pktid 0x%04x\n", pktid));
7964 	if (OSL_ATOMIC_DEC_RETURN(dhd->osh, &prot->active_tx_count) < 0) {
7965 		DHD_ERROR(("Extra packets are freed\n"));
7966 	}
7967 	ASSERT(pktid != 0);
7968 
7969 #ifdef DHD_HMAPTEST
7970 
7971 	if ((dhd->prot->hmaptest_tx_active == HMAPTEST_D11_TX_POSTED) &&
7972 		(pktid == dhd->prot->hmaptest_tx_pktid)) {
7973 		DHD_ERROR(("hmaptest: d11read txcpl received sc txbuf pktid=0x%08x\n", pktid));
7974 		DHD_ERROR(("hmaptest: d11read txcpl txstatus=0x%08x\n", txstatus->tx_status));
7975 		DHD_ERROR(("hmaptest: d11read txcpl sc txbuf va=0x%p pa=0x%08x\n",
7976 			dhd->prot->hmap_tx_buf_va, (uint32)PHYSADDRLO(dhd->prot->hmap_tx_buf_pa)));
7977 		dhd->prot->hmaptest_tx_active = HMAPTEST_D11_TX_INACTIVE;
7978 		dhd->prot->hmap_tx_buf_va = NULL;
7979 		dhd->prot->hmap_tx_buf_len = 0;
7980 		PHYSADDRHISET(dhd->prot->hmap_tx_buf_pa, 0);
7981 		PHYSADDRLOSET(dhd->prot->hmap_tx_buf_pa, 0);
7982 		prot->hmaptest.in_progress = FALSE;
7983 	}
7984 	/* original skb is kept as it is because its going to be freed  later in this path */
7985 #endif /* DHD_HMAPTEST */
7986 
7987 #ifdef DHD_PKTTS
7988 	if (dhd_get_pktts_enab(dhd) &&
7989 		dhd->pkt_metadata_buflen) {
7990 		/* Handle the Metadata first */
7991 		meta_data_buf.va = DHD_PKTID_RETREIVE_METADATA(dhd, dhd->prot->pktid_tx_map,
7992 			meta_data_buf.pa, meta_data_buf._alloced, meta_data_buf.dmah, pktid);
7993 		if (meta_data_buf.va) {
7994 			if (dhd->pkt_metadata_version == METADATA_VER_1) {
7995 				memcpy(&meta_ts_v1, meta_data_buf.va, sizeof(meta_ts_v1));
7996 			} else if (dhd->pkt_metadata_version == METADATA_VER_2) {
7997 				memcpy(&meta_ts_v2, meta_data_buf.va, sizeof(meta_ts_v2));
7998 			}
7999 			memcpy(&addr, &meta_data_buf.pa, sizeof(meta_data_buf.pa));
8000 			DHD_TRACE(("%s(): pktid %d retrieved mdata buffer %p "
8001 				"pa: %llx dmah: %p\r\n",  __FUNCTION__,
8002 				pktid, meta_data_buf.va, addr,
8003 				meta_data_buf.dmah));
8004 		}
8005 	}
8006 #endif /* DHD_PKTTS */
8007 
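	/* translate the completion's pktid back to the native packet and the DMA mapping saved at txpost time */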
8008 	pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
8009 		pa, len, dmah, secdma, PKTTYPE_DATA_TX);
8010 	if (!pkt) {
8011 		DHD_RING_UNLOCK(ring->ring_lock, flags);
8012 #ifdef DHD_PKTTS
8013 		/*
8014 		 * Call the free function after the Ring Lock is released.
8015 		 * This is because pcie_free_consistent is not supposed to be
8016 		 * called with Interrupts Disabled
8017 		 */
8018 		if (meta_data_buf.va) {
8019 			DMA_FREE_CONSISTENT(dhd->osh, meta_data_buf.va, meta_data_buf._alloced,
8020 				meta_data_buf.pa, meta_data_buf.dmah);
8021 		}
8022 #endif /* DHD_PKTTS */
8023 		DHD_ERROR(("%s: received txstatus with NULL pkt\n", __FUNCTION__));
8024 		prhex("dhd_prot_txstatus_process:", (uchar *)msg, D2HRING_TXCMPLT_ITEMSIZE);
8025 #ifdef DHD_FW_COREDUMP
8026 		if (dhd->memdump_enabled) {
8027 			/* collect core dump */
8028 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
8029 			dhd_bus_mem_dump(dhd);
8030 		}
8031 #else
8032 		ASSERT(0);
8033 #endif /* DHD_FW_COREDUMP */
8034 		return;
8035 	}
8036 
8037 	if (DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map) == DHD_PKTID_MIN_AVAIL_COUNT) {
8038 		DHD_ERROR_RLMT(("%s: start tx queue as min pktids are available\n",
8039 			__FUNCTION__));
8040 		prot->pktid_txq_stop_cnt--;
8041 		dhd->prot->no_tx_resource = FALSE;
8042 		dhd_bus_start_queue(dhd->bus);
8043 	}
8044 
8045 	DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
8046 
8047 #ifdef TX_STATUS_LATENCY_STATS
8048 	/* update the tx status latency for flowid */
8049 	flow_info = &flow_ring_node->flow_info;
8050 	tx_status_latency = OSL_SYSUPTIME_US() - DHD_PKT_GET_QTIME(pkt);
8051 #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
8052 	if (dhd->pkt_latency > 0 &&
8053 		tx_status_latency > (dhd->pkt_latency)) {
8054 		DHD_ERROR(("Latency: %llu > %u aw_cnt: %u \n",
8055 			tx_status_latency, dhd->pkt_latency,
8056 			dhd->awdl_aw_counter));
8057 	}
8058 #endif /*  defined(DHD_AWDL) && defined(AWDL_SLOT_STATS) */
8059 	flow_info->cum_tx_status_latency += tx_status_latency;
8060 	flow_info->num_tx_status++;
8061 #endif /* TX_STATUS_LATENCY_STATS */
8062 #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
8063 	/* update the tx status latency when this AWDL slot is active */
8064 	if_flow_lkup = (if_flow_lkup_t *)dhd->if_flow_lkup;
8065 	ifindex = flow_ring_node->flow_info.ifindex;
8066 	role = if_flow_lkup[ifindex].role;
8067 	if (role == WLC_E_IF_ROLE_AWDL) {
8068 		awdl_stats = &dhd->awdl_stats[dhd->awdl_tx_status_slot];
8069 		DHD_AWDL_STATS_LOCK(dhd->awdl_stats_lock, awdl_stats_lock_flags);
8070 		awdl_stats->cum_tx_status_latency += tx_status_latency;
8071 		awdl_stats->num_tx_status++;
8072 		DHD_AWDL_STATS_UNLOCK(dhd->awdl_stats_lock, awdl_stats_lock_flags);
8073 	}
8074 #endif /* DHD_AWDL && AWDL_SLOT_STATS */
8075 
8076 #ifdef HOST_SFH_LLC
8077 	if (dhd->host_sfhllc_supported) {
8078 		struct ether_header eth;
8079 		if (!memcpy_s(&eth, sizeof(eth),
8080 			PKTDATA(dhd->osh, pkt), sizeof(eth))) {
8081 			if (dhd_8023_llc_to_ether_hdr(dhd->osh,
8082 				&eth, pkt) != BCME_OK) {
8083 				DHD_ERROR_RLMT(("%s: host sfh llc"
8084 					" converstion to ether failed\n",
8085 					__FUNCTION__));
8086 			}
8087 		}
8088 	}
8089 #endif /* HOST_SFH_LLC */
8090 
8091 #ifdef DMAMAP_STATS
8092 	dhd->dma_stats.txdata--;
8093 	dhd->dma_stats.txdata_sz -= len;
8094 #endif /* DMAMAP_STATS */
8095 	pkt_fate = dhd_dbg_process_tx_status(dhd, pkt, pktid,
8096 		ltoh16(txstatus->compl_hdr.status) & WLFC_CTL_PKTFLAG_MASK);
8097 #ifdef DHD_PKT_LOGGING
8098 	if (dhd->d11_tx_status) {
8099 		uint16 status = ltoh16(txstatus->compl_hdr.status) &
8100 			WLFC_CTL_PKTFLAG_MASK;
8101 		dhd_handle_pktdata(dhd, ltoh32(txstatus->cmn_hdr.if_id),
8102 			pkt, (uint8 *)PKTDATA(dhd->osh, pkt), pktid, len,
8103 			&status, NULL, TRUE, FALSE, TRUE);
8104 	}
8105 #endif /* DHD_PKT_LOGGING */
8106 #if defined(BCMPCIE) && (defined(LINUX) || defined(OEM_ANDROID) || defined(DHD_EFI))
8107 	dhd_txcomplete(dhd, pkt, pkt_fate);
8108 #ifdef DHD_4WAYM4_FAIL_DISCONNECT
8109 	dhd_eap_txcomplete(dhd, pkt, pkt_fate, txstatus->cmn_hdr.if_id);
8110 #endif /* DHD_4WAYM4_FAIL_DISCONNECT */
8111 #endif /* BCMPCIE && (LINUX || OEM_ANDROID || DHD_EFI) */
8112 
8113 #ifdef DHD_PKTTS
8114 	if (dhd_get_pktts_enab(dhd) == TRUE) {
8115 		if (dhd->pkt_metadata_buflen) {
8116 			/* firmware mark tx_pktts.tref with 0xFFFFFFFF for errors */
8117 			if ((dhd->pkt_metadata_version == METADATA_VER_1) &&
8118 					(ltoh32(meta_ts_v1.tref) != 0xFFFFFFFF)) {
8119 				struct pktts_fwtx_v1 fwts;
8120 				fwts.ts[0] = (uint32)htonl(ltoh32(meta_ts_v1.tref));
8121 				fwts.ts[1] = (uint32)htonl(ltoh32(meta_ts_v1.tref) +
8122 					ltoh16(meta_ts_v1.d_t2));
8123 				fwts.ts[2] = (uint32)htonl(ltoh32(meta_ts_v1.tref) +
8124 					ltoh16(meta_ts_v1.d_t3));
8125 				fwts.ts[3] = (uint32)htonl(ltoh32(meta_ts_v1.tref) +
8126 					ltoh16(meta_ts_v1.d_t4));
8127 				/* check for overflow */
8128 				if (ntohl(fwts.ts[3]) > ntohl(fwts.ts[0])) {
8129 					/* send tx timestamp to netlink socket */
8130 					dhd_msgbuf_send_msg_tx_ts(dhd, pkt, &fwts,
8131 						dhd->pkt_metadata_version);
8132 				}
8133 			} else if ((dhd->pkt_metadata_version == METADATA_VER_2) &&
8134 					(ltoh32(meta_ts_v2.tref) != 0xFFFFFFFF)) {
8135 				struct pktts_fwtx_v2 fwts;
8136 				fwts.ts[0] = (uint32)htonl(ltoh32(meta_ts_v2.tref));
8137 				fwts.ts[1] = (uint32)htonl(ltoh32(meta_ts_v2.tref) +
8138 					ltoh16(meta_ts_v2.d_t2));
8139 				fwts.ts[2] = (uint32)htonl(ltoh32(meta_ts_v2.tref) +
8140 					ltoh16(meta_ts_v2.d_t3));
8141 				fwts.ts[3] = (uint32)htonl(ltoh32(meta_ts_v2.tref) +
8142 					ltoh16(meta_ts_v2.d_t4));
8143 
8144 				fwts.ut[0] = (uint32)htonl(ltoh32(meta_ts_v2.tref) +
8145 					ltoh16(meta_ts_v2.u_t1));
8146 				fwts.ut[1] = (uint32)htonl(ltoh16(meta_ts_v2.u_t2));
8147 				fwts.ut[2] = (uint32)htonl(ltoh16(meta_ts_v2.u_t3));
8148 				fwts.ut[3] = (uint32)htonl(ltoh16(meta_ts_v2.u_t4));
8149 				fwts.ut[4] = (uint32)htonl(ltoh32(meta_ts_v2.tref) +
8150 					ltoh16(meta_ts_v2.u_t5));
8151 
8152 				fwts.uc[0] = (uint32)htonl(ltoh32(meta_ts_v2.u_c1));
8153 				fwts.uc[1] = (uint32)htonl(ltoh32(meta_ts_v2.u_c2));
8154 				fwts.uc[2] = (uint32)htonl(ltoh32(meta_ts_v2.u_c3));
8155 				fwts.uc[3] = (uint32)htonl(ltoh32(meta_ts_v2.u_c4));
8156 				fwts.uc[4] = (uint32)htonl(ltoh32(meta_ts_v2.u_c5));
8157 				fwts.uc[5] = (uint32)htonl(ltoh32(meta_ts_v2.u_c6));
8158 				fwts.uc[6] = (uint32)htonl(ltoh32(meta_ts_v2.u_c7));
8159 				fwts.uc[7] = (uint32)htonl(ltoh32(meta_ts_v2.u_c8));
8160 
8161 				DHD_INFO(("uct1:%x uct2:%x uct3:%x uct4:%x uct5:%x\n",
8162 					ntohl(fwts.ut[0]), ntohl(fwts.ut[1]), ntohl(fwts.ut[2]),
8163 					ntohl(fwts.ut[3]), ntohl(fwts.ut[4])));
8164 				DHD_INFO(("ucc1:%x ucc2:%x ucc3:%x ucc4:%x"
8165 					" ucc5:%x ucc6:%x ucc7:%x ucc8:%x\n",
8166 					ntohl(fwts.uc[0]), ntohl(fwts.uc[1]), ntohl(fwts.uc[2]),
8167 					ntohl(fwts.uc[3]), ntohl(fwts.uc[4]), ntohl(fwts.uc[5]),
8168 					ntohl(fwts.uc[6]), ntohl(fwts.uc[7])));
8169 				/* check for overflow */
8170 				if (ntohl(fwts.ts[3]) > ntohl(fwts.ts[0])) {
8171 					/* send tx timestamp to netlink socket */
8172 					dhd_msgbuf_send_msg_tx_ts(dhd, pkt, &fwts,
8173 						dhd->pkt_metadata_version);
8174 				}
8175 			}
8176 		} else {
8177 			/* firmware mark tx_pktts.tref with 0xFFFFFFFF for errors */
8178 			if (ltoh32(txstatus->tx_pktts.tref) != 0xFFFFFFFF) {
8179 				struct pktts_fwtx_v1 fwts;
8180 
8181 				fwts.ts[0] = (uint32)htonl(ltoh32(txstatus->tx_pktts.tref));
8182 				fwts.ts[1] = (uint32)htonl(ltoh32(txstatus->tx_pktts.tref) +
8183 					ltoh16(txstatus->tx_pktts.d_t2));
8184 				fwts.ts[2] = (uint32)htonl(ltoh32(txstatus->tx_pktts.tref) +
8185 					ltoh16(txstatus->tx_pktts.d_t3));
8186 				fwts.ts[3] = (uint32)htonl(ltoh32(txstatus->tx_pktts.tref) +
8187 					ltoh16(txstatus->tx_pktts.d_t4));
8188 
8189 				/* check for overflow */
8190 				if (ntohl(fwts.ts[3]) > ntohl(fwts.ts[0])) {
8191 					/* send tx timestamp to netlink socket */
8192 					dhd_msgbuf_send_msg_tx_ts(dhd, pkt, &fwts, METADATA_VER_1);
8193 				}
8194 			}
8195 		}
8196 	}
8197 #endif /* DHD_PKTTS */
8198 
8199 #if DHD_DBG_SHOW_METADATA
8200 	if (dhd->prot->metadata_dbg &&
8201 			dhd->prot->tx_metadata_offset && txstatus->metadata_len) {
8202 		uchar *ptr;
8203 		/* The Ethernet header of TX frame was copied and removed.
8204 		 * Here, move the data pointer forward by Ethernet header size.
8205 		 */
8206 		PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN);
8207 		ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->tx_metadata_offset);
8208 		bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len);
8209 		dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len);
8210 	}
8211 #endif /* DHD_DBG_SHOW_METADATA */
8212 
8213 #ifdef DHD_HP2P
8214 	if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
8215 #ifdef DHD_HP2P_DEBUG
8216 		bcm_print_bytes("txcpl", (uint8 *)txstatus, sizeof(host_txbuf_cmpl_t));
8217 #endif /* DHD_HP2P_DEBUG */
8218 		dhd_update_hp2p_txstats(dhd, txstatus);
8219 	}
8220 #endif /* DHD_HP2P */
8221 
8222 #ifdef DHD_TIMESYNC
8223 	if (dhd->prot->tx_ts_log_enabled) {
8224 		dhd_pkt_parse_t parse;
8225 		ts_timestamp_t *ts = (ts_timestamp_t *)&(txstatus->ts);
8226 
8227 		memset(&parse, 0, sizeof(parse));
8228 		dhd_parse_proto(PKTDATA(dhd->osh, pkt), &parse);
8229 
8230 		if (parse.proto == IP_PROT_ICMP)
8231 			dhd_timesync_log_tx_timestamp(dhd->ts,
8232 				txstatus->compl_hdr.flow_ring_id,
8233 				txstatus->cmn_hdr.if_id,
8234 				ts->low, ts->high, &parse);
8235 	}
8236 #endif /* DHD_TIMESYNC */
8237 
8238 #ifdef DHD_LBUF_AUDIT
8239 	PKTAUDIT(dhd->osh, pkt);
8240 #endif
8241 	DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id,
8242 		txstatus->tx_status);
8243 	DHD_RING_UNLOCK(ring->ring_lock, flags);
8244 #ifdef DHD_PKTTS
8245 	if (meta_data_buf.va) {
8246 		DMA_FREE_CONSISTENT(dhd->osh, meta_data_buf.va, meta_data_buf._alloced,
8247 			meta_data_buf.pa, meta_data_buf.dmah);
8248 	}
8249 #endif /* DHD_PKTTS */
8250 #ifdef DHD_MEM_STATS
8251 	DHD_MEM_STATS_LOCK(dhd->mem_stats_lock, flags);
8252 	DHD_MSGBUF_INFO(("%s txpath_mem: %llu PKTLEN: %d\n",
8253 		__FUNCTION__, dhd->txpath_mem, PKTLEN(dhd->osh, pkt)));
8254 	dhd->txpath_mem -= PKTLEN(dhd->osh, pkt);
8255 	DHD_MEM_STATS_UNLOCK(dhd->mem_stats_lock, flags);
8256 #endif /* DHD_MEM_STATS */
8257 	PKTFREE(dhd->osh, pkt, TRUE);
8258 
8259 	return;
8260 } /* dhd_prot_txstatus_process */
8261 
8262 /* FIXME: assuming that it is getting inline data related to the event data */
8263 /** called on MSG_TYPE_WL_EVENT message received from dongle */
8264 static void
8265 dhd_prot_event_process(dhd_pub_t *dhd, void *msg)
8266 {
8267 	wlevent_req_msg_t *evnt;
8268 	uint32 bufid;
8269 	uint16 buflen;
8270 	int ifidx = 0;
8271 	void* pkt;
8272 	dhd_prot_t *prot = dhd->prot;
8273 
8274 	/* Event complete header */
8275 	evnt = (wlevent_req_msg_t *)msg;
8276 	bufid = ltoh32(evnt->cmn_hdr.request_id);
8277 
8278 #if defined(DHD_PKTID_AUDIT_RING) && !defined(BCM_ROUTER_DHD)
8279 	DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, bufid,
8280 			DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
8281 #endif /* DHD_PKTID_AUDIT_RING && !BCM_ROUTER_DHD */
8282 
8283 	buflen = ltoh16(evnt->event_data_len);
8284 
8285 	ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr);
8286 	/* FIXME: check the event status */
8287 
8288 	/* Post another rxbuf to the device */
8289 	if (prot->cur_event_bufs_posted)
8290 		prot->cur_event_bufs_posted--;
8291 	dhd_msgbuf_rxbuf_post_event_bufs(dhd);
8292 
8293 	pkt = dhd_prot_packet_get(dhd, bufid, PKTTYPE_EVENT_RX, TRUE);
8294 
8295 	if (!pkt) {
8296 		DHD_ERROR(("%s: pkt is NULL for pktid %d\n", __FUNCTION__, bufid));
8297 		return;
8298 	}
8299 
8300 #if !defined(BCM_ROUTER_DHD)
8301 	/* FIXME: make sure the length is more than dataoffset */
8302 	/* DMA RX offset updated through shared area */
8303 	if (dhd->prot->rx_dataoffset)
8304 		PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
8305 #endif /* !BCM_ROUTER_DHD */
8306 
8307 	PKTSETLEN(dhd->osh, pkt, buflen);
8308 #ifdef DHD_LBUF_AUDIT
8309 	PKTAUDIT(dhd->osh, pkt);
8310 #endif
8311 	dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
8312 }
8313 
8314 #if !defined(BCM_ROUTER_DHD)
8315 /** called on MSG_TYPE_INFO_BUF_CMPLT message received from dongle */
8316 static void
8317 BCMFASTPATH(dhd_prot_process_infobuf_complete)(dhd_pub_t *dhd, void* buf)
8318 {
8319 	info_buf_resp_t *resp;
8320 	uint32 pktid;
8321 	uint16 buflen;
8322 	void * pkt;
8323 
8324 	resp = (info_buf_resp_t *)buf;
8325 	pktid = ltoh32(resp->cmn_hdr.request_id);
8326 	buflen = ltoh16(resp->info_data_len);
8327 
8328 #ifdef DHD_PKTID_AUDIT_RING
8329 	DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
8330 			DHD_DUPLICATE_FREE, buf, D2HRING_INFO_BUFCMPLT_ITEMSIZE);
8331 #endif /* DHD_PKTID_AUDIT_RING */
8332 
8333 	DHD_MSGBUF_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d, rx_dataoffset %d\n",
8334 		pktid, buflen, resp->cmn_hdr.flags, ltoh16(resp->seqnum),
8335 		dhd->prot->rx_dataoffset));
8336 
8337 	if (dhd->debug_buf_dest_support) {
8338 		if (resp->dest < DEBUG_BUF_DEST_MAX) {
8339 			dhd->debug_buf_dest_stat[resp->dest]++;
8340 		}
8341 	}
8342 
8343 	pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_INFO_RX, TRUE);
8344 	if (!pkt)
8345 		return;
8346 
8347 #if !defined(BCM_ROUTER_DHD)
8348 	/* FIXME: make sure the length is more than dataoffset */
8349 	/* DMA RX offset updated through shared area */
8350 	if (dhd->prot->rx_dataoffset)
8351 		PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
8352 #endif /* !BCM_ROUTER_DHD */
8353 
8354 	PKTSETLEN(dhd->osh, pkt, buflen);
8355 #ifdef DHD_LBUF_AUDIT
8356 	PKTAUDIT(dhd->osh, pkt);
8357 #endif
8358 	/* info ring "debug" data, which is not a 802.3 frame, is sent/hacked with a
8359 	 * special ifidx of -1.  This is just internal to dhd to get the data to
8360 	 * dhd_linux.c:dhd_rx_frame() from here (dhd_prot_infobuf_cmplt_process).
8361 	 */
8362 	dhd_bus_rx_frame(dhd->bus, pkt, DHD_DUMMY_INFO_IF /* ifidx HACK */, 1);
8363 }
8364 #endif /* !BCM_ROUTER_DHD */
8365 
8366 /** called on MSG_TYPE_SNAPSHOT_CMPLT message received from dongle */
8367 static void
8368 BCMFASTPATH(dhd_prot_process_snapshot_complete)(dhd_pub_t *dhd, void *buf)
8369 {
8370 #ifdef SNAPSHOT_UPLOAD
8371 	dhd_prot_t *prot = dhd->prot;
8372 	snapshot_resp_t *resp;
8373 	uint16 status;
8374 
8375 	resp = (snapshot_resp_t *)buf;
8376 
8377 	/* check completion status */
8378 	status = resp->compl_hdr.status;
8379 	if (status != BCMPCIE_SUCCESS) {
8380 		DHD_ERROR(("%s: failed: %s (%d)\n",
8381 			__FUNCTION__,
8382 			status == BCMPCIE_BT_DMA_ERR ? "DMA_ERR" :
8383 			status == BCMPCIE_BT_DMA_DESCR_FETCH_ERR ?
8384 				"DMA_DESCR_ERR" :
8385 			status == BCMPCIE_SNAPSHOT_ERR ? "SNAPSHOT_ERR" :
8386 			status == BCMPCIE_NOT_READY ? "NOT_READY" :
8387 			status == BCMPCIE_INVALID_DATA ? "INVALID_DATA" :
8388 			status == BCMPCIE_NO_RESPONSE ? "NO_RESPONSE" :
8389 			status == BCMPCIE_NO_CLOCK ? "NO_CLOCK" :
8390 			"", status));
8391 	}
8392 
8393 	/* length may be truncated if error occurred */
8394 	prot->snapshot_upload_len = ltoh32(resp->resp_len);
8395 	prot->snapshot_type = resp->type;
8396 	prot->snapshot_cmpl_pending = FALSE;
8397 
8398 	DHD_INFO(("%s id 0x%04x, phase 0x%02x, resp_len %d, type %d\n",
8399 		__FUNCTION__, ltoh32(resp->cmn_hdr.request_id),
8400 		resp->cmn_hdr.flags,
8401 		prot->snapshot_upload_len, prot->snapshot_type));
8402 #endif	/* SNAPSHOT_UPLOAD */
8403 }
8404 
8405 #ifdef BTLOG
8406 /** called on MSG_TYPE_BT_LOG_CMPLT message received from dongle */
8407 static void
8408 BCMFASTPATH(dhd_prot_process_btlog_complete)(dhd_pub_t *dhd, void* buf)
8409 {
8410 	info_buf_resp_t *resp;
8411 	uint32 pktid;
8412 	uint16 buflen;
8413 	void * pkt;
8414 
8415 	resp = (info_buf_resp_t *)buf;
8416 	pktid = ltoh32(resp->cmn_hdr.request_id);
8417 	buflen = ltoh16(resp->info_data_len);
8418 
8419 	/* check completion status */
8420 	if (resp->compl_hdr.status != BCMPCIE_SUCCESS) {
8421 		DHD_ERROR(("%s: failed completion status %d\n",
8422 			__FUNCTION__, resp->compl_hdr.status));
8423 		return;
8424 	}
8425 
8426 #ifdef DHD_PKTID_AUDIT_RING
8427 	DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
8428 			DHD_DUPLICATE_FREE, buf, D2HRING_INFO_BUFCMPLT_ITEMSIZE);
8429 #endif /* DHD_PKTID_AUDIT_RING */
8430 
8431 	DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d, rx_dataoffset %d\n",
8432 		pktid, buflen, resp->cmn_hdr.flags, ltoh16(resp->seqnum),
8433 		dhd->prot->rx_dataoffset));
8434 
8435 	pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_INFO_RX, TRUE);
8436 
8437 	if (!pkt)
8438 		return;
8439 
8440 #if !defined(BCM_ROUTER_DHD)
8441 	/* FIXME: make sure the length is more than dataoffset */
8442 	/* DMA RX offset updated through shared area */
8443 	if (dhd->prot->rx_dataoffset)
8444 		PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
8445 #endif /* !BCM_ROUTER_DHD */
8446 
8447 	PKTSETLEN(dhd->osh, pkt, buflen);
8448 	PKTSETNEXT(dhd->osh, pkt, NULL);
8449 
8450 	dhd_bus_rx_bt_log(dhd->bus, pkt);
8451 }
8452 #endif	/* BTLOG */
8453 
8454 /** Stop protocol: sync w/dongle state. */
8455 void dhd_prot_stop(dhd_pub_t *dhd)
8456 {
8457 	ASSERT(dhd);
8458 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
8459 
8460 #if defined(NDIS)
8461 	if (dhd->prot) {
8462 		DHD_NATIVE_TO_PKTID_RESET(dhd, dhd->prot->pktid_ctrl_map);
8463 		DHD_NATIVE_TO_PKTID_RESET(dhd, dhd->prot->pktid_rx_map);
8464 		DHD_NATIVE_TO_PKTID_RESET(dhd, dhd->prot->pktid_tx_map);
8465 #if defined(IOCTLRESP_USE_CONSTMEM)
8466 		DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, dhd->prot->pktid_map_handle_ioctl);
8467 #endif /* IOCTLRESP_USE_CONSTMEM */
8468 	}
8469 #endif /* NDIS */
8470 }
8471 
8472 /* Add any protocol-specific data header.
8473  * Caller must reserve prot_hdrlen prepend space.
8474  */
8475 void
8476 BCMFASTPATH(dhd_prot_hdrpush)(dhd_pub_t *dhd, int ifidx, void *PKTBUF)
8477 {
8478 	return;
8479 }
8480 
8481 uint
8482 dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF)
8483 {
8484 	return 0;
8485 }
8486 
8487 #define PKTBUF pktbuf
8488 
8489 /**
8490  * Called when a tx ethernet packet has been dequeued from a flow queue, and has to be inserted in
8491  * the corresponding flow ring.
8492  */
8493 int
8494 BCMFASTPATH(dhd_prot_txdata)(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx)
8495 {
8496 	unsigned long flags;
8497 	dhd_prot_t *prot = dhd->prot;
8498 	host_txbuf_post_t *txdesc = NULL;
8499 	dmaaddr_t pa, meta_pa;
8500 	uint8 *pktdata;
8501 	uint32 pktlen;
8502 	uint32 pktid;
8503 	uint8	prio;
8504 	uint16 flowid = 0;
8505 	uint16 alloced = 0;
8506 	uint16	headroom;
8507 	msgbuf_ring_t *ring;
8508 	flow_ring_table_t *flow_ring_table;
8509 	flow_ring_node_t *flow_ring_node;
8510 #if defined(BCMINTERNAL) && defined(LINUX)
8511 	void *pkt_to_free = NULL;
8512 #endif /* BCMINTERNAL && LINUX */
8513 #ifdef DHD_PKTTS
8514 	dhd_dma_buf_t	meta_data_buf;
8515 	uint16	meta_data_buf_len = dhd->pkt_metadata_buflen;
8516 	uint64 addr = 0;
8517 #endif /* DHD_PKTTS */
8518 	void *big_pktbuf = NULL;
8519 	uint8 dhd_udr = FALSE;
8520 	bool host_sfh_llc_reqd = dhd->host_sfhllc_supported;
8521 	bool llc_inserted = FALSE;
8522 
8523 	BCM_REFERENCE(llc_inserted);
8524 #ifdef PCIE_INB_DW
8525 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) {
8526 		DHD_ERROR(("failed to increment hostactive_devwake\n"));
8527 		return BCME_ERROR;
8528 	}
8529 #endif /* PCIE_INB_DW */
8530 
8531 	if (dhd->flow_ring_table == NULL) {
8532 		DHD_ERROR(("dhd flow_ring_table is NULL\n"));
8533 		goto fail;
8534 	}
8535 
8536 #ifdef DHD_PCIE_PKTID
8537 		if (!DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map)) {
8538 			if (dhd->prot->pktid_depleted_cnt == DHD_PKTID_DEPLETED_MAX_COUNT) {
8539 				DHD_ERROR(("%s: stop tx queue as pktid_depleted_cnt maxed\n",
8540 					__FUNCTION__));
8541 				prot->pktid_txq_stop_cnt++;
8542 				dhd_bus_stop_queue(dhd->bus);
8543 				dhd->prot->no_tx_resource = TRUE;
8544 			}
8545 			dhd->prot->pktid_depleted_cnt++;
8546 			goto fail;
8547 		} else {
8548 			dhd->prot->pktid_depleted_cnt = 0;
8549 		}
8550 #endif /* DHD_PCIE_PKTID */
8551 
8552 	if (dhd->dhd_induce_error == DHD_INDUCE_TX_BIG_PKT) {
8553 		if ((big_pktbuf = PKTGET(dhd->osh, DHD_FLOWRING_TX_BIG_PKT_SIZE, TRUE)) == NULL) {
8554 			DHD_ERROR(("%s:%d: PKTGET for txbuf failed\n", __FUNCTION__, __LINE__));
8555 			goto fail;
8556 		}
8557 
8558 		memset(PKTDATA(dhd->osh, big_pktbuf), 0xff, DHD_FLOWRING_TX_BIG_PKT_SIZE);
8559 		DHD_ERROR(("PKTBUF len = %d big_pktbuf len = %d\n", PKTLEN(dhd->osh, PKTBUF),
8560 				PKTLEN(dhd->osh, big_pktbuf)));
8561 		if (memcpy_s(PKTDATA(dhd->osh, big_pktbuf), DHD_FLOWRING_TX_BIG_PKT_SIZE,
8562 				PKTDATA(dhd->osh, PKTBUF), PKTLEN(dhd->osh, PKTBUF)) != BCME_OK) {
8563 			DHD_ERROR(("%s:%d: memcpy_s big_pktbuf failed\n", __FUNCTION__, __LINE__));
8564 			ASSERT(0);
8565 		}
8566 	}
8567 
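	/* look up the flow ring this packet was assigned to when it was enqueued */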
8568 	flowid = DHD_PKT_GET_FLOWID(PKTBUF);
8569 	flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
8570 	flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
8571 
8572 	ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
8573 
8574 	/*
8575 	 * XXX:
8576 	 * JIRA SW4349-436:
8577 	 * Copying the TX Buffer to an SKB that lives in the DMA Zone
8578 	 * is done here. Previously this was done from dhd_stat_xmit
8579 	 * On conditions where the Host is pumping heavy traffic to
8580 	 * the dongle, we see that the Queue that is backing up the
8581 	 * flow rings is getting full and holds the precious memory
8582 	 * from DMA Zone, leading the host to run out of memory in DMA
8583 	 * Zone. So after this change the back up queue would continue to
8584 	 * hold the pointers from Network Stack, just before putting
8585 	 * the PHY ADDR in the flow rings, we'll do the copy.
8586 	 */
8587 #if defined(BCMINTERNAL) && defined(LINUX)
8588 	if (osl_is_flag_set(dhd->osh, OSL_PHYS_MEM_LESS_THAN_16MB)) {
8589 		struct sk_buff *skb;
8590 		/*
8591 		 * We are about to add the Ethernet header and send out,
8592 		 * copy the skb here.
8593 		 */
8594 		skb = skb_copy(PKTBUF, GFP_DMA);
8595 		if (skb == NULL) {
8596 			/*
8597 			 * Memory allocation failed, the old packet can
8598 			 * live in the queue, return BCME_NORESOURCE so
8599 			 * the caller re-queues this packet
8600 			 */
8601 			DHD_ERROR(("%s: skb_copy(DMA) failed\n", __FUNCTION__));
8602 			goto fail;
8603 		}
8604 
8605 		/*
8606 		 * Now we have copied the SKB to GFP_DMA memory, make the
8607 		 * rest of the code operate on this new SKB. Hold on to
8608 		 * the original SKB. If we don't get the pkt id or flow ring
8609 		 * space we'll free the Zone memory and return "no resource"
8610 		 * so the caller would re-queue the original SKB.
8611 		 */
8612 		pkt_to_free = PKTBUF;
8613 		PKTBUF = skb;
8614 	}
8615 #endif	/* BCMINTERNAL && LINUX */
8616 
8617 	if (dhd->dhd_induce_error == DHD_INDUCE_TX_BIG_PKT && big_pktbuf) {
8618 		PKTFREE(dhd->osh, PKTBUF, TRUE);
8619 		PKTBUF = big_pktbuf;
8620 	}
8621 
8622 	DHD_RING_LOCK(ring->ring_lock, flags);
8623 
8624 	/* Create a unique 32-bit packet id */
8625 	pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_tx_map,
8626 		PKTBUF, PKTTYPE_DATA_TX);
8627 #if defined(DHD_PCIE_PKTID)
8628 	if (pktid == DHD_PKTID_INVALID) {
8629 		DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
8630 		/*
8631 		 * If we return error here, the caller would queue the packet
8632 		 * again. So we'll just free the skb allocated in DMA Zone.
8633 		 * Since we have not freed the original SKB yet the caller would
8634 		 * requeue the same.
8635 		 */
8636 		goto err_no_res_pktfree;
8637 	}
8638 #endif /* DHD_PCIE_PKTID */
8639 
8640 	/* Reserve space in the circular buffer */
8641 	txdesc = (host_txbuf_post_t *)
8642 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
8643 	if (txdesc == NULL) {
8644 		DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n",
8645 			__FUNCTION__, __LINE__, OSL_ATOMIC_READ(dhd->osh, &prot->active_tx_count)));
8646 		goto err_free_pktid;
8647 	}
8648 	txdesc->flags = 0;
8649 
8650 	/* Extract the data pointer and length information */
8651 	pktdata = PKTDATA(dhd->osh, PKTBUF);
8652 	pktlen  = PKTLEN(dhd->osh, PKTBUF);
8653 
8654 	/* TODO: XXX: re-look into dropped packets */
8655 	DHD_DBG_PKT_MON_TX(dhd, PKTBUF, pktid);
8656 
8657 	dhd_handle_pktdata(dhd, ifidx, PKTBUF, pktdata, pktid,
8658 		pktlen, NULL, &dhd_udr, TRUE, FALSE, TRUE);
8659 
8660 #if defined(BCMINTERNAL) && defined(LINUX)
8661 	/*
8662 	 * We have got all the resources, pktid and ring space
8663 	 * so we can safely free the original SKB here.
8664 	 */
8665 	if (osl_is_flag_set(dhd->osh, OSL_PHYS_MEM_LESS_THAN_16MB))
8666 		PKTCFREE(dhd->osh, pkt_to_free, FALSE);
8667 #endif	/* BCMINTERNAL && LINUX */
8668 
8669 	/* Ethernet header - contains ethertype field
8670 	* Copy before we cache flush packet using DMA_MAP
8671 	*/
8672 	bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN);
8673 
8674 #ifdef DHD_AWDL
8675 	/* the awdl ifidx will always have a non-zero value
8676 	 * if the awdl iface is created. This is because the
8677 	 * primary iface (usually eth1) will always have ifidx of 0.
8678 	 * Hence we can check for non-zero value of awdl ifidx to
8679 	 * see if awdl iface is created or not
8680 	 */
8681 	if (dhd->awdl_llc_enabled &&
8682 		dhd->awdl_ifidx && ifidx == dhd->awdl_ifidx) {
8683 		if (host_sfh_llc_reqd) {
8684 			/* if FW supports host sfh llc insertion
8685 			 * then BOTH sfh and llc needs to be inserted
8686 			 * in which case the host LLC only path
8687 			 * in FW will not be exercised - which is the
8688 			 * objective of this feature. Hence in such a
8689 			 * case disable awdl llc insertion
8690 			 */
8691 			DHD_ERROR_RLMT(("%s: FW supports host sfh + llc, this is"
8692 				"is incompatible with awdl llc insertion"
8693 				" disable host sfh llc support in FW and try\n",
8694 				__FUNCTION__));
8695 		} else {
8696 			if (dhd_ether_to_awdl_llc_hdr(dhd, (struct ether_header *)pktdata,
8697 				PKTBUF) == BCME_OK) {
8698 				llc_inserted = TRUE;
8699 				/* in work item change ether type to len by
8700 				 * re-copying the ether header
8701 				 */
8702 				memcpy_s(txdesc->txhdr, ETHER_HDR_LEN, PKTDATA(dhd->osh, PKTBUF),
8703 					ETHER_HDR_LEN);
8704 			} else {
8705 				goto err_rollback_idx;
8706 			}
8707 		}
8708 	}
8709 #endif /* DHD_AWDL */
8710 
8711 #ifdef HOST_SFH_LLC
8712 	if (host_sfh_llc_reqd) {
8713 		if (dhd_ether_to_8023_hdr(dhd->osh, (struct ether_header *)pktdata,
8714 				PKTBUF) == BCME_OK) {
8715 			/* adjust the data pointer and length information */
8716 			pktdata = PKTDATA(dhd->osh, PKTBUF);
8717 			pktlen  = PKTLEN(dhd->osh, PKTBUF);
8718 			txdesc->flags |= BCMPCIE_TXPOST_FLAGS_HOST_SFH_LLC;
8719 		} else {
8720 			goto err_rollback_idx;
8721 		}
8722 	} else
8723 #endif /* HOST_SFH_LLC */
8724 	{
8725 		/* Extract the ethernet header and adjust the data pointer and length */
8726 		pktlen = PKTLEN(dhd->osh, PKTBUF) - ETHER_HDR_LEN;
8727 		pktdata = PKTPULL(dhd->osh, PKTBUF, ETHER_HDR_LEN);
8728 	}
8729 
8730 	/* Map the data pointer to a DMA-able address */
8731 	pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0);
8732 
8733 	if (PHYSADDRISZERO(pa)) {
8734 		DHD_ERROR(("%s: Something really bad, unless 0 is "
8735 			"a valid phyaddr for pa\n", __FUNCTION__));
8736 		ASSERT(0);
8737 		/* XXX if ASSERT() doesn't work like as Android platform,
8738 		 * try to requeue the packet to the backup queue.
8739 		 */
8740 		goto err_rollback_idx;
8741 	}
8742 
8743 #ifdef DMAMAP_STATS
8744 	dhd->dma_stats.txdata++;
8745 	dhd->dma_stats.txdata_sz += pktlen;
8746 #endif /* DMAMAP_STATS */
8747 	/* No need to lock. Save the rest of the packet's metadata */
8748 	DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_tx_map, PKTBUF, pktid,
8749 	    pa, pktlen, DMA_TX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_TX);
8750 
8751 #ifdef TXP_FLUSH_NITEMS
8752 	if (ring->pend_items_count == 0)
8753 		ring->start_addr = (void *)txdesc;
8754 	ring->pend_items_count++;
8755 #endif
8756 #ifdef DHD_HMAPTEST
8757 	if (dhd->prot->hmaptest_tx_active == HMAPTEST_D11_TX_ACTIVE) {
8758 		/* scratch area */
8759 		dhd->prot->hmap_tx_buf_va = (char *)dhd->prot->hmaptest.mem.va
8760 			+ dhd->prot->hmaptest.offset;
8761 		/* replace pa with our pa for txbuf post only */
8762 		dhd->prot->hmap_tx_buf_len = pktlen;
8763 		if ((dhd->prot->hmap_tx_buf_va + dhd->prot->hmap_tx_buf_len) >
8764 			((char *)dhd->prot->hmaptest.mem.va + dhd->prot->hmaptest.mem.len)) {
8765 			DHD_ERROR(("hmaptest: ERROR Txpost outside HMAPTEST buffer\n"));
8766 			DHD_ERROR(("hmaptest: NOT Replacing Rx Buffer\n"));
8767 			dhd->prot->hmaptest_tx_active = HMAPTEST_D11_TX_INACTIVE;
8768 			dhd->prot->hmaptest.in_progress = FALSE;
8769 		} else {
8770 			/* copy pktdata to our va */
8771 			memcpy(dhd->prot->hmap_tx_buf_va, PKTDATA(dhd->osh, PKTBUF), pktlen);
8772 			pa = DMA_MAP(dhd->osh, dhd->prot->hmap_tx_buf_va,
8773 				dhd->prot->hmap_tx_buf_len, DMA_TX, PKTBUF, 0);
8774 
8775 			dhd->prot->hmap_tx_buf_pa = pa;
8776 			/* store pktid for later mapping in txcpl */
8777 			dhd->prot->hmaptest_tx_pktid = pktid;
8778 			dhd->prot->hmaptest_tx_active = HMAPTEST_D11_TX_POSTED;
8779 			DHD_ERROR(("hmaptest: d11read txpost scratch txbuf pktid=0x%08x\n", pktid));
8780 			DHD_ERROR(("hmaptest: d11read txpost txbuf va=0x%p pa.lo=0x%08x len=%d\n",
8781 				dhd->prot->hmap_tx_buf_va, (uint32)PHYSADDRLO(pa), pktlen));
8782 		}
8783 	}
8784 #endif /* DHD_HMAPTEST */
8785 
8786 #ifdef DHD_PKTTS
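	/* with packet timestamping enabled, allocate a per-packet DMA buffer that the dongle fills with tx metadata/timestamps */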
8787 	memset(&meta_data_buf, 0, sizeof(meta_data_buf));
8788 	if (dhd_get_pktts_enab(dhd) &&
8789 		dhd->pkt_metadata_buflen) {
8790 		/* Allocate memory for Meta data */
8791 		meta_data_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh, meta_data_buf_len,
8792 			DMA_ALIGN_LEN, &meta_data_buf._alloced,
8793 			&meta_data_buf.pa, &meta_data_buf.dmah);
8794 
8795 		if (meta_data_buf.va == NULL) {
8796 			DHD_ERROR_RLMT(("%s: dhd_dma_buf_alloc failed \r\n", __FUNCTION__));
8797 			DHD_ERROR_RLMT((" ... Proceeding without metadata buffer \r\n"));
8798 		} else {
8799 			DHD_PKTID_SAVE_METADATA(dhd, dhd->prot->pktid_tx_map,
8800 				(void *)meta_data_buf.va,
8801 				meta_data_buf.pa,
8802 				(uint16)meta_data_buf._alloced,
8803 				meta_data_buf.dmah,
8804 				pktid);
8805 		}
8806 		memcpy(&addr, &meta_data_buf.pa, sizeof(meta_data_buf.pa));
8807 		DHD_TRACE(("Meta data Buffer VA: %p  PA: %llx dmah: %p\r\n",
8808 			meta_data_buf.va, addr, meta_data_buf.dmah));
8809 
8810 		txdesc->metadata_buf_addr.low = addr & (0xFFFFFFFF);
8811 		txdesc->metadata_buf_addr.high = (addr >> 32) & (0xFFFFFFFF);
8812 		txdesc->metadata_buf_len = meta_data_buf_len;
8813 	}
8814 #endif /* DHD_PKTTS */
8815 
8816 	/* Form the Tx descriptor message buffer */
8817 
8818 	/* Common message hdr */
8819 	txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST;
8820 	txdesc->cmn_hdr.if_id = ifidx;
8821 	txdesc->cmn_hdr.flags = ring->current_phase;
8822 
8823 	txdesc->flags |= BCMPCIE_PKT_FLAGS_FRAME_802_3;
8824 	prio = (uint8)PKTPRIO(PKTBUF);
8825 
8826 #ifdef EXT_STA
8827 	txdesc->flags &= ~BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_MASK <<
8828 		BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_SHIFT;
8829 	txdesc->flags |= (WLPKTFLAG_EXEMPT_GET(WLPKTTAG(PKTBUF)) &
8830 		BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_MASK)
8831 		<< BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_SHIFT;
8832 #endif
8833 
8834 	txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT;
8835 	txdesc->seg_cnt = 1;
8836 
8837 	txdesc->data_len = htol16((uint16) pktlen);
8838 	txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
8839 	txdesc->data_buf_addr.low_addr  = htol32(PHYSADDRLO(pa));
8840 
8841 	if (!host_sfh_llc_reqd)
8842 	{
8843 		/* Move data pointer to keep ether header in local PKTBUF for later reference */
8844 		PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN);
8845 	}
8846 
8847 	txdesc->ext_flags = 0;
8848 
8849 #ifdef DHD_TIMESYNC
8850 	txdesc->rate = 0;
8851 
8852 	if (!llc_inserted && dhd->prot->tx_ts_log_enabled) {
8853 		dhd_pkt_parse_t parse;
8854 
8855 		dhd_parse_proto(PKTDATA(dhd->osh, PKTBUF), &parse);
8856 
8857 		if (parse.proto == IP_PROT_ICMP) {
8858 			if (dhd->prot->no_retry)
8859 				txdesc->ext_flags = BCMPCIE_PKT_FLAGS_FRAME_NORETRY;
8860 			if (dhd->prot->no_aggr)
8861 				txdesc->ext_flags |= BCMPCIE_PKT_FLAGS_FRAME_NOAGGR;
8862 			if (dhd->prot->fixed_rate)
8863 				txdesc->ext_flags |= BCMPCIE_PKT_FLAGS_FRAME_UDR;
8864 		}
8865 	}
8866 #endif /* DHD_TIMESYNC */
8867 
8868 #ifdef DHD_SBN
8869 	if (dhd_udr) {
8870 		txdesc->ext_flags |= BCMPCIE_PKT_FLAGS_FRAME_UDR;
8871 	}
8872 #endif /* DHD_SBN */
8873 
8874 #ifdef DHD_TX_PROFILE
8875 	if (!llc_inserted &&
8876 		dhd->tx_profile_enab && dhd->num_profiles > 0)
8877 	{
8878 		uint8 offset;
8879 
8880 		for (offset = 0; offset < dhd->num_profiles; offset++) {
8881 			if (dhd_protocol_matches_profile((uint8 *)PKTDATA(dhd->osh, PKTBUF),
8882 				PKTLEN(dhd->osh, PKTBUF), &(dhd->protocol_filters[offset]),
8883 				host_sfh_llc_reqd)) {
8884 				/* mask so other reserved bits are not modified. */
8885 				txdesc->rate |=
8886 					(((uint8)dhd->protocol_filters[offset].profile_index) &
8887 					BCMPCIE_TXPOST_RATE_PROFILE_IDX_MASK);
8888 
8889 				/* so we can use the rate field for our purposes */
8890 				txdesc->rate |= BCMPCIE_TXPOST_RATE_EXT_USAGE;
8891 
8892 				break;
8893 			}
8894 		}
8895 	}
8896 #endif /* defined(DHD_TX_PROFILE) */
8897 
8898 	/* Handle Tx metadata */
8899 	headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF);
8900 	if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset))
8901 		DHD_ERROR(("No headroom for Metadata tx %d %d\n",
8902 		prot->tx_metadata_offset, headroom));
8903 
8904 	if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) {
8905 		DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset));
8906 
8907 		/* Adjust the data pointer to account for meta data in DMA_MAP */
8908 		PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset);
8909 
8910 		meta_pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
8911 			prot->tx_metadata_offset, DMA_RX, PKTBUF, 0);
8912 
8913 		if (PHYSADDRISZERO(meta_pa)) {
8914 			/* Unmap the data pointer to a DMA-able address */
8915 			DMA_UNMAP(dhd->osh, pa, pktlen, DMA_TX, 0, DHD_DMAH_NULL);
8916 #ifdef TXP_FLUSH_NITEMS
8917 			/* update pend_items_count */
8918 			ring->pend_items_count--;
8919 #endif /* TXP_FLUSH_NITEMS */
8920 
8921 			DHD_ERROR(("%s: Something really bad, unless 0 is "
8922 				"a valid phyaddr for meta_pa\n", __FUNCTION__));
8923 			ASSERT(0);
8924 			/* XXX if ASSERT() doesn't work like as Android platform,
8925 			 * try to requeue the packet to the backup queue.
8926 			 */
8927 			goto err_rollback_idx;
8928 		}
8929 
8930 		/* Adjust the data pointer back to original value */
8931 		PKTPULL(dhd->osh, PKTBUF, prot->tx_metadata_offset);
8932 
8933 		txdesc->metadata_buf_len = prot->tx_metadata_offset;
8934 		txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa));
8935 		txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa));
8936 	} else {
8937 #ifdef DHD_HP2P
8938 		if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
8939 			dhd_update_hp2p_txdesc(dhd, txdesc);
8940 		} else
8941 #endif /* DHD_HP2P */
8942 #ifdef DHD_PKTTS
8943 		if (!dhd_get_pktts_enab(dhd) || !dhd->pkt_metadata_buflen) {
8944 #else
8945 		if (1) {
8946 #endif /* DHD_PKTTS */
8947 			txdesc->metadata_buf_len = htol16(0);
8948 			txdesc->metadata_buf_addr.high_addr = 0;
8949 			txdesc->metadata_buf_addr.low_addr = 0;
8950 		}
8951 	}
8952 
8953 #ifdef AGG_H2D_DB
8954 	OSL_ATOMIC_INC(dhd->osh, &ring->inflight);
8955 #endif /* AGG_H2D_DB */
8956 
8957 #ifdef DHD_PKTID_AUDIT_RING
8958 	DHD_PKTID_AUDIT(dhd, prot->pktid_tx_map, pktid, DHD_DUPLICATE_ALLOC);
8959 #endif /* DHD_PKTID_AUDIT_RING */
8960 
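	/* stamp the work item with the pktid so the tx completion can look this packet up again */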
8961 	txdesc->cmn_hdr.request_id = htol32(pktid);
8962 
8963 	DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len,
8964 		txdesc->cmn_hdr.request_id));
8965 
8966 #ifdef DHD_LBUF_AUDIT
8967 	PKTAUDIT(dhd->osh, PKTBUF);
8968 #endif
8969 
8970 	/* Update the write pointer in TCM & ring bell */
8971 #if defined(TXP_FLUSH_NITEMS)
8972 #if defined(DHD_HP2P)
8973 	if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
8974 		dhd_calc_hp2p_burst(dhd, ring, flowid);
8975 	} else
8976 #endif /* HP2P */
8977 	{
8978 		if ((ring->pend_items_count == prot->txp_threshold) ||
8979 				((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
8980 #ifdef AGG_H2D_DB
8981 			if (agg_h2d_db_enab) {
8982 				dhd_prot_txdata_aggr_db_write_flush(dhd, flowid);
8983 				if ((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring)) {
8984 					dhd_prot_aggregate_db_ring_door_bell(dhd, flowid, TRUE);
8985 				}
8986 			} else
8987 #endif /* AGG_H2D_DB */
8988 			{
8989 				dhd_prot_txdata_write_flush(dhd, flowid);
8990 			}
8991 
8992 		}
8993 	}
8994 #else
8995 	/* update ring's WR index and ring doorbell to dongle */
8996 	dhd_prot_ring_write_complete(dhd, ring, txdesc, 1);
8997 #endif /* TXP_FLUSH_NITEMS */
8998 
8999 #ifdef TX_STATUS_LATENCY_STATS
9000 	/* set the time when pkt is queued to flowring */
9001 	DHD_PKT_SET_QTIME(PKTBUF, OSL_SYSUPTIME_US());
9002 #elif defined(DHD_PKTTS)
9003 	if (dhd_get_pktts_enab(dhd) == TRUE) {
9004 		/* set the time when pkt is queued to flowring */
9005 		DHD_PKT_SET_QTIME(PKTBUF, OSL_SYSUPTIME_US());
9006 	}
9007 #endif /* TX_STATUS_LATENCY_STATS */
9008 
9009 	DHD_RING_UNLOCK(ring->ring_lock, flags);
9010 
9011 	OSL_ATOMIC_INC(dhd->osh, &prot->active_tx_count);
9012 
9013 	/*
9014 	 * Take a wake lock, do not sleep if we have at least one packet
9015 	 * to finish.
9016 	 */
9017 	DHD_TXFL_WAKE_LOCK_TIMEOUT(dhd, MAX_TX_TIMEOUT);
9018 
9019 #ifdef PCIE_INB_DW
9020 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
9021 #endif
9022 #ifdef TX_STATUS_LATENCY_STATS
9023 	flow_ring_node->flow_info.num_tx_pkts++;
9024 #endif /* TX_STATUS_LATENCY_STATS */
9025 	return BCME_OK;
9026 
9027 err_rollback_idx:
9028 	/* roll back write pointer for unprocessed message */
9029 	if (ring->wr == 0) {
9030 		ring->wr = ring->max_items - 1;
9031 	} else {
9032 		ring->wr--;
9033 		if (ring->wr == 0) {
9034 			DHD_INFO(("%s: flipping the phase now\n", ring->name));
9035 			ring->current_phase = ring->current_phase ?
9036 				0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
9037 		}
9038 	}
9039 
9040 err_free_pktid:
9041 #if defined(DHD_PCIE_PKTID)
9042 	{
9043 		void *dmah;
9044 		void *secdma;
9045 		/* Free up the PKTID. physaddr and pktlen will be garbage. */
9046 		DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
9047 			pa, pktlen, dmah, secdma, PKTTYPE_NO_CHECK);
9048 	}
9049 
9050 err_no_res_pktfree:
9051 #endif /* DHD_PCIE_PKTID */
9052 
9053 #if defined(BCMINTERNAL) && defined(LINUX)
9054 	if (osl_is_flag_set(dhd->osh, OSL_PHYS_MEM_LESS_THAN_16MB))
9055 		PKTCFREE(dhd->osh, PKTBUF, FALSE);
9056 #endif	/* BCMINTERNAL && LINUX */
9057 
9058 	DHD_RING_UNLOCK(ring->ring_lock, flags);
9059 
9060 fail:
9061 #ifdef PCIE_INB_DW
9062 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
9063 #endif
9064 	return BCME_NORESOURCE;
9065 } /* dhd_prot_txdata */
9066 
9067 #ifdef AGG_H2D_DB
9068 static void
9069 dhd_prot_txdata_aggr_db_write_flush(dhd_pub_t *dhd, uint16 flowid)
9070 {
9071 	flow_ring_table_t *flow_ring_table;
9072 	flow_ring_node_t *flow_ring_node;
9073 	msgbuf_ring_t *ring;
9074 
9075 	if (dhd->flow_ring_table == NULL) {
9076 		return;
9077 	}
9078 
9079 	flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
9080 	flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
9081 	ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
9082 
9083 	if (ring->pend_items_count) {
9084 		dhd_prot_agg_db_ring_write(dhd, ring, ring->start_addr,
9085 				ring->pend_items_count);
9086 		ring->pend_items_count = 0;
9087 		ring->start_addr = NULL;
9088 	}
9089 
9090 }
9091 #endif /* AGG_H2D_DB */
9092 
9093 /* called with a ring_lock */
9094 /** optimization to write "n" tx items at a time to ring */
9095 void
9096 BCMFASTPATH(dhd_prot_txdata_write_flush)(dhd_pub_t *dhd, uint16 flowid)
9097 {
9098 #ifdef TXP_FLUSH_NITEMS
9099 	flow_ring_table_t *flow_ring_table;
9100 	flow_ring_node_t *flow_ring_node;
9101 	msgbuf_ring_t *ring;
9102 
9103 	if (dhd->flow_ring_table == NULL) {
9104 		return;
9105 	}
9106 
9107 	flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
9108 	flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
9109 	ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
9110 
9111 	if (ring->pend_items_count) {
9112 		/* update ring's WR index and ring doorbell to dongle */
9113 		dhd_prot_ring_write_complete(dhd, ring, ring->start_addr,
9114 			ring->pend_items_count);
9115 		ring->pend_items_count = 0;
9116 		ring->start_addr = NULL;
9117 		dhd->prot->tx_h2d_db_cnt++;
9118 	}
9119 #endif /* TXP_FLUSH_NITEMS */
9120 }
9121 
9122 #undef PKTBUF	/* Only defined in the above routine */
9123 
9124 int
9125 BCMFASTPATH(dhd_prot_hdrpull)(dhd_pub_t *dhd, int *ifidx, void *pkt, uchar *buf, uint *len)
9126 {
9127 	return 0;
9128 }
9129 
9130 /** post a set of receive buffers to the dongle */
9131 static void
9132 BCMFASTPATH(dhd_prot_return_rxbuf)(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 pktid,
9133 	uint32 rxcnt)
9134 /* XXX function name could be more descriptive, eg dhd_prot_post_rxbufs */
9135 {
9136 	dhd_prot_t *prot = dhd->prot;
9137 
9138 	if (prot->rxbufpost >= rxcnt) {
9139 		prot->rxbufpost -= (uint16)rxcnt;
9140 	} else {
9141 		/* XXX: I have seen this assert hitting.
9142 		 * Will be removed once rootcaused.
9143 		 */
9144 		/* ASSERT(0); */
9145 		prot->rxbufpost = 0;
9146 	}
9147 
9148 	if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
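	/* replenish rx buffers once the posted count drops below the threshold */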
9149 		dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
9150 	} else if (dhd->dma_h2d_ring_upd_support && !IDMA_ACTIVE(dhd)) {
9151 		/* Ring DoorBell after processing the rx packets,
9152 		 * so that dongle will sync the DMA indices.
9153 		 */
9154 		dhd_prot_ring_doorbell(dhd, DHD_RDPTR_UPDATE_H2D_DB_MAGIC(ring));
9155 	}
9156 
9157 	return;
9158 }
9159 
9160 #ifdef DHD_HMAPTEST
9161 
9162 static void
9163 dhd_msgbuf_hmaptest_cmplt(dhd_pub_t *dhd)
9164 {
9165 	dhd_prot_t *prot = dhd->prot;
9166 	uint64 end_usec;
9167 	char *readbuf;
9168 	uint32 len = dhd->prot->hmaptest.len;
9169 	uint32 i;
9170 
9171 	end_usec = OSL_SYSUPTIME_US();
9172 	end_usec -= prot->hmaptest.start_usec;
9173 	DHD_ERROR(("hmaptest cmplt: %d bytes in %llu usec, %u kBps\n",
9174 		len, end_usec, (len * (1000 * 1000 / 1024) / (uint32)(end_usec + 1))));
9175 
9176 	prot->hmaptest.in_progress = FALSE;
9177 	if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_M2M) {
9178 			DHD_ERROR(("HMAPTEST_ACCESS_M2M\n"));
9179 	} else if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_ARM) {
9180 			DHD_ERROR(("HMAPTEST_ACCESS_ARM\n"));
9181 	} else {
9182 		return;
9183 	}
9184 	readbuf = (char *)dhd->prot->hmaptest.mem.va + dhd->prot->hmaptest.offset;
9185 	OSL_CACHE_FLUSH(dhd->prot->hmaptest.mem.va,
9186 		dhd->prot->hmaptest.mem.len);
9187 	if (prot->hmaptest.is_write) {
9188 		DHD_ERROR(("hmaptest cmplt: FW has written at 0x%p\n", readbuf));
9189 		DHD_ERROR(("hmaptest cmplt: pattern = \n"));
9190 		len = ALIGN_SIZE(len, (sizeof(int32)));
9191 		for (i = 0; i < len; i += (sizeof(int32))) {
9192 			DHD_ERROR(("0x%08x\n", *(int *)(readbuf + i)));
9193 		}
9194 		DHD_ERROR(("\n\n"));
9195 	}
9196 
9197 }
9198 /* Program the HMAPTEST window and window config registers.
9199  * Reference for an HMAP implementation in OSes that can leverage it:
9200  * this function can be used as a reference for programming the HMAP windows.
9201  * The function that programs and enables the HMAP windows
9202  * can be called at init time or from the hmap iovar.
9203  */
9204 static void
9205 dhdmsgbuf_set_hmaptest_windows(dhd_pub_t *dhd)
9206 {
9207 	uint32 nwindows = 0;
9208 	uint32 scratch_len;
9209 	uint64 scratch_lin, w1_start;
9210 	dmaaddr_t scratch_pa;
9211 	pcie_hmapwindow_t *hmapwindows; /* 8 windows 0-7 */
9212 	dhd_prot_t *prot = dhd->prot;
9213 	uint corerev = dhd->bus->sih->buscorerev;
9214 
9215 	scratch_pa = prot->hmaptest.mem.pa;
9216 	scratch_len = prot->hmaptest.mem.len;
9217 	scratch_lin  = (uint64)(PHYSADDRLO(scratch_pa) & 0xffffffff)
9218 		| (((uint64)PHYSADDRHI(scratch_pa)& 0xffffffff) << 32);
9219 	hmapwindows = (pcie_hmapwindow_t *)((uintptr_t)PCI_HMAP_WINDOW_BASE(corerev));
9220 	/* windows are 4kb aligned and window length is 512 byte aligned
9221 	 * window start ends with 0x1000 and window length ends with 0xe00
9222 	 * make the sandbox buffer 4kb aligned and size also 4kb aligned for hmap test
9223 	 * window0 = 0 - sandbox_start
9224 	 * window1 = sandbox_end + 1 - 0xffffffff
9225 	 * window2 = 0x100000000 - 0x1fffffe00
9226 	 * window 3 is programmed only for valid test cases
9227 	 * window3 = sandbox_start - sandbox_end
9228 	 */
9229 	w1_start = scratch_lin + scratch_len;
9230 	DHD_ERROR(("hmaptest: window 0 offset lower=0x%p upper=0x%p length=0x%p\n",
9231 		&(hmapwindows[0].baseaddr_lo), &(hmapwindows[0].baseaddr_hi),
9232 		&(hmapwindows[0].windowlength)));
9233 	DHD_ERROR(("hmaptest: window 1 offset lower=0x%p upper=0x%p length=0x%p\n",
9234 		&(hmapwindows[1].baseaddr_lo), &(hmapwindows[1].baseaddr_hi),
9235 		&(hmapwindows[1].windowlength)));
9236 	DHD_ERROR(("hmaptest: window 2 offset lower=0x%p upper=0x%p length=0x%p\n",
9237 		&(hmapwindows[2].baseaddr_lo), &(hmapwindows[2].baseaddr_hi),
		&(hmapwindows[2].windowlength)));
9239 	DHD_ERROR(("hmaptest: window 3 offset lower=0x%p upper=0x%p length=0x%p\n",
9240 		&(hmapwindows[3].baseaddr_lo), &(hmapwindows[3].baseaddr_hi),
9241 		&(hmapwindows[3].windowlength)));
	DHD_ERROR(("hmaptest: w0 base_lo=0x%08x base_hi=0x%08x len=0x%0llx\n",
		0, 0, (uint64) scratch_lin));
	DHD_ERROR(("hmaptest: w1 base_lo=0x%08x base_hi=0x%08x len=0x%0llx\n",
		(uint32)(w1_start & 0xffffffff),
		(uint32)((w1_start >> 32) & 0xffffffff),
		(uint64)(0x100000000 - w1_start)));
	DHD_ERROR(("hmaptest: w2 base_lo=0x%08x base_hi=0x%08x len=0x%0llx\n",
		0, 1, (uint64)0xfffffe00));
	/* setting window0 */
	si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
		(uintptr_t)(&(hmapwindows[0].baseaddr_lo)), ~0, 0x0);
	si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
		(uintptr_t)(&(hmapwindows[0].baseaddr_hi)), ~0, 0x0);
	si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
		(uintptr_t)(&(hmapwindows[0].windowlength)), ~0,
		(uint64)scratch_lin);
	/* setting window1 */
	w1_start = scratch_lin + scratch_len;
	si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
		(uintptr_t)(&(hmapwindows[1].baseaddr_lo)), ~0,
		(uint32)(w1_start & 0xffffffff));
	si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
		(uintptr_t)(&(hmapwindows[1].baseaddr_hi)), ~0,
		(uint32)((w1_start >> 32) & 0xffffffff));
	si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
		(uintptr_t)(&(hmapwindows[1].windowlength)), ~0,
		(0x100000000 - w1_start));
	/* setting window2 */
	si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
		(uintptr_t)(&(hmapwindows[2].baseaddr_lo)), ~0, 0x0);
	si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
		(uintptr_t)(&(hmapwindows[2].baseaddr_hi)), ~0, 0x1);
	si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
		(uintptr_t)(&(hmapwindows[2].windowlength)), ~0, 0xfffffe00);
	nwindows = 3;
	/* program only windows 0-2 with section1 + section2 */
	/* setting window config */
	/* set bit 8:15 in windowconfig to enable n windows in order */
	si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
		(uint)PCI_HMAP_WINDOW_CONFIG(corerev), ~0, (nwindows << 8));
9282 }
9283 
/* Stop HMAPTEST. This function does not check corerev;
 * the caller has to ensure the corerev check is done.
 */
9287 int
9288 dhdmsgbuf_hmaptest_stop(dhd_pub_t *dhd)
9289 {
9290 	uint32 window_config, nwindows, i;
9291 	pcie_hmapwindow_t *hmapwindows; /* 8 windows 0-7 */
9292 	uint corerev = dhd->bus->sih->buscorerev;
9293 
9294 	hmapwindows = (pcie_hmapwindow_t *)((uintptr_t)PCI_HMAP_WINDOW_BASE(corerev));
9295 	dhd->prot->hmaptest.in_progress = FALSE;
9296 
	/* Reference for HMAP implementation:
	 * disable the HMAP windows. As the windows were programmed in the
	 * bus:hmap set call, they are disabled here in hmaptest_stop.
	 */
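	/* Note: si_corereg() with a zero mask performs a read-only access, while
	 * mask ~0 writes the supplied value. The read below fetches the current
	 * window count before the config register and all windows are cleared.
	 */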
9302 	DHD_ERROR(("hmap: disable hmap windows\n"));
9303 	window_config = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9304 		(uint)PCI_HMAP_WINDOW_CONFIG(corerev), 0, 0);
9305 	nwindows = (window_config & PCI_HMAP_NWINDOWS_MASK) >> PCI_HMAP_NWINDOWS_SHIFT;
9306 	si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9307 		(uint)PCI_HMAP_WINDOW_CONFIG(corerev), ~0, 0);
9308 	/* clear all windows */
9309 	for (i = 0; i < nwindows; i++) {
9310 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9311 			(uintptr_t)(&(hmapwindows[i].baseaddr_lo)), ~0, 0);
9312 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9313 			(uintptr_t)(&(hmapwindows[i].baseaddr_hi)), ~0, 0);
9314 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9315 			(uintptr_t)(&(hmapwindows[i].windowlength)), ~0, 0);
9316 	}
9317 
9318 	return BCME_OK;
9319 }
9320 
9321 /* HMAP iovar intercept process */
9322 int
9323 dhdmsgbuf_hmap(dhd_pub_t *dhd, pcie_hmap_t *hmap_params, bool set)
9324 {
9325 
9326 	uint32 scratch_len;
9327 	uint64 scratch_lin, w1_start;
9328 	dmaaddr_t scratch_pa;
9329 	uint32 addr_lo, addr_hi, window_length, window_config, nwindows, i;
9330 	pcie_hmapwindow_t *hmapwindows; /* 8 windows 0-7 */
9331 
9332 	dhd_prot_t *prot = dhd->prot;
9333 	dhd_bus_t *bus = dhd->bus;
9334 	uint corerev = bus->sih->buscorerev;
9335 	scratch_pa = prot->hmaptest.mem.pa;
9336 	scratch_len = prot->hmaptest.mem.len;
9337 	scratch_lin  = (uint64)(PHYSADDRLO(scratch_pa) & 0xffffffff)
9338 		| (((uint64)PHYSADDRHI(scratch_pa)& 0xffffffff) << 32);
9339 	w1_start  = scratch_lin +  scratch_len;
9340 	DHD_ERROR(("HMAP:  pcicorerev = %d\n", corerev));
9341 
9342 	if (corerev < 24) {
9343 		DHD_ERROR(("HMAP not available on pci corerev = %d\n", corerev));
9344 		return BCME_UNSUPPORTED;
9345 	}
9346 	if (set) {
9347 		if (hmap_params->enable) {
9348 			dhdmsgbuf_set_hmaptest_windows(dhd);
9349 		} else {
9350 			dhdmsgbuf_hmaptest_stop(dhd); /* stop will clear all programmed windows */
9351 		}
9352 	}
9353 
9354 	OSL_CACHE_FLUSH(dhd->prot->hmaptest.mem.va,
9355 		dhd->prot->hmaptest.mem.len);
9356 
9357 	window_config = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9358 		(uint)PCI_HMAP_WINDOW_CONFIG(corerev), 0, 0);
9359 	nwindows = (window_config & PCI_HMAP_NWINDOWS_MASK) >> PCI_HMAP_NWINDOWS_SHIFT;
9360 	prot->hmap_enabled = nwindows ? TRUE : FALSE;
9361 
9362 	/* getting window config */
9363 	/* set bit 8:15 in windowconfig to enable n windows in order */
9364 	DHD_ERROR(("hmap: hmap status = %s\n", (prot->hmap_enabled ? "Enabled" : "Disabled")));
9365 	DHD_ERROR(("hmap: window config = 0x%08x\n", window_config));
9366 	DHD_ERROR(("hmap: Windows\n"));
9367 
9368 	hmapwindows = (pcie_hmapwindow_t *)((uintptr_t)PCI_HMAP_WINDOW_BASE(corerev));
9369 	/* getting windows */
9370 	if (nwindows > 8)
9371 		return BCME_ERROR;
9372 	for (i = 0; i < nwindows; i++) {
9373 		addr_lo = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9374 			(uintptr_t)(&(hmapwindows[i].baseaddr_lo)), 0, 0);
9375 		addr_hi = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9376 			(uintptr_t)(&(hmapwindows[i].baseaddr_hi)), 0, 0);
9377 		window_length = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9378 			(uintptr_t)(&(hmapwindows[i].windowlength)), 0, 0);
9379 
9380 		DHD_ERROR(("hmap: window %d address lower=0x%08x upper=0x%08x length=0x%08x\n",
9381 			i, addr_lo, addr_hi, window_length));
9382 	}
9383 	addr_hi = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9384 		(uint)(PCI_HMAP_VIOLATION_ADDR_U(corerev)), 0, 0);
9385 	addr_lo = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9386 		(uint)(PCI_HMAP_VIOLATION_ADDR_L(corerev)), 0, 0);
9387 	window_length = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9388 		(uint)(PCI_HMAP_VIOLATION_INFO(corerev)), 0, 0);
9389 	DHD_ERROR(("hmap: violation regs\n"));
9390 	DHD_ERROR(("hmap: violationaddr_hi =0x%08x\n", addr_hi));
9391 	DHD_ERROR(("hmap: violationaddr_lo =0x%08x\n", addr_lo));
9392 	DHD_ERROR(("hmap: violation_info   =0x%08x\n", window_length));
9393 	DHD_ERROR(("hmap: Buffer allocated for HMAPTEST Start=0x%0llx len =0x%08x End =0x%0llx\n",
9394 		(uint64) scratch_lin, scratch_len, (uint64) w1_start));
9395 
9396 	return BCME_OK;
9397 }
9398 
/* hmaptest iovar process.
 * This iovar triggers HMAPTEST with the given params on chips that have HMAP.
 * DHD programs the hmap window registers with host addresses here.
 */
9404 int
9405 dhdmsgbuf_hmaptest(dhd_pub_t *dhd, pcie_hmaptest_t *hmaptest_params)
9406 {
9407 
9408 	dhd_prot_t *prot = dhd->prot;
9409 	int ret = BCME_OK;
9410 	uint32 offset = 0;
9411 	uint64 scratch_lin;
9412 	dhd_bus_t *bus = dhd->bus;
9413 	uint corerev = bus->sih->buscorerev;
9414 
9415 	if (prot->hmaptest.in_progress) {
9416 		DHD_ERROR(("HMAPTEST already running. Try again.\n"));
9417 		return BCME_BUSY;
9418 	}
9419 
	if (corerev < 24) {
		DHD_ERROR(("HMAP not available on pci corerev = %d\n", corerev));
		return BCME_UNSUPPORTED;
	}

	/* mark the test in progress only after the corerev check, so an early
	 * return does not leave in_progress stuck at TRUE
	 */
	prot->hmaptest.in_progress = TRUE;
9426 	prot->hmaptest.accesstype = hmaptest_params->accesstype;
9427 	prot->hmaptest.is_write = hmaptest_params->is_write;
9428 	prot->hmaptest.len = hmaptest_params->xfer_len;
9429 	prot->hmaptest.offset = hmaptest_params->host_offset;
9430 	offset = prot->hmaptest.offset;
9431 
9432 	DHD_ERROR(("hmaptest: is_write =%d accesstype=%d offset =%d len=%d value=0x%08x\n",
9433 		prot->hmaptest.is_write, prot->hmaptest.accesstype,
9434 		offset, prot->hmaptest.len, hmaptest_params->value));
9435 
9436 	DHD_ERROR(("hmaptest  dma_lo=0x%08x hi=0x%08x pa\n",
9437 		(uint32)PHYSADDRLO(prot->hmaptest.mem.pa),
9438 		(uint32)PHYSADDRHI(prot->hmaptest.mem.pa)));
9439 
9440 	if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_D11) {
9441 		if (prot->hmaptest.is_write) {
9442 			/* if d11 is writing then post rxbuf from scratch area */
9443 			dhd->prot->hmaptest_rx_active = HMAPTEST_D11_RX_ACTIVE;
9444 		} else {
9445 			/* if d11 is reading then post txbuf from scratch area */
9446 			dhd->prot->hmaptest_tx_active = HMAPTEST_D11_TX_ACTIVE;
9447 		}
9448 
9449 	} else {
9450 		uint32 pattern = 0xdeadbeef;
9451 		uint32 i;
9452 		uint32 maxbuflen = MIN(prot->hmaptest.len, (PKTBUFSZ));
9453 		char *fillbuf = (char *)dhd->prot->hmaptest.mem.va
9454 			+ offset;
9455 		if ((fillbuf + maxbuflen) >
9456 			((char *)dhd->prot->hmaptest.mem.va + dhd->prot->hmaptest.mem.len)) {
9457 			DHD_ERROR(("hmaptest: M2m/ARM ERROR offset + len outside buffer\n"));
9458 			dhd->prot->hmaptest.in_progress = FALSE;
9459 			return BCME_BADARG;
9460 		}
9461 
9462 		if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_M2M) {
9463 			DHD_ERROR(("HMAPTEST_ACCESS_M2M\n"));
9464 		} else if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_ARM) {
9465 			DHD_ERROR(("HMAPTEST_ACCESS_ARM\n"));
9466 		} else {
9467 			prot->hmaptest.in_progress = FALSE;
9468 			DHD_ERROR(("hmaptest: accesstype error\n"));
9469 			return BCME_BADARG;
9470 		}
9471 
9472 		/* fill a pattern at offset */
9473 		maxbuflen = ALIGN_SIZE(maxbuflen, (sizeof(uint32)));
9474 		memset(fillbuf, 0, maxbuflen);
9475 		DHD_ERROR(("hmaptest: dhd write pattern at addr=0x%p\n",
9476 			fillbuf));
9477 		DHD_ERROR(("pattern = %08x, %u times",
9478 			pattern, (uint32)(maxbuflen / sizeof(uint32))));
9479 		for (i = 0; i < maxbuflen; i += sizeof(uint32)) {
9480 			*(uint32 *)(fillbuf + i) = pattern;
9481 		}
9482 		OSL_CACHE_FLUSH(dhd->prot->hmaptest.mem.va,
9483 			dhd->prot->hmaptest.mem.len);
9484 		DHD_ERROR(("\n\n"));
9485 
9486 	}
9487 
9488 	/*
9489 	 * Do not calculate address from scratch buffer + offset,
9490 	 * if user supplied absolute address
9491 	 */
	if (hmaptest_params->host_addr_lo || hmaptest_params->host_addr_hi) {
		if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_D11) {
			DHD_ERROR(("hmaptest: accesstype D11 does not support absolute addr\n"));
			prot->hmaptest.in_progress = FALSE;
			return BCME_UNSUPPORTED;
		}
9497 	} else {
9498 		scratch_lin  = (uint64)(PHYSADDRLO(prot->hmaptest.mem.pa) & 0xffffffff)
9499 			| (((uint64)PHYSADDRHI(prot->hmaptest.mem.pa) & 0xffffffff) << 32);
9500 		scratch_lin += offset;
9501 		hmaptest_params->host_addr_lo = htol32((uint32)(scratch_lin & 0xffffffff));
9502 		hmaptest_params->host_addr_hi = htol32((uint32)((scratch_lin >> 32) & 0xffffffff));
9503 	}
9504 
9505 	DHD_INFO(("HMAPTEST Started...\n"));
9506 	prot->hmaptest.start_usec = OSL_SYSUPTIME_US();
9507 	return ret;
9508 
9509 }
9510 
9511 #endif /* DHD_HMAPTEST */
9512 
9513 /* called before an ioctl is sent to the dongle */
9514 static void
9515 dhd_prot_wlioctl_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf)
9516 {
9517 	dhd_prot_t *prot = dhd->prot;
9518 	int slen = 0;
9519 
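	/* For the intercepted iovars below, the ioctl payload is laid out as the
	 * NUL-terminated iovar name followed immediately by its parameter block,
	 * e.g. "pcie_bus_tput\0" + pcie_bus_tput_params_t; slen is used to step
	 * past the name to reach the parameters.
	 */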
9520 	if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "pcie_bus_tput")) {
9521 		pcie_bus_tput_params_t *tput_params;
9522 
9523 		slen = strlen("pcie_bus_tput") + 1;
9524 		tput_params = (pcie_bus_tput_params_t*)((char *)buf + slen);
9525 		bcopy(&prot->host_bus_throughput_buf.pa, &tput_params->host_buf_addr,
9526 			sizeof(tput_params->host_buf_addr));
9527 		tput_params->host_buf_len = DHD_BUS_TPUT_BUF_LEN;
9528 	}
9529 
9530 #ifdef DHD_HMAPTEST
9531 	if (buf != NULL && !strcmp(buf, "bus:hmap")) {
9532 		pcie_hmap_t *hmap_params;
9533 		slen = strlen("bus:hmap") + 1;
9534 		hmap_params = (pcie_hmap_t*)((char *)buf + slen);
9535 		dhdmsgbuf_hmap(dhd, hmap_params, (ioc->cmd == WLC_SET_VAR));
9536 	}
9537 
9538 	if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "bus:hmaptest")) {
9539 		pcie_hmaptest_t *hmaptest_params;
9540 
9541 		slen = strlen("bus:hmaptest") + 1;
9542 		hmaptest_params = (pcie_hmaptest_t*)((char *)buf + slen);
9543 		dhdmsgbuf_hmaptest(dhd, hmaptest_params);
9544 	}
9545 #endif /* DHD_HMAPTEST */
9546 }
9547 
9548 /* called after an ioctl returns from dongle */
9549 static void
9550 dhd_prot_wl_ioctl_ret_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf,
9551 	int ifidx, int ret, int len)
9552 {
9553 
9554 #ifdef DHD_HMAPTEST
9555 	if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "bus:hmaptest")) {
9556 		dhd_msgbuf_hmaptest_cmplt(dhd);
9557 	}
9558 #endif /* DHD_HMAPTEST */
9559 
9560 	if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) {
9561 		int slen;
9562 		/* Intercept the wme_dp ioctl here */
9563 		if (!strcmp(buf, "wme_dp")) {
9564 			int val = 0;
9565 			slen = strlen("wme_dp") + 1;
9566 			if (len >= (int)(slen + sizeof(int)))
9567 				bcopy(((char *)buf + slen), &val, sizeof(int));
9568 			dhd->wme_dp = (uint8) ltoh32(val);
9569 		}
9570 
9571 #ifdef DHD_AWDL
9572 		/* Intercept the awdl_peer_op ioctl here */
9573 		if (!strcmp(buf, "awdl_peer_op")) {
9574 			slen = strlen("awdl_peer_op") + 1;
9575 			dhd_awdl_peer_op(dhd, (uint8)ifidx, ((char *)buf + slen), len - slen);
9576 		}
9577 		/* Intercept the awdl ioctl here, delete flow rings if awdl is
9578 		 * disabled
9579 		 */
9580 		if (!strcmp(buf, "awdl")) {
9581 			int val = 0;
9582 			slen = strlen("awdl") + 1;
9583 			if (len >= (int)(slen + sizeof(int))) {
9584 				bcopy(((char *)buf + slen), &val, sizeof(int));
9585 				val = ltoh32(val);
9586 				if (val == TRUE) {
					/**
					 * Though we update the link status when we receive
					 * WLC_E_LINK from the dongle, that is not always guaranteed.
					 * So intercept the awdl command fired from the app to
					 * update the status.
					 */
9593 					dhd_update_interface_link_status(dhd, (uint8)ifidx, TRUE);
9594 #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
9595 					/* reset AWDL stats data structures when AWDL is enabled */
9596 					dhd_clear_awdl_stats(dhd);
9597 #endif /* DHD_AWDL && AWDL_SLOT_STATS */
9598 				} else if (val == FALSE) {
9599 					dhd_update_interface_link_status(dhd, (uint8)ifidx, FALSE);
9600 					dhd_del_all_sta(dhd, (uint8)ifidx);
9601 					dhd_awdl_peer_op(dhd, (uint8)ifidx, NULL, 0);
9602 
9603 				}
9604 			}
9605 
9606 		}
9607 
		/* store the awdl min extension count and presence mode values
		 * set by the user; the same will be inserted in the LLC header
		 * for each tx packet on the awdl iface
		 */
9612 		slen = strlen("awdl_extcounts");
9613 		if (!strncmp(buf, "awdl_extcounts", slen)) {
9614 			awdl_extcount_t *extcnt = NULL;
9615 			slen = slen + 1;
9616 			if ((len - slen) >= sizeof(*extcnt)) {
9617 				extcnt = (awdl_extcount_t *)((char *)buf + slen);
9618 				dhd->awdl_minext = extcnt->minExt;
9619 			}
9620 		}
9621 
9622 		slen = strlen("awdl_presencemode");
9623 		if (!strncmp(buf, "awdl_presencemode", slen)) {
9624 			slen = slen + 1;
9625 			if ((len - slen) >= sizeof(uint8)) {
9626 				dhd->awdl_presmode = *((uint8 *)((char *)buf + slen));
9627 			}
9628 		}
9629 #endif /* DHD_AWDL */
9630 	}
9631 
9632 }
9633 
9634 #ifdef DHD_PM_CONTROL_FROM_FILE
9635 extern bool g_pm_control;
9636 #endif /* DHD_PM_CONTROL_FROM_FILE */
9637 
9638 /** Use protocol to issue ioctl to dongle. Only one ioctl may be in transit. */
9639 int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
9640 {
9641 	int ret = -1;
9642 	uint8 action;
9643 
9644 	if (dhd->bus->is_linkdown) {
9645 		DHD_ERROR_RLMT(("%s : PCIe link is down. we have nothing to do\n", __FUNCTION__));
9646 		goto done;
9647 	}
9648 
9649 	if (dhd_query_bus_erros(dhd)) {
9650 		DHD_ERROR_RLMT(("%s : some BUS error. we have nothing to do\n", __FUNCTION__));
9651 		goto done;
9652 	}
9653 
9654 	if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
9655 		DHD_ERROR_RLMT(("%s : bus is down. we have nothing to do -"
9656 			" bus state: %d, sent hang: %d\n", __FUNCTION__,
9657 			dhd->busstate, dhd->hang_was_sent));
9658 		goto done;
9659 	}
9660 
9661 	if (dhd->busstate == DHD_BUS_SUSPEND) {
9662 		DHD_ERROR(("%s : bus is suspended\n", __FUNCTION__));
9663 		goto done;
9664 	}
9665 
9666 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
9667 
9668 #ifdef DHD_PCIE_REG_ACCESS
9669 #ifdef BOARD_HIKEY
9670 #ifndef PCIE_LNK_SPEED_GEN1
9671 #define PCIE_LNK_SPEED_GEN1		0x1
9672 #endif
9673 	/* BUG_ON if link speed is GEN1 in Hikey for 4389B0 */
9674 	if (dhd->bus->sih->buscorerev == 72) {
9675 		if (dhd_get_pcie_linkspeed(dhd) == PCIE_LNK_SPEED_GEN1) {
9676 			DHD_ERROR(("%s: ******* Link Speed is GEN1 *********\n", __FUNCTION__));
9677 			BUG_ON(1);
9678 		}
9679 	}
9680 #endif /* BOARD_HIKEY */
9681 #endif /* DHD_PCIE_REG_ACCESS */
9682 
9683 	if (ioc->cmd == WLC_SET_PM) {
9684 #ifdef DHD_PM_CONTROL_FROM_FILE
9685 		if (g_pm_control == TRUE) {
9686 			DHD_ERROR(("%s: SET PM ignored!(Requested:%d)\n",
9687 				__FUNCTION__, buf ? *(char *)buf : 0));
9688 			goto done;
9689 		}
9690 #endif /* DHD_PM_CONTROL_FROM_FILE */
9691 #ifdef DHD_PM_OVERRIDE
9692 		{
9693 			extern bool g_pm_override;
9694 			if (g_pm_override == TRUE) {
9695 				DHD_ERROR(("%s: PM override SET PM ignored!(Requested:%d)\n",
9696 					__FUNCTION__, buf ? *(char *)buf : 0));
9697 				goto done;
9698 			}
9699 		}
9700 #endif /* DHD_PM_OVERRIDE */
9701 		DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, buf ? *(char *)buf : 0));
9702 	}
9703 
9704 	ASSERT(len <= WLC_IOCTL_MAXLEN);
9705 
9706 	if (len > WLC_IOCTL_MAXLEN)
9707 		goto done;
9708 
9709 	action = ioc->set;
9710 
9711 	dhd_prot_wlioctl_intercept(dhd, ioc, buf);
9712 
9713 #if defined(EXT_STA)
9714 	wl_dbglog_ioctl_add(ioc, len, NULL);
9715 #endif
9716 	if (action & WL_IOCTL_ACTION_SET) {
9717 		ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
9718 	} else {
9719 		ret = dhd_msgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
9720 		if (ret > 0)
9721 			ioc->used = ret;
9722 	}
9723 
9724 	/* Too many programs assume ioctl() returns 0 on success */
9725 	if (ret >= 0) {
9726 		ret = 0;
9727 	} else {
9728 #ifndef DETAIL_DEBUG_LOG_FOR_IOCTL
9729 		DHD_INFO(("%s: status ret value is %d \n", __FUNCTION__, ret));
9730 #endif /* !DETAIL_DEBUG_LOG_FOR_IOCTL */
9731 		dhd->dongle_error = ret;
9732 	}
9733 
9734 	dhd_prot_wl_ioctl_ret_intercept(dhd, ioc, buf, ifidx, ret, len);
9735 
9736 done:
9737 	return ret;
9738 
9739 } /* dhd_prot_ioctl */
9740 
9741 /** test / loopback */
9742 
9743 /*
9744  * XXX: This will fail with new PCIe Split header Full Dongle using fixed
9745  * sized messages in control submission ring. We seem to be sending the lpbk
9746  * data via the control message, wherein the lpbk data may be larger than 1
9747  * control message that is being committed.
9748  */
9749 int
9750 dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len)
9751 {
9752 	unsigned long flags;
9753 	dhd_prot_t *prot = dhd->prot;
9754 	uint16 alloced = 0;
9755 
9756 	ioct_reqst_hdr_t *ioct_rqst;
9757 
9758 	uint16 hdrlen = sizeof(ioct_reqst_hdr_t);
9759 	uint16 msglen = len + hdrlen;
9760 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
9761 
9762 	msglen = ALIGN_SIZE(msglen, DMA_ALIGN_LEN);
9763 	msglen = LIMIT_TO_MAX(msglen, MSGBUF_MAX_MSG_SIZE);
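	/* msglen is padded up for DMA alignment and then clamped to
	 * MSGBUF_MAX_MSG_SIZE, so a larger loopback request is silently
	 * truncated to what fits in a single control-ring work item
	 * (see the XXX note above).
	 */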
9764 
9765 #ifdef PCIE_INB_DW
9766 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
9767 		return BCME_ERROR;
9768 #endif /* PCIE_INB_DW */
9769 
9770 	DHD_RING_LOCK(ring->ring_lock, flags);
9771 
9772 	ioct_rqst = (ioct_reqst_hdr_t *)
9773 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
9774 
9775 	if (ioct_rqst == NULL) {
9776 		DHD_RING_UNLOCK(ring->ring_lock, flags);
9777 #ifdef PCIE_INB_DW
9778 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
9779 #endif
9780 		return 0;
9781 	}
9782 
9783 	{
9784 		uint8 *ptr;
9785 		uint16 i;
9786 
9787 		ptr = (uint8 *)ioct_rqst; /* XXX: failure!!! */
9788 		for (i = 0; i < msglen; i++) {
9789 			ptr[i] = i % 256;
9790 		}
9791 	}
9792 
9793 	/* Common msg buf hdr */
9794 	ioct_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
9795 	ring->seqnum++;
9796 
9797 	ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK;
9798 	ioct_rqst->msg.if_id = 0;
9799 	ioct_rqst->msg.flags = ring->current_phase;
9800 
9801 	bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen);
9802 
9803 	/* update ring's WR index and ring doorbell to dongle */
9804 	dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
9805 
9806 	DHD_RING_UNLOCK(ring->ring_lock, flags);
9807 
9808 #ifdef PCIE_INB_DW
9809 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
9810 #endif
9811 
9812 	return 0;
9813 }
9814 
9815 /** test / loopback */
9816 void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dmaxfer)
9817 {
9818 	if (dmaxfer == NULL)
9819 		return;
9820 
9821 	dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
9822 	dhd_dma_buf_free(dhd, &dmaxfer->dstmem);
9823 }
9824 
9825 /** test / loopback */
9826 int
9827 dhd_prepare_schedule_dmaxfer_free(dhd_pub_t *dhdp)
9828 {
9829 	dhd_prot_t *prot = dhdp->prot;
9830 	dhd_dmaxfer_t *dmaxfer = &prot->dmaxfer;
9831 	dmaxref_mem_map_t *dmap = NULL;
9832 
9833 	dmap = MALLOCZ(dhdp->osh, sizeof(dmaxref_mem_map_t));
9834 	if (!dmap) {
9835 		DHD_ERROR(("%s: dmap alloc failed\n", __FUNCTION__));
9836 		goto mem_alloc_fail;
9837 	}
9838 	dmap->srcmem = &(dmaxfer->srcmem);
9839 	dmap->dstmem = &(dmaxfer->dstmem);
9840 
9841 	DMAXFER_FREE(dhdp, dmap);
9842 	return BCME_OK;
9843 
9844 mem_alloc_fail:
9845 	if (dmap) {
9846 		MFREE(dhdp->osh, dmap, sizeof(dmaxref_mem_map_t));
9847 	}
9848 	return BCME_NOMEM;
9849 } /* dhd_prepare_schedule_dmaxfer_free */
9850 
9851 /** test / loopback */
9852 void
9853 dmaxfer_free_prev_dmaaddr(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
9854 {
9855 
9856 	dhd_dma_buf_free(dhdp, dmmap->srcmem);
9857 	dhd_dma_buf_free(dhdp, dmmap->dstmem);
9858 
9859 	MFREE(dhdp->osh, dmmap, sizeof(dmaxref_mem_map_t));
9860 
9861 	dhdp->bus->dmaxfer_complete = TRUE;
9862 	dhd_os_dmaxfer_wake(dhdp);
9863 } /* dmaxfer_free_prev_dmaaddr */
9864 
9865 /** test / loopback */
9866 int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len,
9867 	uint srcdelay, uint destdelay, dhd_dmaxfer_t *dmaxfer)
9868 {
9869 	uint i = 0, j = 0;
9870 	if (!dmaxfer)
9871 		return BCME_ERROR;
9872 
9873 	/* First free up existing buffers */
9874 	dmaxfer_free_dmaaddr(dhd, dmaxfer);
9875 
9876 	if (dhd_dma_buf_alloc(dhd, &dmaxfer->srcmem, len)) {
9877 		return BCME_NOMEM;
9878 	}
9879 
9880 	if (dhd_dma_buf_alloc(dhd, &dmaxfer->dstmem, len + 8)) {
9881 		dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
9882 		return BCME_NOMEM;
9883 	}
9884 
9885 	dmaxfer->len = len;
9886 
9887 	/* Populate source with a pattern like below
9888 	 * 0x00000000
9889 	 * 0x01010101
9890 	 * 0x02020202
9891 	 * 0x03030303
9892 	 * 0x04040404
9893 	 * 0x05050505
9894 	 * ...
9895 	 * 0xFFFFFFFF
9896 	 */
9897 	while (i < dmaxfer->len) {
9898 		((uint8*)dmaxfer->srcmem.va)[i] = j % 256;
9899 		i++;
9900 		if (i % 4 == 0) {
9901 			j++;
9902 		}
9903 	}
9904 
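	/* Flush the CPU cache so the freshly written pattern is visible to the
	 * dongle's DMA engine before the transfer is kicked off.
	 */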
9905 	OSL_CACHE_FLUSH(dmaxfer->srcmem.va, dmaxfer->len);
9906 
9907 	dmaxfer->srcdelay = srcdelay;
9908 	dmaxfer->destdelay = destdelay;
9909 
9910 	return BCME_OK;
9911 } /* dmaxfer_prepare_dmaaddr */
9912 
9913 static void
9914 dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg)
9915 {
9916 	dhd_prot_t *prot = dhd->prot;
9917 	uint64 end_usec;
9918 	pcie_dmaxfer_cmplt_t *cmplt = (pcie_dmaxfer_cmplt_t *)msg;
9919 	int buf_free_scheduled;
9920 	int err = 0;
9921 
9922 	BCM_REFERENCE(cmplt);
9923 	end_usec = OSL_SYSUPTIME_US();
9924 
9925 #if defined(DHD_EFI) && defined(DHD_INTR_POLL_PERIOD_DYNAMIC)
9926 	/* restore interrupt poll period to the previous existing value */
9927 	dhd_os_set_intr_poll_period(dhd->bus, dhd->cur_intr_poll_period);
9928 #endif /* DHD_EFI && DHD_INTR_POLL_PERIOD_DYNAMIC */
9929 
9930 	DHD_ERROR(("DMA loopback status: %d\n", cmplt->compl_hdr.status));
9931 	prot->dmaxfer.status = cmplt->compl_hdr.status;
9932 	OSL_CACHE_INV(prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
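	/* Skip the host-side memcmp for the write-to/read-from-RAM variants:
	 * only one end of those transfers is in host memory, so err keeps its
	 * initial value of 0 and only the completion status is checked.
	 */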
9933 	if (prot->dmaxfer.d11_lpbk != M2M_WRITE_TO_RAM &&
9934 		prot->dmaxfer.d11_lpbk != M2M_READ_FROM_RAM &&
9935 		prot->dmaxfer.d11_lpbk != D11_WRITE_TO_RAM &&
9936 		prot->dmaxfer.d11_lpbk != D11_READ_FROM_RAM) {
9937 		err = memcmp(prot->dmaxfer.srcmem.va,
9938 			prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
9939 	}
9940 	if (prot->dmaxfer.srcmem.va && prot->dmaxfer.dstmem.va) {
		if (err || cmplt->compl_hdr.status != BCME_OK) {
			DHD_ERROR(("DMA loopback failed\n"));
			/* It is observed that sometimes the completion
			 * header status is set to OK but the memcmp fails;
			 * hence always explicitly set the dmaxfer status
			 * to error if this happens.
			 */
9949 			prot->dmaxfer.status = BCME_ERROR;
9950 			prhex("XFER SRC: ",
9951 			    prot->dmaxfer.srcmem.va, prot->dmaxfer.len);
9952 			prhex("XFER DST: ",
9953 			    prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
9954 		}
9955 		else {
9956 			switch (prot->dmaxfer.d11_lpbk) {
9957 			case M2M_DMA_LPBK: {
9958 				DHD_ERROR(("DMA successful pcie m2m DMA loopback\n"));
9959 				} break;
9960 			case D11_LPBK: {
9961 				DHD_ERROR(("DMA successful with d11 loopback\n"));
9962 				} break;
9963 			case BMC_LPBK: {
9964 				DHD_ERROR(("DMA successful with bmc loopback\n"));
9965 				} break;
9966 			case M2M_NON_DMA_LPBK: {
9967 				DHD_ERROR(("DMA successful pcie m2m NON DMA loopback\n"));
9968 				} break;
9969 			case D11_HOST_MEM_LPBK: {
9970 				DHD_ERROR(("DMA successful d11 host mem loopback\n"));
9971 				} break;
9972 			case BMC_HOST_MEM_LPBK: {
9973 				DHD_ERROR(("DMA successful bmc host mem loopback\n"));
9974 				} break;
9975 			case M2M_WRITE_TO_RAM: {
9976 				DHD_ERROR(("DMA successful pcie m2m write to ram\n"));
9977 				} break;
9978 			case M2M_READ_FROM_RAM: {
9979 				DHD_ERROR(("DMA successful pcie m2m read from ram\n"));
9980 				prhex("XFER DST: ",
9981 					prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
9982 				} break;
9983 			case D11_WRITE_TO_RAM: {
9984 				DHD_ERROR(("DMA successful D11 write to ram\n"));
9985 				} break;
9986 			case D11_READ_FROM_RAM: {
9987 				DHD_ERROR(("DMA successful D11 read from ram\n"));
9988 				prhex("XFER DST: ",
9989 					prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
9990 				} break;
9991 			default: {
9992 				DHD_ERROR(("Invalid loopback option\n"));
9993 				} break;
9994 			}
9995 
9996 			if (DHD_LPBKDTDUMP_ON()) {
9997 				/* debug info print of the Tx and Rx buffers */
9998 				dhd_prhex("XFER SRC: ", prot->dmaxfer.srcmem.va,
9999 					prot->dmaxfer.len, DHD_INFO_VAL);
10000 				dhd_prhex("XFER DST: ", prot->dmaxfer.dstmem.va,
10001 					prot->dmaxfer.len, DHD_INFO_VAL);
10002 			}
10003 		}
10004 	}
10005 
10006 	buf_free_scheduled = dhd_prepare_schedule_dmaxfer_free(dhd);
10007 	end_usec -= prot->dmaxfer.start_usec;
10008 	if (end_usec) {
10009 		prot->dmaxfer.time_taken = end_usec;
10010 		DHD_ERROR(("DMA loopback %d bytes in %lu usec, %u kBps\n",
10011 			prot->dmaxfer.len, (unsigned long)end_usec,
10012 			(prot->dmaxfer.len * (1000 * 1000 / 1024) / (uint32)end_usec)));
10013 	}
10014 	dhd->prot->dmaxfer.in_progress = FALSE;
10015 
10016 	if (buf_free_scheduled != BCME_OK) {
10017 		dhd->bus->dmaxfer_complete = TRUE;
10018 		dhd_os_dmaxfer_wake(dhd);
10019 	}
10020 }
10021 
10022 /** Test functionality.
10023  * Transfers bytes from host to dongle and to host again using DMA
10024  * This function is not reentrant, as prot->dmaxfer.in_progress is not protected
10025  * by a spinlock.
10026  */
10027 int
10028 dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay,
10029 	uint d11_lpbk, uint core_num, uint32 mem_addr)
10030 {
10031 	unsigned long flags;
10032 	int ret = BCME_OK;
10033 	dhd_prot_t *prot = dhd->prot;
10034 	pcie_dma_xfer_params_t *dmap;
10035 	uint32 xferlen = LIMIT_TO_MAX(len, DMA_XFER_LEN_LIMIT);
10036 	uint16 alloced = 0;
10037 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
10038 
10039 	/* XXX: prot->dmaxfer.in_progress is not protected by lock */
10040 	if (prot->dmaxfer.in_progress) {
10041 		DHD_ERROR(("DMA is in progress...\n"));
10042 		return BCME_ERROR;
10043 	}
10044 
10045 	if (d11_lpbk >= MAX_LPBK) {
10046 		DHD_ERROR(("loopback mode should be either"
10047 			" 0-PCIE_M2M_DMA, 1-D11, 2-BMC or 3-PCIE_M2M_NonDMA\n"));
10048 		return BCME_ERROR;
10049 	}
10050 
10051 #ifdef PCIE_INB_DW
10052 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) {
10053 		return BCME_ERROR;
10054 	}
10055 #endif /* PCIE_INB_DW */
10056 
10057 	prot->dmaxfer.in_progress = TRUE;
10058 	if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay,
10059 	        &prot->dmaxfer)) != BCME_OK) {
10060 		prot->dmaxfer.in_progress = FALSE;
10061 #ifdef PCIE_INB_DW
10062 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10063 #endif
10064 		return ret;
10065 	}
10066 	DHD_RING_LOCK(ring->ring_lock, flags);
10067 	dmap = (pcie_dma_xfer_params_t *)
10068 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
10069 
10070 	if (dmap == NULL) {
10071 		dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
10072 		prot->dmaxfer.in_progress = FALSE;
10073 		DHD_RING_UNLOCK(ring->ring_lock, flags);
10074 #ifdef PCIE_INB_DW
10075 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10076 #endif
10077 		return BCME_NOMEM;
10078 	}
10079 
10080 	/* Common msg buf hdr */
10081 	dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER;
10082 	dmap->cmn_hdr.request_id = htol32(DHD_FAKE_PKTID);
10083 	dmap->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
10084 	dmap->cmn_hdr.flags = ring->current_phase;
10085 	ring->seqnum++;
10086 
10087 	dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa));
10088 	dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa));
10089 	dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.dstmem.pa));
10090 	dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.dstmem.pa));
10091 	dmap->xfer_len = htol32(prot->dmaxfer.len);
10092 	dmap->srcdelay = htol32(prot->dmaxfer.srcdelay);
10093 	dmap->destdelay = htol32(prot->dmaxfer.destdelay);
10094 	prot->dmaxfer.d11_lpbk = d11_lpbk;
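	/* For the *_TO_RAM / *_FROM_RAM variants below, the corresponding host
	 * buffer address is replaced with the caller-supplied dongle memory
	 * address (mem_addr), so that side of the transfer targets dongle RAM
	 * instead of the host DMA buffer.
	 */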
10095 	if (d11_lpbk == M2M_WRITE_TO_RAM) {
10096 		dmap->host_ouput_buf_addr.high = 0x0;
10097 		dmap->host_ouput_buf_addr.low = mem_addr;
10098 	} else if (d11_lpbk == M2M_READ_FROM_RAM) {
10099 		dmap->host_input_buf_addr.high = 0x0;
10100 		dmap->host_input_buf_addr.low = mem_addr;
10101 	} else if (d11_lpbk == D11_WRITE_TO_RAM) {
10102 		dmap->host_ouput_buf_addr.high = 0x0;
10103 		dmap->host_ouput_buf_addr.low = mem_addr;
10104 	} else if (d11_lpbk == D11_READ_FROM_RAM) {
10105 		dmap->host_input_buf_addr.high = 0x0;
10106 		dmap->host_input_buf_addr.low = mem_addr;
10107 	}
10108 	dmap->flags = (((core_num & PCIE_DMA_XFER_FLG_CORE_NUMBER_MASK)
10109 			<< PCIE_DMA_XFER_FLG_CORE_NUMBER_SHIFT) |
10110 			((prot->dmaxfer.d11_lpbk & PCIE_DMA_XFER_FLG_D11_LPBK_MASK)
10111 			 << PCIE_DMA_XFER_FLG_D11_LPBK_SHIFT));
10112 	prot->dmaxfer.start_usec = OSL_SYSUPTIME_US();
10113 
10114 	/* update ring's WR index and ring doorbell to dongle */
10115 	dhd_prot_ring_write_complete(dhd, ring, dmap, 1);
10116 
10117 	DHD_RING_UNLOCK(ring->ring_lock, flags);
10118 
10119 	DHD_ERROR(("DMA loopback Started... on core[%d]\n", core_num));
10120 #ifdef PCIE_INB_DW
10121 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10122 #endif
10123 
10124 	return BCME_OK;
10125 } /* dhdmsgbuf_dmaxfer_req */
10126 
10127 int
10128 dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd, dma_xfer_info_t *result)
10129 {
10130 	dhd_prot_t *prot = dhd->prot;
10131 
10132 	if (prot->dmaxfer.in_progress)
10133 		result->status = DMA_XFER_IN_PROGRESS;
10134 	else if (prot->dmaxfer.status == 0)
10135 		result->status = DMA_XFER_SUCCESS;
10136 	else
10137 		result->status = DMA_XFER_FAILED;
10138 
10139 	result->type = prot->dmaxfer.d11_lpbk;
10140 	result->error_code = prot->dmaxfer.status;
10141 	result->num_bytes = prot->dmaxfer.len;
10142 	result->time_taken = prot->dmaxfer.time_taken;
10143 	if (prot->dmaxfer.time_taken) {
10144 		/* throughput in kBps */
10145 		result->tput =
10146 			(prot->dmaxfer.len * (1000 * 1000 / 1024)) /
10147 			(uint32)prot->dmaxfer.time_taken;
10148 	}
10149 
10150 	return BCME_OK;
10151 }
10152 
10153 /** Called in the process of submitting an ioctl to the dongle */
10154 static int
10155 dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
10156 {
10157 	int ret = 0;
10158 	uint copylen = 0;
10159 
10160 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
10161 
10162 	if (dhd->bus->is_linkdown) {
10163 		DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n",
10164 			__FUNCTION__));
10165 		return -EIO;
10166 	}
10167 
10168 	if (dhd->busstate == DHD_BUS_DOWN) {
10169 		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
10170 		return -EIO;
10171 	}
10172 
10173 	/* don't talk to the dongle if fw is about to be reloaded */
10174 	if (dhd->hang_was_sent) {
10175 		DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
10176 			__FUNCTION__));
10177 		return -EIO;
10178 	}
10179 
10180 	if (cmd == WLC_GET_VAR && buf)
10181 	{
10182 		if (!len || !*(uint8 *)buf) {
10183 			DHD_ERROR(("%s(): Zero length bailing\n", __FUNCTION__));
10184 			ret = BCME_BADARG;
10185 			goto done;
10186 		}
10187 
10188 		/* Respond "bcmerror" and "bcmerrorstr" with local cache */
10189 		copylen = MIN(len, BCME_STRLEN);
10190 
10191 		if ((len >= strlen("bcmerrorstr")) &&
10192 			(!strcmp((char *)buf, "bcmerrorstr"))) {
10193 			strlcpy((char *)buf, bcmerrorstr(dhd->dongle_error), copylen);
10194 			goto done;
10195 		} else if ((len >= strlen("bcmerror")) &&
10196 			!strcmp((char *)buf, "bcmerror")) {
			*(uint32 *)buf = dhd->dongle_error;
10198 			goto done;
10199 		}
10200 	}
10201 
	DHD_CTL(("query_ioctl: ACTION %d ifidx %d cmd %d len %d \n",
10203 	    action, ifidx, cmd, len));
10204 #ifdef REPORT_FATAL_TIMEOUTS
	/*
	 * These timers "should" be started before sending the H2D interrupt.
	 * Think of the scenario where the H2D interrupt is fired and the dongle
	 * responds back immediately. From the DPC we would stop the cmd and bus
	 * timers. But the process context could have switched out, leading to
	 * a situation where the timers are not started yet but are actually stopped.
	 *
	 * Disable preemption from the time we start the timer until we are done
	 * with sending H2D interrupts.
	 */
10215 	OSL_DISABLE_PREEMPTION(dhd->osh);
10216 	dhd_set_request_id(dhd, dhd->prot->ioctl_trans_id+1, cmd);
10217 	dhd_start_cmd_timer(dhd);
10218 	dhd_start_bus_timer(dhd);
10219 #endif /* REPORT_FATAL_TIMEOUTS */
10220 
10221 	ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
10222 
10223 #ifdef REPORT_FATAL_TIMEOUTS
10224 	/* For some reason if we fail to ring door bell, stop the timers */
10225 	if (ret < 0) {
10226 		DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
10227 		dhd_stop_cmd_timer(dhd);
10228 		dhd_stop_bus_timer(dhd);
10229 		OSL_ENABLE_PREEMPTION(dhd->osh);
10230 		goto done;
10231 	}
10232 	OSL_ENABLE_PREEMPTION(dhd->osh);
10233 #else
10234 	if (ret < 0) {
10235 		DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
10236 		goto done;
10237 	}
10238 #endif /* REPORT_FATAL_TIMEOUTS */
10239 
10240 	/* wait for IOCTL completion message from dongle and get first fragment */
10241 	ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
10242 
10243 done:
10244 	return ret;
10245 }
10246 
10247 void
10248 dhd_msgbuf_iovar_timeout_dump(dhd_pub_t *dhd)
10249 {
10250 	uint32 intstatus;
10251 	dhd_prot_t *prot = dhd->prot;
10252 	dhd->rxcnt_timeout++;
10253 	dhd->rx_ctlerrs++;
10254 	DHD_ERROR(("%s: resumed on timeout rxcnt_timeout%s %d ioctl_cmd %d "
10255 		"trans_id %d state %d busstate=%d ioctl_received=%d\n", __FUNCTION__,
10256 		dhd->is_sched_error ? " due to scheduling problem" : "",
10257 		dhd->rxcnt_timeout, prot->curr_ioctl_cmd, prot->ioctl_trans_id,
10258 		prot->ioctl_state, dhd->busstate, prot->ioctl_received));
10259 #if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
		/* XXX DHD triggers a kernel panic if the resume-on-timeout occurs
		 * due to tasklet or workqueue scheduling problems in the Linux kernel.
		 * The customer reports that it is hard to find any clue in the
		 * host memory dump, since the important tasklet or workqueue
		 * information has already disappeared due to the latency of printing
		 * the timestamp logs while debugging the scan timeout issue.
		 * For this reason, the customer requests that we trigger a kernel
		 * panic rather than take a SOCRAM dump.
		 */
10269 		if (dhd->is_sched_error && dhd->memdump_enabled == DUMP_MEMFILE_BUGON) {
10270 			/* change g_assert_type to trigger Kernel panic */
10271 			g_assert_type = 2;
10272 			/* use ASSERT() to trigger panic */
10273 			ASSERT(0);
10274 		}
10275 #endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
10276 
10277 	if (prot->curr_ioctl_cmd == WLC_SET_VAR ||
10278 			prot->curr_ioctl_cmd == WLC_GET_VAR) {
10279 		char iovbuf[32];
10280 		int dump_size = 128;
10281 		uint8 *ioctl_buf = (uint8 *)prot->ioctbuf.va;
10282 		memset(iovbuf, 0, sizeof(iovbuf));
10283 		strncpy(iovbuf, ioctl_buf, sizeof(iovbuf) - 1);
10284 		iovbuf[sizeof(iovbuf) - 1] = '\0';
10285 		DHD_ERROR(("Current IOVAR (%s): %s\n",
10286 			prot->curr_ioctl_cmd == WLC_SET_VAR ?
10287 			"WLC_SET_VAR" : "WLC_GET_VAR", iovbuf));
10288 		DHD_ERROR(("========== START IOCTL REQBUF DUMP ==========\n"));
10289 		prhex("ioctl_buf", (const u8 *) ioctl_buf, dump_size);
10290 		DHD_ERROR(("\n========== END IOCTL REQBUF DUMP ==========\n"));
10291 	}
10292 
10293 	/* Check the PCIe link status by reading intstatus register */
10294 	intstatus = si_corereg(dhd->bus->sih,
10295 		dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
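	/* A read-back of all 1s typically means the PCIe endpoint is no longer
	 * responding to register accesses, i.e. the link is down.
	 */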
10296 	if (intstatus == (uint32)-1) {
10297 		DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
10298 		dhd->bus->is_linkdown = TRUE;
10299 	}
10300 
10301 	dhd_bus_dump_console_buffer(dhd->bus);
10302 	dhd_prot_debug_info_print(dhd);
10303 }
10304 
10305 /**
10306  * Waits for IOCTL completion message from the dongle, copies this into caller
10307  * provided parameter 'buf'.
10308  */
10309 static int
10310 dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf)
10311 {
10312 	dhd_prot_t *prot = dhd->prot;
10313 	int timeleft;
10314 	unsigned long flags;
10315 	int ret = 0;
10316 	static uint cnt = 0;
10317 
10318 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
10319 
10320 	if (dhd_query_bus_erros(dhd)) {
10321 		ret = -EIO;
10322 		goto out;
10323 	}
10324 #ifdef GDB_PROXY
10325 	/* Loop while timeout is caused by firmware stop in GDB */
10326 	{
10327 		uint32 prev_stop_count;
10328 		do {
10329 			prev_stop_count = dhd->gdb_proxy_stop_count;
10330 			timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
10331 		} while ((timeleft == 0) && ((dhd->gdb_proxy_stop_count != prev_stop_count) ||
10332 			(dhd->gdb_proxy_stop_count & GDB_PROXY_STOP_MASK)));
10333 	}
10334 #else
10335 	timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
10336 #endif /* GDB_PROXY */
10337 
10338 #ifdef DHD_RECOVER_TIMEOUT
10339 	if (prot->ioctl_received == 0) {
10340 		uint32 intstatus = si_corereg(dhd->bus->sih,
10341 			dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
		int host_irq_disabled = dhdpcie_irq_disabled(dhd->bus);
		if ((intstatus) && (intstatus != (uint32)-1) &&
			(timeleft == 0) && (!dhd_query_bus_erros(dhd))) {
			DHD_ERROR(("%s: iovar timeout trying again intstatus=%x"
				" host_irq_disabled=%d\n",
				__FUNCTION__, intstatus, host_irq_disabled));
10348 			dhd_pcie_intr_count_dump(dhd);
10349 			dhd_print_tasklet_status(dhd);
10350 			dhd_prot_process_ctrlbuf(dhd);
10351 			timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
10352 			/* Clear Interrupts */
10353 			dhdpcie_bus_clear_intstatus(dhd->bus);
10354 		}
10355 	}
10356 #endif /* DHD_RECOVER_TIMEOUT */
10357 
10358 	if (dhd->conf->ctrl_resched > 0 && timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
10359 		cnt++;
10360 		if (cnt <= dhd->conf->ctrl_resched) {
10361 			uint buscorerev = dhd->bus->sih->buscorerev;
10362 			uint32 intstatus = 0, intmask = 0;
10363 			intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt(buscorerev), 0, 0);
10364 			intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask(buscorerev), 0, 0);
10365 			if (intstatus) {
10366 				DHD_ERROR(("%s: reschedule dhd_dpc, cnt=%d, intstatus=0x%x, intmask=0x%x\n",
10367 					__FUNCTION__, cnt, intstatus, intmask));
10368 				dhd->bus->intstatus = intstatus;
10369 				dhd->bus->ipend = TRUE;
10370 				dhd->bus->dpc_sched = TRUE;
10371 				dhd_sched_dpc(dhd);
10372 				timeleft = dhd_os_ioctl_resp_wait(dhd, &prot->ioctl_received);
10373 			}
10374 		}
10375 	} else {
10376 		cnt = 0;
10377 	}
10378 
10379 	if (timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
10380 		if (dhd->check_trap_rot) {
10381 			/* check dongle trap first */
10382 			DHD_ERROR(("Check dongle trap in the case of iovar timeout\n"));
10383 			dhd_bus_checkdied(dhd->bus, NULL, 0);
10384 
10385 			if (dhd->dongle_trap_occured) {
10386 #ifdef SUPPORT_LINKDOWN_RECOVERY
10387 #ifdef CONFIG_ARCH_MSM
10388 				dhd->bus->no_cfg_restore = 1;
10389 #endif /* CONFIG_ARCH_MSM */
10390 #endif /* SUPPORT_LINKDOWN_RECOVERY */
10391 				ret = -EREMOTEIO;
10392 				goto out;
10393 			}
10394 		}
10395 		/* check if resumed on time out related to scheduling issue */
10396 		dhd->is_sched_error = dhd_bus_query_dpc_sched_errors(dhd);
10397 
10398 		dhd->iovar_timeout_occured = TRUE;
10399 		dhd_msgbuf_iovar_timeout_dump(dhd);
10400 
10401 #ifdef DHD_FW_COREDUMP
10402 		/* Collect socram dump */
10403 		if (dhd->memdump_enabled) {
10404 			/* collect core dump */
10405 			dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT;
10406 			dhd_bus_mem_dump(dhd);
10407 		}
10408 #endif /* DHD_FW_COREDUMP */
10409 
10410 #ifdef DHD_EFI
		/*
		 * For an ioctl timeout, recovery is triggered only in the EFI case:
		 * on Linux the dhd daemon will itself trap the FW, so if recovery
		 * were also triggered here there would be a race between the FLR
		 * and the daemon-initiated trap.
		 */
10417 		dhd_schedule_reset(dhd);
10418 #endif /* DHD_EFI */
10419 
10420 #ifdef SUPPORT_LINKDOWN_RECOVERY
10421 #ifdef CONFIG_ARCH_MSM
10422 		dhd->bus->no_cfg_restore = 1;
10423 #endif /* CONFIG_ARCH_MSM */
10424 #endif /* SUPPORT_LINKDOWN_RECOVERY */
10425 		ret = -ETIMEDOUT;
10426 		goto out;
10427 	} else {
10428 		if (prot->ioctl_received != IOCTL_RETURN_ON_SUCCESS) {
10429 			DHD_ERROR(("%s: IOCTL failure due to ioctl_received = %d\n",
10430 				__FUNCTION__, prot->ioctl_received));
10431 			ret = -EINVAL;
10432 			goto out;
10433 		}
10434 		dhd->rxcnt_timeout = 0;
10435 		dhd->rx_ctlpkts++;
10436 		DHD_CTL(("%s: ioctl resp resumed, got %d\n",
10437 			__FUNCTION__, prot->ioctl_resplen));
10438 	}
10439 
10440 	if (dhd->prot->ioctl_resplen > len)
10441 		dhd->prot->ioctl_resplen = (uint16)len;
10442 	if (buf)
10443 		bcopy(dhd->prot->retbuf.va, buf, dhd->prot->ioctl_resplen);
10444 
10445 	ret = (int)(dhd->prot->ioctl_status);
10446 
10447 out:
10448 	DHD_GENERAL_LOCK(dhd, flags);
10449 	dhd->prot->ioctl_state = 0;
10450 	dhd->prot->ioctl_resplen = 0;
10451 	dhd->prot->ioctl_received = IOCTL_WAIT;
10452 	dhd->prot->curr_ioctl_cmd = 0;
10453 	DHD_GENERAL_UNLOCK(dhd, flags);
10454 
10455 	return ret;
10456 } /* dhd_msgbuf_wait_ioctl_cmplt */
10457 
10458 static int
10459 dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
10460 {
10461 	int ret = 0;
10462 
10463 	DHD_TRACE(("%s: Enter \n", __FUNCTION__));
10464 
10465 	if (dhd->bus->is_linkdown) {
10466 		DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n",
10467 			__FUNCTION__));
10468 		return -EIO;
10469 	}
10470 
10471 	if (dhd->busstate == DHD_BUS_DOWN) {
10472 		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
10473 		return -EIO;
10474 	}
10475 
10476 	/* don't talk to the dongle if fw is about to be reloaded */
10477 	if (dhd->hang_was_sent) {
10478 		DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
10479 			__FUNCTION__));
10480 		return -EIO;
10481 	}
10482 
	DHD_CTL(("ACTION %d ifidx %d cmd %d len %d \n",
10484 		action, ifidx, cmd, len));
10485 
10486 #ifdef REPORT_FATAL_TIMEOUTS
	/*
	 * These timers "should" be started before sending the H2D interrupt.
	 * Think of the scenario where the H2D interrupt is fired and the dongle
	 * responds back immediately. From the DPC we would stop the cmd and bus
	 * timers. But the process context could have switched out, leading to
	 * a situation where the timers are not started yet but are actually stopped.
	 *
	 * Disable preemption from the time we start the timer until we are done
	 * with sending H2D interrupts.
	 */
10497 	OSL_DISABLE_PREEMPTION(dhd->osh);
10498 	dhd_set_request_id(dhd, dhd->prot->ioctl_trans_id+1, cmd);
10499 	dhd_start_cmd_timer(dhd);
10500 	dhd_start_bus_timer(dhd);
10501 #endif /* REPORT_FATAL_TIMEOUTS */
10502 
10503 	/* Fill up msgbuf for ioctl req */
10504 	ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
10505 
10506 #ifdef REPORT_FATAL_TIMEOUTS
10507 	/* For some reason if we fail to ring door bell, stop the timers */
10508 	if (ret < 0) {
10509 		DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
10510 		dhd_stop_cmd_timer(dhd);
10511 		dhd_stop_bus_timer(dhd);
10512 		OSL_ENABLE_PREEMPTION(dhd->osh);
10513 		goto done;
10514 	}
10515 
10516 	OSL_ENABLE_PREEMPTION(dhd->osh);
10517 #else
10518 	if (ret < 0) {
10519 		DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
10520 		goto done;
10521 	}
10522 #endif /* REPORT_FATAL_TIMEOUTS */
10523 
10524 	ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
10525 
10526 done:
10527 	return ret;
10528 }
10529 
10530 /** Called by upper DHD layer. Handles a protocol control response asynchronously. */
10531 int dhd_prot_ctl_complete(dhd_pub_t *dhd)
10532 {
10533 	return 0;
10534 }
10535 
10536 /** Called by upper DHD layer. Check for and handle local prot-specific iovar commands */
10537 int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name,
10538                              void *params, int plen, void *arg, int len, bool set)
10539 {
10540 	return BCME_UNSUPPORTED;
10541 }
10542 
10543 #ifdef DHD_DUMP_PCIE_RINGS
10544 int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, const void *user_buf,
10545 	unsigned long *file_posn, bool file_write)
10546 {
10547 	dhd_prot_t *prot;
10548 	msgbuf_ring_t *ring;
10549 	int ret = 0;
10550 	uint16 h2d_flowrings_total;
10551 	uint16 flowid;
10552 
10553 	if (!(dhd) || !(dhd->prot)) {
10554 		goto exit;
10555 	}
10556 	prot = dhd->prot;
10557 
10558 	/* Below is the same ring dump sequence followed in parser as well. */
10559 	ring = &prot->h2dring_ctrl_subn;
10560 	if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
10561 		goto exit;
10562 
10563 	ring = &prot->h2dring_rxp_subn;
10564 	if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
10565 		goto exit;
10566 
10567 	ring = &prot->d2hring_ctrl_cpln;
10568 	if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
10569 		goto exit;
10570 
10571 	ring = &prot->d2hring_tx_cpln;
10572 	if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
10573 		goto exit;
10574 
10575 	ring = &prot->d2hring_rx_cpln;
10576 	if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
10577 		goto exit;
10578 
10579 	h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
10580 	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
10581 		if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) {
10582 			goto exit;
10583 		}
10584 	}
10585 
10586 #ifdef EWP_EDL
10587 	if (dhd->dongle_edl_support) {
10588 		ring = prot->d2hring_edl;
10589 		if ((ret = dhd_edl_ring_hdr_write(dhd, ring, file, user_buf, file_posn)) < 0)
10590 			goto exit;
10591 	}
10592 	else if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support)
10593 #else
10594 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
10595 #endif /* EWP_EDL */
10596 	{
10597 		ring = prot->h2dring_info_subn;
10598 		if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
10599 			goto exit;
10600 
10601 		ring = prot->d2hring_info_cpln;
10602 		if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
10603 			goto exit;
10604 	}
10605 
exit:
10607 	return ret;
10608 }
10609 
10610 /* Write to file */
10611 static
10612 int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
10613 	const void *user_buf, unsigned long *file_posn)
10614 {
10615 	int ret = 0;
10616 
10617 	if (ring == NULL) {
10618 		DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n",
10619 			__FUNCTION__));
10620 		return BCME_ERROR;
10621 	}
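	/* The entire ring is dumped in one shot: max_items work items of
	 * item_len bytes each, starting at the ring's DMA-able base address.
	 */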
10622 	if (file) {
10623 		ret = dhd_os_write_file_posn(file, file_posn, (char *)(ring->dma_buf.va),
10624 				((unsigned long)(ring->max_items) * (ring->item_len)));
10625 		if (ret < 0) {
10626 			DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
10627 			ret = BCME_ERROR;
10628 		}
10629 	} else if (user_buf) {
10630 		ret = dhd_export_debug_data((char *)(ring->dma_buf.va), NULL, user_buf,
10631 			((unsigned long)(ring->max_items) * (ring->item_len)), (int *)file_posn);
10632 	}
10633 	return ret;
10634 }
10635 
10636 #ifdef EWP_EDL
10637 /* Write to file */
10638 static
10639 int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf,
10640 	unsigned long *file_posn)
10641 {
10642 	int ret = 0, nitems = 0;
10643 	char *buf = NULL, *ptr = NULL;
10644 	uint8 *msg_addr = NULL;
10645 	uint16	rd = 0;
10646 
10647 	if (ring == NULL) {
10648 		DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n",
10649 			__FUNCTION__));
10650 		ret = BCME_ERROR;
10651 		goto done;
10652 	}
10653 
10654 	buf = MALLOCZ(dhd->osh, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
10655 	if (buf == NULL) {
10656 		DHD_ERROR(("%s: buffer allocation failed\n", __FUNCTION__));
10657 		ret = BCME_ERROR;
10658 		goto done;
10659 	}
10660 	ptr = buf;
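	/* Walk all EDL work items and copy only the fixed-size header portion
	 * of each item into the staging buffer; the item payloads are not dumped.
	 */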
10661 
10662 	for (; nitems < D2HRING_EDL_MAX_ITEM; nitems++, rd++) {
10663 		msg_addr = (uint8 *)ring->dma_buf.va + (rd * ring->item_len);
10664 		memcpy(ptr, (char *)msg_addr, D2HRING_EDL_HDR_SIZE);
10665 		ptr += D2HRING_EDL_HDR_SIZE;
10666 	}
10667 	if (file) {
10668 		ret = dhd_os_write_file_posn(file, file_posn, buf,
10669 				(D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM));
10670 		if (ret < 0) {
10671 			DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
10672 			goto done;
10673 		}
10674 	}
10675 	else {
10676 		ret = dhd_export_debug_data(buf, NULL, user_buf,
10677 			(D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM), file_posn);
10678 	}
10679 
10680 done:
10681 	if (buf) {
10682 		MFREE(dhd->osh, buf, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
10683 	}
10684 	return ret;
10685 }
10686 #endif /* EWP_EDL */
10687 #endif /* DHD_DUMP_PCIE_RINGS */
10688 
10689 /** Add prot dump output to a buffer */
10690 void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
10691 {
10692 #if defined(BCM_ROUTER_DHD)
10693 	bcm_bprintf(b, "DHD Router: 1GMAC HotBRC forwarding mode\n");
10694 #endif /* BCM_ROUTER_DHD */
10695 
10696 	if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM)
10697 		bcm_bprintf(b, "\nd2h_sync: SEQNUM:");
10698 	else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM)
10699 		bcm_bprintf(b, "\nd2h_sync: XORCSUM:");
10700 	else
10701 		bcm_bprintf(b, "\nd2h_sync: NONE:");
10702 	bcm_bprintf(b, " d2h_sync_wait max<%lu> tot<%lu>\n",
10703 		dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot);
10704 
10705 	bcm_bprintf(b, "\nDongle DMA Indices: h2d %d  d2h %d index size %d bytes\n",
10706 		dhd->dma_h2d_ring_upd_support,
10707 		dhd->dma_d2h_ring_upd_support,
10708 		dhd->prot->rw_index_sz);
10709 	bcm_bprintf(b, "h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
10710 		h2d_max_txpost, dhd->prot->h2d_max_txpost);
10711 #if defined(DHD_HTPUT_TUNABLES)
10712 	bcm_bprintf(b, "h2d_htput_max_txpost: %d, prot->h2d_htput_max_txpost: %d\n",
10713 		h2d_htput_max_txpost, dhd->prot->h2d_htput_max_txpost);
10714 #endif /* DHD_HTPUT_TUNABLES */
10715 	bcm_bprintf(b, "pktid_txq_start_cnt: %d\n", dhd->prot->pktid_txq_start_cnt);
10716 	bcm_bprintf(b, "pktid_txq_stop_cnt: %d\n", dhd->prot->pktid_txq_stop_cnt);
10717 	bcm_bprintf(b, "pktid_depleted_cnt: %d\n", dhd->prot->pktid_depleted_cnt);
10718 	bcm_bprintf(b, "txcpl_db_cnt: %d\n", dhd->prot->txcpl_db_cnt);
10719 #ifdef DHD_DMA_INDICES_SEQNUM
10720 	bcm_bprintf(b, "host_seqnum %u dngl_seqnum %u\n", dhd_prot_read_seqnum(dhd, TRUE),
10721 		dhd_prot_read_seqnum(dhd, FALSE));
10722 #endif /* DHD_DMA_INDICES_SEQNUM */
10723 	bcm_bprintf(b, "tx_h2d_db_cnt:%llu\n", dhd->prot->tx_h2d_db_cnt);
10724 #ifdef AGG_H2D_DB
10725 	bcm_bprintf(b, "agg_h2d_db_enab:%d agg_h2d_db_timeout:%d agg_h2d_db_inflight_thresh:%d\n",
10726 		agg_h2d_db_enab, agg_h2d_db_timeout, agg_h2d_db_inflight_thresh);
10727 	bcm_bprintf(b, "agg_h2d_db: timer_db_cnt:%d direct_db_cnt:%d\n",
10728 		dhd->prot->agg_h2d_db_info.timer_db_cnt, dhd->prot->agg_h2d_db_info.direct_db_cnt);
10729 	dhd_agg_inflight_stats_dump(dhd, b);
10730 #endif /* AGG_H2D_DB */
10731 }
10732 
10733 /* Update local copy of dongle statistics */
10734 void dhd_prot_dstats(dhd_pub_t *dhd)
10735 {
10736 	return;
10737 }
10738 
10739 /** Called by upper DHD layer */
10740 int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
10741 	uint reorder_info_len, void **pkt, uint32 *free_buf_count)
10742 {
10743 	return 0;
10744 }
10745 
10746 /** Debug related, post a dummy message to interrupt dongle. Used to process cons commands. */
10747 int
10748 dhd_post_dummy_msg(dhd_pub_t *dhd)
10749 {
10750 	unsigned long flags;
10751 	hostevent_hdr_t *hevent = NULL;
10752 	uint16 alloced = 0;
10753 
10754 	dhd_prot_t *prot = dhd->prot;
10755 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
10756 
10757 #ifdef PCIE_INB_DW
10758 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
10759 		return BCME_ERROR;
10760 #endif /* PCIE_INB_DW */
10761 	DHD_RING_LOCK(ring->ring_lock, flags);
10762 
10763 	hevent = (hostevent_hdr_t *)
10764 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
10765 
10766 	if (hevent == NULL) {
10767 		DHD_RING_UNLOCK(ring->ring_lock, flags);
10768 #ifdef PCIE_INB_DW
10769 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10770 #endif
10771 		return -1;
10772 	}
10773 
10774 	/* CMN msg header */
10775 	hevent->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
10776 	ring->seqnum++;
10777 	hevent->msg.msg_type = MSG_TYPE_HOST_EVNT;
10778 	hevent->msg.if_id = 0;
10779 	hevent->msg.flags = ring->current_phase;
10780 
10781 	/* Event payload */
10782 	hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD);
10783 
10784 	/* Since, we are filling the data directly into the bufptr obtained
10785 	 * from the msgbuf, we can directly call the write_complete
10786 	 */
10787 	dhd_prot_ring_write_complete(dhd, ring, hevent, 1);
10788 
10789 	DHD_RING_UNLOCK(ring->ring_lock, flags);
10790 
10791 #ifdef PCIE_INB_DW
10792 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10793 #endif
10794 
10795 	return 0;
10796 }
10797 
10798 /**
10799  * If exactly_nitems is true, this function will allocate space for nitems or fail
10800  * If exactly_nitems is false, this function will allocate space for nitems or less
10801  */
10802 static void *
10803 BCMFASTPATH(dhd_prot_alloc_ring_space)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
10804 	uint16 nitems, uint16 * alloced, bool exactly_nitems)
10805 {
10806 	void * ret_buf;
10807 
10808 	if (nitems == 0) {
10809 		DHD_ERROR(("%s: nitems is 0 - ring(%s)\n", __FUNCTION__, ring->name));
10810 		return NULL;
10811 	}
10812 
10813 	/* Alloc space for nitems in the ring */
10814 	ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
10815 
10816 	if (ret_buf == NULL) {
10817 		/* if alloc failed , invalidate cached read ptr */
10818 		if (dhd->dma_d2h_ring_upd_support) {
10819 			ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
10820 		} else {
10821 			dhd_bus_cmn_readshared(dhd->bus, &(ring->rd), RING_RD_UPD, ring->idx);
10822 #ifdef SUPPORT_LINKDOWN_RECOVERY
10823 			/* Check if ring->rd is valid */
10824 			if (ring->rd >= ring->max_items) {
10825 				DHD_ERROR(("%s: Invalid rd idx=%d\n", ring->name, ring->rd));
10826 				dhd->bus->read_shm_fail = TRUE;
10827 				return NULL;
10828 			}
10829 #endif /* SUPPORT_LINKDOWN_RECOVERY */
10830 		}
10831 
10832 		/* Try allocating once more */
10833 		ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
10834 
10835 		if (ret_buf == NULL) {
10836 			DHD_INFO(("%s: Ring space not available\n", ring->name));
10837 			return NULL;
10838 		}
10839 	}
10840 
10841 	if (ret_buf == HOST_RING_BASE(ring)) {
10842 		DHD_MSGBUF_INFO(("%s: setting the phase now\n", ring->name));
10843 		ring->current_phase = ring->current_phase ? 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
10844 	}
10845 
10846 	/* Return alloced space */
10847 	return ret_buf;
10848 }
10849 
10850 /**
10851  * Non-inline ioctl request.
10852  * Forms an ioctl request, as per the ioctptr_reqst_hdr_t header, directly in the circular buffer.
10853  * A separate request buffer is formed where a 4 byte cmn header is added in the front;
10854  * the buf contents from the parent function are copied to the remaining section of this buffer.
10855  */
10856 static int
10857 dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx)
10858 {
10859 	dhd_prot_t *prot = dhd->prot;
10860 	ioctl_req_msg_t *ioct_rqst;
10861 	void * ioct_buf;	/* For ioctl payload */
10862 	uint16  rqstlen, resplen;
10863 	unsigned long flags;
10864 	uint16 alloced = 0;
10865 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
10866 #ifdef DBG_DW_CHK_PCIE_READ_LATENCY
10867 	ulong addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10868 	ktime_t begin_time, end_time;
10869 	s64 diff_ns;
10870 #endif /* DBG_DW_CHK_PCIE_READ_LATENCY */
10871 
10872 	if (dhd_query_bus_erros(dhd)) {
10873 		return -EIO;
10874 	}
10875 
10876 	rqstlen = len;
10877 	resplen = len;
10878 
10879 	/* Limit ioct request to MSGBUF_MAX_MSG_SIZE bytes including hdrs */
10880 	/* An 8K allocation of the dongle buffer fails. */
10881 	/* dhd doesn't give separate input & output buf lens, */
10882 	/* so we make the assumption that the input length can never be more than 2K. */
10883 	rqstlen = MIN(rqstlen, MSGBUF_IOCTL_MAX_RQSTLEN);
10884 
10885 #ifdef PCIE_INB_DW
10886 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
10887 		return BCME_ERROR;
10888 
10889 #ifdef DBG_DW_CHK_PCIE_READ_LATENCY
10890 	preempt_disable();
10891 	begin_time = ktime_get();
10892 	R_REG(dhd->osh, (volatile uint16 *)(dhd->bus->tcm + addr));
10893 	end_time = ktime_get();
10894 	preempt_enable();
10895 	diff_ns = ktime_to_ns(ktime_sub(end_time, begin_time));
10896 	/* Check if the delta is greater than 1 msec */
10897 	if (diff_ns > (1 * NSEC_PER_MSEC)) {
10898 		DHD_ERROR(("%s: found latency over 1ms (%lld ns), ds state=%d\n", __func__,
10899 		       diff_ns, dhdpcie_bus_get_pcie_inband_dw_state(dhd->bus)));
10900 	}
10901 #endif /* DBG_DW_CHK_PCIE_READ_LATENCY */
10902 #endif /* PCIE_INB_DW */
10903 
10904 	DHD_RING_LOCK(ring->ring_lock, flags);
10905 
10906 	if (prot->ioctl_state) {
10907 		DHD_ERROR(("%s: pending ioctl %02x\n", __FUNCTION__, prot->ioctl_state));
10908 		DHD_RING_UNLOCK(ring->ring_lock, flags);
10909 #ifdef PCIE_INB_DW
10910 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10911 #endif
10912 		return BCME_BUSY;
10913 	} else {
10914 		prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING;
10915 	}
10916 
10917 	/* Request for cbuf space */
10918 	ioct_rqst = (ioctl_req_msg_t*)
10919 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
10920 	if (ioct_rqst == NULL) {
10921 		DHD_ERROR(("couldn't allocate space on msgring to send ioctl request\n"));
10922 		prot->ioctl_state = 0;
10923 		prot->curr_ioctl_cmd = 0;
10924 		prot->ioctl_received = IOCTL_WAIT;
10925 		DHD_RING_UNLOCK(ring->ring_lock, flags);
10926 #ifdef PCIE_INB_DW
10927 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10928 #endif
10929 		return -1;
10930 	}
10931 
10932 	/* Common msg buf hdr */
10933 	ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ;
10934 	ioct_rqst->cmn_hdr.if_id = (uint8)ifidx;
10935 	ioct_rqst->cmn_hdr.flags = ring->current_phase;
10936 	ioct_rqst->cmn_hdr.request_id = htol32(DHD_IOCTL_REQ_PKTID);
10937 	ioct_rqst->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
10938 	ring->seqnum++;
10939 
10940 	ioct_rqst->cmd = htol32(cmd);
10941 	prot->curr_ioctl_cmd = cmd;
10942 	ioct_rqst->output_buf_len = htol16(resplen);
10943 	prot->ioctl_trans_id++;
10944 	ioct_rqst->trans_id = prot->ioctl_trans_id;
10945 
10946 	/* populate ioctl buffer info */
10947 	ioct_rqst->input_buf_len = htol16(rqstlen);
10948 	ioct_rqst->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->ioctbuf.pa));
10949 	ioct_rqst->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->ioctbuf.pa));
10950 	/* copy ioct payload */
10951 	ioct_buf = (void *) prot->ioctbuf.va;
10952 
10953 	prot->ioctl_fillup_time = OSL_LOCALTIME_NS();
10954 
10955 	if (buf)
10956 		memcpy(ioct_buf, buf, len);
10957 
10958 	OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len);
10959 
10960 	if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN))
10961 		DHD_ERROR(("host ioctl address unaligned!\n"));
10962 
10963 	DHD_CTL(("submitted IOCTL request: request_id %d, cmd %d, output_buf_len %d, tx_id %d\n",
10964 		ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len,
10965 		ioct_rqst->trans_id));
10966 
10967 #if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
10968 	dhd_prot_ioctl_trace(dhd, ioct_rqst, buf, len);
10969 #endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */
10970 
10971 	/* update ring's WR index and ring doorbell to dongle */
10972 	dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
10973 
10974 	DHD_RING_UNLOCK(ring->ring_lock, flags);
10975 
10976 #ifdef PCIE_INB_DW
10977 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10978 #endif
10979 
10980 	return 0;
10981 } /* dhd_fillup_ioct_reqst */
10982 
10983 /**
10984  * dhd_prot_ring_attach - Initialize the msgbuf_ring object and attach a
10985  * DMA-able buffer to it. The ring is NOT tagged as inited until all the ring
10986  * information is posted to the dongle.
10987  *
10988  * Invoked in dhd_prot_attach for the common rings, and in dhd_prot_init for
10989  * each flowring in pool of flowrings.
10990  *
10991  * returns BCME_OK=0 on success
10992  * returns non-zero negative error value on failure.
10993  */
10994 static int
10995 dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, const char *name,
10996 	uint16 max_items, uint16 item_len, uint16 ringid)
10997 {
10998 	int dma_buf_alloced = BCME_NOMEM;
10999 	uint32 dma_buf_len;
11000 	dhd_prot_t *prot = dhd->prot;
11001 	uint16 max_flowrings = dhd->bus->max_tx_flowrings;
11002 	dhd_dma_buf_t *dma_buf = NULL;
11003 
11004 	ASSERT(ring);
11005 	ASSERT(name);
11006 	ASSERT((max_items < 0xFFFF) && (item_len < 0xFFFF) && (ringid < 0xFFFF));
11007 
11008 	/* Init name */
11009 	strlcpy((char *)ring->name, name, sizeof(ring->name));
11010 
11011 	ring->idx = ringid;
11012 
11013 #if defined(DHD_HTPUT_TUNABLES)
11014 	/* Use HTPUT max items */
11015 	if (DHD_IS_FLOWRING(ringid, max_flowrings) &&
11016 		DHD_IS_FLOWID_HTPUT(dhd, DHD_RINGID_TO_FLOWID(ringid))) {
11017 		max_items = prot->h2d_htput_max_txpost;
11018 	}
11019 #endif /* DHD_HTPUT_TUNABLES */
11020 
11021 	dma_buf_len = max_items * item_len;
11022 
11023 	ring->max_items = max_items;
11024 	ring->item_len = item_len;
11025 
11026 	/* A contiguous space may be reserved for all flowrings */
11027 	if (DHD_IS_FLOWRING(ringid, max_flowrings) && (prot->flowrings_dma_buf.va)) {
11028 		/* Carve out from the contiguous DMA-able flowring buffer */
11029 		uint16 flowid;
11030 		uint32 base_offset;
11031 		dhd_dma_buf_t *rsv_buf = &prot->flowrings_dma_buf;
11032 
11033 		dma_buf = &ring->dma_buf;
11034 
11035 		flowid = DHD_RINGID_TO_FLOWID(ringid);
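		/* Flowrings follow the H2D common rings, so the carve-out offset is
		 * computed relative to the first flowring in the reserved buffer.
		 */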
11036 		base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len;
11037 
11038 		ASSERT(base_offset + dma_buf_len <= rsv_buf->len);
11039 
11040 		dma_buf->len = dma_buf_len;
11041 		dma_buf->va = (void *)((uintptr)rsv_buf->va + base_offset);
11042 		PHYSADDRHISET(dma_buf->pa, PHYSADDRHI(rsv_buf->pa));
11043 		PHYSADDRLOSET(dma_buf->pa, PHYSADDRLO(rsv_buf->pa) + base_offset);
11044 
11045 		/* On 64bit, contiguous space may not span across 0x00000000FFFFFFFF */
11046 		ASSERT(PHYSADDRLO(dma_buf->pa) >= PHYSADDRLO(rsv_buf->pa));
11047 
11048 		dma_buf->dmah   = rsv_buf->dmah;
11049 		dma_buf->secdma = rsv_buf->secdma;
11050 
11051 		(void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
11052 	} else {
11053 #ifdef EWP_EDL
11054 		if (ring == dhd->prot->d2hring_edl) {
11055 			/* For EDL ring, memory is alloced during attach,
11056 			* so just need to copy the dma_buf to the ring's dma_buf
11057 			*/
11058 			memcpy(&ring->dma_buf, &dhd->edl_ring_mem, sizeof(ring->dma_buf));
11059 			dma_buf = &ring->dma_buf;
11060 			if (dma_buf->va == NULL) {
11061 				return BCME_NOMEM;
11062 			}
11063 		} else
11064 #endif /* EWP_EDL */
11065 		{
11066 			/* Allocate a dhd_dma_buf */
11067 			dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len);
11068 			if (dma_buf_alloced != BCME_OK) {
11069 				return BCME_NOMEM;
11070 			}
11071 		}
11072 	}
11073 
11074 	/* CAUTION: Save ring::base_addr in little endian format! */
11075 	dhd_base_addr_htolpa(&ring->base_addr, ring->dma_buf.pa);
11076 
11077 	ring->ring_lock = osl_spin_lock_init(dhd->osh);
11078 
11079 	DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d "
11080 		"ring start %p buf phys addr  %x:%x \n",
11081 		ring->name, ring->max_items, ring->item_len,
11082 		dma_buf_len, ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
11083 		ltoh32(ring->base_addr.low_addr)));
11084 
11085 	return BCME_OK;
11086 } /* dhd_prot_ring_attach */
11087 
11088 /**
11089  * dhd_prot_ring_init - Post the common ring information to dongle.
11090  *
11091  * Used only for common rings.
11092  *
11093  * The flowrings information is passed via the create flowring control message
11094  * (tx_flowring_create_request_t) sent over the H2D control submission common
11095  * ring.
11096  */
11097 static void
11098 dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring)
11099 {
11100 	ring->wr = 0;
11101 	ring->rd = 0;
11102 	ring->curr_rd = 0;
11103 
11104 	/* CAUTION: ring::base_addr already in Little Endian */
11105 	dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr,
11106 		sizeof(sh_addr_t), RING_BUF_ADDR, ring->idx);
11107 	dhd_bus_cmn_writeshared(dhd->bus, &ring->max_items,
11108 		sizeof(uint16), RING_MAX_ITEMS, ring->idx);
11109 	dhd_bus_cmn_writeshared(dhd->bus, &ring->item_len,
11110 		sizeof(uint16), RING_ITEM_LEN, ring->idx);
11111 
11112 	dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
11113 		sizeof(uint16), RING_WR_UPD, ring->idx);
11114 	dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
11115 		sizeof(uint16), RING_RD_UPD, ring->idx);
11116 
11117 	/* ring inited */
11118 	ring->inited = TRUE;
11119 
11120 } /* dhd_prot_ring_init */
11121 
11122 /**
11123  * dhd_prot_ring_reset - bzero a ring's DMA-ble buffer and cache flush
11124  * Reset WR and RD indices to 0.
11125  */
11126 static void
11127 dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring)
11128 {
11129 	DHD_TRACE(("%s\n", __FUNCTION__));
11130 
11131 	dhd_dma_buf_reset(dhd, &ring->dma_buf);
11132 
11133 	ring->rd = ring->wr = 0;
11134 	ring->curr_rd = 0;
11135 	ring->inited = FALSE;
11136 	ring->create_pending = FALSE;
11137 }
11138 
11139 /**
11140  * dhd_prot_ring_detach - Detach the DMA-able buffer and any other objects
11141  * hanging off the msgbuf_ring.
11142  */
11143 static void
11144 dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring)
11145 {
11146 	dhd_prot_t *prot = dhd->prot;
11147 	uint16 max_flowrings = dhd->bus->max_tx_flowrings;
11148 	ASSERT(ring);
11149 
11150 	ring->inited = FALSE;
11151 	/* rd = ~0, wr = ring->rd - 1, max_items = 0, len_item = ~0 */
11152 
11153 	/* If the DMA-able buffer was carved out of a pre-reserved contiguous
11154 	 * memory, then simply stop using it.
11155 	 */
11156 	if (DHD_IS_FLOWRING(ring->idx, max_flowrings) && (prot->flowrings_dma_buf.va)) {
11157 		(void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
11158 		memset(&ring->dma_buf, 0, sizeof(dhd_dma_buf_t));
11159 	} else {
11160 #ifdef EWP_EDL
11161 		if (ring == dhd->prot->d2hring_edl) {
11162 			/* For EDL ring, do not free ring mem here,
11163 			* it is done in dhd_detach
11164 			*/
11165 			memset(&ring->dma_buf, 0, sizeof(ring->dma_buf));
11166 		} else
11167 #endif /* EWP_EDL */
11168 		{
11169 			dhd_dma_buf_free(dhd, &ring->dma_buf);
11170 		}
11171 	}
11172 
11173 	osl_spin_lock_deinit(dhd->osh, ring->ring_lock);
11174 
11175 } /* dhd_prot_ring_detach */
11176 
11177 /* Fetch number of H2D flowrings given the total number of h2d rings */
11178 uint16
11179 dhd_get_max_flow_rings(dhd_pub_t *dhd)
11180 {
11181 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
11182 		return dhd->bus->max_tx_flowrings;
11183 	else
11184 		return (dhd->bus->max_tx_flowrings - BCMPCIE_H2D_COMMON_MSGRINGS);
11185 }
11186 
11187 /**
11188  * dhd_prot_flowrings_pool_attach - Initialize a pool of flowring msgbuf_ring_t.
11189  *
11190  * Allocate a pool of msgbuf_ring along with DMA-able buffers for flowrings.
11191  * Dongle includes common rings when it advertizes the number of H2D rings.
11192  * Allocates a pool of msgbuf_ring_t and invokes dhd_prot_ring_attach to
11193  * allocate the DMA-able buffer and initialize each msgbuf_ring_t object.
11194  *
11195  * dhd_prot_ring_attach is invoked to perform the actual initialization and
11196  * to attach the DMA-able buffer.
11197  *
11198  * Later dhd_prot_flowrings_pool_fetch() may be used to fetch a preallocated and
11199  * initialized msgbuf_ring_t object.
11200  *
11201  * returns BCME_OK=0 on success
11202  * returns non-zero negative error value on failure.
11203  */
11204 static int
11205 dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd)
11206 {
11207 	uint16 flowid;
11208 	msgbuf_ring_t *ring;
11209 	uint16 h2d_flowrings_total; /* exclude H2D common rings */
11210 	dhd_prot_t *prot = dhd->prot;
11211 	char ring_name[RING_NAME_MAX_LENGTH];
11212 
11213 	if (prot->h2d_flowrings_pool != NULL)
11214 		return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
11215 
11216 	ASSERT(prot->h2d_rings_total == 0);
11217 
11218 	/* h2d_rings_total includes H2D common rings: ctrl and rxbuf subn */
11219 	prot->h2d_rings_total = (uint16)dhd_bus_max_h2d_queues(dhd->bus);
11220 
11221 	if (prot->h2d_rings_total < BCMPCIE_H2D_COMMON_MSGRINGS) {
11222 		DHD_ERROR(("%s: h2d_rings_total advertized as %u\n",
11223 			__FUNCTION__, prot->h2d_rings_total));
11224 		return BCME_ERROR;
11225 	}
11226 
11227 	/* Subtract number of H2D common rings, to determine number of flowrings */
11228 	h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
11229 
11230 	DHD_ERROR(("Attach flowrings pool for %d rings\n", h2d_flowrings_total));
11231 
11232 	/* Allocate pool of msgbuf_ring_t objects for all flowrings */
11233 	prot->h2d_flowrings_pool = (msgbuf_ring_t *)MALLOCZ(prot->osh,
11234 		(h2d_flowrings_total * sizeof(msgbuf_ring_t)));
11235 
11236 	if (prot->h2d_flowrings_pool == NULL) {
11237 		DHD_ERROR(("%s: flowrings pool for %d flowrings, alloc failure\n",
11238 			__FUNCTION__, h2d_flowrings_total));
11239 		goto fail;
11240 	}
11241 
11242 	/* Setup & Attach a DMA-able buffer to each flowring in the flowring pool */
11243 	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
11244 		snprintf(ring_name, sizeof(ring_name), "h2dflr_%03u", flowid);
11245 		/* For HTPUT case max_items will be changed inside dhd_prot_ring_attach */
11246 		if (dhd_prot_ring_attach(dhd, ring, ring_name,
11247 		        prot->h2d_max_txpost, H2DRING_TXPOST_ITEMSIZE,
11248 		        DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) {
11249 			goto attach_fail;
11250 		}
11251 	}
11252 
11253 	return BCME_OK;
11254 
11255 attach_fail:
11256 	/* XXX: On a per project basis, one may decide whether to continue with
11257 	 * "fewer" flowrings, and what value of fewer suffices.
11258 	 */
11259 	dhd_prot_flowrings_pool_detach(dhd); /* Free entire pool of flowrings */
11260 
11261 fail:
11262 	prot->h2d_rings_total = 0;
11263 	return BCME_NOMEM;
11264 
11265 } /* dhd_prot_flowrings_pool_attach */
11266 
11267 /**
11268  * dhd_prot_flowrings_pool_reset - Reset all msgbuf_ring_t objects in the pool.
11269  * Invokes dhd_prot_ring_reset to perform the actual reset.
11270  *
11271  * The DMA-able buffer is not freed during reset and neither is the flowring
11272  * pool freed.
11273  *
11274  * dhd_prot_flowrings_pool_reset will be invoked in dhd_prot_reset. Following
11275  * the dhd_prot_reset, dhd_prot_init will be re-invoked, and the flowring pool
11276  * from a previous flowring pool instantiation will be reused.
11277  *
11278  * This will avoid a fragmented DMA-able memory condition, if multiple
11279  * dhd_prot_reset were invoked to reboot the dongle without a full detach/attach
11280  * cycle.
11281  */
11282 static void
11283 dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd)
11284 {
11285 	uint16 flowid, h2d_flowrings_total;
11286 	msgbuf_ring_t *ring;
11287 	dhd_prot_t *prot = dhd->prot;
11288 
11289 	if (prot->h2d_flowrings_pool == NULL) {
11290 		ASSERT(prot->h2d_rings_total == 0);
11291 		return;
11292 	}
11293 	h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
11294 	/* Reset each flowring in the flowring pool */
11295 	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
11296 		dhd_prot_ring_reset(dhd, ring);
11297 		ring->inited = FALSE;
11298 	}
11299 
11300 	/* Flowring pool state must be as-if dhd_prot_flowrings_pool_attach */
11301 }
11302 
11303 /**
11304  * dhd_prot_flowrings_pool_detach - Free pool of msgbuf_ring along with
11305  * DMA-able buffers for flowrings.
11306  * dhd_prot_ring_detach is invoked to free the DMA-able buffer and perform any
11307  * de-initialization of each msgbuf_ring_t.
11308  */
11309 static void
11310 dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd)
11311 {
11312 	int flowid;
11313 	msgbuf_ring_t *ring;
11314 	uint16 h2d_flowrings_total; /* exclude H2D common rings */
11315 	dhd_prot_t *prot = dhd->prot;
11316 
11317 	if (prot->h2d_flowrings_pool == NULL) {
11318 		ASSERT(prot->h2d_rings_total == 0);
11319 		return;
11320 	}
11321 
11322 	h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
11323 	/* Detach the DMA-able buffer for each flowring in the flowring pool */
11324 	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
11325 		dhd_prot_ring_detach(dhd, ring);
11326 	}
11327 
11328 	MFREE(prot->osh, prot->h2d_flowrings_pool,
11329 		(h2d_flowrings_total * sizeof(msgbuf_ring_t)));
11330 
11331 	prot->h2d_rings_total = 0;
11332 
11333 } /* dhd_prot_flowrings_pool_detach */
11334 
11335 /**
11336  * dhd_prot_flowrings_pool_fetch - Fetch a preallocated and initialized
11337  * msgbuf_ring from the flowring pool, and assign it.
11338  *
11339  * Unlike common rings, which use dhd_prot_ring_init() to pass the common
11340  * ring information to the dongle, a flowring's information is passed via a
11341  * flowring create control message.
11342  *
11343  * Only the ring state (WR, RD) indices are initialized.
11344  */
11345 static msgbuf_ring_t *
11346 dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd, uint16 flowid)
11347 {
11348 	msgbuf_ring_t *ring;
11349 	dhd_prot_t *prot = dhd->prot;
11350 
11351 	ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
11352 	ASSERT(flowid < prot->h2d_rings_total);
11353 	ASSERT(prot->h2d_flowrings_pool != NULL);
11354 
11355 	ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
11356 
11357 	/* ASSERT flow_ring->inited == FALSE */
11358 
11359 	ring->wr = 0;
11360 	ring->rd = 0;
11361 	ring->curr_rd = 0;
11362 	ring->inited = TRUE;
11363 	/**
11364 	 * Every time a flowring starts dynamically, initialize current_phase with 0
11365 	 * then flip to BCMPCIE_CMNHDR_PHASE_BIT_INIT
11366 	 */
11367 	ring->current_phase = 0;
11368 	return ring;
11369 }
11370 
11371 /**
11372  * dhd_prot_flowrings_pool_release - release a previously fetched flowring's
11373  * msgbuf_ring back to the flow_ring pool.
11374  */
11375 void
11376 dhd_prot_flowrings_pool_release(dhd_pub_t *dhd, uint16 flowid, void *flow_ring)
11377 {
11378 	msgbuf_ring_t *ring;
11379 	dhd_prot_t *prot = dhd->prot;
11380 
11381 	ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
11382 	ASSERT(flowid < prot->h2d_rings_total);
11383 	ASSERT(prot->h2d_flowrings_pool != NULL);
11384 
11385 	ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
11386 
11387 	ASSERT(ring == (msgbuf_ring_t*)flow_ring);
11388 	/* ASSERT flow_ring->inited == TRUE */
11389 
11390 	(void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
11391 
11392 	ring->wr = 0;
11393 	ring->rd = 0;
11394 	ring->inited = FALSE;
11395 
11396 	ring->curr_rd = 0;
11397 }
11398 
11399 #ifdef AGG_H2D_DB
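/**
 * dhd_prot_schedule_aggregate_h2d_db - flush any pending aggregated TXPOST writes for the
 * given flowring, then, if writes were flushed and packets are in flight, ring the H2D
 * doorbell immediately when the in-flight count is at or below agg_h2d_db_inflight_thresh,
 * otherwise arm the aggregation timer.
 */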
11400 void
11401 dhd_prot_schedule_aggregate_h2d_db(dhd_pub_t *dhd, uint16 flowid)
11402 {
11403 	dhd_prot_t *prot = dhd->prot;
11404 	msgbuf_ring_t *ring;
11405 	uint16 inflight;
11406 	bool db_req = FALSE;
11407 	bool flush;
11408 
11409 	ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
11410 	flush = !!ring->pend_items_count;
11411 	dhd_prot_txdata_aggr_db_write_flush(dhd, flowid);
11412 
11413 	inflight = OSL_ATOMIC_READ(dhd->osh, &ring->inflight);
11414 	if (flush && inflight) {
11415 		if (inflight <= agg_h2d_db_inflight_thresh) {
11416 			db_req = TRUE;
11417 		}
11418 		dhd_agg_inflights_stats_update(dhd, inflight);
11419 		dhd_prot_aggregate_db_ring_door_bell(dhd, flowid, db_req);
11420 	}
11421 }
11422 #endif /* AGG_H2D_DB */
11423 
11424 /* Assumes only one index is updated at a time */
11425 /* FIXME Need to fix it */
11426 /* If exactly_nitems is true, this function will allocate space for nitems or fail */
11427 /*    Exception: when wrap around is encountered, to prevent hangup (last nitems of ring buffer) */
11428 /* If exactly_nitems is false, this function will allocate space for nitems or less */
11429 static void *
11430 BCMFASTPATH(dhd_prot_get_ring_space)(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced,
11431 	bool exactly_nitems)
11432 {
11433 	void *ret_ptr = NULL;
11434 	uint16 ring_avail_cnt;
11435 
11436 	ASSERT(nitems <= ring->max_items);
11437 
11438 	ring_avail_cnt = CHECK_WRITE_SPACE(ring->rd, ring->wr, ring->max_items);
11439 
11440 	if ((ring_avail_cnt == 0) ||
11441 	       (exactly_nitems && (ring_avail_cnt < nitems) &&
11442 	       ((ring->max_items - ring->wr) >= nitems))) {
11443 		DHD_MSGBUF_INFO(("Space not available: ring %s items %d write %d read %d\n",
11444 			ring->name, nitems, ring->wr, ring->rd));
11445 		return NULL;
11446 	}
11447 	*alloced = MIN(nitems, ring_avail_cnt);
11448 
11449 	/* Return next available space */
11450 	ret_ptr = (char *)DHD_RING_BGN_VA(ring) + (ring->wr * ring->item_len);
11451 
11452 	/* Update write index */
11453 	if ((ring->wr + *alloced) == ring->max_items)
11454 		ring->wr = 0;
11455 	else if ((ring->wr + *alloced) < ring->max_items)
11456 		ring->wr += *alloced;
11457 	else {
11458 		/* Should never hit this */
11459 		ASSERT(0);
11460 		return NULL;
11461 	}
11462 
11463 	return ret_ptr;
11464 } /* dhd_prot_get_ring_space */
11465 
11466 #ifdef AGG_H2D_DB
11467 
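/**
 * dhd_prot_agg_db_ring_write - flush the posted work items from cache and publish the
 * ring's WR index (via the DMA index array, the IFRM index array, or dongle TCM) without
 * ringing the doorbell; the doorbell is rung separately by
 * dhd_prot_aggregate_db_ring_door_bell().
 */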
11468 static void
11469 dhd_prot_agg_db_ring_write(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
11470 		uint16 nitems)
11471 {
11472 	uint16 max_flowrings = dhd->bus->max_tx_flowrings;
11473 	unsigned long flags_bus;
11474 
11475 #ifdef DHD_FAKE_TX_STATUS
11476 	/* if fake tx status is enabled, we should not update
11477 	 * dongle side rd/wr index for the tx flowring
11478 	 * and also should not ring the doorbell
11479 	 */
11480 	if (DHD_IS_FLOWRING(ring->idx, max_flowrings)) {
11481 		return;
11482 	}
11483 #endif /* DHD_FAKE_TX_STATUS */
11484 
11485 	DHD_BUS_LP_STATE_LOCK(dhd->bus->bus_lp_state_lock, flags_bus);
11486 
11487 	/* cache flush */
11488 	OSL_CACHE_FLUSH(p, ring->item_len * nitems);
11489 
11490 	if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
11491 			dhd_prot_dma_indx_set(dhd, ring->wr,
11492 			                      H2D_DMA_INDX_WR_UPD, ring->idx);
11493 	} else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings)) {
11494 			dhd_prot_dma_indx_set(dhd, ring->wr,
11495 			H2D_IFRM_INDX_WR_UPD, ring->idx);
11496 	} else {
11497 			dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
11498 				sizeof(uint16), RING_WR_UPD, ring->idx);
11499 	}
11500 
11501 	DHD_BUS_LP_STATE_UNLOCK(dhd->bus->bus_lp_state_lock, flags_bus);
11502 }
11503 
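/**
 * dhd_prot_aggregate_db_ring_door_bell - when ring_db is TRUE, cancel any pending
 * aggregation timer and ring the H2D doorbell for the flowring immediately; otherwise
 * (re)arm the aggregation timer so the doorbell is rung later.
 */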
11504 static void
11505 dhd_prot_aggregate_db_ring_door_bell(dhd_pub_t *dhd, uint16 flowid, bool ring_db)
11506 {
11507 	dhd_prot_t *prot = dhd->prot;
11508 	flow_ring_table_t *flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
11509 	flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
11510 	msgbuf_ring_t *ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
11511 	uint32 db_index;
11512 	uint corerev;
11513 
11514 	if (ring_db == TRUE) {
11515 		dhd_msgbuf_agg_h2d_db_timer_cancel(dhd);
11516 		prot->agg_h2d_db_info.direct_db_cnt++;
11517 		/* raise h2d interrupt */
11518 		if (IDMA_ACTIVE(dhd) || (IFRM_ACTIVE(dhd))) {
11519 			db_index = IDMA_IDX0;
11520 			/* this api is called in the wl down path; in that case sih is freed already */
11521 			if (dhd->bus->sih) {
11522 				corerev = dhd->bus->sih->buscorerev;
11523 				/* We need to explicitly configure the type of DMA for
11524 				 * core rev >= 24
11525 				 */
11526 				if (corerev >= 24) {
11527 					db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
11528 				}
11529 			}
11530 			prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
11531 		} else {
11532 			prot->mb_ring_fn(dhd->bus, DHD_WRPTR_UPDATE_H2D_DB_MAGIC(ring));
11533 		}
11534 	} else {
11535 		dhd_msgbuf_agg_h2d_db_timer_start(prot);
11536 	}
11537 }
11538 
11539 #endif /* AGG_H2D_DB */
11540 
11541 /**
11542  * dhd_prot_ring_write_complete - Host updates the new WR index on producing
11543  * new messages in a H2D ring. The messages are flushed from cache prior to
11544  * posting the new WR index. The new WR index will be updated in the DMA index
11545  * array or directly in the dongle's ring state memory.
11546  * A PCIE doorbell will be generated to wake up the dongle.
11547  * This is a non-atomic function, make sure the callers
11548  * always hold appropriate locks.
11549  */
11550 static void
11551 BCMFASTPATH(__dhd_prot_ring_write_complete)(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
11552 	uint16 nitems)
11553 {
11554 	dhd_prot_t *prot = dhd->prot;
11555 	uint32 db_index;
11556 	uint16 max_flowrings = dhd->bus->max_tx_flowrings;
11557 	uint corerev;
11558 
11559 	/* cache flush */
11560 	OSL_CACHE_FLUSH(p, ring->item_len * nitems);
11561 
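	/* Publish the new WR index: via the host DMA index array when index DMA is in use,
	 * via the IFRM index array for flowrings when IFRM is active, otherwise directly
	 * into the dongle's ring state in TCM.
	 */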
11562 	if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
11563 			dhd_prot_dma_indx_set(dhd, ring->wr,
11564 			                      H2D_DMA_INDX_WR_UPD, ring->idx);
11565 	} else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings)) {
11566 			dhd_prot_dma_indx_set(dhd, ring->wr,
11567 			H2D_IFRM_INDX_WR_UPD, ring->idx);
11568 	} else {
11569 			dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
11570 				sizeof(uint16), RING_WR_UPD, ring->idx);
11571 	}
11572 
11573 	/* raise h2d interrupt */
11574 	if (IDMA_ACTIVE(dhd) ||
11575 		(IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings))) {
11576 		db_index = IDMA_IDX0;
11577 		/* this api is called in the wl down path; in that case sih is freed already */
11578 		if (dhd->bus->sih) {
11579 			corerev = dhd->bus->sih->buscorerev;
11580 			/* We need to explicitly configure the type of DMA for core rev >= 24 */
11581 			if (corerev >= 24) {
11582 				db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
11583 			}
11584 		}
11585 		prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
11586 	} else {
11587 		prot->mb_ring_fn(dhd->bus, DHD_WRPTR_UPDATE_H2D_DB_MAGIC(ring));
11588 	}
11589 }
11590 
11591 static void
11592 BCMFASTPATH(dhd_prot_ring_write_complete)(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
11593 	uint16 nitems)
11594 {
11595 	unsigned long flags_bus;
11596 	DHD_BUS_LP_STATE_LOCK(dhd->bus->bus_lp_state_lock, flags_bus);
11597 	__dhd_prot_ring_write_complete(dhd, ring, p, nitems);
11598 	DHD_BUS_LP_STATE_UNLOCK(dhd->bus->bus_lp_state_lock, flags_bus);
11599 }
11600 
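/** Ring the H2D doorbell with the given value, under the bus low power state lock. */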
11601 static void
11602 BCMFASTPATH(dhd_prot_ring_doorbell)(dhd_pub_t *dhd, uint32 value)
11603 {
11604 	unsigned long flags_bus;
11605 	DHD_BUS_LP_STATE_LOCK(dhd->bus->bus_lp_state_lock, flags_bus);
11606 	dhd->prot->mb_ring_fn(dhd->bus, value);
11607 	DHD_BUS_LP_STATE_UNLOCK(dhd->bus->bus_lp_state_lock, flags_bus);
11608 }
11609 
11610 /**
11611  * dhd_prot_ring_write_complete_mbdata - will be called from dhd_prot_h2d_mbdata_send_ctrlmsg,
11612  * dhd_prot_ring_write_complete_mbdata - called from dhd_prot_h2d_mbdata_send_ctrlmsg. It holds
11613  * DHD_BUS_LP_STATE_LOCK while updating the WR pointer and ringing the DB, and also updates
11614  * bus_low_power_state to indicate that D3_INFORM was sent, all under the same BUS_LP_STATE_LOCK.
11615 static void
11616 BCMFASTPATH(dhd_prot_ring_write_complete_mbdata)(dhd_pub_t *dhd, msgbuf_ring_t * ring, void *p,
11617 	uint16 nitems, uint32 mb_data)
11618 {
11619 	unsigned long flags_bus;
11620 
11621 	DHD_BUS_LP_STATE_LOCK(dhd->bus->bus_lp_state_lock, flags_bus);
11622 
11623 	__dhd_prot_ring_write_complete(dhd, ring, p, nitems);
11624 
11625 	/* Mark D3_INFORM in the same context to skip ringing H2D DB after D3_INFORM */
11626 	if (mb_data == H2D_HOST_D3_INFORM) {
11627 		__DHD_SET_BUS_LPS_D3_INFORMED(dhd->bus);
11628 	}
11629 
11630 	DHD_BUS_LP_STATE_UNLOCK(dhd->bus->bus_lp_state_lock, flags_bus);
11631 }
11632 
11633 /**
11634  * dhd_prot_upd_read_idx - Host updates the new RD index on consuming messages
11635  * from a D2H ring. The new RD index will be updated in the DMA Index array or
11636  * directly in dongle's ring state memory.
11637  */
11638 static void
11639 dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
11640 {
11641 	dhd_prot_t *prot = dhd->prot;
11642 	uint32 db_index;
11643 	uint corerev;
11644 
11645 	/* update read index */
11646 	/* If DMA'ing of host->dongle indices is supported,
11647 	 * update the RD index in host memory,
11648 	 * otherwise write it to TCM.
11649 	 */
11650 	if (IDMA_ACTIVE(dhd)) {
11651 		dhd_prot_dma_indx_set(dhd, ring->rd,
11652 			D2H_DMA_INDX_RD_UPD, ring->idx);
11653 		db_index = IDMA_IDX1;
11654 		if (dhd->bus->sih) {
11655 			corerev = dhd->bus->sih->buscorerev;
11656 			/* We need to explicitly configure the type of DMA for core rev >= 24 */
11657 			if (corerev >= 24) {
11658 				db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
11659 			}
11660 		}
11661 		prot->mb_2_ring_fn(dhd->bus, db_index, FALSE);
11662 	} else if (dhd->dma_h2d_ring_upd_support) {
11663 		dhd_prot_dma_indx_set(dhd, ring->rd,
11664 		                      D2H_DMA_INDX_RD_UPD, ring->idx);
11665 	} else {
11666 		dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
11667 			sizeof(uint16), RING_RD_UPD, ring->idx);
11668 	}
11669 }
11670 
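/** Send a D2H ring create request for the given ring over the H2D control submission ring. */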
11671 static int
11672 dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
11673 	uint16 ring_type, uint32 req_id)
11674 {
11675 	unsigned long flags;
11676 	d2h_ring_create_req_t  *d2h_ring;
11677 	uint16 alloced = 0;
11678 	int ret = BCME_OK;
11679 	uint16 max_h2d_rings = dhd->bus->max_submission_rings;
11680 	msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
11681 
11682 #ifdef PCIE_INB_DW
11683 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
11684 		return BCME_ERROR;
11685 #endif /* PCIE_INB_DW */
11686 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
11687 
11688 	DHD_TRACE(("%s trying to send D2H ring create Req\n", __FUNCTION__));
11689 
11690 	if (ring_to_create == NULL) {
11691 		DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
11692 		ret = BCME_ERROR;
11693 		goto err;
11694 	}
11695 
11696 	/* Request for ring buffer space */
11697 	d2h_ring = (d2h_ring_create_req_t *) dhd_prot_alloc_ring_space(dhd,
11698 		ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
11699 		&alloced, FALSE);
11700 
11701 	if (d2h_ring == NULL) {
11702 		DHD_ERROR(("%s: FATAL: No space in control ring to send D2H ring create\n",
11703 			__FUNCTION__));
11704 		ret = BCME_NOMEM;
11705 		goto err;
11706 	}
11707 	ring_to_create->create_req_id = (uint16)req_id;
11708 	ring_to_create->create_pending = TRUE;
11709 
11710 	/* Common msg buf hdr */
11711 	d2h_ring->msg.msg_type = MSG_TYPE_D2H_RING_CREATE;
11712 	d2h_ring->msg.if_id = 0;
11713 	d2h_ring->msg.flags = ctrl_ring->current_phase;
11714 	d2h_ring->msg.request_id = htol32(ring_to_create->create_req_id);
11715 	d2h_ring->ring_id = htol16(DHD_D2H_RING_OFFSET(ring_to_create->idx, max_h2d_rings));
11716 	DHD_ERROR(("%s ringid: %d idx: %d max_h2d: %d\n", __FUNCTION__, d2h_ring->ring_id,
11717 			ring_to_create->idx, max_h2d_rings));
11718 
11719 	d2h_ring->ring_type = ring_type;
11720 	d2h_ring->max_items = htol16(ring_to_create->max_items);
11721 	d2h_ring->len_item = htol16(ring_to_create->item_len);
11722 	d2h_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
11723 	d2h_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
11724 
11725 	d2h_ring->flags = 0;
11726 	d2h_ring->msg.epoch =
11727 		ctrl_ring->seqnum % H2D_EPOCH_MODULO;
11728 	ctrl_ring->seqnum++;
11729 
11730 #ifdef EWP_EDL
11731 	if (ring_type == BCMPCIE_D2H_RING_TYPE_EDL) {
11732 		DHD_ERROR(("%s: sending d2h EDL ring create: "
11733 			"\n max items=%u; len_item=%u; ring_id=%u; low_addr=0x%x; high_addr=0x%x\n",
11734 			__FUNCTION__, ltoh16(d2h_ring->max_items),
11735 			ltoh16(d2h_ring->len_item),
11736 			ltoh16(d2h_ring->ring_id),
11737 			d2h_ring->ring_ptr.low_addr,
11738 			d2h_ring->ring_ptr.high_addr));
11739 	}
11740 #endif /* EWP_EDL */
11741 
11742 	/* Update the flow_ring's WRITE index */
11743 	dhd_prot_ring_write_complete(dhd, ctrl_ring, d2h_ring,
11744 		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
11745 
11746 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
11747 
11748 #ifdef PCIE_INB_DW
11749 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
11750 #endif
11751 
11752 	return ret;
11753 err:
11754 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
11755 
11756 #ifdef PCIE_INB_DW
11757 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
11758 #endif
11759 	return ret;
11760 }
11761 
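/** Send an H2D ring create request for the given ring over the H2D control submission ring. */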
11762 static int
11763 dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create, uint8 ring_type, uint32 id)
11764 {
11765 	unsigned long flags;
11766 	h2d_ring_create_req_t  *h2d_ring;
11767 	uint16 alloced = 0;
11768 	uint8 i = 0;
11769 	int ret = BCME_OK;
11770 	msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
11771 
11772 #ifdef PCIE_INB_DW
11773 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
11774 		return BCME_ERROR;
11775 #endif /* PCIE_INB_DW */
11776 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
11777 
11778 	DHD_TRACE(("%s trying to send H2D ring create Req\n", __FUNCTION__));
11779 
11780 	if (ring_to_create == NULL) {
11781 		DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
11782 		ret = BCME_ERROR;
11783 		goto err;
11784 	}
11785 
11786 	/* Request for ring buffer space */
11787 	h2d_ring = (h2d_ring_create_req_t *)dhd_prot_alloc_ring_space(dhd,
11788 		ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
11789 		&alloced, FALSE);
11790 
11791 	if (h2d_ring == NULL) {
11792 		DHD_ERROR(("%s: FATAL: No space in control ring to send H2D ring create\n",
11793 			__FUNCTION__));
11794 		ret = BCME_NOMEM;
11795 		goto err;
11796 	}
11797 	ring_to_create->create_req_id = (uint16)id;
11798 	ring_to_create->create_pending = TRUE;
11799 
11800 	/* Common msg buf hdr */
11801 	h2d_ring->msg.msg_type = MSG_TYPE_H2D_RING_CREATE;
11802 	h2d_ring->msg.if_id = 0;
11803 	h2d_ring->msg.request_id = htol32(ring_to_create->create_req_id);
11804 	h2d_ring->msg.flags = ctrl_ring->current_phase;
11805 	h2d_ring->ring_id = htol16(DHD_H2D_RING_OFFSET(ring_to_create->idx));
11806 	h2d_ring->ring_type = ring_type;
11807 	h2d_ring->max_items = htol16(H2DRING_DYNAMIC_INFO_MAX_ITEM);
11808 	h2d_ring->n_completion_ids = ring_to_create->n_completion_ids;
11809 	h2d_ring->len_item = htol16(H2DRING_INFO_BUFPOST_ITEMSIZE);
11810 	h2d_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
11811 	h2d_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
11812 
11813 	for (i = 0; i < ring_to_create->n_completion_ids; i++) {
11814 		h2d_ring->completion_ring_ids[i] = htol16(ring_to_create->compeltion_ring_ids[i]);
11815 	}
11816 
11817 	h2d_ring->flags = 0;
11818 	h2d_ring->msg.epoch =
11819 		ctrl_ring->seqnum % H2D_EPOCH_MODULO;
11820 	ctrl_ring->seqnum++;
11821 
11822 	/* Update the flow_ring's WRITE index */
11823 	dhd_prot_ring_write_complete(dhd, ctrl_ring, h2d_ring,
11824 		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
11825 
11826 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
11827 
11828 #ifdef PCIE_INB_DW
11829 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
11830 #endif
11831 	return ret;
11832 err:
11833 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
11834 
11835 #ifdef PCIE_INB_DW
11836 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
11837 #endif
11838 	return ret;
11839 }
11840 
11841 /**
11842  * dhd_prot_dma_indx_set - set a new WR or RD index in the DMA index array.
11843  * Dongle will DMA the entire array (if DMA_INDX feature is enabled).
11844  * See dhd_prot_dma_indx_init()
11845  */
11846 void
11847 dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, uint16 ringid)
11848 {
11849 	uint8 *ptr;
11850 	uint16 offset;
11851 	dhd_prot_t *prot = dhd->prot;
11852 	uint16 max_h2d_rings = dhd->bus->max_submission_rings;
11853 
11854 	switch (type) {
11855 		case H2D_DMA_INDX_WR_UPD:
11856 			ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
11857 			offset = DHD_H2D_RING_OFFSET(ringid);
11858 			break;
11859 
11860 		case D2H_DMA_INDX_RD_UPD:
11861 			ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
11862 			offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
11863 			break;
11864 
11865 		case H2D_IFRM_INDX_WR_UPD:
11866 			ptr = (uint8 *)(prot->h2d_ifrm_indx_wr_buf.va);
11867 			offset = DHD_H2D_FRM_FLOW_RING_OFFSET(ringid);
11868 			break;
11869 
11870 		default:
11871 			DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
11872 				__FUNCTION__));
11873 			return;
11874 	}
11875 
11876 	ASSERT(prot->rw_index_sz != 0);
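	/* Each ring owns one rw_index_sz wide slot in the index array; step to this ring's slot */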
11877 	ptr += offset * prot->rw_index_sz;
11878 
11879 	/* XXX: Test casting ptr to uint16* for 32bit indices case on Big Endian */
11880 	*(uint16*)ptr = htol16(new_index);
11881 
11882 	OSL_CACHE_FLUSH((void *)ptr, prot->rw_index_sz);
11883 
11884 	DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
11885 		__FUNCTION__, new_index, type, ringid, ptr, offset));
11886 
11887 } /* dhd_prot_dma_indx_set */
11888 
11889 /**
11890  * dhd_prot_dma_indx_get - Fetch a WR or RD index from the dongle DMA-ed index
11891  * array.
11892  * Dongle DMAes an entire array to host memory (if the feature is enabled).
11893  * See dhd_prot_dma_indx_init()
11894  */
11895 static uint16
11896 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid)
11897 {
11898 	uint8 *ptr;
11899 	uint16 data;
11900 	uint16 offset;
11901 	dhd_prot_t *prot = dhd->prot;
11902 	uint16 max_h2d_rings = dhd->bus->max_submission_rings;
11903 
11904 	switch (type) {
11905 		case H2D_DMA_INDX_WR_UPD:
11906 			ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
11907 			offset = DHD_H2D_RING_OFFSET(ringid);
11908 			break;
11909 
11910 		case H2D_DMA_INDX_RD_UPD:
11911 #ifdef DHD_DMA_INDICES_SEQNUM
11912 			if (prot->h2d_dma_indx_rd_copy_buf) {
11913 				ptr = (uint8 *)(prot->h2d_dma_indx_rd_copy_buf);
11914 			} else
11915 #endif /* DHD_DMA_INDICES_SEQNUM */
11916 			{
11917 				ptr = (uint8 *)(prot->h2d_dma_indx_rd_buf.va);
11918 			}
11919 			offset = DHD_H2D_RING_OFFSET(ringid);
11920 			break;
11921 
11922 		case D2H_DMA_INDX_WR_UPD:
11923 #ifdef DHD_DMA_INDICES_SEQNUM
11924 			if (prot->d2h_dma_indx_wr_copy_buf) {
11925 				ptr = (uint8 *)(prot->d2h_dma_indx_wr_copy_buf);
11926 			} else
11927 #endif /* DHD_DMA_INDICES_SEQNUM */
11928 			{
11929 				ptr = (uint8 *)(prot->d2h_dma_indx_wr_buf.va);
11930 			}
11931 			offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
11932 			break;
11933 
11934 		case D2H_DMA_INDX_RD_UPD:
11935 			ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
11936 			offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
11937 			break;
11938 
11939 		default:
11940 			DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
11941 				__FUNCTION__));
11942 			return 0;
11943 	}
11944 
11945 	ASSERT(prot->rw_index_sz != 0);
11946 	ptr += offset * prot->rw_index_sz;
11947 
11948 	OSL_CACHE_INV((void *)ptr, prot->rw_index_sz);
11949 
11950 	/* XXX: Test casting ptr to uint16* for 32bit indices case on Big Endian */
11951 	data = LTOH16(*((uint16*)ptr));
11952 
11953 	DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
11954 		__FUNCTION__, data, type, ringid, ptr, offset));
11955 
11956 	return (data);
11957 
11958 } /* dhd_prot_dma_indx_get */
11959 
11960 #ifdef DHD_DMA_INDICES_SEQNUM
11961 void
11962 dhd_prot_write_host_seqnum(dhd_pub_t *dhd, uint32 seq_num)
11963 {
11964 	uint8 *ptr;
11965 	dhd_prot_t *prot = dhd->prot;
11966 
11967 	/* Update host sequence number in first four bytes of scratchbuf */
11968 	ptr = (uint8 *)(prot->d2h_dma_scratch_buf.va);
11969 	*(uint32*)ptr = htol32(seq_num);
11970 	OSL_CACHE_FLUSH((void *)ptr, prot->d2h_dma_scratch_buf.len);
11971 
11972 	DHD_TRACE(("%s: data %d ptr 0x%p\n", __FUNCTION__, seq_num, ptr));
11973 
11974 } /* dhd_prot_write_host_seqnum */
11975 
11976 uint32
11977 dhd_prot_read_seqnum(dhd_pub_t *dhd, bool host)
11978 {
11979 	uint8 *ptr;
11980 	dhd_prot_t *prot = dhd->prot;
11981 	uint32 data;
11982 
11983 	OSL_CACHE_INV((void *)prot->d2h_dma_scratch_buf.va, prot->d2h_dma_scratch_buf.len);
11984 
11985 	/* First four bytes of scratchbuf contains the host sequence number.
11986 	 * Next four bytes of scratchbuf contains the Dongle sequence number.
11987 	 */
11988 	if (host) {
11989 		ptr = (uint8 *)(prot->d2h_dma_scratch_buf.va);
11990 		data = LTOH32(*((uint32*)ptr));
11991 	} else {
11992 		ptr = ((uint8 *)(prot->d2h_dma_scratch_buf.va) + sizeof(uint32));
11993 		data = LTOH32(*((uint32*)ptr));
11994 	}
11995 	DHD_TRACE(("%s: data %d ptr 0x%p\n", __FUNCTION__, data, ptr));
11996 	return data;
11997 } /* dhd_prot_read_seqnum */
11998 
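/**
 * dhd_prot_save_dmaidx - snapshot the dongle-DMA'd D2H WR and H2D RD index arrays into the
 * host-side copy buffers, but only when the host and dongle sequence numbers match (i.e. the
 * dongle has acknowledged the previous snapshot). A doorbell is rung so the dongle picks up
 * the new host sequence number.
 */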
11999 void
12000 dhd_prot_save_dmaidx(dhd_pub_t *dhd)
12001 {
12002 	dhd_prot_t *prot = dhd->prot;
12003 	uint32 dngl_seqnum;
12004 
12005 	dngl_seqnum = dhd_prot_read_seqnum(dhd, FALSE);
12006 
12007 	DHD_TRACE(("%s: host_seqnum %u dngl_seqnum %u\n", __FUNCTION__,
12008 			prot->host_seqnum, dngl_seqnum));
12009 	if (prot->d2h_dma_indx_wr_copy_buf && prot->h2d_dma_indx_rd_copy_buf) {
12010 		if (prot->host_seqnum == dngl_seqnum) {
12011 			memcpy_s(prot->d2h_dma_indx_wr_copy_buf, prot->d2h_dma_indx_wr_copy_bufsz,
12012 				prot->d2h_dma_indx_wr_buf.va, prot->d2h_dma_indx_wr_copy_bufsz);
12013 			memcpy_s(prot->h2d_dma_indx_rd_copy_buf, prot->h2d_dma_indx_rd_copy_bufsz,
12014 				prot->h2d_dma_indx_rd_buf.va, prot->h2d_dma_indx_rd_copy_bufsz);
12015 			dhd_prot_write_host_seqnum(dhd, prot->host_seqnum);
12016 			/* Ring DoorBell */
12017 			dhd_prot_ring_doorbell(dhd, DHD_DMA_INDX_SEQ_H2D_DB_MAGIC);
12018 			prot->host_seqnum++;
12019 			prot->host_seqnum %= D2H_EPOCH_MODULO;
12020 		}
12021 	}
12022 }
12023 
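/** Allocate the host-side copy buffers used by dhd_prot_save_dmaidx() to snapshot the DMA'd index arrays. */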
12024 int
12025 dhd_prot_dma_indx_copybuf_init(dhd_pub_t *dhd, uint32 buf_sz, uint8 type)
12026 {
12027 	dhd_prot_t *prot = dhd->prot;
12028 
12029 	switch (type) {
12030 		case D2H_DMA_INDX_WR_BUF:
12031 			prot->d2h_dma_indx_wr_copy_buf = MALLOCZ(dhd->osh, buf_sz);
12032 			if (prot->d2h_dma_indx_wr_copy_buf == NULL) {
12033 				DHD_ERROR(("%s: MALLOC failed for size %d\n",
12034 					__FUNCTION__, buf_sz));
12035 				goto ret_no_mem;
12036 			}
12037 			prot->d2h_dma_indx_wr_copy_bufsz = buf_sz;
12038 		break;
12039 
12040 		case H2D_DMA_INDX_RD_BUF:
12041 			prot->h2d_dma_indx_rd_copy_buf = MALLOCZ(dhd->osh, buf_sz);
12042 			if (prot->h2d_dma_indx_rd_copy_buf == NULL) {
12043 				DHD_ERROR(("%s: MALLOC failed for size %d\n",
12044 					__FUNCTION__, buf_sz));
12045 				goto ret_no_mem;
12046 			}
12047 			prot->h2d_dma_indx_rd_copy_bufsz = buf_sz;
12048 			break;
12049 
12050 		default:
12051 			break;
12052 	}
12053 	return BCME_OK;
12054 ret_no_mem:
12055 	return BCME_NOMEM;
12056 
12057 }
12058 #endif /* DHD_DMA_INDICES_SEQNUM */
12059 
12060 /**
12061  * An array of DMA read/write indices, containing information about host rings, can be maintained
12062  * either in host memory or in device memory, depending on preprocessor options. Depending on these
12063  * options, this function is called during driver initialization. It reserves and initializes
12064  * blocks of DMA'able host memory containing an array of DMA read or DMA write indices. The physical
12065  * addresses of these host memory blocks are communicated to the dongle later on. By reading this
12066  * host memory, the dongle learns about the state of the host rings.
12067  */
12068 
12069 static INLINE int
12070 dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
12071 	dhd_dma_buf_t *dma_buf, uint32 bufsz)
12072 {
12073 	int rc;
12074 
12075 	if ((dma_buf->len == bufsz) || (dma_buf->va != NULL))
12076 		return BCME_OK;
12077 
12078 	rc = dhd_dma_buf_alloc(dhd, dma_buf, bufsz);
12079 
12080 	return rc;
12081 }
12082 
12083 int
12084 dhd_prot_dma_indx_init(dhd_pub_t *dhd, uint32 rw_index_sz, uint8 type, uint32 length)
12085 {
12086 	uint32 bufsz;
12087 	dhd_prot_t *prot = dhd->prot;
12088 	dhd_dma_buf_t *dma_buf;
12089 
12090 	if (prot == NULL) {
12091 		DHD_ERROR(("prot is not inited\n"));
12092 		return BCME_ERROR;
12093 	}
12094 
12095 	/* Dongle advertizes 2B or 4B RW index size */
12096 	ASSERT(rw_index_sz != 0);
12097 	prot->rw_index_sz = rw_index_sz;
12098 
12099 	bufsz = rw_index_sz * length;
12100 
12101 	switch (type) {
12102 		case H2D_DMA_INDX_WR_BUF:
12103 			dma_buf = &prot->h2d_dma_indx_wr_buf;
12104 			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
12105 				goto ret_no_mem;
12106 			DHD_ERROR(("H2D DMA WR INDX : array size %d = %d * %d\n",
12107 				dma_buf->len, rw_index_sz, length));
12108 			break;
12109 
12110 		case H2D_DMA_INDX_RD_BUF:
12111 			dma_buf = &prot->h2d_dma_indx_rd_buf;
12112 			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
12113 				goto ret_no_mem;
12114 			DHD_ERROR(("H2D DMA RD INDX : array size %d = %d * %d\n",
12115 				dma_buf->len, rw_index_sz, length));
12116 			break;
12117 
12118 		case D2H_DMA_INDX_WR_BUF:
12119 			dma_buf = &prot->d2h_dma_indx_wr_buf;
12120 			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
12121 				goto ret_no_mem;
12122 			DHD_ERROR(("D2H DMA WR INDX : array size %d = %d * %d\n",
12123 				dma_buf->len, rw_index_sz, length));
12124 			break;
12125 
12126 		case D2H_DMA_INDX_RD_BUF:
12127 			dma_buf = &prot->d2h_dma_indx_rd_buf;
12128 			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
12129 				goto ret_no_mem;
12130 			DHD_ERROR(("D2H DMA RD INDX : array size %d = %d * %d\n",
12131 				dma_buf->len, rw_index_sz, length));
12132 			break;
12133 
12134 		case H2D_IFRM_INDX_WR_BUF:
12135 			dma_buf = &prot->h2d_ifrm_indx_wr_buf;
12136 			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
12137 				goto ret_no_mem;
12138 			DHD_ERROR(("H2D IFRM WR INDX : array size %d = %d * %d\n",
12139 				dma_buf->len, rw_index_sz, length));
12140 			break;
12141 
12142 		default:
12143 			DHD_ERROR(("%s: Unexpected option\n", __FUNCTION__));
12144 			return BCME_BADOPTION;
12145 	}
12146 
12147 	return BCME_OK;
12148 
12149 ret_no_mem:
12150 	DHD_ERROR(("%s: dhd_prot_dma_indx_alloc type %d buf_sz %d failure\n",
12151 		__FUNCTION__, type, bufsz));
12152 	return BCME_NOMEM;
12153 
12154 } /* dhd_prot_dma_indx_init */
12155 
12156 /**
12157  * Called on checking for 'completion' messages from the dongle. Returns next host buffer to read
12158  * from, or NULL if there are no more messages to read.
12159  */
12160 static uint8*
12161 dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 *available_len)
12162 {
12163 	uint16 wr;
12164 	uint16 rd;
12165 	uint16 depth;
12166 	uint16 items;
12167 	void  *read_addr = NULL; /* address of next msg to be read in ring */
12168 	uint16 d2h_wr = 0;
12169 
12170 	DHD_TRACE(("%s: d2h_dma_indx_rd_buf %p, d2h_dma_indx_wr_buf %p\n",
12171 		__FUNCTION__, (uint32 *)(dhd->prot->d2h_dma_indx_rd_buf.va),
12172 		(uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va)));
12173 
12174 	/* Remember the read index in a variable, because ring->rd gets
12175 	 * updated at the end of this function; otherwise it would not be
12176 	 * possible to print the exact read index from which the message
12177 	 * was read.
12178 	 */
12179 	ring->curr_rd = ring->rd;
12180 
12181 	/* update write pointer */
12182 	if (dhd->dma_d2h_ring_upd_support) {
12183 		/* DMAing write/read indices supported */
12184 		d2h_wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
12185 		ring->wr = d2h_wr;
12186 	} else {
12187 		dhd_bus_cmn_readshared(dhd->bus, &(ring->wr), RING_WR_UPD, ring->idx);
12188 	}
12189 
12190 	wr = ring->wr;
12191 	rd = ring->rd;
12192 	depth = ring->max_items;
12193 
12194 	/* check for avail space, in number of ring items */
12195 	items = READ_AVAIL_SPACE(wr, rd, depth);
12196 	if (items == 0)
12197 		return NULL;
12198 
12199 	/*
12200 	 * Note that there are builds where ASSERT translates to just a printk,
12201 	 * so even if we hit this condition we would never halt. In that case
12202 	 * dhd_prot_process_msgtype can get into a big loop if this
12203 	 * happens.
12204 	 */
12205 	if (items > ring->max_items) {
12206 		DHD_ERROR(("\r\n======================= \r\n"));
12207 		DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
12208 			__FUNCTION__, ring, ring->name, ring->max_items, items));
12209 		DHD_ERROR(("wr: %d,  rd: %d,  depth: %d  \r\n", wr, rd, depth));
12210 		DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
12211 			dhd->busstate, dhd->bus->wait_for_d3_ack));
12212 		DHD_ERROR(("\r\n======================= \r\n"));
12213 #ifdef SUPPORT_LINKDOWN_RECOVERY
12214 		if (wr >= ring->max_items) {
12215 			dhd->bus->read_shm_fail = TRUE;
12216 		}
12217 #else
12218 #ifdef DHD_FW_COREDUMP
12219 		if (dhd->memdump_enabled) {
12220 			/* collect core dump */
12221 			dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
12222 			dhd_bus_mem_dump(dhd);
12223 
12224 		}
12225 #endif /* DHD_FW_COREDUMP */
12226 #endif /* SUPPORT_LINKDOWN_RECOVERY */
12227 
12228 		*available_len = 0;
12229 		dhd_schedule_reset(dhd);
12230 
12231 		return NULL;
12232 	}
12233 
12234 	/* if space is available, calculate address to be read */
12235 	read_addr = (char*)ring->dma_buf.va + (rd * ring->item_len);
12236 
12237 	/* update read pointer */
12238 	if ((ring->rd + items) >= ring->max_items)
12239 		ring->rd = 0;
12240 	else
12241 		ring->rd += items;
12242 
12243 	ASSERT(ring->rd < ring->max_items);
12244 
12245 	/* convert items to bytes : available_len must be 32bits */
12246 	*available_len = (uint32)(items * ring->item_len);
12247 
12248 	/* XXX Double cache invalidate for ARM with L2 cache/prefetch */
12249 	OSL_CACHE_INV(read_addr, *available_len);
12250 
12251 	/* return read address */
12252 	return read_addr;
12253 
12254 } /* dhd_prot_get_read_addr */
12255 
12256 /**
12257  * dhd_prot_h2d_mbdata_send_ctrlmsg is a non-atomic function,
12258  * make sure the callers always hold appropriate locks.
12259  */
12260 int dhd_prot_h2d_mbdata_send_ctrlmsg(dhd_pub_t *dhd, uint32 mb_data)
12261 {
12262 	h2d_mailbox_data_t *h2d_mb_data;
12263 	uint16 alloced = 0;
12264 	msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
12265 	unsigned long flags;
12266 	int num_post = 1;
12267 	int i;
12268 
12269 	DHD_MSGBUF_INFO(("%s Sending H2D MB data Req data 0x%04x\n",
12270 		__FUNCTION__, mb_data));
12271 	if (!ctrl_ring->inited) {
12272 		DHD_ERROR(("%s: Ctrl Submit Ring: not inited\n", __FUNCTION__));
12273 		return BCME_ERROR;
12274 	}
12275 
12276 #ifdef PCIE_INB_DW
12277 	if ((INBAND_DW_ENAB(dhd->bus)) &&
12278 		(dhdpcie_bus_get_pcie_inband_dw_state(dhd->bus) ==
12279 			DW_DEVICE_DS_DEV_SLEEP)) {
12280 		if (mb_data == H2D_HOST_CONS_INT) {
12281 			/* One additional device_wake post needed */
12282 			num_post = 2;
12283 		}
12284 	}
12285 #endif /* PCIE_INB_DW */
12286 
12287 	for (i = 0; i < num_post; i ++) {
12288 		DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
12289 		/* Request for ring buffer space */
12290 		h2d_mb_data = (h2d_mailbox_data_t *)dhd_prot_alloc_ring_space(dhd,
12291 			ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
12292 			&alloced, FALSE);
12293 
12294 		if (h2d_mb_data == NULL) {
12295 			DHD_ERROR(("%s: FATAL: No space in control ring to send H2D Mb data\n",
12296 				__FUNCTION__));
12297 			DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
12298 			return BCME_NOMEM;
12299 		}
12300 
12301 		memset(h2d_mb_data, 0, sizeof(h2d_mailbox_data_t));
12302 		/* Common msg buf hdr */
12303 		h2d_mb_data->msg.msg_type = MSG_TYPE_H2D_MAILBOX_DATA;
12304 		h2d_mb_data->msg.flags = ctrl_ring->current_phase;
12305 
12306 		h2d_mb_data->msg.epoch =
12307 			ctrl_ring->seqnum % H2D_EPOCH_MODULO;
12308 		ctrl_ring->seqnum++;
12309 
12310 		/* Update mailbox data */
12312 #ifdef PCIE_INB_DW
12313 		/* post device_wake first */
12314 		if ((num_post == 2) && (i == 0)) {
12315 			h2d_mb_data->mail_box_data = htol32(H2DMB_DS_DEVICE_WAKE);
12316 		} else
12317 #endif /* PCIE_INB_DW */
12318 		{
12319 			h2d_mb_data->mail_box_data = htol32(mb_data);
12320 		}
12321 
12322 		DHD_MSGBUF_INFO(("%s Send H2D MB data Req data 0x%04x\n", __FUNCTION__, mb_data));
12323 
12324 		/* upd wrt ptr and raise interrupt */
12325 		dhd_prot_ring_write_complete_mbdata(dhd, ctrl_ring, h2d_mb_data,
12326 			DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, mb_data);
12327 
12328 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
12329 
12330 #ifdef PCIE_INB_DW
12331 		/* Add a delay if device_wake is posted */
12332 		if ((num_post == 2) && (i == 0)) {
12333 			OSL_DELAY(1000);
12334 		}
12335 #endif /* PCIE_INB_DW */
12336 	}
12337 	return 0;
12338 }
12339 
12340 /** Creates a flow ring and informs dongle of this event */
12341 int
12342 dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
12343 {
12344 	tx_flowring_create_request_t *flow_create_rqst;
12345 	msgbuf_ring_t *flow_ring;
12346 	dhd_prot_t *prot = dhd->prot;
12347 	unsigned long flags;
12348 	uint16 alloced = 0;
12349 	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
12350 	uint16 max_flowrings = dhd->bus->max_tx_flowrings;
12351 
12352 	/* Fetch a pre-initialized msgbuf_ring from the flowring pool */
12353 	flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
12354 	if (flow_ring == NULL) {
12355 		DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
12356 			__FUNCTION__, flow_ring_node->flowid));
12357 		return BCME_NOMEM;
12358 	}
12359 
12360 #ifdef PCIE_INB_DW
12361 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
12362 		return BCME_ERROR;
12363 #endif /* PCIE_INB_DW */
12364 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
12365 
12366 	/* Request for ctrl_ring buffer space */
12367 	flow_create_rqst = (tx_flowring_create_request_t *)
12368 		dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
12369 
12370 	if (flow_create_rqst == NULL) {
12371 		dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
12372 		DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n",
12373 			__FUNCTION__, flow_ring_node->flowid));
12374 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
12375 #ifdef PCIE_INB_DW
12376 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
12377 #endif
12378 		return BCME_NOMEM;
12379 	}
12380 
12381 	flow_ring_node->prot_info = (void *)flow_ring;
12382 
12383 	/* Common msg buf hdr */
12384 	flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE;
12385 	flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
12386 	flow_create_rqst->msg.request_id = htol32(0); /* TBD */
12387 	flow_create_rqst->msg.flags = ctrl_ring->current_phase;
12388 
12389 	flow_create_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
12390 	ctrl_ring->seqnum++;
12391 
12392 	/* Update flow create message */
12393 	flow_create_rqst->tid = flow_ring_node->flow_info.tid;
12394 	flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
12395 	memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa, sizeof(flow_create_rqst->sa));
12396 	memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da, sizeof(flow_create_rqst->da));
12397 	/* CAUTION: ring::base_addr already in Little Endian */
12398 	flow_create_rqst->flow_ring_ptr.low_addr = flow_ring->base_addr.low_addr;
12399 	flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr;
12400 	flow_create_rqst->max_items = htol16(flow_ring->max_items);
12401 	flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE);
12402 	flow_create_rqst->if_flags = 0;
12403 
12404 #ifdef DHD_HP2P
12405 	/* Create an HP2P flow ring only if HP2P is enabled, the TID is the HP2P priority (7),
12406 	 * the traffic is not multicast, and the interface is AWDL; the infra interface is
12407 	 * allowed only if the user enabled hp2p_infra_enable via iovar. */
12408 	if (dhd->hp2p_capable && dhd->hp2p_ring_more &&
12409 		flow_ring_node->flow_info.tid == HP2P_PRIO &&
12410 		(dhd->hp2p_infra_enable || flow_create_rqst->msg.if_id) &&
12411 		!ETHER_ISMULTI(flow_create_rqst->da)) {
12412 		flow_create_rqst->if_flags |= BCMPCIE_FLOW_RING_INTF_HP2P;
12413 		flow_ring_node->hp2p_ring = TRUE;
12414 		/* Allow multiple HP2P Flow if mf override is enabled */
12415 		if (!dhd->hp2p_mf_enable) {
12416 			dhd->hp2p_ring_more = FALSE;
12417 		}
12418 
12419 		DHD_ERROR(("%s: flow ring for HP2P tid = %d flowid = %d\n",
12420 				__FUNCTION__, flow_ring_node->flow_info.tid,
12421 				flow_ring_node->flowid));
12422 	}
12423 #endif /* DHD_HP2P */
12424 
12425 	/* Definition of the ifrm mask: bit0 = d11ac core, bit1 = d11ad core.
12426 	 * The field is currently not used for priority, so it carries only the ifrm mask.
12427 	 */
12428 	if (IFRM_ACTIVE(dhd))
12429 		flow_create_rqst->priority_ifrmmask = (1 << IFRM_DEV_0);
12430 
12431 	DHD_ERROR(("%s: Send Flow Create Req flow ID %d for peer " MACDBG
12432 		" prio %d ifindex %d items %d\n", __FUNCTION__, flow_ring_node->flowid,
12433 		MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
12434 		flow_ring_node->flow_info.ifindex, flow_ring->max_items));
12435 
12436 	/* Update the flow_ring's WRITE index */
12437 	if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
12438 		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
12439 		                      H2D_DMA_INDX_WR_UPD, flow_ring->idx);
12440 	} else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(flow_ring->idx, max_flowrings)) {
12441 		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
12442 			H2D_IFRM_INDX_WR_UPD, flow_ring->idx);
12443 	} else {
12444 		dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
12445 			sizeof(uint16), RING_WR_UPD, flow_ring->idx);
12446 	}
12447 
12448 	/* update control subn ring's WR index and ring doorbell to dongle */
12449 	dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_create_rqst, 1);
12450 
12451 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
12452 
12453 #ifdef PCIE_INB_DW
12454 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
12455 #endif
12456 	return BCME_OK;
12457 } /* dhd_prot_flow_ring_create */
12458 
12459 /** called on receiving MSG_TYPE_FLOW_RING_CREATE_CMPLT message from dongle */
12460 static void
12461 dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg)
12462 {
12463 	tx_flowring_create_response_t *flow_create_resp = (tx_flowring_create_response_t *)msg;
12464 
12465 	DHD_ERROR(("%s: Flow Create Response status = %d Flow %d\n", __FUNCTION__,
12466 		ltoh16(flow_create_resp->cmplt.status),
12467 		ltoh16(flow_create_resp->cmplt.flow_ring_id)));
12468 
12469 	dhd_bus_flow_ring_create_response(dhd->bus,
12470 		ltoh16(flow_create_resp->cmplt.flow_ring_id),
12471 		ltoh16(flow_create_resp->cmplt.status));
12472 }
12473 
12474 #if !defined(BCM_ROUTER_DHD)
12475 static void
12476 dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf)
12477 {
12478 	h2d_ring_create_response_t *resp = (h2d_ring_create_response_t *)buf;
12479 	DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
12480 		ltoh16(resp->cmplt.status),
12481 		ltoh16(resp->cmplt.ring_id),
12482 		ltoh32(resp->cmn_hdr.request_id)));
12483 	if ((ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_DBGRING_REQ_PKTID) &&
12484 		(ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_BTLOGRING_REQ_PKTID)) {
12485 		DHD_ERROR(("invalid request ID with h2d ring create complete\n"));
12486 		return;
12487 	}
12488 	if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id) &&
12489 		!dhd->prot->h2dring_info_subn->create_pending) {
12490 		DHD_ERROR(("info ring create status for not pending submit ring\n"));
12491 	}
12492 #ifdef BTLOG
12493 	if (dhd->prot->h2dring_btlog_subn &&
12494 		dhd->prot->h2dring_btlog_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id) &&
12495 		!dhd->prot->h2dring_btlog_subn->create_pending) {
12496 		DHD_ERROR(("btlog ring create status for not pending submit ring\n"));
12497 	}
12498 #endif	/* BTLOG */
12499 
12500 	if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
12501 		DHD_ERROR(("info/btlog ring create failed with status %d\n",
12502 			ltoh16(resp->cmplt.status)));
12503 		return;
12504 	}
12505 	if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id)) {
12506 		dhd->prot->h2dring_info_subn->create_pending = FALSE;
12507 		dhd->prot->h2dring_info_subn->inited = TRUE;
12508 		DHD_ERROR(("info buffer post after ring create\n"));
12509 		dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
12510 	}
12511 #ifdef BTLOG
12512 	if (dhd->prot->h2dring_btlog_subn &&
12513 		dhd->prot->h2dring_btlog_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id)) {
12514 		dhd->prot->h2dring_btlog_subn->create_pending = FALSE;
12515 		dhd->prot->h2dring_btlog_subn->inited = TRUE;
12516 		DHD_ERROR(("btlog buffer post after ring create\n"));
12517 		dhd_prot_infobufpost(dhd, dhd->prot->h2dring_btlog_subn);
12518 	}
12519 #endif	/* BTLOG */
12520 }
12521 #endif /* !BCM_ROUTER_DHD */
12522 
12523 static void
12524 dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf)
12525 {
12526 	d2h_ring_create_response_t *resp = (d2h_ring_create_response_t *)buf;
12527 	DHD_ERROR(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
12528 		ltoh16(resp->cmplt.status),
12529 		ltoh16(resp->cmplt.ring_id),
12530 		ltoh32(resp->cmn_hdr.request_id)));
12531 	if ((ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_DBGRING_REQ_PKTID) &&
12532 		(ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_BTLOGRING_REQ_PKTID) &&
12533 #ifdef DHD_HP2P
12534 		(ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_TXREQ_PKTID) &&
12535 		(ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_RXREQ_PKTID) &&
12536 #endif /* DHD_HP2P */
12537 		TRUE) {
12538 		DHD_ERROR(("invalid request ID with d2h ring create complete\n"));
12539 		return;
12540 	}
12541 	if (ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_DBGRING_REQ_PKTID) {
12542 #ifdef EWP_EDL
12543 		if (!dhd->dongle_edl_support)
12544 #endif
12545 		{
12546 
12547 			if (!dhd->prot->d2hring_info_cpln->create_pending) {
12548 				DHD_ERROR(("info ring create status for not pending cpl ring\n"));
12549 				return;
12550 			}
12551 
12552 			if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
12553 				DHD_ERROR(("info cpl ring create failed with status %d\n",
12554 					ltoh16(resp->cmplt.status)));
12555 				return;
12556 			}
12557 			dhd->prot->d2hring_info_cpln->create_pending = FALSE;
12558 			dhd->prot->d2hring_info_cpln->inited = TRUE;
12559 		}
12560 #ifdef EWP_EDL
12561 		else {
12562 			if (!dhd->prot->d2hring_edl->create_pending) {
12563 				DHD_ERROR(("edl ring create status for not pending cpl ring\n"));
12564 				return;
12565 			}
12566 
12567 			if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
12568 				DHD_ERROR(("edl cpl ring create failed with status %d\n",
12569 					ltoh16(resp->cmplt.status)));
12570 				return;
12571 			}
12572 			dhd->prot->d2hring_edl->create_pending = FALSE;
12573 			dhd->prot->d2hring_edl->inited = TRUE;
12574 		}
12575 #endif /* EWP_EDL */
12576 	}
12577 
12578 #ifdef BTLOG
12579 	if (ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_BTLOGRING_REQ_PKTID) {
12580 		if (!dhd->prot->d2hring_btlog_cpln->create_pending) {
12581 			DHD_ERROR(("btlog ring create status for not pending cpl ring\n"));
12582 			return;
12583 		}
12584 
12585 		if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
12586 			DHD_ERROR(("btlog cpl ring create failed with status %d\n",
12587 				ltoh16(resp->cmplt.status)));
12588 			return;
12589 		}
12590 		dhd->prot->d2hring_btlog_cpln->create_pending = FALSE;
12591 		dhd->prot->d2hring_btlog_cpln->inited = TRUE;
12592 	}
12593 #endif	/* BTLOG */
12594 #ifdef DHD_HP2P
12595 	if (dhd->prot->d2hring_hp2p_txcpl &&
12596 		ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_TXREQ_PKTID) {
12597 		if (!dhd->prot->d2hring_hp2p_txcpl->create_pending) {
12598 			DHD_ERROR(("HPP tx ring create status for not pending cpl ring\n"));
12599 			return;
12600 		}
12601 
12602 		if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
12603 			DHD_ERROR(("HPP tx cpl ring create failed with status %d\n",
12604 				ltoh16(resp->cmplt.status)));
12605 			return;
12606 		}
12607 		dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
12608 		dhd->prot->d2hring_hp2p_txcpl->inited = TRUE;
12609 	}
12610 	if (dhd->prot->d2hring_hp2p_rxcpl &&
12611 		ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_RXREQ_PKTID) {
12612 		if (!dhd->prot->d2hring_hp2p_rxcpl->create_pending) {
12613 			DHD_ERROR(("HPP rx ring create status for not pending cpl ring\n"));
12614 			return;
12615 		}
12616 
12617 		if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
12618 			DHD_ERROR(("HPP rx cpl ring create failed with status %d\n",
12619 				ltoh16(resp->cmplt.status)));
12620 			return;
12621 		}
12622 		dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
12623 		dhd->prot->d2hring_hp2p_rxcpl->inited = TRUE;
12624 	}
12625 #endif /* DHD_HP2P */
12626 }
12627 
12628 static void
12629 dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf)
12630 {
12631 	d2h_mailbox_data_t *d2h_data;
12632 
12633 	d2h_data = (d2h_mailbox_data_t *)buf;
12634 	DHD_MSGBUF_INFO(("%s: D2H mailbox data 0x%04x\n", __FUNCTION__,
12635 		d2h_data->d2h_mailbox_data));
12636 	dhd_bus_handle_mb_data(dhd->bus, d2h_data->d2h_mailbox_data);
12637 }
12638 
12639 static void
12640 dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf)
12641 {
12642 #ifdef DHD_TIMESYNC
12643 	host_timestamp_msg_cpl_t  *host_ts_cpl;
12644 	uint32 pktid;
12645 	dhd_prot_t *prot = dhd->prot;
12646 
12647 	host_ts_cpl = (host_timestamp_msg_cpl_t *)buf;
12648 	DHD_INFO(("%s host TS cpl: status %d, req_ID: 0x%04x, xt_id %d \n", __FUNCTION__,
12649 		host_ts_cpl->cmplt.status, host_ts_cpl->msg.request_id, host_ts_cpl->xt_id));
12650 
12651 	pktid = ltoh32(host_ts_cpl->msg.request_id);
12652 	if (prot->hostts_req_buf_inuse == FALSE) {
12653 		DHD_ERROR(("No Pending Host TS req, but completion\n"));
12654 		return;
12655 	}
12656 	prot->hostts_req_buf_inuse = FALSE;
12657 	if (pktid != DHD_H2D_HOSTTS_REQ_PKTID) {
12658 		DHD_ERROR(("Host TS req CPL, but req ID different 0x%04x, exp 0x%04x\n",
12659 			pktid, DHD_H2D_HOSTTS_REQ_PKTID));
12660 		return;
12661 	}
12662 	dhd_timesync_handle_host_ts_complete(dhd->ts, host_ts_cpl->xt_id,
12663 		host_ts_cpl->cmplt.status);
12664 #else /* DHD_TIMESYNC */
12665 	DHD_ERROR(("Timesync feature not compiled in but got HOST_TS_COMPLETE\n"));
12666 #endif /* DHD_TIMESYNC */
12667 
12668 }
12669 
12670 /** called on e.g. flow ring delete */
12671 void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info)
12672 {
12673 	msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
12674 	dhd_prot_ring_detach(dhd, flow_ring);
12675 	DHD_INFO(("%s Cleaning up Flow \n", __FUNCTION__));
12676 }
12677 
12678 void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info, bool h2d,
12679 	struct bcmstrbuf *strbuf, const char * fmt)
12680 {
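	/* Column naming (host view): TRD/TWR are the RD/WR indices read from dongle
	 * shared memory (TCM), HLRD/HLWR are the host-local copies kept in the ring,
	 * and HDRD/HDWR come from the host DMA index buffers.
	 */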
12681 	const char *default_fmt =
12682 		"TRD:%d HLRD:%d HDRD:%d TWR:%d HLWR:%d HDWR:%d  BASE(VA) %p BASE(PA) %x:%x SIZE %d "
12683 		"WORK_ITEM_SIZE %d MAX_WORK_ITEMS %d TOTAL_SIZE %d\n";
12684 	msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
12685 	uint16 rd, wr, drd = 0, dwr = 0;
12686 	uint32 dma_buf_len = flow_ring->max_items * flow_ring->item_len;
12687 
12688 	if (fmt == NULL) {
12689 		fmt = default_fmt;
12690 	}
12691 
12692 	if (dhd->bus->is_linkdown) {
12693 		DHD_ERROR(("%s: Skip dumping flowring due to Link down\n", __FUNCTION__));
12694 		return;
12695 	}
12696 
12697 	dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
12698 	dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
12699 	if (dhd->dma_d2h_ring_upd_support) {
12700 		if (h2d) {
12701 			drd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, flow_ring->idx);
12702 			dwr = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, flow_ring->idx);
12703 		} else {
12704 			drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, flow_ring->idx);
12705 			dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, flow_ring->idx);
12706 		}
12707 	}
12708 	bcm_bprintf(strbuf, fmt, rd, flow_ring->rd, drd, wr, flow_ring->wr, dwr,
12709 		flow_ring->dma_buf.va,
12710 		ltoh32(flow_ring->base_addr.high_addr),
12711 		ltoh32(flow_ring->base_addr.low_addr),
12712 		flow_ring->item_len, flow_ring->max_items,
12713 		dma_buf_len);
12714 }
12715 
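/** Dumps IPC revisions, buffer post counters and the state of the common rings into strbuf */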
12716 void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
12717 {
12718 	dhd_prot_t *prot = dhd->prot;
12719 	bcm_bprintf(strbuf, "IPCrevs: Dev %d, \t Host %d, \tactive %d\n",
12720 		dhd->prot->device_ipc_version,
12721 		dhd->prot->host_ipc_version,
12722 		dhd->prot->active_ipc_version);
12723 
12724 	bcm_bprintf(strbuf, "max Host TS bufs to post: %d, \t posted %d \n",
12725 		dhd->prot->max_tsbufpost, dhd->prot->cur_ts_bufs_posted);
12726 	bcm_bprintf(strbuf, "max INFO bufs to post: %d, \t posted %d \n",
12727 		dhd->prot->max_infobufpost, dhd->prot->infobufpost);
12728 #ifdef BTLOG
12729 	bcm_bprintf(strbuf, "max BTLOG bufs to post: %d, \t posted %d \n",
12730 		dhd->prot->max_btlogbufpost, dhd->prot->btlogbufpost);
12731 #endif	/* BTLOG */
12732 	bcm_bprintf(strbuf, "max event bufs to post: %d, \t posted %d \n",
12733 		dhd->prot->max_eventbufpost, dhd->prot->cur_event_bufs_posted);
12734 	bcm_bprintf(strbuf, "max ioctlresp bufs to post: %d, \t posted %d \n",
12735 		dhd->prot->max_ioctlrespbufpost, dhd->prot->cur_ioctlresp_bufs_posted);
12736 	bcm_bprintf(strbuf, "max RX bufs to post: %d, \t posted %d \n",
12737 		dhd->prot->max_rxbufpost, dhd->prot->rxbufpost);
12738 
12739 	bcm_bprintf(strbuf, "Total RX bufs posted: %d, \t RX cpl got %d \n",
12740 		dhd->prot->tot_rxbufpost, dhd->prot->tot_rxcpl);
12741 
12742 	bcm_bprintf(strbuf, "Total TX packets: %lu, \t TX cpl got %lu \n",
12743 		dhd->actual_tx_pkts, dhd->tot_txcpl);
12744 
12745 	bcm_bprintf(strbuf,
12746 		"%14s %18s %18s %17s %17s %14s %14s %10s\n",
12747 		"Type", "TRD: HLRD: HDRD", "TWR: HLWR: HDWR", "BASE(VA)", "BASE(PA)",
12748 		"WORK_ITEM_SIZE", "MAX_WORK_ITEMS", "TOTAL_SIZE");
12749 	bcm_bprintf(strbuf, "%14s", "H2DCtrlPost");
12750 	dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, TRUE, strbuf,
12751 		" %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
12752 	bcm_bprintf(strbuf, "%14s", "D2HCtrlCpl");
12753 	dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, FALSE, strbuf,
12754 		" %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
12755 	bcm_bprintf(strbuf, "%14s", "H2DRxPost");
12756 	dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, TRUE, strbuf,
12757 		" %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
12758 	bcm_bprintf(strbuf, "%14s", "D2HRxCpl");
12759 	dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, FALSE, strbuf,
12760 		" %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
12761 	bcm_bprintf(strbuf, "%14s", "D2HTxCpl");
12762 	dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, FALSE, strbuf,
12763 		" %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
12764 	if (dhd->prot->h2dring_info_subn != NULL && dhd->prot->d2hring_info_cpln != NULL) {
12765 		bcm_bprintf(strbuf, "%14s", "H2DRingInfoSub");
12766 		dhd_prot_print_flow_ring(dhd, prot->h2dring_info_subn, TRUE, strbuf,
12767 			" %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
12768 		bcm_bprintf(strbuf, "%14s", "D2HRingInfoCpl");
12769 		dhd_prot_print_flow_ring(dhd, prot->d2hring_info_cpln, FALSE, strbuf,
12770 			" %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
12771 	}
12772 	if (dhd->prot->d2hring_edl != NULL) {
12773 		bcm_bprintf(strbuf, "%14s", "D2HRingEDL");
12774 		dhd_prot_print_flow_ring(dhd, prot->d2hring_edl, FALSE, strbuf,
12775 			" %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
12776 	}
12777 
12778 	bcm_bprintf(strbuf, "active_tx_count %d	 pktidmap_avail(ctrl/rx/tx) %d %d %d\n",
12779 		OSL_ATOMIC_READ(dhd->osh, &dhd->prot->active_tx_count),
12780 		DHD_PKTID_AVAIL(dhd->prot->pktid_ctrl_map),
12781 		DHD_PKTID_AVAIL(dhd->prot->pktid_rx_map),
12782 		DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map));
12783 
12784 #if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
12785 	dhd_prot_ioctl_dump(dhd->prot, strbuf);
12786 #endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */
12787 #ifdef DHD_MMIO_TRACE
12788 	dhd_dump_bus_mmio_trace(dhd->bus, strbuf);
12789 #endif /* DHD_MMIO_TRACE */
12790 	dhd_dump_bus_ds_trace(dhd->bus, strbuf);
12791 #ifdef DHD_FLOW_RING_STATUS_TRACE
12792 	dhd_dump_bus_flow_ring_status_isr_trace(dhd->bus, strbuf);
12793 	dhd_dump_bus_flow_ring_status_dpc_trace(dhd->bus, strbuf);
12794 #endif /* DHD_FLOW_RING_STATUS_TRACE */
12795 }
12796 
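/** Sends a MSG_TYPE_FLOW_RING_DELETE request for the given flow ring to the dongle */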
12797 int
12798 dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
12799 {
12800 	tx_flowring_delete_request_t *flow_delete_rqst;
12801 	dhd_prot_t *prot = dhd->prot;
12802 	unsigned long flags;
12803 	uint16 alloced = 0;
12804 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
12805 
12806 #ifdef PCIE_INB_DW
12807 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
12808 		return BCME_ERROR;
12809 #endif /* PCIE_INB_DW */
12810 
12811 	DHD_RING_LOCK(ring->ring_lock, flags);
12812 
12813 	/* Request for ring buffer space */
12814 	flow_delete_rqst = (tx_flowring_delete_request_t *)
12815 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
12816 
12817 	if (flow_delete_rqst == NULL) {
12818 		DHD_RING_UNLOCK(ring->ring_lock, flags);
12819 		DHD_ERROR(("%s: Flow Delete Req - failure ring space\n", __FUNCTION__));
12820 #ifdef PCIE_INB_DW
12821 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
12822 #endif
12823 		return BCME_NOMEM;
12824 	}
12825 
12826 	/* Common msg buf hdr */
12827 	flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE;
12828 	flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
12829 	flow_delete_rqst->msg.request_id = htol32(0); /* TBD */
12830 	flow_delete_rqst->msg.flags = ring->current_phase;
12831 
12832 	flow_delete_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
12833 	ring->seqnum++;
12834 
12835 	/* Update Delete info */
12836 	flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
12837 	flow_delete_rqst->reason = htol16(BCME_OK);
12838 
12839 	DHD_ERROR(("%s: Send Flow Delete Req RING ID %d for peer %pM"
12840 		" prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
12841 		flow_ring_node->flow_info.da, flow_ring_node->flow_info.tid,
12842 		flow_ring_node->flow_info.ifindex));
12843 
12844 	/* update ring's WR index and ring doorbell to dongle */
12845 	dhd_prot_ring_write_complete(dhd, ring, flow_delete_rqst, 1);
12846 
12847 	DHD_RING_UNLOCK(ring->ring_lock, flags);
12848 
12849 #ifdef PCIE_INB_DW
12850 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
12851 #endif
12852 	return BCME_OK;
12853 }
12854 
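/**
 * Fast delete: walk the flow ring backwards from the host WR index to the
 * dongle-reported RD index and synthesize a tx completion for each outstanding
 * work item, so the associated host packets are reclaimed without waiting for
 * individual completions from the dongle.
 */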
12855 static void
12856 BCMFASTPATH(dhd_prot_flow_ring_fastdelete)(dhd_pub_t *dhd, uint16 flowid, uint16 rd_idx)
12857 {
12858 	flow_ring_node_t *flow_ring_node = DHD_FLOW_RING(dhd, flowid);
12859 	msgbuf_ring_t *ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
12860 	host_txbuf_cmpl_t txstatus;
12861 	host_txbuf_post_t *txdesc;
12862 	uint16 wr_idx;
12863 
12864 	DHD_INFO(("%s: FAST delete ring, flowid=%d, rd_idx=%d, wr_idx=%d\n",
12865 		__FUNCTION__, flowid, rd_idx, ring->wr));
12866 
12867 	memset(&txstatus, 0, sizeof(txstatus));
12868 	txstatus.compl_hdr.flow_ring_id = flowid;
12869 	txstatus.cmn_hdr.if_id = flow_ring_node->flow_info.ifindex;
12870 	wr_idx = ring->wr;
12871 
12872 	while (wr_idx != rd_idx) {
12873 		if (wr_idx)
12874 			wr_idx--;
12875 		else
12876 			wr_idx = ring->max_items - 1;
12877 		txdesc = (host_txbuf_post_t *)((char *)DHD_RING_BGN_VA(ring) +
12878 			(wr_idx * ring->item_len));
12879 		txstatus.cmn_hdr.request_id = txdesc->cmn_hdr.request_id;
12880 		dhd_prot_txstatus_process(dhd, &txstatus);
12881 	}
12882 }
12883 
12884 static void
12885 dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg)
12886 {
12887 	tx_flowring_delete_response_t *flow_delete_resp = (tx_flowring_delete_response_t *)msg;
12888 
12889 	DHD_ERROR(("%s: Flow Delete Response status = %d Flow %d\n", __FUNCTION__,
12890 		flow_delete_resp->cmplt.status, flow_delete_resp->cmplt.flow_ring_id));
12891 
12892 	if (dhd->fast_delete_ring_support) {
12893 		dhd_prot_flow_ring_fastdelete(dhd, flow_delete_resp->cmplt.flow_ring_id,
12894 			flow_delete_resp->read_idx);
12895 	}
12896 	dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id,
12897 		flow_delete_resp->cmplt.status);
12898 }
12899 
12900 static void
12901 dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg)
12902 {
12903 #ifdef IDLE_TX_FLOW_MGMT
12904 	tx_idle_flowring_resume_response_t	*flow_resume_resp =
12905 		(tx_idle_flowring_resume_response_t *)msg;
12906 
12907 	DHD_ERROR(("%s Flow resume Response status = %d Flow %d\n", __FUNCTION__,
12908 		flow_resume_resp->cmplt.status, flow_resume_resp->cmplt.flow_ring_id));
12909 
12910 	dhd_bus_flow_ring_resume_response(dhd->bus, flow_resume_resp->cmplt.flow_ring_id,
12911 		flow_resume_resp->cmplt.status);
12912 #endif /* IDLE_TX_FLOW_MGMT */
12913 }
12914 
12915 static void
12916 dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg)
12917 {
12918 #ifdef IDLE_TX_FLOW_MGMT
12919 	int16 status;
12920 	tx_idle_flowring_suspend_response_t	*flow_suspend_resp =
12921 		(tx_idle_flowring_suspend_response_t *)msg;
12922 	status = flow_suspend_resp->cmplt.status;
12923 
12924 	DHD_ERROR(("%s Flow id %d suspend Response status = %d\n",
12925 		__FUNCTION__, flow_suspend_resp->cmplt.flow_ring_id,
12926 		status));
12927 
12928 	if (status != BCME_OK) {
12929 
12930 		DHD_ERROR(("%s Error in suspending flow rings!!"
12931 			" Dongle will still be polling idle rings!! Status = %d\n",
12932 			__FUNCTION__, status));
12933 	}
12934 #endif /* IDLE_TX_FLOW_MGMT */
12935 }
12936 
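/** Sends a MSG_TYPE_FLOW_RING_FLUSH request for the given flow ring to the dongle */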
12937 int
12938 dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
12939 {
12940 	tx_flowring_flush_request_t *flow_flush_rqst;
12941 	dhd_prot_t *prot = dhd->prot;
12942 	unsigned long flags;
12943 	uint16 alloced = 0;
12944 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
12945 
12946 #ifdef PCIE_INB_DW
12947 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
12948 		return BCME_ERROR;
12949 #endif /* PCIE_INB_DW */
12950 
12951 	DHD_RING_LOCK(ring->ring_lock, flags);
12952 
12953 	/* Request for ring buffer space */
12954 	flow_flush_rqst = (tx_flowring_flush_request_t *)
12955 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
12956 	if (flow_flush_rqst == NULL) {
12957 		DHD_RING_UNLOCK(ring->ring_lock, flags);
12958 		DHD_ERROR(("%s: Flow Flush Req - failure ring space\n", __FUNCTION__));
12959 #ifdef PCIE_INB_DW
12960 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
12961 #endif
12962 		return BCME_NOMEM;
12963 	}
12964 
12965 	/* Common msg buf hdr */
12966 	flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH;
12967 	flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
12968 	flow_flush_rqst->msg.request_id = htol32(0); /* TBD */
12969 	flow_flush_rqst->msg.flags = ring->current_phase;
12970 	flow_flush_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
12971 	ring->seqnum++;
12972 
12973 	flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
12974 	flow_flush_rqst->reason = htol16(BCME_OK);
12975 
12976 	DHD_INFO(("%s: Send Flow Flush Req\n", __FUNCTION__));
12977 
12978 	/* update ring's WR index and ring doorbell to dongle */
12979 	dhd_prot_ring_write_complete(dhd, ring, flow_flush_rqst, 1);
12980 
12981 	DHD_RING_UNLOCK(ring->ring_lock, flags);
12982 
12983 #ifdef PCIE_INB_DW
12984 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
12985 #endif
12986 	return BCME_OK;
12987 } /* dhd_prot_flow_ring_flush */
12988 
12989 static void
12990 dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg)
12991 {
12992 	tx_flowring_flush_response_t *flow_flush_resp = (tx_flowring_flush_response_t *)msg;
12993 
12994 	DHD_INFO(("%s: Flow Flush Response status = %d\n", __FUNCTION__,
12995 		flow_flush_resp->cmplt.status));
12996 
12997 	dhd_bus_flow_ring_flush_response(dhd->bus, flow_flush_resp->cmplt.flow_ring_id,
12998 		flow_flush_resp->cmplt.status);
12999 }
13000 
13001 /**
13002  * Request dongle to configure soft doorbells for D2H rings. Host populated soft
13003  * doorbell information is transferred to dongle via the d2h ring config control
13004  * message.
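 * For each D2H common ring a <haddr, value> pair is sent together with the
 * items/msecs coalescing parameters taken from prot->soft_doorbell[].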
13005  */
13006 void
13007 dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd)
13008 {
13009 #if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
13010 	uint16 ring_idx;
13011 	uint8 *msg_next;
13012 	void *msg_start;
13013 	uint16 alloced = 0;
13014 	unsigned long flags;
13015 	dhd_prot_t *prot = dhd->prot;
13016 	ring_config_req_t *ring_config_req;
13017 	bcmpcie_soft_doorbell_t *soft_doorbell;
13018 	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
13019 	const uint16 d2h_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
13020 
13021 #ifdef PCIE_INB_DW
13022 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
13023 		return;
13024 #endif /* PCIE_INB_DW */
13025 	/* Claim space for d2h_ring number of d2h_ring_config_req_t messages */
13026 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
13027 	msg_start = dhd_prot_alloc_ring_space(dhd, ctrl_ring, d2h_rings, &alloced, TRUE);
13028 
13029 	if (msg_start == NULL) {
13030 		DHD_ERROR(("%s Msgbuf no space for %d D2H ring config soft doorbells\n",
13031 			__FUNCTION__, d2h_rings));
13032 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
13033 #ifdef PCIE_INB_DW
13034 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
13035 #endif
13036 		return;
13037 	}
13038 
13039 	msg_next = (uint8*)msg_start;
13040 
13041 	for (ring_idx = 0; ring_idx < d2h_rings; ring_idx++) {
13042 
13043 		/* position the ring_config_req into the ctrl subm ring */
13044 		ring_config_req = (ring_config_req_t *)msg_next;
13045 
13046 		/* Common msg header */
13047 		ring_config_req->msg.msg_type = MSG_TYPE_D2H_RING_CONFIG;
13048 		ring_config_req->msg.if_id = 0;
13049 		ring_config_req->msg.flags = 0;
13050 
13051 		ring_config_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
13052 		ctrl_ring->seqnum++;
13053 
13054 		ring_config_req->msg.request_id = htol32(DHD_FAKE_PKTID); /* unused */
13055 
13056 		/* Ring Config subtype and d2h ring_id */
13057 		ring_config_req->subtype = htol16(D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL);
13058 		ring_config_req->ring_id = htol16(DHD_D2H_RINGID(ring_idx));
13059 
13060 		/* Host soft doorbell configuration */
13061 		soft_doorbell = &prot->soft_doorbell[ring_idx];
13062 
13063 		ring_config_req->soft_doorbell.value = htol32(soft_doorbell->value);
13064 		ring_config_req->soft_doorbell.haddr.high =
13065 			htol32(soft_doorbell->haddr.high);
13066 		ring_config_req->soft_doorbell.haddr.low =
13067 			htol32(soft_doorbell->haddr.low);
13068 		ring_config_req->soft_doorbell.items = htol16(soft_doorbell->items);
13069 		ring_config_req->soft_doorbell.msecs = htol16(soft_doorbell->msecs);
13070 
13071 		DHD_INFO(("%s: Soft doorbell haddr 0x%08x 0x%08x value 0x%08x\n",
13072 			__FUNCTION__, ring_config_req->soft_doorbell.haddr.high,
13073 			ring_config_req->soft_doorbell.haddr.low,
13074 			ring_config_req->soft_doorbell.value));
13075 
13076 		msg_next = msg_next + ctrl_ring->item_len;
13077 	}
13078 
13079 	/* update control subn ring's WR index and ring doorbell to dongle */
13080 	dhd_prot_ring_write_complete(dhd, ctrl_ring, msg_start, d2h_rings);
13081 
13082 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
13083 
13084 #ifdef PCIE_INB_DW
13085 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
13086 #endif
13087 #endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
13088 }
13089 
13090 static void
13091 dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg)
13092 {
13093 	DHD_INFO(("%s: Ring Config Response - status %d ringid %d\n",
13094 		__FUNCTION__, ltoh16(((ring_config_resp_t *)msg)->compl_hdr.status),
13095 		ltoh16(((ring_config_resp_t *)msg)->compl_hdr.flow_ring_id)));
13096 }
13097 
13098 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
13099 void
13100 copy_ext_trap_sig(dhd_pub_t *dhd, trap_t *tr)
13101 {
13102 	uint32 *ext_data = dhd->extended_trap_data;
13103 	hnd_ext_trap_hdr_t *hdr;
13104 	const bcm_tlv_t *tlv;
13105 
13106 	if (ext_data == NULL) {
13107 		return;
13108 	}
13109 	/* First word is original trap_data */
13110 	ext_data++;
13111 
13112 	/* Followed by the extended trap data header */
13113 	hdr = (hnd_ext_trap_hdr_t *)ext_data;
13114 
13115 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
13116 	if (tlv) {
13117 		memcpy(tr, &tlv->data, sizeof(struct _trap_struct));
13118 	}
13119 }
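
/* Builds a {field-name, trap_t offset} entry for the hang_info_trap_tbl below */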
13120 #define TRAP_T_NAME_OFFSET(var) {#var, OFFSETOF(trap_t, var)}
13121 
13122 typedef struct {
13123 	char name[HANG_INFO_TRAP_T_NAME_MAX];
13124 	uint32 offset;
13125 } hang_info_trap_t;
13126 
13127 #ifdef DHD_EWPR_VER2
13128 static hang_info_trap_t hang_info_trap_tbl[] = {
13129 	{"reason", 0},
13130 	{"ver", VENDOR_SEND_HANG_EXT_INFO_VER},
13131 	{"stype", 0},
13132 	TRAP_T_NAME_OFFSET(type),
13133 	TRAP_T_NAME_OFFSET(epc),
13134 	{"resrvd", 0},
13135 	{"resrvd", 0},
13136 	{"resrvd", 0},
13137 	{"resrvd", 0},
13138 	{"", 0}
13139 };
13140 #else
13141 static hang_info_trap_t hang_info_trap_tbl[] = {
13142 	{"reason", 0},
13143 	{"ver", VENDOR_SEND_HANG_EXT_INFO_VER},
13144 	{"stype", 0},
13145 	TRAP_T_NAME_OFFSET(type),
13146 	TRAP_T_NAME_OFFSET(epc),
13147 	TRAP_T_NAME_OFFSET(cpsr),
13148 	TRAP_T_NAME_OFFSET(spsr),
13149 	TRAP_T_NAME_OFFSET(r0),
13150 	TRAP_T_NAME_OFFSET(r1),
13151 	TRAP_T_NAME_OFFSET(r2),
13152 	TRAP_T_NAME_OFFSET(r3),
13153 	TRAP_T_NAME_OFFSET(r4),
13154 	TRAP_T_NAME_OFFSET(r5),
13155 	TRAP_T_NAME_OFFSET(r6),
13156 	TRAP_T_NAME_OFFSET(r7),
13157 	TRAP_T_NAME_OFFSET(r8),
13158 	TRAP_T_NAME_OFFSET(r9),
13159 	TRAP_T_NAME_OFFSET(r10),
13160 	TRAP_T_NAME_OFFSET(r11),
13161 	TRAP_T_NAME_OFFSET(r12),
13162 	TRAP_T_NAME_OFFSET(r13),
13163 	TRAP_T_NAME_OFFSET(r14),
13164 	TRAP_T_NAME_OFFSET(pc),
13165 	{"", 0}
13166 };
13167 #endif /* DHD_EWPR_VER2 */
13168 
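/* Trap TLV tags that only carry state dumps; these are skipped when deriving
 * the trap subtype in get_hang_info_trap_subtype().
 */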
13169 #define TAG_TRAP_IS_STATE(tag) \
13170 	((tag == TAG_TRAP_MEMORY) || (tag == TAG_TRAP_PCIE_Q) || \
13171 	(tag == TAG_TRAP_WLC_STATE) || (tag == TAG_TRAP_LOG_DATA) || \
13172 	(tag == TAG_TRAP_CODE))
13173 
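/* Writes the leading hang info fields (hang reason, hang info version, cookie,
 * trap subtype and EPC) into 'dest' as HANG_KEY_DEL separated tokens.
 */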
13174 static void
13175 copy_hang_info_head(char *dest, trap_t *src, int len, int field_name,
13176 		int *bytes_written, int *cnt, char *cookie)
13177 {
13178 	uint8 *ptr;
13179 	int remain_len;
13180 	int i;
13181 
13182 	ptr = (uint8 *)src;
13183 
13184 	memset(dest, 0, len);
13185 	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13186 
13187 	/* hang reason, hang info ver */
13188 	for (i = 0; (i < HANG_INFO_TRAP_T_SUBTYPE_IDX) && (*cnt < HANG_FIELD_CNT_MAX);
13189 			i++, (*cnt)++) {
13190 		if (field_name) {
13191 			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13192 			*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
13193 					hang_info_trap_tbl[i].name, HANG_KEY_DEL);
13194 		}
13195 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13196 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%d%c",
13197 				hang_info_trap_tbl[i].offset, HANG_KEY_DEL);
13198 
13199 	}
13200 
13201 	if (*cnt < HANG_FIELD_CNT_MAX) {
13202 		if (field_name) {
13203 			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13204 			*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
13205 					"cookie", HANG_KEY_DEL);
13206 		}
13207 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13208 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s%c",
13209 				cookie, HANG_KEY_DEL);
13210 		(*cnt)++;
13211 	}
13212 
13213 	if (*cnt < HANG_FIELD_CNT_MAX) {
13214 		if (field_name) {
13215 			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13216 			*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
13217 					hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].name,
13218 					HANG_KEY_DEL);
13219 		}
13220 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13221 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%08x%c",
13222 				hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].offset,
13223 				HANG_KEY_DEL);
13224 		(*cnt)++;
13225 	}
13226 
13227 	if (*cnt < HANG_FIELD_CNT_MAX) {
13228 		if (field_name) {
13229 			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13230 			*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
13231 					hang_info_trap_tbl[HANG_INFO_TRAP_T_EPC_IDX].name,
13232 					HANG_KEY_DEL);
13233 		}
13234 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13235 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%08x%c",
13236 				*(uint32 *)
13237 				(ptr + hang_info_trap_tbl[HANG_INFO_TRAP_T_EPC_IDX].offset),
13238 				HANG_KEY_DEL);
13239 		(*cnt)++;
13240 	}
13241 #ifdef DHD_EWPR_VER2
13242 	/* put 0 for HG03 ~ HG06 (reserved for future use) */
13243 	for (i = 0; (i < HANG_INFO_BIGDATA_EXTRA_KEY) && (*cnt < HANG_FIELD_CNT_MAX);
13244 			i++, (*cnt)++) {
13245 		if (field_name) {
13246 			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13247 			*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
13248 				hang_info_trap_tbl[HANG_INFO_TRAP_T_EXTRA_KEY_IDX+i].name,
13249 				HANG_KEY_DEL);
13250 		}
13251 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13252 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%d%c",
13253 			hang_info_trap_tbl[HANG_INFO_TRAP_T_EXTRA_KEY_IDX+i].offset,
13254 			HANG_KEY_DEL);
13255 	}
13256 #endif /* DHD_EWPR_VER2 */
13257 }
13258 #ifndef DHD_EWPR_VER2
13259 static void
13260 copy_hang_info_trap_t(char *dest, trap_t *src, int len, int field_name,
13261 		int *bytes_written, int *cnt, char *cookie)
13262 {
13263 	uint8 *ptr;
13264 	int remain_len;
13265 	int i;
13266 
13267 	ptr = (uint8 *)src;
13268 
13269 	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13270 
13271 	for (i = HANG_INFO_TRAP_T_OFFSET_IDX;
13272 			(hang_info_trap_tbl[i].name[0] != 0) && (*cnt < HANG_FIELD_CNT_MAX);
13273 			i++, (*cnt)++) {
13274 		if (field_name) {
13275 			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13276 			*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%s:",
13277 					HANG_RAW_DEL, hang_info_trap_tbl[i].name);
13278 		}
13279 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13280 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
13281 				HANG_RAW_DEL, *(uint32 *)(ptr + hang_info_trap_tbl[i].offset));
13282 	}
13283 }
13284 
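/* Appends the dongle stack contents (TAG_TRAP_STACK TLV) to the hang info string,
 * padding with zero words up to HANG_FIELD_TRAP_T_STACK_CNT_MAX entries.
 */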
13285 static void
13286 copy_hang_info_stack(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
13287 {
13288 	int remain_len;
13289 	int i = 0;
13290 	const uint32 *stack;
13291 	uint32 *ext_data = dhd->extended_trap_data;
13292 	hnd_ext_trap_hdr_t *hdr;
13293 	const bcm_tlv_t *tlv;
13294 	int remain_stack_cnt = 0;
13295 	uint32 dummy_data = 0;
13296 	int bigdata_key_stack_cnt = 0;
13297 
13298 	if (ext_data == NULL) {
13299 		return;
13300 	}
13301 	/* First word is original trap_data */
13302 	ext_data++;
13303 
13304 	/* Followed by the extended trap data header */
13305 	hdr = (hnd_ext_trap_hdr_t *)ext_data;
13306 
13307 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
13308 
13309 	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13310 
13311 	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
13312 	if (tlv) {
13313 		stack = (const uint32 *)tlv->data;
13314 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len,
13315 				"%08x", *(uint32 *)(stack++));
13316 		(*cnt)++;
13317 		if (*cnt >= HANG_FIELD_CNT_MAX) {
13318 			return;
13319 		}
13320 		for (i = 1; i < (uint32)(tlv->len / sizeof(uint32)); i++, bigdata_key_stack_cnt++) {
13321 			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13322 			/* Raw data for bigdata uses '_'; key data for bigdata uses a space */
13323 			*bytes_written += scnprintf(&dest[*bytes_written], remain_len,
13324 				"%c%08x",
13325 				i <= HANG_INFO_BIGDATA_KEY_STACK_CNT ? HANG_KEY_DEL : HANG_RAW_DEL,
13326 				*(uint32 *)(stack++));
13327 
13328 			(*cnt)++;
13329 			if ((*cnt >= HANG_FIELD_CNT_MAX) ||
13330 					(i >= HANG_FIELD_TRAP_T_STACK_CNT_MAX)) {
13331 				return;
13332 			}
13333 		}
13334 	}
13335 
13336 	remain_stack_cnt = HANG_FIELD_TRAP_T_STACK_CNT_MAX - i;
13337 
13338 	for (i = 0; i < remain_stack_cnt; i++) {
13339 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13340 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
13341 				HANG_RAW_DEL, dummy_data);
13342 		(*cnt)++;
13343 		if (*cnt >= HANG_FIELD_CNT_MAX) {
13344 			return;
13345 		}
13346 	}
13347 	GCC_DIAGNOSTIC_POP();
13348 
13349 }
13350 
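/* Appends the remaining extended trap data, i.e. everything after the signature
 * and stack TLVs, to the hang info string as raw hex words.
 */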
13351 static void
13352 copy_hang_info_specific(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
13353 {
13354 	int remain_len;
13355 	int i;
13356 	const uint32 *data;
13357 	uint32 *ext_data = dhd->extended_trap_data;
13358 	hnd_ext_trap_hdr_t *hdr;
13359 	const bcm_tlv_t *tlv;
13360 	int remain_trap_data = 0;
13361 	uint8 buf_u8[sizeof(uint32)] = { 0, };
13362 	const uint8 *p_u8;
13363 
13364 	if (ext_data == NULL) {
13365 		return;
13366 	}
13367 	/* First word is original trap_data */
13368 	ext_data++;
13369 
13370 	/* Followed by the extended trap data header */
13371 	hdr = (hnd_ext_trap_hdr_t *)ext_data;
13372 
13373 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
13374 	if (tlv) {
13375 		/* header length includes the tlv header */
13376 		remain_trap_data = (hdr->len - tlv->len - sizeof(uint16));
13377 	}
13378 
13379 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
13380 	if (tlv) {
13381 		/* header length includes the tlv header */
13382 		remain_trap_data -= (tlv->len + sizeof(uint16));
13383 	}
13384 
13385 	data = (const uint32 *)(hdr->data + (hdr->len  - remain_trap_data));
13386 
13387 	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13388 
13389 	for (i = 0; i < (uint32)(remain_trap_data / sizeof(uint32)) && *cnt < HANG_FIELD_CNT_MAX;
13390 			i++, (*cnt)++) {
13391 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13392 		GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
13393 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
13394 				HANG_RAW_DEL, *(uint32 *)(data++));
13395 		GCC_DIAGNOSTIC_POP();
13396 	}
13397 
13398 	if (*cnt >= HANG_FIELD_CNT_MAX) {
13399 		return;
13400 	}
13401 
13402 	remain_trap_data -= (sizeof(uint32) * i);
13403 
13404 	if (remain_trap_data > sizeof(buf_u8)) {
13405 		DHD_ERROR(("%s: resize remain_trap_data\n", __FUNCTION__));
13406 		remain_trap_data =  sizeof(buf_u8);
13407 	}
13408 
13409 	if (remain_trap_data) {
13410 		p_u8 = (const uint8 *)data;
13411 		for (i = 0; i < remain_trap_data; i++) {
13412 			buf_u8[i] = *(const uint8 *)(p_u8++);
13413 		}
13414 
13415 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13416 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
13417 				HANG_RAW_DEL, ltoh32_ua(buf_u8));
13418 		(*cnt)++;
13419 	}
13420 }
13421 #endif /* DHD_EWPR_VER2 */
13422 
13423 static void
13424 get_hang_info_trap_subtype(dhd_pub_t *dhd, uint32 *subtype)
13425 {
13426 	uint32 i;
13427 	uint32 *ext_data = dhd->extended_trap_data;
13428 	hnd_ext_trap_hdr_t *hdr;
13429 	const bcm_tlv_t *tlv;
13430 
13431 	/* First word is original trap_data */
13432 	ext_data++;
13433 
13434 	/* Followed by the extended trap data header */
13435 	hdr = (hnd_ext_trap_hdr_t *)ext_data;
13436 
13437 	/* Scan the trap TLVs and use the first tag that is not a state dump as the subtype */
13438 	for (i = TAG_TRAP_DEEPSLEEP; i < TAG_TRAP_LAST; i++) {
13439 		tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
13440 		if (tlv) {
13441 			if (!TAG_TRAP_IS_STATE(i)) {
13442 				*subtype = i;
13443 				return;
13444 			}
13445 		}
13446 	}
13447 }
13448 #ifdef DHD_EWPR_VER2
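/* Base64 encodes the extended trap data TLV block into the hang info string */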
13449 static void
13450 copy_hang_info_etd_base64(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
13451 {
13452 	int remain_len;
13453 	uint32 *ext_data = dhd->extended_trap_data;
13454 	hnd_ext_trap_hdr_t *hdr;
13455 	char *base64_out = NULL;
13456 	int base64_cnt;
13457 	int max_base64_len = HANG_INFO_BASE64_BUFFER_SIZE;
13458 
13459 	if (ext_data == NULL) {
13460 		return;
13461 	}
13462 	/* First word is original trap_data */
13463 	ext_data++;
13464 
13465 	/* Followed by the extended trap data header */
13466 	hdr = (hnd_ext_trap_hdr_t *)ext_data;
13467 
13468 	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13469 
13470 	if (remain_len <= 0) {
13471 		DHD_ERROR(("%s: no space to put etd\n", __FUNCTION__));
13472 		return;
13473 	}
13474 
13475 	if (remain_len < max_base64_len) {
13476 		DHD_ERROR(("%s: change max base64 length to remain length %d\n", __FUNCTION__,
13477 			remain_len));
13478 		max_base64_len = remain_len;
13479 	}
13480 
13481 	base64_out = MALLOCZ(dhd->osh, HANG_INFO_BASE64_BUFFER_SIZE);
13482 	if (base64_out == NULL) {
13483 		DHD_ERROR(("%s: MALLOC failed for size %d\n",
13484 			__FUNCTION__, HANG_INFO_BASE64_BUFFER_SIZE));
13485 		return;
13486 	}
13487 
13488 	if (hdr->len > 0) {
13489 		base64_cnt = dhd_base64_encode(hdr->data, hdr->len, base64_out, max_base64_len);
13490 		if (base64_cnt == 0) {
13491 			DHD_ERROR(("%s: base64 encoding error\n", __FUNCTION__));
13492 		}
13493 	}
13494 
13495 	*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s",
13496 			base64_out);
13497 	(*cnt)++;
13498 	MFREE(dhd->osh, base64_out, HANG_INFO_BASE64_BUFFER_SIZE);
13499 }
13500 #endif /* DHD_EWPR_VER2 */
13501 
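/* Builds the hang info string for a dongle trap: header fields first, then the
 * base64 encoded extended trap data (DHD_EWPR_VER2) or the stack, trap_t registers
 * and remaining trap data.
 */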
13502 void
13503 copy_hang_info_trap(dhd_pub_t *dhd)
13504 {
13505 	trap_t tr;
13506 	int bytes_written;
13507 	int trap_subtype = 0;
13508 
13509 	if (!dhd || !dhd->hang_info) {
13510 		DHD_ERROR(("%s dhd=%p hang_info=%p\n", __FUNCTION__,
13511 			dhd, (dhd ? dhd->hang_info : NULL)));
13512 		return;
13513 	}
13514 
13515 	if (!dhd->dongle_trap_occured) {
13516 		DHD_ERROR(("%s: dongle_trap_occured is FALSE\n", __FUNCTION__));
13517 		return;
13518 	}
13519 
13520 	memset(&tr, 0x00, sizeof(struct _trap_struct));
13521 
13522 	copy_ext_trap_sig(dhd, &tr);
13523 	get_hang_info_trap_subtype(dhd, &trap_subtype);
13524 
13525 	hang_info_trap_tbl[HANG_INFO_TRAP_T_REASON_IDX].offset = HANG_REASON_DONGLE_TRAP;
13526 	hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].offset = trap_subtype;
13527 
13528 	bytes_written = 0;
13529 	dhd->hang_info_cnt = 0;
13530 	get_debug_dump_time(dhd->debug_dump_time_hang_str);
13531 	copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str);
13532 
13533 	copy_hang_info_head(dhd->hang_info, &tr, VENDOR_SEND_HANG_EXT_INFO_LEN, FALSE,
13534 			&bytes_written, &dhd->hang_info_cnt, dhd->debug_dump_time_hang_str);
13535 
13536 	DHD_INFO(("hang info head cnt: %d len: %d data: %s\n",
13537 		dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
13538 
13539 	clear_debug_dump_time(dhd->debug_dump_time_hang_str);
13540 
13541 #ifdef DHD_EWPR_VER2
13542 	/* stack info & trap info are included in etd data */
13543 
13544 	/* extended trap data dump */
13545 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
13546 		copy_hang_info_etd_base64(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
13547 		DHD_INFO(("hang info specific cnt: %d len: %d data: %s\n",
13548 			dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
13549 	}
13550 #else
13551 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
13552 		copy_hang_info_stack(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
13553 		DHD_INFO(("hang info stack cnt: %d len: %d data: %s\n",
13554 			dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
13555 	}
13556 
13557 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
13558 		copy_hang_info_trap_t(dhd->hang_info, &tr, VENDOR_SEND_HANG_EXT_INFO_LEN, FALSE,
13559 				&bytes_written, &dhd->hang_info_cnt, dhd->debug_dump_time_hang_str);
13560 		DHD_INFO(("hang info trap_t cnt: %d len: %d data: %s\n",
13561 			dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
13562 	}
13563 
13564 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
13565 		copy_hang_info_specific(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
13566 		DHD_INFO(("hang info specific cnt: %d len: %d data: %s\n",
13567 			dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
13568 	}
13569 #endif /* DHD_EWPR_VER2 */
13570 }
13571 
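/* Builds the hang info string for a PCIe link down: hang reason code, EWP version,
 * time stamp cookie and a dump of the PCIe RC registers.
 */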
13572 void
13573 copy_hang_info_linkdown(dhd_pub_t *dhd)
13574 {
13575 	int bytes_written = 0;
13576 	int remain_len;
13577 
13578 	if (!dhd || !dhd->hang_info) {
13579 		DHD_ERROR(("%s dhd=%p hang_info=%p\n", __FUNCTION__,
13580 			dhd, (dhd ? dhd->hang_info : NULL)));
13581 		return;
13582 	}
13583 
13584 	if (!dhd->bus->is_linkdown) {
13585 		DHD_ERROR(("%s: link down has not happened\n", __FUNCTION__));
13586 		return;
13587 	}
13588 
13589 	dhd->hang_info_cnt = 0;
13590 
13591 	get_debug_dump_time(dhd->debug_dump_time_hang_str);
13592 	copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str);
13593 
13594 	/* hang reason code (0x8808) */
13595 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
13596 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
13597 		bytes_written += scnprintf(&dhd->hang_info[bytes_written], remain_len, "%d%c",
13598 				HANG_REASON_PCIE_LINK_DOWN_EP_DETECT, HANG_KEY_DEL);
13599 		dhd->hang_info_cnt++;
13600 	}
13601 
13602 	/* EWP version */
13603 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
13604 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
13605 		bytes_written += scnprintf(&dhd->hang_info[bytes_written], remain_len, "%d%c",
13606 				VENDOR_SEND_HANG_EXT_INFO_VER, HANG_KEY_DEL);
13607 		dhd->hang_info_cnt++;
13608 	}
13609 
13610 	/* cookie - dump time stamp */
13611 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
13612 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
13613 		bytes_written += scnprintf(&dhd->hang_info[bytes_written], remain_len, "%s%c",
13614 				dhd->debug_dump_time_hang_str, HANG_KEY_DEL);
13615 		dhd->hang_info_cnt++;
13616 	}
13617 
13618 	clear_debug_dump_time(dhd->debug_dump_time_hang_str);
13619 
13620 	/* dump PCIE RC registers */
13621 	dhd_dump_pcie_rc_regs_for_linkdown(dhd, &bytes_written);
13622 
13623 	DHD_INFO(("hang info head cnt: %d len: %d data: %s\n",
13624 		dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
13625 
13626 }
13627 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
13628 
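/** Dumps version, protocol and common ring RD/WR state to the console for debugging */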
13629 int
13630 dhd_prot_debug_info_print(dhd_pub_t *dhd)
13631 {
13632 	dhd_prot_t *prot = dhd->prot;
13633 	msgbuf_ring_t *ring;
13634 	uint16 rd, wr, drd, dwr;
13635 	uint32 dma_buf_len;
13636 	uint64 current_time;
13637 	ulong ring_tcm_rd_addr; /* dongle address */
13638 	ulong ring_tcm_wr_addr; /* dongle address */
13639 
13640 	DHD_ERROR(("\n ------- DUMPING VERSION INFORMATION ------- \r\n"));
13641 	DHD_ERROR(("DHD: %s\n", dhd_version));
13642 	DHD_ERROR(("Firmware: %s\n", fw_version));
13643 
13644 #ifdef DHD_FW_COREDUMP
13645 	DHD_ERROR(("\n ------- DUMPING CONFIGURATION INFORMATION ------ \r\n"));
13646 	DHD_ERROR(("memdump mode: %d\n", dhd->memdump_enabled));
13647 #endif /* DHD_FW_COREDUMP */
13648 
13649 	DHD_ERROR(("\n ------- DUMPING PROTOCOL INFORMATION ------- \r\n"));
13650 	DHD_ERROR(("ICPrevs: Dev %d, Host %d, active %d\n",
13651 		prot->device_ipc_version,
13652 		prot->host_ipc_version,
13653 		prot->active_ipc_version));
13654 	DHD_ERROR(("d2h_intr_method -> %s d2h_intr_control -> %s\n",
13655 			dhd->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX",
13656 			dhd->bus->d2h_intr_control ? "HOST_IRQ" : "D2H_INTMASK"));
13657 	DHD_ERROR(("max Host TS bufs to post: %d, posted %d\n",
13658 		prot->max_tsbufpost, prot->cur_ts_bufs_posted));
13659 	DHD_ERROR(("max INFO bufs to post: %d, posted %d\n",
13660 		prot->max_infobufpost, prot->infobufpost));
13661 	DHD_ERROR(("max event bufs to post: %d, posted %d\n",
13662 		prot->max_eventbufpost, prot->cur_event_bufs_posted));
13663 	DHD_ERROR(("max ioctlresp bufs to post: %d, posted %d\n",
13664 		prot->max_ioctlrespbufpost, prot->cur_ioctlresp_bufs_posted));
13665 	DHD_ERROR(("max RX bufs to post: %d, posted %d\n",
13666 		prot->max_rxbufpost, prot->rxbufpost));
13667 	DHD_ERROR(("h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
13668 		h2d_max_txpost, prot->h2d_max_txpost));
13669 #if defined(DHD_HTPUT_TUNABLES)
13670 	DHD_ERROR(("h2d_htput_max_txpost: %d, prot->h2d_htput_max_txpost: %d\n",
13671 		h2d_htput_max_txpost, prot->h2d_htput_max_txpost));
13672 #endif /* DHD_HTPUT_TUNABLES */
13673 
13674 	current_time = OSL_LOCALTIME_NS();
13675 	DHD_ERROR(("current_time="SEC_USEC_FMT"\n", GET_SEC_USEC(current_time)));
13676 	DHD_ERROR(("ioctl_fillup_time="SEC_USEC_FMT
13677 		" ioctl_ack_time="SEC_USEC_FMT
13678 		" ioctl_cmplt_time="SEC_USEC_FMT"\n",
13679 		GET_SEC_USEC(prot->ioctl_fillup_time),
13680 		GET_SEC_USEC(prot->ioctl_ack_time),
13681 		GET_SEC_USEC(prot->ioctl_cmplt_time)));
13682 
13683 	/* Check PCIe INT registers */
13684 	if (!dhd_pcie_dump_int_regs(dhd)) {
13685 		DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
13686 		dhd->bus->is_linkdown = TRUE;
13687 	}
13688 
13689 	DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n"));
13690 
13691 	ring = &prot->h2dring_ctrl_subn;
13692 	dma_buf_len = ring->max_items * ring->item_len;
13693 	ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13694 	ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13695 	DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
13696 		"SIZE %d \r\n",
13697 		ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13698 		ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len));
13699 	DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13700 	if (dhd->dma_d2h_ring_upd_support) {
13701 		drd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
13702 		dwr = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
13703 		DHD_ERROR(("CtrlPost: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
13704 	}
13705 	if (dhd->bus->is_linkdown) {
13706 		DHD_ERROR(("CtrlPost: From Shared Mem: RD and WR are invalid"
13707 			" due to PCIe link down\r\n"));
13708 	} else {
13709 		dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13710 		dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13711 		DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
13712 	}
13713 	DHD_ERROR(("CtrlPost: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
13714 
13715 	ring = &prot->d2hring_ctrl_cpln;
13716 	dma_buf_len = ring->max_items * ring->item_len;
13717 	ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13718 	ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13719 	DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
13720 		"SIZE %d \r\n",
13721 		ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13722 		ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len));
13723 	DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13724 	if (dhd->dma_d2h_ring_upd_support) {
13725 		drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
13726 		dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
13727 		DHD_ERROR(("CtrlCpl: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
13728 	}
13729 	if (dhd->bus->is_linkdown) {
13730 		DHD_ERROR(("CtrlCpl: From Shared Mem: RD and WR are invalid"
13731 			" due to PCIe link down\r\n"));
13732 	} else {
13733 		dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13734 		dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13735 		DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
13736 	}
13737 	DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
13738 
13739 	ring = prot->h2dring_info_subn;
13740 	if (ring) {
13741 		dma_buf_len = ring->max_items * ring->item_len;
13742 		ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13743 		ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13744 		DHD_ERROR(("InfoSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
13745 			"SIZE %d \r\n",
13746 			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13747 			ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
13748 			dma_buf_len));
13749 		DHD_ERROR(("InfoSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13750 		if (dhd->dma_d2h_ring_upd_support) {
13751 			drd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
13752 			dwr = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
13753 			DHD_ERROR(("InfoSub: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
13754 		}
13755 		if (dhd->bus->is_linkdown) {
13756 			DHD_ERROR(("InfoSub: From Shared Mem: RD and WR are invalid"
13757 				" due to PCIe link down\r\n"));
13758 		} else {
13759 			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13760 			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13761 			DHD_ERROR(("InfoSub: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
13762 		}
13763 		DHD_ERROR(("InfoSub: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
13764 	}
13765 	ring = prot->d2hring_info_cpln;
13766 	if (ring) {
13767 		dma_buf_len = ring->max_items * ring->item_len;
13768 		ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13769 		ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13770 		DHD_ERROR(("InfoCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
13771 			"SIZE %d \r\n",
13772 			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13773 			ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
13774 			dma_buf_len));
13775 		DHD_ERROR(("InfoCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13776 		if (dhd->dma_d2h_ring_upd_support) {
13777 			drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
13778 			dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
13779 			DHD_ERROR(("InfoCpl: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
13780 		}
13781 		if (dhd->bus->is_linkdown) {
13782 			DHD_ERROR(("InfoCpl: From Shared Mem: RD and WR are invalid"
13783 				" due to PCIe link down\r\n"));
13784 		} else {
13785 			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13786 			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13787 			DHD_ERROR(("InfoCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
13788 		}
13789 		DHD_ERROR(("InfoCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
13790 	}
13791 #ifdef EWP_EDL
13792 	ring = prot->d2hring_edl;
13793 	if (ring) {
13794 		ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13795 		ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13796 		dma_buf_len = ring->max_items * ring->item_len;
13797 		DHD_ERROR(("EdlRing: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
13798 			"SIZE %d \r\n",
13799 			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13800 			ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
13801 			dma_buf_len));
13802 		DHD_ERROR(("EdlRing: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13803 		if (dhd->dma_d2h_ring_upd_support) {
13804 			drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
13805 			dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
13806 			DHD_ERROR(("EdlRing: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
13807 		}
13808 		if (dhd->bus->is_linkdown) {
13809 			DHD_ERROR(("EdlRing: From Shared Mem: RD and WR are invalid"
13810 				" due to PCIe link down\r\n"));
13811 		} else {
13812 			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13813 			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13814 			DHD_ERROR(("EdlRing: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
13815 		}
13816 		DHD_ERROR(("EdlRing: Expected seq num: %d \r\n",
13817 			ring->seqnum % D2H_EPOCH_MODULO));
13818 	}
13819 #endif /* EWP_EDL */
13820 
13821 	ring = &prot->d2hring_tx_cpln;
13822 	if (ring) {
13823 		ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13824 		ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13825 		dma_buf_len = ring->max_items * ring->item_len;
13826 		DHD_ERROR(("TxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
13827 			"SIZE %d \r\n",
13828 			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13829 			ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
13830 			dma_buf_len));
13831 		DHD_ERROR(("TxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13832 		if (dhd->dma_d2h_ring_upd_support) {
13833 			drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
13834 			dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
13835 			DHD_ERROR(("TxCpl: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
13836 		}
13837 		if (dhd->bus->is_linkdown) {
13838 			DHD_ERROR(("TxCpl: From Shared Mem: RD and WR are invalid"
13839 				" due to PCIe link down\r\n"));
13840 		} else {
13841 			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13842 			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13843 			DHD_ERROR(("TxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
13844 		}
13845 		DHD_ERROR(("TxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
13846 	}
13847 
13848 	ring = &prot->d2hring_rx_cpln;
13849 	if (ring) {
13850 		ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13851 		ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13852 		dma_buf_len = ring->max_items * ring->item_len;
13853 		DHD_ERROR(("RxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
13854 			"SIZE %d \r\n",
13855 			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13856 			ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
13857 			dma_buf_len));
13858 		DHD_ERROR(("RxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13859 		if (dhd->dma_d2h_ring_upd_support) {
13860 			drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
13861 			dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
13862 			DHD_ERROR(("RxCpl: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
13863 		}
13864 		if (dhd->bus->is_linkdown) {
13865 			DHD_ERROR(("RxCpl: From Shared Mem: RD and WR are invalid"
13866 				" due to PCIe link down\r\n"));
13867 		} else {
13868 			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13869 			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13870 			DHD_ERROR(("RxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
13871 		}
13872 		DHD_ERROR(("RxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
13873 	}
13874 
13875 	ring = &prot->h2dring_rxp_subn;
13876 	if (ring) {
13877 		ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13878 		ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13879 		dma_buf_len = ring->max_items * ring->item_len;
13880 		DHD_ERROR(("RxSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
13881 			"SIZE %d \r\n",
13882 			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13883 			ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
13884 			dma_buf_len));
13885 		DHD_ERROR(("RxSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13886 		if (dhd->dma_h2d_ring_upd_support) {
13887 			drd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
13888 			dwr = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
13889 			DHD_ERROR(("RxSub: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
13890 		}
13891 		if (dhd->bus->is_linkdown) {
13892 			DHD_ERROR(("RxSub: From Shared Mem: RD and WR are invalid"
13893 				" due to PCIe link down\r\n"));
13894 		} else {
13895 			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13896 			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13897 			DHD_ERROR(("RxSub: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
13898 		}
13899 		DHD_ERROR(("RxSub: Expected seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
13900 	}
13901 
13902 	DHD_ERROR(("%s: cur_ioctlresp_bufs_posted %d cur_event_bufs_posted %d\n",
13903 		__FUNCTION__, prot->cur_ioctlresp_bufs_posted, prot->cur_event_bufs_posted));
13904 #ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
13905 	DHD_ERROR(("%s: multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n",
13906 		__FUNCTION__, dhd->multi_client_flow_rings, dhd->max_multi_client_flow_rings));
13907 #endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
13908 
13909 	DHD_ERROR(("pktid_txq_start_cnt: %d\n", prot->pktid_txq_start_cnt));
13910 	DHD_ERROR(("pktid_txq_stop_cnt: %d\n", prot->pktid_txq_stop_cnt));
13911 	DHD_ERROR(("pktid_depleted_cnt: %d\n", prot->pktid_depleted_cnt));
13912 	dhd_pcie_debug_info_dump(dhd);
13913 #ifdef DHD_LB_STATS
13914 	DHD_ERROR(("\nlb_rxp_stop_thr_hitcnt: %llu lb_rxp_strt_thr_hitcnt: %llu\n",
13915 		dhd->lb_rxp_stop_thr_hitcnt, dhd->lb_rxp_strt_thr_hitcnt));
13916 	DHD_ERROR(("\nlb_rxp_napi_sched_cnt: %llu lb_rxp_napi_complete_cnt: %llu\n",
13917 		dhd->lb_rxp_napi_sched_cnt, dhd->lb_rxp_napi_complete_cnt));
13918 #endif /* DHD_LB_STATS */
13919 #ifdef DHD_TIMESYNC
13920 	dhd_timesync_debug_info_print(dhd);
13921 #endif /* DHD_TIMESYNC */
13922 	return 0;
13923 }
13924 
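/**
 * Dump the host-resident ring index update blocks for debugging. After a CPU
 * cache invalidate, one 32-bit little-endian entry is printed per H2D queue
 * (common rings, then flowrings) from d2h_dma_indx_wr_buf, and one entry per
 * D2H common completion ring (ctrl, tx, rx) from h2d_dma_indx_rd_buf.
 */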
13925 int
13926 dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
13927 {
13928 	uint32 *ptr;
13929 	uint32 value;
13930 
13931 	if (dhd->prot->d2h_dma_indx_wr_buf.va) {
13932 		uint32 i;
13933 		uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus);
13934 
13935 		OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va,
13936 			dhd->prot->d2h_dma_indx_wr_buf.len);
13937 
13938 		ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va);
13939 
13940 		bcm_bprintf(b, "\n max_h2d_queues %d\n", max_h2d_queues);
13941 
13942 		bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%4p\n", ptr);
13943 		value = ltoh32(*ptr);
13944 		bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value);
13945 		ptr++;
13946 		value = ltoh32(*ptr);
13947 		bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value);
13948 
13949 		ptr++;
13950 		bcm_bprintf(b, "RPTR block Flow rings , 0x%4p\n", ptr);
13951 		for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
13952 			value = ltoh32(*ptr);
13953 			bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
13954 			ptr++;
13955 		}
13956 	}
13957 
13958 	if (dhd->prot->h2d_dma_indx_rd_buf.va) {
13959 		OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va,
13960 			dhd->prot->h2d_dma_indx_rd_buf.len);
13961 
13962 		ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va);
13963 
13964 		bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%4p\n", ptr);
13965 		value = ltoh32(*ptr);
13966 		bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value);
13967 		ptr++;
13968 		value = ltoh32(*ptr);
13969 		bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value);
13970 		ptr++;
13971 		value = ltoh32(*ptr);
13972 		bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value);
13973 	}
13974 
13975 	return 0;
13976 }
13977 
13978 uint32
13979 dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val)
13980 {
13981 	dhd_prot_t *prot = dhd->prot;
13982 #if DHD_DBG_SHOW_METADATA
13983 	prot->metadata_dbg = val;
13984 #endif
13985 	return (uint32)prot->metadata_dbg;
13986 }
13987 
13988 uint32
13989 dhd_prot_metadata_dbg_get(dhd_pub_t *dhd)
13990 {
13991 	dhd_prot_t *prot = dhd->prot;
13992 	return (uint32)prot->metadata_dbg;
13993 }
13994 
13995 uint32
13996 dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx)
13997 {
13998 #if !(defined(BCM_ROUTER_DHD))
13999 	dhd_prot_t *prot = dhd->prot;
14000 	if (rx)
14001 		prot->rx_metadata_offset = (uint16)val;
14002 	else
14003 		prot->tx_metadata_offset = (uint16)val;
14004 #endif /* ! BCM_ROUTER_DHD */
14005 	return dhd_prot_metadatalen_get(dhd, rx);
14006 }
14007 
14008 uint32
14009 dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx)
14010 {
14011 	dhd_prot_t *prot = dhd->prot;
14012 	if (rx)
14013 		return prot->rx_metadata_offset;
14014 	else
14015 		return prot->tx_metadata_offset;
14016 }
14017 
14018 /** optimization to write "n" tx items at a time to ring */
14019 uint32
14020 dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val)
14021 {
14022 	dhd_prot_t *prot = dhd->prot;
14023 	if (set)
14024 		prot->txp_threshold = (uint16)val;
14025 	val = prot->txp_threshold;
14026 	return val;
14027 }
14028 
14029 #ifdef DHD_RX_CHAINING
14030 
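/*
 * RX chaining: received packets belonging to the same flow (same SA/DA/prio on
 * a chainable interface, IPv4 or IPv6 unicast) are linked with PKTSETCLINK and
 * handed to dhd_bus_rx_frame() as a single chain. The chain is committed when
 * a packet of a different flow arrives, when it is not chainable, or when it
 * reaches DHD_PKT_CTF_MAX_CHAIN_LEN.
 */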
14031 static INLINE void
14032 BCMFASTPATH(dhd_rxchain_reset)(rxchain_info_t *rxchain)
14033 {
14034 	rxchain->pkt_count = 0;
14035 }
14036 
14037 static void
14038 BCMFASTPATH(dhd_rxchain_frame)(dhd_pub_t *dhd, void *pkt, uint ifidx)
14039 {
14040 	uint8 *eh;
14041 	uint8 prio;
14042 	dhd_prot_t *prot = dhd->prot;
14043 	rxchain_info_t *rxchain = &prot->rxchain;
14044 
14045 	ASSERT(!PKTISCHAINED(pkt));
14046 	ASSERT(PKTCLINK(pkt) == NULL);
14047 	ASSERT(PKTCGETATTR(pkt) == 0);
14048 
14049 	eh = PKTDATA(dhd->osh, pkt);
14050 	prio = IP_TOS46(eh + ETHER_HDR_LEN) >> IPV4_TOS_PREC_SHIFT;
14051 
14052 	if (rxchain->pkt_count && !(PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa,
14053 		rxchain->h_da, rxchain->h_prio))) {
14054 		/* Different flow - First release the existing chain */
14055 		dhd_rxchain_commit(dhd);
14056 	}
14057 
14058 	/* For routers, with HNDCTF, link the packets using PKTSETCLINK, */
14059 	/* so that the chain can be handed off to CTF bridge as is. */
14060 	if (rxchain->pkt_count == 0) {
14061 		/* First packet in chain */
14062 		rxchain->pkthead = rxchain->pkttail = pkt;
14063 
14064 		/* Keep a copy of ptr to ether_da, ether_sa and prio */
14065 		rxchain->h_da = ((struct ether_header *)eh)->ether_dhost;
14066 		rxchain->h_sa = ((struct ether_header *)eh)->ether_shost;
14067 		rxchain->h_prio = prio;
14068 		rxchain->ifidx = ifidx;
14069 		rxchain->pkt_count++;
14070 	} else {
14071 		/* Same flow - keep chaining */
14072 		PKTSETCLINK(rxchain->pkttail, pkt);
14073 		rxchain->pkttail = pkt;
14074 		rxchain->pkt_count++;
14075 	}
14076 
14077 	if ((dhd_rx_pkt_chainable(dhd, ifidx)) && (!ETHER_ISMULTI(rxchain->h_da)) &&
14078 		((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) ||
14079 		(((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IPV6)))) {
14080 		PKTSETCHAINED(dhd->osh, pkt);
14081 		PKTCINCRCNT(rxchain->pkthead);
14082 		PKTCADDLEN(rxchain->pkthead, PKTLEN(dhd->osh, pkt));
14083 	} else {
14084 		dhd_rxchain_commit(dhd);
14085 		return;
14086 	}
14087 
14088 	/* If we have hit the max chain length, dispatch the chain and reset */
14089 	if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN) {
14090 		dhd_rxchain_commit(dhd);
14091 	}
14092 }
14093 
14094 static void
14095 BCMFASTPATH(dhd_rxchain_commit)(dhd_pub_t *dhd)
14096 {
14097 	dhd_prot_t *prot = dhd->prot;
14098 	rxchain_info_t *rxchain = &prot->rxchain;
14099 
14100 	if (rxchain->pkt_count == 0)
14101 		return;
14102 
14103 	/* Release the packets to dhd_linux */
14104 	dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx, rxchain->pkt_count);
14105 
14106 	/* Reset the chain */
14107 	dhd_rxchain_reset(rxchain);
14108 }
14109 
14110 #endif /* DHD_RX_CHAINING */
14111 
14112 #ifdef IDLE_TX_FLOW_MGMT
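/**
 * Resume an idle TX flow ring: fetch a pre-initialized msgbuf_ring from the
 * flowring pool, post a MSG_TYPE_FLOW_RING_RESUME request on the control
 * submission ring, and republish the flow ring's WRITE index (via the host DMA
 * index buffer, IFRM, or TCM shared memory, depending on which is active).
 */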
14113 int
14114 dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
14115 {
14116 	tx_idle_flowring_resume_request_t *flow_resume_rqst;
14117 	msgbuf_ring_t *flow_ring;
14118 	dhd_prot_t *prot = dhd->prot;
14119 	unsigned long flags;
14120 	uint16 alloced = 0;
14121 	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
14122 
14123 	/* Fetch a pre-initialized msgbuf_ring from the flowring pool */
14124 	flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
14125 	if (flow_ring == NULL) {
14126 		DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
14127 			__FUNCTION__, flow_ring_node->flowid));
14128 		return BCME_NOMEM;
14129 	}
14130 
14131 #ifdef PCIE_INB_DW
14132 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
14133 		return BCME_ERROR;
14134 #endif /* PCIE_INB_DW */
14135 
14136 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
14137 
14138 	/* Request for ctrl_ring buffer space */
14139 	flow_resume_rqst = (tx_idle_flowring_resume_request_t *)
14140 		dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
14141 
14142 	if (flow_resume_rqst == NULL) {
14143 		dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
14144 		DHD_ERROR(("%s: Flow resume Req flowid %d - failure ring space\n",
14145 			__FUNCTION__, flow_ring_node->flowid));
14146 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
14147 #ifdef PCIE_INB_DW
14148 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
14149 #endif
14150 		return BCME_NOMEM;
14151 	}
14152 
14153 	flow_ring_node->prot_info = (void *)flow_ring;
14154 
14155 	/* Common msg buf hdr */
14156 	flow_resume_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_RESUME;
14157 	flow_resume_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
14158 	flow_resume_rqst->msg.request_id = htol32(0); /* TBD */
14159 
14160 	flow_resume_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
14161 	ctrl_ring->seqnum++;
14162 
14163 	flow_resume_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
14164 	DHD_ERROR(("%s Send Flow resume Req flow ID %d\n",
14165 		__FUNCTION__, flow_ring_node->flowid));
14166 
14167 	/* Update the flow_ring's WRITE index */
14168 	if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
14169 		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
14170 		                      H2D_DMA_INDX_WR_UPD, flow_ring->idx);
14171 	} else if (IFRM_ACTIVE(dhd) && (flow_ring->idx >= BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START)) {
14172 		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
14173 			H2D_IFRM_INDX_WR_UPD,
14174 			(flow_ring->idx - BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START));
14175 	} else {
14176 		dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
14177 			sizeof(uint16), RING_WR_UPD, flow_ring->idx);
14178 	}
14179 
14180 	/* update control subn ring's WR index and ring doorbell to dongle */
14181 	dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_resume_rqst, 1);
14182 
14183 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
14184 
14185 #ifdef PCIE_INB_DW
14186 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
14187 #endif
14188 	return BCME_OK;
14189 } /* dhd_prot_flow_ring_resume */
14190 
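/**
 * Request the dongle to suspend a batch of idle TX flow rings: a single
 * MSG_TYPE_FLOW_RING_SUSPEND message carrying 'count' ring ids is posted on
 * the control submission ring.
 */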
14191 int
14192 dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t *dhd, uint16 *ringid, uint16 count)
14193 {
14194 	tx_idle_flowring_suspend_request_t *flow_suspend_rqst;
14195 	dhd_prot_t *prot = dhd->prot;
14196 	unsigned long flags;
14197 	uint16 index;
14198 	uint16 alloced = 0;
14199 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
14200 
14201 #ifdef PCIE_INB_DW
14202 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
14203 		return BCME_ERROR;
14204 #endif /* PCIE_INB_DW */
14205 
14206 	DHD_RING_LOCK(ring->ring_lock, flags);
14207 
14208 	/* Request for ring buffer space */
14209 	flow_suspend_rqst = (tx_idle_flowring_suspend_request_t *)
14210 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
14211 
14212 	if (flow_suspend_rqst == NULL) {
14213 		DHD_RING_UNLOCK(ring->ring_lock, flags);
14214 		DHD_ERROR(("%s: Flow suspend Req - failure ring space\n", __FUNCTION__));
14215 #ifdef PCIE_INB_DW
14216 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
14217 #endif
14218 		return BCME_NOMEM;
14219 	}
14220 
14221 	/* Common msg buf hdr */
14222 	flow_suspend_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_SUSPEND;
14223 	/* flow_suspend_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; */
14224 	flow_suspend_rqst->msg.request_id = htol32(0); /* TBD */
14225 
14226 	flow_suspend_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
14227 	ring->seqnum++;
14228 
14229 	/* Update flow id  info */
14230 	for (index = 0; index < count; index++)
14231 	{
14232 		flow_suspend_rqst->ring_id[index] = ringid[index];
14233 	}
14234 	flow_suspend_rqst->num = count;
14235 
14236 	DHD_ERROR(("%s sending batch suspend!! count is %d\n", __FUNCTION__, count));
14237 
14238 	/* update ring's WR index and ring doorbell to dongle */
14239 	dhd_prot_ring_write_complete(dhd, ring, flow_suspend_rqst, 1);
14240 
14241 	DHD_RING_UNLOCK(ring->ring_lock, flags);
14242 
14243 #ifdef PCIE_INB_DW
14244 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
14245 #endif
14246 
14247 	return BCME_OK;
14248 }
14249 #endif /* IDLE_TX_FLOW_MGMT */
14250 
14251 #if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
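/*
 * IOCTL tracing (BCMINTERNAL debug builds): the last MAX_IOCTL_TRACE_SIZE
 * requests are kept in a circular buffer with cmd, transaction id and a
 * timestamp; for cmd 262/263 (WLC_GET_VAR/WLC_SET_VAR) the iovar name from the
 * request buffer is recorded as well, so dhd_prot_ioctl_dump() can print it.
 */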
14252 static void
14253 dhd_prot_ioctl_trace(dhd_pub_t *dhd, ioctl_req_msg_t *ioct_rqst, uchar *buf, int len)
14254 {
14255 	struct dhd_prot *prot = dhd->prot;
14256 	uint32 cnt = prot->ioctl_trace_count % MAX_IOCTL_TRACE_SIZE;
14257 
14258 	prot->ioctl_trace[cnt].cmd = ioct_rqst->cmd;
14259 	prot->ioctl_trace[cnt].transid = ioct_rqst->trans_id;
14260 	if ((ioct_rqst->cmd == 262 || ioct_rqst->cmd == 263) && buf)
14261 		memcpy(prot->ioctl_trace[cnt].ioctl_buf, buf,
14262 			len > MAX_IOCTL_BUF_SIZE ? MAX_IOCTL_BUF_SIZE : len);
14263 	else
14264 		memset(prot->ioctl_trace[cnt].ioctl_buf, 0, MAX_IOCTL_BUF_SIZE);
14265 	prot->ioctl_trace[cnt].timestamp = OSL_SYSUPTIME_US();
14266 	prot->ioctl_trace_count++;
14267 }
14268 
14269 static void
14270 dhd_prot_ioctl_dump(dhd_prot_t *prot, struct bcmstrbuf *strbuf)
14271 {
14272 	int dumpsz;
14273 	int i;
14274 
14275 	dumpsz = prot->ioctl_trace_count < MAX_IOCTL_TRACE_SIZE ?
14276 		prot->ioctl_trace_count : MAX_IOCTL_TRACE_SIZE;
14277 	if (dumpsz == 0) {
14278 		bcm_bprintf(strbuf, "\nEmpty IOCTL TRACE\n");
14279 		return;
14280 	}
14281 	bcm_bprintf(strbuf, "----------- IOCTL TRACE --------------\n");
14282 	bcm_bprintf(strbuf, "Timestamp us\t\tCMD\tTransID\tIOVAR\n");
14283 	for (i = 0; i < dumpsz; i ++) {
14284 		bcm_bprintf(strbuf, "%llu\t%d\t%d\t%s\n",
14285 			prot->ioctl_trace[i].timestamp,
14286 			prot->ioctl_trace[i].cmd,
14287 			prot->ioctl_trace[i].transid,
14288 			prot->ioctl_trace[i].ioctl_buf);
14289 	}
14290 }
14291 #endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */
14292 
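/*
 * Decode a TAG_TRAP_PSM_WD TLV (version 1 layout) into readable PSM/MAC
 * register values; dump_psmwd_v2() below handles the version 2 layout, which
 * additionally carries the psm_brwk0..3 and PSM BRC_1 registers.
 */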
14293 static void dump_psmwd_v1(const bcm_tlv_t *tlv, struct bcmstrbuf *b)
14294 {
14295 	const hnd_ext_trap_psmwd_v1_t* psmwd = NULL;
14296 	uint32 i;
14297 	psmwd = (const hnd_ext_trap_psmwd_v1_t *)tlv;
14298 	for (i = 0; i < PSMDBG_REG_READ_CNT_FOR_PSMWDTRAP_V1; i++) {
14299 		bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, psmwd->i32_psmdebug[i]);
14300 	}
14301 	bcm_bprintf(b, " gated clock en: 0x%x\n", psmwd->i16_0x1a8);
14302 	bcm_bprintf(b, " Rcv Fifo Ctrl: 0x%x\n", psmwd->i16_0x406);
14303 	bcm_bprintf(b, " Rx ctrl 1: 0x%x\n", psmwd->i16_0x408);
14304 	bcm_bprintf(b, " Rxe Status 1: 0x%x\n", psmwd->i16_0x41a);
14305 	bcm_bprintf(b, " Rxe Status 2: 0x%x\n", psmwd->i16_0x41c);
14306 	bcm_bprintf(b, " rcv wrd count 0: 0x%x\n", psmwd->i16_0x424);
14307 	bcm_bprintf(b, " rcv wrd count 1: 0x%x\n", psmwd->i16_0x426);
14308 	bcm_bprintf(b, " RCV_LFIFO_STS: 0x%x\n", psmwd->i16_0x456);
14309 	bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", psmwd->i16_0x480);
14310 	bcm_bprintf(b, " TXE CTRL: 0x%x\n", psmwd->i16_0x500);
14311 	bcm_bprintf(b, " TXE Status: 0x%x\n", psmwd->i16_0x50e);
14312 	bcm_bprintf(b, " TXE_xmtdmabusy: 0x%x\n", psmwd->i16_0x55e);
14313 	bcm_bprintf(b, " TXE_XMTfifosuspflush: 0x%x\n", psmwd->i16_0x566);
14314 	bcm_bprintf(b, " IFS Stat: 0x%x\n", psmwd->i16_0x690);
14315 	bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", psmwd->i16_0x692);
14316 	bcm_bprintf(b, " IFS_TX_DUR: 0x%x\n", psmwd->i16_0x694);
14317 	bcm_bprintf(b, " Slow_CTL: 0x%x\n", psmwd->i16_0x6a0);
14318 	bcm_bprintf(b, " PSM BRC: 0x%x\n", psmwd->i16_0x490);
14319 	bcm_bprintf(b, " TXE_AQM fifo Ready: 0x%x\n", psmwd->i16_0x838);
14320 	bcm_bprintf(b, " Dagg ctrl: 0x%x\n", psmwd->i16_0x8c0);
14321 	bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", psmwd->shm_prewds_cnt);
14322 	bcm_bprintf(b, " shm_txtplufl_cnt: 0x%x\n", psmwd->shm_txtplufl_cnt);
14323 	bcm_bprintf(b, " shm_txphyerr_cnt: 0x%x\n", psmwd->shm_txphyerr_cnt);
14324 
14325 }
14326 
14327 static void dump_psmwd_v2(const bcm_tlv_t *tlv, struct bcmstrbuf *b)
14328 {
14329 	const hnd_ext_trap_psmwd_t* psmwd = NULL;
14330 	uint32 i;
14331 	psmwd = (const hnd_ext_trap_psmwd_t *)tlv;
14332 	for (i = 0; i < PSMDBG_REG_READ_CNT_FOR_PSMWDTRAP_V2; i++) {
14333 		bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, psmwd->i32_psmdebug[i]);
14334 	}
14335 
14336 	bcm_bprintf(b, " psm_brwk0: 0x%x\n", psmwd->i16_0x4b8);
14337 	bcm_bprintf(b, " psm_brwk1: 0x%x\n", psmwd->i16_0x4ba);
14338 	bcm_bprintf(b, " psm_brwk2: 0x%x\n", psmwd->i16_0x4bc);
14339 	bcm_bprintf(b, " psm_brwk3: 0x%x\n", psmwd->i16_0x4be);
14340 	bcm_bprintf(b, " PSM BRC_1: 0x%x\n", psmwd->i16_0x4da);
14341 	bcm_bprintf(b, " gated clock en: 0x%x\n", psmwd->i16_0x1a8);
14342 	bcm_bprintf(b, " Rcv Fifo Ctrl: 0x%x\n", psmwd->i16_0x406);
14343 	bcm_bprintf(b, " Rx ctrl 1: 0x%x\n", psmwd->i16_0x408);
14344 	bcm_bprintf(b, " Rxe Status 1: 0x%x\n", psmwd->i16_0x41a);
14345 	bcm_bprintf(b, " Rxe Status 2: 0x%x\n", psmwd->i16_0x41c);
14346 	bcm_bprintf(b, " rcv wrd count 0: 0x%x\n", psmwd->i16_0x424);
14347 	bcm_bprintf(b, " rcv wrd count 1: 0x%x\n", psmwd->i16_0x426);
14348 	bcm_bprintf(b, " RCV_LFIFO_STS: 0x%x\n", psmwd->i16_0x456);
14349 	bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", psmwd->i16_0x480);
14350 	bcm_bprintf(b, " TXE CTRL: 0x%x\n", psmwd->i16_0x500);
14351 	bcm_bprintf(b, " TXE Status: 0x%x\n", psmwd->i16_0x50e);
14352 	bcm_bprintf(b, " TXE_xmtdmabusy: 0x%x\n", psmwd->i16_0x55e);
14353 	bcm_bprintf(b, " TXE_XMTfifosuspflush: 0x%x\n", psmwd->i16_0x566);
14354 	bcm_bprintf(b, " IFS Stat: 0x%x\n", psmwd->i16_0x690);
14355 	bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", psmwd->i16_0x692);
14356 	bcm_bprintf(b, " IFS_TX_DUR: 0x%x\n", psmwd->i16_0x694);
14357 	bcm_bprintf(b, " Slow_CTL: 0x%x\n", psmwd->i16_0x6a0);
14358 	bcm_bprintf(b, " PSM BRC: 0x%x\n", psmwd->i16_0x490);
14359 	bcm_bprintf(b, " TXE_AQM fifo Ready: 0x%x\n", psmwd->i16_0x838);
14360 	bcm_bprintf(b, " Dagg ctrl: 0x%x\n", psmwd->i16_0x8c0);
14361 	bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", psmwd->shm_prewds_cnt);
14362 	bcm_bprintf(b, " shm_txtplufl_cnt: 0x%x\n", psmwd->shm_txtplufl_cnt);
14363 	bcm_bprintf(b, " shm_txphyerr_cnt: 0x%x\n", psmwd->shm_txphyerr_cnt);
14364 }
14365 
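/* Map an extended trap data (ETD) TLV tag to a printable name for the dump. */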
14366 static const char* etd_trap_name(hnd_ext_tag_trap_t tag)
14367 {
14368 	switch (tag) {
14369 	case TAG_TRAP_SIGNATURE: return "TAG_TRAP_SIGNATURE";
14370 	case TAG_TRAP_STACK: return "TAG_TRAP_STACK";
14371 	case TAG_TRAP_MEMORY: return "TAG_TRAP_MEMORY";
14372 	case TAG_TRAP_DEEPSLEEP: return "TAG_TRAP_DEEPSLEEP";
14373 	case TAG_TRAP_PSM_WD: return "TAG_TRAP_PSM_WD";
14374 	case TAG_TRAP_PHY: return "TAG_TRAP_PHY";
14375 	case TAG_TRAP_BUS: return "TAG_TRAP_BUS";
14376 	case TAG_TRAP_MAC_SUSP: return "TAG_TRAP_MAC_SUSP";
14377 	case TAG_TRAP_BACKPLANE: return "TAG_TRAP_BACKPLANE";
14378 	case TAG_TRAP_PCIE_Q: return "TAG_TRAP_PCIE_Q";
14379 	case TAG_TRAP_WLC_STATE: return "TAG_TRAP_WLC_STATE";
14380 	case TAG_TRAP_MAC_WAKE: return "TAG_TRAP_MAC_WAKE";
14381 	case TAG_TRAP_HMAP: return "TAG_TRAP_HMAP";
14382 	case TAG_TRAP_PHYTXERR_THRESH: return "TAG_TRAP_PHYTXERR_THRESH";
14383 	case TAG_TRAP_HC_DATA: return "TAG_TRAP_HC_DATA";
14384 	case TAG_TRAP_LOG_DATA: return "TAG_TRAP_LOG_DATA";
14385 	case TAG_TRAP_CODE: return "TAG_TRAP_CODE";
14386 	case TAG_TRAP_MEM_BIT_FLIP: return "TAG_TRAP_MEM_BIT_FLIP";
14387 	case TAG_TRAP_LAST:
14388 	default:
14389 		return "Unknown";
14390 	}
14391 	return "Unknown";
14392 }
14393 
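/**
 * Print the dongle's extended trap data: the leading trap_data word, the ETD
 * header, the list of TLV tags found, and a decoded dump of each supported TLV
 * (trap code, signature and stack, backplane, heap, PCIe queue, WLC state,
 * PHY, PSM watchdog, PHY txerr, MAC suspend/wake, bus, HMAP and bit-flip
 * records). With 'raw' set, the buffer is hex-dumped word by word instead.
 */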
14394 int dhd_prot_dump_extended_trap(dhd_pub_t *dhdp, struct bcmstrbuf *b, bool raw)
14395 {
14396 	uint32 i;
14397 	uint32 *ext_data;
14398 	hnd_ext_trap_hdr_t *hdr;
14399 	const bcm_tlv_t *tlv;
14400 	const trap_t *tr;
14401 	const uint32 *stack;
14402 	const hnd_ext_trap_bp_err_t *bpe;
14403 	uint32 raw_len;
14404 
14405 	ext_data = dhdp->extended_trap_data;
14406 
14407 	/* return if there is no extended trap data */
14408 	if (!ext_data || !(dhdp->dongle_trap_data & D2H_DEV_EXT_TRAP_DATA)) {
14409 		bcm_bprintf(b, "%d (0x%x)", dhdp->dongle_trap_data, dhdp->dongle_trap_data);
14410 		return BCME_OK;
14411 	}
14412 
14413 	bcm_bprintf(b, "Extended trap data\n");
14414 
14415 	/* First word is original trap_data */
14416 	bcm_bprintf(b, "trap_data = 0x%08x\n", *ext_data);
14417 	ext_data++;
14418 
14419 	/* Followed by the extended trap data header */
14420 	hdr = (hnd_ext_trap_hdr_t *)ext_data;
14421 	bcm_bprintf(b, "version: %d, len: %d\n", hdr->version, hdr->len);
14422 
14423 	/* Dump a list of all tags found  before parsing data */
14424 	bcm_bprintf(b, "\nTags Found:\n");
14425 	for (i = 0; i < TAG_TRAP_LAST; i++) {
14426 		tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
14427 		if (tlv)
14428 			bcm_bprintf(b, "Tag: %d (%s), Length: %d\n", i, etd_trap_name(i), tlv->len);
14429 	}
14430 
14431 	/* XXX debug dump */
14432 	if (raw) {
14433 		raw_len = sizeof(hnd_ext_trap_hdr_t) + (hdr->len / 4) + (hdr->len % 4 ? 1 : 0);
14434 		for (i = 0; i < raw_len; i++)
14435 		{
14436 			bcm_bprintf(b, "0x%08x ", ext_data[i]);
14437 			if (i % 4 == 3)
14438 				bcm_bprintf(b, "\n");
14439 		}
14440 		return BCME_OK;
14441 	}
14442 
14443 	/* Extract the various supported TLVs from the extended trap data */
14444 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_CODE);
14445 	if (tlv) {
14446 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_CODE), tlv->len);
14447 		bcm_bprintf(b, "ETD TYPE: %d\n", tlv->data[0]);
14448 	}
14449 
14450 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
14451 	if (tlv) {
14452 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_SIGNATURE), tlv->len);
14453 		tr = (const trap_t *)tlv->data;
14454 
14455 		bcm_bprintf(b, "TRAP %x: pc %x, lr %x, sp %x, cpsr %x, spsr %x\n",
14456 		       tr->type, tr->pc, tr->r14, tr->r13, tr->cpsr, tr->spsr);
14457 		bcm_bprintf(b, "  r0 %x, r1 %x, r2 %x, r3 %x, r4 %x, r5 %x, r6 %x\n",
14458 		       tr->r0, tr->r1, tr->r2, tr->r3, tr->r4, tr->r5, tr->r6);
14459 		bcm_bprintf(b, "  r7 %x, r8 %x, r9 %x, r10 %x, r11 %x, r12 %x\n",
14460 		       tr->r7, tr->r8, tr->r9, tr->r10, tr->r11, tr->r12);
14461 	}
14462 
14463 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
14464 	if (tlv) {
14465 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_STACK), tlv->len);
14466 		stack = (const uint32 *)tlv->data;
14467 		for (i = 0; i < (uint32)(tlv->len / 4); i++)
14468 		{
14469 			bcm_bprintf(b, "  0x%08x\n", *stack);
14470 			stack++;
14471 		}
14472 	}
14473 
14474 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BACKPLANE);
14475 	if (tlv) {
14476 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BACKPLANE), tlv->len);
14477 		bpe = (const hnd_ext_trap_bp_err_t *)tlv->data;
14478 		bcm_bprintf(b, " error: %x\n", bpe->error);
14479 		bcm_bprintf(b, " coreid: %x\n", bpe->coreid);
14480 		bcm_bprintf(b, " baseaddr: %x\n", bpe->baseaddr);
14481 		bcm_bprintf(b, " ioctrl: %x\n", bpe->ioctrl);
14482 		bcm_bprintf(b, " iostatus: %x\n", bpe->iostatus);
14483 		bcm_bprintf(b, " resetctrl: %x\n", bpe->resetctrl);
14484 		bcm_bprintf(b, " resetstatus: %x\n", bpe->resetstatus);
14485 		bcm_bprintf(b, " errlogctrl: %x\n", bpe->errlogctrl);
14486 		bcm_bprintf(b, " errlogdone: %x\n", bpe->errlogdone);
14487 		bcm_bprintf(b, " errlogstatus: %x\n", bpe->errlogstatus);
14488 		bcm_bprintf(b, " errlogaddrlo: %x\n", bpe->errlogaddrlo);
14489 		bcm_bprintf(b, " errlogaddrhi: %x\n", bpe->errlogaddrhi);
14490 		bcm_bprintf(b, " errlogid: %x\n", bpe->errlogid);
14491 		bcm_bprintf(b, " errloguser: %x\n", bpe->errloguser);
14492 		bcm_bprintf(b, " errlogflags: %x\n", bpe->errlogflags);
14493 	}
14494 
14495 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MEMORY);
14496 	if (tlv) {
14497 		const hnd_ext_trap_heap_err_t* hme;
14498 
14499 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MEMORY), tlv->len);
14500 		hme = (const hnd_ext_trap_heap_err_t *)tlv->data;
14501 		bcm_bprintf(b, " arena total: %d\n", hme->arena_total);
14502 		bcm_bprintf(b, " heap free: %d\n", hme->heap_free);
14503 		bcm_bprintf(b, " heap in use: %d\n", hme->heap_inuse);
14504 		bcm_bprintf(b, " mf count: %d\n", hme->mf_count);
14505 		bcm_bprintf(b, " stack LWM: %x\n", hme->stack_lwm);
14506 
14507 		bcm_bprintf(b, " Histogram:\n");
14508 		for (i = 0; i < (HEAP_HISTOGRAM_DUMP_LEN * 2); i += 2) {
14509 			if (hme->heap_histogm[i] == 0xfffe)
14510 				bcm_bprintf(b, " Others\t%d\t?\n", hme->heap_histogm[i + 1]);
14511 			else if (hme->heap_histogm[i] == 0xffff)
14512 				bcm_bprintf(b, " >= 256K\t%d\t?\n", hme->heap_histogm[i + 1]);
14513 			else
14514 				bcm_bprintf(b, " %d\t%d\t%d\n", hme->heap_histogm[i] << 2,
14515 					hme->heap_histogm[i + 1], (hme->heap_histogm[i] << 2)
14516 					* hme->heap_histogm[i + 1]);
14517 		}
14518 
14519 		bcm_bprintf(b, " Max free block: %d\n", hme->max_sz_free_blk[0] << 2);
14520 		for (i = 1; i < HEAP_MAX_SZ_BLKS_LEN; i++) {
14521 			bcm_bprintf(b, " Next lgst free block: %d\n", hme->max_sz_free_blk[i] << 2);
14522 		}
14523 	}
14524 
14525 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PCIE_Q);
14526 	if (tlv) {
14527 		const hnd_ext_trap_pcie_mem_err_t* pqme;
14528 
14529 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PCIE_Q), tlv->len);
14530 		pqme = (const hnd_ext_trap_pcie_mem_err_t *)tlv->data;
14531 		bcm_bprintf(b, " d2h queue len: %x\n", pqme->d2h_queue_len);
14532 		bcm_bprintf(b, " d2h req queue len: %x\n", pqme->d2h_req_queue_len);
14533 	}
14534 
14535 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_WLC_STATE);
14536 	if (tlv) {
14537 		const hnd_ext_trap_wlc_mem_err_t* wsme;
14538 
14539 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_WLC_STATE), tlv->len);
14540 		wsme = (const hnd_ext_trap_wlc_mem_err_t *)tlv->data;
14541 		bcm_bprintf(b, " instance: %d\n", wsme->instance);
14542 		bcm_bprintf(b, " associated: %d\n", wsme->associated);
14543 		bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
14544 		bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
14545 		bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
14546 		bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
14547 		bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]);
14548 		bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]);
14549 
14550 		if (tlv->len >= (sizeof(*wsme) * 2)) {
14551 			wsme++;
14552 			bcm_bprintf(b, "\n instance: %d\n", wsme->instance);
14553 			bcm_bprintf(b, " associated: %d\n", wsme->associated);
14554 			bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
14555 			bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
14556 			bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
14557 			bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
14558 			bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]);
14559 			bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]);
14560 		}
14561 	}
14562 
14563 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PHY);
14564 	if (tlv) {
14565 		const hnd_ext_trap_phydbg_t* phydbg;
14566 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PHY), tlv->len);
14567 		phydbg = (const hnd_ext_trap_phydbg_t *)tlv->data;
14568 		bcm_bprintf(b, " err: 0x%x\n", phydbg->err);
14569 		bcm_bprintf(b, " RxFeStatus: 0x%x\n", phydbg->RxFeStatus);
14570 		bcm_bprintf(b, " TxFIFOStatus0: 0x%x\n", phydbg->TxFIFOStatus0);
14571 		bcm_bprintf(b, " TxFIFOStatus1: 0x%x\n", phydbg->TxFIFOStatus1);
14572 		bcm_bprintf(b, " RfseqMode: 0x%x\n", phydbg->RfseqMode);
14573 		bcm_bprintf(b, " RfseqStatus0: 0x%x\n", phydbg->RfseqStatus0);
14574 		bcm_bprintf(b, " RfseqStatus1: 0x%x\n", phydbg->RfseqStatus1);
14575 		bcm_bprintf(b, " RfseqStatus_Ocl: 0x%x\n", phydbg->RfseqStatus_Ocl);
14576 		bcm_bprintf(b, " RfseqStatus_Ocl1: 0x%x\n", phydbg->RfseqStatus_Ocl1);
14577 		bcm_bprintf(b, " OCLControl1: 0x%x\n", phydbg->OCLControl1);
14578 		bcm_bprintf(b, " TxError: 0x%x\n", phydbg->TxError);
14579 		bcm_bprintf(b, " bphyTxError: 0x%x\n", phydbg->bphyTxError);
14580 		bcm_bprintf(b, " TxCCKError: 0x%x\n", phydbg->TxCCKError);
14581 		bcm_bprintf(b, " TxCtrlWrd0: 0x%x\n", phydbg->TxCtrlWrd0);
14582 		bcm_bprintf(b, " TxCtrlWrd1: 0x%x\n", phydbg->TxCtrlWrd1);
14583 		bcm_bprintf(b, " TxCtrlWrd2: 0x%x\n", phydbg->TxCtrlWrd2);
14584 		bcm_bprintf(b, " TxLsig0: 0x%x\n", phydbg->TxLsig0);
14585 		bcm_bprintf(b, " TxLsig1: 0x%x\n", phydbg->TxLsig1);
14586 		bcm_bprintf(b, " TxVhtSigA10: 0x%x\n", phydbg->TxVhtSigA10);
14587 		bcm_bprintf(b, " TxVhtSigA11: 0x%x\n", phydbg->TxVhtSigA11);
14588 		bcm_bprintf(b, " TxVhtSigA20: 0x%x\n", phydbg->TxVhtSigA20);
14589 		bcm_bprintf(b, " TxVhtSigA21: 0x%x\n", phydbg->TxVhtSigA21);
14590 		bcm_bprintf(b, " txPktLength: 0x%x\n", phydbg->txPktLength);
14591 		bcm_bprintf(b, " txPsdulengthCtr: 0x%x\n", phydbg->txPsdulengthCtr);
14592 		bcm_bprintf(b, " gpioClkControl: 0x%x\n", phydbg->gpioClkControl);
14593 		bcm_bprintf(b, " gpioSel: 0x%x\n", phydbg->gpioSel);
14594 		bcm_bprintf(b, " pktprocdebug: 0x%x\n", phydbg->pktprocdebug);
14595 		for (i = 0; i < 3; i++)
14596 			bcm_bprintf(b, " gpioOut[%d]: 0x%x\n", i, phydbg->gpioOut[i]);
14597 	}
14598 
14599 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PSM_WD);
14600 	if (tlv) {
14601 		const hnd_ext_trap_psmwd_t* psmwd;
14602 
14603 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PSM_WD), tlv->len);
14604 		psmwd = (const hnd_ext_trap_psmwd_t *)tlv->data;
14605 		bcm_bprintf(b, " version: 0x%x\n", psmwd->version);
14606 		bcm_bprintf(b, " maccontrol: 0x%x\n", psmwd->i32_maccontrol);
14607 		bcm_bprintf(b, " maccommand: 0x%x\n", psmwd->i32_maccommand);
14608 		bcm_bprintf(b, " macintstatus: 0x%x\n", psmwd->i32_macintstatus);
14609 		bcm_bprintf(b, " phydebug: 0x%x\n", psmwd->i32_phydebug);
14610 		bcm_bprintf(b, " clk_ctl_st: 0x%x\n", psmwd->i32_clk_ctl_st);
14611 		if (psmwd->version == 1) {
14612 			dump_psmwd_v1(tlv, b);
14613 		}
14614 		if (psmwd->version == 2) {
14615 			dump_psmwd_v2(tlv, b);
14616 		}
14617 	}
14618 /* PHY TxErr MacDump */
14619 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PHYTXERR_THRESH);
14620 	if (tlv) {
14621 		const hnd_ext_trap_macphytxerr_t* phytxerr = NULL;
14622 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PHYTXERR_THRESH), tlv->len);
14623 		phytxerr = (const hnd_ext_trap_macphytxerr_t *)tlv->data;
14624 		bcm_bprintf(b, " version: 0x%x\n", phytxerr->version);
14625 		bcm_bprintf(b, " trap_reason: %d\n", phytxerr->trap_reason);
14626 		bcm_bprintf(b, " Tsf_rx_ts_0x63E: 0x%x\n", phytxerr->i16_0x63E);
14627 		bcm_bprintf(b, " Tsf_tx_ts_0x640: 0x%x\n", phytxerr->i16_0x640);
14628 		bcm_bprintf(b, " tsf_tmr_rx_end_ts_0x642: 0x%x\n", phytxerr->i16_0x642);
14629 		bcm_bprintf(b, " TDC_FrmLen0_0x846: 0x%x\n", phytxerr->i16_0x846);
14630 		bcm_bprintf(b, " TDC_FrmLen1_0x848: 0x%x\n", phytxerr->i16_0x848);
14631 		bcm_bprintf(b, " TDC_Txtime_0x84a: 0x%x\n", phytxerr->i16_0x84a);
14632 		bcm_bprintf(b, " TXE_BytCntInTxFrmLo_0xa5a: 0x%x\n", phytxerr->i16_0xa5a);
14633 		bcm_bprintf(b, " TXE_BytCntInTxFrmHi_0xa5c: 0x%x\n", phytxerr->i16_0xa5c);
14634 		bcm_bprintf(b, " TDC_VhtPsduLen0_0x856: 0x%x\n", phytxerr->i16_0x856);
14635 		bcm_bprintf(b, " TDC_VhtPsduLen1_0x858: 0x%x\n", phytxerr->i16_0x858);
14636 		bcm_bprintf(b, " PSM_BRC: 0x%x\n", phytxerr->i16_0x490);
14637 		bcm_bprintf(b, " PSM_BRC_1: 0x%x\n", phytxerr->i16_0x4d8);
14638 		bcm_bprintf(b, " shm_txerr_reason: 0x%x\n", phytxerr->shm_txerr_reason);
14639 		bcm_bprintf(b, " shm_pctl0: 0x%x\n", phytxerr->shm_pctl0);
14640 		bcm_bprintf(b, " shm_pctl1: 0x%x\n", phytxerr->shm_pctl1);
14641 		bcm_bprintf(b, " shm_pctl2: 0x%x\n", phytxerr->shm_pctl2);
14642 		bcm_bprintf(b, " shm_lsig0: 0x%x\n", phytxerr->shm_lsig0);
14643 		bcm_bprintf(b, " shm_lsig1: 0x%x\n", phytxerr->shm_lsig1);
14644 		bcm_bprintf(b, " shm_plcp0: 0x%x\n", phytxerr->shm_plcp0);
14645 		bcm_bprintf(b, " shm_plcp1: 0x%x\n", phytxerr->shm_plcp1);
14646 		bcm_bprintf(b, " shm_plcp2: 0x%x\n", phytxerr->shm_plcp2);
14647 		bcm_bprintf(b, " shm_vht_sigb0: 0x%x\n", phytxerr->shm_vht_sigb0);
14648 		bcm_bprintf(b, " shm_vht_sigb1: 0x%x\n", phytxerr->shm_vht_sigb1);
14649 		bcm_bprintf(b, " shm_tx_tst: 0x%x\n", phytxerr->shm_tx_tst);
14650 		bcm_bprintf(b, " shm_txerr_tm: 0x%x\n", phytxerr->shm_txerr_tm);
14651 		bcm_bprintf(b, " shm_curchannel: 0x%x\n", phytxerr->shm_curchannel);
14652 		bcm_bprintf(b, " shm_blk_crx_rxtsf_pos: 0x%x\n", phytxerr->shm_crx_rxtsf_pos);
14653 		bcm_bprintf(b, " shm_lasttx_tsf: 0x%x\n", phytxerr->shm_lasttx_tsf);
14654 		bcm_bprintf(b, " shm_s_rxtsftmrval: 0x%x\n", phytxerr->shm_s_rxtsftmrval);
14655 		bcm_bprintf(b, " Phy_0x29: 0x%x\n", phytxerr->i16_0x29);
14656 		bcm_bprintf(b, " Phy_0x2a: 0x%x\n", phytxerr->i16_0x2a);
14657 	}
14658 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_SUSP);
14659 	if (tlv) {
14660 		const hnd_ext_trap_macsusp_t* macsusp;
14661 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_SUSP), tlv->len);
14662 		macsusp = (const hnd_ext_trap_macsusp_t *)tlv->data;
14663 		bcm_bprintf(b, " version: %d\n", macsusp->version);
14664 		bcm_bprintf(b, " trap_reason: %d\n", macsusp->trap_reason);
14665 		bcm_bprintf(b, " maccontrol: 0x%x\n", macsusp->i32_maccontrol);
14666 		bcm_bprintf(b, " maccommand: 0x%x\n", macsusp->i32_maccommand);
14667 		bcm_bprintf(b, " macintstatus: 0x%x\n", macsusp->i32_macintstatus);
14668 		for (i = 0; i < 4; i++)
14669 			bcm_bprintf(b, " phydebug[%d]: 0x%x\n", i, macsusp->i32_phydebug[i]);
14670 		for (i = 0; i < 8; i++)
14671 			bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macsusp->i32_psmdebug[i]);
14672 		bcm_bprintf(b, " Rxe Status_1: 0x%x\n", macsusp->i16_0x41a);
14673 		bcm_bprintf(b, " Rxe Status_2: 0x%x\n", macsusp->i16_0x41c);
14674 		bcm_bprintf(b, " PSM BRC: 0x%x\n", macsusp->i16_0x490);
14675 		bcm_bprintf(b, " TXE Status: 0x%x\n", macsusp->i16_0x50e);
14676 		bcm_bprintf(b, " TXE xmtdmabusy: 0x%x\n", macsusp->i16_0x55e);
14677 		bcm_bprintf(b, " TXE XMTfifosuspflush: 0x%x\n", macsusp->i16_0x566);
14678 		bcm_bprintf(b, " IFS Stat: 0x%x\n", macsusp->i16_0x690);
14679 		bcm_bprintf(b, " IFS MEDBUSY CTR: 0x%x\n", macsusp->i16_0x692);
14680 		bcm_bprintf(b, " IFS TX DUR: 0x%x\n", macsusp->i16_0x694);
14681 		bcm_bprintf(b, " WEP CTL: 0x%x\n", macsusp->i16_0x7c0);
14682 		bcm_bprintf(b, " TXE AQM fifo Ready: 0x%x\n", macsusp->i16_0x838);
14683 		bcm_bprintf(b, " MHP status: 0x%x\n", macsusp->i16_0x880);
14684 		bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", macsusp->shm_prewds_cnt);
14685 		bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macsusp->shm_ucode_dbgst);
14686 	}
14687 
14688 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_WAKE);
14689 	if (tlv) {
14690 		const hnd_ext_trap_macenab_t* macwake;
14691 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_WAKE), tlv->len);
14692 		macwake = (const hnd_ext_trap_macenab_t *)tlv->data;
14693 		bcm_bprintf(b, " version: 0x%x\n", macwake->version);
14694 		bcm_bprintf(b, " trap_reason: 0x%x\n", macwake->trap_reason);
14695 		bcm_bprintf(b, " maccontrol: 0x%x\n", macwake->i32_maccontrol);
14696 		bcm_bprintf(b, " maccommand: 0x%x\n", macwake->i32_maccommand);
14697 		bcm_bprintf(b, " macintstatus: 0x%x\n", macwake->i32_macintstatus);
14698 		for (i = 0; i < 8; i++)
14699 			bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macwake->i32_psmdebug[i]);
14700 		bcm_bprintf(b, " clk_ctl_st: 0x%x\n", macwake->i32_clk_ctl_st);
14701 		bcm_bprintf(b, " powerctl: 0x%x\n", macwake->i32_powerctl);
14702 		bcm_bprintf(b, " gated clock en: 0x%x\n", macwake->i16_0x1a8);
14703 		bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", macwake->i16_0x480);
14704 		bcm_bprintf(b, " PSM BRC: 0x%x\n", macwake->i16_0x490);
14705 		bcm_bprintf(b, " TSF CTL: 0x%x\n", macwake->i16_0x600);
14706 		bcm_bprintf(b, " IFS Stat: 0x%x\n", macwake->i16_0x690);
14707 		bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", macwake->i16_0x692);
14708 		bcm_bprintf(b, " Slow_CTL: 0x%x\n", macwake->i16_0x6a0);
14709 		bcm_bprintf(b, " Slow_FRAC: 0x%x\n", macwake->i16_0x6a6);
14710 		bcm_bprintf(b, " fast power up delay: 0x%x\n", macwake->i16_0x6a8);
14711 		bcm_bprintf(b, " Slow_PER: 0x%x\n", macwake->i16_0x6aa);
14712 		bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macwake->shm_ucode_dbgst);
14713 	}
14714 
14715 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BUS);
14716 	if (tlv) {
14717 		const bcm_dngl_pcie_hc_t* hc;
14718 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BUS), tlv->len);
14719 		hc = (const bcm_dngl_pcie_hc_t *)tlv->data;
14720 		bcm_bprintf(b, " version: 0x%x\n", hc->version);
14721 		bcm_bprintf(b, " reserved: 0x%x\n", hc->reserved);
14722 		bcm_bprintf(b, " pcie_err_ind_type: 0x%x\n", hc->pcie_err_ind_type);
14723 		bcm_bprintf(b, " pcie_flag: 0x%x\n", hc->pcie_flag);
14724 		bcm_bprintf(b, " pcie_control_reg: 0x%x\n", hc->pcie_control_reg);
14725 		for (i = 0; i < HC_PCIEDEV_CONFIG_REGLIST_MAX; i++)
14726 			bcm_bprintf(b, " pcie_config_regs[%d]: 0x%x\n", i, hc->pcie_config_regs[i]);
14727 	}
14728 
14729 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_HMAP);
14730 	if (tlv) {
14731 		const pcie_hmapviolation_t* hmap;
14732 		hmap = (const pcie_hmapviolation_t *)tlv->data;
14733 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_HMAP), tlv->len);
14734 		bcm_bprintf(b, " HMAP Vio Addr Low: 0x%x\n", hmap->hmap_violationaddr_lo);
14735 		bcm_bprintf(b, " HMAP Vio Addr Hi: 0x%x\n", hmap->hmap_violationaddr_hi);
14736 		bcm_bprintf(b, " HMAP Vio Info: 0x%x\n", hmap->hmap_violation_info);
14737 	}
14738 
14739 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MEM_BIT_FLIP);
14740 	if (tlv) {
14741 		const hnd_ext_trap_fb_mem_err_t* fbit;
14742 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MEM_BIT_FLIP), tlv->len);
14743 		fbit = (const hnd_ext_trap_fb_mem_err_t *)tlv->data;
14744 		bcm_bprintf(b, " version: %d\n", fbit->version);
14745 		bcm_bprintf(b, " flip_bit_err_time: %d\n", fbit->flip_bit_err_time);
14746 	}
14747 
14748 	return BCME_OK;
14749 }
14750 
14751 #ifdef BCMPCIE
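/**
 * Send a host timestamp request to the dongle: the TLV payload is copied into
 * the pre-allocated hostts_req_buf (only one request may be outstanding) and a
 * MSG_TYPE_HOSTTIMSTAMP message referencing that buffer is posted on the
 * control submission ring.
 */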
14752 int
14753 dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlvs, uint16 tlv_len,
14754 	uint16 seqnum, uint16 xt_id)
14755 {
14756 	dhd_prot_t *prot = dhdp->prot;
14757 	host_timestamp_msg_t *ts_req;
14758 	unsigned long flags;
14759 	uint16 alloced = 0;
14760 	uchar *ts_tlv_buf;
14761 	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
14762 
14763 	if ((tlvs == NULL) || (tlv_len == 0)) {
14764 		DHD_ERROR(("%s: argument error tlv: %p, tlv_len %d\n",
14765 			__FUNCTION__, tlvs, tlv_len));
14766 		return -1;
14767 	}
14768 
14769 #ifdef PCIE_INB_DW
14770 	if (dhd_prot_inc_hostactive_devwake_assert(dhdp->bus) != BCME_OK)
14771 		return BCME_ERROR;
14772 #endif /* PCIE_INB_DW */
14773 
14774 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
14775 
14776 	/* if Host TS req already pending go away */
14777 	if (prot->hostts_req_buf_inuse == TRUE) {
14778 		DHD_ERROR(("one host TS request already pending at device\n"));
14779 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
14780 #ifdef PCIE_INB_DW
14781 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
14782 #endif
14783 		return -1;
14784 	}
14785 
14786 	/* Request for cbuf space */
14787 	ts_req = (host_timestamp_msg_t*)dhd_prot_alloc_ring_space(dhdp, ctrl_ring,
14788 		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,	&alloced, FALSE);
14789 	if (ts_req == NULL) {
14790 		DHD_ERROR(("couldn't allocate space on msgring to send host TS request\n"));
14791 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
14792 #ifdef PCIE_INB_DW
14793 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
14794 #endif
14795 		return -1;
14796 	}
14797 
14798 	/* Common msg buf hdr */
14799 	ts_req->msg.msg_type = MSG_TYPE_HOSTTIMSTAMP;
14800 	ts_req->msg.if_id = 0;
14801 	ts_req->msg.flags =  ctrl_ring->current_phase;
14802 	ts_req->msg.request_id = DHD_H2D_HOSTTS_REQ_PKTID;
14803 
14804 	ts_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
14805 	ctrl_ring->seqnum++;
14806 
14807 	ts_req->xt_id = xt_id;
14808 	ts_req->seqnum = seqnum;
14809 	/* populate TS req buffer info */
14810 	ts_req->input_data_len = htol16(tlv_len);
14811 	ts_req->host_buf_addr.high = htol32(PHYSADDRHI(prot->hostts_req_buf.pa));
14812 	ts_req->host_buf_addr.low = htol32(PHYSADDRLO(prot->hostts_req_buf.pa));
14813 	/* copy ioct payload */
14814 	ts_tlv_buf = (void *) prot->hostts_req_buf.va;
14815 	prot->hostts_req_buf_inuse = TRUE;
14816 	memcpy(ts_tlv_buf, tlvs, tlv_len);
14817 
14818 	OSL_CACHE_FLUSH((void *) prot->hostts_req_buf.va, tlv_len);
14819 
14820 	if (ISALIGNED(ts_tlv_buf, DMA_ALIGN_LEN) == FALSE) {
14821 		DHD_ERROR(("host TS req buffer address unaligned !!!!! \n"));
14822 	}
14823 
14824 	DHD_CTL(("submitted Host TS request request_id %d, data_len %d, xt_id %d, seq %d\n",
14825 		ts_req->msg.request_id, ts_req->input_data_len,
14826 		ts_req->xt_id, ts_req->seqnum));
14827 
14828 	/* upd wrt ptr and raise interrupt */
14829 	dhd_prot_ring_write_complete(dhdp, ctrl_ring, ts_req,
14830 		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
14831 
14832 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
14833 
14834 #ifdef PCIE_INB_DW
14835 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
14836 #endif
14837 	return 0;
14838 } /* dhd_prot_send_host_timestamp */
14839 
14840 bool
14841 dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t *dhd,  bool enable, bool set)
14842 {
14843 	if (set)
14844 		dhd->prot->tx_ts_log_enabled = enable;
14845 
14846 	return dhd->prot->tx_ts_log_enabled;
14847 }
14848 
14849 bool
14850 dhd_prot_data_path_rx_timestamp_logging(dhd_pub_t *dhd,  bool enable, bool set)
14851 {
14852 	if (set)
14853 		dhd->prot->rx_ts_log_enabled = enable;
14854 
14855 	return dhd->prot->rx_ts_log_enabled;
14856 }
14857 
14858 bool
14859 dhd_prot_pkt_noretry(dhd_pub_t *dhd, bool enable, bool set)
14860 {
14861 	if (set)
14862 		dhd->prot->no_retry = enable;
14863 
14864 	return dhd->prot->no_retry;
14865 }
14866 
14867 bool
14868 dhd_prot_pkt_noaggr(dhd_pub_t *dhd, bool enable, bool set)
14869 {
14870 	if (set)
14871 		dhd->prot->no_aggr = enable;
14872 
14873 	return dhd->prot->no_aggr;
14874 }
14875 
14876 bool
14877 dhd_prot_pkt_fixed_rate(dhd_pub_t *dhd, bool enable, bool set)
14878 {
14879 	if (set)
14880 		dhd->prot->fixed_rate = enable;
14881 
14882 	return dhd->prot->fixed_rate;
14883 }
14884 #endif /* BCMPCIE */
14885 
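/*
 * Free the host-resident DMA index buffers handled here: the H2D write-index
 * block and the D2H read-index block.
 */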
14886 void
14887 dhd_prot_dma_indx_free(dhd_pub_t *dhd)
14888 {
14889 	dhd_prot_t *prot = dhd->prot;
14890 
14891 	dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
14892 	dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
14893 }
14894 
14895 void
14896 dhd_msgbuf_delay_post_ts_bufs(dhd_pub_t *dhd)
14897 {
14898 	if (dhd->prot->max_tsbufpost > 0)
14899 		dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
14900 }
14901 
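/*
 * Handle a firmware timestamp completion. With DHD_TIMESYNC, the posted ts
 * buffer is reclaimed by pktid, its payload handed to
 * dhd_timesync_handle_fw_timestamp() and the packet freed, and fresh ts
 * buffers are reposted unless the timesync layer asks to delay the post.
 * Without DHD_TIMESYNC, the message is only logged as unexpected.
 */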
14902 static void
14903 BCMFASTPATH(dhd_prot_process_fw_timestamp)(dhd_pub_t *dhd, void* buf)
14904 {
14905 #ifdef DHD_TIMESYNC
14906 	fw_timestamp_event_msg_t *resp;
14907 	uint32 pktid;
14908 	uint16 buflen, seqnum;
14909 	void * pkt;
14910 
14911 	resp = (fw_timestamp_event_msg_t *)buf;
14912 	pktid = ltoh32(resp->msg.request_id);
14913 	buflen = ltoh16(resp->buf_len);
14914 	seqnum = ltoh16(resp->seqnum);
14915 
14916 #if defined(DHD_PKTID_AUDIT_RING)
14917 	DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_ctrl_map, pktid,
14918 		DHD_DUPLICATE_FREE);
14919 #endif /* DHD_PKTID_AUDIT_RING */
14920 
14921 	DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d\n",
14922 		pktid, buflen, resp->msg.flags, ltoh16(resp->seqnum)));
14923 
14924 	if (!dhd->prot->cur_ts_bufs_posted) {
14925 		DHD_ERROR(("tsbuf posted are zero, but there is a completion\n"));
14926 		return;
14927 	}
14928 
14929 	dhd->prot->cur_ts_bufs_posted--;
14930 
14931 	if (!dhd_timesync_delay_post_bufs(dhd)) {
14932 		if (dhd->prot->max_tsbufpost > 0) {
14933 			dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
14934 		}
14935 	}
14936 
14937 	pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_TSBUF_RX, TRUE);
14938 
14939 	if (!pkt) {
14940 		DHD_ERROR(("no ts buffer associated with pktid 0x%04x\n", pktid));
14941 		return;
14942 	}
14943 
14944 	PKTSETLEN(dhd->osh, pkt, buflen);
14945 	dhd_timesync_handle_fw_timestamp(dhd->ts, PKTDATA(dhd->osh, pkt), buflen, seqnum);
14946 #ifdef DHD_USE_STATIC_CTRLBUF
14947 	PKTFREE_STATIC(dhd->osh, pkt, TRUE);
14948 #else
14949 	PKTFREE(dhd->osh, pkt, TRUE);
14950 #endif /* DHD_USE_STATIC_CTRLBUF */
14951 #else /* DHD_TIMESYNC */
14952 	DHD_ERROR(("Timesync feature not compiled in but got FW TS message\n"));
14953 #endif /* DHD_TIMESYNC */
14954 
14955 }
14956 
14957 uint16
14958 dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp)
14959 {
14960 	return dhdp->prot->ioctl_trans_id;
14961 }
14962 
14963 #ifdef SNAPSHOT_UPLOAD
14964 /* send request to take snapshot */
14965 int
14966 dhd_prot_send_snapshot_request(dhd_pub_t *dhdp, uint8 snapshot_type, uint8 snapshot_param)
14967 {
14968 	dhd_prot_t *prot = dhdp->prot;
14969 	dhd_dma_buf_t *dma_buf = &prot->snapshot_upload_buf;
14970 	snapshot_upload_request_msg_t *snap_req;
14971 	unsigned long flags;
14972 	uint16 alloced = 0;
14973 	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
14974 
14975 #ifdef PCIE_INB_DW
14976 	if (dhd_prot_inc_hostactive_devwake_assert(dhdp->bus) != BCME_OK)
14977 		return BCME_ERROR;
14978 #endif /* PCIE_INB_DW */
14979 
14980 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
14981 
14982 	/* Request for cbuf space */
14983 	snap_req = (snapshot_upload_request_msg_t *)dhd_prot_alloc_ring_space(dhdp,
14984 		ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
14985 		&alloced, FALSE);
14986 	if (snap_req == NULL) {
14987 		DHD_ERROR(("couldn't allocate space on msgring to send snapshot request\n"));
14988 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
14989 #ifdef PCIE_INB_DW
14990 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
14991 #endif
14992 		return BCME_ERROR;
14993 	}
14994 
14995 	/* Common msg buf hdr */
14996 	snap_req->cmn_hdr.msg_type = MSG_TYPE_SNAPSHOT_UPLOAD;
14997 	snap_req->cmn_hdr.if_id = 0;
14998 	snap_req->cmn_hdr.flags =  ctrl_ring->current_phase;
14999 	snap_req->cmn_hdr.request_id = DHD_H2D_SNAPSHOT_UPLOAD_REQ_PKTID;
15000 	snap_req->cmn_hdr.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
15001 	ctrl_ring->seqnum++;
15002 
15003 	/* snapshot request msg */
15004 	snap_req->snapshot_buf_len = htol32(dma_buf->len);
15005 	snap_req->snapshot_type = snapshot_type;
15006 	snap_req->snapshot_param = snapshot_param;
15007 	snap_req->host_buf_addr.high = htol32(PHYSADDRHI(dma_buf->pa));
15008 	snap_req->host_buf_addr.low = htol32(PHYSADDRLO(dma_buf->pa));
15009 
15010 	if (ISALIGNED(dma_buf->va, DMA_ALIGN_LEN) == FALSE) {
15011 		DHD_ERROR(("snapshot req buffer address unaligned !!!!! \n"));
15012 	}
15013 
15014 	/* clear previous snapshot upload */
15015 	memset(dma_buf->va, 0, dma_buf->len);
15016 	prot->snapshot_upload_len = 0;
15017 	prot->snapshot_type = snapshot_type;
15018 	prot->snapshot_cmpl_pending = TRUE;
15019 
15020 	DHD_CTL(("submitted snapshot request request_id %d, buf_len %d, type %d, param %d\n",
15021 		snap_req->cmn_hdr.request_id, snap_req->snapshot_buf_len,
15022 		snap_req->snapshot_type, snap_req->snapshot_param));
15023 
15024 	/* upd wrt ptr and raise interrupt */
15025 	dhd_prot_ring_write_complete(dhdp, ctrl_ring, snap_req,
15026 		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
15027 
15028 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
15029 
15030 #ifdef PCIE_INB_DW
15031 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
15032 #endif
15033 
15034 	return BCME_OK;
15035 } /* dhd_prot_send_snapshot_request */
15036 
15037 /* get uploaded snapshot */
15038 int
15039 dhd_prot_get_snapshot(dhd_pub_t *dhdp, uint8 snapshot_type, uint32 offset,
15040 	uint32 dst_buf_size, uint8 *dst_buf, uint32 *dst_size, bool *is_more)
15041 {
15042 	dhd_prot_t *prot = dhdp->prot;
15043 	uint8 *buf = prot->snapshot_upload_buf.va;
15044 	uint8 *buf_end = buf + prot->snapshot_upload_len;
15045 	uint32 copy_size;
15046 
15047 	/* snapshot type must match */
15048 	if (prot->snapshot_type != snapshot_type) {
15049 		return BCME_DATA_NOTFOUND;
15050 	}
15051 
15052 	/* snapshot not completed */
15053 	if (prot->snapshot_cmpl_pending) {
15054 		return BCME_NOTREADY;
15055 	}
15056 
15057 	/* offset within the buffer */
15058 	if (buf + offset >= buf_end) {
15059 		return BCME_BADARG;
15060 	}
15061 
15062 	/* copy dst buf size or remaining size */
15063 	copy_size = MIN(dst_buf_size, buf_end - (buf + offset));
15064 	memcpy(dst_buf, buf + offset, copy_size);
15065 
15066 	/* return size and is_more */
15067 	*dst_size = copy_size;
15068 	*is_more = (offset + copy_size < prot->snapshot_upload_len) ?
15069 		TRUE : FALSE;
15070 	return BCME_OK;
15071 } /* dhd_prot_get_snapshot */
15072 
15073 #endif	/* SNAPSHOT_UPLOAD */
15074 
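/*
 * Return the host SCB buffer address and length. When HSCB is disabled, a zero
 * length is reported (if requested) so the caller does not log an
 * "Operation not supported" message.
 */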
15075 int dhd_get_hscb_info(dhd_pub_t *dhd, void ** va, uint32 *len)
15076 {
15077 	if (!dhd->hscb_enable) {
15078 		if (len) {
15079 			/* prevent "Operation not supported" dhd message */
15080 			*len = 0;
15081 			return BCME_OK;
15082 		}
15083 		return BCME_UNSUPPORTED;
15084 	}
15085 
15086 	if (va) {
15087 		*va = dhd->prot->host_scb_buf.va;
15088 	}
15089 	if (len) {
15090 		*len = dhd->prot->host_scb_buf.len;
15091 	}
15092 
15093 	return BCME_OK;
15094 }
15095 
15096 #ifdef DHD_BUS_MEM_ACCESS
15097 int dhd_get_hscb_buff(dhd_pub_t *dhd, uint32 offset, uint32 length, void * buff)
15098 {
15099 	if (!dhd->hscb_enable) {
15100 		return BCME_UNSUPPORTED;
15101 	}
15102 
15103 	if (dhd->prot->host_scb_buf.va == NULL ||
15104 		((uint64)offset + length > (uint64)dhd->prot->host_scb_buf.len)) {
15105 		return BCME_BADADDR;
15106 	}
15107 
15108 	memcpy(buff, (char*)dhd->prot->host_scb_buf.va + offset, length);
15109 
15110 	return BCME_OK;
15111 }
15112 #endif /* DHD_BUS_MEM_ACCESS */
15113 
15114 #ifdef DHD_HP2P
15115 uint32
15116 dhd_prot_pkt_threshold(dhd_pub_t *dhd, bool set, uint32 val)
15117 {
15118 	if (set)
15119 		dhd->pkt_thresh = (uint16)val;
15120 
15121 	val = dhd->pkt_thresh;
15122 
15123 	return val;
15124 }
15125 
15126 uint32
15127 dhd_prot_time_threshold(dhd_pub_t *dhd, bool set, uint32 val)
15128 {
15129 	if (set)
15130 		dhd->time_thresh = (uint16)val;
15131 
15132 	val = dhd->time_thresh;
15133 
15134 	return val;
15135 }
15136 
15137 uint32
15138 dhd_prot_pkt_expiry(dhd_pub_t *dhd, bool set, uint32 val)
15139 {
15140 	if (set)
15141 		dhd->pkt_expiry = (uint16)val;
15142 
15143 	val = dhd->pkt_expiry;
15144 
15145 	return val;
15146 }
15147 
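/*
 * Get/set the HP2P enables packed as a nibble pair: bits 0..3 carry
 * hp2p_enable, bits 4..7 carry hp2p_infra_enable. Enabling also switches the
 * flow priority map to TID based; disabling restores the AC based map.
 */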
15148 uint8
15149 dhd_prot_hp2p_enable(dhd_pub_t *dhd, bool set, int enable)
15150 {
15151 	uint8 ret = 0;
15152 	if (set) {
15153 		dhd->hp2p_enable = (enable & 0xf) ? TRUE : FALSE;
15154 		dhd->hp2p_infra_enable = ((enable >> 4) & 0xf) ? TRUE : FALSE;
15155 
15156 		if (enable) {
15157 			dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_TID_MAP);
15158 		} else {
15159 			dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_AC_MAP);
15160 		}
15161 	}
15162 	ret = dhd->hp2p_infra_enable ? 0x1:0x0;
15163 	ret <<= 4;
15164 	ret |= dhd->hp2p_enable ? 0x1:0x0;
15165 
15166 	return ret;
15167 }
15168 
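/*
 * Bin the latency value carried in an rx completion timestamp (low 10 bits of
 * ts->high, scaled by HP2P_TIME_SCALE) into the rx_t0 histogram;
 * dhd_update_hp2p_txstats() below bins the two 10-bit duration fields of a tx
 * completion into the per-flow tx_t0/tx_t1 histograms.
 */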
15169 static void
15170 dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus)
15171 {
15172 	ts_timestamp_t *ts = (ts_timestamp_t *)&rxstatus->ts;
15173 	hp2p_info_t *hp2p_info;
15174 	uint32 dur1;
15175 
15176 	hp2p_info = &dhd->hp2p_info[0];
15177 	dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 100;
15178 
15179 	if (dur1 > (MAX_RX_HIST_BIN - 1)) {
15180 		dur1 = MAX_RX_HIST_BIN - 1;
15181 		DHD_INFO(("%s: 0x%x 0x%x\n",
15182 			__FUNCTION__, ts->low, ts->high));
15183 	}
15184 
15185 	hp2p_info->rx_t0[dur1 % MAX_RX_HIST_BIN]++;
15186 	return;
15187 }
15188 
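/**
 * dhd_update_hp2p_txstats - derive the HP2P flow index from the completion's
 * flow ring id and bin the two 10-bit duration fields packed into ts->high
 * (bits 0..9 and 10..19) into the tx_t0 and tx_t1 histograms; out of range
 * values are clamped to the last bin.
 */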
15189 static void
15190 dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus)
15191 {
15192 	ts_timestamp_t *ts = (ts_timestamp_t *)&txstatus->ts;
15193 	uint16 flowid = txstatus->compl_hdr.flow_ring_id;
15194 	uint32 hp2p_flowid, dur1, dur2;
15195 	hp2p_info_t *hp2p_info;
15196 
15197 	hp2p_flowid = dhd->bus->max_submission_rings -
15198 		dhd->bus->max_cmn_rings - flowid + 1;
15199 	hp2p_info = &dhd->hp2p_info[hp2p_flowid];
15201 
15202 	dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 1000;
15203 	if (dur1 > (MAX_TX_HIST_BIN - 1)) {
15204 		dur1 = MAX_TX_HIST_BIN - 1;
15205 		DHD_INFO(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
15206 	}
15207 	hp2p_info->tx_t0[dur1 % MAX_TX_HIST_BIN]++;
15208 
15209 	dur2 = (((ts->high >> 10) & 0x3FF) * HP2P_TIME_SCALE) / 1000;
15210 	if (dur2 > (MAX_TX_HIST_BIN - 1)) {
15211 		dur2 = MAX_TX_HIST_BIN - 1;
15212 		DHD_INFO(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
15213 	}
15214 
15215 	hp2p_info->tx_t1[dur2 % MAX_TX_HIST_BIN]++;
15216 	return;
15217 }
15218 
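/**
 * dhd_hp2p_write - hrtimer callback fired when an HP2P flow ring has had work
 * pending longer than the configured time threshold: flush the pending tx
 * descriptors for that flow under the hp2p lock and do not rearm the timer
 * (HRTIMER_NORESTART).
 */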
15219 enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer)
15220 {
15221 	hp2p_info_t *hp2p_info;
15222 	unsigned long flags;
15223 	dhd_pub_t *dhdp;
15224 
15225 	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
15226 	hp2p_info = container_of(timer, hp2p_info_t, timer);
15227 	GCC_DIAGNOSTIC_POP();
15228 
15229 	dhdp = hp2p_info->dhd_pub;
15230 	if (!dhdp) {
15231 		goto done;
15232 	}
15233 
15234 	DHD_INFO(("%s: pend_item = %d flowid = %d\n",
15235 		__FUNCTION__, ((msgbuf_ring_t *)hp2p_info->ring)->pend_items_count,
15236 		hp2p_info->flowid));
15237 
15238 	flags = dhd_os_hp2plock(dhdp);
15239 
15240 	dhd_prot_txdata_write_flush(dhdp, hp2p_info->flowid);
15241 	hp2p_info->hrtimer_init = FALSE;
15242 	hp2p_info->num_timer_limit++;
15243 
15244 	dhd_os_hp2punlock(dhdp, flags);
15245 done:
15246 	return HRTIMER_NORESTART;
15247 }
15248 
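/**
 * dhd_calc_hp2p_burst - decide whether to flush an HP2P flow ring now or defer.
 * Once pend_items_count reaches pkt_thresh the ring is flushed and any armed
 * timer is cancelled; otherwise a one-shot hrtimer is started if one is not
 * already armed (time_thresh appears to be in microseconds, since it is
 * multiplied by 1000 before being passed to ktime_set as nanoseconds).
 */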
15249 static void
15250 dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid)
15251 {
15252 	hp2p_info_t *hp2p_info;
15253 	uint16 hp2p_flowid;
15254 
15255 	hp2p_flowid = dhd->bus->max_submission_rings -
15256 		dhd->bus->max_cmn_rings - flowid + 1;
15257 	hp2p_info = &dhd->hp2p_info[hp2p_flowid];
15258 
15259 	if (ring->pend_items_count == dhd->pkt_thresh) {
15260 		dhd_prot_txdata_write_flush(dhd, flowid);
15261 
15262 		hp2p_info->hrtimer_init = FALSE;
15263 		hp2p_info->ring = NULL;
15264 		hp2p_info->num_pkt_limit++;
15265 		hrtimer_cancel(&hp2p_info->timer);
15266 
15267 		DHD_INFO(("%s: cancel hrtimer for flowid = %d \n"
15268 			"hp2p_flowid = %d pkt_thresh = %d\n",
15269 			__FUNCTION__, flowid, hp2p_flowid, dhd->pkt_thresh));
15270 	} else {
15271 		if (hp2p_info->hrtimer_init == FALSE) {
15272 			hp2p_info->hrtimer_init = TRUE;
15273 			hp2p_info->flowid = flowid;
15274 			hp2p_info->dhd_pub = dhd;
15275 			hp2p_info->ring = ring;
15276 			hp2p_info->num_timer_start++;
15277 
15278 			hrtimer_start(&hp2p_info->timer,
15279 				ktime_set(0, dhd->time_thresh * 1000), HRTIMER_MODE_REL);
15280 
15281 			DHD_INFO(("%s: start hrtimer for flowid = %d hp2p_flowid = %d\n",
15282 					__FUNCTION__, flowid, hp2p_flowid));
15283 		}
15284 	}
15285 	return;
15286 }
15287 
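/**
 * dhd_update_hp2p_txdesc - for HP2P the metadata buffer fields of the tx post
 * descriptor are repurposed: metadata_buf_len is cleared and the address words
 * carry a host timestamp (local_clock() divided down to microseconds), while
 * exp_time is stamped with the configured packet expiry.
 */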
15288 static void
15289 dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc)
15290 {
15291 	uint64 ts;
15292 
15293 	ts = local_clock();
15294 	do_div(ts, 1000);
15295 
15296 	txdesc->metadata_buf_len = 0;
15297 	txdesc->metadata_buf_addr.high_addr = htol32((ts >> 32) & 0xFFFFFFFF);
15298 	txdesc->metadata_buf_addr.low_addr = htol32(ts & 0xFFFFFFFF);
15299 	txdesc->exp_time = dhd->pkt_expiry;
15300 
15301 	DHD_INFO(("%s: metadata_high = 0x%x metadata_low = 0x%x exp_time = %x\n",
15302 		__FUNCTION__, txdesc->metadata_buf_addr.high_addr,
15303 		txdesc->metadata_buf_addr.low_addr,
15304 		txdesc->exp_time));
15305 
15306 	return;
15307 }
15308 #endif /* DHD_HP2P */
15309 
15310 #ifdef DHD_MAP_LOGGING
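/**
 * dhd_prot_smmu_fault_dump - collect diagnostics after an SMMU fault: print
 * the protocol debug info, dump the DMA map (and the packet ID log when
 * enabled) and trigger a firmware memory dump tagged DUMP_TYPE_SMMU_FAULT.
 */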
15311 void
15312 dhd_prot_smmu_fault_dump(dhd_pub_t *dhdp)
15313 {
15314 	dhd_prot_debug_info_print(dhdp);
15315 	OSL_DMA_MAP_DUMP(dhdp->osh);
15316 #ifdef DHD_MAP_PKTID_LOGGING
15317 	dhd_pktid_logging_dump(dhdp);
15318 #endif /* DHD_MAP_PKTID_LOGGING */
15319 #ifdef DHD_FW_COREDUMP
15320 	dhdp->memdump_type = DUMP_TYPE_SMMU_FAULT;
15321 #ifdef DNGL_AXI_ERROR_LOGGING
15322 	dhdp->memdump_enabled = DUMP_MEMFILE;
15323 	dhd_bus_get_mem_dump(dhdp);
15324 #else
15325 	dhdp->memdump_enabled = DUMP_MEMONLY;
15326 	dhd_bus_mem_dump(dhdp);
15327 #endif /* DNGL_AXI_ERROR_LOGGING */
15328 #endif /* DHD_FW_COREDUMP */
15329 }
15330 #endif /* DHD_MAP_LOGGING */
15331 
15332 #ifdef DHD_FLOW_RING_STATUS_TRACE
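/**
 * dhd_dump_bus_flow_ring_status_trace - format one flow ring status trace
 * buffer (ISR or DPC) into 'strbuf'. Each row shows the per-ring DMA read and
 * write indices (DRD-DWR) captured at 'timestamp'; the info ring and EDL
 * columns are only printed when those rings exist.
 */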
15333 void
15334 dhd_dump_bus_flow_ring_status_trace(
15335 	dhd_bus_t *bus, struct bcmstrbuf *strbuf, dhd_frs_trace_t *frs_trace, int dumpsz, char *str)
15336 {
15337 	int i;
15338 	dhd_prot_t *prot = bus->dhd->prot;
15339 	uint32 isr_cnt = bus->frs_isr_count % FRS_TRACE_SIZE;
15340 	uint32 dpc_cnt = bus->frs_dpc_count % FRS_TRACE_SIZE;
15341 
15342 	bcm_bprintf(strbuf, "---- %s ------ isr_cnt: %d dpc_cnt %d\n",
15343 		str, isr_cnt, dpc_cnt);
15344 	bcm_bprintf(strbuf, "%s\t%s\t%s\t%s\t%s\t%s\t",
15345 		"Timestamp ns", "H2DCtrlPost", "D2HCtrlCpl",
15346 		"H2DRxPost", "D2HRxCpl", "D2HTxCpl");
15347 	if (prot->h2dring_info_subn != NULL && prot->d2hring_info_cpln != NULL) {
15348 		bcm_bprintf(strbuf, "%s\t%s\t", "H2DRingInfoPost", "D2HRingInfoCpl");
15349 	}
15350 	if (prot->d2hring_edl != NULL) {
15351 		bcm_bprintf(strbuf, "%s", "D2HRingEDL");
15352 	}
15353 	bcm_bprintf(strbuf, "\n");
15354 	for (i = 0; i < dumpsz; i++) {
15355 		bcm_bprintf(strbuf, "%llu\t%6u-%u\t%6u-%u\t%6u-%u\t%6u-%u\t%6u-%u\t",
15356 				frs_trace[i].timestamp,
15357 				frs_trace[i].h2d_ctrl_post_drd,
15358 				frs_trace[i].h2d_ctrl_post_dwr,
15359 				frs_trace[i].d2h_ctrl_cpln_drd,
15360 				frs_trace[i].d2h_ctrl_cpln_dwr,
15361 				frs_trace[i].h2d_rx_post_drd,
15362 				frs_trace[i].h2d_rx_post_dwr,
15363 				frs_trace[i].d2h_rx_cpln_drd,
15364 				frs_trace[i].d2h_rx_cpln_dwr,
15365 				frs_trace[i].d2h_tx_cpln_drd,
15366 				frs_trace[i].d2h_tx_cpln_dwr);
15367 		if (prot->h2dring_info_subn != NULL && prot->d2hring_info_cpln != NULL) {
15368 			bcm_bprintf(strbuf, "%6u-%u\t%6u-%u\t",
15369 				frs_trace[i].h2d_info_post_drd,
15370 				frs_trace[i].h2d_info_post_dwr,
15371 				frs_trace[i].d2h_info_cpln_drd,
15372 				frs_trace[i].d2h_info_cpln_dwr);
15373 		}
15374 		if (prot->d2hring_edl != NULL) {
15375 			bcm_bprintf(strbuf, "%6u-%u",
15376 				frs_trace[i].d2h_ring_edl_drd,
15377 				frs_trace[i].d2h_ring_edl_dwr);
15378 
15379 		}
15380 		bcm_bprintf(strbuf, "\n");
15381 	}
15382 	bcm_bprintf(strbuf, "--------------------------\n");
15383 }
15384 
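/** Dump the ISR-side flow ring trace, capped at FRS_TRACE_SIZE entries. */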
15385 void
15386 dhd_dump_bus_flow_ring_status_isr_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
15387 {
15388 	int dumpsz;
15389 
15390 	dumpsz = bus->frs_isr_count < FRS_TRACE_SIZE ?
15391 		bus->frs_isr_count : FRS_TRACE_SIZE;
15392 	if (dumpsz == 0) {
15393 		bcm_bprintf(strbuf, "\nEMPTY ISR FLOW RING TRACE\n");
15394 		return;
15395 	}
15396 	dhd_dump_bus_flow_ring_status_trace(bus, strbuf, bus->frs_isr_trace,
15397 		dumpsz, "ISR FLOW RING TRACE DRD-DWR");
15398 }
15399 
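/** Dump the DPC-side flow ring trace, capped at FRS_TRACE_SIZE entries. */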
15400 void
15401 dhd_dump_bus_flow_ring_status_dpc_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
15402 {
15403 	int dumpsz;
15404 
15405 	dumpsz = bus->frs_dpc_count < FRS_TRACE_SIZE ?
15406 		bus->frs_dpc_count : FRS_TRACE_SIZE;
15407 	if (dumpsz == 0) {
15408 		bcm_bprintf(strbuf, "\nEMPTY DPC FLOW RING TRACE\n");
15409 		return;
15410 	}
15411 	dhd_dump_bus_flow_ring_status_trace(bus, strbuf, bus->frs_dpc_trace,
15412 		dumpsz, "DPC FLOW RING TRACE DRD-DWR");
15413 }
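
/**
 * dhd_bus_flow_ring_status_trace - snapshot the current DMA read/write indices
 * of the common rings (ctrl submit/complete, rx post, rx/tx complete and, when
 * present, the info and EDL rings) into one trace record.
 */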
15414 static void
15415 dhd_bus_flow_ring_status_trace(dhd_pub_t *dhd, dhd_frs_trace_t *frs_trace)
15416 {
15417 	dhd_prot_t *prot = dhd->prot;
15418 	msgbuf_ring_t *ring;
15419 
15420 	ring = &prot->h2dring_ctrl_subn;
15421 	frs_trace->h2d_ctrl_post_drd =
15422 		dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
15423 	frs_trace->h2d_ctrl_post_dwr =
15424 		dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
15425 
15426 	ring = &prot->d2hring_ctrl_cpln;
15427 	frs_trace->d2h_ctrl_cpln_drd =
15428 		dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
15429 	frs_trace->d2h_ctrl_cpln_dwr =
15430 		dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
15431 
15432 	ring = &prot->h2dring_rxp_subn;
15433 	frs_trace->h2d_rx_post_drd =
15434 		dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
15435 	frs_trace->h2d_rx_post_dwr =
15436 		dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
15437 
15438 	ring = &prot->d2hring_rx_cpln;
15439 	frs_trace->d2h_rx_cpln_drd =
15440 		dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
15441 	frs_trace->d2h_rx_cpln_dwr =
15442 		dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
15443 
15444 	ring = &prot->d2hring_tx_cpln;
15445 	frs_trace->d2h_tx_cpln_drd =
15446 		dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
15447 	frs_trace->d2h_tx_cpln_dwr =
15448 		dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
15449 
15450 	if (dhd->prot->h2dring_info_subn != NULL && dhd->prot->d2hring_info_cpln != NULL) {
15451 		ring = prot->h2dring_info_subn;
15452 		frs_trace->h2d_info_post_drd =
15453 			dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
15454 		frs_trace->h2d_info_post_dwr =
15455 			dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
15456 
15457 		ring = prot->d2hring_info_cpln;
15458 		frs_trace->d2h_info_cpln_drd =
15459 			dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
15460 		frs_trace->d2h_info_cpln_dwr =
15461 			dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
15462 	}
15463 	if (prot->d2hring_edl != NULL) {
15464 		ring = prot->d2hring_edl;
15465 		frs_trace->d2h_ring_edl_drd =
15466 			dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
15467 		frs_trace->d2h_ring_edl_dwr =
15468 			dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
15469 	}
15470 
15471 }
15472 
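/**
 * dhd_bus_flow_ring_status_isr_trace - record a flow ring status snapshot from
 * ISR context into the circular frs_isr_trace buffer; the record is skipped
 * when less than 250us have elapsed since the timestamp stored in the slot
 * being reused.
 */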
15473 void
15474 dhd_bus_flow_ring_status_isr_trace(dhd_pub_t *dhd)
15475 {
15476 	uint32 cnt = dhd->bus->frs_isr_count % FRS_TRACE_SIZE;
15477 	dhd_frs_trace_t *frs_isr_trace = &dhd->bus->frs_isr_trace[cnt];
15478 	uint64 time_ns_prev = frs_isr_trace->timestamp; /* timestamp of the slot being reused */
15479 	uint64 time_ns_now = OSL_LOCALTIME_NS();
15480 
15481 	if ((time_ns_now - time_ns_prev) < 250000) { /* delta less than 250us */
15482 		return;
15483 	}
15484 
15485 	dhd_bus_flow_ring_status_trace(dhd, frs_isr_trace);
15486 
15487 	frs_isr_trace->timestamp = OSL_LOCALTIME_NS();
15488 	dhd->bus->frs_isr_count++;
15489 }
15490 
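/**
 * dhd_bus_flow_ring_status_dpc_trace - DPC context counterpart of the ISR
 * trace above, recording into the circular frs_dpc_trace buffer with the same
 * 250us skip check.
 */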
15491 void
15492 dhd_bus_flow_ring_status_dpc_trace(dhd_pub_t *dhd)
15493 {
15494 	uint32 cnt = dhd->bus->frs_dpc_count % FRS_TRACE_SIZE;
15495 	dhd_frs_trace_t *frs_dpc_trace = &dhd->bus->frs_dpc_trace[cnt];
15496 	uint64 time_ns_prev = frs_dpc_trace->timestamp; /* timestamp of the slot being reused */
15497 	uint64 time_ns_now = OSL_LOCALTIME_NS();
15498 
15499 	if ((time_ns_now - time_ns_prev) < 250000) { /* delta less than 250us */
15500 		return;
15501 	}
15502 
15503 	dhd_bus_flow_ring_status_trace(dhd, frs_dpc_trace);
15504 
15505 	frs_dpc_trace->timestamp = OSL_LOCALTIME_NS();
15506 	dhd->bus->frs_dpc_count++;
15507 }
15508 #endif /* DHD_FLOW_RING_STATUS_TRACE */
15509