xref: /OK3568_Linux_fs/external/rkwifibt/drivers/bcmdhd/dhd_msgbuf.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /**
2  * @file definition of host message ring functionality
3  * Provides type definitions and function prototypes used to link the
4  * DHD OS, bus, and protocol modules.
5  *
6  * Copyright (C) 2020, Broadcom.
7  *
8  *      Unless you and Broadcom execute a separate written software license
9  * agreement governing use of this software, this software is licensed to you
10  * under the terms of the GNU General Public License version 2 (the "GPL"),
11  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
12  * following added to such license:
13  *
14  *      As a special exception, the copyright holders of this software give you
15  * permission to link this software with independent modules, and to copy and
16  * distribute the resulting executable under terms of your choice, provided that
17  * you also meet, for each linked independent module, the terms and conditions of
18  * the license of that module.  An independent module is a module which is not
19  * derived from this software.  The special exception does not apply to any
20  * modifications of the software.
21  *
22  *
23  * <<Broadcom-WL-IPTag/Open:>>
24  *
25  * $Id$
26  */
27 
28 /** XXX Twiki: [PCIeFullDongleArchitecture] */
29 
30 #include <typedefs.h>
31 #include <osl.h>
32 
33 #include <bcmutils.h>
34 #include <bcmmsgbuf.h>
35 #include <bcmendian.h>
36 #include <bcmstdlib_s.h>
37 
38 #include <dngl_stats.h>
39 #include <dhd.h>
40 #include <dhd_proto.h>
41 
42 #include <dhd_bus.h>
43 
44 #include <dhd_dbg.h>
45 #include <siutils.h>
46 #include <dhd_debug.h>
47 #ifdef EXT_STA
48 #include <wlc_cfg.h>
49 #include <wlc_pub.h>
50 #include <wl_port_if.h>
51 #endif /* EXT_STA */
52 
53 #include <dhd_flowring.h>
54 
55 #include <pcie_core.h>
56 #include <bcmpcie.h>
57 #include <dhd_pcie.h>
58 #ifdef DHD_TIMESYNC
59 #include <dhd_timesync.h>
60 #endif /* DHD_TIMESYNC */
61 #ifdef DHD_PKTTS
62 #include <bcmudp.h>
63 #include <bcmtcp.h>
64 #endif /* DHD_PKTTS */
65 #include <dhd_config.h>
66 
67 #if defined(DHD_LB)
68 #if !defined(LINUX) && !defined(linux) && !defined(OEM_ANDROID)
69 #error "DHD Loadbalancing only supported on LINUX | OEM_ANDROID"
70 #endif /* !LINUX && !OEM_ANDROID */
71 #include <linux/cpu.h>
72 #include <bcm_ring.h>
73 #define DHD_LB_WORKQ_SZ			    (8192)
74 #define DHD_LB_WORKQ_SYNC           (16)
75 #define DHD_LB_WORK_SCHED           (DHD_LB_WORKQ_SYNC * 2)
76 #endif /* DHD_LB */
77 
78 #include <etd.h>
79 #include <hnd_debug.h>
80 #include <bcmtlv.h>
81 #include <hnd_armtrap.h>
82 #include <dnglevent.h>
83 
84 #ifdef DHD_PKT_LOGGING
85 #include <dhd_pktlog.h>
86 #include <dhd_linux_pktdump.h>
87 #endif /* DHD_PKT_LOGGING */
88 #ifdef DHD_EWPR_VER2
89 #include <dhd_bitpack.h>
90 #endif /* DHD_EWPR_VER2 */
91 
92 extern char dhd_version[];
93 extern char fw_version[];
94 
95 /**
96  * Host configures a soft doorbell for d2h rings, by specifying a 32bit host
97  * address where a value must be written. Host may also interrupt coalescing
98  * on this soft doorbell.
99  * Use Case: Hosts with network processors, may register with the dongle the
100  * network processor's thread wakeup register and a value corresponding to the
101  * core/thread context. Dongle will issue a write transaction <address,value>
102  * to the PCIE RC which will need to be routed to the mapped register space, by
103  * the host.
104  */
105 /* #define DHD_D2H_SOFT_DOORBELL_SUPPORT */
106 
107 /* Dependency Check */
108 #if defined(IOCTLRESP_USE_CONSTMEM) && defined(DHD_USE_STATIC_CTRLBUF)
109 #error "DHD_USE_STATIC_CTRLBUF is NOT working with DHD_USE_OSLPKT_FOR_RESPBUF"
110 #endif /* IOCTLRESP_USE_CONSTMEM && DHD_USE_STATIC_CTRLBUF */
111 
112 #define RETRIES 2		/* # of retries to retrieve matching ioctl response */
113 
114 #if defined(DHD_HTPUT_TUNABLES)
115 #define DEFAULT_RX_BUFFERS_TO_POST		1024
116 #define RX_BUF_BURST				64 /* Rx buffers for MSDU Data */
117 #define RXBUFPOST_THRESHOLD			64 /* Rxbuf post threshold */
118 #else
119 #define DEFAULT_RX_BUFFERS_TO_POST		256
120 #define RX_BUF_BURST				32 /* Rx buffers for MSDU Data */
121 #define RXBUFPOST_THRESHOLD			32 /* Rxbuf post threshold */
122 #endif /* DHD_HTPUT_TUNABLES */
123 
124 /* Read index update Magic sequence */
125 #define DHD_DMA_INDX_SEQ_H2D_DB_MAGIC	0xDDDDDDDDAu
126 #define DHD_RDPTR_UPDATE_H2D_DB_MAGIC(ring)	(0xDD000000 | (ring->idx << 16u) | ring->rd)
127 /* Write index update Magic sequence */
128 #define DHD_WRPTR_UPDATE_H2D_DB_MAGIC(ring)	(0xFF000000 | (ring->idx << 16u) | ring->wr)
129 #define DHD_AGGR_H2D_DB_MAGIC	0xFFFFFFFAu
130 
131 #define DHD_STOP_QUEUE_THRESHOLD	200
132 #define DHD_START_QUEUE_THRESHOLD	100
133 
134 #define RX_DMA_OFFSET		8 /* Mem2mem DMA inserts an extra 8 */
135 #define IOCT_RETBUF_SIZE	(RX_DMA_OFFSET + WLC_IOCTL_MAXLEN)
136 
137 /* flags for ioctl pending status */
138 #define MSGBUF_IOCTL_ACK_PENDING	(1<<0)
139 #define MSGBUF_IOCTL_RESP_PENDING	(1<<1)
140 
141 #define DHD_IOCTL_REQ_PKTBUFSZ		2048
142 #define MSGBUF_IOCTL_MAX_RQSTLEN	(DHD_IOCTL_REQ_PKTBUFSZ - H2DRING_CTRL_SUB_ITEMSIZE)
143 
144 /**
145  * XXX: DMA_ALIGN_LEN use is overloaded:
146  * - as align bits: in DMA_ALLOC_CONSISTENT 1 << 4
147  * - in ensuring that a buffer's va is 4 Byte aligned
148  * - in rounding up a buffer length to 4 Bytes.
149  */
150 #define DMA_ALIGN_LEN		4
151 
152 #define DMA_D2H_SCRATCH_BUF_LEN	8
153 #define DMA_XFER_LEN_LIMIT	0x400000
154 
155 #ifdef BCM_HOST_BUF
156 #ifndef DMA_HOST_BUFFER_LEN
157 #define DMA_HOST_BUFFER_LEN	0x200000
158 #endif
159 #endif /* BCM_HOST_BUF */
160 
161 #define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ		8192
162 
163 #define DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D		1
164 #define DHD_FLOWRING_MAX_EVENTBUF_POST			32
165 #define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST		8
166 #define DHD_H2D_INFORING_MAX_BUF_POST			32
167 #ifdef BTLOG
168 #define DHD_H2D_BTLOGRING_MAX_BUF_POST			32
169 #endif	/* BTLOG */
170 #define DHD_MAX_TSBUF_POST			8
171 
172 #define DHD_PROT_FUNCS	43
173 
174 /* Length of buffer in host for bus throughput measurement */
175 #define DHD_BUS_TPUT_BUF_LEN 2048
176 
177 #define TXP_FLUSH_NITEMS
178 
179 /* optimization to write "n" tx items at a time to ring */
180 #define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT	48
181 
182 #define RING_NAME_MAX_LENGTH		24
183 #define CTRLSUB_HOSTTS_MEESAGE_SIZE		1024
184 /* Giving room before ioctl_trans_id rollsover. */
185 #define BUFFER_BEFORE_ROLLOVER 300
186 
187 /* 512K memory + 32K registers */
188 #define SNAPSHOT_UPLOAD_BUF_SIZE	((512 + 32) * 1024)
189 
190 struct msgbuf_ring; /* ring context for common and flow rings */
191 
192 #ifdef DHD_HMAPTEST
193 /* 5 * DMA_CONSISTENT_ALIGN as different tests use upto 4th page */
194 #define HMAP_SANDBOX_BUFFER_LEN	(DMA_CONSISTENT_ALIGN * 5) /* for a 4k page this is 20K */
195 /**
196  * for D11 DMA HMAPTEST thes states are as follows
197  * iovar sets ACTIVE state
198  * next TXPOST / RXPOST sets POSTED state
199  * on TXCPL / RXCPL POSTED + pktid match does buffer free nd state changed to INACTIVE
200  * This ensures that on an iovar only one buffer is replaced from sandbox area
201  */
202 #define HMAPTEST_D11_TX_INACTIVE 0
203 #define HMAPTEST_D11_TX_ACTIVE 1
204 #define HMAPTEST_D11_TX_POSTED 2
205 
206 #define HMAPTEST_D11_RX_INACTIVE 0
207 #define HMAPTEST_D11_RX_ACTIVE 1
208 #define HMAPTEST_D11_RX_POSTED 2
209 #endif /* DHD_HMAPTEST */
210 
211 #define PCIE_DMA_LOOPBACK	0
212 #define D11_DMA_LOOPBACK	1
213 #define BMC_DMA_LOOPBACK	2
214 
215 /**
216  * PCIE D2H DMA Complete Sync Modes
217  *
218  * Firmware may interrupt the host, prior to the D2H Mem2Mem DMA completes into
219  * Host system memory. A WAR using one of 3 approaches is needed:
220  * 1. Dongle places a modulo-253 seqnum in last word of each D2H message
221  * 2. XOR Checksum, with epoch# in each work item. Dongle builds an XOR checksum
222  *    writes in the last word of each work item. Each work item has a seqnum
223  *    number = sequence num % 253.
224  *
225  * 3. Read Barrier: Dongle does a host memory read access prior to posting an
226  *    interrupt, ensuring that D2H data transfer indeed completed.
227  * 4. Dongle DMA's all indices after producing items in the D2H ring, flushing
228  *    ring contents before the indices.
229  *
230  * Host does not sync for DMA to complete with option #3 or #4, and a noop sync
231  * callback (see dhd_prot_d2h_sync_none) may be bound.
232  *
233  * Dongle advertizes host side sync mechanism requirements.
234  */
235 
236 #define PCIE_D2H_SYNC_WAIT_TRIES    (512U)
237 #define PCIE_D2H_SYNC_NUM_OF_STEPS  (5U)
238 #define PCIE_D2H_SYNC_DELAY         (100UL)	/* in terms of usecs */
239 
240 #ifdef DHD_REPLACE_LOG_INFO_TO_TRACE
241 #define DHD_MSGBUF_INFO DHD_TRACE
242 #else
243 #define DHD_MSGBUF_INFO DHD_INFO
244 #endif /* DHD_REPLACE_LOG_INFO_TO_TRACE */
245 
246 /**
247  * Custom callback attached based upon D2H DMA Sync mode advertized by dongle.
248  *
249  * On success: return cmn_msg_hdr_t::msg_type
250  * On failure: return 0 (invalid msg_type)
251  */
252 typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
253                                 volatile cmn_msg_hdr_t *msg, int msglen);
254 
255 /**
256  * Custom callback attached based upon D2H DMA Sync mode advertized by dongle.
257  * For EDL messages.
258  *
259  * On success: return cmn_msg_hdr_t::msg_type
260  * On failure: return 0 (invalid msg_type)
261  */
262 #ifdef EWP_EDL
263 typedef int (* d2h_edl_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
264                                 volatile cmn_msg_hdr_t *msg);
265 #endif /* EWP_EDL */
266 
267 /*
268  * +----------------------------------------------------------------------------
269  *
270  * RingIds and FlowId are not equivalent as ringids include D2H rings whereas
271  * flowids do not.
272  *
273  * Dongle advertizes the max H2D rings, as max_sub_queues = 'N' which includes
274  * the H2D common rings as well as the (N-BCMPCIE_H2D_COMMON_MSGRINGS) flowrings
275  *
276  * Here is a sample mapping for (based on PCIE Full Dongle Rev5) where,
277  *  BCMPCIE_H2D_COMMON_MSGRINGS = 2, i.e. 2 H2D common rings,
278  *  BCMPCIE_COMMON_MSGRINGS     = 5, i.e. include 3 D2H common rings.
279  *
280  *  H2D Control  Submit   RingId = 0        FlowId = 0 reserved never allocated
281  *  H2D RxPost   Submit   RingId = 1        FlowId = 1 reserved never allocated
282  *
283  *  D2H Control  Complete RingId = 2
284  *  D2H Transmit Complete RingId = 3
285  *  D2H Receive  Complete RingId = 4
286  *
287  *  H2D TxPost   FLOWRING RingId = 5         FlowId = 2     (1st flowring)
288  *  H2D TxPost   FLOWRING RingId = 6         FlowId = 3     (2nd flowring)
289  *  H2D TxPost   FLOWRING RingId = 5 + (N-1) FlowId = (N-1) (Nth flowring)
290  *
291  * When TxPost FlowId(s) are allocated, the FlowIds [0..FLOWID_RESERVED) are
292  * unused, where FLOWID_RESERVED is BCMPCIE_H2D_COMMON_MSGRINGS.
293  *
294  * Example: when a system supports 4 bc/mc and 128 uc flowrings, with
295  * BCMPCIE_H2D_COMMON_MSGRINGS = 2, and BCMPCIE_H2D_COMMON_MSGRINGS = 5, and the
296  * FlowId values would be in the range [2..133] and the corresponding
297  * RingId values would be in the range [5..136].
298  *
299  * The flowId allocator, may chose to, allocate Flowids:
300  *   bc/mc (per virtual interface) in one consecutive range [2..(2+VIFS))
301  *   X# of uc flowids in consecutive ranges (per station Id), where X is the
302  *   packet's access category (e.g. 4 uc flowids per station).
303  *
304  * CAUTION:
305  * When DMA indices array feature is used, RingId=5, corresponding to the 0th
306  * FLOWRING, will actually use the FlowId as index into the H2D DMA index,
307  * since the FlowId truly represents the index in the H2D DMA indices array.
308  *
309  * Likewise, in the D2H direction, the RingId - BCMPCIE_H2D_COMMON_MSGRINGS,
310  * will represent the index in the D2H DMA indices array.
311  *
312  * +----------------------------------------------------------------------------
313  */
314 
315 /* First TxPost Flowring Id */
316 #define DHD_FLOWRING_START_FLOWID   BCMPCIE_H2D_COMMON_MSGRINGS
317 
318 /* Determine whether a ringid belongs to a TxPost flowring */
319 #define DHD_IS_FLOWRING(ringid, max_flow_rings) \
320 	((ringid) >= BCMPCIE_COMMON_MSGRINGS && \
321 	(ringid) < ((max_flow_rings) + BCMPCIE_COMMON_MSGRINGS))
322 
323 /* Convert a H2D TxPost FlowId to a MsgBuf RingId */
324 #define DHD_FLOWID_TO_RINGID(flowid) \
325 	(BCMPCIE_COMMON_MSGRINGS + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS))
326 
327 /* Convert a MsgBuf RingId to a H2D TxPost FlowId */
328 #define DHD_RINGID_TO_FLOWID(ringid) \
329 	(BCMPCIE_H2D_COMMON_MSGRINGS + ((ringid) - BCMPCIE_COMMON_MSGRINGS))
330 
331 /* Convert a H2D MsgBuf RingId to an offset index into the H2D DMA indices array
332  * This may be used for the H2D DMA WR index array or H2D DMA RD index array or
333  * any array of H2D rings.
334  */
335 #define DHD_H2D_RING_OFFSET(ringid) \
336 	(((ringid) >= BCMPCIE_COMMON_MSGRINGS) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid))
337 
338 /* Convert a H2D MsgBuf Flowring Id to an offset index into the H2D DMA indices array
339  * This may be used for IFRM.
340  */
341 #define DHD_H2D_FRM_FLOW_RING_OFFSET(ringid) \
342 	((ringid) - BCMPCIE_COMMON_MSGRINGS)
343 
344 /* Convert a D2H MsgBuf RingId to an offset index into the D2H DMA indices array
345  * This may be used for the D2H DMA WR index array or D2H DMA RD index array or
346  * any array of D2H rings.
347  * d2h debug ring is located at the end, i.e. after all the tx flow rings and h2d debug ring
348  * max_h2d_rings: total number of h2d rings
349  */
350 #define DHD_D2H_RING_OFFSET(ringid, max_h2d_rings) \
351 	((ringid) > (max_h2d_rings) ? \
352 		((ringid) - max_h2d_rings) : \
353 		((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS))
354 
355 /* Convert a D2H DMA Indices Offset to a RingId */
356 #define DHD_D2H_RINGID(offset) \
357 	((offset) + BCMPCIE_H2D_COMMON_MSGRINGS)
358 
359 /* XXX: The ringid and flowid and dma indices array index idiosyncracy is error
360  * prone. While a simplification is possible, the backward compatability
361  * requirement (DHD should operate with any PCIE rev version of firmware),
362  * limits what may be accomplished.
363  *
364  * At the minimum, implementation should use macros for any conversions
365  * facilitating introduction of future PCIE FD revs that need more "common" or
366  * other dynamic rings.
367  */
368 
369 /* XXX: Presently there is no need for maintaining both a dmah and a secdmah */
370 #define DHD_DMAH_NULL      ((void*)NULL)
371 
372 /*
373  * Pad a DMA-able buffer by an additional cachline. If the end of the DMA-able
374  * buffer does not occupy the entire cacheline, and another object is placed
375  * following the DMA-able buffer, data corruption may occur if the DMA-able
376  * buffer is used to DMAing into (e.g. D2H direction), when HW cache coherency
377  * is not available.
378  */
379 #if defined(L1_CACHE_BYTES)
380 #define DHD_DMA_PAD        (L1_CACHE_BYTES)
381 #else
382 #define DHD_DMA_PAD        (128)
383 #endif
384 
385 /*
386  * +----------------------------------------------------------------------------
387  * Flowring Pool
388  *
389  * Unlike common rings, which are attached very early on (dhd_prot_attach),
390  * flowrings are dynamically instantiated. Moreover, flowrings may require a
391  * larger DMA-able buffer. To avoid issues with fragmented cache coherent
392  * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
393  * The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
394  *
395  * Each DMA-able buffer may be allocated independently, or may be carved out
396  * of a single large contiguous region that is registered with the protocol
397  * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
398  * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic).
399  *
400  * No flowring pool action is performed in dhd_prot_attach(), as the number
401  * of h2d rings is not yet known.
402  *
403  * In dhd_prot_init(), the dongle advertized number of h2d rings is used to
404  * determine the number of flowrings required, and a pool of msgbuf_rings are
405  * allocated and a DMA-able buffer (carved or allocated) is attached.
406  * See: dhd_prot_flowrings_pool_attach()
407  *
408  * A flowring msgbuf_ring object may be fetched from this pool during flowring
409  * creation, using the flowid. Likewise, flowrings may be freed back into the
410  * pool on flowring deletion.
411  * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
412  *
413  * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
414  * are detached (returned back to the carved region or freed), and the pool of
415  * msgbuf_ring and any objects allocated against it are freed.
416  * See: dhd_prot_flowrings_pool_detach()
417  *
418  * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a
419  * state as-if upon an attach. All DMA-able buffers are retained.
420  * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
421  * pool attach will notice that the pool persists and continue to use it. This
422  * will avoid the case of a fragmented DMA-able region.
423  *
424  * +----------------------------------------------------------------------------
425  */
426 
427 /* Conversion of a flowid to a flowring pool index */
428 #define DHD_FLOWRINGS_POOL_OFFSET(flowid) \
429 	((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)
430 
431 /* Fetch the msgbuf_ring_t from the flowring pool given a flowid */
432 #define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \
433 	(msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + \
434 	    DHD_FLOWRINGS_POOL_OFFSET(flowid)
435 
436 /* Traverse each flowring in the flowring pool, assigning ring and flowid */
437 #define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) \
438 	for ((flowid) = DHD_FLOWRING_START_FLOWID, \
439 		(ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \
440 		 (flowid) < ((total_flowrings) + DHD_FLOWRING_START_FLOWID); \
441 		 (ring)++, (flowid)++)
442 
443 /* Used in loopback tests */
444 typedef struct dhd_dmaxfer {
445 	dhd_dma_buf_t srcmem;
446 	dhd_dma_buf_t dstmem;
447 	uint32        srcdelay;
448 	uint32        destdelay;
449 	uint32        len;
450 	bool          in_progress;
451 	uint64        start_usec;
452 	uint64        time_taken;
453 	uint32        d11_lpbk;
454 	int           status;
455 } dhd_dmaxfer_t;
456 
457 #ifdef DHD_HMAPTEST
458 /* Used in HMAP test */
459 typedef struct dhd_hmaptest {
460 	dhd_dma_buf_t	mem;
461 	uint32		len;
462 	bool	in_progress;
463 	uint32	is_write;
464 	uint32	accesstype;
465 	uint64  start_usec;
466 	uint32	offset;
467 } dhd_hmaptest_t;
468 #endif /* DHD_HMAPTEST */
469 /**
470  * msgbuf_ring : This object manages the host side ring that includes a DMA-able
471  * buffer, the WR and RD indices, ring parameters such as max number of items
472  * an length of each items, and other miscellaneous runtime state.
473  * A msgbuf_ring may be used to represent a H2D or D2H common ring or a
474  * H2D TxPost ring as specified in the PCIE FullDongle Spec.
475  * Ring parameters are conveyed to the dongle, which maintains its own peer end
476  * ring state. Depending on whether the DMA Indices feature is supported, the
477  * host will update the WR/RD index in the DMA indices array in host memory or
478  * directly in dongle memory.
479  */
480 typedef struct msgbuf_ring {
481 	bool           inited;
482 	uint16         idx;       /* ring id */
483 	uint16         rd;        /* read index */
484 	uint16         curr_rd;   /* read index for debug */
485 	uint16         wr;        /* write index */
486 	uint16         max_items; /* maximum number of items in ring */
487 	uint16         item_len;  /* length of each item in the ring */
488 	sh_addr_t      base_addr; /* LITTLE ENDIAN formatted: base address */
489 	dhd_dma_buf_t  dma_buf;   /* DMA-able buffer: pa, va, len, dmah, secdma */
490 	uint32         seqnum;    /* next expected item's sequence number */
491 #ifdef TXP_FLUSH_NITEMS
492 	void           *start_addr;
493 	/* # of messages on ring not yet announced to dongle */
494 	uint16         pend_items_count;
495 #ifdef AGG_H2D_DB
496 	osl_atomic_t	inflight;
497 #endif /* AGG_H2D_DB */
498 #endif /* TXP_FLUSH_NITEMS */
499 
500 	uint8   ring_type;
501 	uint8   n_completion_ids;
502 	bool    create_pending;
503 	uint16  create_req_id;
504 	uint8   current_phase;
505 	uint16	compeltion_ring_ids[MAX_COMPLETION_RING_IDS_ASSOCIATED];
506 	uchar		name[RING_NAME_MAX_LENGTH];
507 	uint32		ring_mem_allocated;
508 	void	*ring_lock;
509 } msgbuf_ring_t;
510 
511 #define DHD_RING_BGN_VA(ring)           ((ring)->dma_buf.va)
512 #define DHD_RING_END_VA(ring) \
513 	((uint8 *)(DHD_RING_BGN_VA((ring))) + \
514 	 (((ring)->max_items - 1) * (ring)->item_len))
515 
516 #if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
517 #define MAX_IOCTL_TRACE_SIZE    50
518 #define MAX_IOCTL_BUF_SIZE		64
519 typedef struct _dhd_ioctl_trace_t {
520 	uint32	cmd;
521 	uint16	transid;
522 	char	ioctl_buf[MAX_IOCTL_BUF_SIZE];
523 	uint64	timestamp;
524 } dhd_ioctl_trace_t;
525 #endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */
526 
527 #ifdef DHD_PKTTS
528 struct pktts_fwtx_v1 {
529 	uint32 ts[PKTTS_MAX_FWTX];
530 };
531 
532 struct pktts_fwtx_v2 {
533 	uint32 ts[PKTTS_MAX_FWTX];
534 	uint32 ut[PKTTS_MAX_UCTX];
535 	uint32 uc[PKTTS_MAX_UCCNT];
536 };
537 
538 static void dhd_msgbuf_send_msg_tx_ts(dhd_pub_t *dhd, void *pkt,
539 	void *fw_ts, uint16 version);
540 static void dhd_msgbuf_send_msg_rx_ts(dhd_pub_t *dhd, void *pkt,
541 	uint fwr1, uint fwr2);
542 #endif /* DHD_PKTTS */
543 
544 #if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
545 /** D2H WLAN Rx Packet Chaining context */
546 typedef struct rxchain_info {
547 	uint		pkt_count;
548 	uint		ifidx;
549 	void		*pkthead;
550 	void		*pkttail;
551 	uint8		*h_da;	/* pointer to da of chain head */
552 	uint8		*h_sa;	/* pointer to sa of chain head */
553 	uint8		h_prio; /* prio of chain head */
554 } rxchain_info_t;
555 #endif /* BCM_ROUTER_DHD && HNDCTF */
556 
557 /* This can be overwritten by module parameter defined in dhd_linux.c
558  * or by dhd iovar h2d_max_txpost.
559  */
560 int h2d_max_txpost = H2DRING_TXPOST_MAX_ITEM;
561 #if defined(DHD_HTPUT_TUNABLES)
562 int h2d_htput_max_txpost = H2DRING_HTPUT_TXPOST_MAX_ITEM;
563 #endif /* DHD_HTPUT_TUNABLES */
564 
565 #ifdef AGG_H2D_DB
566 bool agg_h2d_db_enab = TRUE;
567 
568 #define AGG_H2D_DB_TIMEOUT_USEC		(1000u)	/* 1 msec */
569 uint32 agg_h2d_db_timeout = AGG_H2D_DB_TIMEOUT_USEC;
570 
571 #ifndef AGG_H2D_DB_INFLIGHT_THRESH
572 /* Keep inflight threshold same as txp_threshold */
573 #define AGG_H2D_DB_INFLIGHT_THRESH TXP_FLUSH_MAX_ITEMS_FLUSH_CNT
574 #endif /* !AGG_H2D_DB_INFLIGHT_THRESH */
575 
576 uint32 agg_h2d_db_inflight_thresh = AGG_H2D_DB_INFLIGHT_THRESH;
577 
578 #define DHD_NUM_INFLIGHT_HISTO_ROWS (14u)
579 #define DHD_INFLIGHT_HISTO_SIZE (sizeof(uint64) * DHD_NUM_INFLIGHT_HISTO_ROWS)
580 
581 typedef struct _agg_h2d_db_info {
582 	void *dhd;
583 	struct hrtimer timer;
584 	bool init;
585 	uint32 direct_db_cnt;
586 	uint32 timer_db_cnt;
587 	uint64  *inflight_histo;
588 } agg_h2d_db_info_t;
589 #endif /* AGG_H2D_DB */
590 
591 /** DHD protocol handle. Is an opaque type to other DHD software layers. */
592 typedef struct dhd_prot {
593 	osl_t *osh;		/* OSL handle */
594 	uint16 rxbufpost_sz;
595 	uint16 rxbufpost;
596 	uint16 max_rxbufpost;
597 	uint32 tot_rxbufpost;
598 	uint32 tot_rxcpl;
599 	uint16 max_eventbufpost;
600 	uint16 max_ioctlrespbufpost;
601 	uint16 max_tsbufpost;
602 	uint16 max_infobufpost;
603 	uint16 infobufpost;
604 	uint16 cur_event_bufs_posted;
605 	uint16 cur_ioctlresp_bufs_posted;
606 	uint16 cur_ts_bufs_posted;
607 
608 	/* Flow control mechanism based on active transmits pending */
609 	osl_atomic_t active_tx_count; /* increments/decrements on every packet tx/tx_status */
610 	uint16 h2d_max_txpost;
611 #if defined(DHD_HTPUT_TUNABLES)
612 	uint16 h2d_htput_max_txpost;
613 #endif /* DHD_HTPUT_TUNABLES */
614 	uint16 txp_threshold;  /* optimization to write "n" tx items at a time to ring */
615 
616 	/* MsgBuf Ring info: has a dhd_dma_buf that is dynamically allocated */
617 	msgbuf_ring_t h2dring_ctrl_subn; /* H2D ctrl message submission ring */
618 	msgbuf_ring_t h2dring_rxp_subn; /* H2D RxBuf post ring */
619 	msgbuf_ring_t d2hring_ctrl_cpln; /* D2H ctrl completion ring */
620 	msgbuf_ring_t d2hring_tx_cpln; /* D2H Tx complete message ring */
621 	msgbuf_ring_t d2hring_rx_cpln; /* D2H Rx complete message ring */
622 	msgbuf_ring_t *h2dring_info_subn; /* H2D info submission ring */
623 	msgbuf_ring_t *d2hring_info_cpln; /* D2H info completion ring */
624 	msgbuf_ring_t *d2hring_edl; /* D2H Enhanced Debug Lane (EDL) ring */
625 
626 	msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowings */
627 	dhd_dma_buf_t flowrings_dma_buf; /* Contiguous DMA buffer for flowrings */
628 	uint16        h2d_rings_total; /* total H2D (common rings + flowrings) */
629 
630 	uint32		rx_dataoffset;
631 
632 	dhd_mb_ring_t	mb_ring_fn;	/* called when dongle needs to be notified of new msg */
633 	dhd_mb_ring_2_t	mb_2_ring_fn;	/* called when dongle needs to be notified of new msg */
634 
635 	/* ioctl related resources */
636 	uint8 ioctl_state;
637 	int16 ioctl_status;		/* status returned from dongle */
638 	uint16 ioctl_resplen;
639 	dhd_ioctl_recieved_status_t ioctl_received;
640 	uint curr_ioctl_cmd;
641 	dhd_dma_buf_t	retbuf;		/* For holding ioctl response */
642 	dhd_dma_buf_t	ioctbuf;	/* For holding ioctl request */
643 
644 	dhd_dma_buf_t	d2h_dma_scratch_buf;	/* For holding d2h scratch */
645 
646 	/* DMA-able arrays for holding WR and RD indices */
647 	uint32          rw_index_sz; /* Size of a RD or WR index in dongle */
648 	dhd_dma_buf_t   h2d_dma_indx_wr_buf;	/* Array of H2D WR indices */
649 	dhd_dma_buf_t	h2d_dma_indx_rd_buf;	/* Array of H2D RD indices */
650 	dhd_dma_buf_t	d2h_dma_indx_wr_buf;	/* Array of D2H WR indices */
651 	dhd_dma_buf_t	d2h_dma_indx_rd_buf;	/* Array of D2H RD indices */
652 	dhd_dma_buf_t h2d_ifrm_indx_wr_buf;	/* Array of H2D WR indices for ifrm */
653 
654 	dhd_dma_buf_t	host_bus_throughput_buf; /* bus throughput measure buffer */
655 
656 	dhd_dma_buf_t   *flowring_buf;    /* pool of flow ring buf */
657 #ifdef DHD_DMA_INDICES_SEQNUM
658 	char *h2d_dma_indx_rd_copy_buf; /* Local copy of H2D WR indices array */
659 	char *d2h_dma_indx_wr_copy_buf; /* Local copy of D2H WR indices array */
660 	uint32 h2d_dma_indx_rd_copy_bufsz; /* H2D WR indices array size */
661 	uint32 d2h_dma_indx_wr_copy_bufsz; /* D2H WR indices array size */
662 	uint32 host_seqnum;	/* Seqence number for D2H DMA Indices sync */
663 #endif /* DHD_DMA_INDICES_SEQNUM */
664 	uint32			flowring_num;
665 
666 	d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */
667 #ifdef EWP_EDL
668 	d2h_edl_sync_cb_t d2h_edl_sync_cb; /* Sync on EDL D2H DMA done: SEQNUM or XORCSUM */
669 #endif /* EWP_EDL */
670 	ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */
671 	ulong d2h_sync_wait_tot; /* total wait loops */
672 
673 	dhd_dmaxfer_t	dmaxfer; /* for test/DMA loopback */
674 
675 	uint16		ioctl_seq_no;
676 	uint16		data_seq_no;  /* XXX this field is obsolete */
677 	uint16		ioctl_trans_id;
678 	void		*pktid_ctrl_map; /* a pktid maps to a packet and its metadata */
679 	void		*pktid_rx_map;	/* pktid map for rx path */
680 	void		*pktid_tx_map;	/* pktid map for tx path */
681 	bool		metadata_dbg;
682 	void		*pktid_map_handle_ioctl;
683 #ifdef DHD_MAP_PKTID_LOGGING
684 	void		*pktid_dma_map;	/* pktid map for DMA MAP */
685 	void		*pktid_dma_unmap; /* pktid map for DMA UNMAP */
686 #endif /* DHD_MAP_PKTID_LOGGING */
687 	uint32		pktid_depleted_cnt;	/* pktid depleted count */
688 	/* netif tx queue stop count */
689 	uint8		pktid_txq_stop_cnt;
690 	/* netif tx queue start count */
691 	uint8		pktid_txq_start_cnt;
692 	uint64		ioctl_fillup_time;	/* timestamp for ioctl fillup */
693 	uint64		ioctl_ack_time;		/* timestamp for ioctl ack */
694 	uint64		ioctl_cmplt_time;	/* timestamp for ioctl completion */
695 
696 	/* Applications/utilities can read tx and rx metadata using IOVARs */
697 	uint16		rx_metadata_offset;
698 	uint16		tx_metadata_offset;
699 
700 #if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
701 	rxchain_info_t	rxchain;	/* chain of rx packets */
702 #endif
703 
704 #if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
705 	/* Host's soft doorbell configuration */
706 	bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS];
707 #endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
708 
709 	/* Work Queues to be used by the producer and the consumer, and threshold
710 	 * when the WRITE index must be synced to consumer's workq
711 	 */
712 	dhd_dma_buf_t	fw_trap_buf; /* firmware trap buffer */
713 
714 	uint32  host_ipc_version; /* Host sypported IPC rev */
715 	uint32  device_ipc_version; /* FW supported IPC rev */
716 	uint32  active_ipc_version; /* Host advertised IPC rev */
717 #if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
718 	dhd_ioctl_trace_t	ioctl_trace[MAX_IOCTL_TRACE_SIZE];
719 	uint32				ioctl_trace_count;
720 #endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */
721 	dhd_dma_buf_t   hostts_req_buf; /* For holding host timestamp request buf */
722 	bool    hostts_req_buf_inuse;
723 	bool    rx_ts_log_enabled;
724 	bool    tx_ts_log_enabled;
725 #ifdef BTLOG
726 	msgbuf_ring_t *h2dring_btlog_subn; /* H2D btlog submission ring */
727 	msgbuf_ring_t *d2hring_btlog_cpln; /* D2H btlog completion ring */
728 	uint16 btlogbufpost;
729 	uint16 max_btlogbufpost;
730 #endif	/* BTLOG */
731 #ifdef DHD_HMAPTEST
732 	uint32 hmaptest_rx_active;
733 	uint32 hmaptest_rx_pktid;
734 	char *hmap_rx_buf_va;
735 	dmaaddr_t hmap_rx_buf_pa;
736 	uint32 hmap_rx_buf_len;
737 
738 	uint32 hmaptest_tx_active;
739 	uint32 hmaptest_tx_pktid;
740 	char *hmap_tx_buf_va;
741 	dmaaddr_t hmap_tx_buf_pa;
742 	uint32	  hmap_tx_buf_len;
743 	dhd_hmaptest_t	hmaptest; /* for hmaptest */
744 	bool hmap_enabled; /* TRUE = hmap is enabled */
745 #endif /* DHD_HMAPTEST */
746 #ifdef SNAPSHOT_UPLOAD
747 	dhd_dma_buf_t snapshot_upload_buf;	/* snapshot upload buffer */
748 	uint32 snapshot_upload_len;		/* snapshot uploaded len */
749 	uint8 snapshot_type;			/* snaphot uploaded type */
750 	bool snapshot_cmpl_pending;		/* snapshot completion pending */
751 #endif	/* SNAPSHOT_UPLOAD */
752 	bool no_retry;
753 	bool no_aggr;
754 	bool fixed_rate;
755 	dhd_dma_buf_t	host_scb_buf; /* scb host offload buffer */
756 #ifdef DHD_HP2P
757 	msgbuf_ring_t *d2hring_hp2p_txcpl; /* D2H HPP Tx completion ring */
758 	msgbuf_ring_t *d2hring_hp2p_rxcpl; /* D2H HPP Rx completion ring */
759 #endif /* DHD_HP2P */
760 	bool no_tx_resource;
761 	uint32 txcpl_db_cnt;
762 #ifdef AGG_H2D_DB
763 	agg_h2d_db_info_t agg_h2d_db_info;
764 #endif /* AGG_H2D_DB */
765 	uint64 tx_h2d_db_cnt;
766 } dhd_prot_t;
767 
768 #ifdef DHD_EWPR_VER2
769 #define HANG_INFO_BASE64_BUFFER_SIZE 640
770 #endif
771 
772 #ifdef DHD_DUMP_PCIE_RINGS
773 static
774 int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
775 	const void *user_buf, unsigned long *file_posn);
776 #ifdef EWP_EDL
777 static
778 int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf,
779 	unsigned long *file_posn);
780 #endif /* EWP_EDL */
781 #endif /* DHD_DUMP_PCIE_RINGS */
782 extern bool dhd_timesync_delay_post_bufs(dhd_pub_t *dhdp);
783 extern void dhd_schedule_dmaxfer_free(dhd_pub_t* dhdp, dmaxref_mem_map_t *dmmap);
784 /* Convert a dmaaddr_t to a base_addr with htol operations */
785 static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa);
786 
787 /* APIs for managing a DMA-able buffer */
788 static int  dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
789 static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
790 
791 /* msgbuf ring management */
792 static int dhd_prot_allocate_bufs(dhd_pub_t *dhd, dhd_prot_t *prot);
793 static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring,
794 	const char *name, uint16 max_items, uint16 len_item, uint16 ringid);
795 static void dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring);
796 static void dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring);
797 static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring);
798 static void dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf);
799 
800 /* Pool of pre-allocated msgbuf_ring_t with DMA-able buffers for Flowrings */
801 static int  dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd);
802 static void dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd);
803 static void dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd);
804 
805 /* Fetch and Release a flowring msgbuf_ring from flowring  pool */
806 static msgbuf_ring_t *dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd,
807 	uint16 flowid);
808 /* see also dhd_prot_flowrings_pool_release() in dhd_prot.h */
809 
810 /* Producer: Allocate space in a msgbuf ring */
811 static void* dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
812 	uint16 nitems, uint16 *alloced, bool exactly_nitems);
813 static void* dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems,
814 	uint16 *alloced, bool exactly_nitems);
815 
816 /* Consumer: Determine the location where the next message may be consumed */
817 static uint8* dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring,
818 	uint32 *available_len);
819 
820 /* Producer (WR index update) or Consumer (RD index update) indication */
821 static void dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t *ring,
822 	void *p, uint16 len);
823 
824 #ifdef AGG_H2D_DB
825 static void dhd_prot_agg_db_ring_write(dhd_pub_t *dhd, msgbuf_ring_t * ring,
826 		void* p, uint16 len);
827 static void dhd_prot_aggregate_db_ring_door_bell(dhd_pub_t *dhd, uint16 flowid, bool ring_db);
828 static void dhd_prot_txdata_aggr_db_write_flush(dhd_pub_t *dhd, uint16 flowid);
829 #endif /* AGG_H2D_DB */
830 static void dhd_prot_ring_doorbell(dhd_pub_t *dhd, uint32 value);
831 static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring);
832 
833 static INLINE int dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
834 	dhd_dma_buf_t *dma_buf, uint32 bufsz);
835 
836 /* Set/Get a RD or WR index in the array of indices */
837 /* See also: dhd_prot_dma_indx_init() */
838 void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type,
839 	uint16 ringid);
840 static uint16 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid);
841 
842 /* Locate a packet given a pktid */
843 static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype,
844 	bool free_pktid);
845 /* Locate a packet given a PktId and free it. */
846 static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send);
847 
848 static int dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
849 	void *buf, uint len, uint8 action);
850 static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
851 	void *buf, uint len, uint8 action);
852 static int dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf);
853 static int dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd,
854 	void *buf, int ifidx);
855 
856 /* Post buffers for Rx, control ioctl response and events */
857 static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msgid, uint32 max_to_post);
858 static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub);
859 static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub);
860 static void dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid);
861 static int dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid);
862 static int dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *pub);
863 
864 static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 pktid, uint32 rxcnt);
865 
866 #if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
867 static void dhd_prot_ioctl_trace(dhd_pub_t *dhd, ioctl_req_msg_t *ioct_rqst, uchar *buf, int len);
868 static void dhd_prot_ioctl_dump(dhd_prot_t *prot, struct bcmstrbuf *strbuf);
869 #endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */
870 
871 /* D2H Message handling */
872 static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len);
873 
874 /* D2H Message handlers */
875 static void dhd_prot_noop(dhd_pub_t *dhd, void *msg);
876 static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg);
877 static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg);
878 static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg);
879 static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg);
880 static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg);
881 static void dhd_prot_event_process(dhd_pub_t *dhd, void *msg);
882 
883 /* Loopback test with dongle */
884 static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma);
885 static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay,
886 	uint destdelay, dhd_dmaxfer_t *dma);
887 static void dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg);
888 
889 /* Flowring management communication with dongle */
890 static void dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg);
891 static void dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg);
892 static void dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg);
893 static void dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg);
894 static void dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg);
895 
896 /* Monitor Mode */
897 #ifdef WL_MONITOR
898 extern bool dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx);
899 extern void dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx);
900 #endif /* WL_MONITOR */
901 
902 /* Configure a soft doorbell per D2H ring */
903 static void dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd);
904 static void dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg);
905 static void dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf);
906 #if !defined(BCM_ROUTER_DHD)
907 static void dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf);
908 static void dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf);
909 #endif /* !BCM_ROUTER_DHD */
910 static void dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf);
911 static void dhd_prot_detach_info_rings(dhd_pub_t *dhd);
912 #ifdef BTLOG
913 static void dhd_prot_process_btlog_complete(dhd_pub_t *dhd, void* buf);
914 static void dhd_prot_detach_btlog_rings(dhd_pub_t *dhd);
915 #endif	/* BTLOG */
916 #ifdef DHD_HP2P
917 static void dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd);
918 #endif /* DHD_HP2P */
919 #ifdef EWP_EDL
920 static void dhd_prot_detach_edl_rings(dhd_pub_t *dhd);
921 #endif
922 static void dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf);
923 static void dhd_prot_process_snapshot_complete(dhd_pub_t *dhd, void *buf);
924 
925 #ifdef DHD_TIMESYNC
926 extern void dhd_parse_proto(uint8 *pktdata, dhd_pkt_parse_t *parse);
927 #endif
928 
929 #ifdef DHD_FLOW_RING_STATUS_TRACE
930 void dhd_dump_bus_flow_ring_status_isr_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf);
931 void dhd_dump_bus_flow_ring_status_dpc_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf);
932 #endif /* DHD_FLOW_RING_STATUS_TRACE */
933 
934 #ifdef DHD_TX_PROFILE
935 extern bool dhd_protocol_matches_profile(uint8 *p, int plen, const
936 		dhd_tx_profile_protocol_t *proto, bool is_host_sfhllc);
937 #endif /* defined(DHD_TX_PROFILE) */
938 
939 #ifdef DHD_HP2P
940 static void dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus);
941 static void dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus);
942 static void dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid);
943 static void dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc);
944 #endif
945 typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg);
946 
947 /** callback functions for messages generated by the dongle */
948 #define MSG_TYPE_INVALID 0
949 
950 static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = {
951 	dhd_prot_noop, /* 0 is MSG_TYPE_INVALID */
952 	dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */
953 	dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */
954 	NULL,
955 	dhd_prot_flow_ring_create_response_process, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */
956 	NULL,
957 	dhd_prot_flow_ring_delete_response_process, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */
958 	NULL,
959 	dhd_prot_flow_ring_flush_response_process, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */
960 	NULL,
961 	dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */
962 	NULL,
963 	dhd_prot_ioctcmplt_process, /* MSG_TYPE_IOCTL_CMPLT */
964 	NULL,
965 	dhd_prot_event_process, /* MSG_TYPE_WL_EVENT */
966 	NULL,
967 	dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */
968 	NULL,
969 	NULL,	/* MSG_TYPE_RX_CMPLT use dedicated handler */
970 	NULL,
971 	dhd_msgbuf_dmaxfer_process, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */
972 	NULL, /* MSG_TYPE_FLOW_RING_RESUME */
973 	dhd_prot_process_flow_ring_resume_response, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */
974 	NULL, /* MSG_TYPE_FLOW_RING_SUSPEND */
975 	dhd_prot_process_flow_ring_suspend_response, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */
976 	NULL, /* MSG_TYPE_INFO_BUF_POST */
977 #if defined(BCM_ROUTER_DHD)
978 	NULL, /* MSG_TYPE_INFO_BUF_CMPLT */
979 #else
980 	dhd_prot_process_infobuf_complete, /* MSG_TYPE_INFO_BUF_CMPLT */
981 #endif /* BCM_ROUTER_DHD */
982 	NULL, /* MSG_TYPE_H2D_RING_CREATE */
983 	NULL, /* MSG_TYPE_D2H_RING_CREATE */
984 #if defined(BCM_ROUTER_DHD)
985 	NULL, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */
986 #else
987 	dhd_prot_process_h2d_ring_create_complete, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */
988 #endif /* BCM_ROUTER_DHD */
989 	dhd_prot_process_d2h_ring_create_complete, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */
990 	NULL, /* MSG_TYPE_H2D_RING_CONFIG */
991 	NULL, /* MSG_TYPE_D2H_RING_CONFIG */
992 	NULL, /* MSG_TYPE_H2D_RING_CONFIG_CMPLT */
993 	dhd_prot_process_d2h_ring_config_complete, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT */
994 	NULL, /* MSG_TYPE_H2D_MAILBOX_DATA */
995 	dhd_prot_process_d2h_mb_data, /* MSG_TYPE_D2H_MAILBOX_DATA */
996 	NULL,	/* MSG_TYPE_TIMSTAMP_BUFPOST */
997 	NULL,	/* MSG_TYPE_HOSTTIMSTAMP */
998 	dhd_prot_process_d2h_host_ts_complete,	/* MSG_TYPE_HOSTTIMSTAMP_CMPLT */
999 	dhd_prot_process_fw_timestamp,	/* MSG_TYPE_FIRMWARE_TIMESTAMP */
1000 	NULL,	/* MSG_TYPE_SNAPSHOT_UPLOAD */
1001 	dhd_prot_process_snapshot_complete,	/* MSG_TYPE_SNAPSHOT_CMPLT */
1002 };
1003 
1004 #if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
1005 /* Related to router CPU mapping per radio core */
1006 #define DHD_RX_CHAINING
1007 #endif /* BCM_ROUTER_DHD && HNDCTF */
1008 
1009 #ifdef DHD_RX_CHAINING
1010 
1011 #define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \
1012 	(dhd_wet_chainable(dhd) && \
1013 	dhd_rx_pkt_chainable((dhd), (ifidx)) && \
1014 	!ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \
1015 	!ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \
1016 	!eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \
1017 	!eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \
1018 	((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \
1019 	((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \
1020 	(((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6))))
1021 
1022 static INLINE void dhd_rxchain_reset(rxchain_info_t *rxchain);
1023 static void dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx);
1024 static void dhd_rxchain_commit(dhd_pub_t *dhd);
1025 
1026 #define DHD_PKT_CTF_MAX_CHAIN_LEN	64
1027 
1028 #endif /* DHD_RX_CHAINING */
1029 
1030 #ifdef DHD_EFI
1031 #define DHD_LPBKDTDUMP_ON()	(1)
1032 #else
1033 #define DHD_LPBKDTDUMP_ON()	(dhd_msg_level & DHD_LPBKDTDUMP_VAL)
1034 #endif
1035 
1036 static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd);
1037 
1038 #ifdef D2H_MINIDUMP
1039 dhd_dma_buf_t *
dhd_prot_get_minidump_buf(dhd_pub_t * dhd)1040 dhd_prot_get_minidump_buf(dhd_pub_t *dhd)
1041 {
1042 	return &dhd->prot->fw_trap_buf;
1043 }
1044 #endif /* D2H_MINIDUMP */
1045 
1046 uint16
dhd_prot_get_rxbufpost_sz(dhd_pub_t * dhd)1047 dhd_prot_get_rxbufpost_sz(dhd_pub_t *dhd)
1048 {
1049 	return dhd->prot->rxbufpost_sz;
1050 }
1051 
1052 uint16
dhd_prot_get_h2d_rx_post_active(dhd_pub_t * dhd)1053 dhd_prot_get_h2d_rx_post_active(dhd_pub_t *dhd)
1054 {
1055 	dhd_prot_t *prot = dhd->prot;
1056 	msgbuf_ring_t *flow_ring = &prot->h2dring_rxp_subn;
1057 	uint16 rd, wr;
1058 
1059 	/* Since wr is owned by host in h2d direction, directly read wr */
1060 	wr = flow_ring->wr;
1061 
1062 	if (dhd->dma_d2h_ring_upd_support) {
1063 		rd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, flow_ring->idx);
1064 	} else {
1065 		dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
1066 	}
1067 	return NTXPACTIVE(rd, wr, flow_ring->max_items);
1068 }
1069 
1070 uint16
dhd_prot_get_d2h_rx_cpln_active(dhd_pub_t * dhd)1071 dhd_prot_get_d2h_rx_cpln_active(dhd_pub_t *dhd)
1072 {
1073 	dhd_prot_t *prot = dhd->prot;
1074 	msgbuf_ring_t *flow_ring = &prot->d2hring_rx_cpln;
1075 	uint16 rd, wr;
1076 
1077 	if (dhd->dma_d2h_ring_upd_support) {
1078 		wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, flow_ring->idx);
1079 	} else {
1080 		dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
1081 	}
1082 
1083 	/* Since rd is owned by host in d2h direction, directly read rd */
1084 	rd = flow_ring->rd;
1085 
1086 	return NTXPACTIVE(rd, wr, flow_ring->max_items);
1087 }
1088 
1089 bool
dhd_prot_is_cmpl_ring_empty(dhd_pub_t * dhd,void * prot_info)1090 dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info)
1091 {
1092 	msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)prot_info;
1093 	uint16 rd, wr;
1094 	bool ret;
1095 
1096 	if (dhd->dma_d2h_ring_upd_support) {
1097 		wr = flow_ring->wr;
1098 	} else {
1099 		dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
1100 	}
1101 	if (dhd->dma_h2d_ring_upd_support) {
1102 		rd = flow_ring->rd;
1103 	} else {
1104 		dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
1105 	}
1106 	ret = (wr == rd) ? TRUE : FALSE;
1107 	return ret;
1108 }
1109 
1110 void
dhd_prot_dump_ring_ptrs(void * prot_info)1111 dhd_prot_dump_ring_ptrs(void *prot_info)
1112 {
1113 	msgbuf_ring_t *ring = (msgbuf_ring_t *)prot_info;
1114 	DHD_ERROR(("%s curr_rd: %d rd: %d wr: %d \n", __FUNCTION__,
1115 		ring->curr_rd, ring->rd, ring->wr));
1116 }
1117 
1118 uint16
dhd_prot_get_h2d_max_txpost(dhd_pub_t * dhd)1119 dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd)
1120 {
1121 	return (uint16)h2d_max_txpost;
1122 }
1123 void
dhd_prot_set_h2d_max_txpost(dhd_pub_t * dhd,uint16 max_txpost)1124 dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost)
1125 {
1126 	h2d_max_txpost = max_txpost;
1127 }
1128 #if defined(DHD_HTPUT_TUNABLES)
1129 uint16
dhd_prot_get_h2d_htput_max_txpost(dhd_pub_t * dhd)1130 dhd_prot_get_h2d_htput_max_txpost(dhd_pub_t *dhd)
1131 {
1132 	return (uint16)h2d_htput_max_txpost;
1133 }
1134 void
dhd_prot_set_h2d_htput_max_txpost(dhd_pub_t * dhd,uint16 htput_max_txpost)1135 dhd_prot_set_h2d_htput_max_txpost(dhd_pub_t *dhd, uint16 htput_max_txpost)
1136 {
1137 	h2d_htput_max_txpost = htput_max_txpost;
1138 }
1139 
1140 #endif /* DHD_HTPUT_TUNABLES */
1141 /**
1142  * D2H DMA to completion callback handlers. Based on the mode advertised by the
1143  * dongle through the PCIE shared region, the appropriate callback will be
1144  * registered in the proto layer to be invoked prior to precessing any message
1145  * from a D2H DMA ring. If the dongle uses a read barrier or another mode that
1146  * does not require host participation, then a noop callback handler will be
1147  * bound that simply returns the msg_type.
1148  */
1149 static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring,
1150                                        uint32 tries, volatile uchar *msg, int msglen);
1151 static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1152                                       volatile cmn_msg_hdr_t *msg, int msglen);
1153 static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1154                                        volatile cmn_msg_hdr_t *msg, int msglen);
1155 static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1156                                     volatile cmn_msg_hdr_t *msg, int msglen);
1157 static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd);
1158 static int dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
1159 	uint16 ring_type, uint32 id);
1160 static int dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
1161 	uint8 type, uint32 id);
1162 
1163 /**
1164  * dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has
1165  * not completed, a livelock condition occurs. Host will avert this livelock by
1166  * dropping this message and moving to the next. This dropped message can lead
1167  * to a packet leak, or even something disastrous in the case the dropped
1168  * message happens to be a control response.
1169  * Here we will log this condition. One may choose to reboot the dongle.
1170  *
1171  */
1172 static void
dhd_prot_d2h_sync_livelock(dhd_pub_t * dhd,uint32 msg_seqnum,msgbuf_ring_t * ring,uint32 tries,volatile uchar * msg,int msglen)1173 dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring, uint32 tries,
1174                            volatile uchar *msg, int msglen)
1175 {
1176 	uint32 ring_seqnum = ring->seqnum;
1177 
1178 	if (dhd_query_bus_erros(dhd)) {
1179 		return;
1180 	}
1181 
1182 	DHD_ERROR((
1183 		"LIVELOCK DHD<%p> ring<%s> msg_seqnum<%u> ring_seqnum<%u:%u> tries<%u> max<%lu>"
1184 		" tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d> rd<%d> wr<%d>\n",
1185 		dhd, ring->name, msg_seqnum, ring_seqnum, ring_seqnum% D2H_EPOCH_MODULO, tries,
1186 		dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot,
1187 		ring->dma_buf.va, msg, ring->curr_rd, ring->rd, ring->wr));
1188 
1189 	dhd_prhex("D2H MsgBuf Failure", msg, msglen, DHD_ERROR_VAL);
1190 
1191 	/* Try to resume if already suspended or suspend in progress */
1192 #ifdef DHD_PCIE_RUNTIMEPM
1193 	dhdpcie_runtime_bus_wake(dhd, CAN_SLEEP(), __builtin_return_address(0));
1194 #endif /* DHD_PCIE_RUNTIMEPM */
1195 
1196 	/* Skip if still in suspended or suspend in progress */
1197 	if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) {
1198 		DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
1199 			__FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state));
1200 		goto exit;
1201 	}
1202 
1203 	dhd_bus_dump_console_buffer(dhd->bus);
1204 	dhd_prot_debug_info_print(dhd);
1205 
1206 #ifdef DHD_FW_COREDUMP
1207 	if (dhd->memdump_enabled) {
1208 		/* collect core dump */
1209 		dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK;
1210 		dhd_bus_mem_dump(dhd);
1211 	}
1212 #endif /* DHD_FW_COREDUMP */
1213 
1214 exit:
1215 	dhd_schedule_reset(dhd);
1216 
1217 #ifdef OEM_ANDROID
1218 #ifdef SUPPORT_LINKDOWN_RECOVERY
1219 #ifdef CONFIG_ARCH_MSM
1220 	dhd->bus->no_cfg_restore = 1;
1221 #endif /* CONFIG_ARCH_MSM */
1222 	/* XXX Trigger HANG event for recovery */
1223 	dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK;
1224 	dhd_os_send_hang_message(dhd);
1225 #endif /* SUPPORT_LINKDOWN_RECOVERY */
1226 #endif /* OEM_ANDROID */
1227 	dhd->livelock_occured = TRUE;
1228 }
1229 
1230 /**
1231  * dhd_prot_d2h_sync_seqnum - Sync on a D2H DMA completion using the SEQNUM
1232  * mode. Sequence number is always in the last word of a message.
1233  */
1234 static uint8
BCMFASTPATH(dhd_prot_d2h_sync_seqnum)1235 BCMFASTPATH(dhd_prot_d2h_sync_seqnum)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1236                          volatile cmn_msg_hdr_t *msg, int msglen)
1237 {
1238 	uint32 tries;
1239 	uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
1240 	int num_words = msglen / sizeof(uint32); /* num of 32bit words */
1241 	volatile uint32 *marker = (volatile uint32 *)msg + (num_words - 1); /* last word */
1242 	dhd_prot_t *prot = dhd->prot;
1243 	uint32 msg_seqnum;
1244 	uint32 step = 0;
1245 	uint32 delay = PCIE_D2H_SYNC_DELAY;
1246 	uint32 total_tries = 0;
1247 
1248 	ASSERT(msglen == ring->item_len);
1249 
1250 	BCM_REFERENCE(delay);
1251 	/*
1252 	 * For retries we have to make some sort of stepper algorithm.
1253 	 * We see that every time when the Dongle comes out of the D3
1254 	 * Cold state, the first D2H mem2mem DMA takes more time to
1255 	 * complete, leading to livelock issues.
1256 	 *
1257 	 * Case 1 - Apart from Host CPU some other bus master is
1258 	 * accessing the DDR port, probably page close to the ring
1259 	 * so, PCIE does not get a change to update the memory.
1260 	 * Solution - Increase the number of tries.
1261 	 *
1262 	 * Case 2 - The 50usec delay given by the Host CPU is not
1263 	 * sufficient for the PCIe RC to start its work.
1264 	 * In this case the breathing time of 50usec given by
1265 	 * the Host CPU is not sufficient.
1266 	 * Solution: Increase the delay in a stepper fashion.
1267 	 * This is done to ensure that there are no
1268 	 * unwanted extra delay introdcued in normal conditions.
1269 	 */
1270 	for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
1271 		for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
1272 			msg_seqnum = *marker;
1273 			if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma upto last word done */
1274 				ring->seqnum++; /* next expected sequence number */
1275 				/* Check for LIVELOCK induce flag, which is set by firing
1276 				 * dhd iovar to induce LIVELOCK error. If flag is set,
1277 				 * MSG_TYPE_INVALID is returned, which results in to LIVELOCK error.
1278 				 */
1279 				if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
1280 					goto dma_completed;
1281 				}
1282 			}
1283 
1284 			total_tries = (uint32)(((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries);
1285 
1286 			if (total_tries > prot->d2h_sync_wait_max)
1287 				prot->d2h_sync_wait_max = total_tries;
1288 
1289 			OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
1290 			OSL_CPU_RELAX(); /* CPU relax for msg_seqnum  value to update */
1291 			OSL_DELAY(delay * step); /* Add stepper delay */
1292 
1293 		} /* for PCIE_D2H_SYNC_WAIT_TRIES */
1294 	} /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
1295 
1296 	dhd_prot_d2h_sync_livelock(dhd, msg_seqnum, ring, total_tries,
1297 		(volatile uchar *) msg, msglen);
1298 
1299 	ring->seqnum++; /* skip this message ... leak of a pktid */
1300 	return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
1301 
1302 dma_completed:
1303 
1304 	prot->d2h_sync_wait_tot += tries;
1305 	return msg->msg_type;
1306 }
1307 
1308 /**
1309  * dhd_prot_d2h_sync_xorcsum - Sync on a D2H DMA completion using the XORCSUM
1310  * mode. The xorcsum is placed in the last word of a message. Dongle will also
1311  * place a seqnum in the epoch field of the cmn_msg_hdr.
1312  */
1313 static uint8
BCMFASTPATH(dhd_prot_d2h_sync_xorcsum)1314 BCMFASTPATH(dhd_prot_d2h_sync_xorcsum)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1315                           volatile cmn_msg_hdr_t *msg, int msglen)
1316 {
1317 	uint32 tries;
1318 	uint32 prot_checksum = 0; /* computed checksum */
1319 	int num_words = msglen / sizeof(uint32); /* num of 32bit words */
1320 	uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
1321 	dhd_prot_t *prot = dhd->prot;
1322 	uint32 step = 0;
1323 	uint32 delay = PCIE_D2H_SYNC_DELAY;
1324 	uint32 total_tries = 0;
1325 
1326 	ASSERT(msglen == ring->item_len);
1327 
1328 	BCM_REFERENCE(delay);
1329 	/*
1330 	 * For retries we have to make some sort of stepper algorithm.
1331 	 * We see that every time when the Dongle comes out of the D3
1332 	 * Cold state, the first D2H mem2mem DMA takes more time to
1333 	 * complete, leading to livelock issues.
1334 	 *
1335 	 * Case 1 - Apart from Host CPU some other bus master is
1336 	 * accessing the DDR port, probably page close to the ring
1337 	 * so, PCIE does not get a change to update the memory.
1338 	 * Solution - Increase the number of tries.
1339 	 *
1340 	 * Case 2 - The 50usec delay given by the Host CPU is not
1341 	 * sufficient for the PCIe RC to start its work.
1342 	 * In this case the breathing time of 50usec given by
1343 	 * the Host CPU is not sufficient.
1344 	 * Solution: Increase the delay in a stepper fashion.
1345 	 * This is done to ensure that there are no
1346 	 * unwanted extra delay introdcued in normal conditions.
1347 	 */
1348 	for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
1349 		for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
1350 			/* First verify if the seqnumber has been update,
1351 			 * if yes, then only check xorcsum.
1352 			 * Once seqnum and xorcsum is proper that means
1353 			 * complete message has arrived.
1354 			 */
1355 			if (msg->epoch == ring_seqnum) {
1356 				prot_checksum = bcm_compute_xor32((volatile uint32 *)msg,
1357 					num_words);
1358 				if (prot_checksum == 0U) { /* checksum is OK */
1359 					ring->seqnum++; /* next expected sequence number */
1360 					/* Check for LIVELOCK induce flag, which is set by firing
1361 					 * dhd iovar to induce LIVELOCK error. If flag is set,
1362 					 * MSG_TYPE_INVALID is returned, which results in to
1363 					 * LIVELOCK error.
1364 					 */
1365 					if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
1366 						goto dma_completed;
1367 					}
1368 				}
1369 			}
1370 
1371 			total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
1372 
1373 			if (total_tries > prot->d2h_sync_wait_max)
1374 				prot->d2h_sync_wait_max = total_tries;
1375 
1376 			OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
1377 			OSL_CPU_RELAX(); /* CPU relax for msg_seqnum  value to update */
1378 			OSL_DELAY(delay * step); /* Add stepper delay */
1379 
1380 		} /* for PCIE_D2H_SYNC_WAIT_TRIES */
1381 	} /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
1382 
1383 	DHD_ERROR(("%s: prot_checksum = 0x%x\n", __FUNCTION__, prot_checksum));
1384 	dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
1385 		(volatile uchar *) msg, msglen);
1386 
1387 	ring->seqnum++; /* skip this message ... leak of a pktid */
1388 	return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
1389 
1390 dma_completed:
1391 
1392 	prot->d2h_sync_wait_tot += tries;
1393 	return msg->msg_type;
1394 }
1395 
1396 /**
1397  * dhd_prot_d2h_sync_none - Dongle ensure that the DMA will complete and host
1398  * need to try to sync. This noop sync handler will be bound when the dongle
1399  * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required.
1400  */
1401 static uint8
BCMFASTPATH(dhd_prot_d2h_sync_none)1402 BCMFASTPATH(dhd_prot_d2h_sync_none)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1403                        volatile cmn_msg_hdr_t *msg, int msglen)
1404 {
1405 	/* Check for LIVELOCK induce flag, which is set by firing
1406 	* dhd iovar to induce LIVELOCK error. If flag is set,
1407 	* MSG_TYPE_INVALID is returned, which results in to LIVELOCK error.
1408 	*/
1409 	if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
1410 		DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
1411 		return MSG_TYPE_INVALID;
1412 	} else {
1413 		return msg->msg_type;
1414 	}
1415 }
1416 
1417 #ifdef EWP_EDL
1418 /**
1419  * dhd_prot_d2h_sync_edl - Sync on a D2H DMA completion by validating the cmn_msg_hdr_t
1420  * header values at both the beginning and end of the payload.
1421  * The cmn_msg_hdr_t is placed at the start and end of the payload
1422  * in each work item in the EDL ring.
1423  * Dongle will place a seqnum inside the cmn_msg_hdr_t 'epoch' field
1424  * and the length of the payload in the 'request_id' field.
1425  * Structure of each work item in the EDL ring:
1426  * | cmn_msg_hdr_t | payload (var len) | cmn_msg_hdr_t |
1427  * NOTE: - it was felt that calculating xorcsum for the entire payload (max length of 1648 bytes) is
1428  * too costly on the dongle side and might take up too many ARM cycles,
1429  * hence the xorcsum sync method is not being used for EDL ring.
1430  */
1431 static int
BCMFASTPATH(dhd_prot_d2h_sync_edl)1432 BCMFASTPATH(dhd_prot_d2h_sync_edl)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1433                           volatile cmn_msg_hdr_t *msg)
1434 {
1435 	uint32 tries;
1436 	int msglen = 0, len = 0;
1437 	uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
1438 	dhd_prot_t *prot = dhd->prot;
1439 	uint32 step = 0;
1440 	uint32 delay = PCIE_D2H_SYNC_DELAY;
1441 	uint32 total_tries = 0;
1442 	volatile cmn_msg_hdr_t *trailer = NULL;
1443 	volatile uint8 *buf = NULL;
1444 	bool valid_msg = FALSE;
1445 
1446 	BCM_REFERENCE(delay);
1447 	/*
1448 	 * For retries we have to make some sort of stepper algorithm.
1449 	 * We see that every time when the Dongle comes out of the D3
1450 	 * Cold state, the first D2H mem2mem DMA takes more time to
1451 	 * complete, leading to livelock issues.
1452 	 *
1453 	 * Case 1 - Apart from Host CPU some other bus master is
1454 	 * accessing the DDR port, probably page close to the ring
1455 	 * so, PCIE does not get a change to update the memory.
1456 	 * Solution - Increase the number of tries.
1457 	 *
1458 	 * Case 2 - The 50usec delay given by the Host CPU is not
1459 	 * sufficient for the PCIe RC to start its work.
1460 	 * In this case the breathing time of 50usec given by
1461 	 * the Host CPU is not sufficient.
1462 	 * Solution: Increase the delay in a stepper fashion.
1463 	 * This is done to ensure that there are no
1464 	 * unwanted extra delay introdcued in normal conditions.
1465 	 */
1466 	for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
1467 		for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
1468 			/* First verify if the seqnumber has been updated,
1469 			 * if yes, only then validate the header and trailer.
1470 			 * Once seqnum, header and trailer have been validated, it means
1471 			 * that the complete message has arrived.
1472 			 */
1473 			valid_msg = FALSE;
1474 			if (msg->epoch == ring_seqnum &&
1475 				msg->msg_type == MSG_TYPE_INFO_PYLD &&
1476 				msg->request_id > 0 &&
1477 				msg->request_id <= ring->item_len) {
1478 				/* proceed to check trailer only if header is valid */
1479 				buf = (volatile uint8 *)msg;
1480 				msglen = sizeof(cmn_msg_hdr_t) + msg->request_id;
1481 				buf += msglen;
1482 				if (msglen + sizeof(cmn_msg_hdr_t) <= ring->item_len) {
1483 					trailer = (volatile cmn_msg_hdr_t *)buf;
1484 					valid_msg = (trailer->epoch == ring_seqnum) &&
1485 						(trailer->msg_type == msg->msg_type) &&
1486 						(trailer->request_id == msg->request_id);
1487 					if (!valid_msg) {
1488 						DHD_TRACE(("%s:invalid trailer! seqnum=%u;reqid=%u"
1489 						" expected, seqnum=%u; reqid=%u. Retrying... \n",
1490 						__FUNCTION__, trailer->epoch, trailer->request_id,
1491 						msg->epoch, msg->request_id));
1492 					}
1493 				} else {
1494 					DHD_TRACE(("%s: invalid payload length (%u)! Retrying.. \n",
1495 						__FUNCTION__, msg->request_id));
1496 				}
1497 
1498 				if (valid_msg) {
1499 					/* data is OK */
1500 					ring->seqnum++; /* next expected sequence number */
1501 					if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
1502 						goto dma_completed;
1503 					}
1504 				}
1505 			} else {
1506 				DHD_TRACE(("%s: wrong hdr, seqnum expected %u, got %u."
1507 					" msg_type=0x%x, request_id=%u."
1508 					" Retrying...\n",
1509 					__FUNCTION__, ring_seqnum, msg->epoch,
1510 					msg->msg_type, msg->request_id));
1511 			}
1512 
1513 			total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
1514 
1515 			if (total_tries > prot->d2h_sync_wait_max)
1516 				prot->d2h_sync_wait_max = total_tries;
1517 
1518 			OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
1519 #if !(defined(BCM_ROUTER_DHD) && defined(BCM_GMAC3))
1520 			OSL_CPU_RELAX(); /* CPU relax for msg_seqnum  value to update */
1521 			OSL_DELAY(delay * step); /* Add stepper delay */
1522 #endif /* !(defined(BCM_ROUTER_DHD) && defined(BCM_GMAC3)) */
1523 
1524 		} /* for PCIE_D2H_SYNC_WAIT_TRIES */
1525 	} /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
1526 
1527 	DHD_ERROR(("%s: EDL header check fails !\n", __FUNCTION__));
1528 	DHD_ERROR(("%s: header: seqnum=%u; expected-seqnum=%u"
1529 		" msgtype=0x%x; expected-msgtype=0x%x"
1530 		" length=%u; expected-max-length=%u", __FUNCTION__,
1531 		msg->epoch, ring_seqnum, msg->msg_type, MSG_TYPE_INFO_PYLD,
1532 		msg->request_id, ring->item_len));
1533 	dhd_prhex("msg header bytes: ", (volatile uchar *)msg, sizeof(*msg), DHD_ERROR_VAL);
1534 	if (trailer && msglen > 0 &&
1535 			(msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len) {
1536 		DHD_ERROR(("%s: trailer: seqnum=%u; expected-seqnum=%u"
1537 			" msgtype=0x%x; expected-msgtype=0x%x"
1538 			" length=%u; expected-length=%u", __FUNCTION__,
1539 			trailer->epoch, ring_seqnum, trailer->msg_type, MSG_TYPE_INFO_PYLD,
1540 			trailer->request_id, msg->request_id));
1541 		dhd_prhex("msg trailer bytes: ", (volatile uchar *)trailer,
1542 			sizeof(*trailer), DHD_ERROR_VAL);
1543 	}
1544 
1545 	if ((msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len)
1546 		len = msglen + sizeof(cmn_msg_hdr_t);
1547 	else
1548 		len = ring->item_len;
1549 
1550 	dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
1551 		(volatile uchar *) msg, len);
1552 
1553 	ring->seqnum++; /* skip this message */
1554 	return BCME_ERROR; /* invalid msg_type 0 -> noop callback */
1555 
1556 dma_completed:
1557 	DHD_TRACE(("%s: EDL header check pass, seqnum=%u; reqid=%u\n", __FUNCTION__,
1558 		msg->epoch, msg->request_id));
1559 
1560 	prot->d2h_sync_wait_tot += tries;
1561 	return BCME_OK;
1562 }
1563 
1564 /**
1565  * dhd_prot_d2h_sync_edl_none - Dongle ensure that the DMA will complete and host
1566  * need to try to sync. This noop sync handler will be bound when the dongle
1567  * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required.
1568  */
BCMFASTPATH(dhd_prot_d2h_sync_edl_none)1569 static int BCMFASTPATH
1570 (dhd_prot_d2h_sync_edl_none)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1571                        volatile cmn_msg_hdr_t *msg)
1572 {
1573 	/* Check for LIVELOCK induce flag, which is set by firing
1574 	* dhd iovar to induce LIVELOCK error. If flag is set,
1575 	* MSG_TYPE_INVALID is returned, which results in to LIVELOCK error.
1576 	*/
1577 	if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
1578 		DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
1579 		return BCME_ERROR;
1580 	} else {
1581 		if (msg->msg_type == MSG_TYPE_INFO_PYLD)
1582 			return BCME_OK;
1583 		else
1584 			return msg->msg_type;
1585 	}
1586 }
1587 #endif /* EWP_EDL */
1588 
1589 INLINE void
dhd_wakeup_ioctl_event(dhd_pub_t * dhd,dhd_ioctl_recieved_status_t reason)1590 dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason)
1591 {
1592 	/* To synchronize with the previous memory operations call wmb() */
1593 	OSL_SMP_WMB();
1594 	dhd->prot->ioctl_received = reason;
1595 	/* Call another wmb() to make sure before waking up the other event value gets updated */
1596 	OSL_SMP_WMB();
1597 	dhd_os_ioctl_resp_wake(dhd);
1598 }
1599 
1600 /**
1601  * dhd_prot_d2h_sync_init - Setup the host side DMA sync mode based on what
1602  * dongle advertizes.
1603  */
1604 static void
dhd_prot_d2h_sync_init(dhd_pub_t * dhd)1605 dhd_prot_d2h_sync_init(dhd_pub_t *dhd)
1606 {
1607 	dhd_prot_t *prot = dhd->prot;
1608 	prot->d2h_sync_wait_max = 0UL;
1609 	prot->d2h_sync_wait_tot = 0UL;
1610 
1611 	prot->d2hring_ctrl_cpln.seqnum = D2H_EPOCH_INIT_VAL;
1612 	prot->d2hring_ctrl_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
1613 
1614 	prot->d2hring_tx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
1615 	prot->d2hring_tx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
1616 
1617 	prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
1618 	prot->d2hring_rx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
1619 
1620 	if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) {
1621 		prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum;
1622 #ifdef EWP_EDL
1623 		prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
1624 #endif /* EWP_EDL */
1625 		DHD_ERROR(("%s(): D2H sync mechanism is SEQNUM \r\n", __FUNCTION__));
1626 	} else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) {
1627 		prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
1628 #ifdef EWP_EDL
1629 		prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
1630 #endif /* EWP_EDL */
1631 		DHD_ERROR(("%s(): D2H sync mechanism is XORCSUM \r\n", __FUNCTION__));
1632 	} else {
1633 		prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
1634 #ifdef EWP_EDL
1635 		prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl_none;
1636 #endif /* EWP_EDL */
1637 		DHD_ERROR(("%s(): D2H sync mechanism is NONE \r\n", __FUNCTION__));
1638 	}
1639 }
1640 
1641 /**
1642  * dhd_prot_h2d_sync_init - Per H2D common ring, setup the msgbuf ring seqnum
1643  */
1644 static void
dhd_prot_h2d_sync_init(dhd_pub_t * dhd)1645 dhd_prot_h2d_sync_init(dhd_pub_t *dhd)
1646 {
1647 	dhd_prot_t *prot = dhd->prot;
1648 	prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL;
1649 
1650 	prot->h2dring_rxp_subn.current_phase = 0;
1651 
1652 	prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL;
1653 	prot->h2dring_ctrl_subn.current_phase = 0;
1654 }
1655 
1656 /* +-----------------  End of PCIE DHD H2D DMA SYNC ------------------------+ */
1657 
1658 /*
1659  * +---------------------------------------------------------------------------+
1660  * PCIE DMA-able buffer. Sets up a dhd_dma_buf_t object, which includes the
1661  * virtual and physical address, the buffer lenght and the DMA handler.
1662  * A secdma handler is also included in the dhd_dma_buf object.
1663  * +---------------------------------------------------------------------------+
1664  */
1665 
1666 static INLINE void
dhd_base_addr_htolpa(sh_addr_t * base_addr,dmaaddr_t pa)1667 dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa)
1668 {
1669 	base_addr->low_addr = htol32(PHYSADDRLO(pa));
1670 	base_addr->high_addr = htol32(PHYSADDRHI(pa));
1671 }
1672 
1673 /**
1674  * dhd_dma_buf_audit - Any audits on a DHD DMA Buffer.
1675  */
1676 static int
dhd_dma_buf_audit(dhd_pub_t * dhd,dhd_dma_buf_t * dma_buf)1677 dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
1678 {
1679 	uint32 pa_lowaddr, end; /* dongle uses 32bit ptr arithmetic */
1680 	ASSERT(dma_buf);
1681 	pa_lowaddr = PHYSADDRLO(dma_buf->pa);
1682 	ASSERT(PHYSADDRLO(dma_buf->pa) || PHYSADDRHI(dma_buf->pa));
1683 	ASSERT(ISALIGNED(pa_lowaddr, DMA_ALIGN_LEN));
1684 	ASSERT(dma_buf->len != 0);
1685 
1686 	/* test 32bit offset arithmetic over dma buffer for loss of carry-over */
1687 	end = (pa_lowaddr + dma_buf->len); /* end address */
1688 
1689 	if ((end & 0xFFFFFFFF) < (pa_lowaddr & 0xFFFFFFFF)) { /* exclude carryover */
1690 		DHD_ERROR(("%s: dma_buf %x len %d spans dongle 32bit ptr arithmetic\n",
1691 			__FUNCTION__, pa_lowaddr, dma_buf->len));
1692 		return BCME_ERROR;
1693 	}
1694 
1695 	return BCME_OK;
1696 }
1697 
1698 /**
1699  * dhd_dma_buf_alloc - Allocate a cache coherent DMA-able buffer.
1700  * returns BCME_OK=0 on success
1701  * returns non-zero negative error value on failure.
1702  */
1703 int
dhd_dma_buf_alloc(dhd_pub_t * dhd,dhd_dma_buf_t * dma_buf,uint32 buf_len)1704 dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len)
1705 {
1706 	uint32 dma_pad = 0;
1707 	osl_t *osh = dhd->osh;
1708 	uint16 dma_align = DMA_ALIGN_LEN;
1709 	uint32 rem = 0;
1710 
1711 	ASSERT(dma_buf != NULL);
1712 	ASSERT(dma_buf->va == NULL);
1713 	ASSERT(dma_buf->len == 0);
1714 
1715 	/* Pad the buffer length to align to cacheline size. */
1716 	rem = (buf_len % DHD_DMA_PAD);
1717 	dma_pad = rem ? (DHD_DMA_PAD - rem) : 0;
1718 
1719 	dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad,
1720 		dma_align, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah);
1721 
1722 	if (dma_buf->va == NULL) {
1723 		DHD_ERROR(("%s: buf_len %d, no memory available\n",
1724 			__FUNCTION__, buf_len));
1725 		return BCME_NOMEM;
1726 	}
1727 
1728 	dma_buf->len = buf_len; /* not including padded len */
1729 
1730 	if (dhd_dma_buf_audit(dhd, dma_buf) != BCME_OK) { /* audit dma buf */
1731 		dhd_dma_buf_free(dhd, dma_buf);
1732 		return BCME_ERROR;
1733 	}
1734 
1735 	dhd_dma_buf_reset(dhd, dma_buf); /* zero out and cache flush */
1736 
1737 	return BCME_OK;
1738 }
1739 
1740 /**
1741  * dhd_dma_buf_reset - Reset a cache coherent DMA-able buffer.
1742  */
1743 static void
dhd_dma_buf_reset(dhd_pub_t * dhd,dhd_dma_buf_t * dma_buf)1744 dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
1745 {
1746 	if ((dma_buf == NULL) || (dma_buf->va == NULL))
1747 		return;
1748 
1749 	(void)dhd_dma_buf_audit(dhd, dma_buf);
1750 
1751 	/* Zero out the entire buffer and cache flush */
1752 	memset((void*)dma_buf->va, 0, dma_buf->len);
1753 	OSL_CACHE_FLUSH((void *)dma_buf->va, dma_buf->len);
1754 }
1755 
1756 void
dhd_local_buf_reset(char * buf,uint32 len)1757 dhd_local_buf_reset(char *buf, uint32 len)
1758 {
1759 	/* Zero out the entire buffer and cache flush */
1760 	memset((void*)buf, 0, len);
1761 	OSL_CACHE_FLUSH((void *)buf, len);
1762 }
1763 
1764 /**
1765  * dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated using
1766  * dhd_dma_buf_alloc().
1767  */
1768 void
dhd_dma_buf_free(dhd_pub_t * dhd,dhd_dma_buf_t * dma_buf)1769 dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
1770 {
1771 	osl_t *osh = dhd->osh;
1772 
1773 	ASSERT(dma_buf);
1774 
1775 	if (dma_buf->va == NULL)
1776 		return; /* Allow for free invocation, when alloc failed */
1777 
1778 	/* DEBUG: dhd_dma_buf_reset(dhd, dma_buf) */
1779 	(void)dhd_dma_buf_audit(dhd, dma_buf);
1780 
1781 	/* dma buffer may have been padded at allocation */
1782 	DMA_FREE_CONSISTENT(osh, dma_buf->va, dma_buf->_alloced,
1783 		dma_buf->pa, dma_buf->dmah);
1784 
1785 	memset(dma_buf, 0, sizeof(dhd_dma_buf_t));
1786 }
1787 
1788 /**
1789  * dhd_dma_buf_init - Initialize a dhd_dma_buf with speicifed values.
1790  * Do not use dhd_dma_buf_init to zero out a dhd_dma_buf_t object. Use memset 0.
1791  */
1792 void
dhd_dma_buf_init(dhd_pub_t * dhd,void * dhd_dma_buf,void * va,uint32 len,dmaaddr_t pa,void * dmah,void * secdma)1793 dhd_dma_buf_init(dhd_pub_t *dhd, void *dhd_dma_buf,
1794 	void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma)
1795 {
1796 	dhd_dma_buf_t *dma_buf;
1797 	ASSERT(dhd_dma_buf);
1798 	dma_buf = (dhd_dma_buf_t *)dhd_dma_buf;
1799 	dma_buf->va = va;
1800 	dma_buf->len = len;
1801 	dma_buf->pa = pa;
1802 	dma_buf->dmah = dmah;
1803 	dma_buf->secdma = secdma;
1804 
1805 	/* Audit user defined configuration */
1806 	(void)dhd_dma_buf_audit(dhd, dma_buf);
1807 }
1808 
1809 /* +------------------  End of PCIE DHD DMA BUF ADT ------------------------+ */
1810 
1811 /*
1812  * +---------------------------------------------------------------------------+
1813  * DHD_MAP_PKTID_LOGGING
1814  * Logging the PKTID and DMA map/unmap information for the SMMU fault issue
1815  * debugging in customer platform.
1816  * +---------------------------------------------------------------------------+
1817  */
1818 
1819 #ifdef DHD_MAP_PKTID_LOGGING
1820 typedef struct dhd_pktid_log_item {
1821 	dmaaddr_t pa;		/* DMA bus address */
1822 	uint64 ts_nsec;		/* Timestamp: nsec */
1823 	uint32 size;		/* DMA map/unmap size */
1824 	uint32 pktid;		/* Packet ID */
1825 	uint8 pkttype;		/* Packet Type */
1826 	uint8 rsvd[7];		/* Reserved for future use */
1827 } dhd_pktid_log_item_t;
1828 
1829 typedef struct dhd_pktid_log {
1830 	uint32 items;		/* number of total items */
1831 	uint32 index;		/* index of pktid_log_item */
1832 	dhd_pktid_log_item_t map[0];	/* metadata storage */
1833 } dhd_pktid_log_t;
1834 
1835 typedef void * dhd_pktid_log_handle_t; /* opaque handle to pktid log */
1836 
1837 #define	MAX_PKTID_LOG				(2048)
1838 #define DHD_PKTID_LOG_ITEM_SZ			(sizeof(dhd_pktid_log_item_t))
1839 #define DHD_PKTID_LOG_SZ(items)			(uint32)((sizeof(dhd_pktid_log_t)) + \
1840 					((DHD_PKTID_LOG_ITEM_SZ) * (items)))
1841 
1842 #define DHD_PKTID_LOG_INIT(dhd, hdl)		dhd_pktid_logging_init((dhd), (hdl))
1843 #define DHD_PKTID_LOG_FINI(dhd, hdl)		dhd_pktid_logging_fini((dhd), (hdl))
1844 #define DHD_PKTID_LOG(dhd, hdl, pa, pktid, len, pkttype)	\
1845 	dhd_pktid_logging((dhd), (hdl), (pa), (pktid), (len), (pkttype))
1846 #define DHD_PKTID_LOG_DUMP(dhd)			dhd_pktid_logging_dump((dhd))
1847 
1848 static dhd_pktid_log_handle_t *
dhd_pktid_logging_init(dhd_pub_t * dhd,uint32 num_items)1849 dhd_pktid_logging_init(dhd_pub_t *dhd, uint32 num_items)
1850 {
1851 	dhd_pktid_log_t *log;
1852 	uint32 log_size;
1853 
1854 	log_size = DHD_PKTID_LOG_SZ(num_items);
1855 	log = (dhd_pktid_log_t *)MALLOCZ(dhd->osh, log_size);
1856 	if (log == NULL) {
1857 		DHD_ERROR(("%s: MALLOC failed for size %d\n",
1858 			__FUNCTION__, log_size));
1859 		return (dhd_pktid_log_handle_t *)NULL;
1860 	}
1861 
1862 	log->items = num_items;
1863 	log->index = 0;
1864 
1865 	return (dhd_pktid_log_handle_t *)log; /* opaque handle */
1866 }
1867 
1868 static void
dhd_pktid_logging_fini(dhd_pub_t * dhd,dhd_pktid_log_handle_t * handle)1869 dhd_pktid_logging_fini(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle)
1870 {
1871 	dhd_pktid_log_t *log;
1872 	uint32 log_size;
1873 
1874 	if (handle == NULL) {
1875 		DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
1876 		return;
1877 	}
1878 
1879 	log = (dhd_pktid_log_t *)handle;
1880 	log_size = DHD_PKTID_LOG_SZ(log->items);
1881 	MFREE(dhd->osh, handle, log_size);
1882 }
1883 
1884 static void
dhd_pktid_logging(dhd_pub_t * dhd,dhd_pktid_log_handle_t * handle,dmaaddr_t pa,uint32 pktid,uint32 len,uint8 pkttype)1885 dhd_pktid_logging(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle, dmaaddr_t pa,
1886 	uint32 pktid, uint32 len, uint8 pkttype)
1887 {
1888 	dhd_pktid_log_t *log;
1889 	uint32 idx;
1890 
1891 	if (handle == NULL) {
1892 		DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
1893 		return;
1894 	}
1895 
1896 	log = (dhd_pktid_log_t *)handle;
1897 	idx = log->index;
1898 	log->map[idx].ts_nsec = OSL_LOCALTIME_NS();
1899 	log->map[idx].pa = pa;
1900 	log->map[idx].pktid = pktid;
1901 	log->map[idx].size = len;
1902 	log->map[idx].pkttype = pkttype;
1903 	log->index = (idx + 1) % (log->items);	/* update index */
1904 }
1905 
1906 void
dhd_pktid_logging_dump(dhd_pub_t * dhd)1907 dhd_pktid_logging_dump(dhd_pub_t *dhd)
1908 {
1909 	dhd_prot_t *prot = dhd->prot;
1910 	dhd_pktid_log_t *map_log, *unmap_log;
1911 	uint64 ts_sec, ts_usec;
1912 
1913 	if (prot == NULL) {
1914 		DHD_ERROR(("%s: prot is NULL\n", __FUNCTION__));
1915 		return;
1916 	}
1917 
1918 	map_log = (dhd_pktid_log_t *)(prot->pktid_dma_map);
1919 	unmap_log = (dhd_pktid_log_t *)(prot->pktid_dma_unmap);
1920 	OSL_GET_LOCALTIME(&ts_sec, &ts_usec);
1921 	if (map_log && unmap_log) {
1922 		DHD_ERROR(("%s: map_idx=%d unmap_idx=%d "
1923 			"current time=[%5lu.%06lu]\n", __FUNCTION__,
1924 			map_log->index, unmap_log->index,
1925 			(unsigned long)ts_sec, (unsigned long)ts_usec));
1926 		DHD_ERROR(("%s: pktid_map_log(pa)=0x%llx size=%d, "
1927 			"pktid_unmap_log(pa)=0x%llx size=%d\n", __FUNCTION__,
1928 			(uint64)__virt_to_phys((ulong)(map_log->map)),
1929 			(uint32)(DHD_PKTID_LOG_ITEM_SZ * map_log->items),
1930 			(uint64)__virt_to_phys((ulong)(unmap_log->map)),
1931 			(uint32)(DHD_PKTID_LOG_ITEM_SZ * unmap_log->items)));
1932 	}
1933 }
1934 #endif /* DHD_MAP_PKTID_LOGGING */
1935 
1936 /* +-----------------  End of DHD_MAP_PKTID_LOGGING -----------------------+ */
1937 
1938 /*
1939  * +---------------------------------------------------------------------------+
1940  * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
1941  * Main purpose is to save memory on the dongle, has other purposes as well.
1942  * The packet id map, also includes storage for some packet parameters that
1943  * may be saved. A native packet pointer along with the parameters may be saved
1944  * and a unique 32bit pkt id will be returned. Later, the saved packet pointer
1945  * and the metadata may be retrieved using the previously allocated packet id.
1946  * +---------------------------------------------------------------------------+
1947  */
1948 #define DHD_PCIE_PKTID
1949 
1950 /* On Router, the pktptr serves as a pktid. */
1951 #if defined(BCM_ROUTER_DHD) && !defined(BCA_HNDROUTER)
1952 #undef DHD_PCIE_PKTID		/* Comment this undef, to reenable PKTIDMAP */
1953 #endif /* BCM_ROUTER_DHD && !BCA_HNDROUTER */
1954 
1955 #if defined(BCM_ROUTER_DHD) && defined(DHD_PCIE_PKTID)
1956 #undef MAX_TX_PKTID
1957 #define MAX_TX_PKTID     ((36 * 1024) - 1) /* Extend for 64 clients support. */
1958 #endif /* BCM_ROUTER_DHD && DHD_PCIE_PKTID */
1959 
1960 /* XXX: PROP_TXSTATUS: WLFS defines a private pkttag layout.
1961  * Hence cannot store the dma parameters in the pkttag and the pktidmap locker
1962  * is required.
1963  */
1964 #if defined(PROP_TXSTATUS) && !defined(DHD_PCIE_PKTID)
1965 #error "PKTIDMAP must be supported with PROP_TXSTATUS/WLFC"
1966 #endif
1967 
1968 /* Enum for marking the buffer color based on usage */
1969 typedef enum dhd_pkttype {
1970 	PKTTYPE_DATA_TX = 0,
1971 	PKTTYPE_DATA_RX,
1972 	PKTTYPE_IOCTL_RX,
1973 	PKTTYPE_EVENT_RX,
1974 	PKTTYPE_INFO_RX,
1975 	/* dhd_prot_pkt_free no check, if pktid reserved and no space avail case */
1976 	PKTTYPE_NO_CHECK,
1977 	PKTTYPE_TSBUF_RX
1978 } dhd_pkttype_t;
1979 
1980 #define DHD_PKTID_MIN_AVAIL_COUNT		512U
1981 #define DHD_PKTID_DEPLETED_MAX_COUNT		(DHD_PKTID_MIN_AVAIL_COUNT * 2U)
1982 #define DHD_PKTID_INVALID			(0U)
1983 #define DHD_IOCTL_REQ_PKTID			(0xFFFE)
1984 #define DHD_FAKE_PKTID				(0xFACE)
1985 #define DHD_H2D_DBGRING_REQ_PKTID		0xFFFD
1986 #define DHD_D2H_DBGRING_REQ_PKTID		0xFFFC
1987 #define DHD_H2D_HOSTTS_REQ_PKTID		0xFFFB
1988 #define DHD_H2D_BTLOGRING_REQ_PKTID		0xFFFA
1989 #define DHD_D2H_BTLOGRING_REQ_PKTID		0xFFF9
1990 #define DHD_H2D_SNAPSHOT_UPLOAD_REQ_PKTID	0xFFF8
1991 #ifdef DHD_HP2P
1992 #define DHD_D2H_HPPRING_TXREQ_PKTID		0xFFF7
1993 #define DHD_D2H_HPPRING_RXREQ_PKTID		0xFFF6
1994 #endif /* DHD_HP2P */
1995 
1996 #define IS_FLOWRING(ring) \
1997 	((strncmp(ring->name, "h2dflr", sizeof("h2dflr"))) == (0))
1998 
1999 typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */
2000 
2001 /* Construct a packet id mapping table, returning an opaque map handle */
2002 static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items);
2003 
2004 /* Destroy a packet id mapping table, freeing all packets active in the table */
2005 static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map);
2006 
2007 #define DHD_NATIVE_TO_PKTID_INIT(dhd, items) dhd_pktid_map_init((dhd), (items))
2008 #define DHD_NATIVE_TO_PKTID_RESET(dhd, map)  dhd_pktid_map_reset((dhd), (map))
2009 #define DHD_NATIVE_TO_PKTID_FINI(dhd, map)   dhd_pktid_map_fini((dhd), (map))
2010 #define DHD_NATIVE_TO_PKTID_FINI_IOCTL(osh, map)  dhd_pktid_map_fini_ioctl((osh), (map))
2011 
2012 #if defined(DHD_PCIE_PKTID)
2013 #if defined(NDIS) || defined(DHD_EFI)
2014 /* XXX: for NDIS, using consistent memory instead of buffer from PKTGET for
2015  * up to 8K ioctl response
2016  */
2017 #define IOCTLRESP_USE_CONSTMEM
2018 static void free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
2019 static int  alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
2020 #endif /* NDIS || DHD_EFI */
2021 
2022 /* Determine number of pktids that are available */
2023 static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle);
2024 
2025 /* Allocate a unique pktid against which a pkt and some metadata is saved */
2026 static INLINE uint32 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
2027 	void *pkt, dhd_pkttype_t pkttype);
2028 static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
2029 	void *pkt, uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dma,
2030 	void *dmah, void *secdma, dhd_pkttype_t pkttype);
2031 static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
2032 	void *pkt, dmaaddr_t pa, uint32 len, uint8 dma,
2033 	void *dmah, void *secdma, dhd_pkttype_t pkttype);
2034 
2035 /* Return an allocated pktid, retrieving previously saved pkt and metadata */
2036 static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
2037 	uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah,
2038 	void **secdma, dhd_pkttype_t pkttype, bool rsv_locker);
2039 
2040 #ifdef DHD_PKTTS
2041 /* Store the Metadata buffer to the locker */
2042 static INLINE void
2043 dhd_pktid_map_save_metadata(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *mpkt,
2044 	dmaaddr_t mpkt_pa,
2045 	uint16	mpkt_len,
2046 	void *dmah,
2047 	uint32 nkey);
2048 
2049 /* Return the Metadata buffer from the locker */
2050 static void * dhd_pktid_map_retreive_metadata(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
2051 	dmaaddr_t *pmpkt_pa, uint32 *pmpkt_len, void **pdmah, uint32 nkey);
2052 #endif /* DHD_PKTTS */
2053 
2054 /*
2055  * DHD_PKTID_AUDIT_ENABLED: Audit of PktIds in DHD for duplicate alloc and frees
2056  *
2057  * DHD_PKTID_AUDIT_MAP: Audit the LIFO or FIFO PktIdMap allocator
2058  * DHD_PKTID_AUDIT_RING: Audit the pktid during producer/consumer ring operation
2059  *
2060  * CAUTION: When DHD_PKTID_AUDIT_ENABLED is defined,
2061  *    either DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be selected.
2062  */
2063 #if defined(DHD_PKTID_AUDIT_ENABLED)
2064 #define USE_DHD_PKTID_AUDIT_LOCK 1
2065 /* Audit the pktidmap allocator */
2066 /* #define DHD_PKTID_AUDIT_MAP */
2067 
2068 /* Audit the pktid during production/consumption of workitems */
2069 #define DHD_PKTID_AUDIT_RING
2070 
2071 #if defined(DHD_PKTID_AUDIT_MAP) && defined(DHD_PKTID_AUDIT_RING)
2072 #error "May only enabled audit of MAP or RING, at a time."
2073 #endif /* DHD_PKTID_AUDIT_MAP && DHD_PKTID_AUDIT_RING */
2074 
2075 #define DHD_DUPLICATE_ALLOC     1
2076 #define DHD_DUPLICATE_FREE      2
2077 #define DHD_TEST_IS_ALLOC       3
2078 #define DHD_TEST_IS_FREE        4
2079 
2080 typedef enum dhd_pktid_map_type {
2081 	DHD_PKTID_MAP_TYPE_CTRL = 1,
2082 	DHD_PKTID_MAP_TYPE_TX,
2083 	DHD_PKTID_MAP_TYPE_RX,
2084 	DHD_PKTID_MAP_TYPE_UNKNOWN
2085 } dhd_pktid_map_type_t;
2086 
2087 #ifdef USE_DHD_PKTID_AUDIT_LOCK
2088 #define DHD_PKTID_AUDIT_LOCK_INIT(osh)          osl_spin_lock_init(osh)
2089 #define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock)  osl_spin_lock_deinit(osh, lock)
2090 #define DHD_PKTID_AUDIT_LOCK(lock)              osl_spin_lock(lock)
2091 #define DHD_PKTID_AUDIT_UNLOCK(lock, flags)     osl_spin_unlock(lock, flags)
2092 #else
2093 #define DHD_PKTID_AUDIT_LOCK_INIT(osh)          (void *)(1)
2094 #define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock)  do { /* noop */ } while (0)
2095 #define DHD_PKTID_AUDIT_LOCK(lock)              0
2096 #define DHD_PKTID_AUDIT_UNLOCK(lock, flags)     do { /* noop */ } while (0)
2097 #endif /* !USE_DHD_PKTID_AUDIT_LOCK */
2098 
2099 #endif /* DHD_PKTID_AUDIT_ENABLED */
2100 
2101 #define USE_DHD_PKTID_LOCK   1
2102 
2103 #ifdef USE_DHD_PKTID_LOCK
2104 #define DHD_PKTID_LOCK_INIT(osh)                osl_spin_lock_init(osh)
2105 #define DHD_PKTID_LOCK_DEINIT(osh, lock)        osl_spin_lock_deinit(osh, lock)
2106 #define DHD_PKTID_LOCK(lock, flags)             (flags) = osl_spin_lock(lock)
2107 #define DHD_PKTID_UNLOCK(lock, flags)           osl_spin_unlock(lock, flags)
2108 #else
2109 #define DHD_PKTID_LOCK_INIT(osh)                (void *)(1)
2110 #define DHD_PKTID_LOCK_DEINIT(osh, lock)	\
2111 	do { \
2112 		BCM_REFERENCE(osh); \
2113 		BCM_REFERENCE(lock); \
2114 	} while (0)
2115 #define DHD_PKTID_LOCK(lock)                    0
2116 #define DHD_PKTID_UNLOCK(lock, flags)           \
2117 	do { \
2118 		BCM_REFERENCE(lock); \
2119 		BCM_REFERENCE(flags); \
2120 	} while (0)
2121 #endif /* !USE_DHD_PKTID_LOCK */
2122 
2123 typedef enum dhd_locker_state {
2124 	LOCKER_IS_FREE,
2125 	LOCKER_IS_BUSY,
2126 	LOCKER_IS_RSVD
2127 } dhd_locker_state_t;
2128 
2129 /* Packet metadata saved in packet id mapper */
2130 
2131 typedef struct dhd_pktid_item {
2132 	dhd_locker_state_t state;  /* tag a locker to be free, busy or reserved */
2133 	uint8       dir;      /* dma map direction (Tx=flush or Rx=invalidate) */
2134 	dhd_pkttype_t pkttype; /* pktlists are maintained based on pkttype */
2135 	uint16      len;      /* length of mapped packet's buffer */
2136 	void        *pkt;     /* opaque native pointer to a packet */
2137 	dmaaddr_t   pa;       /* physical address of mapped packet's buffer */
2138 	void        *dmah;    /* handle to OS specific DMA map */
2139 	void		*secdma;
2140 #ifdef DHD_PKTTS
2141 	void		*mpkt;    /* VA of Metadata */
2142 	dmaaddr_t	mpkt_pa;  /* PA of Metadata */
2143 	uint16		mpkt_len; /* Length of Metadata */
2144 #endif /* DHD_PKTTS */
2145 } dhd_pktid_item_t;
2146 
2147 typedef uint32 dhd_pktid_key_t;
2148 
2149 typedef struct dhd_pktid_map {
2150 	uint32      items;    /* total items in map */
2151 	uint32      avail;    /* total available items */
2152 	int         failures; /* lockers unavailable count */
2153 	/* Spinlock to protect dhd_pktid_map in process/tasklet context */
2154 	void        *pktid_lock; /* Used when USE_DHD_PKTID_LOCK is defined */
2155 
2156 #if defined(DHD_PKTID_AUDIT_ENABLED)
2157 	void		*pktid_audit_lock;
2158 	struct bcm_mwbmap *pktid_audit; /* multi word bitmap based audit */
2159 #endif /* DHD_PKTID_AUDIT_ENABLED */
2160 	dhd_pktid_key_t	*keys; /* map_items +1 unique pkt ids */
2161 	dhd_pktid_item_t lockers[0];           /* metadata storage */
2162 } dhd_pktid_map_t;
2163 
2164 /*
2165  * PktId (Locker) #0 is never allocated and is considered invalid.
2166  *
2167  * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a
2168  * depleted pktid pool and must not be used by the caller.
2169  *
2170  * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID.
2171  */
2172 
2173 #define DHD_PKTID_FREE_LOCKER           (FALSE)
2174 #define DHD_PKTID_RSV_LOCKER            (TRUE)
2175 
2176 #define DHD_PKTID_ITEM_SZ               (sizeof(dhd_pktid_item_t))
2177 #define DHD_PKIDMAP_ITEMS(items)        (items)
2178 #define DHD_PKTID_MAP_SZ(items)         (sizeof(dhd_pktid_map_t) + \
2179 	                                     (DHD_PKTID_ITEM_SZ * ((items) + 1)))
2180 #define DHD_PKTIDMAP_KEYS_SZ(items)     (sizeof(dhd_pktid_key_t) * ((items) + 1))
2181 
2182 #define DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, map)  dhd_pktid_map_reset_ioctl((dhd), (map))
2183 
2184 /* Convert a packet to a pktid, and save pkt pointer in busy locker */
2185 #define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype)    \
2186 	dhd_pktid_map_reserve((dhd), (map), (pkt), (pkttype))
2187 /* Reuse a previously reserved locker to save packet params */
2188 #define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dir, dmah, secdma, pkttype) \
2189 	dhd_pktid_map_save((dhd), (map), (void *)(pkt), (nkey), (pa), (uint32)(len), \
2190 		(uint8)(dir), (void *)(dmah), (void *)(secdma), \
2191 		(dhd_pkttype_t)(pkttype))
2192 /* Convert a packet to a pktid, and save packet params in locker */
2193 #define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir, dmah, secdma, pkttype) \
2194 	dhd_pktid_map_alloc((dhd), (map), (void *)(pkt), (pa), (uint32)(len), \
2195 		(uint8)(dir), (void *)(dmah), (void *)(secdma), \
2196 		(dhd_pkttype_t)(pkttype))
2197 
2198 /* Convert pktid to a packet, and free the locker */
2199 #define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
2200 	dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
2201 		(dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
2202 		(void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER)
2203 
2204 /* Convert the pktid to a packet, empty locker, but keep it reserved */
2205 #define DHD_PKTID_TO_NATIVE_RSV(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
2206 	dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
2207 	                   (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
2208 	                   (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER)
2209 
2210 #ifdef DHD_PKTTS
2211 #define DHD_PKTID_SAVE_METADATA(dhd, map, mpkt, mpkt_pa, mpkt_len, dmah, nkey) \
2212 	dhd_pktid_map_save_metadata(dhd, map, mpkt, mpkt_pa, mpkt_len, dmah, nkey)
2213 
2214 #define DHD_PKTID_RETREIVE_METADATA(dhd, map, mpkt_pa, mpkt_len, dmah, nkey) \
2215 	dhd_pktid_map_retreive_metadata(dhd, map, (dmaaddr_t *)&mpkt_pa, (uint32 *)&mpkt_len, \
2216 		(void **) &dmah, nkey)
2217 #endif /* DHD_PKTTS */
2218 
2219 #define DHD_PKTID_AVAIL(map)                 dhd_pktid_map_avail_cnt(map)
2220 
2221 #if defined(DHD_PKTID_AUDIT_ENABLED)
2222 
2223 static int
dhd_get_pktid_map_type(dhd_pub_t * dhd,dhd_pktid_map_t * pktid_map)2224 dhd_get_pktid_map_type(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map)
2225 {
2226 	dhd_prot_t *prot = dhd->prot;
2227 	int pktid_map_type;
2228 
2229 	if (pktid_map == prot->pktid_ctrl_map) {
2230 		pktid_map_type = DHD_PKTID_MAP_TYPE_CTRL;
2231 	} else if (pktid_map == prot->pktid_tx_map) {
2232 		pktid_map_type = DHD_PKTID_MAP_TYPE_TX;
2233 	} else if (pktid_map == prot->pktid_rx_map) {
2234 		pktid_map_type = DHD_PKTID_MAP_TYPE_RX;
2235 	} else {
2236 		pktid_map_type = DHD_PKTID_MAP_TYPE_UNKNOWN;
2237 	}
2238 
2239 	return pktid_map_type;
2240 }
2241 
2242 /**
2243 * __dhd_pktid_audit - Use the mwbmap to audit validity of a pktid.
2244 */
2245 static int
__dhd_pktid_audit(dhd_pub_t * dhd,dhd_pktid_map_t * pktid_map,uint32 pktid,const int test_for,const char * errmsg)2246 __dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
2247 	const int test_for, const char *errmsg)
2248 {
2249 #define DHD_PKT_AUDIT_STR "ERROR: %16s Host PktId Audit: "
2250 	struct bcm_mwbmap *handle;
2251 	uint32	flags;
2252 	bool ignore_audit;
2253 	int error = BCME_OK;
2254 
2255 	if (pktid_map == (dhd_pktid_map_t *)NULL) {
2256 		DHD_ERROR((DHD_PKT_AUDIT_STR "Pkt id map NULL\n", errmsg));
2257 		return BCME_OK;
2258 	}
2259 
2260 	flags = DHD_PKTID_AUDIT_LOCK(pktid_map->pktid_audit_lock);
2261 
2262 	handle = pktid_map->pktid_audit;
2263 	if (handle == (struct bcm_mwbmap *)NULL) {
2264 		DHD_ERROR((DHD_PKT_AUDIT_STR "Handle NULL\n", errmsg));
2265 		goto out;
2266 	}
2267 
2268 	/* Exclude special pktids from audit */
2269 	ignore_audit = (pktid == DHD_IOCTL_REQ_PKTID) | (pktid == DHD_FAKE_PKTID);
2270 	if (ignore_audit) {
2271 		goto out;
2272 	}
2273 
2274 	if ((pktid == DHD_PKTID_INVALID) || (pktid > pktid_map->items)) {
2275 		DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> invalid\n", errmsg, pktid));
2276 		error = BCME_ERROR;
2277 		goto out;
2278 	}
2279 
2280 	/* Perform audit */
2281 	switch (test_for) {
2282 		case DHD_DUPLICATE_ALLOC:
2283 			if (!bcm_mwbmap_isfree(handle, pktid)) {
2284 				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> alloc duplicate\n",
2285 				           errmsg, pktid));
2286 				error = BCME_ERROR;
2287 			} else {
2288 				bcm_mwbmap_force(handle, pktid);
2289 			}
2290 			break;
2291 
2292 		case DHD_DUPLICATE_FREE:
2293 			if (bcm_mwbmap_isfree(handle, pktid)) {
2294 				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> free duplicate\n",
2295 				           errmsg, pktid));
2296 				error = BCME_ERROR;
2297 			} else {
2298 				bcm_mwbmap_free(handle, pktid);
2299 			}
2300 			break;
2301 
2302 		case DHD_TEST_IS_ALLOC:
2303 			if (bcm_mwbmap_isfree(handle, pktid)) {
2304 				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not allocated\n",
2305 				           errmsg, pktid));
2306 				error = BCME_ERROR;
2307 			}
2308 			break;
2309 
2310 		case DHD_TEST_IS_FREE:
2311 			if (!bcm_mwbmap_isfree(handle, pktid)) {
2312 				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not free",
2313 				           errmsg, pktid));
2314 				error = BCME_ERROR;
2315 			}
2316 			break;
2317 
2318 		default:
2319 			DHD_ERROR(("%s: Invalid test case: %d\n", __FUNCTION__, test_for));
2320 			error = BCME_ERROR;
2321 			break;
2322 	}
2323 
2324 out:
2325 	DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
2326 
2327 	if (error != BCME_OK) {
2328 		dhd->pktid_audit_failed = TRUE;
2329 	}
2330 
2331 	return error;
2332 }
2333 
2334 static int
dhd_pktid_audit(dhd_pub_t * dhd,dhd_pktid_map_t * pktid_map,uint32 pktid,const int test_for,const char * errmsg)2335 dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
2336 	const int test_for, const char *errmsg)
2337 {
2338 	int ret = BCME_OK;
2339 	ret = __dhd_pktid_audit(dhd, pktid_map, pktid, test_for, errmsg);
2340 	if (ret == BCME_ERROR) {
2341 		DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
2342 			__FUNCTION__, pktid, dhd_get_pktid_map_type(dhd, pktid_map)));
2343 		dhd_pktid_error_handler(dhd);
2344 #ifdef DHD_MAP_PKTID_LOGGING
2345 		DHD_PKTID_LOG_DUMP(dhd);
2346 #endif /* DHD_MAP_PKTID_LOGGING */
2347 	}
2348 
2349 	return ret;
2350 }
2351 
2352 #define DHD_PKTID_AUDIT(dhdp, map, pktid, test_for) \
2353 	dhd_pktid_audit((dhdp), (dhd_pktid_map_t *)(map), (pktid), (test_for), __FUNCTION__)
2354 
2355 static int
dhd_pktid_audit_ring_debug(dhd_pub_t * dhdp,dhd_pktid_map_t * map,uint32 pktid,const int test_for,void * msg,uint32 msg_len,const char * func)2356 dhd_pktid_audit_ring_debug(dhd_pub_t *dhdp, dhd_pktid_map_t *map, uint32 pktid,
2357 	const int test_for, void *msg, uint32 msg_len, const char *func)
2358 {
2359 	int ret = BCME_OK;
2360 
2361 	if (dhd_query_bus_erros(dhdp)) {
2362 		return BCME_ERROR;
2363 	}
2364 
2365 	ret = __dhd_pktid_audit(dhdp, map, pktid, test_for, func);
2366 	if (ret == BCME_ERROR) {
2367 		DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
2368 			__FUNCTION__, pktid, dhd_get_pktid_map_type(dhdp, map)));
2369 		prhex(func, (uchar *)msg, msg_len);
2370 		dhd_pktid_error_handler(dhdp);
2371 #ifdef DHD_MAP_PKTID_LOGGING
2372 		DHD_PKTID_LOG_DUMP(dhdp);
2373 #endif /* DHD_MAP_PKTID_LOGGING */
2374 	}
2375 	return ret;
2376 }
2377 #define DHD_PKTID_AUDIT_RING_DEBUG(dhdp, map, pktid, test_for, msg, msg_len) \
2378 	dhd_pktid_audit_ring_debug((dhdp), (dhd_pktid_map_t *)(map), \
2379 		(pktid), (test_for), msg, msg_len, __FUNCTION__)
2380 
2381 #endif /* DHD_PKTID_AUDIT_ENABLED */
2382 
2383 /**
2384  * +---------------------------------------------------------------------------+
2385  * Packet to Packet Id mapper using a <numbered_key, locker> paradigm.
2386  *
2387  * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_xxx_PKTID].
2388  *
2389  * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique
2390  * packet id is returned. This unique packet id may be used to retrieve the
2391  * previously saved packet metadata, using dhd_pktid_map_free(). On invocation
2392  * of dhd_pktid_map_free(), the unique packet id is essentially freed. A
2393  * subsequent call to dhd_pktid_map_alloc() may reuse this packet id.
2394  *
2395  * Implementation Note:
2396  * Convert this into a <key,locker> abstraction and place into bcmutils !
2397  * Locker abstraction should treat contents as opaque storage, and a
2398  * callback should be registered to handle busy lockers on destructor.
2399  *
2400  * +---------------------------------------------------------------------------+
2401  */
2402 
2403 /** Allocate and initialize a mapper of num_items <numbered_key, locker> */
2404 
2405 static dhd_pktid_map_handle_t *
dhd_pktid_map_init(dhd_pub_t * dhd,uint32 num_items)2406 dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
2407 {
2408 	void* osh;
2409 	uint32 nkey;
2410 	dhd_pktid_map_t *map;
2411 	uint32 dhd_pktid_map_sz;
2412 	uint32 map_items;
2413 	uint32 map_keys_sz;
2414 	osh = dhd->osh;
2415 
2416 	dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items);
2417 
2418 	map = (dhd_pktid_map_t *)VMALLOC(osh, dhd_pktid_map_sz);
2419 	if (map == NULL) {
2420 		DHD_ERROR(("%s:%d: MALLOC failed for size %d\n",
2421 			__FUNCTION__, __LINE__, dhd_pktid_map_sz));
2422 		return (dhd_pktid_map_handle_t *)NULL;
2423 	}
2424 
2425 	map->items = num_items;
2426 	map->avail = num_items;
2427 
2428 	map_items = DHD_PKIDMAP_ITEMS(map->items);
2429 
2430 	map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
2431 
2432 	/* Initialize the lock that protects this structure */
2433 	map->pktid_lock = DHD_PKTID_LOCK_INIT(osh);
2434 	if (map->pktid_lock == NULL) {
2435 		DHD_ERROR(("%s:%d: Lock init failed \r\n", __FUNCTION__, __LINE__));
2436 		goto error;
2437 	}
2438 
2439 	map->keys = (dhd_pktid_key_t *)MALLOC(osh, map_keys_sz);
2440 	if (map->keys == NULL) {
2441 		DHD_ERROR(("%s:%d: MALLOC failed for map->keys size %d\n",
2442 			__FUNCTION__, __LINE__, map_keys_sz));
2443 		goto error;
2444 	}
2445 
2446 #if defined(DHD_PKTID_AUDIT_ENABLED)
2447 		/* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */
2448 		map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1);
2449 		if (map->pktid_audit == (struct bcm_mwbmap *)NULL) {
2450 			DHD_ERROR(("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__));
2451 			goto error;
2452 		} else {
2453 			DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n",
2454 				__FUNCTION__, __LINE__, map_items + 1));
2455 		}
2456 		map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh);
2457 #endif /* DHD_PKTID_AUDIT_ENABLED */
2458 
2459 	for (nkey = 1; nkey <= map_items; nkey++) { /* locker #0 is reserved */
2460 		map->keys[nkey] = nkey; /* populate with unique keys */
2461 		map->lockers[nkey].state = LOCKER_IS_FREE;
2462 		map->lockers[nkey].pkt   = NULL; /* bzero: redundant */
2463 		map->lockers[nkey].len   = 0;
2464 	}
2465 
2466 	/* Reserve pktid #0, i.e. DHD_PKTID_INVALID to be inuse */
2467 	map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY; /* tag locker #0 as inuse */
2468 	map->lockers[DHD_PKTID_INVALID].pkt   = NULL; /* bzero: redundant */
2469 	map->lockers[DHD_PKTID_INVALID].len   = 0;
2470 
2471 #if defined(DHD_PKTID_AUDIT_ENABLED)
2472 	/* do not use dhd_pktid_audit() here, use bcm_mwbmap_force directly */
2473 	bcm_mwbmap_force(map->pktid_audit, DHD_PKTID_INVALID);
2474 #endif /* DHD_PKTID_AUDIT_ENABLED */
2475 
2476 	return (dhd_pktid_map_handle_t *)map; /* opaque handle */
2477 
2478 error:
2479 	if (map) {
2480 #if defined(DHD_PKTID_AUDIT_ENABLED)
2481 		if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
2482 			bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
2483 			map->pktid_audit = (struct bcm_mwbmap *)NULL;
2484 			if (map->pktid_audit_lock)
2485 				DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
2486 		}
2487 #endif /* DHD_PKTID_AUDIT_ENABLED */
2488 
2489 		if (map->keys) {
2490 			MFREE(osh, map->keys, map_keys_sz);
2491 		}
2492 
2493 		if (map->pktid_lock) {
2494 			DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
2495 		}
2496 
2497 		VMFREE(osh, map, dhd_pktid_map_sz);
2498 	}
2499 	return (dhd_pktid_map_handle_t *)NULL;
2500 }
2501 
2502 /**
2503  * Retrieve all allocated keys and free all <numbered_key, locker>.
2504  * Freeing implies: unmapping the buffers and freeing the native packet
2505  * This could have been a callback registered with the pktid mapper.
2506  */
2507 static void
dhd_pktid_map_reset(dhd_pub_t * dhd,dhd_pktid_map_handle_t * handle)2508 dhd_pktid_map_reset(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
2509 {
2510 	void *osh;
2511 	uint32 nkey;
2512 	dhd_pktid_map_t *map;
2513 	dhd_pktid_item_t *locker;
2514 	uint32 map_items;
2515 	unsigned long flags;
2516 	bool data_tx = FALSE;
2517 
2518 	map = (dhd_pktid_map_t *)handle;
2519 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2520 	osh = dhd->osh;
2521 
2522 	map_items = DHD_PKIDMAP_ITEMS(map->items);
2523 	/* skip reserved KEY #0, and start from 1 */
2524 
2525 	for (nkey = 1; nkey <= map_items; nkey++) {
2526 		if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
2527 			locker = &map->lockers[nkey];
2528 			locker->state = LOCKER_IS_FREE;
2529 			data_tx = (locker->pkttype == PKTTYPE_DATA_TX);
2530 			if (data_tx) {
2531 				OSL_ATOMIC_DEC(dhd->osh, &dhd->prot->active_tx_count);
2532 			}
2533 
2534 #ifdef DHD_PKTID_AUDIT_RING
2535 			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
2536 #endif /* DHD_PKTID_AUDIT_RING */
2537 #ifdef DHD_MAP_PKTID_LOGGING
2538 			DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap,
2539 				locker->pa, nkey, locker->len,
2540 				locker->pkttype);
2541 #endif /* DHD_MAP_PKTID_LOGGING */
2542 
2543 			DMA_UNMAP(osh, locker->pa, locker->len, locker->dir, 0, locker->dmah);
2544 			dhd_prot_packet_free(dhd, (ulong*)locker->pkt,
2545 				locker->pkttype, data_tx);
2546 		}
2547 		else {
2548 #ifdef DHD_PKTID_AUDIT_RING
2549 			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
2550 #endif /* DHD_PKTID_AUDIT_RING */
2551 		}
2552 		map->keys[nkey] = nkey; /* populate with unique keys */
2553 	}
2554 
2555 	map->avail = map_items;
2556 	memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
2557 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2558 }
2559 
2560 #ifdef IOCTLRESP_USE_CONSTMEM
2561 /** Called in detach scenario. Releasing IOCTL buffers. */
2562 static void
dhd_pktid_map_reset_ioctl(dhd_pub_t * dhd,dhd_pktid_map_handle_t * handle)2563 dhd_pktid_map_reset_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
2564 {
2565 	uint32 nkey;
2566 	dhd_pktid_map_t *map;
2567 	dhd_pktid_item_t *locker;
2568 	uint32 map_items;
2569 	unsigned long flags;
2570 
2571 	map = (dhd_pktid_map_t *)handle;
2572 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2573 
2574 	map_items = DHD_PKIDMAP_ITEMS(map->items);
2575 	/* skip reserved KEY #0, and start from 1 */
2576 	for (nkey = 1; nkey <= map_items; nkey++) {
2577 		if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
2578 			dhd_dma_buf_t retbuf;
2579 
2580 #ifdef DHD_PKTID_AUDIT_RING
2581 			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
2582 #endif /* DHD_PKTID_AUDIT_RING */
2583 
2584 			locker = &map->lockers[nkey];
2585 			retbuf.va = locker->pkt;
2586 			retbuf.len = locker->len;
2587 			retbuf.pa = locker->pa;
2588 			retbuf.dmah = locker->dmah;
2589 			retbuf.secdma = locker->secdma;
2590 
2591 			free_ioctl_return_buffer(dhd, &retbuf);
2592 		}
2593 		else {
2594 #ifdef DHD_PKTID_AUDIT_RING
2595 			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
2596 #endif /* DHD_PKTID_AUDIT_RING */
2597 		}
2598 		map->keys[nkey] = nkey; /* populate with unique keys */
2599 	}
2600 
2601 	map->avail = map_items;
2602 	memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
2603 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2604 }
2605 #endif /* IOCTLRESP_USE_CONSTMEM */
2606 
2607 /**
2608  * Free the pktid map.
2609  */
2610 static void
dhd_pktid_map_fini(dhd_pub_t * dhd,dhd_pktid_map_handle_t * handle)2611 dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
2612 {
2613 	dhd_pktid_map_t *map;
2614 	uint32 dhd_pktid_map_sz;
2615 	uint32 map_keys_sz;
2616 
2617 	if (handle == NULL)
2618 		return;
2619 
2620 	/* Free any pending packets */
2621 	dhd_pktid_map_reset(dhd, handle);
2622 
2623 	map = (dhd_pktid_map_t *)handle;
2624 	dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
2625 	map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
2626 
2627 	DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock);
2628 
2629 #if defined(DHD_PKTID_AUDIT_ENABLED)
2630 	if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
2631 		bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
2632 		map->pktid_audit = (struct bcm_mwbmap *)NULL;
2633 		if (map->pktid_audit_lock) {
2634 			DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
2635 		}
2636 	}
2637 #endif /* DHD_PKTID_AUDIT_ENABLED */
2638 	MFREE(dhd->osh, map->keys, map_keys_sz);
2639 	VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
2640 }
2641 
2642 #ifdef IOCTLRESP_USE_CONSTMEM
2643 static void
dhd_pktid_map_fini_ioctl(dhd_pub_t * dhd,dhd_pktid_map_handle_t * handle)2644 dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
2645 {
2646 	dhd_pktid_map_t *map;
2647 	uint32 dhd_pktid_map_sz;
2648 	uint32 map_keys_sz;
2649 
2650 	if (handle == NULL)
2651 		return;
2652 
2653 	/* Free any pending packets */
2654 	dhd_pktid_map_reset_ioctl(dhd, handle);
2655 
2656 	map = (dhd_pktid_map_t *)handle;
2657 	dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
2658 	map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
2659 
2660 	DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock);
2661 
2662 #if defined(DHD_PKTID_AUDIT_ENABLED)
2663 	if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
2664 		bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
2665 		map->pktid_audit = (struct bcm_mwbmap *)NULL;
2666 		if (map->pktid_audit_lock) {
2667 			DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
2668 		}
2669 	}
2670 #endif /* DHD_PKTID_AUDIT_ENABLED */
2671 
2672 	MFREE(dhd->osh, map->keys, map_keys_sz);
2673 	VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
2674 }
2675 #endif /* IOCTLRESP_USE_CONSTMEM */
2676 
2677 /** Get the pktid free count */
2678 static INLINE uint32
BCMFASTPATH(dhd_pktid_map_avail_cnt)2679 BCMFASTPATH(dhd_pktid_map_avail_cnt)(dhd_pktid_map_handle_t *handle)
2680 {
2681 	dhd_pktid_map_t *map;
2682 	uint32	avail;
2683 	unsigned long flags;
2684 
2685 	ASSERT(handle != NULL);
2686 	map = (dhd_pktid_map_t *)handle;
2687 
2688 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2689 	avail = map->avail;
2690 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2691 
2692 	return avail;
2693 }
2694 
2695 /**
2696  * dhd_pktid_map_reserve - reserve a unique numbered key. Reserved locker is not
2697  * yet populated. Invoke the pktid save api to populate the packet parameters
2698  * into the locker. This function is not reentrant, and is the caller's
2699  * responsibility. Caller must treat a returned value DHD_PKTID_INVALID as
2700  * a failure case, implying a depleted pool of pktids.
2701  */
2702 static INLINE uint32
dhd_pktid_map_reserve(dhd_pub_t * dhd,dhd_pktid_map_handle_t * handle,void * pkt,dhd_pkttype_t pkttype)2703 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
2704 	void *pkt, dhd_pkttype_t pkttype)
2705 {
2706 	uint32 nkey;
2707 	dhd_pktid_map_t *map;
2708 	dhd_pktid_item_t *locker;
2709 	unsigned long flags;
2710 
2711 	ASSERT(handle != NULL);
2712 	map = (dhd_pktid_map_t *)handle;
2713 
2714 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2715 
2716 	if ((int)(map->avail) <= 0) { /* no more pktids to allocate */
2717 		map->failures++;
2718 		DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__));
2719 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2720 		return DHD_PKTID_INVALID; /* failed alloc request */
2721 	}
2722 
2723 	ASSERT(map->avail <= map->items);
2724 	nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */
2725 
2726 	if ((map->avail > map->items) || (nkey > map->items)) {
2727 		map->failures++;
2728 		DHD_ERROR(("%s:%d: failed to allocate a new pktid,"
2729 			" map->avail<%u>, nkey<%u>, pkttype<%u>\n",
2730 			__FUNCTION__, __LINE__, map->avail, nkey,
2731 			pkttype));
2732 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2733 		return DHD_PKTID_INVALID; /* failed alloc request */
2734 	}
2735 
2736 	locker = &map->lockers[nkey]; /* save packet metadata in locker */
2737 	map->avail--;
2738 	locker->pkt = pkt; /* pkt is saved, other params not yet saved. */
2739 	locker->len = 0;
2740 	locker->state = LOCKER_IS_BUSY; /* reserve this locker */
2741 
2742 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2743 
2744 	ASSERT(nkey != DHD_PKTID_INVALID);
2745 
2746 	return nkey; /* return locker's numbered key */
2747 }
2748 
2749 #ifdef DHD_PKTTS
2750 /*
2751  * dhd_pktid_map_save_metadata - Save metadata information in a locker
2752  * that has a reserved unique numbered key.
2753  */
2754 static INLINE void
dhd_pktid_map_save_metadata(dhd_pub_t * dhd,dhd_pktid_map_handle_t * handle,void * mpkt,dmaaddr_t mpkt_pa,uint16 mpkt_len,void * dmah,uint32 nkey)2755 dhd_pktid_map_save_metadata(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *mpkt,
2756 	dmaaddr_t mpkt_pa,
2757 	uint16	mpkt_len,
2758 	void *dmah,
2759 	uint32 nkey)
2760 {
2761 	dhd_pktid_map_t *map;
2762 	dhd_pktid_item_t *locker;
2763 	unsigned long flags;
2764 
2765 	ASSERT(handle != NULL);
2766 	map = (dhd_pktid_map_t *)handle;
2767 
2768 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2769 
2770 	if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
2771 		DHD_ERROR(("%s:%d: Error! saving invalid pktid<%u>",
2772 			__FUNCTION__, __LINE__, nkey));
2773 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2774 #ifdef DHD_FW_COREDUMP
2775 		if (dhd->memdump_enabled) {
2776 			/* collect core dump */
2777 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2778 			dhd_bus_mem_dump(dhd);
2779 		}
2780 #else
2781 		ASSERT(0);
2782 #endif /* DHD_FW_COREDUMP */
2783 		return;
2784 	}
2785 
2786 	locker = &map->lockers[nkey];
2787 
2788 	/*
2789 	 * TODO: checking the locker state for BUSY will prevent
2790 	 * us from storing meta data on an already allocated
2791 	 * Locker. But not checking may lead to overwriting
2792 	 * existing data.
2793 	 */
2794 	locker->mpkt = mpkt;
2795 	locker->mpkt_pa = mpkt_pa;
2796 	locker->mpkt_len = mpkt_len;
2797 	locker->dmah = dmah;
2798 
2799 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2800 }
2801 #endif /* DHD_PKTTS */
2802 
2803 /*
2804  * dhd_pktid_map_save - Save a packet's parameters into a locker
2805  * corresponding to a previously reserved unique numbered key.
2806  */
2807 static INLINE void
dhd_pktid_map_save(dhd_pub_t * dhd,dhd_pktid_map_handle_t * handle,void * pkt,uint32 nkey,dmaaddr_t pa,uint32 len,uint8 dir,void * dmah,void * secdma,dhd_pkttype_t pkttype)2808 dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
2809 	uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
2810 	dhd_pkttype_t pkttype)
2811 {
2812 	dhd_pktid_map_t *map;
2813 	dhd_pktid_item_t *locker;
2814 	unsigned long flags;
2815 
2816 	ASSERT(handle != NULL);
2817 	map = (dhd_pktid_map_t *)handle;
2818 
2819 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2820 
2821 	if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
2822 		DHD_ERROR(("%s:%d: Error! saving invalid pktid<%u> pkttype<%u>\n",
2823 			__FUNCTION__, __LINE__, nkey, pkttype));
2824 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2825 #ifdef DHD_FW_COREDUMP
2826 		if (dhd->memdump_enabled) {
2827 			/* collect core dump */
2828 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2829 			dhd_bus_mem_dump(dhd);
2830 		}
2831 #else
2832 		ASSERT(0);
2833 #endif /* DHD_FW_COREDUMP */
2834 		return;
2835 	}
2836 
2837 	locker = &map->lockers[nkey];
2838 
2839 	ASSERT(((locker->state == LOCKER_IS_BUSY) && (locker->pkt == pkt)) ||
2840 		((locker->state == LOCKER_IS_RSVD) && (locker->pkt == NULL)));
2841 
2842 	/* store contents in locker */
2843 	locker->dir = dir;
2844 	locker->pa = pa;
2845 	locker->len = (uint16)len; /* 16bit len */
2846 	locker->dmah = dmah; /* 16bit len */
2847 	locker->secdma = secdma;
2848 	locker->pkttype = pkttype;
2849 	locker->pkt = pkt;
2850 	locker->state = LOCKER_IS_BUSY; /* make this locker busy */
2851 #ifdef DHD_MAP_PKTID_LOGGING
2852 	DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_map, pa, nkey, len, pkttype);
2853 #endif /* DHD_MAP_PKTID_LOGGING */
2854 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2855 }
2856 
2857 /**
2858  * dhd_pktid_map_alloc - Allocate a unique numbered key and save the packet
2859  * contents into the corresponding locker. Return the numbered key.
2860  */
2861 static uint32
BCMFASTPATH(dhd_pktid_map_alloc)2862 BCMFASTPATH(dhd_pktid_map_alloc)(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
2863 	dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
2864 	dhd_pkttype_t pkttype)
2865 {
2866 	uint32 nkey;
2867 
2868 	nkey = dhd_pktid_map_reserve(dhd, handle, pkt, pkttype);
2869 	if (nkey != DHD_PKTID_INVALID) {
2870 		dhd_pktid_map_save(dhd, handle, pkt, nkey, pa,
2871 			len, dir, dmah, secdma, pkttype);
2872 	}
2873 
2874 	return nkey;
2875 }
2876 
2877 #ifdef DHD_PKTTS
2878 static void *
BCMFASTPATH(dhd_pktid_map_retreive_metadata)2879 BCMFASTPATH(dhd_pktid_map_retreive_metadata)(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
2880 	dmaaddr_t *pmpkt_pa,
2881 	uint32	*pmpkt_len,
2882 	void **pdmah,
2883 	uint32 nkey)
2884 {
2885 	dhd_pktid_map_t *map;
2886 	dhd_pktid_item_t *locker;
2887 	void *mpkt;
2888 	unsigned long flags;
2889 
2890 	ASSERT(handle != NULL);
2891 
2892 	map = (dhd_pktid_map_t *)handle;
2893 
2894 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2895 
2896 	/* XXX PLEASE DO NOT remove this ASSERT, fix the bug in caller. */
2897 	if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
2898 		DHD_ERROR(("%s:%d: Error! Try to free invalid pktid<%u>\n",
2899 		           __FUNCTION__, __LINE__, nkey));
2900 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2901 #ifdef DHD_FW_COREDUMP
2902 		if (dhd->memdump_enabled) {
2903 			/* collect core dump */
2904 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2905 			dhd_bus_mem_dump(dhd);
2906 		}
2907 #else
2908 		ASSERT(0);
2909 #endif /* DHD_FW_COREDUMP */
2910 		return NULL;
2911 	}
2912 
2913 	locker = &map->lockers[nkey];
2914 	mpkt = locker->mpkt;
2915 	*pmpkt_pa = locker->mpkt_pa;
2916 	*pmpkt_len = locker->mpkt_len;
2917 	if (pdmah)
2918 		*pdmah = locker->dmah;
2919 	locker->mpkt = NULL;
2920 	locker->mpkt_len = 0;
2921 	locker->dmah = NULL;
2922 
2923 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2924 	return mpkt;
2925 }
2926 #endif /* DHD_PKTTS */
2927 
2928 /**
2929  * dhd_pktid_map_free - Given a numbered key, return the locker contents.
2930  * dhd_pktid_map_free() is not reentrant, and is the caller's responsibility.
2931  * Caller may not free a pktid value DHD_PKTID_INVALID or an arbitrary pktid
2932  * value. Only a previously allocated pktid may be freed.
2933  */
2934 static void *
BCMFASTPATH(dhd_pktid_map_free)2935 BCMFASTPATH(dhd_pktid_map_free)(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey,
2936 	dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma, dhd_pkttype_t pkttype,
2937 	bool rsv_locker)
2938 {
2939 	dhd_pktid_map_t *map;
2940 	dhd_pktid_item_t *locker;
2941 	void * pkt;
2942 	unsigned long long locker_addr;
2943 	unsigned long flags;
2944 
2945 	ASSERT(handle != NULL);
2946 
2947 	map = (dhd_pktid_map_t *)handle;
2948 
2949 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2950 
2951 	/* XXX PLEASE DO NOT remove this ASSERT, fix the bug in caller. */
2952 	if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
2953 		DHD_ERROR(("%s:%d: Error! Try to free invalid pktid<%u>, pkttype<%d>\n",
2954 		           __FUNCTION__, __LINE__, nkey, pkttype));
2955 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2956 #ifdef DHD_FW_COREDUMP
2957 		if (dhd->memdump_enabled) {
2958 			/* collect core dump */
2959 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2960 			dhd_bus_mem_dump(dhd);
2961 		}
2962 #else
2963 		ASSERT(0);
2964 #endif /* DHD_FW_COREDUMP */
2965 		return NULL;
2966 	}
2967 
2968 	locker = &map->lockers[nkey];
2969 
2970 #if defined(DHD_PKTID_AUDIT_MAP)
2971 	DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* Audit duplicate FREE */
2972 #endif /* DHD_PKTID_AUDIT_MAP */
2973 
2974 	/* Debug check for cloned numbered key */
2975 	if (locker->state == LOCKER_IS_FREE) {
2976 		DHD_ERROR(("%s:%d: Error! freeing already freed invalid pktid<%u>\n",
2977 		           __FUNCTION__, __LINE__, nkey));
2978 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2979 		/* XXX PLEASE DO NOT remove this ASSERT, fix the bug in caller. */
2980 #ifdef DHD_FW_COREDUMP
2981 		if (dhd->memdump_enabled) {
2982 			/* collect core dump */
2983 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2984 			dhd_bus_mem_dump(dhd);
2985 		}
2986 #else
2987 		ASSERT(0);
2988 #endif /* DHD_FW_COREDUMP */
2989 		return NULL;
2990 	}
2991 
2992 	/* Check for the colour of the buffer i.e The buffer posted for TX,
2993 	 * should be freed for TX completion. Similarly the buffer posted for
2994 	 * IOCTL should be freed for IOCT completion etc.
2995 	 */
2996 	if ((pkttype != PKTTYPE_NO_CHECK) && (locker->pkttype != pkttype)) {
2997 
2998 		DHD_ERROR(("%s:%d: Error! Invalid Buffer Free for pktid<%u> \n",
2999 			__FUNCTION__, __LINE__, nkey));
3000 #ifdef BCMDMA64OSL
3001 		PHYSADDRTOULONG(locker->pa, locker_addr);
3002 #else
3003 		locker_addr = PHYSADDRLO(locker->pa);
3004 #endif /* BCMDMA64OSL */
3005 		DHD_ERROR(("%s:%d: locker->state <%d>, locker->pkttype <%d>,"
3006 			"pkttype <%d> locker->pa <0x%llx> \n",
3007 			__FUNCTION__, __LINE__, locker->state, locker->pkttype,
3008 			pkttype, locker_addr));
3009 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
3010 #ifdef DHD_FW_COREDUMP
3011 		if (dhd->memdump_enabled) {
3012 			/* collect core dump */
3013 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
3014 			dhd_bus_mem_dump(dhd);
3015 		}
3016 #else
3017 		ASSERT(0);
3018 #endif /* DHD_FW_COREDUMP */
3019 		return NULL;
3020 	}
3021 
3022 	if (rsv_locker == DHD_PKTID_FREE_LOCKER) {
3023 		map->avail++;
3024 		map->keys[map->avail] = nkey; /* make this numbered key available */
3025 		locker->state = LOCKER_IS_FREE; /* open and free Locker */
3026 	} else {
3027 		/* pktid will be reused, but the locker does not have a valid pkt */
3028 		locker->state = LOCKER_IS_RSVD;
3029 	}
3030 
3031 #if defined(DHD_PKTID_AUDIT_MAP)
3032 	DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
3033 #endif /* DHD_PKTID_AUDIT_MAP */
3034 #ifdef DHD_MAP_PKTID_LOGGING
3035 	DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap, locker->pa, nkey,
3036 		(uint32)locker->len, pkttype);
3037 #endif /* DHD_MAP_PKTID_LOGGING */
3038 
3039 	*pa = locker->pa; /* return contents of locker */
3040 	*len = (uint32)locker->len;
3041 	*dmah = locker->dmah;
3042 	*secdma = locker->secdma;
3043 
3044 	pkt = locker->pkt;
3045 	locker->pkt = NULL; /* Clear pkt */
3046 	locker->len = 0;
3047 
3048 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
3049 
3050 	return pkt;
3051 }
3052 
3053 #else /* ! DHD_PCIE_PKTID */
3054 
3055 #ifndef linux
3056 #error "DHD_PCIE_PKTID has to be defined for non-linux/android platforms"
3057 #endif
3058 
3059 typedef struct pktlist {
3060 	PKT_LIST *tx_pkt_list;		/* list for tx packets */
3061 	PKT_LIST *rx_pkt_list;		/* list for rx packets */
3062 	PKT_LIST *ctrl_pkt_list;	/* list for ioctl/event buf post */
3063 } pktlists_t;
3064 
3065 /*
3066  * Given that each workitem only uses a 32bit pktid, only 32bit hosts may avail
3067  * of a one to one mapping 32bit pktptr and a 32bit pktid.
3068  *
3069  * - When PKTIDMAP is not used, DHD_NATIVE_TO_PKTID variants will never fail.
3070  * - Neither DHD_NATIVE_TO_PKTID nor DHD_PKTID_TO_NATIVE need to be protected by
3071  *   a lock.
3072  * - Hence DHD_PKTID_INVALID is not defined when DHD_PCIE_PKTID is undefined.
3073  */
3074 #define DHD_PKTID32(pktptr32)	((uint32)(pktptr32))
3075 #define DHD_PKTPTR32(pktid32)	((void *)(pktid32))
3076 
3077 static INLINE uint32 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
3078 	dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
3079 	dhd_pkttype_t pkttype);
3080 static INLINE void * dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
3081 	dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
3082 	dhd_pkttype_t pkttype);
3083 
3084 static dhd_pktid_map_handle_t *
dhd_pktid_map_init(dhd_pub_t * dhd,uint32 num_items)3085 dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
3086 {
3087 	osl_t *osh = dhd->osh;
3088 	pktlists_t *handle = NULL;
3089 
3090 	if ((handle = (pktlists_t *) MALLOCZ(osh, sizeof(pktlists_t))) == NULL) {
3091 		DHD_ERROR(("%s:%d: MALLOC failed for lists allocation, size=%d\n",
3092 		           __FUNCTION__, __LINE__, sizeof(pktlists_t)));
3093 		goto error_done;
3094 	}
3095 
3096 	if ((handle->tx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
3097 		DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
3098 		           __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
3099 		goto error;
3100 	}
3101 
3102 	if ((handle->rx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
3103 		DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
3104 		           __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
3105 		goto error;
3106 	}
3107 
3108 	if ((handle->ctrl_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
3109 		DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
3110 		           __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
3111 		goto error;
3112 	}
3113 
3114 	PKTLIST_INIT(handle->tx_pkt_list);
3115 	PKTLIST_INIT(handle->rx_pkt_list);
3116 	PKTLIST_INIT(handle->ctrl_pkt_list);
3117 
3118 	return (dhd_pktid_map_handle_t *) handle;
3119 
3120 error:
3121 	if (handle->ctrl_pkt_list) {
3122 		MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
3123 	}
3124 
3125 	if (handle->rx_pkt_list) {
3126 		MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
3127 	}
3128 
3129 	if (handle->tx_pkt_list) {
3130 		MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
3131 	}
3132 
3133 	if (handle) {
3134 		MFREE(osh, handle, sizeof(pktlists_t));
3135 	}
3136 
3137 error_done:
3138 	return (dhd_pktid_map_handle_t *)NULL;
3139 }
3140 
3141 static void
dhd_pktid_map_reset(dhd_pub_t * dhd,pktlists_t * handle)3142 dhd_pktid_map_reset(dhd_pub_t *dhd, pktlists_t *handle)
3143 {
3144 	osl_t *osh = dhd->osh;
3145 
3146 	if (handle->ctrl_pkt_list) {
3147 		PKTLIST_FINI(handle->ctrl_pkt_list);
3148 		MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
3149 	}
3150 
3151 	if (handle->rx_pkt_list) {
3152 		PKTLIST_FINI(handle->rx_pkt_list);
3153 		MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
3154 	}
3155 
3156 	if (handle->tx_pkt_list) {
3157 		PKTLIST_FINI(handle->tx_pkt_list);
3158 		MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
3159 	}
3160 }
3161 
3162 static void
dhd_pktid_map_fini(dhd_pub_t * dhd,dhd_pktid_map_handle_t * map)3163 dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map)
3164 {
3165 	osl_t *osh = dhd->osh;
3166 	pktlists_t *handle = (pktlists_t *) map;
3167 
3168 	ASSERT(handle != NULL);
3169 	if (handle == (pktlists_t *)NULL) {
3170 		return;
3171 	}
3172 
3173 	dhd_pktid_map_reset(dhd, handle);
3174 
3175 	if (handle) {
3176 		MFREE(osh, handle, sizeof(pktlists_t));
3177 	}
3178 }
3179 
3180 /** Save dma parameters into the packet's pkttag and convert a pktptr to pktid */
3181 static INLINE uint32
dhd_native_to_pktid(dhd_pktid_map_handle_t * map,void * pktptr32,dmaaddr_t pa,uint32 dma_len,void * dmah,void * secdma,dhd_pkttype_t pkttype)3182 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
3183 	dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
3184 	dhd_pkttype_t pkttype)
3185 {
3186 	pktlists_t *handle = (pktlists_t *) map;
3187 	ASSERT(pktptr32 != NULL);
3188 	DHD_PKT_SET_DMA_LEN(pktptr32, dma_len);
3189 	DHD_PKT_SET_DMAH(pktptr32, dmah);
3190 	DHD_PKT_SET_PA(pktptr32, pa);
3191 	DHD_PKT_SET_SECDMA(pktptr32, secdma);
3192 
3193 	/* XXX optimize these branch conditionals */
3194 	if (pkttype == PKTTYPE_DATA_TX) {
3195 		PKTLIST_ENQ(handle->tx_pkt_list,  pktptr32);
3196 	} else if (pkttype == PKTTYPE_DATA_RX) {
3197 		PKTLIST_ENQ(handle->rx_pkt_list,  pktptr32);
3198 	} else {
3199 		PKTLIST_ENQ(handle->ctrl_pkt_list,  pktptr32);
3200 	}
3201 
3202 	return DHD_PKTID32(pktptr32);
3203 }
3204 
3205 /** Convert a pktid to pktptr and retrieve saved dma parameters from packet */
3206 static INLINE void *
dhd_pktid_to_native(dhd_pktid_map_handle_t * map,uint32 pktid32,dmaaddr_t * pa,uint32 * dma_len,void ** dmah,void ** secdma,dhd_pkttype_t pkttype)3207 dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
3208 	dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
3209 	dhd_pkttype_t pkttype)
3210 {
3211 	pktlists_t *handle = (pktlists_t *) map;
3212 	void *pktptr32;
3213 
3214 	ASSERT(pktid32 != 0U);
3215 	pktptr32 = DHD_PKTPTR32(pktid32);
3216 	*dma_len = DHD_PKT_GET_DMA_LEN(pktptr32);
3217 	*dmah = DHD_PKT_GET_DMAH(pktptr32);
3218 	*pa = DHD_PKT_GET_PA(pktptr32);
3219 	*secdma = DHD_PKT_GET_SECDMA(pktptr32);
3220 
3221 	/* XXX optimize these branch conditionals */
3222 	if (pkttype == PKTTYPE_DATA_TX) {
3223 		PKTLIST_UNLINK(handle->tx_pkt_list,  pktptr32);
3224 	} else if (pkttype == PKTTYPE_DATA_RX) {
3225 		PKTLIST_UNLINK(handle->rx_pkt_list,  pktptr32);
3226 	} else {
3227 		PKTLIST_UNLINK(handle->ctrl_pkt_list,  pktptr32);
3228 	}
3229 
3230 	return pktptr32;
3231 }
3232 
3233 #define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype)  DHD_PKTID32(pkt)
3234 
3235 #define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dma_dir, dmah, secdma, pkttype) \
3236 	({ BCM_REFERENCE(dhd); BCM_REFERENCE(nkey); BCM_REFERENCE(dma_dir); \
3237 	   dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
3238 			   (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
3239 	})
3240 
3241 #define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dma_dir, dmah, secdma, pkttype) \
3242 	({ BCM_REFERENCE(dhd); BCM_REFERENCE(dma_dir); \
3243 	   dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
3244 			   (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
3245 	})
3246 
3247 #define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
3248 	({ BCM_REFERENCE(dhd); BCM_REFERENCE(pkttype);	\
3249 		dhd_pktid_to_native((dhd_pktid_map_handle_t *) map, (uint32)(pktid), \
3250 				(dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
3251 				(void **)&secdma, (dhd_pkttype_t)(pkttype)); \
3252 	})
3253 
3254 #define DHD_PKTID_AVAIL(map)  (~0)
3255 
3256 #endif /* ! DHD_PCIE_PKTID */
3257 
3258 /* +------------------ End of PCIE DHD PKTID MAPPER  -----------------------+ */
3259 
3260 /*
3261  * Allocating buffers for common rings.
3262  * also allocating Buffers for hmaptest, Scratch buffer for dma rx offset,
3263  * bus_throughput_measurement and snapshot upload
3264  */
3265 static int
dhd_prot_allocate_bufs(dhd_pub_t * dhd,dhd_prot_t * prot)3266 dhd_prot_allocate_bufs(dhd_pub_t *dhd, dhd_prot_t *prot)
3267 {
3268 
3269 	/* Common Ring Allocations */
3270 
3271 	/* Ring  0: H2D Control Submission */
3272 	if (dhd_prot_ring_attach(dhd, &prot->h2dring_ctrl_subn, "h2dctrl",
3273 	        H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE,
3274 	        BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) != BCME_OK) {
3275 		DHD_ERROR(("%s: dhd_prot_ring_attach H2D Ctrl Submission failed\n",
3276 			__FUNCTION__));
3277 		goto fail;
3278 	}
3279 
3280 	/* Ring  1: H2D Receive Buffer Post */
3281 	if (dhd_prot_ring_attach(dhd, &prot->h2dring_rxp_subn, "h2drxp",
3282 	        H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE,
3283 	        BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT) != BCME_OK) {
3284 		DHD_ERROR(("%s: dhd_prot_ring_attach H2D RxPost failed\n",
3285 			__FUNCTION__));
3286 		goto fail;
3287 	}
3288 
3289 	/* Ring  2: D2H Control Completion */
3290 	if (dhd_prot_ring_attach(dhd, &prot->d2hring_ctrl_cpln, "d2hctrl",
3291 	        D2HRING_CTRL_CMPLT_MAX_ITEM, D2HRING_CTRL_CMPLT_ITEMSIZE,
3292 	        BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE) != BCME_OK) {
3293 		DHD_ERROR(("%s: dhd_prot_ring_attach D2H Ctrl Completion failed\n",
3294 			__FUNCTION__));
3295 		goto fail;
3296 	}
3297 
3298 	/* Ring  3: D2H Transmit Complete */
3299 	if (dhd_prot_ring_attach(dhd, &prot->d2hring_tx_cpln, "d2htxcpl",
3300 	        D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE,
3301 	        BCMPCIE_D2H_MSGRING_TX_COMPLETE) != BCME_OK) {
3302 		DHD_ERROR(("%s: dhd_prot_ring_attach D2H Tx Completion failed\n",
3303 			__FUNCTION__));
3304 		goto fail;
3305 
3306 	}
3307 
3308 	/* Ring  4: D2H Receive Complete */
3309 	if (dhd_prot_ring_attach(dhd, &prot->d2hring_rx_cpln, "d2hrxcpl",
3310 	        D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE,
3311 	        BCMPCIE_D2H_MSGRING_RX_COMPLETE) != BCME_OK) {
3312 		DHD_ERROR(("%s: dhd_prot_ring_attach D2H Rx Completion failed\n",
3313 			__FUNCTION__));
3314 		goto fail;
3315 
3316 	}
3317 
3318 	/*
3319 	 * Max number of flowrings is not yet known. msgbuf_ring_t with DMA-able
3320 	 * buffers for flowrings will be instantiated, in dhd_prot_init() .
3321 	 * See dhd_prot_flowrings_pool_attach()
3322 	 */
3323 	/* ioctl response buffer */
3324 	if (dhd_dma_buf_alloc(dhd, &prot->retbuf, IOCT_RETBUF_SIZE)) {
3325 		goto fail;
3326 	}
3327 
3328 	/* IOCTL request buffer */
3329 	if (dhd_dma_buf_alloc(dhd, &prot->ioctbuf, IOCT_RETBUF_SIZE)) {
3330 		goto fail;
3331 	}
3332 
3333 	/* Host TS request buffer one buffer for now */
3334 	if (dhd_dma_buf_alloc(dhd, &prot->hostts_req_buf, CTRLSUB_HOSTTS_MEESAGE_SIZE)) {
3335 		goto fail;
3336 	}
3337 	prot->hostts_req_buf_inuse = FALSE;
3338 
3339 	/* Scratch buffer for dma rx offset */
3340 #ifdef BCM_HOST_BUF
3341 	if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf,
3342 		ROUNDUP(DMA_D2H_SCRATCH_BUF_LEN, 16) + DMA_HOST_BUFFER_LEN))
3343 #else
3344 	if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN))
3345 
3346 #endif /* BCM_HOST_BUF */
3347 	{
3348 		goto fail;
3349 	}
3350 
3351 #ifdef DHD_HMAPTEST
3352 	/* Allocate buffer for hmaptest  */
3353 	DHD_ERROR(("allocating memory for hmaptest \n"));
3354 	if (dhd_dma_buf_alloc(dhd, &prot->hmaptest.mem, HMAP_SANDBOX_BUFFER_LEN)) {
3355 
3356 		goto fail;
3357 	} else {
3358 		uint32 scratch_len;
3359 		uint64 scratch_lin, w1_start;
3360 		dmaaddr_t scratch_pa;
3361 
3362 		scratch_pa = prot->hmaptest.mem.pa;
3363 		scratch_len = prot->hmaptest.mem.len;
3364 		scratch_lin  = (uint64)(PHYSADDRLO(scratch_pa) & 0xffffffff)
3365 			| (((uint64)PHYSADDRHI(scratch_pa)& 0xffffffff) << 32);
3366 		w1_start  = scratch_lin +  scratch_len;
3367 		DHD_ERROR(("hmap: NOTE Buffer alloc for HMAPTEST Start=0x%0llx len=0x%08x"
3368 			"End=0x%0llx\n", (uint64) scratch_lin, scratch_len, (uint64) w1_start));
3369 	}
3370 #endif /* DHD_HMAPTEST */
3371 
3372 	/* scratch buffer bus throughput measurement */
3373 	if (dhd_dma_buf_alloc(dhd, &prot->host_bus_throughput_buf, DHD_BUS_TPUT_BUF_LEN)) {
3374 		goto fail;
3375 	}
3376 
3377 #ifdef SNAPSHOT_UPLOAD
3378 	/* snapshot upload buffer */
3379 	if (dhd_dma_buf_alloc(dhd, &prot->snapshot_upload_buf, SNAPSHOT_UPLOAD_BUF_SIZE)) {
3380 		goto fail;
3381 	}
3382 #endif	/* SNAPSHOT_UPLOAD */
3383 
3384 	return BCME_OK;
3385 
3386 fail:
3387 	return BCME_NOMEM;
3388 }
3389 
3390 /**
3391  * The PCIE FD protocol layer is constructed in two phases:
3392  *    Phase 1. dhd_prot_attach()
3393  *    Phase 2. dhd_prot_init()
3394  *
3395  * dhd_prot_attach() - Allocates a dhd_prot_t object and resets all its fields.
3396  * All Common rings are also attached (msgbuf_ring_t objects are allocated
3397  * with DMA-able buffers).
3398  * All dhd_dma_buf_t objects are also allocated here.
3399  *
3400  * As dhd_prot_attach is invoked prior to the pcie_shared object is read, any
3401  * initialization of objects that requires information advertized by the dongle
3402  * may not be performed here.
3403  * E.g. the number of TxPost flowrings is not know at this point, neither do
3404  * we know shich form of D2H DMA sync mechanism is advertized by the dongle, or
3405  * whether the dongle supports DMA-ing of WR/RD indices for the H2D and/or D2H
3406  * rings (common + flow).
3407  *
3408  * dhd_prot_init() is invoked after the bus layer has fetched the information
3409  * advertized by the dongle in the pcie_shared_t.
3410  */
3411 int
dhd_prot_attach(dhd_pub_t * dhd)3412 dhd_prot_attach(dhd_pub_t *dhd)
3413 {
3414 	osl_t *osh = dhd->osh;
3415 	dhd_prot_t *prot;
3416 	uint32 trap_buf_len;
3417 
3418 	/* Allocate prot structure */
3419 	if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT,
3420 		sizeof(dhd_prot_t)))) {
3421 		DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
3422 		goto fail;
3423 	}
3424 	memset(prot, 0, sizeof(*prot));
3425 
3426 	prot->osh = osh;
3427 	dhd->prot = prot;
3428 
3429 	/* DMAing ring completes supported? FALSE by default  */
3430 	dhd->dma_d2h_ring_upd_support = FALSE;
3431 	dhd->dma_h2d_ring_upd_support = FALSE;
3432 	dhd->dma_ring_upd_overwrite = FALSE;
3433 
3434 	dhd->idma_inited = 0;
3435 	dhd->ifrm_inited = 0;
3436 	dhd->dar_inited = 0;
3437 
3438 	if (dhd_prot_allocate_bufs(dhd, prot) != BCME_OK) {
3439 		goto fail;
3440 	}
3441 
3442 #ifdef DHD_RX_CHAINING
3443 	dhd_rxchain_reset(&prot->rxchain);
3444 #endif
3445 
3446 	prot->pktid_ctrl_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_CTRL);
3447 	if (prot->pktid_ctrl_map == NULL) {
3448 		goto fail;
3449 	}
3450 
3451 	prot->pktid_rx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_RX);
3452 	if (prot->pktid_rx_map == NULL)
3453 		goto fail;
3454 
3455 	prot->pktid_tx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_TX);
3456 	if (prot->pktid_rx_map == NULL)
3457 		goto fail;
3458 
3459 #ifdef IOCTLRESP_USE_CONSTMEM
3460 	prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd,
3461 		DHD_FLOWRING_MAX_IOCTLRESPBUF_POST);
3462 	if (prot->pktid_map_handle_ioctl == NULL) {
3463 		goto fail;
3464 	}
3465 #endif /* IOCTLRESP_USE_CONSTMEM */
3466 
3467 #ifdef DHD_MAP_PKTID_LOGGING
3468 	prot->pktid_dma_map = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
3469 	if (prot->pktid_dma_map == NULL) {
3470 		DHD_ERROR(("%s: failed to allocate pktid_dma_map\n",
3471 			__FUNCTION__));
3472 	}
3473 
3474 	prot->pktid_dma_unmap = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
3475 	if (prot->pktid_dma_unmap == NULL) {
3476 		DHD_ERROR(("%s: failed to allocate pktid_dma_unmap\n",
3477 			__FUNCTION__));
3478 	}
3479 #endif /* DHD_MAP_PKTID_LOGGING */
3480 
3481 #ifdef D2H_MINIDUMP
3482 	if (dhd->bus->sih->buscorerev < 71) {
3483 		trap_buf_len = BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN_MIN;
3484 	} else {
3485 		/* buscorerev >= 71, supports minimdump of len 96KB */
3486 		trap_buf_len = BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN;
3487 	}
3488 #else
3489 	/* FW going to DMA extended trap data,
3490 	 * allocate buffer for the maximum extended trap data.
3491 	 */
3492 	trap_buf_len = BCMPCIE_EXT_TRAP_DATA_MAXLEN;
3493 #endif /* D2H_MINIDUMP */
3494 
3495 	/* Initialize trap buffer */
3496 	if (dhd_dma_buf_alloc(dhd, &dhd->prot->fw_trap_buf, trap_buf_len)) {
3497 		DHD_ERROR(("%s: dhd_init_trap_buffer falied\n", __FUNCTION__));
3498 		goto fail;
3499 	}
3500 
3501 	return BCME_OK;
3502 
3503 fail:
3504 
3505 	if (prot) {
3506 		/* Free up all allocated memories */
3507 		dhd_prot_detach(dhd);
3508 	}
3509 
3510 	return BCME_NOMEM;
3511 } /* dhd_prot_attach */
3512 
3513 static int
dhd_alloc_host_scbs(dhd_pub_t * dhd)3514 dhd_alloc_host_scbs(dhd_pub_t *dhd)
3515 {
3516 	int ret = BCME_OK;
3517 	sh_addr_t base_addr;
3518 	dhd_prot_t *prot = dhd->prot;
3519 	uint32 host_scb_size = 0;
3520 
3521 	if (dhd->hscb_enable) {
3522 		/* read number of bytes to allocate from F/W */
3523 		dhd_bus_cmn_readshared(dhd->bus, &host_scb_size, HOST_SCB_ADDR, 0);
3524 		if (host_scb_size) {
3525 			/* In fw reload scenario the buffer could have been allocated for previous
3526 			 * run. Check the existing buffer if there is one that can accommodate
3527 			 * the new firmware requirement and reuse the buffer is possible.
3528 			 */
3529 			if (prot->host_scb_buf.va) {
3530 				if (prot->host_scb_buf.len >= host_scb_size) {
3531 					prot->host_scb_buf.len = host_scb_size;
3532 				} else {
3533 					dhd_dma_buf_free(dhd, &prot->host_scb_buf);
3534 				}
3535 			}
3536 			/* alloc array of host scbs */
3537 			if (prot->host_scb_buf.va == NULL) {
3538 				ret = dhd_dma_buf_alloc(dhd, &prot->host_scb_buf, host_scb_size);
3539 			}
3540 			/* write host scb address to F/W */
3541 			if (ret == BCME_OK) {
3542 				dhd_base_addr_htolpa(&base_addr, prot->host_scb_buf.pa);
3543 				dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3544 					HOST_SCB_ADDR, 0);
3545 			}
3546 		}
3547 	} else {
3548 		DHD_TRACE(("%s: Host scb not supported in F/W. \n", __FUNCTION__));
3549 	}
3550 
3551 	if (ret != BCME_OK) {
3552 		DHD_ERROR(("%s dhd_alloc_host_scbs, alloc failed: Err Code %d\n",
3553 			__FUNCTION__, ret));
3554 	}
3555 	return ret;
3556 }
3557 
3558 void
dhd_set_host_cap(dhd_pub_t * dhd)3559 dhd_set_host_cap(dhd_pub_t *dhd)
3560 {
3561 	uint32 data = 0;
3562 	dhd_prot_t *prot = dhd->prot;
3563 #ifdef D2H_MINIDUMP
3564 	uint16 host_trap_addr_len;
3565 #endif /* D2H_MINIDUMP */
3566 
3567 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
3568 		if (dhd->h2d_phase_supported) {
3569 			data |= HOSTCAP_H2D_VALID_PHASE;
3570 			if (dhd->force_dongletrap_on_bad_h2d_phase)
3571 				data |= HOSTCAP_H2D_ENABLE_TRAP_ON_BADPHASE;
3572 		}
3573 		if (prot->host_ipc_version > prot->device_ipc_version)
3574 			prot->active_ipc_version = prot->device_ipc_version;
3575 		else
3576 			prot->active_ipc_version = prot->host_ipc_version;
3577 
3578 		data |= prot->active_ipc_version;
3579 
3580 		if (dhdpcie_bus_get_pcie_hostready_supported(dhd->bus)) {
3581 			DHD_INFO(("Advertise Hostready Capability\n"));
3582 			data |= HOSTCAP_H2D_ENABLE_HOSTRDY;
3583 		}
3584 #ifdef PCIE_INB_DW
3585 		if (dhdpcie_bus_get_pcie_inband_dw_supported(dhd->bus)) {
3586 			DHD_INFO(("Advertise Inband-DW Capability\n"));
3587 			data |= HOSTCAP_DS_INBAND_DW;
3588 			data |= HOSTCAP_DS_NO_OOB_DW;
3589 			dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_INB);
3590 			if (!dhd->dma_h2d_ring_upd_support || !dhd->dma_d2h_ring_upd_support) {
3591 				dhd_init_dongle_ds_lock(dhd->bus);
3592 				dhdpcie_set_dongle_deepsleep(dhd->bus, FALSE);
3593 			}
3594 		} else
3595 #endif /* PCIE_INB_DW */
3596 #ifdef PCIE_OOB
3597 		if (dhdpcie_bus_get_pcie_oob_dw_supported(dhd->bus)) {
3598 			dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_OOB);
3599 		} else
3600 #endif /* PCIE_OOB */
3601 		{
3602 			/* Disable DS altogether */
3603 			data |= HOSTCAP_DS_NO_OOB_DW;
3604 			dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_NONE);
3605 		}
3606 
3607 		/* Indicate support for extended trap data */
3608 		data |= HOSTCAP_EXTENDED_TRAP_DATA;
3609 
3610 		/* Indicate support for TX status metadata */
3611 		if (dhd->pcie_txs_metadata_enable != 0)
3612 			data |= HOSTCAP_TXSTATUS_METADATA;
3613 
3614 #ifdef BTLOG
3615 		/* Indicate support for BT logging */
3616 		if (dhd->bt_logging) {
3617 			if (dhd->bt_logging_enabled) {
3618 				data |= HOSTCAP_BT_LOGGING;
3619 				DHD_ERROR(("BT LOGGING  enabled\n"));
3620 			}
3621 			else {
3622 				DHD_ERROR(("BT logging upported in FW, BT LOGGING disabled\n"));
3623 			}
3624 		}
3625 		else {
3626 			DHD_ERROR(("BT LOGGING not enabled in FW !!\n"));
3627 		}
3628 #endif	/* BTLOG */
3629 
3630 		/* Enable fast delete ring in firmware if supported */
3631 		if (dhd->fast_delete_ring_support) {
3632 			data |= HOSTCAP_FAST_DELETE_RING;
3633 		}
3634 
3635 		if (dhdpcie_bus_get_pcie_idma_supported(dhd->bus)) {
3636 			DHD_ERROR(("IDMA inited\n"));
3637 			data |= HOSTCAP_H2D_IDMA;
3638 			dhd->idma_inited = TRUE;
3639 		} else {
3640 			DHD_ERROR(("IDMA not enabled in FW !!\n"));
3641 			dhd->idma_inited = FALSE;
3642 		}
3643 
3644 		if (dhdpcie_bus_get_pcie_ifrm_supported(dhd->bus)) {
3645 			DHD_ERROR(("IFRM Inited\n"));
3646 			data |= HOSTCAP_H2D_IFRM;
3647 			dhd->ifrm_inited = TRUE;
3648 			dhd->dma_h2d_ring_upd_support = FALSE;
3649 			dhd_prot_dma_indx_free(dhd);
3650 		} else {
3651 			DHD_ERROR(("IFRM not enabled in FW !!\n"));
3652 			dhd->ifrm_inited = FALSE;
3653 		}
3654 
3655 		if (dhdpcie_bus_get_pcie_dar_supported(dhd->bus)) {
3656 			DHD_ERROR(("DAR doorbell Use\n"));
3657 			data |= HOSTCAP_H2D_DAR;
3658 			dhd->dar_inited = TRUE;
3659 		} else {
3660 			DHD_ERROR(("DAR not enabled in FW !!\n"));
3661 			dhd->dar_inited = FALSE;
3662 		}
3663 
3664 		/* FW Checks for HOSTCAP_UR_FW_NO_TRAP and Does not TRAP if set
3665 		 * Radar 36403220 JIRA SWWLAN-182145
3666 		 */
3667 		data |= HOSTCAP_UR_FW_NO_TRAP;
3668 
3669 #ifdef SNAPSHOT_UPLOAD
3670 		/* Indicate support for snapshot upload */
3671 		if (dhd->snapshot_upload) {
3672 			data |= HOSTCAP_SNAPSHOT_UPLOAD;
3673 			DHD_ERROR(("ALLOW SNAPSHOT UPLOAD!!\n"));
3674 		}
3675 #endif	/* SNAPSHOT_UPLOAD */
3676 
3677 		if (dhd->hscb_enable) {
3678 			data |= HOSTCAP_HSCB;
3679 		}
3680 
3681 #ifdef EWP_EDL
3682 		if (dhd->dongle_edl_support) {
3683 			data |= HOSTCAP_EDL_RING;
3684 			DHD_ERROR(("Enable EDL host cap\n"));
3685 		} else {
3686 			DHD_ERROR(("DO NOT SET EDL host cap\n"));
3687 		}
3688 #endif /* EWP_EDL */
3689 
3690 #ifdef D2H_MINIDUMP
3691 		if (dhd_bus_is_minidump_enabled(dhd)) {
3692 			data |= HOSTCAP_EXT_TRAP_DBGBUF;
3693 			DHD_ERROR(("ALLOW D2H MINIDUMP!!\n"));
3694 		}
3695 #endif /* D2H_MINIDUMP */
3696 #ifdef DHD_HP2P
3697 		if (dhdpcie_bus_get_hp2p_supported(dhd->bus)) {
3698 			data |= HOSTCAP_PKT_TIMESTAMP;
3699 			data |= HOSTCAP_PKT_HP2P;
3700 			DHD_ERROR(("Enable HP2P in host cap\n"));
3701 		} else {
3702 			DHD_ERROR(("HP2P not enabled in host cap\n"));
3703 		}
3704 #endif /* DHD_HP2P */
3705 
3706 #ifdef DHD_DB0TS
3707 		if (dhd->db0ts_capable) {
3708 			data |= HOSTCAP_DB0_TIMESTAMP;
3709 			DHD_ERROR(("Enable DB0 TS in host cap\n"));
3710 		} else {
3711 			DHD_ERROR(("DB0 TS not enabled in host cap\n"));
3712 		}
3713 #endif /* DHD_DB0TS */
3714 		if (dhd->extdtxs_in_txcpl) {
3715 			DHD_ERROR(("Enable hostcap: EXTD TXS in txcpl\n"));
3716 			data |= HOSTCAP_PKT_TXSTATUS;
3717 		}
3718 		else {
3719 			DHD_ERROR(("Enable hostcap: EXTD TXS in txcpl\n"));
3720 		}
3721 
3722 		DHD_INFO(("%s:Active Ver:%d, Host Ver:%d, FW Ver:%d\n",
3723 			__FUNCTION__,
3724 			prot->active_ipc_version, prot->host_ipc_version,
3725 			prot->device_ipc_version));
3726 
3727 		dhd_bus_cmn_writeshared(dhd->bus, &data, sizeof(uint32), HOST_API_VERSION, 0);
3728 		dhd_bus_cmn_writeshared(dhd->bus, &prot->fw_trap_buf.pa,
3729 			sizeof(prot->fw_trap_buf.pa), DNGL_TO_HOST_TRAP_ADDR, 0);
3730 #ifdef D2H_MINIDUMP
3731 		if (dhd_bus_is_minidump_enabled(dhd)) {
3732 			/* Dongle expects the host_trap_addr_len in terms of words */
3733 			host_trap_addr_len = prot->fw_trap_buf.len / 4;
3734 			dhd_bus_cmn_writeshared(dhd->bus, &host_trap_addr_len,
3735 				sizeof(host_trap_addr_len), DNGL_TO_HOST_TRAP_ADDR_LEN, 0);
3736 		}
3737 #endif /* D2H_MINIDUMP */
3738 	}
3739 
3740 #ifdef DHD_TIMESYNC
3741 	dhd_timesync_notify_ipc_rev(dhd->ts, prot->active_ipc_version);
3742 #endif /* DHD_TIMESYNC */
3743 }
3744 
3745 #ifdef AGG_H2D_DB
dhd_agg_inflight_stats_dump(dhd_pub_t * dhd,struct bcmstrbuf * strbuf)3746 void dhd_agg_inflight_stats_dump(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
3747 {
3748 	uint64 *inflight_histo = dhd->prot->agg_h2d_db_info.inflight_histo;
3749 	uint32 i;
3750 	uint64 total_inflight_histo = 0;
3751 
3752 	bcm_bprintf(strbuf, "inflight: \t count\n");
3753 	for (i = 0; i < DHD_NUM_INFLIGHT_HISTO_ROWS; i++) {
3754 		bcm_bprintf(strbuf, "%16u: \t %llu\n", 1U<<i, inflight_histo[i]);
3755 		total_inflight_histo += inflight_histo[i];
3756 	}
3757 	bcm_bprintf(strbuf, "total_inflight_histo: %llu\n", total_inflight_histo);
3758 }
3759 
dhd_agg_inflights_stats_update(dhd_pub_t * dhd,uint32 inflight)3760 void dhd_agg_inflights_stats_update(dhd_pub_t *dhd, uint32 inflight)
3761 {
3762 	uint64 *bin = dhd->prot->agg_h2d_db_info.inflight_histo;
3763 	uint64 *p;
3764 	uint32 bin_power;
3765 	bin_power = next_larger_power2(inflight);
3766 
3767 	switch (bin_power) {
3768 		case   1: p = bin + 0; break;
3769 		case   2: p = bin + 1; break;
3770 		case   4: p = bin + 2; break;
3771 		case   8: p = bin + 3; break;
3772 		case  16: p = bin + 4; break;
3773 		case  32: p = bin + 5; break;
3774 		case  64: p = bin + 6; break;
3775 		case 128: p = bin + 7; break;
3776 		case 256: p = bin + 8; break;
3777 		case 512: p = bin + 9; break;
3778 		case 1024: p = bin + 10; break;
3779 		case 2048: p = bin + 11; break;
3780 		case 4096: p = bin + 12; break;
3781 		case 8192: p = bin + 13; break;
3782 		default : p = bin + 13; break;
3783 	}
3784 	ASSERT((p - bin) < DHD_NUM_INFLIGHT_HISTO_ROWS);
3785 	*p = *p + 1;
3786 	return;
3787 }
3788 
3789 /*
3790  * dhd_msgbuf_agg_h2d_db_timer_fn:
3791  * Timer callback function for ringing h2d DB.
3792  * This is run in isr context (HRTIMER_MODE_REL),
3793  * do not hold any spin_lock_bh().
3794  * Using HRTIMER_MODE_REL_SOFT causing TPUT regressions.
3795  */
3796 enum hrtimer_restart
dhd_msgbuf_agg_h2d_db_timer_fn(struct hrtimer * timer)3797 dhd_msgbuf_agg_h2d_db_timer_fn(struct hrtimer *timer)
3798 {
3799 	agg_h2d_db_info_t *agg_db_info;
3800 	dhd_pub_t *dhd;
3801 	dhd_prot_t *prot;
3802 	uint32 db_index;
3803 	uint corerev;
3804 
3805 	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
3806 	agg_db_info = container_of(timer, agg_h2d_db_info_t, timer);
3807 	GCC_DIAGNOSTIC_POP();
3808 
3809 	dhd = agg_db_info->dhd;
3810 	prot = dhd->prot;
3811 
3812 	prot->agg_h2d_db_info.timer_db_cnt++;
3813 	if (IDMA_ACTIVE(dhd)) {
3814 		db_index = IDMA_IDX0;
3815 		if (dhd->bus->sih) {
3816 			corerev = dhd->bus->sih->buscorerev;
3817 			if (corerev >= 24) {
3818 				db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
3819 			}
3820 		}
3821 		prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
3822 	} else {
3823 		prot->mb_ring_fn(dhd->bus, DHD_AGGR_H2D_DB_MAGIC);
3824 	}
3825 
3826 	return HRTIMER_NORESTART;
3827 }
3828 
3829 void
dhd_msgbuf_agg_h2d_db_timer_start(dhd_prot_t * prot)3830 dhd_msgbuf_agg_h2d_db_timer_start(dhd_prot_t *prot)
3831 {
3832 	agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info;
3833 
3834 	/* Queue the timer only when it is not in the queue */
3835 	if (!hrtimer_active(&agg_db_info->timer)) {
3836 		hrtimer_start(&agg_db_info->timer, ns_to_ktime(agg_h2d_db_timeout * NSEC_PER_USEC),
3837 				HRTIMER_MODE_REL);
3838 	}
3839 }
3840 
3841 static void
dhd_msgbuf_agg_h2d_db_timer_init(dhd_pub_t * dhd)3842 dhd_msgbuf_agg_h2d_db_timer_init(dhd_pub_t *dhd)
3843 {
3844 	dhd_prot_t *prot = dhd->prot;
3845 	agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info;
3846 
3847 	agg_db_info->dhd = dhd;
3848 	hrtimer_init(&agg_db_info->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3849 	/* The timer function will run from ISR context, ensure no spin_lock_bh are used */
3850 	agg_db_info->timer.function = &dhd_msgbuf_agg_h2d_db_timer_fn;
3851 	agg_db_info->init = TRUE;
3852 	agg_db_info->timer_db_cnt = 0;
3853 	agg_db_info->direct_db_cnt = 0;
3854 	agg_db_info->inflight_histo = (uint64 *)MALLOCZ(dhd->osh, DHD_INFLIGHT_HISTO_SIZE);
3855 }
3856 
3857 static void
dhd_msgbuf_agg_h2d_db_timer_reset(dhd_pub_t * dhd)3858 dhd_msgbuf_agg_h2d_db_timer_reset(dhd_pub_t *dhd)
3859 {
3860 	dhd_prot_t *prot = dhd->prot;
3861 	agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info;
3862 	if (agg_db_info->init) {
3863 		if (agg_db_info->inflight_histo) {
3864 			MFREE(dhd->osh, agg_db_info->inflight_histo, DHD_INFLIGHT_HISTO_SIZE);
3865 		}
3866 		hrtimer_try_to_cancel(&agg_db_info->timer);
3867 		agg_db_info->init = FALSE;
3868 	}
3869 }
3870 
3871 static void
dhd_msgbuf_agg_h2d_db_timer_cancel(dhd_pub_t * dhd)3872 dhd_msgbuf_agg_h2d_db_timer_cancel(dhd_pub_t *dhd)
3873 {
3874 	dhd_prot_t *prot = dhd->prot;
3875 	agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info;
3876 	hrtimer_try_to_cancel(&agg_db_info->timer);
3877 }
3878 #endif /* AGG_H2D_DB */
3879 
3880 void
dhd_prot_clearcounts(dhd_pub_t * dhd)3881 dhd_prot_clearcounts(dhd_pub_t *dhd)
3882 {
3883 	dhd_prot_t *prot = dhd->prot;
3884 #ifdef AGG_H2D_DB
3885 	agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info;
3886 	if (agg_db_info->inflight_histo) {
3887 		memset(agg_db_info->inflight_histo, 0, DHD_INFLIGHT_HISTO_SIZE);
3888 	}
3889 	agg_db_info->direct_db_cnt = 0;
3890 	agg_db_info->timer_db_cnt = 0;
3891 #endif /* AGG_H2D_DB */
3892 	prot->txcpl_db_cnt = 0;
3893 	prot->tx_h2d_db_cnt = 0;
3894 }
3895 
3896 /**
3897  * dhd_prot_init - second stage of dhd_prot_attach. Now that the dongle has
3898  * completed it's initialization of the pcie_shared structure, we may now fetch
3899  * the dongle advertized features and adjust the protocol layer accordingly.
3900  *
3901  * dhd_prot_init() may be invoked again after a dhd_prot_reset().
3902  */
3903 int
dhd_prot_init(dhd_pub_t * dhd)3904 dhd_prot_init(dhd_pub_t *dhd)
3905 {
3906 	sh_addr_t base_addr;
3907 	dhd_prot_t *prot = dhd->prot;
3908 	int ret = 0;
3909 	uint32 idmacontrol;
3910 	uint32 waitcount = 0;
3911 	uint16 max_eventbufpost = 0;
3912 
3913 	/**
3914 	 * A user defined value can be assigned to global variable h2d_max_txpost via
3915 	 * 1. DHD IOVAR h2d_max_txpost, before firmware download
3916 	 * 2. module parameter h2d_max_txpost
3917 	 * prot->h2d_max_txpost is assigned with DHD_H2DRING_TXPOST_MAX_ITEM,
3918 	 * if user has not defined any buffers by one of the above methods.
3919 	 */
3920 	prot->h2d_max_txpost = (uint16)h2d_max_txpost;
3921 	DHD_ERROR(("%s:%d: h2d_max_txpost = %d\n", __FUNCTION__, __LINE__, prot->h2d_max_txpost));
3922 
3923 #if defined(DHD_HTPUT_TUNABLES)
3924 	prot->h2d_htput_max_txpost = (uint16)h2d_htput_max_txpost;
3925 	DHD_ERROR(("%s:%d: h2d_htput_max_txpost = %d\n",
3926 		__FUNCTION__, __LINE__, prot->h2d_htput_max_txpost));
3927 #endif /* DHD_HTPUT_TUNABLES */
3928 
3929 	/* Read max rx packets supported by dongle */
3930 	dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0);
3931 	if (prot->max_rxbufpost == 0) {
3932 		/* This would happen if the dongle firmware is not */
3933 		/* using the latest shared structure template */
3934 		prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST;
3935 	}
3936 	DHD_ERROR(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));
3937 
3938 	/* Initialize.  bzero() would blow away the dma pointers. */
3939 	max_eventbufpost = (uint16)dhdpcie_get_max_eventbufpost(dhd->bus);
3940 	prot->max_eventbufpost = (((max_eventbufpost + DHD_FLOWRING_MAX_IOCTLRESPBUF_POST)) >=
3941 		H2DRING_CTRL_SUB_MAX_ITEM) ? DHD_FLOWRING_MAX_EVENTBUF_POST : max_eventbufpost;
3942 	prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST;
3943 	prot->max_infobufpost = DHD_H2D_INFORING_MAX_BUF_POST;
3944 #ifdef BTLOG
3945 	prot->max_btlogbufpost = DHD_H2D_BTLOGRING_MAX_BUF_POST;
3946 #endif	/* BTLOG */
3947 	prot->max_tsbufpost = DHD_MAX_TSBUF_POST;
3948 
3949 	prot->cur_ioctlresp_bufs_posted = 0;
3950 	OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
3951 	prot->data_seq_no = 0;
3952 	prot->ioctl_seq_no = 0;
3953 	prot->rxbufpost = 0;
3954 	prot->tot_rxbufpost = 0;
3955 	prot->tot_rxcpl = 0;
3956 	prot->cur_event_bufs_posted = 0;
3957 	prot->ioctl_state = 0;
3958 	prot->curr_ioctl_cmd = 0;
3959 	prot->cur_ts_bufs_posted = 0;
3960 	prot->infobufpost = 0;
3961 #ifdef BTLOG
3962 	prot->btlogbufpost = 0;
3963 #endif	/* BTLOG */
3964 
3965 	prot->dmaxfer.srcmem.va = NULL;
3966 	prot->dmaxfer.dstmem.va = NULL;
3967 	prot->dmaxfer.in_progress = FALSE;
3968 
3969 #ifdef DHD_HMAPTEST
3970 	prot->hmaptest.in_progress = FALSE;
3971 #endif /* DHD_HMAPTEST */
3972 	prot->metadata_dbg = FALSE;
3973 	prot->rx_metadata_offset = 0;
3974 	prot->tx_metadata_offset = 0;
3975 	prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT;
3976 
3977 	/* To catch any rollover issues fast, starting with higher ioctl_trans_id */
3978 	prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
3979 	prot->ioctl_state = 0;
3980 	prot->ioctl_status = 0;
3981 	prot->ioctl_resplen = 0;
3982 	prot->ioctl_received = IOCTL_WAIT;
3983 
3984 	/* Initialize Common MsgBuf Rings */
3985 
3986 	prot->device_ipc_version = dhd->bus->api.fw_rev;
3987 	prot->host_ipc_version = PCIE_SHARED_VERSION;
3988 	prot->no_tx_resource = FALSE;
3989 
3990 	/* Init the host API version */
3991 	dhd_set_host_cap(dhd);
3992 
3993 	/* alloc and configure scb host address for dongle */
3994 	if ((ret = dhd_alloc_host_scbs(dhd))) {
3995 		return ret;
3996 	}
3997 
3998 	/* Register the interrupt function upfront */
3999 	/* remove corerev checks in data path */
4000 	/* do this after host/fw negotiation for DAR */
4001 	prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus);
4002 	prot->mb_2_ring_fn = dhd_bus_get_mbintr_2_fn(dhd->bus);
4003 
4004 	prot->tx_h2d_db_cnt = 0;
4005 #ifdef AGG_H2D_DB
4006 	dhd_msgbuf_agg_h2d_db_timer_init(dhd);
4007 #endif /* AGG_H2D_DB */
4008 
4009 	dhd->bus->_dar_war = (dhd->bus->sih->buscorerev < 64) ? TRUE : FALSE;
4010 
4011 	/* If supported by the host, indicate the memory block
4012 	 * for completion writes / submission reads to shared space
4013 	 */
4014 	if (dhd->dma_d2h_ring_upd_support) {
4015 		dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_wr_buf.pa);
4016 		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
4017 			D2H_DMA_INDX_WR_BUF, 0);
4018 		dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_rd_buf.pa);
4019 		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
4020 			H2D_DMA_INDX_RD_BUF, 0);
4021 	}
4022 
4023 	if (dhd->dma_h2d_ring_upd_support || IDMA_ENAB(dhd)) {
4024 		dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_wr_buf.pa);
4025 		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
4026 			H2D_DMA_INDX_WR_BUF, 0);
4027 		dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_rd_buf.pa);
4028 		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
4029 			D2H_DMA_INDX_RD_BUF, 0);
4030 	}
4031 
4032 	dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn);
4033 	dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn);
4034 	dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln);
4035 
4036 	/* Make it compatibile with pre-rev7 Firmware */
4037 	if (prot->active_ipc_version < PCIE_SHARED_VERSION_7) {
4038 		prot->d2hring_tx_cpln.item_len =
4039 			D2HRING_TXCMPLT_ITEMSIZE_PREREV7;
4040 		prot->d2hring_rx_cpln.item_len =
4041 			D2HRING_RXCMPLT_ITEMSIZE_PREREV7;
4042 	}
4043 	dhd_prot_ring_init(dhd, &prot->d2hring_tx_cpln);
4044 	dhd_prot_ring_init(dhd, &prot->d2hring_rx_cpln);
4045 
4046 	dhd_prot_d2h_sync_init(dhd);
4047 
4048 	dhd_prot_h2d_sync_init(dhd);
4049 
4050 #ifdef PCIE_INB_DW
4051 	/* Set the initial DS state */
4052 	if (INBAND_DW_ENAB(dhd->bus)) {
4053 		dhdpcie_bus_set_pcie_inband_dw_state(dhd->bus,
4054 			DW_DEVICE_DS_ACTIVE);
4055 	}
4056 #endif /* PCIE_INB_DW */
4057 
4058 	/* init the scratch buffer */
4059 	dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_scratch_buf.pa);
4060 	dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
4061 		D2H_DMA_SCRATCH_BUF, 0);
4062 	dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.len,
4063 		sizeof(prot->d2h_dma_scratch_buf.len), D2H_DMA_SCRATCH_BUF_LEN, 0);
4064 #ifdef DHD_DMA_INDICES_SEQNUM
4065 	prot->host_seqnum = D2H_EPOCH_INIT_VAL % D2H_EPOCH_MODULO;
4066 #endif /* DHD_DMA_INDICES_SEQNUM */
4067 	/* Signal to the dongle that common ring init is complete */
4068 	if (dhd->hostrdy_after_init)
4069 		dhd_bus_hostready(dhd->bus);
4070 
4071 	/*
4072 	 * If the DMA-able buffers for flowring needs to come from a specific
4073 	 * contiguous memory region, then setup prot->flowrings_dma_buf here.
4074 	 * dhd_prot_flowrings_pool_attach() will carve out DMA-able buffers from
4075 	 * this contiguous memory region, for each of the flowrings.
4076 	 */
4077 
4078 	/* Pre-allocate pool of msgbuf_ring for flowrings */
4079 	if (dhd_prot_flowrings_pool_attach(dhd) != BCME_OK) {
4080 		return BCME_ERROR;
4081 	}
4082 
4083 	dhd->ring_attached = TRUE;
4084 
4085 	/* If IFRM is enabled, wait for FW to setup the DMA channel */
4086 	if (IFRM_ENAB(dhd)) {
4087 		dhd_base_addr_htolpa(&base_addr, prot->h2d_ifrm_indx_wr_buf.pa);
4088 		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
4089 			H2D_IFRM_INDX_WR_BUF, 0);
4090 	}
4091 
4092 	/* If IDMA is enabled and initied, wait for FW to setup the IDMA descriptors
4093 	 * Waiting just before configuring doorbell
4094 	 */
4095 #ifdef BCMQT
4096 #define	IDMA_ENABLE_WAIT  100
4097 #else
4098 #define	IDMA_ENABLE_WAIT  10
4099 #endif
4100 	if (IDMA_ACTIVE(dhd)) {
4101 		/* wait for idma_en bit in IDMAcontrol register to be set */
4102 		/* Loop till idma_en is not set */
4103 		uint buscorerev = dhd->bus->sih->buscorerev;
4104 		idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
4105 			IDMAControl(buscorerev), 0, 0);
4106 		while (!(idmacontrol & PCIE_IDMA_MODE_EN(buscorerev)) &&
4107 			(waitcount++ < IDMA_ENABLE_WAIT)) {
4108 
4109 			DHD_ERROR(("iDMA not enabled yet,waiting 1 ms c=%d IDMAControl = %08x\n",
4110 				waitcount, idmacontrol));
4111 			OSL_DELAY(1000); /* 1ms as its onetime only */
4112 			idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
4113 				IDMAControl(buscorerev), 0, 0);
4114 		}
4115 
4116 		if (waitcount < IDMA_ENABLE_WAIT) {
4117 			DHD_ERROR(("iDMA enabled PCIEControl = %08x\n", idmacontrol));
4118 		} else {
4119 			DHD_ERROR(("Error: wait for iDMA timed out wait=%d IDMAControl = %08x\n",
4120 				waitcount, idmacontrol));
4121 			return BCME_ERROR;
4122 		}
4123 		// add delay to fix bring up issue
4124 		OSL_SLEEP(1);
4125 	}
4126 
4127 	/* Host should configure soft doorbells if needed ... here */
4128 
4129 	/* Post to dongle host configured soft doorbells */
4130 	dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd);
4131 
4132 	dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
4133 	dhd_msgbuf_rxbuf_post_event_bufs(dhd);
4134 
4135 	prot->no_retry = FALSE;
4136 	prot->no_aggr = FALSE;
4137 	prot->fixed_rate = FALSE;
4138 
4139 	/*
4140 	 * Note that any communication with the Dongle should be added
4141 	 * below this point. Any other host data structure initialiation that
4142 	 * needs to be done prior to the DPC starts executing should be done
4143 	 * befor this point.
4144 	 * Because once we start sending H2D requests to Dongle, the Dongle
4145 	 * respond immediately. So the DPC context to handle this
4146 	 * D2H response could preempt the context in which dhd_prot_init is running.
4147 	 * We want to ensure that all the Host part of dhd_prot_init is
4148 	 * done before that.
4149 	 */
4150 
4151 	/* See if info rings could be created, info rings should be created
4152 	* only if dongle does not support EDL
4153 	*/
4154 #ifdef EWP_EDL
4155 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support)
4156 #else
4157 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
4158 #endif /* EWP_EDL */
4159 	{
4160 		if ((ret = dhd_prot_init_info_rings(dhd)) != BCME_OK) {
4161 			/* For now log and proceed, further clean up action maybe necessary
4162 			 * when we have more clarity.
4163 			 */
4164 			DHD_ERROR(("%s Info rings couldn't be created: Err Code%d",
4165 				__FUNCTION__, ret));
4166 		}
4167 	}
4168 
4169 #ifdef EWP_EDL
4170 		/* Create Enhanced Debug Lane rings (EDL) if dongle supports it */
4171 		if (dhd->dongle_edl_support) {
4172 			if ((ret = dhd_prot_init_edl_rings(dhd)) != BCME_OK) {
4173 				DHD_ERROR(("%s EDL rings couldn't be created: Err Code%d",
4174 					__FUNCTION__, ret));
4175 			}
4176 		}
4177 #endif /* EWP_EDL */
4178 
4179 #ifdef BTLOG
4180 	/* create BT log rings */
4181 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_7 && dhd->bt_logging) {
4182 		if ((ret = dhd_prot_init_btlog_rings(dhd)) != BCME_OK) {
4183 			/* For now log and proceed, further clean up action maybe necessary
4184 			 * when we have more clarity.
4185 			 */
4186 			DHD_ERROR(("%s Info rings couldn't be created: Err Code%d",
4187 				__FUNCTION__, ret));
4188 		}
4189 	}
4190 #endif	/* BTLOG */
4191 
4192 #ifdef DHD_HP2P
4193 	/* create HPP txcmpl/rxcmpl rings */
4194 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_7 && dhd->hp2p_capable) {
4195 		if ((ret = dhd_prot_init_hp2p_rings(dhd)) != BCME_OK) {
4196 			/* For now log and proceed, further clean up action maybe necessary
4197 			 * when we have more clarity.
4198 			 */
4199 			DHD_ERROR(("%s HP2P rings couldn't be created: Err Code%d",
4200 				__FUNCTION__, ret));
4201 		}
4202 	}
4203 #endif /* DHD_HP2P */
4204 
4205 #ifdef DHD_LB_RXP
4206 	/* defualt rx flow ctrl thresholds. Can be changed at run time through sysfs */
4207 	dhd->lb_rxp_stop_thr = (D2HRING_RXCMPLT_MAX_ITEM * LB_RXP_STOP_THR);
4208 	dhd->lb_rxp_strt_thr = (D2HRING_RXCMPLT_MAX_ITEM * LB_RXP_STRT_THR);
4209 	atomic_set(&dhd->lb_rxp_flow_ctrl, FALSE);
4210 #endif /* DHD_LB_RXP */
4211 	return BCME_OK;
4212 } /* dhd_prot_init */
4213 
4214 /**
4215  * dhd_prot_detach - PCIE FD protocol layer destructor.
4216  * Unlink, frees allocated protocol memory (including dhd_prot)
4217  */
dhd_prot_detach(dhd_pub_t * dhd)4218 void dhd_prot_detach(dhd_pub_t *dhd)
4219 {
4220 	dhd_prot_t *prot = dhd->prot;
4221 
4222 	/* Stop the protocol module */
4223 	if (prot) {
4224 		/* For non-android platforms, devreset will not be called,
4225 		 * so call prot_reset here. It is harmless if called twice.
4226 		 */
4227 		dhd_prot_reset(dhd);
4228 
4229 		/* free up all DMA-able buffers allocated during prot attach/init */
4230 
4231 		dhd_dma_buf_free(dhd, &prot->d2h_dma_scratch_buf);
4232 #ifdef DHD_HMAPTEST
4233 		dhd_dma_buf_free(dhd, &prot->hmaptest.mem);
4234 #endif /* DHD_HMAPTEST */
4235 		dhd_dma_buf_free(dhd, &prot->retbuf);
4236 		dhd_dma_buf_free(dhd, &prot->ioctbuf);
4237 		dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf);
4238 		dhd_dma_buf_free(dhd, &prot->hostts_req_buf);
4239 		dhd_dma_buf_free(dhd, &prot->fw_trap_buf);
4240 		dhd_dma_buf_free(dhd, &prot->host_scb_buf);
4241 #ifdef SNAPSHOT_UPLOAD
4242 		dhd_dma_buf_free(dhd, &prot->snapshot_upload_buf);
4243 #endif	/* SNAPSHOT_UPLOAD */
4244 
4245 		/* DMA-able buffers for DMAing H2D/D2H WR/RD indices */
4246 		dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
4247 		dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_rd_buf);
4248 		dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_wr_buf);
4249 		dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
4250 
4251 		dhd_dma_buf_free(dhd, &prot->h2d_ifrm_indx_wr_buf);
4252 
4253 		/* Common MsgBuf Rings */
4254 		dhd_prot_ring_detach(dhd, &prot->h2dring_ctrl_subn);
4255 		dhd_prot_ring_detach(dhd, &prot->h2dring_rxp_subn);
4256 		dhd_prot_ring_detach(dhd, &prot->d2hring_ctrl_cpln);
4257 		dhd_prot_ring_detach(dhd, &prot->d2hring_tx_cpln);
4258 		dhd_prot_ring_detach(dhd, &prot->d2hring_rx_cpln);
4259 
4260 		/* Detach each DMA-able buffer and free the pool of msgbuf_ring_t */
4261 		dhd_prot_flowrings_pool_detach(dhd);
4262 
4263 		/* detach info rings */
4264 		dhd_prot_detach_info_rings(dhd);
4265 
4266 #ifdef BTLOG
4267 		/* detach BT log rings */
4268 		dhd_prot_detach_btlog_rings(dhd);
4269 #endif	/* BTLOG */
4270 
4271 #ifdef EWP_EDL
4272 		dhd_prot_detach_edl_rings(dhd);
4273 #endif
4274 #ifdef DHD_HP2P
4275 		/* detach HPP rings */
4276 		dhd_prot_detach_hp2p_rings(dhd);
4277 #endif /* DHD_HP2P */
4278 
4279 		/* if IOCTLRESP_USE_CONSTMEM is defined IOCTL PKTs use pktid_map_handle_ioctl
4280 		 * handler and PKT memory is allocated using alloc_ioctl_return_buffer(), Otherwise
4281 		 * they will be part of pktid_ctrl_map handler and PKT memory is allocated using
4282 		 * PKTGET_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) OR PKGET.
4283 		 * Similarly for freeing PKT buffers DHD_NATIVE_TO_PKTID_FINI will be used
4284 		 * which calls PKTFREE_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) OR PKFREE.
4285 		 * Else if IOCTLRESP_USE_CONSTMEM is defined IOCTL PKTs will be freed using
4286 		 * DHD_NATIVE_TO_PKTID_FINI_IOCTL which calls free_ioctl_return_buffer.
4287 		 */
4288 		DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_ctrl_map);
4289 		DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_rx_map);
4290 		DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_tx_map);
4291 #ifdef IOCTLRESP_USE_CONSTMEM
4292 		DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl);
4293 #endif
4294 #ifdef DHD_MAP_PKTID_LOGGING
4295 		DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_map);
4296 		DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_unmap);
4297 #endif /* DHD_MAP_PKTID_LOGGING */
4298 #ifdef DHD_DMA_INDICES_SEQNUM
4299 		if (prot->h2d_dma_indx_rd_copy_buf) {
4300 			MFREE(dhd->osh, prot->h2d_dma_indx_rd_copy_buf,
4301 				prot->h2d_dma_indx_rd_copy_bufsz);
4302 		}
4303 		if (prot->d2h_dma_indx_wr_copy_buf) {
4304 			MFREE(dhd->osh, prot->d2h_dma_indx_wr_copy_buf,
4305 				prot->d2h_dma_indx_wr_copy_bufsz);
4306 		}
4307 #endif /* DHD_DMA_INDICES_SEQNUM */
4308 		DHD_OS_PREFREE(dhd, dhd->prot, sizeof(dhd_prot_t));
4309 
4310 		dhd->prot = NULL;
4311 	}
4312 } /* dhd_prot_detach */
4313 
4314 /**
4315  * dhd_prot_reset - Reset the protocol layer without freeing any objects.
4316  * This may be invoked to soft reboot the dongle, without having to
4317  * detach and attach the entire protocol layer.
4318  *
4319  * After dhd_prot_reset(), dhd_prot_init() may be invoked
4320  * without going througha dhd_prot_attach() phase.
4321  */
4322 void
dhd_prot_reset(dhd_pub_t * dhd)4323 dhd_prot_reset(dhd_pub_t *dhd)
4324 {
4325 	struct dhd_prot *prot = dhd->prot;
4326 
4327 	DHD_TRACE(("%s\n", __FUNCTION__));
4328 
4329 	if (prot == NULL) {
4330 		return;
4331 	}
4332 
4333 	dhd->ring_attached = FALSE;
4334 
4335 	dhd_prot_flowrings_pool_reset(dhd);
4336 
4337 	/* Reset Common MsgBuf Rings */
4338 	dhd_prot_ring_reset(dhd, &prot->h2dring_ctrl_subn);
4339 	dhd_prot_ring_reset(dhd, &prot->h2dring_rxp_subn);
4340 	dhd_prot_ring_reset(dhd, &prot->d2hring_ctrl_cpln);
4341 	dhd_prot_ring_reset(dhd, &prot->d2hring_tx_cpln);
4342 	dhd_prot_ring_reset(dhd, &prot->d2hring_rx_cpln);
4343 
4344 	/* Reset info rings */
4345 	if (prot->h2dring_info_subn) {
4346 		dhd_prot_ring_reset(dhd, prot->h2dring_info_subn);
4347 	}
4348 
4349 	if (prot->d2hring_info_cpln) {
4350 		dhd_prot_ring_reset(dhd, prot->d2hring_info_cpln);
4351 	}
4352 
4353 #ifdef EWP_EDL
4354 	if (prot->d2hring_edl) {
4355 		dhd_prot_ring_reset(dhd, prot->d2hring_edl);
4356 	}
4357 #endif /* EWP_EDL */
4358 
4359 	/* Reset all DMA-able buffers allocated during prot attach */
4360 	dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf);
4361 #ifdef DHD_HMAPTEST
4362 	dhd_dma_buf_reset(dhd, &prot->hmaptest.mem);
4363 #endif /* DHD_HMAPTEST */
4364 	dhd_dma_buf_reset(dhd, &prot->retbuf);
4365 	dhd_dma_buf_reset(dhd, &prot->ioctbuf);
4366 	dhd_dma_buf_reset(dhd, &prot->host_bus_throughput_buf);
4367 	dhd_dma_buf_reset(dhd, &prot->hostts_req_buf);
4368 	dhd_dma_buf_reset(dhd, &prot->fw_trap_buf);
4369 	dhd_dma_buf_reset(dhd, &prot->host_scb_buf);
4370 #ifdef SNAPSHOT_UPLOAD
4371 	dhd_dma_buf_reset(dhd, &prot->snapshot_upload_buf);
4372 #endif /* SNAPSHOT_UPLOAD */
4373 
4374 	dhd_dma_buf_reset(dhd, &prot->h2d_ifrm_indx_wr_buf);
4375 
4376 	/* Rest all DMA-able buffers for DMAing H2D/D2H WR/RD indices */
4377 	dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf);
4378 	dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf);
4379 	dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf);
4380 	dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_wr_buf);
4381 
4382 #ifdef DHD_DMA_INDICES_SEQNUM
4383 		if (prot->d2h_dma_indx_wr_copy_buf) {
4384 			dhd_local_buf_reset(prot->h2d_dma_indx_rd_copy_buf,
4385 				prot->h2d_dma_indx_rd_copy_bufsz);
4386 			dhd_local_buf_reset(prot->d2h_dma_indx_wr_copy_buf,
4387 				prot->d2h_dma_indx_wr_copy_bufsz);
4388 		}
4389 #endif /* DHD_DMA_INDICES_SEQNUM */
4390 
4391 	/* XXX: dmaxfer src and dst? */
4392 
4393 	prot->rx_metadata_offset = 0;
4394 	prot->tx_metadata_offset = 0;
4395 
4396 	prot->rxbufpost = 0;
4397 	prot->cur_event_bufs_posted = 0;
4398 	prot->cur_ioctlresp_bufs_posted = 0;
4399 
4400 	OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
4401 	prot->data_seq_no = 0;
4402 	prot->ioctl_seq_no = 0;
4403 	prot->ioctl_state = 0;
4404 	prot->curr_ioctl_cmd = 0;
4405 	prot->ioctl_received = IOCTL_WAIT;
4406 	/* To catch any rollover issues fast, starting with higher ioctl_trans_id */
4407 	prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
4408 	prot->txcpl_db_cnt = 0;
4409 
4410 	/* dhd_flow_rings_init is located at dhd_bus_start,
4411 	 * so when stopping bus, flowrings shall be deleted
4412 	 */
4413 	if (dhd->flow_rings_inited) {
4414 		dhd_flow_rings_deinit(dhd);
4415 	}
4416 
4417 #ifdef BTLOG
4418 	/* Reset BTlog rings */
4419 	if (prot->h2dring_btlog_subn) {
4420 		dhd_prot_ring_reset(dhd, prot->h2dring_btlog_subn);
4421 	}
4422 
4423 	if (prot->d2hring_btlog_cpln) {
4424 		dhd_prot_ring_reset(dhd, prot->d2hring_btlog_cpln);
4425 	}
4426 #endif	/* BTLOG */
4427 #ifdef DHD_HP2P
4428 	if (prot->d2hring_hp2p_txcpl) {
4429 		dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_txcpl);
4430 	}
4431 	if (prot->d2hring_hp2p_rxcpl) {
4432 		dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_rxcpl);
4433 	}
4434 #endif /* DHD_HP2P */
4435 
4436 	/* Reset PKTID map */
4437 	DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_ctrl_map);
4438 	DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_rx_map);
4439 	DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_tx_map);
4440 #ifdef IOCTLRESP_USE_CONSTMEM
4441 	DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, prot->pktid_map_handle_ioctl);
4442 #endif /* IOCTLRESP_USE_CONSTMEM */
4443 #ifdef DMAMAP_STATS
4444 	dhd->dma_stats.txdata = dhd->dma_stats.txdata_sz = 0;
4445 	dhd->dma_stats.rxdata = dhd->dma_stats.rxdata_sz = 0;
4446 #ifndef IOCTLRESP_USE_CONSTMEM
4447 	dhd->dma_stats.ioctl_rx = dhd->dma_stats.ioctl_rx_sz = 0;
4448 #endif /* IOCTLRESP_USE_CONSTMEM */
4449 	dhd->dma_stats.event_rx = dhd->dma_stats.event_rx_sz = 0;
4450 	dhd->dma_stats.info_rx = dhd->dma_stats.info_rx_sz = 0;
4451 	dhd->dma_stats.tsbuf_rx = dhd->dma_stats.tsbuf_rx_sz = 0;
4452 #endif /* DMAMAP_STATS */
4453 
4454 #ifdef AGG_H2D_DB
4455 	dhd_msgbuf_agg_h2d_db_timer_reset(dhd);
4456 #endif /* AGG_H2D_DB */
4457 
4458 } /* dhd_prot_reset */
4459 
4460 #if defined(DHD_LB_RXP)
4461 #define DHD_LB_DISPATCH_RX_PROCESS(dhdp)	dhd_lb_dispatch_rx_process(dhdp)
4462 #else /* !DHD_LB_RXP */
4463 #define DHD_LB_DISPATCH_RX_PROCESS(dhdp)	do { /* noop */ } while (0)
4464 #endif /* !DHD_LB_RXP */
4465 
4466 #if defined(DHD_LB)
4467 /* DHD load balancing: deferral of work to another online CPU */
4468 /* DHD_LB_RXP dispatchers, in dhd_linux.c */
4469 extern void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
4470 extern void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
4471 extern unsigned long dhd_read_lb_rxp(dhd_pub_t *dhdp);
4472 
4473 #if defined(DHD_LB_RXP)
4474 /**
4475  * dhd_lb_dispatch_rx_process - load balance by dispatch Rx processing work
4476  * to other CPU cores
4477  */
4478 static INLINE void
dhd_lb_dispatch_rx_process(dhd_pub_t * dhdp)4479 dhd_lb_dispatch_rx_process(dhd_pub_t *dhdp)
4480 {
4481 	dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */
4482 }
4483 #endif /* DHD_LB_RXP */
4484 #endif /* DHD_LB */
4485 
4486 void
dhd_prot_rx_dataoffset(dhd_pub_t * dhd,uint32 rx_offset)4487 dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset)
4488 {
4489 	dhd_prot_t *prot = dhd->prot;
4490 	prot->rx_dataoffset = rx_offset;
4491 }
4492 
4493 static int
dhd_check_create_info_rings(dhd_pub_t * dhd)4494 dhd_check_create_info_rings(dhd_pub_t *dhd)
4495 {
4496 	dhd_prot_t *prot = dhd->prot;
4497 	int ret = BCME_ERROR;
4498 	uint16 ringid;
4499 
4500 #ifdef BTLOG
4501 	if (dhd->submit_count_WAR) {
4502 		ringid = dhd->bus->max_tx_flowrings + BCMPCIE_COMMON_MSGRINGS;
4503 	} else
4504 #endif	/* BTLOG */
4505 	{
4506 		/* dongle may increase max_submission_rings so keep
4507 		 * ringid at end of dynamic rings
4508 		 */
4509 		ringid = dhd->bus->max_tx_flowrings +
4510 			(dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
4511 			BCMPCIE_H2D_COMMON_MSGRINGS;
4512 	}
4513 
4514 	if (prot->d2hring_info_cpln) {
4515 		/* for d2hring re-entry case, clear inited flag */
4516 		prot->d2hring_info_cpln->inited = FALSE;
4517 	}
4518 
4519 	if (prot->h2dring_info_subn && prot->d2hring_info_cpln) {
4520 		return BCME_OK; /* dhd_prot_init rentry after a dhd_prot_reset */
4521 	}
4522 
4523 	if (prot->h2dring_info_subn == NULL) {
4524 		prot->h2dring_info_subn = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
4525 
4526 		if (prot->h2dring_info_subn == NULL) {
4527 			DHD_ERROR(("%s: couldn't alloc memory for h2dring_info_subn\n",
4528 				__FUNCTION__));
4529 			return BCME_NOMEM;
4530 		}
4531 
4532 		DHD_INFO(("%s: about to create debug submit ring\n", __FUNCTION__));
4533 		ret = dhd_prot_ring_attach(dhd, prot->h2dring_info_subn, "h2dinfo",
4534 			H2DRING_DYNAMIC_INFO_MAX_ITEM, H2DRING_INFO_BUFPOST_ITEMSIZE,
4535 			ringid);
4536 		if (ret != BCME_OK) {
4537 			DHD_ERROR(("%s: couldn't alloc resources for dbg submit ring\n",
4538 				__FUNCTION__));
4539 			goto err;
4540 		}
4541 	}
4542 
4543 	if (prot->d2hring_info_cpln == NULL) {
4544 		prot->d2hring_info_cpln = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
4545 
4546 		if (prot->d2hring_info_cpln == NULL) {
4547 			DHD_ERROR(("%s: couldn't alloc memory for h2dring_info_subn\n",
4548 				__FUNCTION__));
4549 			return BCME_NOMEM;
4550 		}
4551 
4552 		/* create the debug info completion ring next to debug info submit ring
4553 		* ringid = id next to debug info submit ring
4554 		*/
4555 		ringid = ringid + 1;
4556 
4557 		DHD_INFO(("%s: about to create debug cpl ring\n", __FUNCTION__));
4558 		ret = dhd_prot_ring_attach(dhd, prot->d2hring_info_cpln, "d2hinfo",
4559 			D2HRING_DYNAMIC_INFO_MAX_ITEM, D2HRING_INFO_BUFCMPLT_ITEMSIZE,
4560 			ringid);
4561 		if (ret != BCME_OK) {
4562 			DHD_ERROR(("%s: couldn't alloc resources for dbg cpl ring\n",
4563 				__FUNCTION__));
4564 			dhd_prot_ring_detach(dhd, prot->h2dring_info_subn);
4565 			goto err;
4566 		}
4567 	}
4568 
4569 	return ret;
4570 err:
4571 	MFREE(prot->osh, prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
4572 
4573 	if (prot->d2hring_info_cpln) {
4574 		MFREE(prot->osh, prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
4575 	}
4576 	return ret;
4577 } /* dhd_check_create_info_rings */
4578 
4579 int
dhd_prot_init_info_rings(dhd_pub_t * dhd)4580 dhd_prot_init_info_rings(dhd_pub_t *dhd)
4581 {
4582 	dhd_prot_t *prot = dhd->prot;
4583 	int ret = BCME_OK;
4584 
4585 	if ((ret = dhd_check_create_info_rings(dhd)) != BCME_OK) {
4586 		DHD_ERROR(("%s: info rings aren't created! \n",
4587 			__FUNCTION__));
4588 		return ret;
4589 	}
4590 
4591 	if ((prot->d2hring_info_cpln->inited) || (prot->d2hring_info_cpln->create_pending)) {
4592 		DHD_INFO(("Info completion ring was created!\n"));
4593 		return ret;
4594 	}
4595 
4596 	DHD_TRACE(("trying to send create d2h info ring: id %d\n", prot->d2hring_info_cpln->idx));
4597 	ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_info_cpln,
4598 		BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL, DHD_D2H_DBGRING_REQ_PKTID);
4599 	if (ret != BCME_OK)
4600 		return ret;
4601 
4602 	prot->h2dring_info_subn->seqnum = H2D_EPOCH_INIT_VAL;
4603 	prot->h2dring_info_subn->current_phase = 0;
4604 	prot->d2hring_info_cpln->seqnum = D2H_EPOCH_INIT_VAL;
4605 	prot->d2hring_info_cpln->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4606 
4607 	DHD_TRACE(("trying to send create h2d info ring id %d\n", prot->h2dring_info_subn->idx));
4608 	prot->h2dring_info_subn->n_completion_ids = 1;
4609 	prot->h2dring_info_subn->compeltion_ring_ids[0] = prot->d2hring_info_cpln->idx;
4610 
4611 	ret = dhd_send_h2d_ringcreate(dhd, prot->h2dring_info_subn,
4612 		BCMPCIE_H2D_RING_TYPE_DBGBUF_SUBMIT, DHD_H2D_DBGRING_REQ_PKTID);
4613 
4614 	/* Note that there is no way to delete d2h or h2d ring deletion incase either fails,
4615 	 * so can not cleanup if one ring was created while the other failed
4616 	 */
4617 	return ret;
4618 } /* dhd_prot_init_info_rings */
4619 
4620 static void
dhd_prot_detach_info_rings(dhd_pub_t * dhd)4621 dhd_prot_detach_info_rings(dhd_pub_t *dhd)
4622 {
4623 	if (dhd->prot->h2dring_info_subn) {
4624 		dhd_prot_ring_detach(dhd, dhd->prot->h2dring_info_subn);
4625 		MFREE(dhd->prot->osh, dhd->prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
4626 	}
4627 	if (dhd->prot->d2hring_info_cpln) {
4628 		dhd_prot_ring_detach(dhd, dhd->prot->d2hring_info_cpln);
4629 		MFREE(dhd->prot->osh, dhd->prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
4630 	}
4631 }
4632 
4633 #ifdef DHD_HP2P
4634 static int
dhd_check_create_hp2p_rings(dhd_pub_t * dhd)4635 dhd_check_create_hp2p_rings(dhd_pub_t *dhd)
4636 {
4637 	dhd_prot_t *prot = dhd->prot;
4638 	int ret = BCME_ERROR;
4639 	uint16 ringid;
4640 
4641 	/* Last 2 dynamic ring indices are used by hp2p rings */
4642 	ringid = dhd->bus->max_submission_rings + dhd->bus->max_completion_rings - 2;
4643 
4644 	if (prot->d2hring_hp2p_txcpl == NULL) {
4645 		prot->d2hring_hp2p_txcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
4646 
4647 		if (prot->d2hring_hp2p_txcpl == NULL) {
4648 			DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_txcpl\n",
4649 				__FUNCTION__));
4650 			return BCME_NOMEM;
4651 		}
4652 
4653 		DHD_INFO(("%s: about to create hp2p txcpl ring\n", __FUNCTION__));
4654 		ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_txcpl, "d2hhp2p_txcpl",
4655 			dhd_bus_get_hp2p_ring_max_size(dhd->bus, TRUE), D2HRING_TXCMPLT_ITEMSIZE,
4656 			ringid);
4657 		if (ret != BCME_OK) {
4658 			DHD_ERROR(("%s: couldn't alloc resources for hp2p txcpl ring\n",
4659 				__FUNCTION__));
4660 			goto err2;
4661 		}
4662 	} else {
4663 		/* for re-entry case, clear inited flag */
4664 		prot->d2hring_hp2p_txcpl->inited = FALSE;
4665 	}
4666 	if (prot->d2hring_hp2p_rxcpl == NULL) {
4667 		prot->d2hring_hp2p_rxcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
4668 
4669 		if (prot->d2hring_hp2p_rxcpl == NULL) {
4670 			DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_rxcpl\n",
4671 				__FUNCTION__));
4672 			return BCME_NOMEM;
4673 		}
4674 
4675 		/* create the hp2p rx completion ring next to hp2p tx compl ring
4676 		* ringid = id next to hp2p tx compl ring
4677 		*/
4678 		ringid = ringid + 1;
4679 
4680 		DHD_INFO(("%s: about to create hp2p rxcpl ring\n", __FUNCTION__));
4681 		ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_rxcpl, "d2hhp2p_rxcpl",
4682 			dhd_bus_get_hp2p_ring_max_size(dhd->bus, FALSE), D2HRING_RXCMPLT_ITEMSIZE,
4683 			ringid);
4684 		if (ret != BCME_OK) {
4685 			DHD_ERROR(("%s: couldn't alloc resources for hp2p rxcpl ring\n",
4686 				__FUNCTION__));
4687 			goto err1;
4688 		}
4689 	} else {
4690 		/* for re-entry case, clear inited flag */
4691 		prot->d2hring_hp2p_rxcpl->inited = FALSE;
4692 	}
4693 
4694 	if (prot->d2hring_hp2p_rxcpl != NULL &&
4695 		prot->d2hring_hp2p_txcpl != NULL) {
4696 		/* dhd_prot_init rentry after a dhd_prot_reset */
4697 		ret = BCME_OK;
4698 	}
4699 
4700 	return ret;
4701 err1:
4702 	MFREE(prot->osh, prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t));
4703 	prot->d2hring_hp2p_rxcpl = NULL;
4704 
4705 err2:
4706 	MFREE(prot->osh, prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t));
4707 	prot->d2hring_hp2p_txcpl = NULL;
4708 	return ret;
4709 } /* dhd_check_create_hp2p_rings */
4710 
4711 int
dhd_prot_init_hp2p_rings(dhd_pub_t * dhd)4712 dhd_prot_init_hp2p_rings(dhd_pub_t *dhd)
4713 {
4714 	dhd_prot_t *prot = dhd->prot;
4715 	int ret = BCME_OK;
4716 
4717 	dhd->hp2p_ring_more = TRUE;
4718 	/* default multiflow not allowed */
4719 	dhd->hp2p_mf_enable = FALSE;
4720 
4721 	if ((ret = dhd_check_create_hp2p_rings(dhd)) != BCME_OK) {
4722 		DHD_ERROR(("%s: hp2p rings aren't created! \n",
4723 			__FUNCTION__));
4724 		return ret;
4725 	}
4726 
4727 	if ((prot->d2hring_hp2p_txcpl->inited) || (prot->d2hring_hp2p_txcpl->create_pending)) {
4728 		DHD_INFO(("hp2p tx completion ring was created!\n"));
4729 		return ret;
4730 	}
4731 
4732 	DHD_TRACE(("trying to send create d2h hp2p txcpl ring: id %d\n",
4733 		prot->d2hring_hp2p_txcpl->idx));
4734 	ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_txcpl,
4735 		BCMPCIE_D2H_RING_TYPE_HPP_TX_CPL, DHD_D2H_HPPRING_TXREQ_PKTID);
4736 	if (ret != BCME_OK)
4737 		return ret;
4738 
4739 	prot->d2hring_hp2p_txcpl->seqnum = D2H_EPOCH_INIT_VAL;
4740 	prot->d2hring_hp2p_txcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4741 
4742 	if ((prot->d2hring_hp2p_rxcpl->inited) || (prot->d2hring_hp2p_rxcpl->create_pending)) {
4743 		DHD_INFO(("hp2p rx completion ring was created!\n"));
4744 		return ret;
4745 	}
4746 
4747 	DHD_TRACE(("trying to send create d2h hp2p rxcpl ring: id %d\n",
4748 		prot->d2hring_hp2p_rxcpl->idx));
4749 	ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_rxcpl,
4750 		BCMPCIE_D2H_RING_TYPE_HPP_RX_CPL, DHD_D2H_HPPRING_RXREQ_PKTID);
4751 	if (ret != BCME_OK)
4752 		return ret;
4753 
4754 	prot->d2hring_hp2p_rxcpl->seqnum = D2H_EPOCH_INIT_VAL;
4755 	prot->d2hring_hp2p_rxcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4756 
4757 	/* Note that there is no way to delete d2h or h2d ring deletion incase either fails,
4758 	 * so can not cleanup if one ring was created while the other failed
4759 	 */
4760 	return BCME_OK;
4761 } /* dhd_prot_init_hp2p_rings */
4762 
4763 static void
dhd_prot_detach_hp2p_rings(dhd_pub_t * dhd)4764 dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd)
4765 {
4766 	if (dhd->prot->d2hring_hp2p_txcpl) {
4767 		dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_txcpl);
4768 		MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t));
4769 		dhd->prot->d2hring_hp2p_txcpl = NULL;
4770 	}
4771 	if (dhd->prot->d2hring_hp2p_rxcpl) {
4772 		dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_rxcpl);
4773 		MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t));
4774 		dhd->prot->d2hring_hp2p_rxcpl = NULL;
4775 	}
4776 }
4777 #endif /* DHD_HP2P */
4778 
4779 #ifdef BTLOG
4780 static int
dhd_check_create_btlog_rings(dhd_pub_t * dhd)4781 dhd_check_create_btlog_rings(dhd_pub_t *dhd)
4782 {
4783 	dhd_prot_t *prot = dhd->prot;
4784 	int ret = BCME_ERROR;
4785 	uint16 ringid;
4786 
4787 	if (dhd->submit_count_WAR) {
4788 		ringid = dhd->bus->max_tx_flowrings + BCMPCIE_COMMON_MSGRINGS + 2;
4789 	} else {
4790 		/* ringid is one less than ringids assign by dhd_check_create_info_rings */
4791 		ringid = dhd->bus->max_tx_flowrings +
4792 			(dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
4793 			BCMPCIE_H2D_COMMON_MSGRINGS - 1;
4794 	}
4795 
4796 	if (prot->d2hring_btlog_cpln) {
4797 		/* for re-entry case, clear inited flag */
4798 		prot->d2hring_btlog_cpln->inited = FALSE;
4799 	}
4800 
4801 	if (prot->h2dring_btlog_subn && prot->d2hring_btlog_cpln) {
4802 		return BCME_OK; /* dhd_prot_init rentry after a dhd_prot_reset */
4803 	}
4804 
4805 	if (prot->h2dring_btlog_subn == NULL) {
4806 		prot->h2dring_btlog_subn = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
4807 
4808 		if (prot->h2dring_btlog_subn == NULL) {
4809 			DHD_ERROR(("%s: couldn't alloc memory for h2dring_btlog_subn\n",
4810 				__FUNCTION__));
4811 			return BCME_NOMEM;
4812 		}
4813 
4814 		DHD_INFO(("%s: about to create debug submit ring\n", __FUNCTION__));
4815 		ret = dhd_prot_ring_attach(dhd, prot->h2dring_btlog_subn, "h2dbtlog",
4816 			H2DRING_DYNAMIC_INFO_MAX_ITEM, H2DRING_INFO_BUFPOST_ITEMSIZE,
4817 			ringid);
4818 		if (ret != BCME_OK) {
4819 			DHD_ERROR(("%s: couldn't alloc resources for dbg submit ring\n",
4820 				__FUNCTION__));
4821 			goto err;
4822 		}
4823 	}
4824 
4825 	if (prot->d2hring_btlog_cpln == NULL) {
4826 		prot->d2hring_btlog_cpln = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
4827 
4828 		if (prot->d2hring_btlog_cpln == NULL) {
4829 			DHD_ERROR(("%s: couldn't alloc memory for h2dring_btlog_subn\n",
4830 				__FUNCTION__));
4831 			return BCME_NOMEM;
4832 		}
4833 
4834 		if (dhd->submit_count_WAR) {
4835 			ringid = ringid + 1;
4836 		} else {
4837 			/* advance ringid past BTLOG submit ring and INFO submit and cmplt rings */
4838 			ringid = ringid + 3;
4839 		}
4840 
4841 		DHD_INFO(("%s: about to create debug cpl ring\n", __FUNCTION__));
4842 		ret = dhd_prot_ring_attach(dhd, prot->d2hring_btlog_cpln, "d2hbtlog",
4843 			D2HRING_DYNAMIC_INFO_MAX_ITEM, D2HRING_INFO_BUFCMPLT_ITEMSIZE,
4844 			ringid);
4845 		if (ret != BCME_OK) {
4846 			DHD_ERROR(("%s: couldn't alloc resources for dbg cpl ring\n",
4847 				__FUNCTION__));
4848 			dhd_prot_ring_detach(dhd, prot->h2dring_btlog_subn);
4849 			goto err;
4850 		}
4851 	}
4852 
4853 	return ret;
4854 err:
4855 	MFREE(prot->osh, prot->h2dring_btlog_subn, sizeof(msgbuf_ring_t));
4856 
4857 	if (prot->d2hring_btlog_cpln) {
4858 		MFREE(prot->osh, prot->d2hring_btlog_cpln, sizeof(msgbuf_ring_t));
4859 	}
4860 	return ret;
4861 } /* dhd_check_create_btlog_rings */
4862 
4863 int
dhd_prot_init_btlog_rings(dhd_pub_t * dhd)4864 dhd_prot_init_btlog_rings(dhd_pub_t *dhd)
4865 {
4866 	dhd_prot_t *prot = dhd->prot;
4867 	int ret = BCME_OK;
4868 
4869 	if ((ret = dhd_check_create_btlog_rings(dhd)) != BCME_OK) {
4870 		DHD_ERROR(("%s: btlog rings aren't created! \n",
4871 			__FUNCTION__));
4872 		return ret;
4873 	}
4874 
4875 	if ((prot->d2hring_btlog_cpln->inited) || (prot->d2hring_btlog_cpln->create_pending)) {
4876 		DHD_INFO(("Info completion ring was created!\n"));
4877 		return ret;
4878 	}
4879 
4880 	DHD_ERROR(("trying to send create d2h btlog ring: id %d\n", prot->d2hring_btlog_cpln->idx));
4881 	ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_btlog_cpln,
4882 		BCMPCIE_D2H_RING_TYPE_BTLOG_CPL, DHD_D2H_BTLOGRING_REQ_PKTID);
4883 	if (ret != BCME_OK)
4884 		return ret;
4885 
4886 	prot->h2dring_btlog_subn->seqnum = H2D_EPOCH_INIT_VAL;
4887 	prot->h2dring_btlog_subn->current_phase = 0;
4888 	prot->d2hring_btlog_cpln->seqnum = D2H_EPOCH_INIT_VAL;
4889 	prot->d2hring_btlog_cpln->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4890 
4891 	DHD_ERROR(("trying to send create h2d btlog ring id %d\n", prot->h2dring_btlog_subn->idx));
4892 	prot->h2dring_btlog_subn->n_completion_ids = 1;
4893 	prot->h2dring_btlog_subn->compeltion_ring_ids[0] = prot->d2hring_btlog_cpln->idx;
4894 
4895 	ret = dhd_send_h2d_ringcreate(dhd, prot->h2dring_btlog_subn,
4896 		BCMPCIE_H2D_RING_TYPE_BTLOG_SUBMIT, DHD_H2D_BTLOGRING_REQ_PKTID);
4897 
4898 	/* Note that there is no way to delete d2h or h2d ring deletion incase either fails,
4899 	 * so can not cleanup if one ring was created while the other failed
4900 	 */
4901 	return ret;
4902 } /* dhd_prot_init_btlog_rings */
4903 
4904 static void
dhd_prot_detach_btlog_rings(dhd_pub_t * dhd)4905 dhd_prot_detach_btlog_rings(dhd_pub_t *dhd)
4906 {
4907 	if (dhd->prot->h2dring_btlog_subn) {
4908 		dhd_prot_ring_detach(dhd, dhd->prot->h2dring_btlog_subn);
4909 		MFREE(dhd->prot->osh, dhd->prot->h2dring_btlog_subn, sizeof(msgbuf_ring_t));
4910 	}
4911 	if (dhd->prot->d2hring_btlog_cpln) {
4912 		dhd_prot_ring_detach(dhd, dhd->prot->d2hring_btlog_cpln);
4913 		MFREE(dhd->prot->osh, dhd->prot->d2hring_btlog_cpln, sizeof(msgbuf_ring_t));
4914 	}
4915 }
4916 #endif	/* BTLOG */
4917 
4918 #ifdef EWP_EDL
4919 static int
dhd_check_create_edl_rings(dhd_pub_t * dhd)4920 dhd_check_create_edl_rings(dhd_pub_t *dhd)
4921 {
4922 	dhd_prot_t *prot = dhd->prot;
4923 	int ret = BCME_ERROR;
4924 	uint16 ringid;
4925 
4926 #ifdef BTLOG
4927 	if (dhd->submit_count_WAR) {
4928 		ringid = dhd->bus->max_tx_flowrings + BCMPCIE_COMMON_MSGRINGS;
4929 	} else
4930 #endif	/* BTLOG */
4931 	{
4932 		/* dongle may increase max_submission_rings so keep
4933 		 * ringid at end of dynamic rings (re-use info ring cpl ring id)
4934 		 */
4935 		ringid = dhd->bus->max_tx_flowrings +
4936 			(dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
4937 			BCMPCIE_H2D_COMMON_MSGRINGS + 1;
4938 	}
4939 
4940 	if (prot->d2hring_edl) {
4941 		prot->d2hring_edl->inited = FALSE;
4942 		return BCME_OK; /* dhd_prot_init rentry after a dhd_prot_reset */
4943 	}
4944 
4945 	if (prot->d2hring_edl == NULL) {
4946 		prot->d2hring_edl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
4947 
4948 		if (prot->d2hring_edl == NULL) {
4949 			DHD_ERROR(("%s: couldn't alloc memory for d2hring_edl\n",
4950 				__FUNCTION__));
4951 			return BCME_NOMEM;
4952 		}
4953 
4954 		DHD_ERROR(("%s: about to create EDL ring, ringid: %u \n", __FUNCTION__,
4955 			ringid));
4956 		ret = dhd_prot_ring_attach(dhd, prot->d2hring_edl, "d2hring_edl",
4957 			D2HRING_EDL_MAX_ITEM, D2HRING_EDL_ITEMSIZE,
4958 			ringid);
4959 		if (ret != BCME_OK) {
4960 			DHD_ERROR(("%s: couldn't alloc resources for EDL ring\n",
4961 				__FUNCTION__));
4962 			goto err;
4963 		}
4964 	}
4965 
4966 	return ret;
4967 err:
4968 	MFREE(prot->osh, prot->d2hring_edl, sizeof(msgbuf_ring_t));
4969 	prot->d2hring_edl = NULL;
4970 
4971 	return ret;
4972 } /* dhd_check_create_btlog_rings */
4973 
4974 int
dhd_prot_init_edl_rings(dhd_pub_t * dhd)4975 dhd_prot_init_edl_rings(dhd_pub_t *dhd)
4976 {
4977 	dhd_prot_t *prot = dhd->prot;
4978 	int ret = BCME_ERROR;
4979 
4980 	if ((ret = dhd_check_create_edl_rings(dhd)) != BCME_OK) {
4981 		DHD_ERROR(("%s: EDL rings aren't created! \n",
4982 			__FUNCTION__));
4983 		return ret;
4984 	}
4985 
4986 	if ((prot->d2hring_edl->inited) || (prot->d2hring_edl->create_pending)) {
4987 		DHD_INFO(("EDL completion ring was created!\n"));
4988 		return ret;
4989 	}
4990 
4991 	DHD_ERROR(("trying to send create d2h edl ring: idx %d\n", prot->d2hring_edl->idx));
4992 	ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_edl,
4993 		BCMPCIE_D2H_RING_TYPE_EDL, DHD_D2H_DBGRING_REQ_PKTID);
4994 	if (ret != BCME_OK)
4995 		return ret;
4996 
4997 	prot->d2hring_edl->seqnum = D2H_EPOCH_INIT_VAL;
4998 	prot->d2hring_edl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4999 
5000 	return BCME_OK;
5001 } /* dhd_prot_init_btlog_rings */
5002 
5003 static void
dhd_prot_detach_edl_rings(dhd_pub_t * dhd)5004 dhd_prot_detach_edl_rings(dhd_pub_t *dhd)
5005 {
5006 	if (dhd->prot->d2hring_edl) {
5007 		dhd_prot_ring_detach(dhd, dhd->prot->d2hring_edl);
5008 		MFREE(dhd->prot->osh, dhd->prot->d2hring_edl, sizeof(msgbuf_ring_t));
5009 		dhd->prot->d2hring_edl = NULL;
5010 	}
5011 }
5012 #endif	/* EWP_EDL */
5013 
5014 /**
5015  * Initialize protocol: sync w/dongle state.
5016  * Sets dongle media info (iswl, drv_version, mac address).
5017  */
dhd_sync_with_dongle(dhd_pub_t * dhd)5018 int dhd_sync_with_dongle(dhd_pub_t *dhd)
5019 {
5020 	int ret = 0;
5021 	uint len = 0;
5022 	wlc_rev_info_t revinfo;
5023 	char buf[128];
5024 	dhd_prot_t *prot = dhd->prot;
5025 
5026 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5027 
5028 	dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
5029 
5030 	/* Post ts buffer after shim layer is attached */
5031 	ret = dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
5032 
5033 	/* query for 'wlc_ver' to get version info from firmware */
5034 	/* memsetting to zero */
5035 	bzero(buf, sizeof(buf));
5036 	len = bcm_mkiovar("wlc_ver", NULL, 0, buf, sizeof(buf));
5037 	if (len == 0) {
5038 		DHD_ERROR(("%s failed in calling bcm_mkiovar %u\n", __FUNCTION__, len));
5039 		ret = BCME_ERROR;
5040 		goto done;
5041 	}
5042 	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
5043 	if (ret < 0) {
5044 		DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
5045 	} else {
5046 		dhd->wlc_ver_major = ((wl_wlc_version_t*)buf)->wlc_ver_major;
5047 		dhd->wlc_ver_minor = ((wl_wlc_version_t*)buf)->wlc_ver_minor;
5048 	}
5049 
5050 	DHD_ERROR(("wlc_ver_major %d, wlc_ver_minor %d\n", dhd->wlc_ver_major, dhd->wlc_ver_minor));
5051 #ifndef OEM_ANDROID
5052 	/* Get the device MAC address */
5053 	bzero(buf, sizeof(buf));
5054 	strlcpy(buf, "cur_etheraddr", sizeof(buf));
5055 	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
5056 	if (ret < 0) {
5057 		DHD_ERROR(("%s: GET iovar cur_etheraddr FAILED\n", __FUNCTION__));
5058 		goto done;
5059 	}
5060 	memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
5061 	if (dhd_msg_level & DHD_INFO_VAL) {
5062 		bcm_print_bytes("CUR_ETHERADDR ", (uchar *)buf, ETHER_ADDR_LEN);
5063 	}
5064 #endif /* OEM_ANDROID */
5065 
5066 #ifdef DHD_FW_COREDUMP
5067 	/* Check the memdump capability */
5068 	dhd_get_memdump_info(dhd);
5069 #endif /* DHD_FW_COREDUMP */
5070 #ifdef BCMASSERT_LOG
5071 	dhd_get_assert_info(dhd);
5072 #endif /* BCMASSERT_LOG */
5073 
5074 	/* Get the device rev info */
5075 	memset(&revinfo, 0, sizeof(revinfo));
5076 	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
5077 	if (ret < 0) {
5078 		DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__));
5079 		goto done;
5080 	}
5081 	DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
5082 		revinfo.deviceid, revinfo.vendorid, revinfo.chipnum));
5083 
5084 	/* Get the RxBuf post size */
5085 	/* Use default value in case of failure */
5086 	prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
5087 	memset(buf, 0, sizeof(buf));
5088 	len = bcm_mkiovar("rxbufpost_sz", NULL, 0, buf, sizeof(buf));
5089 	if (len == 0) {
5090 		DHD_ERROR(("%s failed in calling bcm_mkiovar %u\n", __FUNCTION__, len));
5091 	} else {
5092 		ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
5093 		if (ret < 0) {
5094 			DHD_ERROR(("%s: GET RxBuf post FAILED, use default %d\n",
5095 				__FUNCTION__, DHD_FLOWRING_RX_BUFPOST_PKTSZ));
5096 		} else {
5097 			if (memcpy_s(&(prot->rxbufpost_sz), sizeof(prot->rxbufpost_sz),
5098 					buf, sizeof(uint16)) != BCME_OK) {
5099 				DHD_ERROR(("%s: rxbufpost_sz memcpy failed\n", __FUNCTION__));
5100 			}
5101 
5102 			if (prot->rxbufpost_sz > DHD_FLOWRING_RX_BUFPOST_PKTSZ_MAX) {
5103 				DHD_ERROR(("%s: Invalid RxBuf post size : %d, default to %d\n",
5104 					__FUNCTION__, prot->rxbufpost_sz,
5105 					DHD_FLOWRING_RX_BUFPOST_PKTSZ));
5106 					prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
5107 			} else {
5108 				DHD_ERROR(("%s: RxBuf Post : %d\n",
5109 					__FUNCTION__, prot->rxbufpost_sz));
5110 			}
5111 		}
5112 	}
5113 
5114 	/* Post buffers for packet reception */
5115 	dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
5116 
5117 	DHD_SSSR_DUMP_INIT(dhd);
5118 
5119 	dhd_process_cid_mac(dhd, TRUE);
5120 	ret = dhd_preinit_ioctls(dhd);
5121 	dhd_process_cid_mac(dhd, FALSE);
5122 #if defined(DHD_SDTC_ETB_DUMP)
5123 	dhd_sdtc_etb_init(dhd);
5124 #endif /* DHD_SDTC_ETB_DUMP */
5125 #if defined(DHD_H2D_LOG_TIME_SYNC)
5126 #ifdef DHD_HP2P
5127 	if (FW_SUPPORTED(dhd, h2dlogts) || dhd->hp2p_capable)
5128 #else
5129 	if (FW_SUPPORTED(dhd, h2dlogts))
5130 #endif // endif
5131 	{
5132 #ifdef DHD_HP2P
5133 		if (dhd->hp2p_enable) {
5134 			dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH / 40;
5135 		} else {
5136 			dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH;
5137 		}
5138 #else
5139 		dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH;
5140 #endif /* DHD_HP2P */
5141 		dhd->bus->dhd_rte_time_sync_count = OSL_SYSUPTIME_US();
5142 		/* This is during initialization. */
5143 		dhd_h2d_log_time_sync(dhd);
5144 	} else {
5145 		dhd->dhd_rte_time_sync_ms = 0;
5146 	}
5147 #endif /* DHD_H2D_LOG_TIME_SYNC */
5148 
5149 #ifdef HOST_SFH_LLC
5150 	if (FW_SUPPORTED(dhd, host_sfhllc)) {
5151 		dhd->host_sfhllc_supported = TRUE;
5152 	} else {
5153 		dhd->host_sfhllc_supported = FALSE;
5154 	}
5155 #endif /* HOST_SFH_LLC */
5156 
5157 	/* Always assumes wl for now */
5158 	dhd->iswl = TRUE;
5159 done:
5160 	return ret;
5161 } /* dhd_sync_with_dongle */
5162 
5163 #define DHD_DBG_SHOW_METADATA	0
5164 
5165 #if DHD_DBG_SHOW_METADATA
5166 static void
BCMFASTPATH(dhd_prot_print_metadata)5167 BCMFASTPATH(dhd_prot_print_metadata)(dhd_pub_t *dhd, void *ptr, int len)
5168 {
5169 	uint8 tlv_t;
5170 	uint8 tlv_l;
5171 	uint8 *tlv_v = (uint8 *)ptr;
5172 
5173 	if (len <= BCMPCIE_D2H_METADATA_HDRLEN)
5174 		return;
5175 
5176 	len -= BCMPCIE_D2H_METADATA_HDRLEN;
5177 	tlv_v += BCMPCIE_D2H_METADATA_HDRLEN;
5178 
5179 	while (len > TLV_HDR_LEN) {
5180 		tlv_t = tlv_v[TLV_TAG_OFF];
5181 		tlv_l = tlv_v[TLV_LEN_OFF];
5182 
5183 		len -= TLV_HDR_LEN;
5184 		tlv_v += TLV_HDR_LEN;
5185 		if (len < tlv_l)
5186 			break;
5187 		if ((tlv_t == 0) || (tlv_t == WLFC_CTL_TYPE_FILLER))
5188 			break;
5189 
5190 		switch (tlv_t) {
5191 		case WLFC_CTL_TYPE_TXSTATUS: {
5192 			uint32 txs;
5193 			memcpy(&txs, tlv_v, sizeof(uint32));
5194 			if (tlv_l < (sizeof(wl_txstatus_additional_info_t) + sizeof(uint32))) {
5195 				printf("METADATA TX_STATUS: %08x\n", txs);
5196 			} else {
5197 				wl_txstatus_additional_info_t tx_add_info;
5198 				memcpy(&tx_add_info, tlv_v + sizeof(uint32),
5199 					sizeof(wl_txstatus_additional_info_t));
5200 				printf("METADATA TX_STATUS: %08x WLFCTS[%04x | %08x - %08x - %08x]"
5201 					" rate = %08x tries = %d - %d\n", txs,
5202 					tx_add_info.seq, tx_add_info.entry_ts,
5203 					tx_add_info.enq_ts, tx_add_info.last_ts,
5204 					tx_add_info.rspec, tx_add_info.rts_cnt,
5205 					tx_add_info.tx_cnt);
5206 			}
5207 			} break;
5208 
5209 		case WLFC_CTL_TYPE_RSSI: {
5210 			if (tlv_l == 1)
5211 				printf("METADATA RX_RSSI: rssi = %d\n", *tlv_v);
5212 			else
5213 				printf("METADATA RX_RSSI[%04x]: rssi = %d snr = %d\n",
5214 					(*(tlv_v + 3) << 8) | *(tlv_v + 2),
5215 					(int8)(*tlv_v), *(tlv_v + 1));
5216 			} break;
5217 
5218 		case WLFC_CTL_TYPE_FIFO_CREDITBACK:
5219 			bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l);
5220 			break;
5221 
5222 		case WLFC_CTL_TYPE_TX_ENTRY_STAMP:
5223 			bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l);
5224 			break;
5225 
5226 		case WLFC_CTL_TYPE_RX_STAMP: {
5227 			struct {
5228 				uint32 rspec;
5229 				uint32 bus_time;
5230 				uint32 wlan_time;
5231 			} rx_tmstamp;
5232 			memcpy(&rx_tmstamp, tlv_v, sizeof(rx_tmstamp));
5233 			printf("METADATA RX TIMESTMAP: WLFCTS[%08x - %08x] rate = %08x\n",
5234 				rx_tmstamp.wlan_time, rx_tmstamp.bus_time, rx_tmstamp.rspec);
5235 			} break;
5236 
5237 		case WLFC_CTL_TYPE_TRANS_ID:
5238 			bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l);
5239 			break;
5240 
5241 		case WLFC_CTL_TYPE_COMP_TXSTATUS:
5242 			bcm_print_bytes("METADATA COMP_TXSTATUS", tlv_v, tlv_l);
5243 			break;
5244 
5245 		default:
5246 			bcm_print_bytes("METADATA UNKNOWN", tlv_v, tlv_l);
5247 			break;
5248 		}
5249 
5250 		len -= tlv_l;
5251 		tlv_v += tlv_l;
5252 	}
5253 }
5254 #endif /* DHD_DBG_SHOW_METADATA */
5255 
5256 static INLINE void
BCMFASTPATH(dhd_prot_packet_free)5257 BCMFASTPATH(dhd_prot_packet_free)(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send)
5258 {
5259 	if (pkt) {
5260 		if (pkttype == PKTTYPE_IOCTL_RX ||
5261 			pkttype == PKTTYPE_EVENT_RX ||
5262 			pkttype == PKTTYPE_INFO_RX ||
5263 			pkttype == PKTTYPE_TSBUF_RX) {
5264 #ifdef DHD_USE_STATIC_CTRLBUF
5265 			PKTFREE_STATIC(dhd->osh, pkt, send);
5266 #else
5267 			PKTFREE(dhd->osh, pkt, send);
5268 #endif /* DHD_USE_STATIC_CTRLBUF */
5269 		} else {
5270 			PKTFREE(dhd->osh, pkt, send);
5271 		}
5272 	}
5273 }
5274 
5275 /**
5276  * dhd_prot_packet_get should be called only for items having pktid_ctrl_map handle
5277  * and all the bottom most functions like dhd_pktid_map_free hold separate DHD_PKTID_LOCK
5278  * to ensure thread safety, so no need to hold any locks for this function
5279  */
5280 static INLINE void *
BCMFASTPATH(dhd_prot_packet_get)5281 BCMFASTPATH(dhd_prot_packet_get)(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, bool free_pktid)
5282 {
5283 	void *PKTBUF;
5284 	dmaaddr_t pa;
5285 	uint32 len;
5286 	void *dmah;
5287 	void *secdma;
5288 
5289 #ifdef DHD_PCIE_PKTID
5290 	if (free_pktid) {
5291 		PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map,
5292 			pktid, pa, len, dmah, secdma, pkttype);
5293 	} else {
5294 		PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_ctrl_map,
5295 			pktid, pa, len, dmah, secdma, pkttype);
5296 	}
5297 #else
5298 	PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map, pktid, pa,
5299 		len, dmah, secdma, pkttype);
5300 #endif /* DHD_PCIE_PKTID */
5301 	if (PKTBUF) {
5302 		DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
5303 #ifdef DMAMAP_STATS
5304 		switch (pkttype) {
5305 #ifndef IOCTLRESP_USE_CONSTMEM
5306 			case PKTTYPE_IOCTL_RX:
5307 				dhd->dma_stats.ioctl_rx--;
5308 				dhd->dma_stats.ioctl_rx_sz -= len;
5309 				break;
5310 #endif /* IOCTLRESP_USE_CONSTMEM */
5311 			case PKTTYPE_EVENT_RX:
5312 				dhd->dma_stats.event_rx--;
5313 				dhd->dma_stats.event_rx_sz -= len;
5314 				break;
5315 			case PKTTYPE_INFO_RX:
5316 				dhd->dma_stats.info_rx--;
5317 				dhd->dma_stats.info_rx_sz -= len;
5318 				break;
5319 			case PKTTYPE_TSBUF_RX:
5320 				dhd->dma_stats.tsbuf_rx--;
5321 				dhd->dma_stats.tsbuf_rx_sz -= len;
5322 				break;
5323 		}
5324 #endif /* DMAMAP_STATS */
5325 	}
5326 
5327 	return PKTBUF;
5328 }
5329 
5330 #ifdef IOCTLRESP_USE_CONSTMEM
5331 static INLINE void
BCMFASTPATH(dhd_prot_ioctl_ret_buffer_get)5332 BCMFASTPATH(dhd_prot_ioctl_ret_buffer_get)(dhd_pub_t *dhd, uint32 pktid, dhd_dma_buf_t *retbuf)
5333 {
5334 	memset(retbuf, 0, sizeof(dhd_dma_buf_t));
5335 	retbuf->va = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
5336 		retbuf->pa, retbuf->len, retbuf->dmah, retbuf->secdma, PKTTYPE_IOCTL_RX);
5337 
5338 	return;
5339 }
5340 #endif
5341 
5342 #ifdef PCIE_INB_DW
5343 static int
dhd_prot_inc_hostactive_devwake_assert(dhd_bus_t * bus)5344 dhd_prot_inc_hostactive_devwake_assert(dhd_bus_t *bus)
5345 {
5346 	unsigned long flags = 0;
5347 
5348 	if (INBAND_DW_ENAB(bus)) {
5349 		DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
5350 		bus->host_active_cnt++;
5351 		DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
5352 		if (dhd_bus_set_device_wake(bus, TRUE) != BCME_OK) {
5353 			DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
5354 			bus->host_active_cnt--;
5355 			dhd_bus_inb_ack_pending_ds_req(bus);
5356 			DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
5357 			return BCME_ERROR;
5358 		}
5359 	}
5360 
5361 	return BCME_OK;
5362 }
5363 
5364 static void
dhd_prot_dec_hostactive_ack_pending_dsreq(dhd_bus_t * bus)5365 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd_bus_t *bus)
5366 {
5367 	unsigned long flags = 0;
5368 	if (INBAND_DW_ENAB(bus)) {
5369 		DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
5370 		bus->host_active_cnt--;
5371 		dhd_bus_inb_ack_pending_ds_req(bus);
5372 		DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
5373 	}
5374 }
5375 #endif /* PCIE_INB_DW */
5376 
5377 static void
BCMFASTPATH(dhd_msgbuf_rxbuf_post)5378 BCMFASTPATH(dhd_msgbuf_rxbuf_post)(dhd_pub_t *dhd, bool use_rsv_pktid)
5379 {
5380 	dhd_prot_t *prot = dhd->prot;
5381 	int16 fillbufs;
5382 	int retcount = 0;
5383 
5384 	fillbufs = prot->max_rxbufpost - prot->rxbufpost;
5385 	while (fillbufs >= RX_BUF_BURST) {
5386 		/* Post in a burst of 32 buffers at a time */
5387 		fillbufs = MIN(fillbufs, RX_BUF_BURST);
5388 
5389 		/* Post buffers */
5390 		retcount = dhd_prot_rxbuf_post(dhd, fillbufs, use_rsv_pktid);
5391 
5392 		if (retcount > 0) {
5393 			prot->rxbufpost += (uint16)retcount;
5394 			/* how many more to post */
5395 			fillbufs = prot->max_rxbufpost - prot->rxbufpost;
5396 		} else {
5397 			/* Make sure we don't run loop any further */
5398 			fillbufs = 0;
5399 		}
5400 	}
5401 }
5402 
5403 /** Post 'count' no of rx buffers to dongle */
5404 static int
BCMFASTPATH(dhd_prot_rxbuf_post)5405 BCMFASTPATH(dhd_prot_rxbuf_post)(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid)
5406 {
5407 	void *p, **pktbuf;
5408 	uint8 *rxbuf_post_tmp;
5409 	host_rxbuf_post_t *rxbuf_post;
5410 	void *msg_start;
5411 	dmaaddr_t pa, *pktbuf_pa;
5412 	uint32 *pktlen;
5413 	uint16 i = 0, alloced = 0;
5414 	unsigned long flags;
5415 	uint32 pktid;
5416 	dhd_prot_t *prot = dhd->prot;
5417 	msgbuf_ring_t *ring = &prot->h2dring_rxp_subn;
5418 	void *lcl_buf;
5419 	uint16 lcl_buf_size;
5420 #ifdef BCM_ROUTER_DHD
5421 	uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ + BCMEXTRAHDROOM;
5422 #else
5423 	uint16 pktsz = prot->rxbufpost_sz;
5424 #endif /* BCM_ROUTER_DHD */
5425 
5426 #ifdef PCIE_INB_DW
5427 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
5428 		return BCME_ERROR;
5429 #endif /* PCIE_INB_DW */
5430 	/* allocate a local buffer to store pkt buffer va, pa and length */
5431 	lcl_buf_size = (sizeof(void *) + sizeof(dmaaddr_t) + sizeof(uint32)) *
5432 		RX_BUF_BURST;
5433 	lcl_buf = MALLOC(dhd->osh, lcl_buf_size);
5434 	if (!lcl_buf) {
5435 		DHD_ERROR(("%s: local scratch buffer allocation failed\n", __FUNCTION__));
5436 #ifdef PCIE_INB_DW
5437 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
5438 #endif
5439 		return 0;
5440 	}
5441 	pktbuf = lcl_buf;
5442 	pktbuf_pa = (dmaaddr_t *)((uint8 *)pktbuf + sizeof(void *) * RX_BUF_BURST);
5443 	pktlen = (uint32 *)((uint8 *)pktbuf_pa + sizeof(dmaaddr_t) * RX_BUF_BURST);
5444 
5445 	for (i = 0; i < count; i++) {
5446 		if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
5447 			DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__));
5448 			dhd->rx_pktgetfail++;
5449 			break;
5450 		}
5451 
5452 #ifdef BCM_ROUTER_DHD
5453 		/* Reserve extra headroom for router builds */
5454 		PKTPULL(dhd->osh, p, BCMEXTRAHDROOM);
5455 #endif /* BCM_ROUTER_DHD */
5456 		pktlen[i] = PKTLEN(dhd->osh, p);
5457 		pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i], DMA_RX, p, 0);
5458 
5459 		if (PHYSADDRISZERO(pa)) {
5460 			PKTFREE(dhd->osh, p, FALSE);
5461 			DHD_ERROR(("Invalid phyaddr 0\n"));
5462 			ASSERT(0);
5463 			break;
5464 		}
5465 #ifdef DMAMAP_STATS
5466 		dhd->dma_stats.rxdata++;
5467 		dhd->dma_stats.rxdata_sz += pktlen[i];
5468 #endif /* DMAMAP_STATS */
5469 
5470 		PKTPULL(dhd->osh, p, prot->rx_metadata_offset);
5471 		pktlen[i] = PKTLEN(dhd->osh, p);
5472 		pktbuf[i] = p;
5473 		pktbuf_pa[i] = pa;
5474 	}
5475 
5476 	/* only post what we have */
5477 	count = i;
5478 
5479 	/* grab the ring lock to allocate pktid and post on ring */
5480 	DHD_RING_LOCK(ring->ring_lock, flags);
5481 
5482 	/* Claim space for exactly 'count' no of messages, for mitigation purpose */
5483 	msg_start = (void *)
5484 		dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE);
5485 	if (msg_start == NULL) {
5486 		DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
5487 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5488 		goto cleanup;
5489 	}
5490 	/* if msg_start !=  NULL, we should have alloced space for atleast 1 item */
5491 	ASSERT(alloced > 0);
5492 
5493 	rxbuf_post_tmp = (uint8*)msg_start;
5494 
5495 	for (i = 0; i < alloced; i++) {
5496 		rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp;
5497 		p = pktbuf[i];
5498 		pa = pktbuf_pa[i];
5499 
5500 		pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_rx_map, p, pa,
5501 			pktlen[i], DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX);
5502 #if defined(DHD_PCIE_PKTID)
5503 		if (pktid == DHD_PKTID_INVALID) {
5504 			break;
5505 		}
5506 #endif /* DHD_PCIE_PKTID */
5507 
5508 #ifdef DHD_HMAPTEST
5509 	if (dhd->prot->hmaptest_rx_active == HMAPTEST_D11_RX_ACTIVE) {
5510 		/* scratchbuf area */
5511 		dhd->prot->hmap_rx_buf_va = (char *)dhd->prot->hmaptest.mem.va
5512 			+ dhd->prot->hmaptest.offset;
5513 
5514 		dhd->prot->hmap_rx_buf_len = pktlen[i] + prot->rx_metadata_offset;
5515 		if ((dhd->prot->hmap_rx_buf_va +  dhd->prot->hmap_rx_buf_len) >
5516 			((char *)dhd->prot->hmaptest.mem.va + dhd->prot->hmaptest.mem.len)) {
5517 			DHD_ERROR(("hmaptest: ERROR Rxpost outside HMAPTEST buffer\n"));
5518 			DHD_ERROR(("hmaptest: NOT Replacing Rx Buffer\n"));
5519 			dhd->prot->hmaptest_rx_active = HMAPTEST_D11_RX_INACTIVE;
5520 			dhd->prot->hmaptest.in_progress = FALSE;
5521 		} else {
5522 			pa = DMA_MAP(dhd->osh, dhd->prot->hmap_rx_buf_va,
5523 				dhd->prot->hmap_rx_buf_len, DMA_RX, p, 0);
5524 
5525 			dhd->prot->hmap_rx_buf_pa = pa;
5526 			dhd->prot->hmaptest_rx_pktid = pktid;
5527 			dhd->prot->hmaptest_rx_active = HMAPTEST_D11_RX_POSTED;
5528 			DHD_ERROR(("hmaptest: d11write rxpost scratch rxbuf pktid=0x%08x\n",
5529 				pktid));
5530 			DHD_ERROR(("hmaptest: d11write rxpost scratch rxbuf va=0x%p pa.lo=0x%08x\n",
5531 				dhd->prot->hmap_rx_buf_va, (uint32)PHYSADDRLO(pa)));
5532 			DHD_ERROR(("hmaptest: d11write rxpost orig pktdata va=0x%p pa.lo=0x%08x\n",
5533 				PKTDATA(dhd->osh, p), (uint32)PHYSADDRLO(pktbuf_pa[i])));
5534 		}
5535 	}
5536 #endif /* DHD_HMAPTEST */
5537 		dhd->prot->tot_rxbufpost++;
5538 		/* Common msg header */
5539 		rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST;
5540 		rxbuf_post->cmn_hdr.if_id = 0;
5541 		rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
5542 		rxbuf_post->cmn_hdr.flags = ring->current_phase;
5543 		ring->seqnum++;
5544 		rxbuf_post->data_buf_len = htol16((uint16)pktlen[i]);
5545 		rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
5546 		rxbuf_post->data_buf_addr.low_addr =
5547 			htol32(PHYSADDRLO(pa) + prot->rx_metadata_offset);
5548 
5549 		if (prot->rx_metadata_offset) {
5550 			rxbuf_post->metadata_buf_len = prot->rx_metadata_offset;
5551 			rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
5552 			rxbuf_post->metadata_buf_addr.low_addr  = htol32(PHYSADDRLO(pa));
5553 		} else {
5554 			rxbuf_post->metadata_buf_len = 0;
5555 			rxbuf_post->metadata_buf_addr.high_addr = 0;
5556 			rxbuf_post->metadata_buf_addr.low_addr  = 0;
5557 		}
5558 
5559 #ifdef DHD_PKTID_AUDIT_RING
5560 		DHD_PKTID_AUDIT(dhd, prot->pktid_rx_map, pktid, DHD_DUPLICATE_ALLOC);
5561 #endif /* DHD_PKTID_AUDIT_RING */
5562 
5563 		rxbuf_post->cmn_hdr.request_id = htol32(pktid);
5564 
5565 		/* Move rxbuf_post_tmp to next item */
5566 		rxbuf_post_tmp = rxbuf_post_tmp + ring->item_len;
5567 #ifdef DHD_LBUF_AUDIT
5568 		PKTAUDIT(dhd->osh, p);
5569 #endif
5570 	}
5571 
5572 	if (i < alloced) {
5573 		if (ring->wr < (alloced - i))
5574 			ring->wr = ring->max_items - (alloced - i);
5575 		else
5576 			ring->wr -= (alloced - i);
5577 
5578 		if (ring->wr == 0) {
5579 			DHD_INFO(("%s: flipping the phase now\n", ring->name));
5580 				ring->current_phase = ring->current_phase ?
5581 				0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
5582 		}
5583 
5584 		alloced = i;
5585 	}
5586 
5587 	/* update ring's WR index and ring doorbell to dongle */
5588 	if (alloced > 0) {
5589 		dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
5590 	}
5591 
5592 	DHD_RING_UNLOCK(ring->ring_lock, flags);
5593 
5594 cleanup:
5595 	for (i = alloced; i < count; i++) {
5596 		p = pktbuf[i];
5597 		pa = pktbuf_pa[i];
5598 
5599 		DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0, DHD_DMAH_NULL);
5600 		PKTFREE(dhd->osh, p, FALSE);
5601 	}
5602 
5603 	MFREE(dhd->osh, lcl_buf, lcl_buf_size);
5604 #ifdef PCIE_INB_DW
5605 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
5606 #endif
5607 
5608 	return alloced;
5609 } /* dhd_prot_rxbufpost */
5610 
5611 #if !defined(BCM_ROUTER_DHD)
5612 static int
dhd_prot_infobufpost(dhd_pub_t * dhd,msgbuf_ring_t * ring)5613 dhd_prot_infobufpost(dhd_pub_t *dhd, msgbuf_ring_t *ring)
5614 {
5615 	unsigned long flags;
5616 	uint32 pktid;
5617 	dhd_prot_t *prot = dhd->prot;
5618 	uint16 alloced = 0;
5619 	uint16 pktsz = DHD_INFOBUF_RX_BUFPOST_PKTSZ;
5620 	uint32 pktlen;
5621 	info_buf_post_msg_t *infobuf_post;
5622 	uint8 *infobuf_post_tmp;
5623 	void *p;
5624 	void* msg_start;
5625 	uint8 i = 0;
5626 	dmaaddr_t pa;
5627 	int16 count = 0;
5628 
5629 	if (ring == NULL)
5630 		return 0;
5631 
5632 	if (ring->inited != TRUE)
5633 		return 0;
5634 	if (ring == dhd->prot->h2dring_info_subn) {
5635 		if (prot->max_infobufpost == 0)
5636 			return 0;
5637 
5638 		count = prot->max_infobufpost - prot->infobufpost;
5639 	}
5640 #ifdef BTLOG
5641 	else if (ring == dhd->prot->h2dring_btlog_subn) {
5642 		if (prot->max_btlogbufpost == 0)
5643 			return 0;
5644 
5645 		pktsz = DHD_BTLOG_RX_BUFPOST_PKTSZ;
5646 		count = prot->max_btlogbufpost - prot->btlogbufpost;
5647 	}
5648 #endif	/* BTLOG */
5649 	else {
5650 		DHD_ERROR(("Unknown ring\n"));
5651 		return 0;
5652 	}
5653 
5654 	if (count <= 0) {
5655 		DHD_INFO(("%s: Cannot post more than max info resp buffers\n",
5656 			__FUNCTION__));
5657 		return 0;
5658 	}
5659 
5660 #ifdef PCIE_INB_DW
5661 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
5662 		return BCME_ERROR;
5663 #endif /* PCIE_INB_DW */
5664 
5665 	/* grab the ring lock to allocate pktid and post on ring */
5666 	DHD_RING_LOCK(ring->ring_lock, flags);
5667 
5668 	/* Claim space for exactly 'count' no of messages, for mitigation purpose */
5669 	msg_start = (void *) dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, FALSE);
5670 
5671 	if (msg_start == NULL) {
5672 		DHD_INFO(("%s:%d: infobufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
5673 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5674 #ifdef PCIE_INB_DW
5675 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
5676 #endif
5677 		return -1;
5678 	}
5679 
5680 	/* if msg_start !=  NULL, we should have alloced space for atleast 1 item */
5681 	ASSERT(alloced > 0);
5682 
5683 	infobuf_post_tmp = (uint8*) msg_start;
5684 
5685 	/* loop through each allocated message in the host ring */
5686 	for (i = 0; i < alloced; i++) {
5687 		infobuf_post = (info_buf_post_msg_t *) infobuf_post_tmp;
5688 		/* Create a rx buffer */
5689 #ifdef DHD_USE_STATIC_CTRLBUF
5690 		p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
5691 #else
5692 		p = PKTGET(dhd->osh, pktsz, FALSE);
5693 #endif /* DHD_USE_STATIC_CTRLBUF */
5694 		if (p == NULL) {
5695 			DHD_ERROR(("%s:%d: PKTGET for infobuf failed\n", __FUNCTION__, __LINE__));
5696 			dhd->rx_pktgetfail++;
5697 			break;
5698 		}
5699 		pktlen = PKTLEN(dhd->osh, p);
5700 		pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
5701 		if (PHYSADDRISZERO(pa)) {
5702 			DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
5703 #ifdef DHD_USE_STATIC_CTRLBUF
5704 			PKTFREE_STATIC(dhd->osh, p, FALSE);
5705 #else
5706 			PKTFREE(dhd->osh, p, FALSE);
5707 #endif /* DHD_USE_STATIC_CTRLBUF */
5708 			DHD_ERROR(("Invalid phyaddr 0\n"));
5709 			ASSERT(0);
5710 			break;
5711 		}
5712 #ifdef DMAMAP_STATS
5713 		dhd->dma_stats.info_rx++;
5714 		dhd->dma_stats.info_rx_sz += pktlen;
5715 #endif /* DMAMAP_STATS */
5716 		pktlen = PKTLEN(dhd->osh, p);
5717 
5718 		/* Common msg header */
5719 		infobuf_post->cmn_hdr.msg_type = MSG_TYPE_INFO_BUF_POST;
5720 		infobuf_post->cmn_hdr.if_id = 0;
5721 		infobuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
5722 		infobuf_post->cmn_hdr.flags = ring->current_phase;
5723 		ring->seqnum++;
5724 
5725 		pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_ctrl_map, p, pa,
5726 			pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_INFO_RX);
5727 
5728 #if defined(DHD_PCIE_PKTID)
5729 		if (pktid == DHD_PKTID_INVALID) {
5730 			DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0);
5731 
5732 #ifdef DHD_USE_STATIC_CTRLBUF
5733 			PKTFREE_STATIC(dhd->osh, p, FALSE);
5734 #else
5735 			PKTFREE(dhd->osh, p, FALSE);
5736 #endif /* DHD_USE_STATIC_CTRLBUF */
5737 			DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
5738 			break;
5739 		}
5740 #endif /* DHD_PCIE_PKTID */
5741 
5742 		infobuf_post->host_buf_len = htol16((uint16)pktlen);
5743 		infobuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
5744 		infobuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
5745 
5746 #ifdef DHD_PKTID_AUDIT_RING
5747 		DHD_PKTID_AUDIT(dhd, prot->pktid_ctrl_map, pktid, DHD_DUPLICATE_ALLOC);
5748 #endif /* DHD_PKTID_AUDIT_RING */
5749 
5750 		DHD_MSGBUF_INFO(("ID %d, low_addr 0x%08x, high_addr 0x%08x\n",
5751 			infobuf_post->cmn_hdr.request_id,  infobuf_post->host_buf_addr.low_addr,
5752 			infobuf_post->host_buf_addr.high_addr));
5753 
5754 		infobuf_post->cmn_hdr.request_id = htol32(pktid);
5755 		/* Move rxbuf_post_tmp to next item */
5756 		infobuf_post_tmp = infobuf_post_tmp + ring->item_len;
5757 #ifdef DHD_LBUF_AUDIT
5758 		PKTAUDIT(dhd->osh, p);
5759 #endif
5760 	}
5761 
5762 	if (i < alloced) {
5763 		if (ring->wr < (alloced - i))
5764 			ring->wr = ring->max_items - (alloced - i);
5765 		else
5766 			ring->wr -= (alloced - i);
5767 
5768 		alloced = i;
5769 		if (alloced && ring->wr == 0) {
5770 			DHD_INFO(("%s: flipping the phase now\n", ring->name));
5771 			ring->current_phase = ring->current_phase ?
5772 				0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
5773 		}
5774 	}
5775 
5776 	/* Update the write pointer in TCM & ring bell */
5777 	if (alloced > 0) {
5778 		if (ring == dhd->prot->h2dring_info_subn) {
5779 			prot->infobufpost += alloced;
5780 		}
5781 #ifdef BTLOG
5782 		if (ring == dhd->prot->h2dring_btlog_subn) {
5783 			prot->btlogbufpost += alloced;
5784 		}
5785 #endif	/* BTLOG */
5786 		dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
5787 	}
5788 
5789 	DHD_RING_UNLOCK(ring->ring_lock, flags);
5790 
5791 #ifdef PCIE_INB_DW
5792 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
5793 #endif
5794 	return alloced;
5795 } /* dhd_prot_infobufpost */
5796 #endif /* !BCM_ROUTER_DHD */
5797 
5798 #ifdef IOCTLRESP_USE_CONSTMEM
5799 static int
alloc_ioctl_return_buffer(dhd_pub_t * dhd,dhd_dma_buf_t * retbuf)5800 alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
5801 {
5802 	int err;
5803 	memset(retbuf, 0, sizeof(dhd_dma_buf_t));
5804 
5805 	if ((err = dhd_dma_buf_alloc(dhd, retbuf, IOCT_RETBUF_SIZE)) != BCME_OK) {
5806 		DHD_ERROR(("%s: dhd_dma_buf_alloc err %d\n", __FUNCTION__, err));
5807 		ASSERT(0);
5808 		return BCME_NOMEM;
5809 	}
5810 
5811 	return BCME_OK;
5812 }
5813 
5814 static void
free_ioctl_return_buffer(dhd_pub_t * dhd,dhd_dma_buf_t * retbuf)5815 free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
5816 {
5817 	/* retbuf (declared on stack) not fully populated ...  */
5818 	if (retbuf->va) {
5819 		uint32 dma_pad;
5820 		dma_pad = (IOCT_RETBUF_SIZE % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
5821 		retbuf->len = IOCT_RETBUF_SIZE;
5822 		retbuf->_alloced = retbuf->len + dma_pad;
5823 	}
5824 
5825 	dhd_dma_buf_free(dhd, retbuf);
5826 	return;
5827 }
5828 #endif /* IOCTLRESP_USE_CONSTMEM */
5829 
5830 static int
dhd_prot_rxbufpost_ctrl(dhd_pub_t * dhd,uint8 msg_type)5831 dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, uint8 msg_type)
5832 {
5833 	void *p;
5834 	uint16 pktsz;
5835 	ioctl_resp_evt_buf_post_msg_t *rxbuf_post;
5836 	dmaaddr_t pa;
5837 	uint32 pktlen;
5838 	dhd_prot_t *prot = dhd->prot;
5839 	uint16 alloced = 0;
5840 	unsigned long flags;
5841 	dhd_dma_buf_t retbuf;
5842 	void *dmah = NULL;
5843 	uint32 pktid;
5844 	void *map_handle;
5845 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
5846 	bool non_ioctl_resp_buf = 0;
5847 	dhd_pkttype_t buf_type;
5848 
5849 	if (dhd->busstate == DHD_BUS_DOWN) {
5850 		DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
5851 		return -1;
5852 	}
5853 	memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
5854 
5855 	if (msg_type == MSG_TYPE_IOCTLRESP_BUF_POST)
5856 		buf_type = PKTTYPE_IOCTL_RX;
5857 	else if (msg_type == MSG_TYPE_EVENT_BUF_POST)
5858 		buf_type = PKTTYPE_EVENT_RX;
5859 	else if (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST)
5860 		buf_type = PKTTYPE_TSBUF_RX;
5861 	else {
5862 		DHD_ERROR(("invalid message type to be posted to Ctrl ring %d\n", msg_type));
5863 		/* XXX: may be add an assert */
5864 		return -1;
5865 	}
5866 #ifdef PCIE_INB_DW
5867 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) {
5868 		 return BCME_ERROR;
5869 	}
5870 #endif /* PCIE_INB_DW */
5871 
5872 	if ((msg_type == MSG_TYPE_EVENT_BUF_POST) || (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST))
5873 		non_ioctl_resp_buf = TRUE;
5874 	else
5875 		non_ioctl_resp_buf = FALSE;
5876 
5877 	if (non_ioctl_resp_buf) {
5878 		/* Allocate packet for not ioctl resp buffer post */
5879 		pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
5880 	} else {
5881 		/* Allocate packet for ctrl/ioctl buffer post */
5882 		pktsz = DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ;
5883 	}
5884 
5885 #ifdef IOCTLRESP_USE_CONSTMEM
5886 	if (!non_ioctl_resp_buf) {
5887 		if (alloc_ioctl_return_buffer(dhd, &retbuf) != BCME_OK) {
5888 			DHD_ERROR(("Could not allocate IOCTL response buffer\n"));
5889 			goto fail;
5890 		}
5891 		ASSERT(retbuf.len == IOCT_RETBUF_SIZE);
5892 		p = retbuf.va;
5893 		pktlen = retbuf.len;
5894 		pa = retbuf.pa;
5895 		dmah = retbuf.dmah;
5896 	} else
5897 #endif /* IOCTLRESP_USE_CONSTMEM */
5898 	{
5899 #ifdef DHD_USE_STATIC_CTRLBUF
5900 		p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
5901 #else
5902 		p = PKTGET(dhd->osh, pktsz, FALSE);
5903 #endif /* DHD_USE_STATIC_CTRLBUF */
5904 		if (p == NULL) {
5905 			DHD_ERROR(("%s:%d: PKTGET for %s buf failed\n",
5906 				__FUNCTION__, __LINE__, non_ioctl_resp_buf ?
5907 				"EVENT" : "IOCTL RESP"));
5908 			dhd->rx_pktgetfail++;
5909 			goto fail;
5910 		}
5911 
5912 		pktlen = PKTLEN(dhd->osh, p);
5913 		pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
5914 
5915 		if (PHYSADDRISZERO(pa)) {
5916 			DHD_ERROR(("Invalid physaddr 0\n"));
5917 			ASSERT(0);
5918 			goto free_pkt_return;
5919 		}
5920 
5921 #ifdef DMAMAP_STATS
5922 		switch (buf_type) {
5923 #ifndef IOCTLRESP_USE_CONSTMEM
5924 			case PKTTYPE_IOCTL_RX:
5925 				dhd->dma_stats.ioctl_rx++;
5926 				dhd->dma_stats.ioctl_rx_sz += pktlen;
5927 				break;
5928 #endif /* !IOCTLRESP_USE_CONSTMEM */
5929 			case PKTTYPE_EVENT_RX:
5930 				dhd->dma_stats.event_rx++;
5931 				dhd->dma_stats.event_rx_sz += pktlen;
5932 				break;
5933 			case PKTTYPE_TSBUF_RX:
5934 				dhd->dma_stats.tsbuf_rx++;
5935 				dhd->dma_stats.tsbuf_rx_sz += pktlen;
5936 				break;
5937 			default:
5938 				break;
5939 		}
5940 #endif /* DMAMAP_STATS */
5941 
5942 	}
5943 
5944 	/* grab the ring lock to allocate pktid and post on ring */
5945 	DHD_RING_LOCK(ring->ring_lock, flags);
5946 
5947 	rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *)
5948 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
5949 
5950 	if (rxbuf_post == NULL) {
5951 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5952 		DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n",
5953 			__FUNCTION__, __LINE__));
5954 
5955 #ifdef IOCTLRESP_USE_CONSTMEM
5956 		if (non_ioctl_resp_buf)
5957 #endif /* IOCTLRESP_USE_CONSTMEM */
5958 		{
5959 			DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
5960 		}
5961 		goto free_pkt_return;
5962 	}
5963 
5964 	/* CMN msg header */
5965 	rxbuf_post->cmn_hdr.msg_type = msg_type;
5966 
5967 #ifdef IOCTLRESP_USE_CONSTMEM
5968 	if (!non_ioctl_resp_buf) {
5969 		map_handle = dhd->prot->pktid_map_handle_ioctl;
5970 		pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen, DMA_RX, dmah,
5971 			ring->dma_buf.secdma, buf_type);
5972 	} else
5973 #endif /* IOCTLRESP_USE_CONSTMEM */
5974 	{
5975 		map_handle = dhd->prot->pktid_ctrl_map;
5976 		pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle,
5977 			p, pa, pktlen, DMA_RX, dmah, ring->dma_buf.secdma,
5978 			buf_type);
5979 	}
5980 
5981 	if (pktid == DHD_PKTID_INVALID) {
5982 		if (ring->wr == 0) {
5983 			ring->wr = ring->max_items - 1;
5984 		} else {
5985 			ring->wr--;
5986 			if (ring->wr == 0) {
5987 				ring->current_phase = ring->current_phase ? 0 :
5988 					BCMPCIE_CMNHDR_PHASE_BIT_INIT;
5989 			}
5990 		}
5991 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5992 		DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
5993 		DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
5994 		goto free_pkt_return;
5995 	}
5996 
5997 #ifdef DHD_PKTID_AUDIT_RING
5998 	DHD_PKTID_AUDIT(dhd, map_handle, pktid, DHD_DUPLICATE_ALLOC);
5999 #endif /* DHD_PKTID_AUDIT_RING */
6000 
6001 	rxbuf_post->cmn_hdr.request_id = htol32(pktid);
6002 	rxbuf_post->cmn_hdr.if_id = 0;
6003 	rxbuf_post->cmn_hdr.epoch =  ring->seqnum % H2D_EPOCH_MODULO;
6004 	ring->seqnum++;
6005 	rxbuf_post->cmn_hdr.flags = ring->current_phase;
6006 
6007 #if defined(DHD_PCIE_PKTID)
6008 	if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
6009 		if (ring->wr == 0) {
6010 			ring->wr = ring->max_items - 1;
6011 		} else {
6012 			if (ring->wr == 0) {
6013 				ring->current_phase = ring->current_phase ? 0 :
6014 					BCMPCIE_CMNHDR_PHASE_BIT_INIT;
6015 			}
6016 		}
6017 		DHD_RING_UNLOCK(ring->ring_lock, flags);
6018 #ifdef IOCTLRESP_USE_CONSTMEM
6019 		if (non_ioctl_resp_buf)
6020 #endif /* IOCTLRESP_USE_CONSTMEM */
6021 		{
6022 			DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
6023 		}
6024 		goto free_pkt_return;
6025 	}
6026 #endif /* DHD_PCIE_PKTID */
6027 
6028 #ifndef IOCTLRESP_USE_CONSTMEM
6029 	rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p));
6030 #else
6031 	rxbuf_post->host_buf_len = htol16((uint16)pktlen);
6032 #endif /* IOCTLRESP_USE_CONSTMEM */
6033 	rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
6034 	rxbuf_post->host_buf_addr.low_addr  = htol32(PHYSADDRLO(pa));
6035 #ifdef DHD_LBUF_AUDIT
6036 	if (non_ioctl_resp_buf)
6037 		PKTAUDIT(dhd->osh, p);
6038 #endif
6039 	/* update ring's WR index and ring doorbell to dongle */
6040 	dhd_prot_ring_write_complete(dhd, ring, rxbuf_post, 1);
6041 
6042 	DHD_RING_UNLOCK(ring->ring_lock, flags);
6043 
6044 #ifdef PCIE_INB_DW
6045 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
6046 #endif
6047 	return 1;
6048 
6049 free_pkt_return:
6050 	if (!non_ioctl_resp_buf) {
6051 #ifdef IOCTLRESP_USE_CONSTMEM
6052 		free_ioctl_return_buffer(dhd, &retbuf);
6053 #else
6054 		dhd_prot_packet_free(dhd, p, buf_type, FALSE);
6055 #endif /* IOCTLRESP_USE_CONSTMEM */
6056 	} else {
6057 		dhd_prot_packet_free(dhd, p, buf_type, FALSE);
6058 	}
6059 
6060 fail:
6061 #ifdef PCIE_INB_DW
6062 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
6063 #endif
6064 	return -1;
6065 } /* dhd_prot_rxbufpost_ctrl */
6066 
6067 static uint16
dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t * dhd,uint8 msg_type,uint32 max_to_post)6068 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msg_type, uint32 max_to_post)
6069 {
6070 	uint32 i = 0;
6071 	int32 ret_val;
6072 
6073 	DHD_MSGBUF_INFO(("max to post %d, event %d \n", max_to_post, msg_type));
6074 
6075 	if (dhd->busstate == DHD_BUS_DOWN) {
6076 		DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
6077 		return 0;
6078 	}
6079 
6080 	while (i < max_to_post) {
6081 		ret_val  = dhd_prot_rxbufpost_ctrl(dhd, msg_type);
6082 		if (ret_val < 0)
6083 			break;
6084 		i++;
6085 	}
6086 	DHD_MSGBUF_INFO(("posted %d buffers of type %d\n", i, msg_type));
6087 	return (uint16)i;
6088 }
6089 
6090 static void
dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t * dhd)6091 dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd)
6092 {
6093 	dhd_prot_t *prot = dhd->prot;
6094 	int max_to_post;
6095 
6096 	DHD_MSGBUF_INFO(("ioctl resp buf post\n"));
6097 	max_to_post = prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted;
6098 	if (max_to_post <= 0) {
6099 		DHD_INFO(("%s: Cannot post more than max IOCTL resp buffers\n",
6100 			__FUNCTION__));
6101 		return;
6102 	}
6103 	prot->cur_ioctlresp_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
6104 		MSG_TYPE_IOCTLRESP_BUF_POST, max_to_post);
6105 }
6106 
6107 static void
dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t * dhd)6108 dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd)
6109 {
6110 	dhd_prot_t *prot = dhd->prot;
6111 	int max_to_post;
6112 
6113 	max_to_post = prot->max_eventbufpost - prot->cur_event_bufs_posted;
6114 	if (max_to_post <= 0) {
6115 		DHD_ERROR(("%s: Cannot post more than max event buffers\n",
6116 			__FUNCTION__));
6117 		return;
6118 	}
6119 	prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
6120 		MSG_TYPE_EVENT_BUF_POST, max_to_post);
6121 }
6122 
6123 static int
dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t * dhd)6124 dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *dhd)
6125 {
6126 #ifdef DHD_TIMESYNC
6127 	dhd_prot_t *prot = dhd->prot;
6128 	int max_to_post;
6129 
6130 	if (prot->active_ipc_version < 7) {
6131 		DHD_ERROR(("no ts buffers to device ipc rev is %d, needs to be atleast 7\n",
6132 			prot->active_ipc_version));
6133 		return 0;
6134 	}
6135 
6136 	max_to_post = prot->max_tsbufpost - prot->cur_ts_bufs_posted;
6137 	if (max_to_post <= 0) {
6138 		DHD_INFO(("%s: Cannot post more than max ts buffers\n",
6139 			__FUNCTION__));
6140 		return 0;
6141 	}
6142 
6143 	prot->cur_ts_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
6144 		MSG_TYPE_TIMSTAMP_BUFPOST, max_to_post);
6145 #endif /* DHD_TIMESYNC */
6146 	return 0;
6147 }
6148 
6149 bool
BCMFASTPATH(dhd_prot_process_msgbuf_infocpl)6150 BCMFASTPATH(dhd_prot_process_msgbuf_infocpl)(dhd_pub_t *dhd, uint bound)
6151 {
6152 	dhd_prot_t *prot = dhd->prot;
6153 	bool more = TRUE;
6154 	uint n = 0;
6155 	msgbuf_ring_t *ring = prot->d2hring_info_cpln;
6156 	unsigned long flags;
6157 
6158 	if (ring == NULL)
6159 		return FALSE;
6160 	if (ring->inited != TRUE)
6161 		return FALSE;
6162 
6163 	/* Process all the messages - DTOH direction */
6164 	while (!dhd_is_device_removed(dhd)) {
6165 		uint8 *msg_addr;
6166 		uint32 msg_len;
6167 
6168 		if (dhd->hang_was_sent) {
6169 			more = FALSE;
6170 			break;
6171 		}
6172 
6173 		if (dhd->smmu_fault_occurred) {
6174 			more = FALSE;
6175 			break;
6176 		}
6177 
6178 		DHD_RING_LOCK(ring->ring_lock, flags);
6179 		/* Get the message from ring */
6180 		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
6181 		DHD_RING_UNLOCK(ring->ring_lock, flags);
6182 		if (msg_addr == NULL) {
6183 			more = FALSE;
6184 			break;
6185 		}
6186 
6187 		/* Prefetch data to populate the cache */
6188 		OSL_PREFETCH(msg_addr);
6189 
6190 		if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
6191 			DHD_ERROR(("%s: Error at  process rxpl msgbuf of len %d\n",
6192 				__FUNCTION__, msg_len));
6193 		}
6194 
6195 		/* Update read pointer */
6196 		dhd_prot_upd_read_idx(dhd, ring);
6197 
6198 		/* After batch processing, check RX bound */
6199 		n += msg_len / ring->item_len;
6200 		if (n >= bound) {
6201 			break;
6202 		}
6203 	}
6204 
6205 	return more;
6206 }
6207 
6208 #ifdef BTLOG
6209 bool
BCMFASTPATH(dhd_prot_process_msgbuf_btlogcpl)6210 BCMFASTPATH(dhd_prot_process_msgbuf_btlogcpl)(dhd_pub_t *dhd, uint bound)
6211 {
6212 	dhd_prot_t *prot = dhd->prot;
6213 	bool more = TRUE;
6214 	uint n = 0;
6215 	msgbuf_ring_t *ring = prot->d2hring_btlog_cpln;
6216 
6217 	if (ring == NULL)
6218 		return FALSE;
6219 	if (ring->inited != TRUE)
6220 		return FALSE;
6221 
6222 	/* Process all the messages - DTOH direction */
6223 	while (!dhd_is_device_removed(dhd)) {
6224 		uint8 *msg_addr;
6225 		uint32 msg_len;
6226 
6227 		if (dhd_query_bus_erros(dhd)) {
6228 			more = FALSE;
6229 			break;
6230 		}
6231 
6232 		if (dhd->hang_was_sent) {
6233 			more = FALSE;
6234 			break;
6235 		}
6236 
6237 		if (dhd->smmu_fault_occurred) {
6238 			more = FALSE;
6239 			break;
6240 		}
6241 
6242 		/* Get the message from ring */
6243 		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
6244 		if (msg_addr == NULL) {
6245 			more = FALSE;
6246 			break;
6247 		}
6248 
6249 		/* Prefetch data to populate the cache */
6250 		OSL_PREFETCH(msg_addr);
6251 
6252 		if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
6253 			DHD_ERROR(("%s: Error at  process rxpl msgbuf of len %d\n",
6254 				__FUNCTION__, msg_len));
6255 		}
6256 
6257 		/* Update read pointer */
6258 		dhd_prot_upd_read_idx(dhd, ring);
6259 
6260 		/* After batch processing, check RX bound */
6261 		n += msg_len / ring->item_len;
6262 		if (n >= bound) {
6263 			break;
6264 		}
6265 	}
6266 
6267 	return more;
6268 }
6269 #endif	/* BTLOG */
6270 
6271 #ifdef EWP_EDL
6272 bool
dhd_prot_process_msgbuf_edl(dhd_pub_t * dhd)6273 dhd_prot_process_msgbuf_edl(dhd_pub_t *dhd)
6274 {
6275 	dhd_prot_t *prot = dhd->prot;
6276 	msgbuf_ring_t *ring = prot->d2hring_edl;
6277 	unsigned long flags = 0;
6278 	uint32 items = 0;
6279 	uint16 rd = 0;
6280 	uint16 depth = 0;
6281 
6282 	if (ring == NULL)
6283 		return FALSE;
6284 	if (ring->inited != TRUE)
6285 		return FALSE;
6286 	if (ring->item_len == 0) {
6287 		DHD_ERROR(("%s: Bad ring ! ringidx %d, item_len %d \n",
6288 			__FUNCTION__, ring->idx, ring->item_len));
6289 		return FALSE;
6290 	}
6291 
6292 	if (dhd_query_bus_erros(dhd)) {
6293 		return FALSE;
6294 	}
6295 
6296 	if (dhd->hang_was_sent) {
6297 		return FALSE;
6298 	}
6299 
6300 	/* in this DPC context just check if wr index has moved
6301 	 * and schedule deferred context to actually process the
6302 	 * work items.
6303 	*/
6304 
6305 	/* update the write index */
6306 	DHD_RING_LOCK(ring->ring_lock, flags);
6307 	if (dhd->dma_d2h_ring_upd_support) {
6308 		/* DMAing write/read indices supported */
6309 		ring->wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
6310 	} else {
6311 		dhd_bus_cmn_readshared(dhd->bus, &ring->wr, RING_WR_UPD, ring->idx);
6312 	}
6313 	rd = ring->rd;
6314 	DHD_RING_UNLOCK(ring->ring_lock, flags);
6315 
6316 	depth = ring->max_items;
6317 	/* check for avail space, in number of ring items */
6318 	items = READ_AVAIL_SPACE(ring->wr, rd, depth);
6319 	if (items == 0) {
6320 		/* no work items in edl ring */
6321 		return FALSE;
6322 	}
6323 	if (items > ring->max_items) {
6324 		DHD_ERROR(("\r\n======================= \r\n"));
6325 		DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
6326 			__FUNCTION__, ring, ring->name, ring->max_items, items));
6327 		DHD_ERROR(("wr: %d,  rd: %d,  depth: %d  \r\n",
6328 			ring->wr, ring->rd, depth));
6329 		DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
6330 			dhd->busstate, dhd->bus->wait_for_d3_ack));
6331 		DHD_ERROR(("\r\n======================= \r\n"));
6332 #ifdef SUPPORT_LINKDOWN_RECOVERY
6333 		if (ring->wr >= ring->max_items) {
6334 			dhd->bus->read_shm_fail = TRUE;
6335 		}
6336 #else
6337 #ifdef DHD_FW_COREDUMP
6338 		if (dhd->memdump_enabled) {
6339 			/* collect core dump */
6340 			dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
6341 			dhd_bus_mem_dump(dhd);
6342 
6343 		}
6344 #endif /* DHD_FW_COREDUMP */
6345 #endif /* SUPPORT_LINKDOWN_RECOVERY */
6346 		dhd_schedule_reset(dhd);
6347 
6348 		return FALSE;
6349 	}
6350 
6351 	if (items > D2HRING_EDL_WATERMARK) {
6352 		DHD_ERROR_RLMT(("%s: WARNING! EDL watermark hit, num items=%u;"
6353 			" rd=%u; wr=%u; depth=%u;\n", __FUNCTION__, items,
6354 			ring->rd, ring->wr, depth));
6355 	}
6356 
6357 	dhd_schedule_logtrace(dhd->info);
6358 
6359 	return FALSE;
6360 }
6361 
6362 /*
6363  * This is called either from work queue context of 'event_log_dispatcher_work' or
6364  * from the kthread context of dhd_logtrace_thread
6365  */
6366 int
dhd_prot_process_edl_complete(dhd_pub_t * dhd,void * evt_decode_data)6367 dhd_prot_process_edl_complete(dhd_pub_t *dhd, void *evt_decode_data)
6368 {
6369 	dhd_prot_t *prot = NULL;
6370 	msgbuf_ring_t *ring = NULL;
6371 	int err = 0;
6372 	unsigned long flags = 0;
6373 	cmn_msg_hdr_t *msg = NULL;
6374 	uint8 *msg_addr = NULL;
6375 	uint32 max_items_to_process = 0, n = 0;
6376 	uint32 num_items = 0, new_items = 0;
6377 	uint16 depth = 0;
6378 	volatile uint16 wr = 0;
6379 
6380 	if (!dhd || !dhd->prot)
6381 		return 0;
6382 
6383 	prot = dhd->prot;
6384 	ring = prot->d2hring_edl;
6385 
6386 	if (!ring || !evt_decode_data) {
6387 		return 0;
6388 	}
6389 
6390 	if (dhd->hang_was_sent) {
6391 		return FALSE;
6392 	}
6393 
6394 	DHD_RING_LOCK(ring->ring_lock, flags);
6395 	ring->curr_rd = ring->rd;
6396 	wr = ring->wr;
6397 	depth = ring->max_items;
6398 	/* check for avail space, in number of ring items
6399 	 * Note, that this will only give the # of items
6400 	 * from rd to wr if wr>=rd, or from rd to ring end
6401 	 * if wr < rd. So in the latter case strictly speaking
6402 	 * not all the items are read. But this is OK, because
6403 	 * these will be processed in the next doorbell as rd
6404 	 * would have wrapped around. Processing in the next
6405 	 * doorbell is acceptable since EDL only contains debug data
6406 	 */
6407 	num_items = READ_AVAIL_SPACE(wr, ring->rd, depth);
6408 
6409 	if (num_items == 0) {
6410 		/* no work items in edl ring */
6411 		DHD_RING_UNLOCK(ring->ring_lock, flags);
6412 		return 0;
6413 	}
6414 
6415 	DHD_INFO(("%s: EDL work items [%u] available \n",
6416 			__FUNCTION__, num_items));
6417 
6418 	/* if space is available, calculate address to be read */
6419 	msg_addr = (char*)ring->dma_buf.va + (ring->rd * ring->item_len);
6420 
6421 	max_items_to_process = MIN(num_items, DHD_EVENT_LOGTRACE_BOUND);
6422 
6423 	DHD_RING_UNLOCK(ring->ring_lock, flags);
6424 
6425 	/* Prefetch data to populate the cache */
6426 	OSL_PREFETCH(msg_addr);
6427 
6428 	n = max_items_to_process;
6429 	while (n > 0) {
6430 		msg = (cmn_msg_hdr_t *)msg_addr;
6431 		/* wait for DMA of work item to complete */
6432 		if ((err = dhd->prot->d2h_edl_sync_cb(dhd, ring, msg)) != BCME_OK) {
6433 			DHD_ERROR(("%s: Error waiting for DMA to cmpl in EDL ring; err = %d\n",
6434 				__FUNCTION__, err));
6435 		}
6436 		/*
6437 		 * Update the curr_rd to the current index in the ring, from where
6438 		 * the work item is fetched. This way if the fetched work item
6439 		 * fails in LIVELOCK, we can print the exact read index in the ring
6440 		 * that shows up the corrupted work item.
6441 		 */
6442 		if ((ring->curr_rd + 1) >= ring->max_items) {
6443 			ring->curr_rd = 0;
6444 		} else {
6445 			ring->curr_rd += 1;
6446 		}
6447 
6448 		if (err != BCME_OK) {
6449 			return 0;
6450 		}
6451 
6452 		/* process the edl work item, i.e, the event log */
6453 		err = dhd_event_logtrace_process_edl(dhd, msg_addr, evt_decode_data);
6454 
6455 		/* Dummy sleep so that scheduler kicks in after processing any logprints */
6456 		OSL_SLEEP(0);
6457 
6458 		/* Prefetch data to populate the cache */
6459 		OSL_PREFETCH(msg_addr + ring->item_len);
6460 
6461 		msg_addr += ring->item_len;
6462 		--n;
6463 	}
6464 
6465 	DHD_RING_LOCK(ring->ring_lock, flags);
6466 	/* update host ring read pointer */
6467 	if ((ring->rd + max_items_to_process) >= ring->max_items)
6468 		ring->rd = 0;
6469 	else
6470 		ring->rd += max_items_to_process;
6471 	DHD_RING_UNLOCK(ring->ring_lock, flags);
6472 
6473 	/* Now after processing max_items_to_process update dongle rd index.
6474 	 * The TCM rd index is updated only if bus is not
6475 	 * in D3. Else, the rd index is updated from resume
6476 	 * context in - 'dhdpcie_bus_suspend'
6477 	 */
6478 	DHD_GENERAL_LOCK(dhd, flags);
6479 	if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) {
6480 		DHD_INFO(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
6481 			__FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state));
6482 		DHD_GENERAL_UNLOCK(dhd, flags);
6483 	} else {
6484 		DHD_GENERAL_UNLOCK(dhd, flags);
6485 		DHD_EDL_RING_TCM_RD_UPDATE(dhd);
6486 	}
6487 
6488 	/* if num_items > bound, then anyway we will reschedule and
6489 	 * this function runs again, so that if in between the DPC has
6490 	 * updated the wr index, then the updated wr is read. But if
6491 	 * num_items <= bound, and if DPC executes and updates the wr index
6492 	 * when the above while loop is running, then the updated 'wr' index
6493 	 * needs to be re-read from here, If we don't do so, then till
6494 	 * the next time this function is scheduled
6495 	 * the event logs will not be processed.
6496 	*/
6497 	if (num_items <= DHD_EVENT_LOGTRACE_BOUND) {
6498 		/* read the updated wr index if reqd. and update num_items */
6499 		DHD_RING_LOCK(ring->ring_lock, flags);
6500 		if (wr != (volatile uint16)ring->wr) {
6501 			wr = (volatile uint16)ring->wr;
6502 			new_items = READ_AVAIL_SPACE(wr, ring->rd, depth);
6503 			DHD_INFO(("%s: new items [%u] avail in edl\n",
6504 				__FUNCTION__, new_items));
6505 			num_items += new_items;
6506 		}
6507 		DHD_RING_UNLOCK(ring->ring_lock, flags);
6508 	}
6509 
6510 	/* if # of items processed is less than num_items, need to re-schedule
6511 	* the deferred ctx
6512 	*/
6513 	if (max_items_to_process < num_items) {
6514 		DHD_INFO(("%s: EDL bound hit / new items found, "
6515 				"items processed=%u; remaining=%u, "
6516 				"resched deferred ctx...\n",
6517 				__FUNCTION__, max_items_to_process,
6518 				num_items - max_items_to_process));
6519 		return (num_items - max_items_to_process);
6520 	}
6521 
6522 	return 0;
6523 
6524 }
6525 
6526 void
dhd_prot_edl_ring_tcm_rd_update(dhd_pub_t * dhd)6527 dhd_prot_edl_ring_tcm_rd_update(dhd_pub_t *dhd)
6528 {
6529 	dhd_prot_t *prot = NULL;
6530 	unsigned long flags = 0;
6531 	msgbuf_ring_t *ring = NULL;
6532 
6533 	if (!dhd)
6534 		return;
6535 
6536 	prot = dhd->prot;
6537 	if (!prot || !prot->d2hring_edl)
6538 		return;
6539 
6540 	ring = prot->d2hring_edl;
6541 	DHD_RING_LOCK(ring->ring_lock, flags);
6542 	dhd_prot_upd_read_idx(dhd, ring);
6543 	DHD_RING_UNLOCK(ring->ring_lock, flags);
6544 	if (dhd->dma_h2d_ring_upd_support &&
6545 		!IDMA_ACTIVE(dhd)) {
6546 		dhd_prot_ring_doorbell(dhd, DHD_RDPTR_UPDATE_H2D_DB_MAGIC(ring));
6547 	}
6548 }
6549 #endif /* EWP_EDL */
6550 
6551 static void
dhd_prot_rx_frame(dhd_pub_t * dhd,void * pkt,int ifidx,uint pkt_count)6552 dhd_prot_rx_frame(dhd_pub_t *dhd, void *pkt, int ifidx, uint pkt_count)
6553 {
6554 
6555 #ifdef DHD_LB_RXP
6556 	if (dhd_read_lb_rxp(dhd) == 1) {
6557 		dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx);
6558 		return;
6559 	}
6560 #endif /* DHD_LB_RXP */
6561 	dhd_bus_rx_frame(dhd->bus, pkt, ifidx, pkt_count);
6562 }
6563 
6564 #ifdef DHD_LB_RXP
dhd_prot_lb_rxp_flow_ctrl(dhd_pub_t * dhd)6565 static int dhd_prot_lb_rxp_flow_ctrl(dhd_pub_t *dhd)
6566 {
6567 	if ((dhd->lb_rxp_stop_thr == 0) || (dhd->lb_rxp_strt_thr == 0)) {
6568 		/* when either of stop and start thresholds are zero flow ctrl is not enabled */
6569 		return FALSE;
6570 	}
6571 
6572 	if ((dhd_lb_rxp_process_qlen(dhd) >= dhd->lb_rxp_stop_thr) &&
6573 			(!atomic_read(&dhd->lb_rxp_flow_ctrl))) {
6574 		atomic_set(&dhd->lb_rxp_flow_ctrl, TRUE);
6575 #ifdef DHD_LB_STATS
6576 		dhd->lb_rxp_stop_thr_hitcnt++;
6577 #endif /* DHD_LB_STATS */
6578 		DHD_INFO(("lb_rxp_process_qlen %d lb_rxp_stop_thr %d\n",
6579 			dhd_lb_rxp_process_qlen(dhd), dhd->lb_rxp_stop_thr));
6580 	} else if ((dhd_lb_rxp_process_qlen(dhd) <= dhd->lb_rxp_strt_thr) &&
6581 			(atomic_read(&dhd->lb_rxp_flow_ctrl))) {
6582 		atomic_set(&dhd->lb_rxp_flow_ctrl, FALSE);
6583 #ifdef DHD_LB_STATS
6584 		dhd->lb_rxp_strt_thr_hitcnt++;
6585 #endif /* DHD_LB_STATS */
6586 		DHD_INFO(("lb_rxp_process_qlen %d lb_rxp_strt_thr %d\n",
6587 			dhd_lb_rxp_process_qlen(dhd), dhd->lb_rxp_strt_thr));
6588 	}
6589 
6590 	return atomic_read(&dhd->lb_rxp_flow_ctrl);
6591 }
6592 #endif /* DHD_LB_RXP */
6593 
6594 /** called when DHD needs to check for 'receive complete' messages from the dongle */
6595 bool
BCMFASTPATH(dhd_prot_process_msgbuf_rxcpl)6596 BCMFASTPATH(dhd_prot_process_msgbuf_rxcpl)(dhd_pub_t *dhd, uint bound, int ringtype)
6597 {
6598 	bool more = FALSE;
6599 	uint n = 0;
6600 	dhd_prot_t *prot = dhd->prot;
6601 	msgbuf_ring_t *ring;
6602 	uint16 item_len;
6603 	host_rxbuf_cmpl_t *msg = NULL;
6604 	uint8 *msg_addr;
6605 	uint32 msg_len;
6606 	uint16 pkt_cnt, pkt_cnt_newidx;
6607 	unsigned long flags;
6608 	dmaaddr_t pa;
6609 	uint32 len;
6610 	void *dmah;
6611 	void *secdma;
6612 	int ifidx = 0, if_newidx = 0;
6613 	void *pkt, *pktqhead = NULL, *prevpkt = NULL, *pkt_newidx, *nextpkt;
6614 	uint32 pktid;
6615 	int i;
6616 	uint8 sync;
6617 
6618 #ifdef DHD_LB_RXP
6619 	/* must be the first check in this function */
6620 	if (dhd_prot_lb_rxp_flow_ctrl(dhd)) {
6621 		/* DHD is holding a lot of RX packets.
6622 		 * Just give chance for netwrok stack to consumes RX packets.
6623 		 */
6624 		return FALSE;
6625 	}
6626 #endif /* DHD_LB_RXP */
6627 #ifdef DHD_PCIE_RUNTIMEPM
6628 	/* Set rx_pending_due_to_rpm if device is not in resume state */
6629 	if (dhdpcie_runtime_bus_wake(dhd, FALSE, dhd_prot_process_msgbuf_rxcpl)) {
6630 		dhd->rx_pending_due_to_rpm = TRUE;
6631 		return more;
6632 	}
6633 	dhd->rx_pending_due_to_rpm = FALSE;
6634 #endif /* DHD_PCIE_RUNTIMEPM */
6635 
6636 #ifdef DHD_HP2P
6637 	if (ringtype == DHD_HP2P_RING && prot->d2hring_hp2p_rxcpl)
6638 		ring = prot->d2hring_hp2p_rxcpl;
6639 	else
6640 #endif /* DHD_HP2P */
6641 		ring = &prot->d2hring_rx_cpln;
6642 	item_len = ring->item_len;
6643 	while (1) {
6644 		if (dhd_is_device_removed(dhd))
6645 			break;
6646 
6647 		if (dhd_query_bus_erros(dhd))
6648 			break;
6649 
6650 		if (dhd->hang_was_sent)
6651 			break;
6652 
6653 		if (dhd->smmu_fault_occurred) {
6654 			break;
6655 		}
6656 
6657 		pkt_cnt = 0;
6658 		pktqhead = pkt_newidx = NULL;
6659 		pkt_cnt_newidx = 0;
6660 
6661 		DHD_RING_LOCK(ring->ring_lock, flags);
6662 
6663 		/* Get the address of the next message to be read from ring */
6664 		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
6665 		if (msg_addr == NULL) {
6666 			DHD_RING_UNLOCK(ring->ring_lock, flags);
6667 			break;
6668 		}
6669 
6670 		while (msg_len > 0) {
6671 			msg = (host_rxbuf_cmpl_t *)msg_addr;
6672 
6673 			/* Wait until DMA completes, then fetch msg_type */
6674 			sync = prot->d2h_sync_cb(dhd, ring, &msg->cmn_hdr, item_len);
6675 			/*
6676 			 * Update the curr_rd to the current index in the ring, from where
6677 			 * the work item is fetched. This way if the fetched work item
6678 			 * fails in LIVELOCK, we can print the exact read index in the ring
6679 			 * that shows up the corrupted work item.
6680 			 */
6681 			if ((ring->curr_rd + 1) >= ring->max_items) {
6682 				ring->curr_rd = 0;
6683 			} else {
6684 				ring->curr_rd += 1;
6685 			}
6686 
6687 			if (!sync) {
6688 				msg_len -= item_len;
6689 				msg_addr += item_len;
6690 				continue;
6691 			}
6692 
6693 			pktid = ltoh32(msg->cmn_hdr.request_id);
6694 
6695 #ifdef DHD_PKTID_AUDIT_RING
6696 			DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_rx_map, pktid,
6697 				DHD_DUPLICATE_FREE, msg, D2HRING_RXCMPLT_ITEMSIZE);
6698 #endif /* DHD_PKTID_AUDIT_RING */
6699 
6700 			pkt = DHD_PKTID_TO_NATIVE(dhd, prot->pktid_rx_map, pktid, pa,
6701 			        len, dmah, secdma, PKTTYPE_DATA_RX);
6702 			/* Sanity check of shinfo nrfrags */
6703 			if (!pkt || (dhd_check_shinfo_nrfrags(dhd, pkt, &pa, pktid) != BCME_OK)) {
6704 				msg_len -= item_len;
6705 				msg_addr += item_len;
6706 				continue;
6707 			}
6708 			dhd->prot->tot_rxcpl++;
6709 
6710 			DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
6711 
6712 #ifdef DMAMAP_STATS
6713 			dhd->dma_stats.rxdata--;
6714 			dhd->dma_stats.rxdata_sz -= len;
6715 #endif /* DMAMAP_STATS */
6716 #ifdef DHD_HMAPTEST
6717 			if ((dhd->prot->hmaptest_rx_active == HMAPTEST_D11_RX_POSTED) &&
6718 				(pktid == dhd->prot->hmaptest_rx_pktid)) {
6719 
6720 				uchar *ptr;
6721 				ptr = PKTDATA(dhd->osh, pkt) - (prot->rx_metadata_offset);
6722 				DMA_UNMAP(dhd->osh, dhd->prot->hmap_rx_buf_pa,
6723 					(uint)dhd->prot->hmap_rx_buf_len, DMA_RX, 0, dmah);
6724 				DHD_ERROR(("hmaptest: d11write rxcpl rcvd sc rxbuf pktid=0x%08x\n",
6725 					pktid));
6726 				DHD_ERROR(("hmaptest: d11write rxcpl r0_st=0x%08x r1_stat=0x%08x\n",
6727 					msg->rx_status_0, msg->rx_status_1));
6728 				DHD_ERROR(("hmaptest: d11write rxcpl rxbuf va=0x%p pa=0x%08x\n",
6729 					dhd->prot->hmap_rx_buf_va,
6730 					(uint32)PHYSADDRLO(dhd->prot->hmap_rx_buf_pa)));
6731 				DHD_ERROR(("hmaptest: d11write rxcpl pktdata va=0x%p pa=0x%08x\n",
6732 					PKTDATA(dhd->osh, pkt), (uint32)PHYSADDRLO(pa)));
6733 				memcpy(ptr, dhd->prot->hmap_rx_buf_va, dhd->prot->hmap_rx_buf_len);
6734 				dhd->prot->hmaptest_rx_active = HMAPTEST_D11_RX_INACTIVE;
6735 				dhd->prot->hmap_rx_buf_va = NULL;
6736 				dhd->prot->hmap_rx_buf_len = 0;
6737 				PHYSADDRHISET(dhd->prot->hmap_rx_buf_pa, 0);
6738 				PHYSADDRLOSET(dhd->prot->hmap_rx_buf_pa, 0);
6739 				prot->hmaptest.in_progress = FALSE;
6740 			}
6741 #endif /* DHD_HMAPTEST */
6742 			DHD_MSGBUF_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, "
6743 				"pktdata %p, metalen %d\n",
6744 				ltoh32(msg->cmn_hdr.request_id),
6745 				ltoh16(msg->data_offset),
6746 				ltoh16(msg->data_len), msg->cmn_hdr.if_id,
6747 				msg->cmn_hdr.flags, PKTDATA(dhd->osh, pkt),
6748 				ltoh16(msg->metadata_len)));
6749 
6750 			pkt_cnt++;
6751 			msg_len -= item_len;
6752 			msg_addr += item_len;
6753 
6754 #if !defined(BCM_ROUTER_DHD)
6755 #if DHD_DBG_SHOW_METADATA
6756 			if (prot->metadata_dbg && prot->rx_metadata_offset &&
6757 			        msg->metadata_len) {
6758 				uchar *ptr;
6759 				ptr = PKTDATA(dhd->osh, pkt) - (prot->rx_metadata_offset);
6760 				/* header followed by data */
6761 				bcm_print_bytes("rxmetadata", ptr, msg->metadata_len);
6762 				dhd_prot_print_metadata(dhd, ptr, msg->metadata_len);
6763 			}
6764 #endif /* DHD_DBG_SHOW_METADATA */
6765 #endif /* !BCM_ROUTER_DHD */
6766 
6767 			/* data_offset from buf start */
6768 			if (ltoh16(msg->data_offset)) {
6769 				/* data offset given from dongle after split rx */
6770 				PKTPULL(dhd->osh, pkt, ltoh16(msg->data_offset));
6771 			}
6772 			else if (prot->rx_dataoffset) {
6773 				/* DMA RX offset updated through shared area */
6774 				PKTPULL(dhd->osh, pkt, prot->rx_dataoffset);
6775 			}
6776 			/* Actual length of the packet */
6777 			PKTSETLEN(dhd->osh, pkt, ltoh16(msg->data_len));
6778 #ifdef DHD_PKTTS
6779 			if (dhd_get_pktts_enab(dhd) == TRUE) {
6780 				uint fwr1 = 0, fwr2 = 0;
6781 
6782 				/* firmware mark rx_pktts.tref with 0xFFFFFFFF for errors */
6783 				if (ltoh32(msg->rx_pktts.tref) != 0xFFFFFFFF) {
6784 					fwr1 = (uint)htonl(ltoh32(msg->rx_pktts.tref));
6785 					fwr2 = (uint)htonl(ltoh32(msg->rx_pktts.tref) +
6786 						ltoh16(msg->rx_pktts.d_t2));
6787 
6788 					/* check for overflow */
6789 					if (ntohl(fwr2) > ntohl(fwr1)) {
6790 						/* send rx timestamp to netlnik socket */
6791 						dhd_msgbuf_send_msg_rx_ts(dhd, pkt, fwr1, fwr2);
6792 					}
6793 				}
6794 			}
6795 #endif /* DHD_PKTTS */
6796 
6797 #if defined(WL_MONITOR)
6798 			if (dhd_monitor_enabled(dhd, ifidx)) {
6799 				if (msg->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11) {
6800 					dhd_rx_mon_pkt(dhd, msg, pkt, ifidx);
6801 					continue;
6802 				} else {
6803 					DHD_ERROR(("Received non 802.11 packet, "
6804 						"when monitor mode is enabled\n"));
6805 				}
6806 			}
6807 #endif /* WL_MONITOR */
6808 
6809 			if (!pktqhead) {
6810 				pktqhead = prevpkt = pkt;
6811 				ifidx = msg->cmn_hdr.if_id;
6812 			} else {
6813 				if (ifidx != msg->cmn_hdr.if_id) {
6814 					pkt_newidx = pkt;
6815 					if_newidx = msg->cmn_hdr.if_id;
6816 					pkt_cnt--;
6817 					pkt_cnt_newidx = 1;
6818 					break;
6819 				} else {
6820 					PKTSETNEXT(dhd->osh, prevpkt, pkt);
6821 					prevpkt = pkt;
6822 				}
6823 			}
6824 
6825 #ifdef DHD_HP2P
6826 			if (dhd->hp2p_capable && ring == prot->d2hring_hp2p_rxcpl) {
6827 #ifdef DHD_HP2P_DEBUG
6828 				bcm_print_bytes("Rxcpl", (uchar *)msg,  sizeof(host_rxbuf_cmpl_t));
6829 #endif /* DHD_HP2P_DEBUG */
6830 				dhd_update_hp2p_rxstats(dhd, msg);
6831 			}
6832 #endif /* DHD_HP2P */
6833 
6834 #ifdef DHD_TIMESYNC
6835 			if (dhd->prot->rx_ts_log_enabled) {
6836 				dhd_pkt_parse_t parse;
6837 				ts_timestamp_t *ts = (ts_timestamp_t *)&msg->ts;
6838 
6839 				memset(&parse, 0, sizeof(dhd_pkt_parse_t));
6840 				dhd_parse_proto(PKTDATA(dhd->osh, pkt), &parse);
6841 
6842 				if (parse.proto == IP_PROT_ICMP)
6843 					dhd_timesync_log_rx_timestamp(dhd->ts, ifidx,
6844 							ts->low, ts->high, &parse);
6845 			}
6846 #endif /* DHD_TIMESYNC */
6847 
6848 #ifdef DHD_LBUF_AUDIT
6849 			PKTAUDIT(dhd->osh, pkt);
6850 #endif
6851 		}
6852 
6853 		/* roll back read pointer for unprocessed message */
6854 		if (msg_len > 0) {
6855 			if (ring->rd < msg_len / item_len)
6856 				ring->rd = ring->max_items - msg_len / item_len;
6857 			else
6858 				ring->rd -= msg_len / item_len;
6859 		}
6860 
6861 		/* Update read pointer */
6862 		dhd_prot_upd_read_idx(dhd, ring);
6863 
6864 		DHD_RING_UNLOCK(ring->ring_lock, flags);
6865 
6866 		pkt = pktqhead;
6867 		for (i = 0; pkt && i < pkt_cnt; i++, pkt = nextpkt) {
6868 			nextpkt = PKTNEXT(dhd->osh, pkt);
6869 			PKTSETNEXT(dhd->osh, pkt, NULL);
6870 #ifdef DHD_RX_CHAINING
6871 			dhd_rxchain_frame(dhd, pkt, ifidx);
6872 #else
6873 			dhd_prot_rx_frame(dhd, pkt, ifidx, 1);
6874 #endif /* DHD_LB_RXP */
6875 		}
6876 
6877 		if (pkt_newidx) {
6878 #ifdef DHD_RX_CHAINING
6879 			dhd_rxchain_frame(dhd, pkt_newidx, if_newidx);
6880 #else
6881 			dhd_prot_rx_frame(dhd, pkt_newidx, if_newidx, 1);
6882 #endif /* DHD_LB_RXP */
6883 		}
6884 
6885 		pkt_cnt += pkt_cnt_newidx;
6886 
6887 		/* Post another set of rxbufs to the device */
6888 		dhd_prot_return_rxbuf(dhd, ring, 0, pkt_cnt);
6889 
6890 #ifdef DHD_RX_CHAINING
6891 		dhd_rxchain_commit(dhd);
6892 #endif
6893 
6894 		/* After batch processing, check RX bound */
6895 		n += pkt_cnt;
6896 		if (n >= bound) {
6897 			more = TRUE;
6898 			break;
6899 		}
6900 	}
6901 
6902 	/* Call lb_dispatch only if packets are queued */
6903 	if (n &&
6904 #ifdef WL_MONITOR
6905 	!(dhd_monitor_enabled(dhd, ifidx)) &&
6906 #endif /* WL_MONITOR */
6907 	TRUE) {
6908 		DHD_LB_DISPATCH_RX_PROCESS(dhd);
6909 	}
6910 
6911 	return more;
6912 
6913 }
6914 
6915 /**
6916  * Hands transmit packets (with a caller provided flow_id) over to dongle territory (the flow ring)
6917  */
6918 void
dhd_prot_update_txflowring(dhd_pub_t * dhd,uint16 flowid,void * msgring)6919 dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flowid, void *msgring)
6920 {
6921 	msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring;
6922 
6923 	if (ring == NULL) {
6924 		DHD_ERROR(("%s: NULL txflowring. exiting...\n",  __FUNCTION__));
6925 		return;
6926 	}
6927 	/* Update read pointer */
6928 	if (dhd->dma_d2h_ring_upd_support) {
6929 		ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
6930 	}
6931 
6932 	DHD_TRACE(("ringid %d flowid %d write %d read %d \n\n",
6933 		ring->idx, flowid, ring->wr, ring->rd));
6934 
6935 	/* Need more logic here, but for now use it directly */
6936 	dhd_bus_schedule_queue(dhd->bus, flowid, TRUE); /* from queue to flowring */
6937 }
6938 
6939 /** called when DHD needs to check for 'transmit complete' messages from the dongle */
6940 bool
BCMFASTPATH(dhd_prot_process_msgbuf_txcpl)6941 BCMFASTPATH(dhd_prot_process_msgbuf_txcpl)(dhd_pub_t *dhd, uint bound, int ringtype)
6942 {
6943 	bool more = TRUE;
6944 	uint n = 0;
6945 	msgbuf_ring_t *ring;
6946 	unsigned long flags;
6947 
6948 #ifdef DHD_HP2P
6949 	if (ringtype == DHD_HP2P_RING && dhd->prot->d2hring_hp2p_txcpl)
6950 		ring = dhd->prot->d2hring_hp2p_txcpl;
6951 	else
6952 #endif /* DHD_HP2P */
6953 		ring = &dhd->prot->d2hring_tx_cpln;
6954 
6955 	/* Process all the messages - DTOH direction */
6956 	while (!dhd_is_device_removed(dhd)) {
6957 		uint8 *msg_addr;
6958 		uint32 msg_len;
6959 
6960 		if (dhd_query_bus_erros(dhd)) {
6961 			more = FALSE;
6962 			break;
6963 		}
6964 
6965 		if (dhd->hang_was_sent) {
6966 			more = FALSE;
6967 			break;
6968 		}
6969 
6970 		if (dhd->smmu_fault_occurred) {
6971 			more = FALSE;
6972 			break;
6973 		}
6974 
6975 		DHD_RING_LOCK(ring->ring_lock, flags);
6976 		/* Get the address of the next message to be read from ring */
6977 		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
6978 		DHD_RING_UNLOCK(ring->ring_lock, flags);
6979 
6980 		if (msg_addr == NULL) {
6981 			more = FALSE;
6982 			break;
6983 		}
6984 
6985 		/* Prefetch data to populate the cache */
6986 		OSL_PREFETCH(msg_addr);
6987 
6988 		if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
6989 			DHD_ERROR(("%s: process %s msg addr %p len %d\n",
6990 				__FUNCTION__, ring->name, msg_addr, msg_len));
6991 		}
6992 
6993 		/* Write to dngl rd ptr */
6994 		dhd_prot_upd_read_idx(dhd, ring);
6995 
6996 		/* After batch processing, check bound */
6997 		n += msg_len / ring->item_len;
6998 		if (n >= bound) {
6999 			break;
7000 		}
7001 	}
7002 
7003 	if (n) {
7004 		/* For IDMA and HWA case, doorbell is sent along with read index update.
7005 		 * For DMA indices case ring doorbell once n items are read to sync with dongle.
7006 		 */
7007 		if (dhd->dma_h2d_ring_upd_support && !IDMA_ACTIVE(dhd)) {
7008 			dhd_prot_ring_doorbell(dhd, DHD_RDPTR_UPDATE_H2D_DB_MAGIC(ring));
7009 			dhd->prot->txcpl_db_cnt++;
7010 		}
7011 	}
7012 	return more;
7013 }
7014 
7015 int
BCMFASTPATH(dhd_prot_process_trapbuf)7016 BCMFASTPATH(dhd_prot_process_trapbuf)(dhd_pub_t *dhd)
7017 {
7018 	uint32 data;
7019 	dhd_dma_buf_t *trap_addr = &dhd->prot->fw_trap_buf;
7020 
7021 	/* Interrupts can come in before this struct
7022 	 *  has been initialized.
7023 	 */
7024 	if (trap_addr->va == NULL) {
7025 		DHD_ERROR(("%s: trap_addr->va is NULL\n", __FUNCTION__));
7026 		return 0;
7027 	}
7028 
7029 	OSL_CACHE_INV((void *)trap_addr->va, sizeof(uint32));
7030 	data = *(uint32 *)(trap_addr->va);
7031 
7032 	if (data & D2H_DEV_FWHALT) {
7033 		if (dhd->db7_trap.fw_db7w_trap_inprogress) {
7034 			DHD_ERROR(("DB7 FW responded 0x%04x\n", data));
7035 		} else {
7036 			DHD_ERROR(("Firmware trapped and trap_data is 0x%04x\n", data));
7037 		}
7038 
7039 		if (data & D2H_DEV_EXT_TRAP_DATA)
7040 		{
7041 			if (dhd->extended_trap_data) {
7042 				OSL_CACHE_INV((void *)trap_addr->va,
7043 				       BCMPCIE_EXT_TRAP_DATA_MAXLEN);
7044 				memcpy(dhd->extended_trap_data, (uint32 *)trap_addr->va,
7045 				       BCMPCIE_EXT_TRAP_DATA_MAXLEN);
7046 			}
7047 			if (dhd->db7_trap.fw_db7w_trap_inprogress == FALSE) {
7048 				DHD_ERROR(("Extended trap data available\n"));
7049 			}
7050 		}
7051 #ifdef BT_OVER_PCIE
7052 		if (data & D2H_DEV_TRAP_DUE_TO_BT) {
7053 			DHD_ERROR(("WLAN Firmware trapped due to BT\n"));
7054 			dhd->dongle_trap_due_to_bt = TRUE;
7055 		}
7056 #endif /* BT_OVER_PCIE */
7057 		return data;
7058 	}
7059 	return 0;
7060 }
7061 
7062 /** called when DHD needs to check for 'ioctl complete' messages from the dongle */
7063 int
BCMFASTPATH(dhd_prot_process_ctrlbuf)7064 BCMFASTPATH(dhd_prot_process_ctrlbuf)(dhd_pub_t *dhd)
7065 {
7066 	dhd_prot_t *prot = dhd->prot;
7067 	msgbuf_ring_t *ring = &prot->d2hring_ctrl_cpln;
7068 	unsigned long flags;
7069 
7070 	/* Process all the messages - DTOH direction */
7071 	while (!dhd_is_device_removed(dhd)) {
7072 		uint8 *msg_addr;
7073 		uint32 msg_len;
7074 
7075 		if (dhd_query_bus_erros(dhd)) {
7076 			break;
7077 		}
7078 
7079 		if (dhd->hang_was_sent) {
7080 			break;
7081 		}
7082 
7083 		if (dhd->smmu_fault_occurred) {
7084 			break;
7085 		}
7086 
7087 		DHD_RING_LOCK(ring->ring_lock, flags);
7088 		/* Get the address of the next message to be read from ring */
7089 		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
7090 		DHD_RING_UNLOCK(ring->ring_lock, flags);
7091 
7092 		if (msg_addr == NULL) {
7093 			break;
7094 		}
7095 
7096 		/* Prefetch data to populate the cache */
7097 		OSL_PREFETCH(msg_addr);
7098 		if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
7099 			DHD_ERROR(("%s: process %s msg addr %p len %d\n",
7100 				__FUNCTION__, ring->name, msg_addr, msg_len));
7101 		}
7102 
7103 		/* Write to dngl rd ptr */
7104 		dhd_prot_upd_read_idx(dhd, ring);
7105 	}
7106 
7107 	return 0;
7108 }
7109 
7110 /**
7111  * Consume messages out of the D2H ring. Ensure that the message's DMA to host
7112  * memory has completed, before invoking the message handler via a table lookup
7113  * of the cmn_msg_hdr::msg_type.
7114  */
7115 static int
BCMFASTPATH(dhd_prot_process_msgtype)7116 BCMFASTPATH(dhd_prot_process_msgtype)(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len)
7117 {
7118 	uint32 buf_len = len;
7119 	uint16 item_len;
7120 	uint8 msg_type;
7121 	cmn_msg_hdr_t *msg = NULL;
7122 	int ret = BCME_OK;
7123 
7124 	ASSERT(ring);
7125 	item_len = ring->item_len;
7126 	if (item_len == 0) {
7127 		DHD_ERROR(("%s: ringidx %d, item_len %d buf_len %d \n",
7128 			__FUNCTION__, ring->idx, item_len, buf_len));
7129 		return BCME_ERROR;
7130 	}
7131 
7132 	while (buf_len > 0) {
7133 		if (dhd->hang_was_sent) {
7134 			ret = BCME_ERROR;
7135 			goto done;
7136 		}
7137 
7138 		if (dhd->smmu_fault_occurred) {
7139 			ret = BCME_ERROR;
7140 			goto done;
7141 		}
7142 
7143 		msg = (cmn_msg_hdr_t *)buf;
7144 
7145 		/* Wait until DMA completes, then fetch msg_type */
7146 		msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, item_len);
7147 
7148 		/*
7149 		 * Update the curr_rd to the current index in the ring, from where
7150 		 * the work item is fetched. This way if the fetched work item
7151 		 * fails in LIVELOCK, we can print the exact read index in the ring
7152 		 * that shows up the corrupted work item.
7153 		 */
7154 		if ((ring->curr_rd + 1) >= ring->max_items) {
7155 			ring->curr_rd = 0;
7156 		} else {
7157 			ring->curr_rd += 1;
7158 		}
7159 
7160 		/* Prefetch data to populate the cache */
7161 		OSL_PREFETCH(buf + item_len);
7162 
7163 		DHD_MSGBUF_INFO(("msg_type %d item_len %d buf_len %d\n",
7164 			msg_type, item_len, buf_len));
7165 
7166 		if (msg_type == MSG_TYPE_LOOPBACK) {
7167 			bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, item_len);
7168 			DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", item_len));
7169 		}
7170 
7171 		ASSERT(msg_type < DHD_PROT_FUNCS);
7172 		if (msg_type >= DHD_PROT_FUNCS) {
7173 			DHD_ERROR(("%s: msg_type %d, item_len %d buf_len %d\n",
7174 				__FUNCTION__, msg_type, item_len, buf_len));
7175 			ret = BCME_ERROR;
7176 			goto done;
7177 		}
7178 
7179 #if !defined(BCM_ROUTER_DHD)
7180 		if (msg_type == MSG_TYPE_INFO_BUF_CMPLT) {
7181 			if (ring == dhd->prot->d2hring_info_cpln) {
7182 				if (!dhd->prot->infobufpost) {
7183 					DHD_ERROR(("infobuf posted are zero,"
7184 						   "but there is a completion\n"));
7185 					goto done;
7186 				}
7187 				dhd->prot->infobufpost--;
7188 				dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
7189 				dhd_prot_process_infobuf_complete(dhd, buf);
7190 			}
7191 #ifdef BTLOG
7192 			else if (ring == dhd->prot->d2hring_btlog_cpln) {
7193 				info_buf_resp_t *resp = (info_buf_resp_t *)buf;
7194 
7195 				if (!dhd->prot->btlogbufpost) {
7196 					DHD_ERROR(("btlogbuf posted are zero,"
7197 						   "but there is a completion\n"));
7198 					goto done;
7199 				}
7200 
7201 				dhd->prot->btlogbufpost--;
7202 				if (resp->compl_hdr.status != BCMPCIE_PKT_FLUSH) {
7203 					dhd_prot_infobufpost(dhd, dhd->prot->h2dring_btlog_subn);
7204 				}
7205 				dhd_prot_process_btlog_complete(dhd, buf);
7206 			}
7207 #endif	/* BTLOG */
7208 		} else
7209 #endif	/* !defined(BCM_ROUTER_DHD) */
7210 		if (table_lookup[msg_type]) {
7211 			table_lookup[msg_type](dhd, buf);
7212 		}
7213 
7214 		if (buf_len < item_len) {
7215 			ret = BCME_ERROR;
7216 			goto done;
7217 		}
7218 		buf_len = buf_len - item_len;
7219 		buf = buf + item_len;
7220 	}
7221 
7222 done:
7223 
7224 #ifdef DHD_RX_CHAINING
7225 	dhd_rxchain_commit(dhd);
7226 #endif
7227 
7228 	return ret;
7229 } /* dhd_prot_process_msgtype */
7230 
7231 static void
dhd_prot_noop(dhd_pub_t * dhd,void * msg)7232 dhd_prot_noop(dhd_pub_t *dhd, void *msg)
7233 {
7234 	return;
7235 }
7236 
7237 /** called on MSG_TYPE_RING_STATUS message received from dongle */
7238 static void
dhd_prot_ringstatus_process(dhd_pub_t * dhd,void * msg)7239 dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg)
7240 {
7241 	pcie_ring_status_t *ring_status = (pcie_ring_status_t *) msg;
7242 	uint32 request_id = ltoh32(ring_status->cmn_hdr.request_id);
7243 	uint16 status = ltoh16(ring_status->compl_hdr.status);
7244 	uint16 ring_id = ltoh16(ring_status->compl_hdr.flow_ring_id);
7245 
7246 	DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, write_idx %d \n",
7247 		request_id, status, ring_id, ltoh16(ring_status->write_idx)));
7248 
7249 	if (ltoh16(ring_status->compl_hdr.ring_id) != BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT)
7250 		return;
7251 	if (status == BCMPCIE_BAD_PHASE) {
7252 		/* bad phase report from */
7253 		/* XXX: if the request is ioctl request finish the ioctl, rather than timing out */
7254 		DHD_ERROR(("Bad phase\n"));
7255 	}
7256 	if (status != BCMPCIE_BADOPTION)
7257 		return;
7258 
7259 	if (request_id == DHD_H2D_DBGRING_REQ_PKTID) {
7260 		/* XXX: see if the debug ring create is pending */
7261 		if (dhd->prot->h2dring_info_subn != NULL) {
7262 			if (dhd->prot->h2dring_info_subn->create_pending == TRUE) {
7263 				DHD_ERROR(("H2D ring create failed for info ring\n"));
7264 				dhd->prot->h2dring_info_subn->create_pending = FALSE;
7265 			}
7266 			else
7267 				DHD_ERROR(("ring create ID for a ring, create not pending\n"));
7268 		} else {
7269 			DHD_ERROR(("%s info submit ring doesn't exist\n", __FUNCTION__));
7270 		}
7271 	}
7272 	else if (request_id == DHD_D2H_DBGRING_REQ_PKTID) {
7273 		/* XXX: see if the debug ring create is pending */
7274 		if (dhd->prot->d2hring_info_cpln != NULL) {
7275 			if (dhd->prot->d2hring_info_cpln->create_pending == TRUE) {
7276 				DHD_ERROR(("D2H ring create failed for info ring\n"));
7277 				dhd->prot->d2hring_info_cpln->create_pending = FALSE;
7278 			}
7279 			else
7280 				DHD_ERROR(("ring create ID for info ring, create not pending\n"));
7281 		} else {
7282 			DHD_ERROR(("%s info cpl ring doesn't exist\n", __FUNCTION__));
7283 		}
7284 	}
7285 #ifdef BTLOG
7286 	else if (request_id == DHD_H2D_BTLOGRING_REQ_PKTID) {
7287 		/* XXX: see if the debug ring create is pending */
7288 		if (dhd->prot->h2dring_btlog_subn != NULL) {
7289 			if (dhd->prot->h2dring_btlog_subn->create_pending == TRUE) {
7290 				DHD_ERROR(("H2D ring create failed for btlog ring\n"));
7291 				dhd->prot->h2dring_btlog_subn->create_pending = FALSE;
7292 			}
7293 			else
7294 				DHD_ERROR(("ring create ID for a ring, create not pending\n"));
7295 		} else {
7296 			DHD_ERROR(("%s btlog submit ring doesn't exist\n", __FUNCTION__));
7297 		}
7298 	}
7299 	else if (request_id == DHD_D2H_BTLOGRING_REQ_PKTID) {
7300 		/* XXX: see if the debug ring create is pending */
7301 		if (dhd->prot->d2hring_btlog_cpln != NULL) {
7302 			if (dhd->prot->d2hring_btlog_cpln->create_pending == TRUE) {
7303 				DHD_ERROR(("D2H ring create failed for btlog ring\n"));
7304 				dhd->prot->d2hring_btlog_cpln->create_pending = FALSE;
7305 			}
7306 			else
7307 				DHD_ERROR(("ring create ID for btlog ring, create not pending\n"));
7308 		} else {
7309 			DHD_ERROR(("%s btlog cpl ring doesn't exist\n", __FUNCTION__));
7310 		}
7311 	}
7312 #endif	/* BTLOG */
7313 #ifdef DHD_HP2P
7314 	else if (request_id == DHD_D2H_HPPRING_TXREQ_PKTID) {
7315 		/* XXX: see if the HPP txcmpl ring create is pending */
7316 		if (dhd->prot->d2hring_hp2p_txcpl != NULL) {
7317 			if (dhd->prot->d2hring_hp2p_txcpl->create_pending == TRUE) {
7318 				DHD_ERROR(("H2D ring create failed for hp2p ring\n"));
7319 				dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
7320 			}
7321 			else
7322 				DHD_ERROR(("ring create ID for a ring, create not pending\n"));
7323 		} else {
7324 			DHD_ERROR(("%s hp2p txcmpl ring doesn't exist\n", __FUNCTION__));
7325 		}
7326 	}
7327 	else if (request_id == DHD_D2H_HPPRING_RXREQ_PKTID) {
7328 		/* XXX: see if the hp2p rxcmpl ring create is pending */
7329 		if (dhd->prot->d2hring_hp2p_rxcpl != NULL) {
7330 			if (dhd->prot->d2hring_hp2p_rxcpl->create_pending == TRUE) {
7331 				DHD_ERROR(("D2H ring create failed for hp2p rxcmpl ring\n"));
7332 				dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
7333 			}
7334 			else
7335 				DHD_ERROR(("ring create ID for hp2p rxcmpl ring, not pending\n"));
7336 		} else {
7337 			DHD_ERROR(("%s hp2p rxcpl ring doesn't exist\n", __FUNCTION__));
7338 		}
7339 	}
7340 #endif /* DHD_HP2P */
7341 	else {
7342 		DHD_ERROR(("don;t know how to pair with original request\n"));
7343 	}
7344 	/* How do we track this to pair it with ??? */
7345 	return;
7346 }
7347 
7348 /** called on MSG_TYPE_GEN_STATUS ('general status') message received from dongle */
7349 static void
dhd_prot_genstatus_process(dhd_pub_t * dhd,void * msg)7350 dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg)
7351 {
7352 	pcie_gen_status_t *gen_status = (pcie_gen_status_t *)msg;
7353 	DHD_ERROR(("ERROR: gen status: request_id %d, STATUS 0x%04x, flow ring %d \n",
7354 		gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status,
7355 		gen_status->compl_hdr.flow_ring_id));
7356 
7357 	/* How do we track this to pair it with ??? */
7358 	return;
7359 }
7360 
7361 /**
7362  * Called on MSG_TYPE_IOCTLPTR_REQ_ACK ('ioctl ack') message received from dongle, meaning that the
7363  * dongle received the ioctl message in dongle memory.
7364  */
7365 static void
dhd_prot_ioctack_process(dhd_pub_t * dhd,void * msg)7366 dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg)
7367 {
7368 	ioctl_req_ack_msg_t *ioct_ack = (ioctl_req_ack_msg_t *)msg;
7369 	unsigned long flags;
7370 #if defined(DHD_PKTID_AUDIT_RING) && !defined(BCM_ROUTER_DHD)
7371 	uint32 pktid = ltoh32(ioct_ack->cmn_hdr.request_id);
7372 #endif /* DHD_PKTID_AUDIT_RING && !BCM_ROUTER_DHD */
7373 
7374 #if defined(DHD_PKTID_AUDIT_RING) && !defined(BCM_ROUTER_DHD)
7375 	/* Skip audit for ADHD_IOCTL_REQ_PKTID = 0xFFFE */
7376 	if (pktid != DHD_IOCTL_REQ_PKTID) {
7377 #ifndef IOCTLRESP_USE_CONSTMEM
7378 		DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
7379 			DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
7380 #else
7381 		DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
7382 			DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
7383 #endif /* !IOCTLRESP_USE_CONSTMEM */
7384 	}
7385 #endif /* DHD_PKTID_AUDIT_RING && !BCM_ROUTER_DHD */
7386 
7387 	dhd->prot->ioctl_ack_time = OSL_LOCALTIME_NS();
7388 
7389 	DHD_GENERAL_LOCK(dhd, flags);
7390 	if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) &&
7391 		(dhd->prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
7392 		dhd->prot->ioctl_state &= ~MSGBUF_IOCTL_ACK_PENDING;
7393 	} else {
7394 		DHD_ERROR(("%s: received ioctl ACK with state %02x trans_id = %d\n",
7395 			__FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
7396 		prhex("dhd_prot_ioctack_process:",
7397 			(uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
7398 	}
7399 	DHD_GENERAL_UNLOCK(dhd, flags);
7400 
7401 	DHD_CTL(("ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n",
7402 		ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status,
7403 		ioct_ack->compl_hdr.flow_ring_id));
7404 	if (ioct_ack->compl_hdr.status != 0)  {
7405 		DHD_ERROR(("got an error status for the ioctl request...need to handle that\n"));
7406 		/* FIXME: should we fail the pending IOCTL compelteion wait process... */
7407 	}
7408 #ifdef REPORT_FATAL_TIMEOUTS
7409 	else {
7410 		dhd_stop_bus_timer(dhd);
7411 	}
7412 #endif /* REPORT_FATAL_TIMEOUTS */
7413 }
7414 
7415 /** called on MSG_TYPE_IOCTL_CMPLT message received from dongle */
7416 static void
dhd_prot_ioctcmplt_process(dhd_pub_t * dhd,void * msg)7417 dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg)
7418 {
7419 	dhd_prot_t *prot = dhd->prot;
7420 	uint32 pkt_id, xt_id;
7421 	ioctl_comp_resp_msg_t *ioct_resp = (ioctl_comp_resp_msg_t *)msg;
7422 	void *pkt;
7423 	unsigned long flags;
7424 	dhd_dma_buf_t retbuf;
7425 #ifdef REPORT_FATAL_TIMEOUTS
7426 	uint16	dhd_xt_id;
7427 #endif
7428 
7429 	/* Check for ioctl timeout induce flag, which is set by firing
7430 	 * dhd iovar to induce IOCTL timeout. If flag is set,
7431 	 * return from here, which results in to IOCTL timeout.
7432 	 */
7433 	if (dhd->dhd_induce_error == DHD_INDUCE_IOCTL_TIMEOUT) {
7434 		DHD_ERROR(("%s: Inducing resumed on timeout\n", __FUNCTION__));
7435 		return;
7436 	}
7437 
7438 	memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
7439 
7440 	pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id);
7441 
7442 #if defined(DHD_PKTID_AUDIT_RING) && !defined(BCM_ROUTER_DHD)
7443 #ifndef IOCTLRESP_USE_CONSTMEM
7444 	DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_ctrl_map, pkt_id,
7445 		DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
7446 #else
7447 	DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_map_handle_ioctl, pkt_id,
7448 		DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
7449 #endif /* !IOCTLRESP_USE_CONSTMEM */
7450 #endif /* DHD_PKTID_AUDIT_RING && !BCM_ROUTER_DHD */
7451 
7452 	DHD_GENERAL_LOCK(dhd, flags);
7453 	if ((prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) ||
7454 		!(prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
7455 		DHD_ERROR(("%s: received ioctl response with state %02x trans_id = %d\n",
7456 			__FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
7457 		prhex("dhd_prot_ioctcmplt_process:",
7458 			(uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
7459 		DHD_GENERAL_UNLOCK(dhd, flags);
7460 		return;
7461 	}
7462 
7463 	dhd->prot->ioctl_cmplt_time = OSL_LOCALTIME_NS();
7464 
7465 	/* Clear Response pending bit */
7466 	prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING;
7467 	DHD_GENERAL_UNLOCK(dhd, flags);
7468 
7469 #ifndef IOCTLRESP_USE_CONSTMEM
7470 	pkt = dhd_prot_packet_get(dhd, pkt_id, PKTTYPE_IOCTL_RX, TRUE);
7471 #else
7472 	dhd_prot_ioctl_ret_buffer_get(dhd, pkt_id, &retbuf);
7473 	pkt = retbuf.va;
7474 #endif /* !IOCTLRESP_USE_CONSTMEM */
7475 	if (!pkt) {
7476 		DHD_ERROR(("%s: received ioctl response with NULL pkt\n", __FUNCTION__));
7477 		prhex("dhd_prot_ioctcmplt_process:",
7478 			(uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
7479 		return;
7480 	}
7481 
7482 	prot->ioctl_resplen = ltoh16(ioct_resp->resp_len);
7483 	prot->ioctl_status = ltoh16(ioct_resp->compl_hdr.status);
7484 	xt_id = ltoh16(ioct_resp->trans_id);
7485 
7486 	if (xt_id != prot->ioctl_trans_id || prot->curr_ioctl_cmd != ioct_resp->cmd) {
7487 		DHD_ERROR(("%s: transaction id(%d %d) or cmd(%d %d) mismatch\n",
7488 			__FUNCTION__, xt_id, prot->ioctl_trans_id,
7489 			prot->curr_ioctl_cmd, ioct_resp->cmd));
7490 #ifdef REPORT_FATAL_TIMEOUTS
7491 		dhd_stop_cmd_timer(dhd);
7492 #endif /* REPORT_FATAL_TIMEOUTS */
7493 		dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_ERROR);
7494 		dhd_prot_debug_info_print(dhd);
7495 #ifdef DHD_FW_COREDUMP
7496 		if (dhd->memdump_enabled) {
7497 			/* collect core dump */
7498 			dhd->memdump_type = DUMP_TYPE_TRANS_ID_MISMATCH;
7499 			dhd_bus_mem_dump(dhd);
7500 		}
7501 #else
7502 		ASSERT(0);
7503 #endif /* DHD_FW_COREDUMP */
7504 		dhd_schedule_reset(dhd);
7505 		goto exit;
7506 	}
7507 #ifdef REPORT_FATAL_TIMEOUTS
7508 	dhd_xt_id = dhd_get_request_id(dhd);
7509 	if (xt_id == dhd_xt_id) {
7510 		dhd_stop_cmd_timer(dhd);
7511 	} else {
7512 		DHD_ERROR(("%s: Cmd timer not stopped received xt_id %d stored xt_id %d",
7513 			__FUNCTION__, xt_id, dhd_xt_id));
7514 	}
7515 #endif /* REPORT_FATAL_TIMEOUTS */
7516 	DHD_CTL(("IOCTL_COMPLETE: req_id %x transid %d status %x resplen %d\n",
7517 		pkt_id, xt_id, prot->ioctl_status, prot->ioctl_resplen));
7518 
7519 	if (prot->ioctl_resplen > 0) {
7520 #ifndef IOCTLRESP_USE_CONSTMEM
7521 		bcopy(PKTDATA(dhd->osh, pkt), prot->retbuf.va, prot->ioctl_resplen);
7522 #else
7523 		bcopy(pkt, prot->retbuf.va, prot->ioctl_resplen);
7524 #endif /* !IOCTLRESP_USE_CONSTMEM */
7525 	}
7526 
7527 	/* wake up any dhd_os_ioctl_resp_wait() */
7528 	dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_SUCCESS);
7529 
7530 exit:
7531 #ifndef IOCTLRESP_USE_CONSTMEM
7532 	dhd_prot_packet_free(dhd, pkt,
7533 		PKTTYPE_IOCTL_RX, FALSE);
7534 #else
7535 	free_ioctl_return_buffer(dhd, &retbuf);
7536 #endif /* !IOCTLRESP_USE_CONSTMEM */
7537 
7538 	/* Post another ioctl buf to the device */
7539 	if (prot->cur_ioctlresp_bufs_posted > 0) {
7540 		prot->cur_ioctlresp_bufs_posted--;
7541 	}
7542 
7543 	dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
7544 }
7545 
7546 int
dhd_prot_check_tx_resource(dhd_pub_t * dhd)7547 dhd_prot_check_tx_resource(dhd_pub_t *dhd)
7548 {
7549 	return dhd->prot->no_tx_resource;
7550 }
7551 
7552 #ifdef DHD_PKTTS
7553 /**
7554  * dhd_msgbuf_get_ip_info - this api finds following (ipv4 and ipv6 are supported)
7555  * 1. pointer to data portion of pkt
7556  * 2. five tuple checksum of pkt
7557  *   = {scr_ip, dst_ip, src_port, dst_port, proto}
7558  * 3. ip_prec
7559  *
7560  * @dhdp: pointer to dhd_pub object
7561  * @pkt: packet pointer
7562  * @ptr: retuns pointer to data portion of pkt
7563  * @chksum: returns five tuple checksum of pkt
7564  * @prec: returns ip precedence
7565  * @tcp_seqno: returns tcp sequnce number
7566  *
7567  * returns packet length remaining after tcp/udp header or BCME_ERROR.
7568  */
7569 static int
dhd_msgbuf_get_ip_info(dhd_pub_t * dhdp,void * pkt,void ** ptr,uint32 * chksum,uint32 * prec,uint32 * tcp_seqno,uint32 * tcp_ackno)7570 dhd_msgbuf_get_ip_info(dhd_pub_t *dhdp, void *pkt, void **ptr, uint32 *chksum,
7571 	uint32 *prec, uint32 *tcp_seqno, uint32 *tcp_ackno)
7572 {
7573 	char *pdata;
7574 	uint plen;
7575 	uint32 type, len;
7576 	uint32 checksum = 0;
7577 	uint8 dscp_prio = 0;
7578 	struct bcmtcp_hdr *tcp = NULL;
7579 
7580 	pdata = PKTDATA(dhdp->osh, pkt);
7581 	plen = PKTLEN(dhdp->osh, pkt);
7582 
7583 	/* Ethernet header */
7584 	if (plen < ETHER_HDR_LEN) {
7585 		return BCME_ERROR;
7586 	}
7587 	type = ntoh16(((struct ether_header *)pdata)->ether_type);
7588 	pdata += ETHER_HDR_LEN;
7589 	plen -= ETHER_HDR_LEN;
7590 
7591 	if ((type == ETHER_TYPE_IP) ||
7592 		(type == ETHER_TYPE_IPV6)) {
7593 		dscp_prio = (IP_TOS46(pdata) >> IPV4_TOS_PREC_SHIFT);
7594 	}
7595 
7596 	/* IP header (v4 or v6) */
7597 	if (type == ETHER_TYPE_IP) {
7598 		struct ipv4_hdr *iph = (struct ipv4_hdr *)pdata;
7599 		if (plen <= sizeof(*iph)) {
7600 			return BCME_ERROR;
7601 		}
7602 
7603 		len = IPV4_HLEN(iph);
7604 		if (plen <= len || IP_VER(iph) != IP_VER_4 || len < IPV4_MIN_HEADER_LEN) {
7605 			return BCME_ERROR;
7606 		}
7607 
7608 		type = IPV4_PROT(iph);
7609 		pdata += len;
7610 		plen -= len;
7611 
7612 		checksum ^= bcm_compute_xor32((volatile uint32 *)iph->src_ip,
7613 			sizeof(iph->src_ip) / sizeof(uint32));
7614 		checksum ^= bcm_compute_xor32((volatile uint32 *)iph->dst_ip,
7615 			sizeof(iph->dst_ip) / sizeof(uint32));
7616 	} else if (type == ETHER_TYPE_IPV6) {
7617 		struct ipv6_hdr *ip6h = (struct ipv6_hdr *)pdata;
7618 
7619 		if (plen <= IPV6_MIN_HLEN || IP_VER(ip6h) != IP_VER_6) {
7620 			return BCME_ERROR;
7621 		}
7622 
7623 		type = IPV6_PROT(ip6h);
7624 		pdata += IPV6_MIN_HLEN;
7625 		plen -= IPV6_MIN_HLEN;
7626 		if (IPV6_EXTHDR(type)) {
7627 			uint8 proto = 0;
7628 			int32 exth_len = ipv6_exthdr_len(pdata, &proto);
7629 			if (exth_len < 0 || ((plen -= exth_len) <= 0)) {
7630 				return BCME_ERROR;
7631 			}
7632 			type = proto;
7633 			pdata += exth_len;
7634 			plen -= exth_len;
7635 		}
7636 
7637 		checksum ^= bcm_compute_xor32((volatile uint32 *)&ip6h->saddr,
7638 			sizeof(ip6h->saddr) / sizeof(uint32));
7639 		checksum ^= bcm_compute_xor32((volatile uint32 *)&ip6h->daddr,
7640 			sizeof(ip6h->saddr) / sizeof(uint32));
7641 	}
7642 
7643 	/* return error if not TCP or UDP */
7644 	if ((type != IP_PROT_UDP) && (type != IP_PROT_TCP)) {
7645 		return BCME_ERROR;
7646 	}
7647 
7648 	/* src_port and dst_port (together 32bit) */
7649 	checksum ^= bcm_compute_xor32((volatile uint32 *)pdata, 1);
7650 	checksum ^= bcm_compute_xor32((volatile uint32 *)&type, 1);
7651 
7652 	if (type == IP_PROT_TCP) {
7653 		tcp = (struct bcmtcp_hdr *)pdata;
7654 		len = TCP_HDRLEN(pdata[TCP_HLEN_OFFSET]) << 2;
7655 	} else { /* IP_PROT_UDP */
7656 		len =	sizeof(struct bcmudp_hdr);
7657 	}
7658 
7659 	/* length check */
7660 	if (plen < len) {
7661 		return BCME_ERROR;
7662 	}
7663 
7664 	pdata += len;
7665 	plen -= len;
7666 
7667 	/* update data[0] */
7668 	*ptr = (void *)pdata;
7669 
7670 	/* update fivetuple checksum */
7671 	*chksum = checksum;
7672 
7673 	/* update ip prec */
7674 	*prec = dscp_prio;
7675 
7676 	/* update tcp sequence number */
7677 	if (tcp != NULL) {
7678 		*tcp_seqno = tcp->seq_num;
7679 		*tcp_ackno = tcp->ack_num;
7680 	}
7681 
7682 	return plen;
7683 }
7684 
7685 /**
7686  * dhd_msgbuf_send_msg_tx_ts - send pktts tx timestamp to netlnik socket
7687  *
7688  * @dhdp: pointer to dhd_pub object
7689  * @pkt: packet pointer
7690  * @fwts: firmware timestamp {fwt1..fwt4}
7691  * @version: pktlat version supported in firmware
7692  */
7693 static void
dhd_msgbuf_send_msg_tx_ts(dhd_pub_t * dhdp,void * pkt,void * fw_ts,uint16 version)7694 dhd_msgbuf_send_msg_tx_ts(dhd_pub_t *dhdp, void *pkt, void *fw_ts, uint16 version)
7695 {
7696 	bcm_to_info_tx_ts_t to_tx_info;
7697 	void *ptr = NULL;
7698 	int dlen = 0;
7699 	uint32 checksum = 0;
7700 	uint32 prec = 0;
7701 	pktts_flow_t *flow = NULL;
7702 	uint32 flow_pkt_offset = 0;
7703 	uint32 num_config = 0;
7704 	uint32 tcp_seqno = 0;
7705 	uint32 tcp_ackno = 0;
7706 
7707 	dlen = dhd_msgbuf_get_ip_info(dhdp, pkt, &ptr, &checksum, &prec, &tcp_seqno, &tcp_ackno);
7708 
7709 	flow = dhd_match_pktts_flow(dhdp, checksum, NULL, &num_config);
7710 	if (flow) {
7711 		/* there is valid config for this chksum */
7712 		flow_pkt_offset = flow->pkt_offset;
7713 	} else if (num_config) {
7714 		/* there is valid config + no matching config for this chksum */
7715 		return;
7716 	} else {
7717 		/* there is no valid config. pass all to netlink */
7718 	}
7719 
7720 	memset(&to_tx_info, 0, sizeof(to_tx_info));
7721 	to_tx_info.hdr.type = BCM_TS_TX;
7722 	to_tx_info.hdr.flowid = checksum;
7723 	to_tx_info.hdr.prec = prec;
7724 
7725 	/* special case: if flow is not configured, copy tcp seqno and ackno in xbytes */
7726 	if (!flow && tcp_seqno) {
7727 		uint32 *xbytes = (uint32 *)to_tx_info.hdr.xbytes;
7728 
7729 		(void)memcpy_s(&xbytes[0], sizeof(xbytes[0]),
7730 			((uint8 *)&tcp_seqno), sizeof(tcp_seqno));
7731 		(void)memcpy_s(&xbytes[1], sizeof(xbytes[1]),
7732 			((uint8 *)&tcp_ackno), sizeof(tcp_ackno));
7733 	} else if ((dlen > flow_pkt_offset) &&
7734 		((dlen - flow_pkt_offset) >= sizeof(to_tx_info.hdr.xbytes))) {
7735 		(void)memcpy_s(to_tx_info.hdr.xbytes, sizeof(to_tx_info.hdr.xbytes),
7736 			((uint8 *)ptr + flow_pkt_offset), sizeof(to_tx_info.hdr.xbytes));
7737 	}
7738 
7739 	to_tx_info.dhdt0 = DHD_PKT_GET_QTIME(pkt);
7740 	to_tx_info.dhdt5 = OSL_SYSUPTIME_US();
7741 
7742 	if (version == METADATA_VER_1) {
7743 		struct pktts_fwtx_v1 *fwts = (struct pktts_fwtx_v1 *)fw_ts;
7744 
7745 		to_tx_info.hdr.magic = BCM_TS_MAGIC;
7746 
7747 		to_tx_info.fwts[0] = ntohl(fwts->ts[0]);
7748 		to_tx_info.fwts[1] = ntohl(fwts->ts[1]);
7749 		to_tx_info.fwts[2] = ntohl(fwts->ts[2]);
7750 		to_tx_info.fwts[3] = ntohl(fwts->ts[3]);
7751 
7752 		dhd_send_msg_to_ts(NULL, (void *)&to_tx_info, OFFSETOF(bcm_to_info_tx_ts_t, ucts));
7753 	} else if (version == METADATA_VER_2) {
7754 		struct pktts_fwtx_v2 *fwts = (struct pktts_fwtx_v2 *)fw_ts;
7755 
7756 		to_tx_info.hdr.magic = BCM_TS_MAGIC_V2;
7757 
7758 		to_tx_info.fwts[0] = ntohl(fwts->ts[0]);
7759 		to_tx_info.fwts[1] = ntohl(fwts->ts[1]);
7760 		to_tx_info.fwts[2] = ntohl(fwts->ts[2]);
7761 		to_tx_info.fwts[3] = ntohl(fwts->ts[3]);
7762 
7763 		to_tx_info.ucts[0] = ntohl(fwts->ut[0]);
7764 		to_tx_info.ucts[1] = ntohl(fwts->ut[1]);
7765 		to_tx_info.ucts[2] = ntohl(fwts->ut[2]);
7766 		to_tx_info.ucts[3] = ntohl(fwts->ut[3]);
7767 		to_tx_info.ucts[4] = ntohl(fwts->ut[4]);
7768 
7769 		to_tx_info.uccnt[0] = ntohl(fwts->uc[0]);
7770 		to_tx_info.uccnt[1] = ntohl(fwts->uc[1]);
7771 		to_tx_info.uccnt[2] = ntohl(fwts->uc[2]);
7772 		to_tx_info.uccnt[3] = ntohl(fwts->uc[3]);
7773 		to_tx_info.uccnt[4] = ntohl(fwts->uc[4]);
7774 		to_tx_info.uccnt[5] = ntohl(fwts->uc[5]);
7775 		to_tx_info.uccnt[6] = ntohl(fwts->uc[6]);
7776 		to_tx_info.uccnt[7] = ntohl(fwts->uc[7]);
7777 
7778 		dhd_send_msg_to_ts(NULL, (void *)&to_tx_info, sizeof(to_tx_info));
7779 	}
7780 	return;
7781 }
7782 
7783 /**
7784  * dhd_msgbuf_send_msg_dx_ts - send pktts rx timestamp to netlnik socket
7785  *
7786  * @dhdp: pointer to dhd_pub object
7787  * @pkt: packet pointer
7788  * @fwr1: firmware timestamp at probe point 1
7789  * @fwr2: firmware timestamp at probe point 2
7790  */
7791 static void
dhd_msgbuf_send_msg_rx_ts(dhd_pub_t * dhdp,void * pkt,uint fwr1,uint fwr2)7792 dhd_msgbuf_send_msg_rx_ts(dhd_pub_t *dhdp, void *pkt, uint fwr1, uint fwr2)
7793 {
7794 	bcm_to_info_rx_ts_t to_rx_info;
7795 	void *ptr = NULL;
7796 	int dlen = 0;
7797 	uint32 checksum = 0;
7798 	uint32 prec = 0;
7799 	pktts_flow_t *flow = NULL;
7800 	uint32 flow_pkt_offset = 0;
7801 	uint32 num_config = 0;
7802 	uint32 tcp_seqno = 0;
7803 	uint32 tcp_ackno = 0;
7804 
7805 	dlen = dhd_msgbuf_get_ip_info(dhdp, pkt, &ptr, &checksum, &prec, &tcp_seqno, &tcp_ackno);
7806 
7807 	flow = dhd_match_pktts_flow(dhdp, checksum, NULL, &num_config);
7808 	if (flow) {
7809 		/* there is valid config for this chksum */
7810 		flow_pkt_offset = flow->pkt_offset;
7811 	} else if (num_config) {
7812 		/* there is valid config + no matching config for this chksum */
7813 		return;
7814 	} else {
7815 		/* there is no valid config. pass all to netlink */
7816 	}
7817 
7818 	memset(&to_rx_info, 0, sizeof(to_rx_info));
7819 	to_rx_info.hdr.magic = BCM_TS_MAGIC;
7820 	to_rx_info.hdr.type = BCM_TS_RX;
7821 	to_rx_info.hdr.flowid = checksum;
7822 	to_rx_info.hdr.prec = prec;
7823 
7824 	/* special case: if flow is not configured, copy tcp seqno and ackno in xbytes */
7825 	if (!flow && tcp_seqno) {
7826 		uint32 *xbytes = (uint32 *)to_rx_info.hdr.xbytes;
7827 
7828 		(void)memcpy_s(&xbytes[0], sizeof(xbytes[0]),
7829 			((uint8 *)&tcp_seqno), sizeof(tcp_seqno));
7830 		(void)memcpy_s(&xbytes[1], sizeof(xbytes[1]),
7831 			((uint8 *)&tcp_ackno), sizeof(tcp_ackno));
7832 	} else if ((dlen > flow_pkt_offset) &&
7833 		((dlen - flow_pkt_offset) >= sizeof(to_rx_info.hdr.xbytes))) {
7834 		(void)memcpy_s(to_rx_info.hdr.xbytes, sizeof(to_rx_info.hdr.xbytes),
7835 			((uint8 *)ptr + flow_pkt_offset), sizeof(to_rx_info.hdr.xbytes));
7836 	}
7837 
7838 	to_rx_info.dhdr3 = OSL_SYSUPTIME_US();
7839 
7840 	to_rx_info.fwts[0] = ntohl(fwr1);
7841 	to_rx_info.fwts[1] = ntohl(fwr2);
7842 
7843 	dhd_send_msg_to_ts(NULL, (void *)&to_rx_info, sizeof(to_rx_info));
7844 	return;
7845 }
7846 #endif /* DHD_PKTTS */
7847 
7848 /** called on MSG_TYPE_TX_STATUS message received from dongle */
7849 static void
BCMFASTPATH(dhd_prot_txstatus_process)7850 BCMFASTPATH(dhd_prot_txstatus_process)(dhd_pub_t *dhd, void *msg)
7851 {
7852 	dhd_prot_t *prot = dhd->prot;
7853 	host_txbuf_cmpl_t * txstatus;
7854 	unsigned long flags;
7855 	uint32 pktid;
7856 	void *pkt;
7857 	dmaaddr_t pa;
7858 	uint32 len;
7859 	void *dmah;
7860 	void *secdma;
7861 	bool pkt_fate;
7862 	msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;
7863 #if defined(TX_STATUS_LATENCY_STATS)
7864 	flow_info_t *flow_info;
7865 	uint64 tx_status_latency;
7866 #endif /* TX_STATUS_LATENCY_STATS */
7867 #ifdef AGG_H2D_DB
7868 	msgbuf_ring_t *flow_ring;
7869 #endif /* AGG_H2D_DB */
7870 #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
7871 	dhd_awdl_stats_t *awdl_stats;
7872 	if_flow_lkup_t *if_flow_lkup;
7873 	unsigned long awdl_stats_lock_flags;
7874 	uint8 ifindex;
7875 	uint8 role;
7876 #endif /* DHD_AWDL && AWDL_SLOT_STATS */
7877 	flow_ring_node_t *flow_ring_node;
7878 	uint16 flowid;
7879 #ifdef DHD_PKTTS
7880 	struct metadata_txcmpl_v1 meta_ts_v1;
7881 	struct metadata_txcmpl_v2 meta_ts_v2;
7882 	dhd_dma_buf_t meta_data_buf;
7883 	uint64 addr = 0;
7884 
7885 	BCM_REFERENCE(meta_ts_v1);
7886 	BCM_REFERENCE(meta_ts_v2);
7887 	BCM_REFERENCE(meta_data_buf);
7888 	BCM_REFERENCE(addr);
7889 
7890 	if ((dhd->memdump_type == DUMP_TYPE_PKTID_AUDIT_FAILURE) ||
7891 		(dhd->memdump_type == DUMP_TYPE_PKTID_INVALID)) {
7892 		DHD_ERROR_RLMT(("%s: return as invalid pktid detected\n", __FUNCTION__));
7893 		return;
7894 	}
7895 
7896 	memset(&meta_ts_v1, 0, sizeof(meta_ts_v1));
7897 	memset(&meta_ts_v2, 0, sizeof(meta_ts_v2));
7898 	memset(&meta_data_buf, 0, sizeof(meta_data_buf));
7899 #endif /* DHD_PKTTS */
7900 	txstatus = (host_txbuf_cmpl_t *)msg;
7901 
7902 	flowid = txstatus->compl_hdr.flow_ring_id;
7903 	flow_ring_node = DHD_FLOW_RING(dhd, flowid);
7904 #ifdef AGG_H2D_DB
7905 	flow_ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
7906 	OSL_ATOMIC_DEC(dhd->osh, &flow_ring->inflight);
7907 #endif /* AGG_H2D_DB */
7908 
7909 	BCM_REFERENCE(flow_ring_node);
7910 
7911 #ifdef DEVICE_TX_STUCK_DETECT
7912 	/**
7913 	 * Since we got a completion message on this flowid,
7914 	 * update tx_cmpl time stamp
7915 	 */
7916 	flow_ring_node->tx_cmpl = OSL_SYSUPTIME();
7917 	/* update host copy of rd pointer */
7918 #ifdef DHD_HP2P
7919 	if (dhd->prot->d2hring_hp2p_txcpl &&
7920 		flow_ring_node->flow_info.tid == HP2P_PRIO) {
7921 		ring = dhd->prot->d2hring_hp2p_txcpl;
7922 	}
7923 #endif /* DHD_HP2P */
7924 	ring->curr_rd++;
7925 	if (ring->curr_rd >= ring->max_items) {
7926 		ring->curr_rd = 0;
7927 	}
7928 #endif /* DEVICE_TX_STUCK_DETECT */
7929 
7930 	/* locks required to protect circular buffer accesses */
7931 	DHD_RING_LOCK(ring->ring_lock, flags);
7932 	pktid = ltoh32(txstatus->cmn_hdr.request_id);
7933 
7934 	if (dhd->pcie_txs_metadata_enable > 1) {
7935 		/* Return metadata format (little endian):
7936 		 * |<--- txstatus --->|<- metadatalen ->|
7937 		 * |____|____|________|________|________|
7938 		 * |    |    |        |        |> total delay from fetch to report (8-bit 1 = 4ms)
7939 		 * |    |    |        |> ucode delay from enqueue to completion (8-bit 1 = 4ms)
7940 		 * |    |    |> 8-bit reserved (pre-filled with original TX status by caller)
7941 		 * |    |> delay time first fetch to the last fetch (4-bit 1 = 32ms)
7942 		 * |> fetch count (4-bit)
7943 		 */
7944 		printf("TX status[%d] = %04x-%04x -> status = %d (%d/%dms + %d/%dms)\n", pktid,
7945 			ltoh16(txstatus->tx_status_ext), ltoh16(txstatus->tx_status),
7946 			(txstatus->tx_status & WLFC_CTL_PKTFLAG_MASK),
7947 			((txstatus->tx_status >> 12) & 0xf),
7948 			((txstatus->tx_status >> 8) & 0xf) * 32,
7949 			((txstatus->tx_status_ext & 0xff) * 4),
7950 			((txstatus->tx_status_ext >> 8) & 0xff) * 4);
7951 	}
7952 	pkt_fate = TRUE;
7953 
7954 #if defined(DHD_PKTID_AUDIT_RING) && !defined(BCM_ROUTER_DHD)
7955 	DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_tx_map, pktid,
7956 			DHD_DUPLICATE_FREE, msg, D2HRING_TXCMPLT_ITEMSIZE);
7957 #endif /* DHD_PKTID_AUDIT_RING && !BCM_ROUTER_DHD */
7958 
7959 	DHD_MSGBUF_INFO(("txstatus for pktid 0x%04x\n", pktid));
7960 	if (OSL_ATOMIC_DEC_RETURN(dhd->osh, &prot->active_tx_count) < 0) {
7961 		DHD_ERROR(("Extra packets are freed\n"));
7962 	}
7963 	ASSERT(pktid != 0);
7964 
7965 #ifdef DHD_HMAPTEST
7966 
7967 	if ((dhd->prot->hmaptest_tx_active == HMAPTEST_D11_TX_POSTED) &&
7968 		(pktid == dhd->prot->hmaptest_tx_pktid)) {
7969 		DHD_ERROR(("hmaptest: d11read txcpl received sc txbuf pktid=0x%08x\n", pktid));
7970 		DHD_ERROR(("hmaptest: d11read txcpl txstatus=0x%08x\n", txstatus->tx_status));
7971 		DHD_ERROR(("hmaptest: d11read txcpl sc txbuf va=0x%p pa=0x%08x\n",
7972 			dhd->prot->hmap_tx_buf_va, (uint32)PHYSADDRLO(dhd->prot->hmap_tx_buf_pa)));
7973 		dhd->prot->hmaptest_tx_active = HMAPTEST_D11_TX_INACTIVE;
7974 		dhd->prot->hmap_tx_buf_va = NULL;
7975 		dhd->prot->hmap_tx_buf_len = 0;
7976 		PHYSADDRHISET(dhd->prot->hmap_tx_buf_pa, 0);
7977 		PHYSADDRLOSET(dhd->prot->hmap_tx_buf_pa, 0);
7978 		prot->hmaptest.in_progress = FALSE;
7979 	}
7980 	/* original skb is kept as it is because its going to be freed  later in this path */
7981 #endif /* DHD_HMAPTEST */
7982 
7983 #ifdef DHD_PKTTS
7984 	if (dhd_get_pktts_enab(dhd) &&
7985 		dhd->pkt_metadata_buflen) {
7986 		/* Handle the Metadata first */
7987 		meta_data_buf.va = DHD_PKTID_RETREIVE_METADATA(dhd, dhd->prot->pktid_tx_map,
7988 			meta_data_buf.pa, meta_data_buf._alloced, meta_data_buf.dmah, pktid);
7989 		if (meta_data_buf.va) {
7990 			if (dhd->pkt_metadata_version == METADATA_VER_1) {
7991 				memcpy(&meta_ts_v1, meta_data_buf.va, sizeof(meta_ts_v1));
7992 			} else if (dhd->pkt_metadata_version == METADATA_VER_2) {
7993 				memcpy(&meta_ts_v2, meta_data_buf.va, sizeof(meta_ts_v2));
7994 			}
7995 			memcpy(&addr, &meta_data_buf.pa, sizeof(meta_data_buf.pa));
7996 			DHD_TRACE(("%s(): pktid %d retrieved mdata buffer %p "
7997 				"pa: %llx dmah: %p\r\n",  __FUNCTION__,
7998 				pktid, meta_data_buf.va, addr,
7999 				meta_data_buf.dmah));
8000 		}
8001 	}
8002 #endif /* DHD_PKTTS */
8003 
8004 	pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
8005 		pa, len, dmah, secdma, PKTTYPE_DATA_TX);
8006 	if (!pkt) {
8007 		DHD_RING_UNLOCK(ring->ring_lock, flags);
8008 #ifdef DHD_PKTTS
8009 		/*
8010 		 * Call the free function after the Ring Lock is released.
8011 		 * This is becuase pcie_free_consistent is not supposed to be
8012 		 * called with Interrupts Disabled
8013 		 */
8014 		if (meta_data_buf.va) {
8015 			DMA_FREE_CONSISTENT(dhd->osh, meta_data_buf.va, meta_data_buf._alloced,
8016 				meta_data_buf.pa, meta_data_buf.dmah);
8017 		}
8018 #endif /* DHD_PKTTS */
8019 		DHD_ERROR(("%s: received txstatus with NULL pkt\n", __FUNCTION__));
8020 		prhex("dhd_prot_txstatus_process:", (uchar *)msg, D2HRING_TXCMPLT_ITEMSIZE);
8021 #ifdef DHD_FW_COREDUMP
8022 		if (dhd->memdump_enabled) {
8023 			/* collect core dump */
8024 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
8025 			dhd_bus_mem_dump(dhd);
8026 		}
8027 #else
8028 		ASSERT(0);
8029 #endif /* DHD_FW_COREDUMP */
8030 		return;
8031 	}
8032 
8033 	if (DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map) == DHD_PKTID_MIN_AVAIL_COUNT) {
8034 		DHD_ERROR_RLMT(("%s: start tx queue as min pktids are available\n",
8035 			__FUNCTION__));
8036 		prot->pktid_txq_stop_cnt--;
8037 		dhd->prot->no_tx_resource = FALSE;
8038 		dhd_bus_start_queue(dhd->bus);
8039 	}
8040 
8041 	DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
8042 
8043 #ifdef TX_STATUS_LATENCY_STATS
8044 	/* update the tx status latency for flowid */
8045 	flow_info = &flow_ring_node->flow_info;
8046 	tx_status_latency = OSL_SYSUPTIME_US() - DHD_PKT_GET_QTIME(pkt);
8047 #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
8048 	if (dhd->pkt_latency > 0 &&
8049 		tx_status_latency > (dhd->pkt_latency)) {
8050 		DHD_ERROR(("Latency: %llu > %u aw_cnt: %u \n",
8051 			tx_status_latency, dhd->pkt_latency,
8052 			dhd->awdl_aw_counter));
8053 	}
8054 #endif /*  defined(DHD_AWDL) && defined(AWDL_SLOT_STATS) */
8055 	flow_info->cum_tx_status_latency += tx_status_latency;
8056 	flow_info->num_tx_status++;
8057 #endif /* TX_STATUS_LATENCY_STATS */
8058 #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
8059 	/* update the tx status latency when this AWDL slot is active */
8060 	if_flow_lkup = (if_flow_lkup_t *)dhd->if_flow_lkup;
8061 	ifindex = flow_ring_node->flow_info.ifindex;
8062 	role = if_flow_lkup[ifindex].role;
8063 	if (role == WLC_E_IF_ROLE_AWDL) {
8064 		awdl_stats = &dhd->awdl_stats[dhd->awdl_tx_status_slot];
8065 		DHD_AWDL_STATS_LOCK(dhd->awdl_stats_lock, awdl_stats_lock_flags);
8066 		awdl_stats->cum_tx_status_latency += tx_status_latency;
8067 		awdl_stats->num_tx_status++;
8068 		DHD_AWDL_STATS_UNLOCK(dhd->awdl_stats_lock, awdl_stats_lock_flags);
8069 	}
8070 #endif /* DHD_AWDL && AWDL_SLOT_STATS */
8071 
8072 #ifdef HOST_SFH_LLC
8073 	if (dhd->host_sfhllc_supported) {
8074 		struct ether_header eth;
8075 		if (!memcpy_s(&eth, sizeof(eth),
8076 			PKTDATA(dhd->osh, pkt), sizeof(eth))) {
8077 			if (dhd_8023_llc_to_ether_hdr(dhd->osh,
8078 				&eth, pkt) != BCME_OK) {
8079 				DHD_ERROR_RLMT(("%s: host sfh llc"
8080 					" converstion to ether failed\n",
8081 					__FUNCTION__));
8082 			}
8083 		}
8084 	}
8085 #endif /* HOST_SFH_LLC */
8086 
8087 #ifdef DMAMAP_STATS
8088 	dhd->dma_stats.txdata--;
8089 	dhd->dma_stats.txdata_sz -= len;
8090 #endif /* DMAMAP_STATS */
8091 	pkt_fate = dhd_dbg_process_tx_status(dhd, pkt, pktid,
8092 		ltoh16(txstatus->compl_hdr.status) & WLFC_CTL_PKTFLAG_MASK);
8093 #ifdef DHD_PKT_LOGGING
8094 	if (dhd->d11_tx_status) {
8095 		uint16 status = ltoh16(txstatus->compl_hdr.status) &
8096 			WLFC_CTL_PKTFLAG_MASK;
8097 		dhd_handle_pktdata(dhd, ltoh32(txstatus->cmn_hdr.if_id),
8098 			pkt, (uint8 *)PKTDATA(dhd->osh, pkt), pktid, len,
8099 			&status, NULL, TRUE, FALSE, TRUE);
8100 	}
8101 #endif /* DHD_PKT_LOGGING */
8102 #if defined(BCMPCIE) && (defined(LINUX) || defined(OEM_ANDROID) || defined(DHD_EFI))
8103 	dhd_txcomplete(dhd, pkt, pkt_fate);
8104 #ifdef DHD_4WAYM4_FAIL_DISCONNECT
8105 	dhd_eap_txcomplete(dhd, pkt, pkt_fate, txstatus->cmn_hdr.if_id);
8106 #endif /* DHD_4WAYM4_FAIL_DISCONNECT */
8107 #endif /* BCMPCIE && (defined(LINUX) || defined(OEM_ANDROID)) */
8108 
8109 #ifdef DHD_PKTTS
8110 	if (dhd_get_pktts_enab(dhd) == TRUE) {
8111 		if (dhd->pkt_metadata_buflen) {
8112 			/* firmware mark tx_pktts.tref with 0xFFFFFFFF for errors */
8113 			if ((dhd->pkt_metadata_version == METADATA_VER_1) &&
8114 					(ltoh32(meta_ts_v1.tref) != 0xFFFFFFFF)) {
8115 				struct pktts_fwtx_v1 fwts;
8116 				fwts.ts[0] = (uint32)htonl(ltoh32(meta_ts_v1.tref));
8117 				fwts.ts[1] = (uint32)htonl(ltoh32(meta_ts_v1.tref) +
8118 					ltoh16(meta_ts_v1.d_t2));
8119 				fwts.ts[2] = (uint32)htonl(ltoh32(meta_ts_v1.tref) +
8120 					ltoh16(meta_ts_v1.d_t3));
8121 				fwts.ts[3] = (uint32)htonl(ltoh32(meta_ts_v1.tref) +
8122 					ltoh16(meta_ts_v1.d_t4));
8123 				/* check for overflow */
8124 				if (ntohl(fwts.ts[3]) > ntohl(fwts.ts[0])) {
8125 					/* send tx timestamp to netlink socket */
8126 					dhd_msgbuf_send_msg_tx_ts(dhd, pkt, &fwts,
8127 						dhd->pkt_metadata_version);
8128 				}
8129 			} else if ((dhd->pkt_metadata_version == METADATA_VER_2) &&
8130 					(ltoh32(meta_ts_v2.tref) != 0xFFFFFFFF)) {
8131 				struct pktts_fwtx_v2 fwts;
8132 				fwts.ts[0] = (uint32)htonl(ltoh32(meta_ts_v2.tref));
8133 				fwts.ts[1] = (uint32)htonl(ltoh32(meta_ts_v2.tref) +
8134 					ltoh16(meta_ts_v2.d_t2));
8135 				fwts.ts[2] = (uint32)htonl(ltoh32(meta_ts_v2.tref) +
8136 					ltoh16(meta_ts_v2.d_t3));
8137 				fwts.ts[3] = (uint32)htonl(ltoh32(meta_ts_v2.tref) +
8138 					ltoh16(meta_ts_v2.d_t4));
8139 
8140 				fwts.ut[0] = (uint32)htonl(ltoh32(meta_ts_v2.tref) +
8141 					ltoh16(meta_ts_v2.u_t1));
8142 				fwts.ut[1] = (uint32)htonl(ltoh16(meta_ts_v2.u_t2));
8143 				fwts.ut[2] = (uint32)htonl(ltoh16(meta_ts_v2.u_t3));
8144 				fwts.ut[3] = (uint32)htonl(ltoh16(meta_ts_v2.u_t4));
8145 				fwts.ut[4] = (uint32)htonl(ltoh32(meta_ts_v2.tref) +
8146 					ltoh16(meta_ts_v2.u_t5));
8147 
8148 				fwts.uc[0] = (uint32)htonl(ltoh32(meta_ts_v2.u_c1));
8149 				fwts.uc[1] = (uint32)htonl(ltoh32(meta_ts_v2.u_c2));
8150 				fwts.uc[2] = (uint32)htonl(ltoh32(meta_ts_v2.u_c3));
8151 				fwts.uc[3] = (uint32)htonl(ltoh32(meta_ts_v2.u_c4));
8152 				fwts.uc[4] = (uint32)htonl(ltoh32(meta_ts_v2.u_c5));
8153 				fwts.uc[5] = (uint32)htonl(ltoh32(meta_ts_v2.u_c6));
8154 				fwts.uc[6] = (uint32)htonl(ltoh32(meta_ts_v2.u_c7));
8155 				fwts.uc[7] = (uint32)htonl(ltoh32(meta_ts_v2.u_c8));
8156 
8157 				DHD_INFO(("uct1:%x uct2:%x uct3:%x uct4:%x uct5:%x\n",
8158 					ntohl(fwts.ut[0]), ntohl(fwts.ut[1]), ntohl(fwts.ut[2]),
8159 					ntohl(fwts.ut[3]), ntohl(fwts.ut[4])));
8160 				DHD_INFO(("ucc1:%x ucc2:%x ucc3:%x ucc4:%x"
8161 					" ucc5:%x ucc6:%x ucc7:%x ucc8:%x\n",
8162 					ntohl(fwts.uc[0]), ntohl(fwts.uc[1]), ntohl(fwts.uc[2]),
8163 					ntohl(fwts.uc[3]), ntohl(fwts.uc[4]), ntohl(fwts.uc[5]),
8164 					ntohl(fwts.uc[6]), ntohl(fwts.uc[7])));
8165 				/* check for overflow */
8166 				if (ntohl(fwts.ts[3]) > ntohl(fwts.ts[0])) {
8167 					/* send tx timestamp to netlink socket */
8168 					dhd_msgbuf_send_msg_tx_ts(dhd, pkt, &fwts,
8169 						dhd->pkt_metadata_version);
8170 				}
8171 			}
8172 		} else {
8173 			/* firmware mark tx_pktts.tref with 0xFFFFFFFF for errors */
8174 			if (ltoh32(txstatus->tx_pktts.tref) != 0xFFFFFFFF) {
8175 				struct pktts_fwtx_v1 fwts;
8176 
8177 				fwts.ts[0] = (uint32)htonl(ltoh32(txstatus->tx_pktts.tref));
8178 				fwts.ts[1] = (uint32)htonl(ltoh32(txstatus->tx_pktts.tref) +
8179 					ltoh16(txstatus->tx_pktts.d_t2));
8180 				fwts.ts[2] = (uint32)htonl(ltoh32(txstatus->tx_pktts.tref) +
8181 					ltoh16(txstatus->tx_pktts.d_t3));
8182 				fwts.ts[3] = (uint32)htonl(ltoh32(txstatus->tx_pktts.tref) +
8183 					ltoh16(txstatus->compl_hdr.tx_pktts.d_t4));
8184 
8185 				/* check for overflow */
8186 				if (ntohl(fwts.ts[3]) > ntohl(fwts.ts[0])) {
8187 					/* send tx timestamp to netlnik socket */
8188 					dhd_msgbuf_send_msg_tx_ts(dhd, pkt, &fwts, METADATA_VER_1);
8189 				}
8190 			}
8191 		}
8192 	}
8193 #endif /* DHD_PKTTS */
8194 
8195 #if DHD_DBG_SHOW_METADATA
8196 	if (dhd->prot->metadata_dbg &&
8197 			dhd->prot->tx_metadata_offset && txstatus->metadata_len) {
8198 		uchar *ptr;
8199 		/* The Ethernet header of TX frame was copied and removed.
8200 		 * Here, move the data pointer forward by Ethernet header size.
8201 		 */
8202 		PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN);
8203 		ptr = PKTDATA(dhd->osh, pkt)  - (dhd->prot->tx_metadata_offset);
8204 		bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len);
8205 		dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len);
8206 	}
8207 #endif /* DHD_DBG_SHOW_METADATA */
8208 
8209 #ifdef DHD_HP2P
8210 	if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
8211 #ifdef DHD_HP2P_DEBUG
8212 		bcm_print_bytes("txcpl", (uint8 *)txstatus, sizeof(host_txbuf_cmpl_t));
8213 #endif /* DHD_HP2P_DEBUG */
8214 		dhd_update_hp2p_txstats(dhd, txstatus);
8215 	}
8216 #endif /* DHD_HP2P */
8217 
8218 #ifdef DHD_TIMESYNC
8219 	if (dhd->prot->tx_ts_log_enabled) {
8220 		dhd_pkt_parse_t parse;
8221 		ts_timestamp_t *ts = (ts_timestamp_t *)&(txstatus->ts);
8222 
8223 		memset(&parse, 0, sizeof(parse));
8224 		dhd_parse_proto(PKTDATA(dhd->osh, pkt), &parse);
8225 
8226 		if (parse.proto == IP_PROT_ICMP)
8227 			dhd_timesync_log_tx_timestamp(dhd->ts,
8228 				txstatus->compl_hdr.flow_ring_id,
8229 				txstatus->cmn_hdr.if_id,
8230 				ts->low, ts->high, &parse);
8231 	}
8232 #endif /* DHD_TIMESYNC */
8233 
8234 #ifdef DHD_LBUF_AUDIT
8235 	PKTAUDIT(dhd->osh, pkt);
8236 #endif
8237 	DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id,
8238 		txstatus->tx_status);
8239 	DHD_RING_UNLOCK(ring->ring_lock, flags);
8240 #ifdef DHD_PKTTS
8241 	if (meta_data_buf.va) {
8242 		DMA_FREE_CONSISTENT(dhd->osh, meta_data_buf.va, meta_data_buf._alloced,
8243 			meta_data_buf.pa, meta_data_buf.dmah);
8244 	}
8245 #endif /* DHD_PKTTS */
8246 #ifdef DHD_MEM_STATS
8247 	DHD_MEM_STATS_LOCK(dhd->mem_stats_lock, flags);
8248 	DHD_MSGBUF_INFO(("%s txpath_mem: %llu PKTLEN: %d\n",
8249 		__FUNCTION__, dhd->txpath_mem, PKTLEN(dhd->osh, pkt)));
8250 	dhd->txpath_mem -= PKTLEN(dhd->osh, pkt);
8251 	DHD_MEM_STATS_UNLOCK(dhd->mem_stats_lock, flags);
8252 #endif /* DHD_MEM_STATS */
8253 	PKTFREE(dhd->osh, pkt, TRUE);
8254 
8255 	return;
8256 } /* dhd_prot_txstatus_process */
8257 
8258 /* FIXME: assuming that it is getting inline data related to the event data */
8259 /** called on MSG_TYPE_WL_EVENT message received from dongle */
8260 static void
dhd_prot_event_process(dhd_pub_t * dhd,void * msg)8261 dhd_prot_event_process(dhd_pub_t *dhd, void *msg)
8262 {
8263 	wlevent_req_msg_t *evnt;
8264 	uint32 bufid;
8265 	uint16 buflen;
8266 	int ifidx = 0;
8267 	void* pkt;
8268 	dhd_prot_t *prot = dhd->prot;
8269 
8270 	/* Event complete header */
8271 	evnt = (wlevent_req_msg_t *)msg;
8272 	bufid = ltoh32(evnt->cmn_hdr.request_id);
8273 
8274 #if defined(DHD_PKTID_AUDIT_RING) && !defined(BCM_ROUTER_DHD)
8275 	DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, bufid,
8276 			DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
8277 #endif /* DHD_PKTID_AUDIT_RING && !BCM_ROUTER_DHD */
8278 
8279 	buflen = ltoh16(evnt->event_data_len);
8280 
8281 	ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr);
8282 	/* FIXME: check the event status */
8283 
8284 	/* Post another rxbuf to the device */
8285 	if (prot->cur_event_bufs_posted)
8286 		prot->cur_event_bufs_posted--;
8287 	dhd_msgbuf_rxbuf_post_event_bufs(dhd);
8288 
8289 	pkt = dhd_prot_packet_get(dhd, bufid, PKTTYPE_EVENT_RX, TRUE);
8290 
8291 	if (!pkt) {
8292 		DHD_ERROR(("%s: pkt is NULL for pktid %d\n", __FUNCTION__, bufid));
8293 		return;
8294 	}
8295 
8296 #if !defined(BCM_ROUTER_DHD)
8297 	/* FIXME: make sure the length is more than dataoffset */
8298 	/* DMA RX offset updated through shared area */
8299 	if (dhd->prot->rx_dataoffset)
8300 		PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
8301 #endif /* !BCM_ROUTER_DHD */
8302 
8303 	PKTSETLEN(dhd->osh, pkt, buflen);
8304 #ifdef DHD_LBUF_AUDIT
8305 	PKTAUDIT(dhd->osh, pkt);
8306 #endif
8307 	dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
8308 }
8309 
8310 #if !defined(BCM_ROUTER_DHD)
8311 /** called on MSG_TYPE_INFO_BUF_CMPLT message received from dongle */
8312 static void
BCMFASTPATH(dhd_prot_process_infobuf_complete)8313 BCMFASTPATH(dhd_prot_process_infobuf_complete)(dhd_pub_t *dhd, void* buf)
8314 {
8315 	info_buf_resp_t *resp;
8316 	uint32 pktid;
8317 	uint16 buflen;
8318 	void * pkt;
8319 
8320 	resp = (info_buf_resp_t *)buf;
8321 	pktid = ltoh32(resp->cmn_hdr.request_id);
8322 	buflen = ltoh16(resp->info_data_len);
8323 
8324 #ifdef DHD_PKTID_AUDIT_RING
8325 	DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
8326 			DHD_DUPLICATE_FREE, buf, D2HRING_INFO_BUFCMPLT_ITEMSIZE);
8327 #endif /* DHD_PKTID_AUDIT_RING */
8328 
8329 	DHD_MSGBUF_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d, rx_dataoffset %d\n",
8330 		pktid, buflen, resp->cmn_hdr.flags, ltoh16(resp->seqnum),
8331 		dhd->prot->rx_dataoffset));
8332 
8333 	if (dhd->debug_buf_dest_support) {
8334 		if (resp->dest < DEBUG_BUF_DEST_MAX) {
8335 			dhd->debug_buf_dest_stat[resp->dest]++;
8336 		}
8337 	}
8338 
8339 	pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_INFO_RX, TRUE);
8340 	if (!pkt)
8341 		return;
8342 
8343 #if !defined(BCM_ROUTER_DHD)
8344 	/* FIXME: make sure the length is more than dataoffset */
8345 	/* DMA RX offset updated through shared area */
8346 	if (dhd->prot->rx_dataoffset)
8347 		PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
8348 #endif /* !BCM_ROUTER_DHD */
8349 
8350 	PKTSETLEN(dhd->osh, pkt, buflen);
8351 #ifdef DHD_LBUF_AUDIT
8352 	PKTAUDIT(dhd->osh, pkt);
8353 #endif
8354 	/* info ring "debug" data, which is not a 802.3 frame, is sent/hacked with a
8355 	 * special ifidx of -1.  This is just internal to dhd to get the data to
8356 	 * dhd_linux.c:dhd_rx_frame() from here (dhd_prot_infobuf_cmplt_process).
8357 	 */
8358 	dhd_bus_rx_frame(dhd->bus, pkt, DHD_DUMMY_INFO_IF /* ifidx HACK */, 1);
8359 }
8360 #endif /* !BCM_ROUTER_DHD */
8361 
8362 /** called on MSG_TYPE_SNAPSHOT_CMPLT message received from dongle */
8363 static void
BCMFASTPATH(dhd_prot_process_snapshot_complete)8364 BCMFASTPATH(dhd_prot_process_snapshot_complete)(dhd_pub_t *dhd, void *buf)
8365 {
8366 #ifdef SNAPSHOT_UPLOAD
8367 	dhd_prot_t *prot = dhd->prot;
8368 	snapshot_resp_t *resp;
8369 	uint16 status;
8370 
8371 	resp = (snapshot_resp_t *)buf;
8372 
8373 	/* check completion status */
8374 	status = resp->compl_hdr.status;
8375 	if (status != BCMPCIE_SUCCESS) {
8376 		DHD_ERROR(("%s: failed: %s (%d)\n",
8377 			__FUNCTION__,
8378 			status == BCMPCIE_BT_DMA_ERR ? "DMA_ERR" :
8379 			status == BCMPCIE_BT_DMA_DESCR_FETCH_ERR ?
8380 				"DMA_DESCR_ERR" :
8381 			status == BCMPCIE_SNAPSHOT_ERR ? "SNAPSHOT_ERR" :
8382 			status == BCMPCIE_NOT_READY ? "NOT_READY" :
8383 			status == BCMPCIE_INVALID_DATA ? "INVALID_DATA" :
8384 			status == BCMPCIE_NO_RESPONSE ? "NO_RESPONSE" :
8385 			status == BCMPCIE_NO_CLOCK ? "NO_CLOCK" :
8386 			"", status));
8387 	}
8388 
8389 	/* length may be truncated if error occurred */
8390 	prot->snapshot_upload_len = ltoh32(resp->resp_len);
8391 	prot->snapshot_type = resp->type;
8392 	prot->snapshot_cmpl_pending = FALSE;
8393 
8394 	DHD_INFO(("%s id 0x%04x, phase 0x%02x, resp_len %d, type %d\n",
8395 		__FUNCTION__, ltoh32(resp->cmn_hdr.request_id),
8396 		resp->cmn_hdr.flags,
8397 		prot->snapshot_upload_len, prot->snapshot_type));
8398 #endif	/* SNAPSHOT_UPLOAD */
8399 }
8400 
8401 #ifdef BTLOG
8402 /** called on MSG_TYPE_BT_LOG_CMPLT message received from dongle */
8403 static void
BCMFASTPATH(dhd_prot_process_btlog_complete)8404 BCMFASTPATH(dhd_prot_process_btlog_complete)(dhd_pub_t *dhd, void* buf)
8405 {
8406 	info_buf_resp_t *resp;
8407 	uint32 pktid;
8408 	uint16 buflen;
8409 	void * pkt;
8410 
8411 	resp = (info_buf_resp_t *)buf;
8412 	pktid = ltoh32(resp->cmn_hdr.request_id);
8413 	buflen = ltoh16(resp->info_data_len);
8414 
8415 	/* check completion status */
8416 	if (resp->compl_hdr.status != BCMPCIE_SUCCESS) {
8417 		DHD_ERROR(("%s: failed completion status %d\n",
8418 			__FUNCTION__, resp->compl_hdr.status));
8419 		return;
8420 	}
8421 
8422 #ifdef DHD_PKTID_AUDIT_RING
8423 	DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
8424 			DHD_DUPLICATE_FREE, buf, D2HRING_INFO_BUFCMPLT_ITEMSIZE);
8425 #endif /* DHD_PKTID_AUDIT_RING */
8426 
8427 	DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d, rx_dataoffset %d\n",
8428 		pktid, buflen, resp->cmn_hdr.flags, ltoh16(resp->seqnum),
8429 		dhd->prot->rx_dataoffset));
8430 
8431 	pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_INFO_RX, TRUE);
8432 
8433 	if (!pkt)
8434 		return;
8435 
8436 #if !defined(BCM_ROUTER_DHD)
8437 	/* FIXME: make sure the length is more than dataoffset */
8438 	/* DMA RX offset updated through shared area */
8439 	if (dhd->prot->rx_dataoffset)
8440 		PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
8441 #endif /* !BCM_ROUTER_DHD */
8442 
8443 	PKTSETLEN(dhd->osh, pkt, buflen);
8444 	PKTSETNEXT(dhd->osh, pkt, NULL);
8445 
8446 	dhd_bus_rx_bt_log(dhd->bus, pkt);
8447 }
8448 #endif	/* BTLOG */
8449 
8450 /** Stop protocol: sync w/dongle state. */
dhd_prot_stop(dhd_pub_t * dhd)8451 void dhd_prot_stop(dhd_pub_t *dhd)
8452 {
8453 	ASSERT(dhd);
8454 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
8455 
8456 #if defined(NDIS)
8457 	if (dhd->prot) {
8458 		DHD_NATIVE_TO_PKTID_RESET(dhd, dhd->prot->pktid_ctrl_map);
8459 		DHD_NATIVE_TO_PKTID_RESET(dhd, dhd->prot->pktid_rx_map);
8460 		DHD_NATIVE_TO_PKTID_RESET(dhd, dhd->prot->pktid_tx_map);
8461 #if defined(IOCTLRESP_USE_CONSTMEM)
8462 		DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, dhd->prot->pktid_map_handle_ioctl);
8463 #endif /* DHD_PCIE_PKTID */
8464 	}
8465 #endif /* NDIS */
8466 }
8467 
8468 /* Add any protocol-specific data header.
8469  * Caller must reserve prot_hdrlen prepend space.
8470  */
8471 void
BCMFASTPATH(dhd_prot_hdrpush)8472 BCMFASTPATH(dhd_prot_hdrpush)(dhd_pub_t *dhd, int ifidx, void *PKTBUF)
8473 {
8474 	return;
8475 }
8476 
8477 uint
dhd_prot_hdrlen(dhd_pub_t * dhd,void * PKTBUF)8478 dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF)
8479 {
8480 	return 0;
8481 }
8482 
8483 #define PKTBUF pktbuf
8484 
8485 /**
8486  * Called when a tx ethernet packet has been dequeued from a flow queue, and has to be inserted in
8487  * the corresponding flow ring.
8488  */
8489 int
BCMFASTPATH(dhd_prot_txdata)8490 BCMFASTPATH(dhd_prot_txdata)(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx)
8491 {
8492 	unsigned long flags;
8493 	dhd_prot_t *prot = dhd->prot;
8494 	host_txbuf_post_t *txdesc = NULL;
8495 	dmaaddr_t pa, meta_pa;
8496 	uint8 *pktdata;
8497 	uint32 pktlen;
8498 	uint32 pktid;
8499 	uint8	prio;
8500 	uint16 flowid = 0;
8501 	uint16 alloced = 0;
8502 	uint16	headroom;
8503 	msgbuf_ring_t *ring;
8504 	flow_ring_table_t *flow_ring_table;
8505 	flow_ring_node_t *flow_ring_node;
8506 #if defined(BCMINTERNAL) && defined(LINUX)
8507 	void *pkt_to_free = NULL;
8508 #endif /* BCMINTERNAL && LINUX */
8509 #ifdef DHD_PKTTS
8510 	dhd_dma_buf_t	meta_data_buf;
8511 	uint16	meta_data_buf_len = dhd->pkt_metadata_buflen;
8512 	uint64 addr = 0;
8513 #endif /* DHD_PKTTS */
8514 	void *big_pktbuf = NULL;
8515 	uint8 dhd_udr = FALSE;
8516 	bool host_sfh_llc_reqd = dhd->host_sfhllc_supported;
8517 	bool llc_inserted = FALSE;
8518 
8519 	BCM_REFERENCE(llc_inserted);
8520 #ifdef PCIE_INB_DW
8521 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) {
8522 		DHD_ERROR(("failed to increment hostactive_devwake\n"));
8523 		return BCME_ERROR;
8524 	}
8525 #endif /* PCIE_INB_DW */
8526 
8527 	if (dhd->flow_ring_table == NULL) {
8528 		DHD_ERROR(("dhd flow_ring_table is NULL\n"));
8529 		goto fail;
8530 	}
8531 
8532 #ifdef DHD_PCIE_PKTID
8533 		if (!DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map)) {
8534 			if (dhd->prot->pktid_depleted_cnt == DHD_PKTID_DEPLETED_MAX_COUNT) {
8535 				DHD_ERROR(("%s: stop tx queue as pktid_depleted_cnt maxed\n",
8536 					__FUNCTION__));
8537 				prot->pktid_txq_stop_cnt++;
8538 				dhd_bus_stop_queue(dhd->bus);
8539 				dhd->prot->no_tx_resource = TRUE;
8540 			}
8541 			dhd->prot->pktid_depleted_cnt++;
8542 			goto fail;
8543 		} else {
8544 			dhd->prot->pktid_depleted_cnt = 0;
8545 		}
8546 #endif /* DHD_PCIE_PKTID */
8547 
8548 	if (dhd->dhd_induce_error == DHD_INDUCE_TX_BIG_PKT) {
8549 		if ((big_pktbuf = PKTGET(dhd->osh, DHD_FLOWRING_TX_BIG_PKT_SIZE, TRUE)) == NULL) {
8550 			DHD_ERROR(("%s:%d: PKTGET for txbuf failed\n", __FUNCTION__, __LINE__));
8551 			goto fail;
8552 		}
8553 
8554 		memset(PKTDATA(dhd->osh, big_pktbuf), 0xff, DHD_FLOWRING_TX_BIG_PKT_SIZE);
8555 		DHD_ERROR(("PKTBUF len = %d big_pktbuf len = %d\n", PKTLEN(dhd->osh, PKTBUF),
8556 				PKTLEN(dhd->osh, big_pktbuf)));
8557 		if (memcpy_s(PKTDATA(dhd->osh, big_pktbuf), DHD_FLOWRING_TX_BIG_PKT_SIZE,
8558 				PKTDATA(dhd->osh, PKTBUF), PKTLEN(dhd->osh, PKTBUF)) != BCME_OK) {
8559 			DHD_ERROR(("%s:%d: memcpy_s big_pktbuf failed\n", __FUNCTION__, __LINE__));
8560 			ASSERT(0);
8561 		}
8562 	}
8563 
8564 	flowid = DHD_PKT_GET_FLOWID(PKTBUF);
8565 	flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
8566 	flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
8567 
8568 	ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
8569 
8570 	/*
8571 	 * XXX:
8572 	 * JIRA SW4349-436:
8573 	 * Copying the TX Buffer to an SKB that lives in the DMA Zone
8574 	 * is done here. Previously this was done from dhd_stat_xmit
8575 	 * On conditions where the Host is pumping heavy traffic to
8576 	 * the dongle, we see that the Queue that is backing up the
8577 	 * flow rings is getting full and holds the precious memory
8578 	 * from DMA Zone, leading the host to run out of memory in DMA
8579 	 * Zone. So after this change the back up queue would continue to
8580 	 * hold the pointers from Network Stack, just before putting
8581 	 * the PHY ADDR in the flow rings, we'll do the copy.
8582 	 */
8583 #if defined(BCMINTERNAL) && defined(LINUX)
8584 	if (osl_is_flag_set(dhd->osh, OSL_PHYS_MEM_LESS_THAN_16MB)) {
8585 		struct sk_buff *skb;
8586 		/*
8587 		 * We are about to add the Ethernet header and send out,
8588 		 * copy the skb here.
8589 		 */
8590 		skb = skb_copy(PKTBUF, GFP_DMA);
8591 		if (skb == NULL) {
8592 			/*
8593 			 * Memory allocation failed, the old packet can
8594 			 * live in the queue, return BCME_NORESOURCE so
8595 			 * the caller re-queues this packet
8596 			 */
8597 			DHD_ERROR(("%s: skb_copy(DMA) failed\n", __FUNCTION__));
8598 			goto fail;
8599 		}
8600 
8601 		/*
8602 		 * Now we have copied the SKB to GFP_DMA memory, make the
8603 		 * rest of the code operate on this new SKB. Hold on to
8604 		 * the original SKB. If we don't get the pkt id or flow ring
8605 		 * space we'll free the Zone memory and return "no resource"
8606 		 * so the caller would re-queue the original SKB.
8607 		 */
8608 		pkt_to_free = PKTBUF;
8609 		PKTBUF = skb;
8610 	}
8611 #endif	/* BCMINTERNAL && LINUX */
8612 
8613 	if (dhd->dhd_induce_error == DHD_INDUCE_TX_BIG_PKT && big_pktbuf) {
8614 		PKTFREE(dhd->osh, PKTBUF, TRUE);
8615 		PKTBUF = big_pktbuf;
8616 	}
8617 
8618 	DHD_RING_LOCK(ring->ring_lock, flags);
8619 
8620 	/* Create a unique 32-bit packet id */
8621 	pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_tx_map,
8622 		PKTBUF, PKTTYPE_DATA_TX);
8623 #if defined(DHD_PCIE_PKTID)
8624 	if (pktid == DHD_PKTID_INVALID) {
8625 		DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
8626 		/*
8627 		 * If we return error here, the caller would queue the packet
8628 		 * again. So we'll just free the skb allocated in DMA Zone.
8629 		 * Since we have not freed the original SKB yet the caller would
8630 		 * requeue the same.
8631 		 */
8632 		goto err_no_res_pktfree;
8633 	}
8634 #endif /* DHD_PCIE_PKTID */
8635 
8636 	/* Reserve space in the circular buffer */
8637 	txdesc = (host_txbuf_post_t *)
8638 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
8639 	if (txdesc == NULL) {
8640 		DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n",
8641 			__FUNCTION__, __LINE__, OSL_ATOMIC_READ(dhd->osh, &prot->active_tx_count)));
8642 		goto err_free_pktid;
8643 	}
8644 	txdesc->flags = 0;
8645 
8646 	/* Extract the data pointer and length information */
8647 	pktdata = PKTDATA(dhd->osh, PKTBUF);
8648 	pktlen  = PKTLEN(dhd->osh, PKTBUF);
8649 
8650 	/* TODO: XXX: re-look into dropped packets */
8651 	DHD_DBG_PKT_MON_TX(dhd, PKTBUF, pktid);
8652 
8653 	dhd_handle_pktdata(dhd, ifidx, PKTBUF, pktdata, pktid,
8654 		pktlen, NULL, &dhd_udr, TRUE, FALSE, TRUE);
8655 
8656 #if defined(BCMINTERNAL) && defined(LINUX)
8657 	/*
8658 	 * We have got all the resources, pktid and ring space
8659 	 * so we can safely free the original SKB here.
8660 	 */
8661 	if (osl_is_flag_set(dhd->osh, OSL_PHYS_MEM_LESS_THAN_16MB))
8662 		PKTCFREE(dhd->osh, pkt_to_free, FALSE);
8663 #endif	/* BCMINTERNAL && LINUX */
8664 
8665 	/* Ethernet header - contains ethertype field
8666 	* Copy before we cache flush packet using DMA_MAP
8667 	*/
8668 	bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN);
8669 
8670 #ifdef DHD_AWDL
8671 	/* the awdl ifidx will always have a non-zero value
8672 	 * if the awdl iface is created. This is because the
8673 	 * primary iface (usually eth1) will always have ifidx of 0.
8674 	 * Hence we can check for non-zero value of awdl ifidx to
8675 	 * see if awdl iface is created or not
8676 	 */
8677 	if (dhd->awdl_llc_enabled &&
8678 		dhd->awdl_ifidx && ifidx == dhd->awdl_ifidx) {
8679 		if (host_sfh_llc_reqd) {
8680 			/* if FW supports host sfh llc insertion
8681 			 * then BOTH sfh and llc needs to be inserted
8682 			 * in which case the host LLC only path
8683 			 * in FW will not be exercised - which is the
8684 			 * objective of this feature. Hence in such a
8685 			 * case disable awdl llc insertion
8686 			 */
8687 			DHD_ERROR_RLMT(("%s: FW supports host sfh + llc, this is"
8688 				"is incompatible with awdl llc insertion"
8689 				" disable host sfh llc support in FW and try\n",
8690 				__FUNCTION__));
8691 		} else {
8692 			if (dhd_ether_to_awdl_llc_hdr(dhd, (struct ether_header *)pktdata,
8693 				PKTBUF) == BCME_OK) {
8694 			llc_inserted = TRUE;
8695 			/* in work item change ether type to len by
8696 			 * re-copying the ether header
8697 			 */
8698 			memcpy_s(txdesc->txhdr, ETHER_HDR_LEN, PKTDATA(dhd->osh, PKTBUF),
8699 				ETHER_HDR_LEN);
8700 			} else {
8701 				goto err_rollback_idx;
8702 			}
8703 		}
8704 	}
8705 #endif /* DHD_AWDL */
8706 
8707 #ifdef HOST_SFH_LLC
8708 	if (host_sfh_llc_reqd) {
8709 		if (dhd_ether_to_8023_hdr(dhd->osh, (struct ether_header *)pktdata,
8710 				PKTBUF) == BCME_OK) {
8711 			/* adjust the data pointer and length information */
8712 			pktdata = PKTDATA(dhd->osh, PKTBUF);
8713 			pktlen  = PKTLEN(dhd->osh, PKTBUF);
8714 			txdesc->flags |= BCMPCIE_TXPOST_FLAGS_HOST_SFH_LLC;
8715 		} else {
8716 			goto err_rollback_idx;
8717 		}
8718 	} else
8719 #endif /* HOST_SFH_LLC */
8720 	{
8721 		/* Extract the ethernet header and adjust the data pointer and length */
8722 		pktlen = PKTLEN(dhd->osh, PKTBUF) - ETHER_HDR_LEN;
8723 		pktdata = PKTPULL(dhd->osh, PKTBUF, ETHER_HDR_LEN);
8724 	}
8725 
8726 	/* Map the data pointer to a DMA-able address */
8727 	pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0);
8728 
8729 	if (PHYSADDRISZERO(pa)) {
8730 		DHD_ERROR(("%s: Something really bad, unless 0 is "
8731 			"a valid phyaddr for pa\n", __FUNCTION__));
8732 		ASSERT(0);
8733 		/* XXX if ASSERT() doesn't work like as Android platform,
8734 		 * try to requeue the packet to the backup queue.
8735 		 */
8736 		goto err_rollback_idx;
8737 	}
8738 
8739 #ifdef DMAMAP_STATS
8740 	dhd->dma_stats.txdata++;
8741 	dhd->dma_stats.txdata_sz += pktlen;
8742 #endif /* DMAMAP_STATS */
8743 	/* No need to lock. Save the rest of the packet's metadata */
8744 	DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_tx_map, PKTBUF, pktid,
8745 	    pa, pktlen, DMA_TX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_TX);
8746 
8747 #ifdef TXP_FLUSH_NITEMS
8748 	if (ring->pend_items_count == 0)
8749 		ring->start_addr = (void *)txdesc;
8750 	ring->pend_items_count++;
8751 #endif
8752 #ifdef DHD_HMAPTEST
8753 	if (dhd->prot->hmaptest_tx_active == HMAPTEST_D11_TX_ACTIVE) {
8754 		/* scratch area */
8755 		dhd->prot->hmap_tx_buf_va = (char *)dhd->prot->hmaptest.mem.va
8756 			+ dhd->prot->hmaptest.offset;
8757 		/* replace pa with our pa for txbuf post only */
8758 		dhd->prot->hmap_tx_buf_len = pktlen;
8759 		if ((dhd->prot->hmap_tx_buf_va + dhd->prot->hmap_tx_buf_len) >
8760 			((char *)dhd->prot->hmaptest.mem.va + dhd->prot->hmaptest.mem.len)) {
8761 			DHD_ERROR(("hmaptest: ERROR Txpost outside HMAPTEST buffer\n"));
8762 			DHD_ERROR(("hmaptest: NOT Replacing Rx Buffer\n"));
8763 			dhd->prot->hmaptest_tx_active = HMAPTEST_D11_TX_INACTIVE;
8764 			dhd->prot->hmaptest.in_progress = FALSE;
8765 		} else {
8766 			/* copy pktdata to our va */
8767 			memcpy(dhd->prot->hmap_tx_buf_va, PKTDATA(dhd->osh, PKTBUF), pktlen);
8768 			pa = DMA_MAP(dhd->osh, dhd->prot->hmap_tx_buf_va,
8769 				dhd->prot->hmap_tx_buf_len, DMA_TX, PKTBUF, 0);
8770 
8771 			dhd->prot->hmap_tx_buf_pa = pa;
8772 			/* store pktid for later mapping in txcpl */
8773 			dhd->prot->hmaptest_tx_pktid = pktid;
8774 			dhd->prot->hmaptest_tx_active = HMAPTEST_D11_TX_POSTED;
8775 			DHD_ERROR(("hmaptest: d11read txpost scratch txbuf pktid=0x%08x\n", pktid));
8776 			DHD_ERROR(("hmaptest: d11read txpost txbuf va=0x%p pa.lo=0x%08x len=%d\n",
8777 				dhd->prot->hmap_tx_buf_va, (uint32)PHYSADDRLO(pa), pktlen));
8778 		}
8779 	}
8780 #endif /* DHD_HMAPTEST */
8781 
8782 #ifdef DHD_PKTTS
8783 	memset(&meta_data_buf, 0, sizeof(meta_data_buf));
8784 	if (dhd_get_pktts_enab(dhd) &&
8785 		dhd->pkt_metadata_buflen) {
8786 		/* Allocate memory for Meta data */
8787 		meta_data_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh, meta_data_buf_len,
8788 			DMA_ALIGN_LEN, &meta_data_buf._alloced,
8789 			&meta_data_buf.pa, &meta_data_buf.dmah);
8790 
8791 		if (meta_data_buf.va == NULL) {
8792 			DHD_ERROR_RLMT(("%s: dhd_dma_buf_alloc failed \r\n", __FUNCTION__));
8793 			DHD_ERROR_RLMT((" ... Proceeding without metadata buffer \r\n"));
8794 		} else {
8795 			DHD_PKTID_SAVE_METADATA(dhd, dhd->prot->pktid_tx_map,
8796 				(void *)meta_data_buf.va,
8797 				meta_data_buf.pa,
8798 				(uint16)meta_data_buf._alloced,
8799 				meta_data_buf.dmah,
8800 				pktid);
8801 		}
8802 		memcpy(&addr, &meta_data_buf.pa, sizeof(meta_data_buf.pa));
8803 		DHD_TRACE(("Meta data Buffer VA: %p  PA: %llx dmah: %p\r\n",
8804 			meta_data_buf.va, addr, meta_data_buf.dmah));
8805 
8806 		txdesc->metadata_buf_addr.low = addr & (0xFFFFFFFF);
8807 		txdesc->metadata_buf_addr.high = (addr >> 32) & (0xFFFFFFFF);
8808 		txdesc->metadata_buf_len = meta_data_buf_len;
8809 	}
8810 #endif /* DHD_PKTTS */
8811 
8812 	/* Form the Tx descriptor message buffer */
8813 
8814 	/* Common message hdr */
8815 	txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST;
8816 	txdesc->cmn_hdr.if_id = ifidx;
8817 	txdesc->cmn_hdr.flags = ring->current_phase;
8818 
8819 	txdesc->flags |= BCMPCIE_PKT_FLAGS_FRAME_802_3;
8820 	prio = (uint8)PKTPRIO(PKTBUF);
8821 
8822 #ifdef EXT_STA
8823 	txdesc->flags &= ~BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_MASK <<
8824 		BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_SHIFT;
8825 	txdesc->flags |= (WLPKTFLAG_EXEMPT_GET(WLPKTTAG(PKTBUF)) &
8826 		BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_MASK)
8827 		<< BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_SHIFT;
8828 #endif
8829 
8830 	txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT;
8831 	txdesc->seg_cnt = 1;
8832 
8833 	txdesc->data_len = htol16((uint16) pktlen);
8834 	txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
8835 	txdesc->data_buf_addr.low_addr  = htol32(PHYSADDRLO(pa));
8836 
8837 	if (!host_sfh_llc_reqd)
8838 	{
8839 		/* Move data pointer to keep ether header in local PKTBUF for later reference */
8840 		PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN);
8841 	}
8842 
8843 	txdesc->ext_flags = 0;
8844 
8845 #ifdef DHD_TIMESYNC
8846 	txdesc->rate = 0;
8847 
8848 	if (!llc_inserted && dhd->prot->tx_ts_log_enabled) {
8849 		dhd_pkt_parse_t parse;
8850 
8851 		dhd_parse_proto(PKTDATA(dhd->osh, PKTBUF), &parse);
8852 
8853 		if (parse.proto == IP_PROT_ICMP) {
8854 			if (dhd->prot->no_retry)
8855 				txdesc->ext_flags = BCMPCIE_PKT_FLAGS_FRAME_NORETRY;
8856 			if (dhd->prot->no_aggr)
8857 				txdesc->ext_flags |= BCMPCIE_PKT_FLAGS_FRAME_NOAGGR;
8858 			if (dhd->prot->fixed_rate)
8859 				txdesc->ext_flags |= BCMPCIE_PKT_FLAGS_FRAME_UDR;
8860 		}
8861 	}
8862 #endif /* DHD_TIMESYNC */
8863 
8864 #ifdef DHD_SBN
8865 	if (dhd_udr) {
8866 		txdesc->ext_flags |= BCMPCIE_PKT_FLAGS_FRAME_UDR;
8867 	}
8868 #endif /* DHD_SBN */
8869 
8870 #ifdef DHD_TX_PROFILE
8871 	if (!llc_inserted &&
8872 		dhd->tx_profile_enab && dhd->num_profiles > 0)
8873 	{
8874 		uint8 offset;
8875 
8876 		for (offset = 0; offset < dhd->num_profiles; offset++) {
8877 			if (dhd_protocol_matches_profile((uint8 *)PKTDATA(dhd->osh, PKTBUF),
8878 				PKTLEN(dhd->osh, PKTBUF), &(dhd->protocol_filters[offset]),
8879 				host_sfh_llc_reqd)) {
8880 				/* mask so other reserved bits are not modified. */
8881 				txdesc->rate |=
8882 					(((uint8)dhd->protocol_filters[offset].profile_index) &
8883 					BCMPCIE_TXPOST_RATE_PROFILE_IDX_MASK);
8884 
8885 				/* so we can use the rate field for our purposes */
8886 				txdesc->rate |= BCMPCIE_TXPOST_RATE_EXT_USAGE;
8887 
8888 				break;
8889 			}
8890 		}
8891 	}
8892 #endif /* defined(DHD_TX_PROFILE) */
8893 
8894 	/* Handle Tx metadata */
8895 	headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF);
8896 	if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset))
8897 		DHD_ERROR(("No headroom for Metadata tx %d %d\n",
8898 		prot->tx_metadata_offset, headroom));
8899 
8900 	if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) {
8901 		DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset));
8902 
8903 		/* Adjust the data pointer to account for meta data in DMA_MAP */
8904 		PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset);
8905 
8906 		meta_pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
8907 			prot->tx_metadata_offset, DMA_RX, PKTBUF, 0);
8908 
8909 		if (PHYSADDRISZERO(meta_pa)) {
8910 			/* Unmap the data pointer to a DMA-able address */
8911 			DMA_UNMAP(dhd->osh, pa, pktlen, DMA_TX, 0, DHD_DMAH_NULL);
8912 #ifdef TXP_FLUSH_NITEMS
8913 			/* update pend_items_count */
8914 			ring->pend_items_count--;
8915 #endif /* TXP_FLUSH_NITEMS */
8916 
8917 			DHD_ERROR(("%s: Something really bad, unless 0 is "
8918 				"a valid phyaddr for meta_pa\n", __FUNCTION__));
8919 			ASSERT(0);
8920 			/* XXX if ASSERT() doesn't work like as Android platform,
8921 			 * try to requeue the packet to the backup queue.
8922 			 */
8923 			goto err_rollback_idx;
8924 		}
8925 
8926 		/* Adjust the data pointer back to original value */
8927 		PKTPULL(dhd->osh, PKTBUF, prot->tx_metadata_offset);
8928 
8929 		txdesc->metadata_buf_len = prot->tx_metadata_offset;
8930 		txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa));
8931 		txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa));
8932 	} else {
8933 #ifdef DHD_HP2P
8934 		if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
8935 			dhd_update_hp2p_txdesc(dhd, txdesc);
8936 		} else
8937 #endif /* DHD_HP2P */
8938 #ifdef DHD_PKTTS
8939 		if (!dhd_get_pktts_enab(dhd) || !dhd->pkt_metadata_buflen) {
8940 #else
8941 		if (1) {
8942 #endif /* DHD_PKTTS */
8943 			txdesc->metadata_buf_len = htol16(0);
8944 			txdesc->metadata_buf_addr.high_addr = 0;
8945 			txdesc->metadata_buf_addr.low_addr = 0;
8946 		}
8947 	}
8948 
8949 #ifdef AGG_H2D_DB
8950 	OSL_ATOMIC_INC(dhd->osh, &ring->inflight);
8951 #endif /* AGG_H2D_DB */
8952 
8953 #ifdef DHD_PKTID_AUDIT_RING
8954 	DHD_PKTID_AUDIT(dhd, prot->pktid_tx_map, pktid, DHD_DUPLICATE_ALLOC);
8955 #endif /* DHD_PKTID_AUDIT_RING */
8956 
8957 	txdesc->cmn_hdr.request_id = htol32(pktid);
8958 
8959 	DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len,
8960 		txdesc->cmn_hdr.request_id));
8961 
8962 #ifdef DHD_LBUF_AUDIT
8963 	PKTAUDIT(dhd->osh, PKTBUF);
8964 #endif
8965 
8966 	/* Update the write pointer in TCM & ring bell */
8967 #if defined(TXP_FLUSH_NITEMS)
8968 #if defined(DHD_HP2P)
8969 	if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
8970 		dhd_calc_hp2p_burst(dhd, ring, flowid);
8971 	} else
8972 #endif /* HP2P */
8973 	{
8974 		if ((ring->pend_items_count == prot->txp_threshold) ||
8975 				((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
8976 #ifdef AGG_H2D_DB
8977 			if (agg_h2d_db_enab) {
8978 				dhd_prot_txdata_aggr_db_write_flush(dhd, flowid);
8979 				if ((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring)) {
8980 					dhd_prot_aggregate_db_ring_door_bell(dhd, flowid, TRUE);
8981 				}
8982 			} else
8983 #endif /* AGG_H2D_DB */
8984 			{
8985 				dhd_prot_txdata_write_flush(dhd, flowid);
8986 			}
8987 
8988 		}
8989 	}
8990 #else
8991 	/* update ring's WR index and ring doorbell to dongle */
8992 	dhd_prot_ring_write_complete(dhd, ring, txdesc, 1);
8993 #endif /* TXP_FLUSH_NITEMS */
8994 
8995 #ifdef TX_STATUS_LATENCY_STATS
8996 	/* set the time when pkt is queued to flowring */
8997 	DHD_PKT_SET_QTIME(PKTBUF, OSL_SYSUPTIME_US());
8998 #elif defined(DHD_PKTTS)
8999 	if (dhd_get_pktts_enab(dhd) == TRUE) {
9000 		/* set the time when pkt is queued to flowring */
9001 		DHD_PKT_SET_QTIME(PKTBUF, OSL_SYSUPTIME_US());
9002 	}
9003 #endif /* TX_STATUS_LATENCY_STATS */
9004 
9005 	DHD_RING_UNLOCK(ring->ring_lock, flags);
9006 
9007 	OSL_ATOMIC_INC(dhd->osh, &prot->active_tx_count);
9008 
9009 	/*
9010 	 * Take a wake lock, do not sleep if we have atleast one packet
9011 	 * to finish.
9012 	 */
9013 	DHD_TXFL_WAKE_LOCK_TIMEOUT(dhd, MAX_TX_TIMEOUT);
9014 
9015 #ifdef PCIE_INB_DW
9016 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
9017 #endif
9018 #ifdef TX_STATUS_LATENCY_STATS
9019 	flow_ring_node->flow_info.num_tx_pkts++;
9020 #endif /* TX_STATUS_LATENCY_STATS */
9021 	return BCME_OK;
9022 
9023 err_rollback_idx:
9024 	/* roll back write pointer for unprocessed message */
9025 	if (ring->wr == 0) {
9026 		ring->wr = ring->max_items - 1;
9027 	} else {
9028 		ring->wr--;
9029 		if (ring->wr == 0) {
9030 			DHD_INFO(("%s: flipping the phase now\n", ring->name));
9031 			ring->current_phase = ring->current_phase ?
9032 				0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
9033 		}
9034 	}
9035 
9036 err_free_pktid:
9037 #if defined(DHD_PCIE_PKTID)
9038 	{
9039 		void *dmah;
9040 		void *secdma;
9041 		/* Free up the PKTID. physaddr and pktlen will be garbage. */
9042 		DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
9043 			pa, pktlen, dmah, secdma, PKTTYPE_NO_CHECK);
9044 	}
9045 
9046 err_no_res_pktfree:
9047 #endif /* DHD_PCIE_PKTID */
9048 
9049 #if defined(BCMINTERNAL) && defined(LINUX)
9050 	if (osl_is_flag_set(dhd->osh, OSL_PHYS_MEM_LESS_THAN_16MB))
9051 		PKTCFREE(dhd->osh, PKTBUF, FALSE);
9052 #endif	/* BCMINTERNAL && LINUX */
9053 
9054 	DHD_RING_UNLOCK(ring->ring_lock, flags);
9055 
9056 fail:
9057 #ifdef PCIE_INB_DW
9058 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
9059 #endif
9060 	return BCME_NORESOURCE;
9061 } /* dhd_prot_txdata */
9062 
9063 #ifdef AGG_H2D_DB
9064 static void
9065 dhd_prot_txdata_aggr_db_write_flush(dhd_pub_t *dhd, uint16 flowid)
9066 {
9067 	flow_ring_table_t *flow_ring_table;
9068 	flow_ring_node_t *flow_ring_node;
9069 	msgbuf_ring_t *ring;
9070 
9071 	if (dhd->flow_ring_table == NULL) {
9072 		return;
9073 	}
9074 
9075 	flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
9076 	flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
9077 	ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
9078 
9079 	if (ring->pend_items_count) {
9080 		dhd_prot_agg_db_ring_write(dhd, ring, ring->start_addr,
9081 				ring->pend_items_count);
9082 		ring->pend_items_count = 0;
9083 		ring->start_addr = NULL;
9084 	}
9085 
9086 }
9087 #endif /* AGG_H2D_DB */
9088 
9089 /* called with a ring_lock */
9090 /** optimization to write "n" tx items at a time to ring */
9091 void
9092 BCMFASTPATH(dhd_prot_txdata_write_flush)(dhd_pub_t *dhd, uint16 flowid)
9093 {
9094 #ifdef TXP_FLUSH_NITEMS
9095 	flow_ring_table_t *flow_ring_table;
9096 	flow_ring_node_t *flow_ring_node;
9097 	msgbuf_ring_t *ring;
9098 
9099 	if (dhd->flow_ring_table == NULL) {
9100 		return;
9101 	}
9102 
9103 	flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
9104 	flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
9105 	ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
9106 
9107 	if (ring->pend_items_count) {
9108 		/* update ring's WR index and ring doorbell to dongle */
9109 		dhd_prot_ring_write_complete(dhd, ring, ring->start_addr,
9110 			ring->pend_items_count);
9111 		ring->pend_items_count = 0;
9112 		ring->start_addr = NULL;
9113 		dhd->prot->tx_h2d_db_cnt++;
9114 	}
9115 #endif /* TXP_FLUSH_NITEMS */
9116 }
9117 
9118 #undef PKTBUF	/* Only defined in the above routine */
9119 
9120 int
9121 BCMFASTPATH(dhd_prot_hdrpull)(dhd_pub_t *dhd, int *ifidx, void *pkt, uchar *buf, uint *len)
9122 {
9123 	return 0;
9124 }
9125 
9126 /** post a set of receive buffers to the dongle */
9127 static void
9128 BCMFASTPATH(dhd_prot_return_rxbuf)(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 pktid,
9129 	uint32 rxcnt)
9130 /* XXX function name could be more descriptive, eg dhd_prot_post_rxbufs */
9131 {
9132 	dhd_prot_t *prot = dhd->prot;
9133 
9134 	if (prot->rxbufpost >= rxcnt) {
9135 		prot->rxbufpost -= (uint16)rxcnt;
9136 	} else {
9137 		/* XXX: I have seen this assert hitting.
9138 		 * Will be removed once rootcaused.
9139 		 */
9140 		/* ASSERT(0); */
9141 		prot->rxbufpost = 0;
9142 	}
9143 
9144 	if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
9145 		dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
9146 	} else if (dhd->dma_h2d_ring_upd_support && !IDMA_ACTIVE(dhd)) {
9147 		/* Ring DoorBell after processing the rx packets,
9148 		 * so that dongle will sync the DMA indices.
9149 		 */
9150 		dhd_prot_ring_doorbell(dhd, DHD_RDPTR_UPDATE_H2D_DB_MAGIC(ring));
9151 	}
9152 
9153 	return;
9154 }
9155 
9156 #ifdef DHD_HMAPTEST
9157 
9158 static void
9159 dhd_msgbuf_hmaptest_cmplt(dhd_pub_t *dhd)
9160 {
9161 	dhd_prot_t *prot = dhd->prot;
9162 	uint64 end_usec;
9163 	char *readbuf;
9164 	uint32 len = dhd->prot->hmaptest.len;
9165 	uint32 i;
9166 
9167 	end_usec = OSL_SYSUPTIME_US();
9168 	end_usec -= prot->hmaptest.start_usec;
9169 	DHD_ERROR(("hmaptest cmplt: %d bytes in %llu usec, %u kBps\n",
9170 		len, end_usec, (len * (1000 * 1000 / 1024) / (uint32)(end_usec + 1))));
9171 
9172 	prot->hmaptest.in_progress = FALSE;
9173 	if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_M2M) {
9174 			DHD_ERROR(("HMAPTEST_ACCESS_M2M\n"));
9175 	} else if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_ARM) {
9176 			DHD_ERROR(("HMAPTEST_ACCESS_ARM\n"));
9177 	} else {
9178 		return;
9179 	}
9180 	readbuf = (char *)dhd->prot->hmaptest.mem.va + dhd->prot->hmaptest.offset;
9181 	OSL_CACHE_FLUSH(dhd->prot->hmaptest.mem.va,
9182 		dhd->prot->hmaptest.mem.len);
9183 	if (prot->hmaptest.is_write) {
9184 		DHD_ERROR(("hmaptest cmplt: FW has written at 0x%p\n", readbuf));
9185 		DHD_ERROR(("hmaptest cmplt: pattern = \n"));
9186 		len = ALIGN_SIZE(len, (sizeof(int32)));
9187 		for (i = 0; i < len; i += (sizeof(int32))) {
9188 			DHD_ERROR(("0x%08x\n", *(int *)(readbuf + i)));
9189 		}
9190 		DHD_ERROR(("\n\n"));
9191 	}
9192 
9193 }
9194 /* program HMAPTEST window and window config registers
9195  * Reference for HMAP implementation in OS's that can easily leverage it
9196  * this function can be used as reference for programming HMAP windows
9197  * the function to program HMAP windows and enable it
9198  * can be called at init time or hmap iovar
9199  */
9200 static void
9201 dhdmsgbuf_set_hmaptest_windows(dhd_pub_t *dhd)
9202 {
9203 	uint32 nwindows = 0;
9204 	uint32 scratch_len;
9205 	uint64 scratch_lin, w1_start;
9206 	dmaaddr_t scratch_pa;
9207 	pcie_hmapwindow_t *hmapwindows; /* 8 windows 0-7 */
9208 	dhd_prot_t *prot = dhd->prot;
9209 	uint corerev = dhd->bus->sih->buscorerev;
9210 
9211 	scratch_pa = prot->hmaptest.mem.pa;
9212 	scratch_len = prot->hmaptest.mem.len;
9213 	scratch_lin  = (uint64)(PHYSADDRLO(scratch_pa) & 0xffffffff)
9214 		| (((uint64)PHYSADDRHI(scratch_pa)& 0xffffffff) << 32);
9215 	hmapwindows = (pcie_hmapwindow_t *)((uintptr_t)PCI_HMAP_WINDOW_BASE(corerev));
9216 	/* windows are 4kb aligned and window length is 512 byte aligned
9217 	 * window start ends with 0x1000 and window length ends with 0xe00
9218 	 * make the sandbox buffer 4kb aligned and size also 4kb aligned for hmap test
9219 	 * window0 = 0 - sandbox_start
9220 	 * window1 = sandbox_end + 1 - 0xffffffff
9221 	 * window2 = 0x100000000 - 0x1fffffe00
9222 	 * window 3 is programmed only for valid test cases
9223 	 * window3 = sandbox_start - sandbox_end
9224 	 */
9225 	w1_start  = scratch_lin +  scratch_len;
9226 		DHD_ERROR(("hmaptest: window 0 offset lower=0x%p upper=0x%p length=0x%p\n",
9227 		&(hmapwindows[0].baseaddr_lo), &(hmapwindows[0].baseaddr_hi),
9228 		&(hmapwindows[0].windowlength)));
9229 	DHD_ERROR(("hmaptest: window 1 offset lower=0x%p upper=0x%p length=0x%p\n",
9230 		&(hmapwindows[1].baseaddr_lo), &(hmapwindows[1].baseaddr_hi),
9231 		&(hmapwindows[1].windowlength)));
9232 	DHD_ERROR(("hmaptest: window 2 offset lower=0x%p upper=0x%p length=0x%p\n",
9233 		&(hmapwindows[2].baseaddr_lo), &(hmapwindows[2].baseaddr_hi),
9234 			&(hmapwindows[2].windowlength)));
9235 	DHD_ERROR(("hmaptest: window 3 offset lower=0x%p upper=0x%p length=0x%p\n",
9236 		&(hmapwindows[3].baseaddr_lo), &(hmapwindows[3].baseaddr_hi),
9237 		&(hmapwindows[3].windowlength)));
9238 		DHD_ERROR(("hmaptest: w0 base_lo=0x%08x base_hi=0x%08x len=0x%0llx\n",
9239 			0, 0, (uint64) scratch_lin));
9240 		DHD_ERROR(("hmaptest: w1 base_lo=0x%08x base_hi=0x%08x len=0x%0llx\n",
9241 			(uint32)(w1_start & 0xffffffff),
9242 			(uint32)((w1_start >> 32) & 0xffffffff),
9243 			(uint64)(0x100000000 - w1_start)));
9244 		DHD_ERROR(("hmaptest: w2 base_lo=0x%08x base_hi=0x%08x len=0x%0llx\n",
9245 			0, 1, (uint64)0xfffffe00));
9246 		/* setting window0 */
9247 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9248 			(uintptr_t)(&(hmapwindows[0].baseaddr_lo)), ~0, 0x0);
9249 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9250 			(uintptr_t)(&(hmapwindows[0].baseaddr_hi)), ~0, 0x0);
9251 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9252 			(uintptr_t)(&(hmapwindows[0].windowlength)), ~0,
9253 			(uint64)scratch_lin);
9254 		/* setting window1 */
9255 		w1_start  = scratch_lin +  scratch_len;
9256 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9257 			(uintptr_t)(&(hmapwindows[1].baseaddr_lo)), ~0,
9258 			(uint32)(w1_start & 0xffffffff));
9259 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9260 			(uintptr_t)(&(hmapwindows[1].baseaddr_hi)), ~0,
9261 			(uint32)((w1_start >> 32) & 0xffffffff));
9262 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9263 			(uintptr_t)(&(hmapwindows[1].windowlength)), ~0,
9264 			(0x100000000 - w1_start));
9265 		/* setting window2 */
9266 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9267 			(uintptr_t)(&(hmapwindows[2].baseaddr_lo)), ~0, 0x0);
9268 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9269 			(uintptr_t)(&(hmapwindows[2].baseaddr_hi)), ~0, 0x1);
9270 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9271 			(uintptr_t)(&(hmapwindows[2].windowlength)), ~0, 0xfffffe00);
9272 		nwindows = 3;
9273 		/* program only windows 0-2 with section1 +section2 */
9274 		/* setting window config */
9275 		/* set bit 8:15 in windowconfig to enable n windows in order */
9276 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9277 			(uint)PCI_HMAP_WINDOW_CONFIG(corerev), ~0, (nwindows << 8));
9278 }
9279 
9280 /* stop HMAPTEST does not check corerev
9281  * caller has to ensure corerev check
9282  */
9283 int
9284 dhdmsgbuf_hmaptest_stop(dhd_pub_t *dhd)
9285 {
9286 	uint32 window_config, nwindows, i;
9287 	pcie_hmapwindow_t *hmapwindows; /* 8 windows 0-7 */
9288 	uint corerev = dhd->bus->sih->buscorerev;
9289 
9290 	hmapwindows = (pcie_hmapwindow_t *)((uintptr_t)PCI_HMAP_WINDOW_BASE(corerev));
9291 	dhd->prot->hmaptest.in_progress = FALSE;
9292 
9293 	/* Reference for HMAP Implementation
9294 	 * Disable HMAP windows.
9295 	 * As windows were programmed in bus:hmap set call
9296 	 * disabling in hmaptest_stop.
9297 	 */
9298 	DHD_ERROR(("hmap: disable hmap windows\n"));
9299 	window_config = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9300 		(uint)PCI_HMAP_WINDOW_CONFIG(corerev), 0, 0);
9301 	nwindows = (window_config & PCI_HMAP_NWINDOWS_MASK) >> PCI_HMAP_NWINDOWS_SHIFT;
9302 	si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9303 		(uint)PCI_HMAP_WINDOW_CONFIG(corerev), ~0, 0);
9304 	/* clear all windows */
9305 	for (i = 0; i < nwindows; i++) {
9306 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9307 			(uintptr_t)(&(hmapwindows[i].baseaddr_lo)), ~0, 0);
9308 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9309 			(uintptr_t)(&(hmapwindows[i].baseaddr_hi)), ~0, 0);
9310 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9311 			(uintptr_t)(&(hmapwindows[i].windowlength)), ~0, 0);
9312 	}
9313 
9314 	return BCME_OK;
9315 }
9316 
9317 /* HMAP iovar intercept process */
9318 int
9319 dhdmsgbuf_hmap(dhd_pub_t *dhd, pcie_hmap_t *hmap_params, bool set)
9320 {
9321 
9322 	uint32 scratch_len;
9323 	uint64 scratch_lin, w1_start;
9324 	dmaaddr_t scratch_pa;
9325 	uint32 addr_lo, addr_hi, window_length, window_config, nwindows, i;
9326 	pcie_hmapwindow_t *hmapwindows; /* 8 windows 0-7 */
9327 
9328 	dhd_prot_t *prot = dhd->prot;
9329 	dhd_bus_t *bus = dhd->bus;
9330 	uint corerev = bus->sih->buscorerev;
9331 	scratch_pa = prot->hmaptest.mem.pa;
9332 	scratch_len = prot->hmaptest.mem.len;
9333 	scratch_lin  = (uint64)(PHYSADDRLO(scratch_pa) & 0xffffffff)
9334 		| (((uint64)PHYSADDRHI(scratch_pa)& 0xffffffff) << 32);
9335 	w1_start  = scratch_lin +  scratch_len;
9336 	DHD_ERROR(("HMAP:  pcicorerev = %d\n", corerev));
9337 
9338 	if (corerev < 24) {
9339 		DHD_ERROR(("HMAP not available on pci corerev = %d\n", corerev));
9340 		return BCME_UNSUPPORTED;
9341 	}
9342 	if (set) {
9343 		if (hmap_params->enable) {
9344 			dhdmsgbuf_set_hmaptest_windows(dhd);
9345 		} else {
9346 			dhdmsgbuf_hmaptest_stop(dhd); /* stop will clear all programmed windows */
9347 		}
9348 	}
9349 
9350 	OSL_CACHE_FLUSH(dhd->prot->hmaptest.mem.va,
9351 		dhd->prot->hmaptest.mem.len);
9352 
9353 	window_config = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9354 		(uint)PCI_HMAP_WINDOW_CONFIG(corerev), 0, 0);
9355 	nwindows = (window_config & PCI_HMAP_NWINDOWS_MASK) >> PCI_HMAP_NWINDOWS_SHIFT;
9356 	prot->hmap_enabled = nwindows ? TRUE : FALSE;
9357 
9358 	/* getting window config */
9359 	/* set bit 8:15 in windowconfig to enable n windows in order */
9360 	DHD_ERROR(("hmap: hmap status = %s\n", (prot->hmap_enabled ? "Enabled" : "Disabled")));
9361 	DHD_ERROR(("hmap: window config = 0x%08x\n", window_config));
9362 	DHD_ERROR(("hmap: Windows\n"));
9363 
9364 	hmapwindows = (pcie_hmapwindow_t *)((uintptr_t)PCI_HMAP_WINDOW_BASE(corerev));
9365 	/* getting windows */
9366 	if (nwindows > 8)
9367 		return BCME_ERROR;
9368 	for (i = 0; i < nwindows; i++) {
9369 		addr_lo = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9370 			(uintptr_t)(&(hmapwindows[i].baseaddr_lo)), 0, 0);
9371 		addr_hi = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9372 			(uintptr_t)(&(hmapwindows[i].baseaddr_hi)), 0, 0);
9373 		window_length = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9374 			(uintptr_t)(&(hmapwindows[i].windowlength)), 0, 0);
9375 
9376 		DHD_ERROR(("hmap: window %d address lower=0x%08x upper=0x%08x length=0x%08x\n",
9377 			i, addr_lo, addr_hi, window_length));
9378 	}
9379 	addr_hi = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9380 		(uint)(PCI_HMAP_VIOLATION_ADDR_U(corerev)), 0, 0);
9381 	addr_lo = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9382 		(uint)(PCI_HMAP_VIOLATION_ADDR_L(corerev)), 0, 0);
9383 	window_length = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9384 		(uint)(PCI_HMAP_VIOLATION_INFO(corerev)), 0, 0);
9385 	DHD_ERROR(("hmap: violation regs\n"));
9386 	DHD_ERROR(("hmap: violationaddr_hi =0x%08x\n", addr_hi));
9387 	DHD_ERROR(("hmap: violationaddr_lo =0x%08x\n", addr_lo));
9388 	DHD_ERROR(("hmap: violation_info   =0x%08x\n", window_length));
9389 	DHD_ERROR(("hmap: Buffer allocated for HMAPTEST Start=0x%0llx len =0x%08x End =0x%0llx\n",
9390 		(uint64) scratch_lin, scratch_len, (uint64) w1_start));
9391 
9392 	return BCME_OK;
9393 }
9394 
9395 /* hmaptest iovar process
9396  * This iovar triggers HMAPTEST with given params
9397  * on chips that have HMAP
9398  * DHD programs hmap window registers with host addresses here.
9399  */
9400 int
9401 dhdmsgbuf_hmaptest(dhd_pub_t *dhd, pcie_hmaptest_t *hmaptest_params)
9402 {
9403 
9404 	dhd_prot_t *prot = dhd->prot;
9405 	int ret = BCME_OK;
9406 	uint32 offset = 0;
9407 	uint64 scratch_lin;
9408 	dhd_bus_t *bus = dhd->bus;
9409 	uint corerev = bus->sih->buscorerev;
9410 
9411 	if (prot->hmaptest.in_progress) {
9412 		DHD_ERROR(("HMAPTEST already running. Try again.\n"));
9413 		return BCME_BUSY;
9414 	}
9415 
9416 	prot->hmaptest.in_progress = TRUE;
9417 
9418 	if (corerev < 24) {
9419 		DHD_ERROR(("HMAP not available on pci corerev = %d\n", corerev));
9420 		return BCME_UNSUPPORTED;
9421 	}
9422 	prot->hmaptest.accesstype = hmaptest_params->accesstype;
9423 	prot->hmaptest.is_write = hmaptest_params->is_write;
9424 	prot->hmaptest.len = hmaptest_params->xfer_len;
9425 	prot->hmaptest.offset = hmaptest_params->host_offset;
9426 	offset = prot->hmaptest.offset;
9427 
9428 	DHD_ERROR(("hmaptest: is_write =%d accesstype=%d offset =%d len=%d value=0x%08x\n",
9429 		prot->hmaptest.is_write, prot->hmaptest.accesstype,
9430 		offset, prot->hmaptest.len, hmaptest_params->value));
9431 
9432 	DHD_ERROR(("hmaptest  dma_lo=0x%08x hi=0x%08x pa\n",
9433 		(uint32)PHYSADDRLO(prot->hmaptest.mem.pa),
9434 		(uint32)PHYSADDRHI(prot->hmaptest.mem.pa)));
9435 
9436 	if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_D11) {
9437 		if (prot->hmaptest.is_write) {
9438 			/* if d11 is writing then post rxbuf from scratch area */
9439 			dhd->prot->hmaptest_rx_active = HMAPTEST_D11_RX_ACTIVE;
9440 		} else {
9441 			/* if d11 is reading then post txbuf from scratch area */
9442 			dhd->prot->hmaptest_tx_active = HMAPTEST_D11_TX_ACTIVE;
9443 		}
9444 
9445 	} else {
9446 		uint32 pattern = 0xdeadbeef;
9447 		uint32 i;
9448 		uint32 maxbuflen = MIN(prot->hmaptest.len, (PKTBUFSZ));
9449 		char *fillbuf = (char *)dhd->prot->hmaptest.mem.va
9450 			+ offset;
9451 		if ((fillbuf + maxbuflen) >
9452 			((char *)dhd->prot->hmaptest.mem.va + dhd->prot->hmaptest.mem.len)) {
9453 			DHD_ERROR(("hmaptest: M2m/ARM ERROR offset + len outside buffer\n"));
9454 			dhd->prot->hmaptest.in_progress = FALSE;
9455 			return BCME_BADARG;
9456 		}
9457 
9458 		if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_M2M) {
9459 			DHD_ERROR(("HMAPTEST_ACCESS_M2M\n"));
9460 		} else if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_ARM) {
9461 			DHD_ERROR(("HMAPTEST_ACCESS_ARM\n"));
9462 		} else {
9463 			prot->hmaptest.in_progress = FALSE;
9464 			DHD_ERROR(("hmaptest: accesstype error\n"));
9465 			return BCME_BADARG;
9466 		}
9467 
9468 		/* fill a pattern at offset */
9469 		maxbuflen = ALIGN_SIZE(maxbuflen, (sizeof(uint32)));
9470 		memset(fillbuf, 0, maxbuflen);
9471 		DHD_ERROR(("hmaptest: dhd write pattern at addr=0x%p\n",
9472 			fillbuf));
9473 		DHD_ERROR(("pattern = %08x, %u times",
9474 			pattern, (uint32)(maxbuflen / sizeof(uint32))));
9475 		for (i = 0; i < maxbuflen; i += sizeof(uint32)) {
9476 			*(uint32 *)(fillbuf + i) = pattern;
9477 		}
9478 		OSL_CACHE_FLUSH(dhd->prot->hmaptest.mem.va,
9479 			dhd->prot->hmaptest.mem.len);
9480 		DHD_ERROR(("\n\n"));
9481 
9482 	}
9483 
9484 	/*
9485 	 * Do not calculate address from scratch buffer + offset,
9486 	 * if user supplied absolute address
9487 	 */
9488 	if (hmaptest_params->host_addr_lo || hmaptest_params->host_addr_hi) {
9489 		if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_D11) {
9490 			DHD_ERROR(("hmaptest: accesstype D11 does not support absolute addr\n"));
9491 			return BCME_UNSUPPORTED;
9492 		}
9493 	} else {
9494 		scratch_lin  = (uint64)(PHYSADDRLO(prot->hmaptest.mem.pa) & 0xffffffff)
9495 			| (((uint64)PHYSADDRHI(prot->hmaptest.mem.pa) & 0xffffffff) << 32);
9496 		scratch_lin += offset;
9497 		hmaptest_params->host_addr_lo = htol32((uint32)(scratch_lin & 0xffffffff));
9498 		hmaptest_params->host_addr_hi = htol32((uint32)((scratch_lin >> 32) & 0xffffffff));
9499 	}
9500 
9501 	DHD_INFO(("HMAPTEST Started...\n"));
9502 	prot->hmaptest.start_usec = OSL_SYSUPTIME_US();
9503 	return ret;
9504 
9505 }
9506 
9507 #endif /* DHD_HMAPTEST */
9508 
9509 /* called before an ioctl is sent to the dongle */
9510 static void
9511 dhd_prot_wlioctl_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf)
9512 {
9513 	dhd_prot_t *prot = dhd->prot;
9514 	int slen = 0;
9515 
9516 	if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "pcie_bus_tput")) {
9517 		pcie_bus_tput_params_t *tput_params;
9518 
9519 		slen = strlen("pcie_bus_tput") + 1;
9520 		tput_params = (pcie_bus_tput_params_t*)((char *)buf + slen);
9521 		bcopy(&prot->host_bus_throughput_buf.pa, &tput_params->host_buf_addr,
9522 			sizeof(tput_params->host_buf_addr));
9523 		tput_params->host_buf_len = DHD_BUS_TPUT_BUF_LEN;
9524 	}
9525 
9526 #ifdef DHD_HMAPTEST
9527 	if (buf != NULL && !strcmp(buf, "bus:hmap")) {
9528 		pcie_hmap_t *hmap_params;
9529 		slen = strlen("bus:hmap") + 1;
9530 		hmap_params = (pcie_hmap_t*)((char *)buf + slen);
9531 		dhdmsgbuf_hmap(dhd, hmap_params, (ioc->cmd == WLC_SET_VAR));
9532 	}
9533 
9534 	if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "bus:hmaptest")) {
9535 		pcie_hmaptest_t *hmaptest_params;
9536 
9537 		slen = strlen("bus:hmaptest") + 1;
9538 		hmaptest_params = (pcie_hmaptest_t*)((char *)buf + slen);
9539 		dhdmsgbuf_hmaptest(dhd, hmaptest_params);
9540 	}
9541 #endif /* DHD_HMAPTEST */
9542 }
9543 
9544 /* called after an ioctl returns from dongle */
9545 static void
9546 dhd_prot_wl_ioctl_ret_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf,
9547 	int ifidx, int ret, int len)
9548 {
9549 
9550 #ifdef DHD_HMAPTEST
9551 	if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "bus:hmaptest")) {
9552 		dhd_msgbuf_hmaptest_cmplt(dhd);
9553 	}
9554 #endif /* DHD_HMAPTEST */
9555 
9556 	if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) {
9557 		int slen;
9558 		/* Intercept the wme_dp ioctl here */
9559 		if (!strcmp(buf, "wme_dp")) {
9560 			int val = 0;
9561 			slen = strlen("wme_dp") + 1;
9562 			if (len >= (int)(slen + sizeof(int)))
9563 				bcopy(((char *)buf + slen), &val, sizeof(int));
9564 			dhd->wme_dp = (uint8) ltoh32(val);
9565 		}
9566 
9567 #ifdef DHD_AWDL
9568 		/* Intercept the awdl_peer_op ioctl here */
9569 		if (!strcmp(buf, "awdl_peer_op")) {
9570 			slen = strlen("awdl_peer_op") + 1;
9571 			dhd_awdl_peer_op(dhd, (uint8)ifidx, ((char *)buf + slen), len - slen);
9572 		}
9573 		/* Intercept the awdl ioctl here, delete flow rings if awdl is
9574 		 * disabled
9575 		 */
9576 		if (!strcmp(buf, "awdl")) {
9577 			int val = 0;
9578 			slen = strlen("awdl") + 1;
9579 			if (len >= (int)(slen + sizeof(int))) {
9580 				bcopy(((char *)buf + slen), &val, sizeof(int));
9581 				val = ltoh32(val);
9582 				if (val == TRUE) {
9583 					/**
9584 					 * Though we are updating the link status when we recieve
9585 					 * WLC_E_LINK from dongle, it is not gaurenteed always.
9586 					 * So intercepting the awdl command fired from app to
9587 					 * update the status.
9588 					 */
9589 					dhd_update_interface_link_status(dhd, (uint8)ifidx, TRUE);
9590 #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
9591 					/* reset AWDL stats data structures when AWDL is enabled */
9592 					dhd_clear_awdl_stats(dhd);
9593 #endif /* DHD_AWDL && AWDL_SLOT_STATS */
9594 				} else if (val == FALSE) {
9595 					dhd_update_interface_link_status(dhd, (uint8)ifidx, FALSE);
9596 					dhd_del_all_sta(dhd, (uint8)ifidx);
9597 					dhd_awdl_peer_op(dhd, (uint8)ifidx, NULL, 0);
9598 
9599 				}
9600 			}
9601 
9602 		}
9603 
9604 		/* store the awdl min extension count and presence mode values
9605 		 * set by the user, same will be inserted in the LLC header for
9606 		 * each tx packet on the awdl iface
9607 		*/
9608 		slen = strlen("awdl_extcounts");
9609 		if (!strncmp(buf, "awdl_extcounts", slen)) {
9610 			awdl_extcount_t *extcnt = NULL;
9611 			slen = slen + 1;
9612 			if ((len - slen) >= sizeof(*extcnt)) {
9613 				extcnt = (awdl_extcount_t *)((char *)buf + slen);
9614 				dhd->awdl_minext = extcnt->minExt;
9615 			}
9616 		}
9617 
9618 		slen = strlen("awdl_presencemode");
9619 		if (!strncmp(buf, "awdl_presencemode", slen)) {
9620 			slen = slen + 1;
9621 			if ((len - slen) >= sizeof(uint8)) {
9622 				dhd->awdl_presmode = *((uint8 *)((char *)buf + slen));
9623 			}
9624 		}
9625 #endif /* DHD_AWDL */
9626 	}
9627 
9628 }
9629 
9630 #ifdef DHD_PM_CONTROL_FROM_FILE
9631 extern bool g_pm_control;
9632 #endif /* DHD_PM_CONTROL_FROM_FILE */
9633 
9634 /** Use protocol to issue ioctl to dongle. Only one ioctl may be in transit. */
9635 int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
9636 {
9637 	int ret = -1;
9638 	uint8 action;
9639 
9640 	if (dhd->bus->is_linkdown) {
9641 		DHD_ERROR_RLMT(("%s : PCIe link is down. we have nothing to do\n", __FUNCTION__));
9642 		goto done;
9643 	}
9644 
9645 	if (dhd_query_bus_erros(dhd)) {
9646 		DHD_ERROR_RLMT(("%s : some BUS error. we have nothing to do\n", __FUNCTION__));
9647 		goto done;
9648 	}
9649 
9650 	if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
9651 		DHD_ERROR_RLMT(("%s : bus is down. we have nothing to do -"
9652 			" bus state: %d, sent hang: %d\n", __FUNCTION__,
9653 			dhd->busstate, dhd->hang_was_sent));
9654 		goto done;
9655 	}
9656 
9657 	if (dhd->busstate == DHD_BUS_SUSPEND) {
9658 		DHD_ERROR(("%s : bus is suspended\n", __FUNCTION__));
9659 		goto done;
9660 	}
9661 
9662 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
9663 
9664 #ifdef DHD_PCIE_REG_ACCESS
9665 #ifdef BOARD_HIKEY
9666 #ifndef PCIE_LNK_SPEED_GEN1
9667 #define PCIE_LNK_SPEED_GEN1		0x1
9668 #endif
9669 	/* BUG_ON if link speed is GEN1 in Hikey for 4389B0 */
9670 	if (dhd->bus->sih->buscorerev == 72) {
9671 		if (dhd_get_pcie_linkspeed(dhd) == PCIE_LNK_SPEED_GEN1) {
9672 			DHD_ERROR(("%s: ******* Link Speed is GEN1 *********\n", __FUNCTION__));
9673 			BUG_ON(1);
9674 		}
9675 	}
9676 #endif /* BOARD_HIKEY */
9677 #endif /* DHD_PCIE_REG_ACCESS */
9678 
9679 	if (ioc->cmd == WLC_SET_PM) {
9680 #ifdef DHD_PM_CONTROL_FROM_FILE
9681 		if (g_pm_control == TRUE) {
9682 			DHD_ERROR(("%s: SET PM ignored!(Requested:%d)\n",
9683 				__FUNCTION__, buf ? *(char *)buf : 0));
9684 			goto done;
9685 		}
9686 #endif /* DHD_PM_CONTROL_FROM_FILE */
9687 #ifdef DHD_PM_OVERRIDE
9688 		{
9689 			extern bool g_pm_override;
9690 			if (g_pm_override == TRUE) {
9691 				DHD_ERROR(("%s: PM override SET PM ignored!(Requested:%d)\n",
9692 					__FUNCTION__, buf ? *(char *)buf : 0));
9693 				goto done;
9694 			}
9695 		}
9696 #endif /* DHD_PM_OVERRIDE */
9697 		DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, buf ? *(char *)buf : 0));
9698 	}
9699 
9700 	ASSERT(len <= WLC_IOCTL_MAXLEN);
9701 
9702 	if (len > WLC_IOCTL_MAXLEN)
9703 		goto done;
9704 
9705 	action = ioc->set;
9706 
9707 	dhd_prot_wlioctl_intercept(dhd, ioc, buf);
9708 
9709 #if defined(EXT_STA)
9710 	wl_dbglog_ioctl_add(ioc, len, NULL);
9711 #endif
9712 	if (action & WL_IOCTL_ACTION_SET) {
9713 		ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
9714 	} else {
9715 		ret = dhd_msgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
9716 		if (ret > 0)
9717 			ioc->used = ret;
9718 	}
9719 
9720 	/* Too many programs assume ioctl() returns 0 on success */
9721 	if (ret >= 0) {
9722 		ret = 0;
9723 	} else {
9724 #ifndef DETAIL_DEBUG_LOG_FOR_IOCTL
9725 		DHD_INFO(("%s: status ret value is %d \n", __FUNCTION__, ret));
9726 #endif /* !DETAIL_DEBUG_LOG_FOR_IOCTL */
9727 		dhd->dongle_error = ret;
9728 	}
9729 
9730 	dhd_prot_wl_ioctl_ret_intercept(dhd, ioc, buf, ifidx, ret, len);
9731 
9732 done:
9733 	return ret;
9734 
9735 } /* dhd_prot_ioctl */
9736 
9737 /** test / loopback */
9738 
9739 /*
9740  * XXX: This will fail with new PCIe Split header Full Dongle using fixed
9741  * sized messages in control submission ring. We seem to be sending the lpbk
9742  * data via the control message, wherein the lpbk data may be larger than 1
9743  * control message that is being committed.
9744  */
9745 int
9746 dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len)
9747 {
9748 	unsigned long flags;
9749 	dhd_prot_t *prot = dhd->prot;
9750 	uint16 alloced = 0;
9751 
9752 	ioct_reqst_hdr_t *ioct_rqst;
9753 
9754 	uint16 hdrlen = sizeof(ioct_reqst_hdr_t);
9755 	uint16 msglen = len + hdrlen;
9756 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
9757 
9758 	msglen = ALIGN_SIZE(msglen, DMA_ALIGN_LEN);
9759 	msglen = LIMIT_TO_MAX(msglen, MSGBUF_MAX_MSG_SIZE);
9760 
9761 #ifdef PCIE_INB_DW
9762 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
9763 		return BCME_ERROR;
9764 #endif /* PCIE_INB_DW */
9765 
9766 	DHD_RING_LOCK(ring->ring_lock, flags);
9767 
9768 	ioct_rqst = (ioct_reqst_hdr_t *)
9769 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
9770 
9771 	if (ioct_rqst == NULL) {
9772 		DHD_RING_UNLOCK(ring->ring_lock, flags);
9773 #ifdef PCIE_INB_DW
9774 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
9775 #endif
9776 		return 0;
9777 	}
9778 
9779 	{
9780 		uint8 *ptr;
9781 		uint16 i;
9782 
9783 		ptr = (uint8 *)ioct_rqst; /* XXX: failure!!! */
9784 		for (i = 0; i < msglen; i++) {
9785 			ptr[i] = i % 256;
9786 		}
9787 	}
9788 
9789 	/* Common msg buf hdr */
9790 	ioct_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
9791 	ring->seqnum++;
9792 
9793 	ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK;
9794 	ioct_rqst->msg.if_id = 0;
9795 	ioct_rqst->msg.flags = ring->current_phase;
9796 
9797 	bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen);
9798 
9799 	/* update ring's WR index and ring doorbell to dongle */
9800 	dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
9801 
9802 	DHD_RING_UNLOCK(ring->ring_lock, flags);
9803 
9804 #ifdef PCIE_INB_DW
9805 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
9806 #endif
9807 
9808 	return 0;
9809 }
9810 
9811 /** test / loopback */
9812 void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dmaxfer)
9813 {
9814 	if (dmaxfer == NULL)
9815 		return;
9816 
9817 	dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
9818 	dhd_dma_buf_free(dhd, &dmaxfer->dstmem);
9819 }
9820 
9821 /** test / loopback */
9822 int
9823 dhd_prepare_schedule_dmaxfer_free(dhd_pub_t *dhdp)
9824 {
9825 	dhd_prot_t *prot = dhdp->prot;
9826 	dhd_dmaxfer_t *dmaxfer = &prot->dmaxfer;
9827 	dmaxref_mem_map_t *dmap = NULL;
9828 
9829 	dmap = MALLOCZ(dhdp->osh, sizeof(dmaxref_mem_map_t));
9830 	if (!dmap) {
9831 		DHD_ERROR(("%s: dmap alloc failed\n", __FUNCTION__));
9832 		goto mem_alloc_fail;
9833 	}
9834 	dmap->srcmem = &(dmaxfer->srcmem);
9835 	dmap->dstmem = &(dmaxfer->dstmem);
9836 
9837 	DMAXFER_FREE(dhdp, dmap);
9838 	return BCME_OK;
9839 
9840 mem_alloc_fail:
9841 	if (dmap) {
9842 		MFREE(dhdp->osh, dmap, sizeof(dmaxref_mem_map_t));
9843 	}
9844 	return BCME_NOMEM;
9845 } /* dhd_prepare_schedule_dmaxfer_free */
9846 
9847 /** test / loopback */
9848 void
9849 dmaxfer_free_prev_dmaaddr(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
9850 {
9851 
9852 	dhd_dma_buf_free(dhdp, dmmap->srcmem);
9853 	dhd_dma_buf_free(dhdp, dmmap->dstmem);
9854 
9855 	MFREE(dhdp->osh, dmmap, sizeof(dmaxref_mem_map_t));
9856 
9857 	dhdp->bus->dmaxfer_complete = TRUE;
9858 	dhd_os_dmaxfer_wake(dhdp);
9859 } /* dmaxfer_free_prev_dmaaddr */
9860 
9861 /** test / loopback */
9862 int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len,
9863 	uint srcdelay, uint destdelay, dhd_dmaxfer_t *dmaxfer)
9864 {
9865 	uint i = 0, j = 0;
9866 	if (!dmaxfer)
9867 		return BCME_ERROR;
9868 
9869 	/* First free up existing buffers */
9870 	dmaxfer_free_dmaaddr(dhd, dmaxfer);
9871 
9872 	if (dhd_dma_buf_alloc(dhd, &dmaxfer->srcmem, len)) {
9873 		return BCME_NOMEM;
9874 	}
9875 
9876 	if (dhd_dma_buf_alloc(dhd, &dmaxfer->dstmem, len + 8)) {
9877 		dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
9878 		return BCME_NOMEM;
9879 	}
9880 
9881 	dmaxfer->len = len;
9882 
9883 	/* Populate source with a pattern like below
9884 	 * 0x00000000
9885 	 * 0x01010101
9886 	 * 0x02020202
9887 	 * 0x03030303
9888 	 * 0x04040404
9889 	 * 0x05050505
9890 	 * ...
9891 	 * 0xFFFFFFFF
9892 	 */
9893 	while (i < dmaxfer->len) {
9894 		((uint8*)dmaxfer->srcmem.va)[i] = j % 256;
9895 		i++;
9896 		if (i % 4 == 0) {
9897 			j++;
9898 		}
9899 	}
9900 
9901 	OSL_CACHE_FLUSH(dmaxfer->srcmem.va, dmaxfer->len);
9902 
9903 	dmaxfer->srcdelay = srcdelay;
9904 	dmaxfer->destdelay = destdelay;
9905 
9906 	return BCME_OK;
9907 } /* dmaxfer_prepare_dmaaddr */
9908 
9909 static void
9910 dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg)
9911 {
9912 	dhd_prot_t *prot = dhd->prot;
9913 	uint64 end_usec;
9914 	pcie_dmaxfer_cmplt_t *cmplt = (pcie_dmaxfer_cmplt_t *)msg;
9915 	int buf_free_scheduled;
9916 	int err = 0;
9917 
9918 	BCM_REFERENCE(cmplt);
9919 	end_usec = OSL_SYSUPTIME_US();
9920 
9921 #if defined(DHD_EFI) && defined(DHD_INTR_POLL_PERIOD_DYNAMIC)
9922 	/* restore interrupt poll period to the previous existing value */
9923 	dhd_os_set_intr_poll_period(dhd->bus, dhd->cur_intr_poll_period);
9924 #endif /* DHD_EFI && DHD_INTR_POLL_PERIOD_DYNAMIC */
9925 
9926 	DHD_ERROR(("DMA loopback status: %d\n", cmplt->compl_hdr.status));
9927 	prot->dmaxfer.status = cmplt->compl_hdr.status;
9928 	OSL_CACHE_INV(prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
9929 	if (prot->dmaxfer.d11_lpbk != M2M_WRITE_TO_RAM &&
9930 		prot->dmaxfer.d11_lpbk != M2M_READ_FROM_RAM &&
9931 		prot->dmaxfer.d11_lpbk != D11_WRITE_TO_RAM &&
9932 		prot->dmaxfer.d11_lpbk != D11_READ_FROM_RAM) {
9933 		err = memcmp(prot->dmaxfer.srcmem.va,
9934 			prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
9935 	}
9936 	if (prot->dmaxfer.srcmem.va && prot->dmaxfer.dstmem.va) {
9937 		if (err ||
9938 		        cmplt->compl_hdr.status != BCME_OK) {
9939 		        DHD_ERROR(("DMA loopback failed\n"));
9940 			/* it is observed that some times the completion
9941 			 * header status is set as OK, but the memcmp fails
9942 			 * hence always explicitly set the dmaxfer status
9943 			 * as error if this happens.
9944 			 */
9945 			prot->dmaxfer.status = BCME_ERROR;
9946 			prhex("XFER SRC: ",
9947 			    prot->dmaxfer.srcmem.va, prot->dmaxfer.len);
9948 			prhex("XFER DST: ",
9949 			    prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
9950 		}
9951 		else {
9952 			switch (prot->dmaxfer.d11_lpbk) {
9953 			case M2M_DMA_LPBK: {
9954 				DHD_ERROR(("DMA successful pcie m2m DMA loopback\n"));
9955 				} break;
9956 			case D11_LPBK: {
9957 				DHD_ERROR(("DMA successful with d11 loopback\n"));
9958 				} break;
9959 			case BMC_LPBK: {
9960 				DHD_ERROR(("DMA successful with bmc loopback\n"));
9961 				} break;
9962 			case M2M_NON_DMA_LPBK: {
9963 				DHD_ERROR(("DMA successful pcie m2m NON DMA loopback\n"));
9964 				} break;
9965 			case D11_HOST_MEM_LPBK: {
9966 				DHD_ERROR(("DMA successful d11 host mem loopback\n"));
9967 				} break;
9968 			case BMC_HOST_MEM_LPBK: {
9969 				DHD_ERROR(("DMA successful bmc host mem loopback\n"));
9970 				} break;
9971 			case M2M_WRITE_TO_RAM: {
9972 				DHD_ERROR(("DMA successful pcie m2m write to ram\n"));
9973 				} break;
9974 			case M2M_READ_FROM_RAM: {
9975 				DHD_ERROR(("DMA successful pcie m2m read from ram\n"));
9976 				prhex("XFER DST: ",
9977 					prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
9978 				} break;
9979 			case D11_WRITE_TO_RAM: {
9980 				DHD_ERROR(("DMA successful D11 write to ram\n"));
9981 				} break;
9982 			case D11_READ_FROM_RAM: {
9983 				DHD_ERROR(("DMA successful D11 read from ram\n"));
9984 				prhex("XFER DST: ",
9985 					prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
9986 				} break;
9987 			default: {
9988 				DHD_ERROR(("Invalid loopback option\n"));
9989 				} break;
9990 			}
9991 
9992 			if (DHD_LPBKDTDUMP_ON()) {
9993 				/* debug info print of the Tx and Rx buffers */
9994 				dhd_prhex("XFER SRC: ", prot->dmaxfer.srcmem.va,
9995 					prot->dmaxfer.len, DHD_INFO_VAL);
9996 				dhd_prhex("XFER DST: ", prot->dmaxfer.dstmem.va,
9997 					prot->dmaxfer.len, DHD_INFO_VAL);
9998 			}
9999 		}
10000 	}
10001 
10002 	buf_free_scheduled = dhd_prepare_schedule_dmaxfer_free(dhd);
10003 	end_usec -= prot->dmaxfer.start_usec;
10004 	if (end_usec) {
10005 		prot->dmaxfer.time_taken = end_usec;
10006 		DHD_ERROR(("DMA loopback %d bytes in %lu usec, %u kBps\n",
10007 			prot->dmaxfer.len, (unsigned long)end_usec,
10008 			(prot->dmaxfer.len * (1000 * 1000 / 1024) / (uint32)end_usec)));
10009 	}
10010 	dhd->prot->dmaxfer.in_progress = FALSE;
10011 
10012 	if (buf_free_scheduled != BCME_OK) {
10013 		dhd->bus->dmaxfer_complete = TRUE;
10014 		dhd_os_dmaxfer_wake(dhd);
10015 	}
10016 }
10017 
10018 /** Test functionality.
10019  * Transfers bytes from host to dongle and to host again using DMA
10020  * This function is not reentrant, as prot->dmaxfer.in_progress is not protected
10021  * by a spinlock.
10022  */
10023 int
10024 dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay,
10025 	uint d11_lpbk, uint core_num, uint32 mem_addr)
10026 {
10027 	unsigned long flags;
10028 	int ret = BCME_OK;
10029 	dhd_prot_t *prot = dhd->prot;
10030 	pcie_dma_xfer_params_t *dmap;
10031 	uint32 xferlen = LIMIT_TO_MAX(len, DMA_XFER_LEN_LIMIT);
10032 	uint16 alloced = 0;
10033 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
10034 
10035 	/* XXX: prot->dmaxfer.in_progress is not protected by lock */
10036 	if (prot->dmaxfer.in_progress) {
10037 		DHD_ERROR(("DMA is in progress...\n"));
10038 		return BCME_ERROR;
10039 	}
10040 
10041 	if (d11_lpbk >= MAX_LPBK) {
10042 		DHD_ERROR(("loopback mode should be either"
10043 			" 0-PCIE_M2M_DMA, 1-D11, 2-BMC or 3-PCIE_M2M_NonDMA\n"));
10044 		return BCME_ERROR;
10045 	}
10046 
10047 #ifdef PCIE_INB_DW
10048 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) {
10049 		return BCME_ERROR;
10050 	}
10051 #endif /* PCIE_INB_DW */
10052 
10053 	prot->dmaxfer.in_progress = TRUE;
10054 	if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay,
10055 	        &prot->dmaxfer)) != BCME_OK) {
10056 		prot->dmaxfer.in_progress = FALSE;
10057 #ifdef PCIE_INB_DW
10058 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10059 #endif
10060 		return ret;
10061 	}
10062 	DHD_RING_LOCK(ring->ring_lock, flags);
10063 	dmap = (pcie_dma_xfer_params_t *)
10064 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
10065 
10066 	if (dmap == NULL) {
10067 		dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
10068 		prot->dmaxfer.in_progress = FALSE;
10069 		DHD_RING_UNLOCK(ring->ring_lock, flags);
10070 #ifdef PCIE_INB_DW
10071 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10072 #endif
10073 		return BCME_NOMEM;
10074 	}
10075 
10076 	/* Common msg buf hdr */
10077 	dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER;
10078 	dmap->cmn_hdr.request_id = htol32(DHD_FAKE_PKTID);
10079 	dmap->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
10080 	dmap->cmn_hdr.flags = ring->current_phase;
10081 	ring->seqnum++;
10082 
10083 	dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa));
10084 	dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa));
10085 	dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.dstmem.pa));
10086 	dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.dstmem.pa));
10087 	dmap->xfer_len = htol32(prot->dmaxfer.len);
10088 	dmap->srcdelay = htol32(prot->dmaxfer.srcdelay);
10089 	dmap->destdelay = htol32(prot->dmaxfer.destdelay);
10090 	prot->dmaxfer.d11_lpbk = d11_lpbk;
10091 	if (d11_lpbk == M2M_WRITE_TO_RAM) {
10092 		dmap->host_ouput_buf_addr.high = 0x0;
10093 		dmap->host_ouput_buf_addr.low = mem_addr;
10094 	} else if (d11_lpbk == M2M_READ_FROM_RAM) {
10095 		dmap->host_input_buf_addr.high = 0x0;
10096 		dmap->host_input_buf_addr.low = mem_addr;
10097 	} else if (d11_lpbk == D11_WRITE_TO_RAM) {
10098 		dmap->host_ouput_buf_addr.high = 0x0;
10099 		dmap->host_ouput_buf_addr.low = mem_addr;
10100 	} else if (d11_lpbk == D11_READ_FROM_RAM) {
10101 		dmap->host_input_buf_addr.high = 0x0;
10102 		dmap->host_input_buf_addr.low = mem_addr;
10103 	}
10104 	dmap->flags = (((core_num & PCIE_DMA_XFER_FLG_CORE_NUMBER_MASK)
10105 			<< PCIE_DMA_XFER_FLG_CORE_NUMBER_SHIFT) |
10106 			((prot->dmaxfer.d11_lpbk & PCIE_DMA_XFER_FLG_D11_LPBK_MASK)
10107 			 << PCIE_DMA_XFER_FLG_D11_LPBK_SHIFT));
10108 	prot->dmaxfer.start_usec = OSL_SYSUPTIME_US();
10109 
10110 	/* update ring's WR index and ring doorbell to dongle */
10111 	dhd_prot_ring_write_complete(dhd, ring, dmap, 1);
10112 
10113 	DHD_RING_UNLOCK(ring->ring_lock, flags);
10114 
10115 	DHD_ERROR(("DMA loopback Started... on core[%d]\n", core_num));
10116 #ifdef PCIE_INB_DW
10117 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10118 #endif
10119 
10120 	return BCME_OK;
10121 } /* dhdmsgbuf_dmaxfer_req */
10122 
10123 int
10124 dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd, dma_xfer_info_t *result)
10125 {
10126 	dhd_prot_t *prot = dhd->prot;
10127 
10128 	if (prot->dmaxfer.in_progress)
10129 		result->status = DMA_XFER_IN_PROGRESS;
10130 	else if (prot->dmaxfer.status == 0)
10131 		result->status = DMA_XFER_SUCCESS;
10132 	else
10133 		result->status = DMA_XFER_FAILED;
10134 
10135 	result->type = prot->dmaxfer.d11_lpbk;
10136 	result->error_code = prot->dmaxfer.status;
10137 	result->num_bytes = prot->dmaxfer.len;
10138 	result->time_taken = prot->dmaxfer.time_taken;
10139 	if (prot->dmaxfer.time_taken) {
10140 		/* throughput in kBps */
10141 		result->tput =
10142 			(prot->dmaxfer.len * (1000 * 1000 / 1024)) /
10143 			(uint32)prot->dmaxfer.time_taken;
10144 	}
10145 
10146 	return BCME_OK;
10147 }
10148 
10149 /** Called in the process of submitting an ioctl to the dongle */
10150 static int
10151 dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
10152 {
10153 	int ret = 0;
10154 	uint copylen = 0;
10155 
10156 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
10157 
10158 	if (dhd->bus->is_linkdown) {
10159 		DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n",
10160 			__FUNCTION__));
10161 		return -EIO;
10162 	}
10163 
10164 	if (dhd->busstate == DHD_BUS_DOWN) {
10165 		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
10166 		return -EIO;
10167 	}
10168 
10169 	/* don't talk to the dongle if fw is about to be reloaded */
10170 	if (dhd->hang_was_sent) {
10171 		DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
10172 			__FUNCTION__));
10173 		return -EIO;
10174 	}
10175 
10176 	if (cmd == WLC_GET_VAR && buf)
10177 	{
10178 		if (!len || !*(uint8 *)buf) {
10179 			DHD_ERROR(("%s(): Zero length bailing\n", __FUNCTION__));
10180 			ret = BCME_BADARG;
10181 			goto done;
10182 		}
10183 
10184 		/* Respond "bcmerror" and "bcmerrorstr" with local cache */
10185 		copylen = MIN(len, BCME_STRLEN);
10186 
10187 		if ((len >= strlen("bcmerrorstr")) &&
10188 			(!strcmp((char *)buf, "bcmerrorstr"))) {
10189 			strlcpy((char *)buf, bcmerrorstr(dhd->dongle_error), copylen);
10190 			goto done;
10191 		} else if ((len >= strlen("bcmerror")) &&
10192 			!strcmp((char *)buf, "bcmerror")) {
10193 			*(uint32 *)(uint32 *)buf = dhd->dongle_error;
10194 			goto done;
10195 		}
10196 	}
10197 
10198 	DHD_CTL(("query_ioctl: ACTION %d ifdix %d cmd %d len %d \n",
10199 	    action, ifidx, cmd, len));
10200 #ifdef REPORT_FATAL_TIMEOUTS
10201 	/*
10202 	 * These timers "should" be started before sending H2D interrupt.
10203 	 * Think of the scenario where H2D interrupt is fired and the Dongle
10204 	 * responds back immediately. From the DPC we would stop the cmd, bus
10205 	 * timers. But the process context could have switched out leading to
10206 	 * a situation where the timers are Not started yet, but are actually stopped.
10207 	 *
10208 	 * Disable preemption from the time we start the timer until we are done
10209 	 * with seding H2D interrupts.
10210 	 */
10211 	OSL_DISABLE_PREEMPTION(dhd->osh);
10212 	dhd_set_request_id(dhd, dhd->prot->ioctl_trans_id+1, cmd);
10213 	dhd_start_cmd_timer(dhd);
10214 	dhd_start_bus_timer(dhd);
10215 #endif /* REPORT_FATAL_TIMEOUTS */
10216 
10217 	ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
10218 
10219 #ifdef REPORT_FATAL_TIMEOUTS
10220 	/* For some reason if we fail to ring door bell, stop the timers */
10221 	if (ret < 0) {
10222 		DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
10223 		dhd_stop_cmd_timer(dhd);
10224 		dhd_stop_bus_timer(dhd);
10225 		OSL_ENABLE_PREEMPTION(dhd->osh);
10226 		goto done;
10227 	}
10228 	OSL_ENABLE_PREEMPTION(dhd->osh);
10229 #else
10230 	if (ret < 0) {
10231 		DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
10232 		goto done;
10233 	}
10234 #endif /* REPORT_FATAL_TIMEOUTS */
10235 
10236 	/* wait for IOCTL completion message from dongle and get first fragment */
10237 	ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
10238 
10239 done:
10240 	return ret;
10241 }
10242 
10243 void
10244 dhd_msgbuf_iovar_timeout_dump(dhd_pub_t *dhd)
10245 {
10246 	uint32 intstatus;
10247 	dhd_prot_t *prot = dhd->prot;
10248 	dhd->rxcnt_timeout++;
10249 	dhd->rx_ctlerrs++;
10250 	DHD_ERROR(("%s: resumed on timeout rxcnt_timeout%s %d ioctl_cmd %d "
10251 		"trans_id %d state %d busstate=%d ioctl_received=%d\n", __FUNCTION__,
10252 		dhd->is_sched_error ? " due to scheduling problem" : "",
10253 		dhd->rxcnt_timeout, prot->curr_ioctl_cmd, prot->ioctl_trans_id,
10254 		prot->ioctl_state, dhd->busstate, prot->ioctl_received));
10255 #if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
10256 		/* XXX DHD triggers Kernel panic if the resumed on timeout occurrs
10257 		 * due to tasklet or workqueue scheduling problems in the Linux Kernel.
10258 		 * Customer informs that it is hard to find any clue from the
10259 		 * host memory dump since the important tasklet or workqueue information
10260 		 * is already disappered due the latency while printing out the timestamp
10261 		 * logs for debugging scan timeout issue.
10262 		 * For this reason, customer requestes us to trigger Kernel Panic rather than
10263 		 * taking a SOCRAM dump.
10264 		 */
10265 		if (dhd->is_sched_error && dhd->memdump_enabled == DUMP_MEMFILE_BUGON) {
10266 			/* change g_assert_type to trigger Kernel panic */
10267 			g_assert_type = 2;
10268 			/* use ASSERT() to trigger panic */
10269 			ASSERT(0);
10270 		}
10271 #endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
10272 
10273 	if (prot->curr_ioctl_cmd == WLC_SET_VAR ||
10274 			prot->curr_ioctl_cmd == WLC_GET_VAR) {
10275 		char iovbuf[32];
10276 		int dump_size = 128;
10277 		uint8 *ioctl_buf = (uint8 *)prot->ioctbuf.va;
10278 		memset(iovbuf, 0, sizeof(iovbuf));
10279 		strncpy(iovbuf, ioctl_buf, sizeof(iovbuf) - 1);
10280 		iovbuf[sizeof(iovbuf) - 1] = '\0';
10281 		DHD_ERROR(("Current IOVAR (%s): %s\n",
10282 			prot->curr_ioctl_cmd == WLC_SET_VAR ?
10283 			"WLC_SET_VAR" : "WLC_GET_VAR", iovbuf));
10284 		DHD_ERROR(("========== START IOCTL REQBUF DUMP ==========\n"));
10285 		prhex("ioctl_buf", (const u8 *) ioctl_buf, dump_size);
10286 		DHD_ERROR(("\n========== END IOCTL REQBUF DUMP ==========\n"));
10287 	}
10288 
10289 	/* Check the PCIe link status by reading intstatus register */
10290 	intstatus = si_corereg(dhd->bus->sih,
10291 		dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
10292 	if (intstatus == (uint32)-1) {
10293 		DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
10294 		dhd->bus->is_linkdown = TRUE;
10295 	}
10296 
10297 	dhd_bus_dump_console_buffer(dhd->bus);
10298 	dhd_prot_debug_info_print(dhd);
10299 }
10300 
10301 /**
10302  * Waits for IOCTL completion message from the dongle, copies this into caller
10303  * provided parameter 'buf'.
10304  */
10305 static int
10306 dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf)
10307 {
10308 	dhd_prot_t *prot = dhd->prot;
10309 	int timeleft;
10310 	unsigned long flags;
10311 	int ret = 0;
10312 	static uint cnt = 0;
10313 
10314 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
10315 
10316 	if (dhd_query_bus_erros(dhd)) {
10317 		ret = -EIO;
10318 		goto out;
10319 	}
10320 #ifdef GDB_PROXY
10321 	/* Loop while timeout is caused by firmware stop in GDB */
10322 	{
10323 		uint32 prev_stop_count;
10324 		do {
10325 			prev_stop_count = dhd->gdb_proxy_stop_count;
10326 			timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
10327 		} while ((timeleft == 0) && ((dhd->gdb_proxy_stop_count != prev_stop_count) ||
10328 			(dhd->gdb_proxy_stop_count & GDB_PROXY_STOP_MASK)));
10329 	}
10330 #else
10331 	timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
10332 #endif /* GDB_PROXY */
10333 
10334 #ifdef DHD_RECOVER_TIMEOUT
10335 	if (prot->ioctl_received == 0) {
10336 		uint32 intstatus = si_corereg(dhd->bus->sih,
10337 			dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
10338 		int host_irq_disbled = dhdpcie_irq_disabled(dhd->bus);
10339 		if ((intstatus) && (intstatus != (uint32)-1) &&
10340 			(timeleft == 0) && (!dhd_query_bus_erros(dhd))) {
10341 			DHD_ERROR(("%s: iovar timeout trying again intstatus=%x"
10342 				" host_irq_disabled=%d\n",
10343 				__FUNCTION__, intstatus, host_irq_disbled));
10344 			dhd_pcie_intr_count_dump(dhd);
10345 			dhd_print_tasklet_status(dhd);
10346 			dhd_prot_process_ctrlbuf(dhd);
10347 			timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
10348 			/* Clear Interrupts */
10349 			dhdpcie_bus_clear_intstatus(dhd->bus);
10350 		}
10351 	}
10352 #endif /* DHD_RECOVER_TIMEOUT */
10353 
10354 	if (dhd->conf->ctrl_resched > 0 && timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
10355 		cnt++;
10356 		if (cnt <= dhd->conf->ctrl_resched) {
10357 			uint buscorerev = dhd->bus->sih->buscorerev;
10358 			uint32 intstatus = 0, intmask = 0;
10359 			intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt(buscorerev), 0, 0);
10360 			intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask(buscorerev), 0, 0);
10361 			if (intstatus) {
10362 				DHD_ERROR(("%s: reschedule dhd_dpc, cnt=%d, intstatus=0x%x, intmask=0x%x\n",
10363 					__FUNCTION__, cnt, intstatus, intmask));
10364 				dhd->bus->intstatus = intstatus;
10365 				dhd->bus->ipend = TRUE;
10366 				dhd->bus->dpc_sched = TRUE;
10367 				dhd_sched_dpc(dhd);
10368 				timeleft = dhd_os_ioctl_resp_wait(dhd, &prot->ioctl_received);
10369 			}
10370 		}
10371 	} else {
10372 		cnt = 0;
10373 	}
10374 
10375 	if (timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
10376 		if (dhd->check_trap_rot) {
10377 			/* check dongle trap first */
10378 			DHD_ERROR(("Check dongle trap in the case of iovar timeout\n"));
10379 			dhd_bus_checkdied(dhd->bus, NULL, 0);
10380 
10381 			if (dhd->dongle_trap_occured) {
10382 #ifdef SUPPORT_LINKDOWN_RECOVERY
10383 #ifdef CONFIG_ARCH_MSM
10384 				dhd->bus->no_cfg_restore = 1;
10385 #endif /* CONFIG_ARCH_MSM */
10386 #endif /* SUPPORT_LINKDOWN_RECOVERY */
10387 				ret = -EREMOTEIO;
10388 				goto out;
10389 			}
10390 		}
10391 		/* check if resumed on time out related to scheduling issue */
10392 		dhd->is_sched_error = dhd_bus_query_dpc_sched_errors(dhd);
10393 
10394 		dhd->iovar_timeout_occured = TRUE;
10395 		dhd_msgbuf_iovar_timeout_dump(dhd);
10396 
10397 #ifdef DHD_FW_COREDUMP
10398 		/* Collect socram dump */
10399 		if (dhd->memdump_enabled) {
10400 			/* collect core dump */
10401 			dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT;
10402 			dhd_bus_mem_dump(dhd);
10403 		}
10404 #endif /* DHD_FW_COREDUMP */
10405 
10406 #ifdef DHD_EFI
10407 		/*
10408 		* for ioctl timeout, recovery is triggered only for EFI case, because
10409 		* in linux, dhd daemon will itself trap the FW,
10410 		* so if recovery is triggered
10411 		* then there is a race between FLR and daemon initiated trap
10412 		*/
10413 		dhd_schedule_reset(dhd);
10414 #endif /* DHD_EFI */
10415 
10416 #ifdef SUPPORT_LINKDOWN_RECOVERY
10417 #ifdef CONFIG_ARCH_MSM
10418 		dhd->bus->no_cfg_restore = 1;
10419 #endif /* CONFIG_ARCH_MSM */
10420 #endif /* SUPPORT_LINKDOWN_RECOVERY */
10421 		ret = -ETIMEDOUT;
10422 		goto out;
10423 	} else {
10424 		if (prot->ioctl_received != IOCTL_RETURN_ON_SUCCESS) {
10425 			DHD_ERROR(("%s: IOCTL failure due to ioctl_received = %d\n",
10426 				__FUNCTION__, prot->ioctl_received));
10427 			ret = -EINVAL;
10428 			goto out;
10429 		}
10430 		dhd->rxcnt_timeout = 0;
10431 		dhd->rx_ctlpkts++;
10432 		DHD_CTL(("%s: ioctl resp resumed, got %d\n",
10433 			__FUNCTION__, prot->ioctl_resplen));
10434 	}
10435 
10436 	if (dhd->prot->ioctl_resplen > len)
10437 		dhd->prot->ioctl_resplen = (uint16)len;
10438 	if (buf)
10439 		bcopy(dhd->prot->retbuf.va, buf, dhd->prot->ioctl_resplen);
10440 
10441 	ret = (int)(dhd->prot->ioctl_status);
10442 
10443 out:
10444 	DHD_GENERAL_LOCK(dhd, flags);
10445 	dhd->prot->ioctl_state = 0;
10446 	dhd->prot->ioctl_resplen = 0;
10447 	dhd->prot->ioctl_received = IOCTL_WAIT;
10448 	dhd->prot->curr_ioctl_cmd = 0;
10449 	DHD_GENERAL_UNLOCK(dhd, flags);
10450 
10451 	return ret;
10452 } /* dhd_msgbuf_wait_ioctl_cmplt */
10453 
10454 static int
10455 dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
10456 {
10457 	int ret = 0;
10458 
10459 	DHD_TRACE(("%s: Enter \n", __FUNCTION__));
10460 
10461 	if (dhd->bus->is_linkdown) {
10462 		DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n",
10463 			__FUNCTION__));
10464 		return -EIO;
10465 	}
10466 
10467 	if (dhd->busstate == DHD_BUS_DOWN) {
10468 		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
10469 		return -EIO;
10470 	}
10471 
10472 	/* don't talk to the dongle if fw is about to be reloaded */
10473 	if (dhd->hang_was_sent) {
10474 		DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
10475 			__FUNCTION__));
10476 		return -EIO;
10477 	}
10478 
10479 	DHD_CTL(("ACTION %d ifdix %d cmd %d len %d \n",
10480 		action, ifidx, cmd, len));
10481 
10482 #ifdef REPORT_FATAL_TIMEOUTS
10483 	/*
10484 	 * These timers "should" be started before sending H2D interrupt.
10485 	 * Think of the scenario where H2D interrupt is fired and the Dongle
10486 	 * responds back immediately. From the DPC we would stop the cmd, bus
10487 	 * timers. But the process context could have switched out leading to
10488 	 * a situation where the timers are Not started yet, but are actually stopped.
10489 	 *
10490 	 * Disable preemption from the time we start the timer until we are done
10491 	 * with seding H2D interrupts.
10492 	 */
10493 	OSL_DISABLE_PREEMPTION(dhd->osh);
10494 	dhd_set_request_id(dhd, dhd->prot->ioctl_trans_id+1, cmd);
10495 	dhd_start_cmd_timer(dhd);
10496 	dhd_start_bus_timer(dhd);
10497 #endif /* REPORT_FATAL_TIMEOUTS */
10498 
10499 	/* Fill up msgbuf for ioctl req */
10500 	ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
10501 
10502 #ifdef REPORT_FATAL_TIMEOUTS
10503 	/* For some reason if we fail to ring door bell, stop the timers */
10504 	if (ret < 0) {
10505 		DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
10506 		dhd_stop_cmd_timer(dhd);
10507 		dhd_stop_bus_timer(dhd);
10508 		OSL_ENABLE_PREEMPTION(dhd->osh);
10509 		goto done;
10510 	}
10511 
10512 	OSL_ENABLE_PREEMPTION(dhd->osh);
10513 #else
10514 	if (ret < 0) {
10515 		DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
10516 		goto done;
10517 	}
10518 #endif /* REPORT_FATAL_TIMEOUTS */
10519 
10520 	ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
10521 
10522 done:
10523 	return ret;
10524 }
10525 
10526 /** Called by upper DHD layer. Handles a protocol control response asynchronously. */
10527 int dhd_prot_ctl_complete(dhd_pub_t *dhd)
10528 {
10529 	return 0;
10530 }
10531 
10532 /** Called by upper DHD layer. Check for and handle local prot-specific iovar commands */
10533 int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name,
10534                              void *params, int plen, void *arg, int len, bool set)
10535 {
10536 	return BCME_UNSUPPORTED;
10537 }
10538 
10539 #ifdef DHD_DUMP_PCIE_RINGS
10540 int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, const void *user_buf,
10541 	unsigned long *file_posn, bool file_write)
10542 {
10543 	dhd_prot_t *prot;
10544 	msgbuf_ring_t *ring;
10545 	int ret = 0;
10546 	uint16 h2d_flowrings_total;
10547 	uint16 flowid;
10548 
10549 	if (!(dhd) || !(dhd->prot)) {
10550 		goto exit;
10551 	}
10552 	prot = dhd->prot;
10553 
10554 	/* Below is the same ring dump sequence followed in parser as well. */
10555 	ring = &prot->h2dring_ctrl_subn;
10556 	if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
10557 		goto exit;
10558 
10559 	ring = &prot->h2dring_rxp_subn;
10560 	if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
10561 		goto exit;
10562 
10563 	ring = &prot->d2hring_ctrl_cpln;
10564 	if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
10565 		goto exit;
10566 
10567 	ring = &prot->d2hring_tx_cpln;
10568 	if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
10569 		goto exit;
10570 
10571 	ring = &prot->d2hring_rx_cpln;
10572 	if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
10573 		goto exit;
10574 
10575 	h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
10576 	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
10577 		if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) {
10578 			goto exit;
10579 		}
10580 	}
10581 
10582 #ifdef EWP_EDL
10583 	if (dhd->dongle_edl_support) {
10584 		ring = prot->d2hring_edl;
10585 		if ((ret = dhd_edl_ring_hdr_write(dhd, ring, file, user_buf, file_posn)) < 0)
10586 			goto exit;
10587 	}
10588 	else if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support)
10589 #else
10590 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
10591 #endif /* EWP_EDL */
10592 	{
10593 		ring = prot->h2dring_info_subn;
10594 		if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
10595 			goto exit;
10596 
10597 		ring = prot->d2hring_info_cpln;
10598 		if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
10599 			goto exit;
10600 	}
10601 
10602 exit :
10603 	return ret;
10604 }
10605 
10606 /* Write to file */
10607 static
10608 int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
10609 	const void *user_buf, unsigned long *file_posn)
10610 {
10611 	int ret = 0;
10612 
10613 	if (ring == NULL) {
10614 		DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n",
10615 			__FUNCTION__));
10616 		return BCME_ERROR;
10617 	}
10618 	if (file) {
10619 		ret = dhd_os_write_file_posn(file, file_posn, (char *)(ring->dma_buf.va),
10620 				((unsigned long)(ring->max_items) * (ring->item_len)));
10621 		if (ret < 0) {
10622 			DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
10623 			ret = BCME_ERROR;
10624 		}
10625 	} else if (user_buf) {
10626 		ret = dhd_export_debug_data((char *)(ring->dma_buf.va), NULL, user_buf,
10627 			((unsigned long)(ring->max_items) * (ring->item_len)), (int *)file_posn);
10628 	}
10629 	return ret;
10630 }
10631 
10632 #ifdef EWP_EDL
10633 /* Write to file */
10634 static
10635 int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf,
10636 	unsigned long *file_posn)
10637 {
10638 	int ret = 0, nitems = 0;
10639 	char *buf = NULL, *ptr = NULL;
10640 	uint8 *msg_addr = NULL;
10641 	uint16	rd = 0;
10642 
10643 	if (ring == NULL) {
10644 		DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n",
10645 			__FUNCTION__));
10646 		ret = BCME_ERROR;
10647 		goto done;
10648 	}
10649 
10650 	buf = MALLOCZ(dhd->osh, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
10651 	if (buf == NULL) {
10652 		DHD_ERROR(("%s: buffer allocation failed\n", __FUNCTION__));
10653 		ret = BCME_ERROR;
10654 		goto done;
10655 	}
10656 	ptr = buf;
10657 
10658 	for (; nitems < D2HRING_EDL_MAX_ITEM; nitems++, rd++) {
10659 		msg_addr = (uint8 *)ring->dma_buf.va + (rd * ring->item_len);
10660 		memcpy(ptr, (char *)msg_addr, D2HRING_EDL_HDR_SIZE);
10661 		ptr += D2HRING_EDL_HDR_SIZE;
10662 	}
10663 	if (file) {
10664 		ret = dhd_os_write_file_posn(file, file_posn, buf,
10665 				(D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM));
10666 		if (ret < 0) {
10667 			DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
10668 			goto done;
10669 		}
10670 	}
10671 	else {
10672 		ret = dhd_export_debug_data(buf, NULL, user_buf,
10673 			(D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM), file_posn);
10674 	}
10675 
10676 done:
10677 	if (buf) {
10678 		MFREE(dhd->osh, buf, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
10679 	}
10680 	return ret;
10681 }
10682 #endif /* EWP_EDL */
10683 #endif /* DHD_DUMP_PCIE_RINGS */
10684 
10685 /** Add prot dump output to a buffer */
10686 void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
10687 {
10688 #if defined(BCM_ROUTER_DHD)
10689 	bcm_bprintf(b, "DHD Router: 1GMAC HotBRC forwarding mode\n");
10690 #endif /* BCM_ROUTER_DHD */
10691 
10692 	if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM)
10693 		bcm_bprintf(b, "\nd2h_sync: SEQNUM:");
10694 	else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM)
10695 		bcm_bprintf(b, "\nd2h_sync: XORCSUM:");
10696 	else
10697 		bcm_bprintf(b, "\nd2h_sync: NONE:");
10698 	bcm_bprintf(b, " d2h_sync_wait max<%lu> tot<%lu>\n",
10699 		dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot);
10700 
10701 	bcm_bprintf(b, "\nDongle DMA Indices: h2d %d  d2h %d index size %d bytes\n",
10702 		dhd->dma_h2d_ring_upd_support,
10703 		dhd->dma_d2h_ring_upd_support,
10704 		dhd->prot->rw_index_sz);
10705 	bcm_bprintf(b, "h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
10706 		h2d_max_txpost, dhd->prot->h2d_max_txpost);
10707 #if defined(DHD_HTPUT_TUNABLES)
10708 	bcm_bprintf(b, "h2d_htput_max_txpost: %d, prot->h2d_htput_max_txpost: %d\n",
10709 		h2d_htput_max_txpost, dhd->prot->h2d_htput_max_txpost);
10710 #endif /* DHD_HTPUT_TUNABLES */
10711 	bcm_bprintf(b, "pktid_txq_start_cnt: %d\n", dhd->prot->pktid_txq_start_cnt);
10712 	bcm_bprintf(b, "pktid_txq_stop_cnt: %d\n", dhd->prot->pktid_txq_stop_cnt);
10713 	bcm_bprintf(b, "pktid_depleted_cnt: %d\n", dhd->prot->pktid_depleted_cnt);
10714 	bcm_bprintf(b, "txcpl_db_cnt: %d\n", dhd->prot->txcpl_db_cnt);
10715 #ifdef DHD_DMA_INDICES_SEQNUM
10716 	bcm_bprintf(b, "host_seqnum %u dngl_seqnum %u\n", dhd_prot_read_seqnum(dhd, TRUE),
10717 		dhd_prot_read_seqnum(dhd, FALSE));
10718 #endif /* DHD_DMA_INDICES_SEQNUM */
10719 	bcm_bprintf(b, "tx_h2d_db_cnt:%llu\n", dhd->prot->tx_h2d_db_cnt);
10720 #ifdef AGG_H2D_DB
10721 	bcm_bprintf(b, "agg_h2d_db_enab:%d agg_h2d_db_timeout:%d agg_h2d_db_inflight_thresh:%d\n",
10722 		agg_h2d_db_enab, agg_h2d_db_timeout, agg_h2d_db_inflight_thresh);
10723 	bcm_bprintf(b, "agg_h2d_db: timer_db_cnt:%d direct_db_cnt:%d\n",
10724 		dhd->prot->agg_h2d_db_info.timer_db_cnt, dhd->prot->agg_h2d_db_info.direct_db_cnt);
10725 	dhd_agg_inflight_stats_dump(dhd, b);
10726 #endif /* AGG_H2D_DB */
10727 }
10728 
10729 /* Update local copy of dongle statistics */
10730 void dhd_prot_dstats(dhd_pub_t *dhd)
10731 {
10732 	return;
10733 }
10734 
10735 /** Called by upper DHD layer */
10736 int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
10737 	uint reorder_info_len, void **pkt, uint32 *free_buf_count)
10738 {
10739 	return 0;
10740 }
10741 
10742 /** Debug related, post a dummy message to interrupt dongle. Used to process cons commands. */
10743 int
10744 dhd_post_dummy_msg(dhd_pub_t *dhd)
10745 {
10746 	unsigned long flags;
10747 	hostevent_hdr_t *hevent = NULL;
10748 	uint16 alloced = 0;
10749 
10750 	dhd_prot_t *prot = dhd->prot;
10751 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
10752 
10753 #ifdef PCIE_INB_DW
10754 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
10755 		return BCME_ERROR;
10756 #endif /* PCIE_INB_DW */
10757 	DHD_RING_LOCK(ring->ring_lock, flags);
10758 
10759 	hevent = (hostevent_hdr_t *)
10760 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
10761 
10762 	if (hevent == NULL) {
10763 		DHD_RING_UNLOCK(ring->ring_lock, flags);
10764 #ifdef PCIE_INB_DW
10765 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10766 #endif
10767 		return -1;
10768 	}
10769 
10770 	/* CMN msg header */
10771 	hevent->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
10772 	ring->seqnum++;
10773 	hevent->msg.msg_type = MSG_TYPE_HOST_EVNT;
10774 	hevent->msg.if_id = 0;
10775 	hevent->msg.flags = ring->current_phase;
10776 
10777 	/* Event payload */
10778 	hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD);
10779 
10780 	/* Since, we are filling the data directly into the bufptr obtained
10781 	 * from the msgbuf, we can directly call the write_complete
10782 	 */
10783 	dhd_prot_ring_write_complete(dhd, ring, hevent, 1);
10784 
10785 	DHD_RING_UNLOCK(ring->ring_lock, flags);
10786 
10787 #ifdef PCIE_INB_DW
10788 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10789 #endif
10790 
10791 	return 0;
10792 }
10793 
10794 /**
10795  * If exactly_nitems is true, this function will allocate space for nitems or fail
10796  * If exactly_nitems is false, this function will allocate space for nitems or less
10797  */
10798 static void *
10799 BCMFASTPATH(dhd_prot_alloc_ring_space)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
10800 	uint16 nitems, uint16 * alloced, bool exactly_nitems)
10801 {
10802 	void * ret_buf;
10803 
10804 	if (nitems == 0) {
10805 		DHD_ERROR(("%s: nitems is 0 - ring(%s)\n", __FUNCTION__, ring->name));
10806 		return NULL;
10807 	}
10808 
10809 	/* Alloc space for nitems in the ring */
10810 	ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
10811 
10812 	if (ret_buf == NULL) {
10813 		/* if alloc failed , invalidate cached read ptr */
10814 		if (dhd->dma_d2h_ring_upd_support) {
10815 			ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
10816 		} else {
10817 			dhd_bus_cmn_readshared(dhd->bus, &(ring->rd), RING_RD_UPD, ring->idx);
10818 #ifdef SUPPORT_LINKDOWN_RECOVERY
10819 			/* Check if ring->rd is valid */
10820 			if (ring->rd >= ring->max_items) {
10821 				DHD_ERROR(("%s: Invalid rd idx=%d\n", ring->name, ring->rd));
10822 				dhd->bus->read_shm_fail = TRUE;
10823 				return NULL;
10824 			}
10825 #endif /* SUPPORT_LINKDOWN_RECOVERY */
10826 		}
10827 
10828 		/* Try allocating once more */
10829 		ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
10830 
10831 		if (ret_buf == NULL) {
10832 			DHD_INFO(("%s: Ring space not available  \n", ring->name));
10833 			return NULL;
10834 		}
10835 	}
10836 
10837 	if (ret_buf == HOST_RING_BASE(ring)) {
10838 		DHD_MSGBUF_INFO(("%s: setting the phase now\n", ring->name));
10839 		ring->current_phase = ring->current_phase ? 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
10840 	}
10841 
10842 	/* Return alloced space */
10843 	return ret_buf;
10844 }
10845 
10846 /**
10847  * Non inline ioct request.
10848  * Form a ioctl request first as per ioctptr_reqst_hdr_t header in the circular buffer
10849  * Form a separate request buffer where a 4 byte cmn header is added in the front
10850  * buf contents from parent function is copied to remaining section of this buffer
10851  */
10852 static int
10853 dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx)
10854 {
10855 	dhd_prot_t *prot = dhd->prot;
10856 	ioctl_req_msg_t *ioct_rqst;
10857 	void * ioct_buf;	/* For ioctl payload */
10858 	uint16  rqstlen, resplen;
10859 	unsigned long flags;
10860 	uint16 alloced = 0;
10861 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
10862 #ifdef DBG_DW_CHK_PCIE_READ_LATENCY
10863 	ulong addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10864 	ktime_t begin_time, end_time;
10865 	s64 diff_ns;
10866 #endif /* DBG_DW_CHK_PCIE_READ_LATENCY */
10867 
10868 	if (dhd_query_bus_erros(dhd)) {
10869 		return -EIO;
10870 	}
10871 
10872 	rqstlen = len;
10873 	resplen = len;
10874 
10875 	/* Limit ioct request to MSGBUF_MAX_MSG_SIZE bytes including hdrs */
10876 	/* 8K allocation of dongle buffer fails */
10877 	/* dhd doesnt give separate input & output buf lens */
10878 	/* so making the assumption that input length can never be more than 2k */
10879 	rqstlen = MIN(rqstlen, MSGBUF_IOCTL_MAX_RQSTLEN);
10880 
10881 #ifdef PCIE_INB_DW
10882 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
10883 		return BCME_ERROR;
10884 
10885 #ifdef DBG_DW_CHK_PCIE_READ_LATENCY
10886 	preempt_disable();
10887 	begin_time = ktime_get();
10888 	R_REG(dhd->osh, (volatile uint16 *)(dhd->bus->tcm + addr));
10889 	end_time = ktime_get();
10890 	preempt_enable();
10891 	diff_ns = ktime_to_ns(ktime_sub(end_time, begin_time));
10892 	/* Check if the delta is greater than 1 msec */
10893 	if (diff_ns > (1 * NSEC_PER_MSEC)) {
10894 		DHD_ERROR(("%s: found latency over 1ms (%lld ns), ds state=%d\n", __func__,
10895 		       diff_ns, dhdpcie_bus_get_pcie_inband_dw_state(dhd->bus)));
10896 	}
10897 #endif /* DBG_DW_CHK_PCIE_READ_LATENCY */
10898 #endif /* PCIE_INB_DW */
10899 
10900 	DHD_RING_LOCK(ring->ring_lock, flags);
10901 
10902 	if (prot->ioctl_state) {
10903 		DHD_ERROR(("%s: pending ioctl %02x\n", __FUNCTION__, prot->ioctl_state));
10904 		DHD_RING_UNLOCK(ring->ring_lock, flags);
10905 #ifdef PCIE_INB_DW
10906 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10907 #endif
10908 		return BCME_BUSY;
10909 	} else {
10910 		prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING;
10911 	}
10912 
10913 	/* Request for cbuf space */
10914 	ioct_rqst = (ioctl_req_msg_t*)
10915 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
10916 	if (ioct_rqst == NULL) {
10917 		DHD_ERROR(("couldn't allocate space on msgring to send ioctl request\n"));
10918 		prot->ioctl_state = 0;
10919 		prot->curr_ioctl_cmd = 0;
10920 		prot->ioctl_received = IOCTL_WAIT;
10921 		DHD_RING_UNLOCK(ring->ring_lock, flags);
10922 #ifdef PCIE_INB_DW
10923 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10924 #endif
10925 		return -1;
10926 	}
10927 
10928 	/* Common msg buf hdr */
10929 	ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ;
10930 	ioct_rqst->cmn_hdr.if_id = (uint8)ifidx;
10931 	ioct_rqst->cmn_hdr.flags = ring->current_phase;
10932 	ioct_rqst->cmn_hdr.request_id = htol32(DHD_IOCTL_REQ_PKTID);
10933 	ioct_rqst->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
10934 	ring->seqnum++;
10935 
10936 	ioct_rqst->cmd = htol32(cmd);
10937 	prot->curr_ioctl_cmd = cmd;
10938 	ioct_rqst->output_buf_len = htol16(resplen);
10939 	prot->ioctl_trans_id++;
10940 	ioct_rqst->trans_id = prot->ioctl_trans_id;
10941 
10942 	/* populate ioctl buffer info */
10943 	ioct_rqst->input_buf_len = htol16(rqstlen);
10944 	ioct_rqst->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->ioctbuf.pa));
10945 	ioct_rqst->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->ioctbuf.pa));
10946 	/* copy ioct payload */
10947 	ioct_buf = (void *) prot->ioctbuf.va;
10948 
10949 	prot->ioctl_fillup_time = OSL_LOCALTIME_NS();
10950 
10951 	if (buf)
10952 		memcpy(ioct_buf, buf, len);
10953 
10954 	OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len);
10955 
10956 	if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN))
10957 		DHD_ERROR(("host ioct address unaligned !!!!! \n"));
10958 
10959 	DHD_CTL(("submitted IOCTL request request_id %d, cmd %d, output_buf_len %d, tx_id %d\n",
10960 		ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len,
10961 		ioct_rqst->trans_id));
10962 
10963 #if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
10964 	dhd_prot_ioctl_trace(dhd, ioct_rqst, buf, len);
10965 #endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */
10966 
10967 	/* update ring's WR index and ring doorbell to dongle */
10968 	dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
10969 
10970 	DHD_RING_UNLOCK(ring->ring_lock, flags);
10971 
10972 #ifdef PCIE_INB_DW
10973 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10974 #endif
10975 
10976 	return 0;
10977 } /* dhd_fillup_ioct_reqst */
10978 
10979 /**
10980  * dhd_prot_ring_attach - Initialize the msgbuf_ring object and attach a
10981  * DMA-able buffer to it. The ring is NOT tagged as inited until all the ring
10982  * information is posted to the dongle.
10983  *
10984  * Invoked in dhd_prot_attach for the common rings, and in dhd_prot_init for
10985  * each flowring in pool of flowrings.
10986  *
10987  * returns BCME_OK=0 on success
10988  * returns non-zero negative error value on failure.
10989  */
10990 static int
10991 dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, const char *name,
10992 	uint16 max_items, uint16 item_len, uint16 ringid)
10993 {
10994 	int dma_buf_alloced = BCME_NOMEM;
10995 	uint32 dma_buf_len;
10996 	dhd_prot_t *prot = dhd->prot;
10997 	uint16 max_flowrings = dhd->bus->max_tx_flowrings;
10998 	dhd_dma_buf_t *dma_buf = NULL;
10999 
11000 	ASSERT(ring);
11001 	ASSERT(name);
11002 	ASSERT((max_items < 0xFFFF) && (item_len < 0xFFFF) && (ringid < 0xFFFF));
11003 
11004 	/* Init name */
11005 	strlcpy((char *)ring->name, name, sizeof(ring->name));
11006 
11007 	ring->idx = ringid;
11008 
11009 #if defined(DHD_HTPUT_TUNABLES)
11010 	/* Use HTPUT max items */
11011 	if (DHD_IS_FLOWRING(ringid, max_flowrings) &&
11012 		DHD_IS_FLOWID_HTPUT(dhd, DHD_RINGID_TO_FLOWID(ringid))) {
11013 		max_items = prot->h2d_htput_max_txpost;
11014 	}
11015 #endif /* DHD_HTPUT_TUNABLES */
11016 
11017 	dma_buf_len = max_items * item_len;
11018 
11019 	ring->max_items = max_items;
11020 	ring->item_len = item_len;
11021 
11022 	/* A contiguous space may be reserved for all flowrings */
11023 	if (DHD_IS_FLOWRING(ringid, max_flowrings) && (prot->flowrings_dma_buf.va)) {
11024 		/* Carve out from the contiguous DMA-able flowring buffer */
11025 		uint16 flowid;
11026 		uint32 base_offset;
11027 		dhd_dma_buf_t *rsv_buf = &prot->flowrings_dma_buf;
11028 
11029 		dma_buf = &ring->dma_buf;
11030 
11031 		flowid = DHD_RINGID_TO_FLOWID(ringid);
11032 		base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len;
11033 
11034 		ASSERT(base_offset + dma_buf_len <= rsv_buf->len);
11035 
11036 		dma_buf->len = dma_buf_len;
11037 		dma_buf->va = (void *)((uintptr)rsv_buf->va + base_offset);
11038 		PHYSADDRHISET(dma_buf->pa, PHYSADDRHI(rsv_buf->pa));
11039 		PHYSADDRLOSET(dma_buf->pa, PHYSADDRLO(rsv_buf->pa) + base_offset);
11040 
11041 		/* On 64bit, contiguous space may not span across 0x00000000FFFFFFFF */
11042 		ASSERT(PHYSADDRLO(dma_buf->pa) >= PHYSADDRLO(rsv_buf->pa));
11043 
11044 		dma_buf->dmah   = rsv_buf->dmah;
11045 		dma_buf->secdma = rsv_buf->secdma;
11046 
11047 		(void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
11048 	} else {
11049 #ifdef EWP_EDL
11050 		if (ring == dhd->prot->d2hring_edl) {
11051 			/* For EDL ring, memory is alloced during attach,
11052 			* so just need to copy the dma_buf to the ring's dma_buf
11053 			*/
11054 			memcpy(&ring->dma_buf, &dhd->edl_ring_mem, sizeof(ring->dma_buf));
11055 			dma_buf = &ring->dma_buf;
11056 			if (dma_buf->va == NULL) {
11057 				return BCME_NOMEM;
11058 			}
11059 		} else
11060 #endif /* EWP_EDL */
11061 		{
11062 			/* Allocate a dhd_dma_buf */
11063 			dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len);
11064 			if (dma_buf_alloced != BCME_OK) {
11065 				return BCME_NOMEM;
11066 			}
11067 		}
11068 	}
11069 
11070 	/* CAUTION: Save ring::base_addr in little endian format! */
11071 	dhd_base_addr_htolpa(&ring->base_addr, ring->dma_buf.pa);
11072 
11073 	ring->ring_lock = osl_spin_lock_init(dhd->osh);
11074 
11075 	DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d "
11076 		"ring start %p buf phys addr  %x:%x \n",
11077 		ring->name, ring->max_items, ring->item_len,
11078 		dma_buf_len, ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
11079 		ltoh32(ring->base_addr.low_addr)));
11080 
11081 	return BCME_OK;
11082 } /* dhd_prot_ring_attach */
11083 
11084 /**
11085  * dhd_prot_ring_init - Post the common ring information to dongle.
11086  *
11087  * Used only for common rings.
11088  *
11089  * The flowrings information is passed via the create flowring control message
11090  * (tx_flowring_create_request_t) sent over the H2D control submission common
11091  * ring.
11092  */
11093 static void
11094 dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring)
11095 {
11096 	ring->wr = 0;
11097 	ring->rd = 0;
11098 	ring->curr_rd = 0;
11099 
11100 	/* CAUTION: ring::base_addr already in Little Endian */
11101 	dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr,
11102 		sizeof(sh_addr_t), RING_BUF_ADDR, ring->idx);
11103 	dhd_bus_cmn_writeshared(dhd->bus, &ring->max_items,
11104 		sizeof(uint16), RING_MAX_ITEMS, ring->idx);
11105 	dhd_bus_cmn_writeshared(dhd->bus, &ring->item_len,
11106 		sizeof(uint16), RING_ITEM_LEN, ring->idx);
11107 
11108 	dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
11109 		sizeof(uint16), RING_WR_UPD, ring->idx);
11110 	dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
11111 		sizeof(uint16), RING_RD_UPD, ring->idx);
11112 
11113 	/* ring inited */
11114 	ring->inited = TRUE;
11115 
11116 } /* dhd_prot_ring_init */
11117 
11118 /**
11119  * dhd_prot_ring_reset - bzero a ring's DMA-ble buffer and cache flush
11120  * Reset WR and RD indices to 0.
11121  */
11122 static void
11123 dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring)
11124 {
11125 	DHD_TRACE(("%s\n", __FUNCTION__));
11126 
11127 	dhd_dma_buf_reset(dhd, &ring->dma_buf);
11128 
11129 	ring->rd = ring->wr = 0;
11130 	ring->curr_rd = 0;
11131 	ring->inited = FALSE;
11132 	ring->create_pending = FALSE;
11133 }
11134 
11135 /**
11136  * dhd_prot_ring_detach - Detach the DMA-able buffer and any other objects
11137  * hanging off the msgbuf_ring.
11138  */
11139 static void
11140 dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring)
11141 {
11142 	dhd_prot_t *prot = dhd->prot;
11143 	uint16 max_flowrings = dhd->bus->max_tx_flowrings;
11144 	ASSERT(ring);
11145 
11146 	ring->inited = FALSE;
11147 	/* rd = ~0, wr = ring->rd - 1, max_items = 0, len_item = ~0 */
11148 
11149 	/* If the DMA-able buffer was carved out of a pre-reserved contiguous
11150 	 * memory, then simply stop using it.
11151 	 */
11152 	if (DHD_IS_FLOWRING(ring->idx, max_flowrings) && (prot->flowrings_dma_buf.va)) {
11153 		(void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
11154 		memset(&ring->dma_buf, 0, sizeof(dhd_dma_buf_t));
11155 	} else {
11156 #ifdef EWP_EDL
11157 		if (ring == dhd->prot->d2hring_edl) {
11158 			/* For EDL ring, do not free ring mem here,
11159 			* it is done in dhd_detach
11160 			*/
11161 			memset(&ring->dma_buf, 0, sizeof(ring->dma_buf));
11162 		} else
11163 #endif /* EWP_EDL */
11164 		{
11165 			dhd_dma_buf_free(dhd, &ring->dma_buf);
11166 		}
11167 	}
11168 
11169 	osl_spin_lock_deinit(dhd->osh, ring->ring_lock);
11170 
11171 } /* dhd_prot_ring_detach */
11172 
11173 /* Fetch number of H2D flowrings given the total number of h2d rings */
11174 uint16
11175 dhd_get_max_flow_rings(dhd_pub_t *dhd)
11176 {
11177 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
11178 		return dhd->bus->max_tx_flowrings;
11179 	else
11180 		return (dhd->bus->max_tx_flowrings - BCMPCIE_H2D_COMMON_MSGRINGS);
11181 }
11182 
11183 /**
11184  * dhd_prot_flowrings_pool_attach - Initialize a pool of flowring msgbuf_ring_t.
11185  *
11186  * Allocate a pool of msgbuf_ring along with DMA-able buffers for flowrings.
11187  * Dongle includes common rings when it advertizes the number of H2D rings.
11188  * Allocates a pool of msgbuf_ring_t and invokes dhd_prot_ring_attach to
11189  * allocate the DMA-able buffer and initialize each msgbuf_ring_t object.
11190  *
11191  * dhd_prot_ring_attach is invoked to perform the actual initialization and
11192  * attaching the DMA-able buffer.
11193  *
11194  * Later dhd_prot_flowrings_pool_fetch() may be used to fetch a preallocated and
11195  * initialized msgbuf_ring_t object.
11196  *
11197  * returns BCME_OK=0 on success
11198  * returns non-zero negative error value on failure.
11199  */
11200 static int
11201 dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd)
11202 {
11203 	uint16 flowid;
11204 	msgbuf_ring_t *ring;
11205 	uint16 h2d_flowrings_total; /* exclude H2D common rings */
11206 	dhd_prot_t *prot = dhd->prot;
11207 	char ring_name[RING_NAME_MAX_LENGTH];
11208 
11209 	if (prot->h2d_flowrings_pool != NULL)
11210 		return BCME_OK; /* dhd_prot_init rentry after a dhd_prot_reset */
11211 
11212 	ASSERT(prot->h2d_rings_total == 0);
11213 
11214 	/* h2d_rings_total includes H2D common rings: ctrl and rxbuf subn */
11215 	prot->h2d_rings_total = (uint16)dhd_bus_max_h2d_queues(dhd->bus);
11216 
11217 	if (prot->h2d_rings_total < BCMPCIE_H2D_COMMON_MSGRINGS) {
11218 		DHD_ERROR(("%s: h2d_rings_total advertized as %u\n",
11219 			__FUNCTION__, prot->h2d_rings_total));
11220 		return BCME_ERROR;
11221 	}
11222 
11223 	/* Subtract number of H2D common rings, to determine number of flowrings */
11224 	h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
11225 
11226 	DHD_ERROR(("Attach flowrings pool for %d rings\n", h2d_flowrings_total));
11227 
11228 	/* Allocate pool of msgbuf_ring_t objects for all flowrings */
11229 	prot->h2d_flowrings_pool = (msgbuf_ring_t *)MALLOCZ(prot->osh,
11230 		(h2d_flowrings_total * sizeof(msgbuf_ring_t)));
11231 
11232 	if (prot->h2d_flowrings_pool == NULL) {
11233 		DHD_ERROR(("%s: flowrings pool for %d flowrings, alloc failure\n",
11234 			__FUNCTION__, h2d_flowrings_total));
11235 		goto fail;
11236 	}
11237 
11238 	/* Setup & Attach a DMA-able buffer to each flowring in the flowring pool */
11239 	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
11240 		snprintf(ring_name, sizeof(ring_name), "h2dflr_%03u", flowid);
11241 		/* For HTPUT case max_items will be changed inside dhd_prot_ring_attach */
11242 		if (dhd_prot_ring_attach(dhd, ring, ring_name,
11243 		        prot->h2d_max_txpost, H2DRING_TXPOST_ITEMSIZE,
11244 		        DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) {
11245 			goto attach_fail;
11246 		}
11247 	}
11248 
11249 	return BCME_OK;
11250 
11251 attach_fail:
11252 	/* XXX: On a per project basis, one may decide whether to continue with
11253 	 * "fewer" flowrings, and what value of fewer suffices.
11254 	 */
11255 	dhd_prot_flowrings_pool_detach(dhd); /* Free entire pool of flowrings */
11256 
11257 fail:
11258 	prot->h2d_rings_total = 0;
11259 	return BCME_NOMEM;
11260 
11261 } /* dhd_prot_flowrings_pool_attach */
11262 
11263 /**
11264  * dhd_prot_flowrings_pool_reset - Reset all msgbuf_ring_t objects in the pool.
11265  * Invokes dhd_prot_ring_reset to perform the actual reset.
11266  *
11267  * The DMA-able buffer is not freed during reset and neither is the flowring
11268  * pool freed.
11269  *
11270  * dhd_prot_flowrings_pool_reset will be invoked in dhd_prot_reset. Following
11271  * the dhd_prot_reset, dhd_prot_init will be re-invoked, and the flowring pool
11272  * from a previous flowring pool instantiation will be reused.
11273  *
11274  * This will avoid a fragmented DMA-able memory condition, if multiple
11275  * dhd_prot_reset were invoked to reboot the dongle without a full detach/attach
11276  * cycle.
11277  */
11278 static void
11279 dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd)
11280 {
11281 	uint16 flowid, h2d_flowrings_total;
11282 	msgbuf_ring_t *ring;
11283 	dhd_prot_t *prot = dhd->prot;
11284 
11285 	if (prot->h2d_flowrings_pool == NULL) {
11286 		ASSERT(prot->h2d_rings_total == 0);
11287 		return;
11288 	}
11289 	h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
11290 	/* Reset each flowring in the flowring pool */
11291 	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
11292 		dhd_prot_ring_reset(dhd, ring);
11293 		ring->inited = FALSE;
11294 	}
11295 
11296 	/* Flowring pool state must be as-if dhd_prot_flowrings_pool_attach */
11297 }
11298 
11299 /**
11300  * dhd_prot_flowrings_pool_detach - Free pool of msgbuf_ring along with
11301  * DMA-able buffers for flowrings.
11302  * dhd_prot_ring_detach is invoked to free the DMA-able buffer and perform any
11303  * de-initialization of each msgbuf_ring_t.
11304  */
11305 static void
11306 dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd)
11307 {
11308 	int flowid;
11309 	msgbuf_ring_t *ring;
11310 	uint16 h2d_flowrings_total; /* exclude H2D common rings */
11311 	dhd_prot_t *prot = dhd->prot;
11312 
11313 	if (prot->h2d_flowrings_pool == NULL) {
11314 		ASSERT(prot->h2d_rings_total == 0);
11315 		return;
11316 	}
11317 
11318 	h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
11319 	/* Detach the DMA-able buffer for each flowring in the flowring pool */
11320 	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
11321 		dhd_prot_ring_detach(dhd, ring);
11322 	}
11323 
11324 	MFREE(prot->osh, prot->h2d_flowrings_pool,
11325 		(h2d_flowrings_total * sizeof(msgbuf_ring_t)));
11326 
11327 	prot->h2d_rings_total = 0;
11328 
11329 } /* dhd_prot_flowrings_pool_detach */
11330 
11331 /**
11332  * dhd_prot_flowrings_pool_fetch - Fetch a preallocated and initialized
11333  * msgbuf_ring from the flowring pool, and assign it.
11334  *
11335  * Unlike common rings, which uses a dhd_prot_ring_init() to pass the common
11336  * ring information to the dongle, a flowring's information is passed via a
11337  * flowring create control message.
11338  *
11339  * Only the ring state (WR, RD) index are initialized.
11340  */
11341 static msgbuf_ring_t *
11342 dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd, uint16 flowid)
11343 {
11344 	msgbuf_ring_t *ring;
11345 	dhd_prot_t *prot = dhd->prot;
11346 
11347 	ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
11348 	ASSERT(flowid < prot->h2d_rings_total);
11349 	ASSERT(prot->h2d_flowrings_pool != NULL);
11350 
11351 	ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
11352 
11353 	/* ASSERT flow_ring->inited == FALSE */
11354 
11355 	ring->wr = 0;
11356 	ring->rd = 0;
11357 	ring->curr_rd = 0;
11358 	ring->inited = TRUE;
11359 	/**
11360 	 * Every time a flowring starts dynamically, initialize current_phase with 0
11361 	 * then flip to BCMPCIE_CMNHDR_PHASE_BIT_INIT
11362 	 */
11363 	ring->current_phase = 0;
11364 	return ring;
11365 }
11366 
11367 /**
11368  * dhd_prot_flowrings_pool_release - release a previously fetched flowring's
11369  * msgbuf_ring back to the flow_ring pool.
11370  */
11371 void
11372 dhd_prot_flowrings_pool_release(dhd_pub_t *dhd, uint16 flowid, void *flow_ring)
11373 {
11374 	msgbuf_ring_t *ring;
11375 	dhd_prot_t *prot = dhd->prot;
11376 
11377 	ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
11378 	ASSERT(flowid < prot->h2d_rings_total);
11379 	ASSERT(prot->h2d_flowrings_pool != NULL);
11380 
11381 	ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
11382 
11383 	ASSERT(ring == (msgbuf_ring_t*)flow_ring);
11384 	/* ASSERT flow_ring->inited == TRUE */
11385 
11386 	(void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
11387 
11388 	ring->wr = 0;
11389 	ring->rd = 0;
11390 	ring->inited = FALSE;
11391 
11392 	ring->curr_rd = 0;
11393 }
11394 
11395 #ifdef AGG_H2D_DB
11396 void
11397 dhd_prot_schedule_aggregate_h2d_db(dhd_pub_t *dhd, uint16 flowid)
11398 {
11399 	dhd_prot_t *prot = dhd->prot;
11400 	msgbuf_ring_t *ring;
11401 	uint16 inflight;
11402 	bool db_req = FALSE;
11403 	bool flush;
11404 
11405 	ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
11406 	flush = !!ring->pend_items_count;
11407 	dhd_prot_txdata_aggr_db_write_flush(dhd, flowid);
11408 
11409 	inflight = OSL_ATOMIC_READ(dhd->osh, &ring->inflight);
11410 	if (flush && inflight) {
11411 		if (inflight <= agg_h2d_db_inflight_thresh) {
11412 			db_req = TRUE;
11413 		}
11414 		dhd_agg_inflights_stats_update(dhd, inflight);
11415 		dhd_prot_aggregate_db_ring_door_bell(dhd, flowid, db_req);
11416 	}
11417 }
11418 #endif /* AGG_H2D_DB */
11419 
11420 /* Assumes only one index is updated at a time */
11421 /* FIXME Need to fix it */
11422 /* If exactly_nitems is true, this function will allocate space for nitems or fail */
11423 /*    Exception: when wrap around is encountered, to prevent hangup (last nitems of ring buffer) */
11424 /* If exactly_nitems is false, this function will allocate space for nitems or less */
11425 static void *
11426 BCMFASTPATH(dhd_prot_get_ring_space)(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced,
11427 	bool exactly_nitems)
11428 {
11429 	void *ret_ptr = NULL;
11430 	uint16 ring_avail_cnt;
11431 
11432 	ASSERT(nitems <= ring->max_items);
11433 
11434 	ring_avail_cnt = CHECK_WRITE_SPACE(ring->rd, ring->wr, ring->max_items);
11435 
11436 	if ((ring_avail_cnt == 0) ||
11437 	       (exactly_nitems && (ring_avail_cnt < nitems) &&
11438 	       ((ring->max_items - ring->wr) >= nitems))) {
11439 		DHD_MSGBUF_INFO(("Space not available: ring %s items %d write %d read %d\n",
11440 			ring->name, nitems, ring->wr, ring->rd));
11441 		return NULL;
11442 	}
11443 	*alloced = MIN(nitems, ring_avail_cnt);
11444 
11445 	/* Return next available space */
11446 	ret_ptr = (char *)DHD_RING_BGN_VA(ring) + (ring->wr * ring->item_len);
11447 
11448 	/* Update write index */
11449 	if ((ring->wr + *alloced) == ring->max_items)
11450 		ring->wr = 0;
11451 	else if ((ring->wr + *alloced) < ring->max_items)
11452 		ring->wr += *alloced;
11453 	else {
11454 		/* Should never hit this */
11455 		ASSERT(0);
11456 		return NULL;
11457 	}
11458 
11459 	return ret_ptr;
11460 } /* dhd_prot_get_ring_space */
11461 
11462 #ifdef AGG_H2D_DB
11463 
11464 static void
11465 dhd_prot_agg_db_ring_write(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
11466 		uint16 nitems)
11467 {
11468 	uint16 max_flowrings = dhd->bus->max_tx_flowrings;
11469 	unsigned long flags_bus;
11470 
11471 #ifdef DHD_FAKE_TX_STATUS
11472 	/* if fake tx status is enabled, we should not update
11473 	 * dongle side rd/wr index for the tx flowring
11474 	 * and also should not ring the doorbell
11475 	 */
11476 	if (DHD_IS_FLOWRING(ring->idx, max_flowrings)) {
11477 		return;
11478 	}
11479 #endif /* DHD_FAKE_TX_STATUS */
11480 
11481 	DHD_BUS_LP_STATE_LOCK(dhd->bus->bus_lp_state_lock, flags_bus);
11482 
11483 	/* cache flush */
11484 	OSL_CACHE_FLUSH(p, ring->item_len * nitems);
11485 
11486 	if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
11487 			dhd_prot_dma_indx_set(dhd, ring->wr,
11488 			                      H2D_DMA_INDX_WR_UPD, ring->idx);
11489 	} else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings)) {
11490 			dhd_prot_dma_indx_set(dhd, ring->wr,
11491 			H2D_IFRM_INDX_WR_UPD, ring->idx);
11492 	} else {
11493 			dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
11494 				sizeof(uint16), RING_WR_UPD, ring->idx);
11495 	}
11496 
11497 	DHD_BUS_LP_STATE_UNLOCK(dhd->bus->bus_lp_state_lock, flags_bus);
11498 }
11499 
11500 static void
11501 dhd_prot_aggregate_db_ring_door_bell(dhd_pub_t *dhd, uint16 flowid, bool ring_db)
11502 {
11503 	dhd_prot_t *prot = dhd->prot;
11504 	flow_ring_table_t *flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
11505 	flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
11506 	msgbuf_ring_t *ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
11507 	uint32 db_index;
11508 	uint corerev;
11509 
11510 	if (ring_db == TRUE) {
11511 		dhd_msgbuf_agg_h2d_db_timer_cancel(dhd);
11512 		prot->agg_h2d_db_info.direct_db_cnt++;
11513 		/* raise h2d interrupt */
11514 		if (IDMA_ACTIVE(dhd) || (IFRM_ACTIVE(dhd))) {
11515 			db_index = IDMA_IDX0;
11516 			/* this api is called in wl down path..in that case sih is freed already */
11517 			if (dhd->bus->sih) {
11518 				corerev = dhd->bus->sih->buscorerev;
11519 				/* We need to explictly configure the type of DMA for
11520 				 * core rev >= 24
11521 				 */
11522 				if (corerev >= 24) {
11523 					db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
11524 				}
11525 			}
11526 			prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
11527 		} else {
11528 			prot->mb_ring_fn(dhd->bus, DHD_WRPTR_UPDATE_H2D_DB_MAGIC(ring));
11529 		}
11530 	} else {
11531 		dhd_msgbuf_agg_h2d_db_timer_start(prot);
11532 	}
11533 }
11534 
11535 #endif /* AGG_H2D_DB */
11536 
11537 /**
11538  * dhd_prot_ring_write_complete - Host updates the new WR index on producing
11539  * new messages in a H2D ring. The messages are flushed from cache prior to
11540  * posting the new WR index. The new WR index will be updated in the DMA index
11541  * array or directly in the dongle's ring state memory.
11542  * A PCIE doorbell will be generated to wake up the dongle.
11543  * This is a non-atomic function, make sure the callers
11544  * always hold appropriate locks.
11545  */
11546 static void
11547 BCMFASTPATH(__dhd_prot_ring_write_complete)(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
11548 	uint16 nitems)
11549 {
11550 	dhd_prot_t *prot = dhd->prot;
11551 	uint32 db_index;
11552 	uint16 max_flowrings = dhd->bus->max_tx_flowrings;
11553 	uint corerev;
11554 
11555 	/* cache flush */
11556 	OSL_CACHE_FLUSH(p, ring->item_len * nitems);
11557 
11558 	if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
11559 			dhd_prot_dma_indx_set(dhd, ring->wr,
11560 			                      H2D_DMA_INDX_WR_UPD, ring->idx);
11561 	} else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings)) {
11562 			dhd_prot_dma_indx_set(dhd, ring->wr,
11563 			H2D_IFRM_INDX_WR_UPD, ring->idx);
11564 	} else {
11565 			dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
11566 				sizeof(uint16), RING_WR_UPD, ring->idx);
11567 	}
11568 
11569 	/* raise h2d interrupt */
11570 	if (IDMA_ACTIVE(dhd) ||
11571 		(IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings))) {
11572 		db_index = IDMA_IDX0;
11573 		/* this api is called in wl down path..in that case sih is freed already */
11574 		if (dhd->bus->sih) {
11575 			corerev = dhd->bus->sih->buscorerev;
11576 			/* We need to explictly configure the type of DMA for core rev >= 24 */
11577 			if (corerev >= 24) {
11578 				db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
11579 			}
11580 		}
11581 		prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
11582 	} else {
11583 		prot->mb_ring_fn(dhd->bus, DHD_WRPTR_UPDATE_H2D_DB_MAGIC(ring));
11584 	}
11585 }
11586 
11587 static void
11588 BCMFASTPATH(dhd_prot_ring_write_complete)(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
11589 	uint16 nitems)
11590 {
11591 	unsigned long flags_bus;
11592 	DHD_BUS_LP_STATE_LOCK(dhd->bus->bus_lp_state_lock, flags_bus);
11593 	__dhd_prot_ring_write_complete(dhd, ring, p, nitems);
11594 	DHD_BUS_LP_STATE_UNLOCK(dhd->bus->bus_lp_state_lock, flags_bus);
11595 }
11596 
11597 static void
11598 BCMFASTPATH(dhd_prot_ring_doorbell)(dhd_pub_t *dhd, uint32 value)
11599 {
11600 	unsigned long flags_bus;
11601 	DHD_BUS_LP_STATE_LOCK(dhd->bus->bus_lp_state_lock, flags_bus);
11602 	dhd->prot->mb_ring_fn(dhd->bus, value);
11603 	DHD_BUS_LP_STATE_UNLOCK(dhd->bus->bus_lp_state_lock, flags_bus);
11604 }
11605 
11606 /**
11607  * dhd_prot_ring_write_complete_mbdata - will be called from dhd_prot_h2d_mbdata_send_ctrlmsg,
11608  * which will hold DHD_BUS_LP_STATE_LOCK to update WR pointer, Ring DB and also update
11609  * bus_low_power_state to indicate D3_INFORM sent in the same BUS_LP_STATE_LOCK.
11610  */
11611 static void
11612 BCMFASTPATH(dhd_prot_ring_write_complete_mbdata)(dhd_pub_t *dhd, msgbuf_ring_t * ring, void *p,
11613 	uint16 nitems, uint32 mb_data)
11614 {
11615 	unsigned long flags_bus;
11616 
11617 	DHD_BUS_LP_STATE_LOCK(dhd->bus->bus_lp_state_lock, flags_bus);
11618 
11619 	__dhd_prot_ring_write_complete(dhd, ring, p, nitems);
11620 
11621 	/* Mark D3_INFORM in the same context to skip ringing H2D DB after D3_INFORM */
11622 	if (mb_data == H2D_HOST_D3_INFORM) {
11623 		__DHD_SET_BUS_LPS_D3_INFORMED(dhd->bus);
11624 	}
11625 
11626 	DHD_BUS_LP_STATE_UNLOCK(dhd->bus->bus_lp_state_lock, flags_bus);
11627 }
11628 
11629 /**
11630  * dhd_prot_upd_read_idx - Host updates the new RD index on consuming messages
11631  * from a D2H ring. The new RD index will be updated in the DMA Index array or
11632  * directly in dongle's ring state memory.
11633  */
11634 static void
11635 dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
11636 {
11637 	dhd_prot_t *prot = dhd->prot;
11638 	uint32 db_index;
11639 	uint corerev;
11640 
11641 	/* update read index */
11642 	/* If dma'ing h2d indices supported
11643 	 * update the r -indices in the
11644 	 * host memory o/w in TCM
11645 	 */
11646 	if (IDMA_ACTIVE(dhd)) {
11647 		dhd_prot_dma_indx_set(dhd, ring->rd,
11648 			D2H_DMA_INDX_RD_UPD, ring->idx);
11649 		db_index = IDMA_IDX1;
11650 		if (dhd->bus->sih) {
11651 			corerev = dhd->bus->sih->buscorerev;
11652 			/* We need to explictly configure the type of DMA for core rev >= 24 */
11653 			if (corerev >= 24) {
11654 				db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
11655 			}
11656 		}
11657 		prot->mb_2_ring_fn(dhd->bus, db_index, FALSE);
11658 	} else if (dhd->dma_h2d_ring_upd_support) {
11659 		dhd_prot_dma_indx_set(dhd, ring->rd,
11660 		                      D2H_DMA_INDX_RD_UPD, ring->idx);
11661 	} else {
11662 		dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
11663 			sizeof(uint16), RING_RD_UPD, ring->idx);
11664 	}
11665 }
11666 
11667 static int
11668 dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
11669 	uint16 ring_type, uint32 req_id)
11670 {
11671 	unsigned long flags;
11672 	d2h_ring_create_req_t  *d2h_ring;
11673 	uint16 alloced = 0;
11674 	int ret = BCME_OK;
11675 	uint16 max_h2d_rings = dhd->bus->max_submission_rings;
11676 	msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
11677 
11678 #ifdef PCIE_INB_DW
11679 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
11680 		return BCME_ERROR;
11681 #endif /* PCIE_INB_DW */
11682 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
11683 
11684 	DHD_TRACE(("%s trying to send D2H ring create Req\n", __FUNCTION__));
11685 
11686 	if (ring_to_create == NULL) {
11687 		DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
11688 		ret = BCME_ERROR;
11689 		goto err;
11690 	}
11691 
11692 	/* Request for ring buffer space */
11693 	d2h_ring = (d2h_ring_create_req_t *) dhd_prot_alloc_ring_space(dhd,
11694 		ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
11695 		&alloced, FALSE);
11696 
11697 	if (d2h_ring == NULL) {
11698 		DHD_ERROR(("%s: FATAL: No space in control ring to send D2H ring create\n",
11699 			__FUNCTION__));
11700 		ret = BCME_NOMEM;
11701 		goto err;
11702 	}
11703 	ring_to_create->create_req_id = (uint16)req_id;
11704 	ring_to_create->create_pending = TRUE;
11705 
11706 	/* Common msg buf hdr */
11707 	d2h_ring->msg.msg_type = MSG_TYPE_D2H_RING_CREATE;
11708 	d2h_ring->msg.if_id = 0;
11709 	d2h_ring->msg.flags = ctrl_ring->current_phase;
11710 	d2h_ring->msg.request_id = htol32(ring_to_create->create_req_id);
11711 	d2h_ring->ring_id = htol16(DHD_D2H_RING_OFFSET(ring_to_create->idx, max_h2d_rings));
11712 	DHD_ERROR(("%s ringid: %d idx: %d max_h2d: %d\n", __FUNCTION__, d2h_ring->ring_id,
11713 			ring_to_create->idx, max_h2d_rings));
11714 
11715 	d2h_ring->ring_type = ring_type;
11716 	d2h_ring->max_items = htol16(ring_to_create->max_items);
11717 	d2h_ring->len_item = htol16(ring_to_create->item_len);
11718 	d2h_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
11719 	d2h_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
11720 
11721 	d2h_ring->flags = 0;
11722 	d2h_ring->msg.epoch =
11723 		ctrl_ring->seqnum % H2D_EPOCH_MODULO;
11724 	ctrl_ring->seqnum++;
11725 
11726 #ifdef EWP_EDL
11727 	if (ring_type == BCMPCIE_D2H_RING_TYPE_EDL) {
11728 		DHD_ERROR(("%s: sending d2h EDL ring create: "
11729 			"\n max items=%u; len_item=%u; ring_id=%u; low_addr=0x%x; high_addr=0x%x\n",
11730 			__FUNCTION__, ltoh16(d2h_ring->max_items),
11731 			ltoh16(d2h_ring->len_item),
11732 			ltoh16(d2h_ring->ring_id),
11733 			d2h_ring->ring_ptr.low_addr,
11734 			d2h_ring->ring_ptr.high_addr));
11735 	}
11736 #endif /* EWP_EDL */
11737 
11738 	/* Update the flow_ring's WRITE index */
11739 	dhd_prot_ring_write_complete(dhd, ctrl_ring, d2h_ring,
11740 		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
11741 
11742 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
11743 
11744 #ifdef PCIE_INB_DW
11745 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
11746 #endif
11747 
11748 	return ret;
11749 err:
11750 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
11751 
11752 #ifdef PCIE_INB_DW
11753 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
11754 #endif
11755 	return ret;
11756 }
11757 
11758 static int
11759 dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create, uint8 ring_type, uint32 id)
11760 {
11761 	unsigned long flags;
11762 	h2d_ring_create_req_t  *h2d_ring;
11763 	uint16 alloced = 0;
11764 	uint8 i = 0;
11765 	int ret = BCME_OK;
11766 	msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
11767 
11768 #ifdef PCIE_INB_DW
11769 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
11770 		return BCME_ERROR;
11771 #endif /* PCIE_INB_DW */
11772 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
11773 
11774 	DHD_TRACE(("%s trying to send H2D ring create Req\n", __FUNCTION__));
11775 
11776 	if (ring_to_create == NULL) {
11777 		DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
11778 		ret = BCME_ERROR;
11779 		goto err;
11780 	}
11781 
11782 	/* Request for ring buffer space */
11783 	h2d_ring = (h2d_ring_create_req_t *)dhd_prot_alloc_ring_space(dhd,
11784 		ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
11785 		&alloced, FALSE);
11786 
11787 	if (h2d_ring == NULL) {
11788 		DHD_ERROR(("%s: FATAL: No space in control ring to send H2D ring create\n",
11789 			__FUNCTION__));
11790 		ret = BCME_NOMEM;
11791 		goto err;
11792 	}
11793 	ring_to_create->create_req_id = (uint16)id;
11794 	ring_to_create->create_pending = TRUE;
11795 
11796 	/* Common msg buf hdr */
11797 	h2d_ring->msg.msg_type = MSG_TYPE_H2D_RING_CREATE;
11798 	h2d_ring->msg.if_id = 0;
11799 	h2d_ring->msg.request_id = htol32(ring_to_create->create_req_id);
11800 	h2d_ring->msg.flags = ctrl_ring->current_phase;
11801 	h2d_ring->ring_id = htol16(DHD_H2D_RING_OFFSET(ring_to_create->idx));
11802 	h2d_ring->ring_type = ring_type;
11803 	h2d_ring->max_items = htol16(H2DRING_DYNAMIC_INFO_MAX_ITEM);
11804 	h2d_ring->n_completion_ids = ring_to_create->n_completion_ids;
11805 	h2d_ring->len_item = htol16(H2DRING_INFO_BUFPOST_ITEMSIZE);
11806 	h2d_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
11807 	h2d_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
11808 
11809 	for (i = 0; i < ring_to_create->n_completion_ids; i++) {
11810 		h2d_ring->completion_ring_ids[i] = htol16(ring_to_create->compeltion_ring_ids[i]);
11811 	}
11812 
11813 	h2d_ring->flags = 0;
11814 	h2d_ring->msg.epoch =
11815 		ctrl_ring->seqnum % H2D_EPOCH_MODULO;
11816 	ctrl_ring->seqnum++;
11817 
11818 	/* Update the flow_ring's WRITE index */
11819 	dhd_prot_ring_write_complete(dhd, ctrl_ring, h2d_ring,
11820 		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
11821 
11822 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
11823 
11824 #ifdef PCIE_INB_DW
11825 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
11826 #endif
11827 	return ret;
11828 err:
11829 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
11830 
11831 #ifdef PCIE_INB_DW
11832 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
11833 #endif
11834 	return ret;
11835 }
11836 
11837 /**
11838  * dhd_prot_dma_indx_set - set a new WR or RD index in the DMA index array.
11839  * Dongle will DMA the entire array (if DMA_INDX feature is enabled).
11840  * See dhd_prot_dma_indx_init()
11841  */
11842 void
11843 dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, uint16 ringid)
11844 {
11845 	uint8 *ptr;
11846 	uint16 offset;
11847 	dhd_prot_t *prot = dhd->prot;
11848 	uint16 max_h2d_rings = dhd->bus->max_submission_rings;
11849 
11850 	switch (type) {
11851 		case H2D_DMA_INDX_WR_UPD:
11852 			ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
11853 			offset = DHD_H2D_RING_OFFSET(ringid);
11854 			break;
11855 
11856 		case D2H_DMA_INDX_RD_UPD:
11857 			ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
11858 			offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
11859 			break;
11860 
11861 		case H2D_IFRM_INDX_WR_UPD:
11862 			ptr = (uint8 *)(prot->h2d_ifrm_indx_wr_buf.va);
11863 			offset = DHD_H2D_FRM_FLOW_RING_OFFSET(ringid);
11864 			break;
11865 
11866 		default:
11867 			DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
11868 				__FUNCTION__));
11869 			return;
11870 	}
11871 
11872 	ASSERT(prot->rw_index_sz != 0);
11873 	ptr += offset * prot->rw_index_sz;
11874 
11875 	/* XXX: Test casting ptr to uint16* for 32bit indices case on Big Endian */
11876 	*(uint16*)ptr = htol16(new_index);
11877 
11878 	OSL_CACHE_FLUSH((void *)ptr, prot->rw_index_sz);
11879 
11880 	DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
11881 		__FUNCTION__, new_index, type, ringid, ptr, offset));
11882 
11883 } /* dhd_prot_dma_indx_set */
11884 
11885 /**
11886  * dhd_prot_dma_indx_get - Fetch a WR or RD index from the dongle DMA-ed index
11887  * array.
11888  * Dongle DMAes an entire array to host memory (if the feature is enabled).
11889  * See dhd_prot_dma_indx_init()
11890  */
11891 static uint16
11892 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid)
11893 {
11894 	uint8 *ptr;
11895 	uint16 data;
11896 	uint16 offset;
11897 	dhd_prot_t *prot = dhd->prot;
11898 	uint16 max_h2d_rings = dhd->bus->max_submission_rings;
11899 
11900 	switch (type) {
11901 		case H2D_DMA_INDX_WR_UPD:
11902 			ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
11903 			offset = DHD_H2D_RING_OFFSET(ringid);
11904 			break;
11905 
11906 		case H2D_DMA_INDX_RD_UPD:
11907 #ifdef DHD_DMA_INDICES_SEQNUM
11908 			if (prot->h2d_dma_indx_rd_copy_buf) {
11909 				ptr = (uint8 *)(prot->h2d_dma_indx_rd_copy_buf);
11910 			} else
11911 #endif /* DHD_DMA_INDICES_SEQNUM */
11912 			{
11913 				ptr = (uint8 *)(prot->h2d_dma_indx_rd_buf.va);
11914 			}
11915 			offset = DHD_H2D_RING_OFFSET(ringid);
11916 			break;
11917 
11918 		case D2H_DMA_INDX_WR_UPD:
11919 #ifdef DHD_DMA_INDICES_SEQNUM
11920 			if (prot->d2h_dma_indx_wr_copy_buf) {
11921 				ptr = (uint8 *)(prot->d2h_dma_indx_wr_copy_buf);
11922 			} else
11923 #endif /* DHD_DMA_INDICES_SEQNUM */
11924 			{
11925 				ptr = (uint8 *)(prot->d2h_dma_indx_wr_buf.va);
11926 			}
11927 			offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
11928 			break;
11929 
11930 		case D2H_DMA_INDX_RD_UPD:
11931 			ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
11932 			offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
11933 			break;
11934 
11935 		default:
11936 			DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
11937 				__FUNCTION__));
11938 			return 0;
11939 	}
11940 
11941 	ASSERT(prot->rw_index_sz != 0);
11942 	ptr += offset * prot->rw_index_sz;
11943 
11944 	OSL_CACHE_INV((void *)ptr, prot->rw_index_sz);
11945 
11946 	/* XXX: Test casting ptr to uint16* for 32bit indices case on Big Endian */
11947 	data = LTOH16(*((uint16*)ptr));
11948 
11949 	DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
11950 		__FUNCTION__, data, type, ringid, ptr, offset));
11951 
11952 	return (data);
11953 
11954 } /* dhd_prot_dma_indx_get */
11955 
11956 #ifdef DHD_DMA_INDICES_SEQNUM
11957 void
11958 dhd_prot_write_host_seqnum(dhd_pub_t *dhd, uint32 seq_num)
11959 {
11960 	uint8 *ptr;
11961 	dhd_prot_t *prot = dhd->prot;
11962 
11963 	/* Update host sequence number in first four bytes of scratchbuf */
11964 	ptr = (uint8 *)(prot->d2h_dma_scratch_buf.va);
11965 	*(uint32*)ptr = htol32(seq_num);
11966 	OSL_CACHE_FLUSH((void *)ptr, prot->d2h_dma_scratch_buf.len);
11967 
11968 	DHD_TRACE(("%s: data %d ptr 0x%p\n", __FUNCTION__, seq_num, ptr));
11969 
11970 } /* dhd_prot_dma_indx_set */
11971 
11972 uint32
11973 dhd_prot_read_seqnum(dhd_pub_t *dhd, bool host)
11974 {
11975 	uint8 *ptr;
11976 	dhd_prot_t *prot = dhd->prot;
11977 	uint32 data;
11978 
11979 	OSL_CACHE_INV((void *)ptr, d2h_dma_scratch_buf.len);
11980 
11981 	/* First four bytes of scratchbuf contains the host sequence number.
11982 	 * Next four bytes of scratchbuf contains the Dongle sequence number.
11983 	 */
11984 	if (host) {
11985 		ptr = (uint8 *)(prot->d2h_dma_scratch_buf.va);
11986 		data = LTOH32(*((uint32*)ptr));
11987 	} else {
11988 		ptr = ((uint8 *)(prot->d2h_dma_scratch_buf.va) + sizeof(uint32));
11989 		data = LTOH32(*((uint32*)ptr));
11990 	}
11991 	DHD_TRACE(("%s: data %d ptr 0x%p\n", __FUNCTION__, data, ptr));
11992 	return data;
11993 } /* dhd_prot_dma_indx_set */
11994 
11995 void
11996 dhd_prot_save_dmaidx(dhd_pub_t *dhd)
11997 {
11998 	dhd_prot_t *prot = dhd->prot;
11999 	uint32 dngl_seqnum;
12000 
12001 	dngl_seqnum = dhd_prot_read_seqnum(dhd, FALSE);
12002 
12003 	DHD_TRACE(("%s: host_seqnum %u dngl_seqnum %u\n", __FUNCTION__,
12004 			prot->host_seqnum, dngl_seqnum));
12005 	if (prot->d2h_dma_indx_wr_copy_buf && prot->h2d_dma_indx_rd_copy_buf) {
12006 		if (prot->host_seqnum == dngl_seqnum) {
12007 			memcpy_s(prot->d2h_dma_indx_wr_copy_buf, prot->d2h_dma_indx_wr_copy_bufsz,
12008 				prot->d2h_dma_indx_wr_buf.va, prot->d2h_dma_indx_wr_copy_bufsz);
12009 			memcpy_s(prot->h2d_dma_indx_rd_copy_buf, prot->h2d_dma_indx_rd_copy_bufsz,
12010 				prot->h2d_dma_indx_rd_buf.va, prot->h2d_dma_indx_rd_copy_bufsz);
12011 			dhd_prot_write_host_seqnum(dhd, prot->host_seqnum);
12012 			/* Ring DoorBell */
12013 			dhd_prot_ring_doorbell(dhd, DHD_DMA_INDX_SEQ_H2D_DB_MAGIC);
12014 			prot->host_seqnum++;
12015 			prot->host_seqnum %= D2H_EPOCH_MODULO;
12016 		}
12017 	}
12018 }
12019 
12020 int
12021 dhd_prot_dma_indx_copybuf_init(dhd_pub_t *dhd, uint32 buf_sz, uint8 type)
12022 {
12023 	dhd_prot_t *prot = dhd->prot;
12024 
12025 	switch (type) {
12026 		case D2H_DMA_INDX_WR_BUF:
12027 			prot->d2h_dma_indx_wr_copy_buf = MALLOCZ(dhd->osh, buf_sz);
12028 			if (prot->d2h_dma_indx_wr_copy_buf == NULL) {
12029 				DHD_ERROR(("%s: MALLOC failed for size %d\n",
12030 					__FUNCTION__, buf_sz));
12031 				goto ret_no_mem;
12032 			}
12033 			prot->d2h_dma_indx_wr_copy_bufsz = buf_sz;
12034 		break;
12035 
12036 		case H2D_DMA_INDX_RD_BUF:
12037 			prot->h2d_dma_indx_rd_copy_buf = MALLOCZ(dhd->osh, buf_sz);
12038 			if (prot->h2d_dma_indx_rd_copy_buf == NULL) {
12039 				DHD_ERROR(("%s: MALLOC failed for size %d\n",
12040 					__FUNCTION__, buf_sz));
12041 				goto ret_no_mem;
12042 			}
12043 			prot->h2d_dma_indx_rd_copy_bufsz = buf_sz;
12044 			break;
12045 
12046 		default:
12047 			break;
12048 	}
12049 	return BCME_OK;
12050 ret_no_mem:
12051 	return BCME_NOMEM;
12052 
12053 }
12054 #endif /* DHD_DMA_INDICES_SEQNUM */
12055 
12056 /**
12057  * An array of DMA read/write indices, containing information about host rings, can be maintained
12058  * either in host memory or in device memory, dependent on preprocessor options. This function is,
12059  * dependent on these options, called during driver initialization. It reserves and initializes
12060  * blocks of DMA'able host memory containing an array of DMA read or DMA write indices. The physical
12061  * address of these host memory blocks are communicated to the dongle later on. By reading this host
12062  * memory, the dongle learns about the state of the host rings.
12063  */
12064 
12065 static INLINE int
12066 dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
12067 	dhd_dma_buf_t *dma_buf, uint32 bufsz)
12068 {
12069 	int rc;
12070 
12071 	if ((dma_buf->len == bufsz) || (dma_buf->va != NULL))
12072 		return BCME_OK;
12073 
12074 	rc = dhd_dma_buf_alloc(dhd, dma_buf, bufsz);
12075 
12076 	return rc;
12077 }
12078 
12079 int
12080 dhd_prot_dma_indx_init(dhd_pub_t *dhd, uint32 rw_index_sz, uint8 type, uint32 length)
12081 {
12082 	uint32 bufsz;
12083 	dhd_prot_t *prot = dhd->prot;
12084 	dhd_dma_buf_t *dma_buf;
12085 
12086 	if (prot == NULL) {
12087 		DHD_ERROR(("prot is not inited\n"));
12088 		return BCME_ERROR;
12089 	}
12090 
12091 	/* Dongle advertizes 2B or 4B RW index size */
12092 	ASSERT(rw_index_sz != 0);
12093 	prot->rw_index_sz = rw_index_sz;
12094 
12095 	bufsz = rw_index_sz * length;
12096 
12097 	switch (type) {
12098 		case H2D_DMA_INDX_WR_BUF:
12099 			dma_buf = &prot->h2d_dma_indx_wr_buf;
12100 			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
12101 				goto ret_no_mem;
12102 			DHD_ERROR(("H2D DMA WR INDX : array size %d = %d * %d\n",
12103 				dma_buf->len, rw_index_sz, length));
12104 			break;
12105 
12106 		case H2D_DMA_INDX_RD_BUF:
12107 			dma_buf = &prot->h2d_dma_indx_rd_buf;
12108 			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
12109 				goto ret_no_mem;
12110 			DHD_ERROR(("H2D DMA RD INDX : array size %d = %d * %d\n",
12111 				dma_buf->len, rw_index_sz, length));
12112 			break;
12113 
12114 		case D2H_DMA_INDX_WR_BUF:
12115 			dma_buf = &prot->d2h_dma_indx_wr_buf;
12116 			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
12117 				goto ret_no_mem;
12118 			DHD_ERROR(("D2H DMA WR INDX : array size %d = %d * %d\n",
12119 				dma_buf->len, rw_index_sz, length));
12120 			break;
12121 
12122 		case D2H_DMA_INDX_RD_BUF:
12123 			dma_buf = &prot->d2h_dma_indx_rd_buf;
12124 			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
12125 				goto ret_no_mem;
12126 			DHD_ERROR(("D2H DMA RD INDX : array size %d = %d * %d\n",
12127 				dma_buf->len, rw_index_sz, length));
12128 			break;
12129 
12130 		case H2D_IFRM_INDX_WR_BUF:
12131 			dma_buf = &prot->h2d_ifrm_indx_wr_buf;
12132 			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
12133 				goto ret_no_mem;
12134 			DHD_ERROR(("H2D IFRM WR INDX : array size %d = %d * %d\n",
12135 				dma_buf->len, rw_index_sz, length));
12136 			break;
12137 
12138 		default:
12139 			DHD_ERROR(("%s: Unexpected option\n", __FUNCTION__));
12140 			return BCME_BADOPTION;
12141 	}
12142 
12143 	return BCME_OK;
12144 
12145 ret_no_mem:
12146 	DHD_ERROR(("%s: dhd_prot_dma_indx_alloc type %d buf_sz %d failure\n",
12147 		__FUNCTION__, type, bufsz));
12148 	return BCME_NOMEM;
12149 
12150 } /* dhd_prot_dma_indx_init */
12151 
12152 /**
12153  * Called on checking for 'completion' messages from the dongle. Returns next host buffer to read
12154  * from, or NULL if there are no more messages to read.
12155  */
12156 static uint8*
12157 dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 *available_len)
12158 {
12159 	uint16 wr;
12160 	uint16 rd;
12161 	uint16 depth;
12162 	uint16 items;
12163 	void  *read_addr = NULL; /* address of next msg to be read in ring */
12164 	uint16 d2h_wr = 0;
12165 
12166 	DHD_TRACE(("%s: d2h_dma_indx_rd_buf %p, d2h_dma_indx_wr_buf %p\n",
12167 		__FUNCTION__, (uint32 *)(dhd->prot->d2h_dma_indx_rd_buf.va),
12168 		(uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va)));
12169 
12170 	/* Remember the read index in a variable.
12171 	 * This is becuase ring->rd gets updated in the end of this function
12172 	 * So if we have to print the exact read index from which the
12173 	 * message is read its not possible.
12174 	 */
12175 	ring->curr_rd = ring->rd;
12176 
12177 	/* update write pointer */
12178 	if (dhd->dma_d2h_ring_upd_support) {
12179 		/* DMAing write/read indices supported */
12180 		d2h_wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
12181 		ring->wr = d2h_wr;
12182 	} else {
12183 		dhd_bus_cmn_readshared(dhd->bus, &(ring->wr), RING_WR_UPD, ring->idx);
12184 	}
12185 
12186 	wr = ring->wr;
12187 	rd = ring->rd;
12188 	depth = ring->max_items;
12189 
12190 	/* check for avail space, in number of ring items */
12191 	items = READ_AVAIL_SPACE(wr, rd, depth);
12192 	if (items == 0)
12193 		return NULL;
12194 
12195 	/*
12196 	 * Note that there are builds where Assert translates to just printk
12197 	 * so, even if we had hit this condition we would never halt. Now
12198 	 * dhd_prot_process_msgtype can get into an big loop if this
12199 	 * happens.
12200 	 */
12201 	if (items > ring->max_items) {
12202 		DHD_ERROR(("\r\n======================= \r\n"));
12203 		DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
12204 			__FUNCTION__, ring, ring->name, ring->max_items, items));
12205 		DHD_ERROR(("wr: %d,  rd: %d,  depth: %d  \r\n", wr, rd, depth));
12206 		DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
12207 			dhd->busstate, dhd->bus->wait_for_d3_ack));
12208 		DHD_ERROR(("\r\n======================= \r\n"));
12209 #ifdef SUPPORT_LINKDOWN_RECOVERY
12210 		if (wr >= ring->max_items) {
12211 			dhd->bus->read_shm_fail = TRUE;
12212 		}
12213 #else
12214 #ifdef DHD_FW_COREDUMP
12215 		if (dhd->memdump_enabled) {
12216 			/* collect core dump */
12217 			dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
12218 			dhd_bus_mem_dump(dhd);
12219 
12220 		}
12221 #endif /* DHD_FW_COREDUMP */
12222 #endif /* SUPPORT_LINKDOWN_RECOVERY */
12223 
12224 		*available_len = 0;
12225 		dhd_schedule_reset(dhd);
12226 
12227 		return NULL;
12228 	}
12229 
12230 	/* if space is available, calculate address to be read */
12231 	read_addr = (char*)ring->dma_buf.va + (rd * ring->item_len);
12232 
12233 	/* update read pointer */
12234 	if ((ring->rd + items) >= ring->max_items)
12235 		ring->rd = 0;
12236 	else
12237 		ring->rd += items;
12238 
12239 	ASSERT(ring->rd < ring->max_items);
12240 
12241 	/* convert items to bytes : available_len must be 32bits */
12242 	*available_len = (uint32)(items * ring->item_len);
12243 
12244 	/* XXX Double cache invalidate for ARM with L2 cache/prefetch */
12245 	OSL_CACHE_INV(read_addr, *available_len);
12246 
12247 	/* return read address */
12248 	return read_addr;
12249 
12250 } /* dhd_prot_get_read_addr */
12251 
12252 /**
12253  * dhd_prot_h2d_mbdata_send_ctrlmsg is a non-atomic function,
12254  * make sure the callers always hold appropriate locks.
12255  */
12256 int dhd_prot_h2d_mbdata_send_ctrlmsg(dhd_pub_t *dhd, uint32 mb_data)
12257 {
12258 	h2d_mailbox_data_t *h2d_mb_data;
12259 	uint16 alloced = 0;
12260 	msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
12261 	unsigned long flags;
12262 	int num_post = 1;
12263 	int i;
12264 
12265 	DHD_MSGBUF_INFO(("%s Sending H2D MB data Req data 0x%04x\n",
12266 		__FUNCTION__, mb_data));
12267 	if (!ctrl_ring->inited) {
12268 		DHD_ERROR(("%s: Ctrl Submit Ring: not inited\n", __FUNCTION__));
12269 		return BCME_ERROR;
12270 	}
12271 
12272 #ifdef PCIE_INB_DW
12273 	if ((INBAND_DW_ENAB(dhd->bus)) &&
12274 		(dhdpcie_bus_get_pcie_inband_dw_state(dhd->bus) ==
12275 			DW_DEVICE_DS_DEV_SLEEP)) {
12276 		if (mb_data == H2D_HOST_CONS_INT) {
12277 			/* One additional device_wake post needed */
12278 			num_post = 2;
12279 		}
12280 	}
12281 #endif /* PCIE_INB_DW */
12282 
12283 	for (i = 0; i < num_post; i ++) {
12284 		DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
12285 		/* Request for ring buffer space */
12286 		h2d_mb_data = (h2d_mailbox_data_t *)dhd_prot_alloc_ring_space(dhd,
12287 			ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
12288 			&alloced, FALSE);
12289 
12290 		if (h2d_mb_data == NULL) {
12291 			DHD_ERROR(("%s: FATAL: No space in control ring to send H2D Mb data\n",
12292 				__FUNCTION__));
12293 			DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
12294 			return BCME_NOMEM;
12295 		}
12296 
12297 		memset(h2d_mb_data, 0, sizeof(h2d_mailbox_data_t));
12298 		/* Common msg buf hdr */
12299 		h2d_mb_data->msg.msg_type = MSG_TYPE_H2D_MAILBOX_DATA;
12300 		h2d_mb_data->msg.flags = ctrl_ring->current_phase;
12301 
12302 		h2d_mb_data->msg.epoch =
12303 			ctrl_ring->seqnum % H2D_EPOCH_MODULO;
12304 		ctrl_ring->seqnum++;
12305 
12306 		/* Update flow create message */
12307 		h2d_mb_data->mail_box_data = htol32(mb_data);
12308 #ifdef PCIE_INB_DW
12309 		/* post device_wake first */
12310 		if ((num_post == 2) && (i == 0)) {
12311 			h2d_mb_data->mail_box_data = htol32(H2DMB_DS_DEVICE_WAKE);
12312 		} else
12313 #endif /* PCIE_INB_DW */
12314 		{
12315 			h2d_mb_data->mail_box_data = htol32(mb_data);
12316 		}
12317 
12318 		DHD_MSGBUF_INFO(("%s Send H2D MB data Req data 0x%04x\n", __FUNCTION__, mb_data));
12319 
12320 		/* upd wrt ptr and raise interrupt */
12321 		dhd_prot_ring_write_complete_mbdata(dhd, ctrl_ring, h2d_mb_data,
12322 			DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, mb_data);
12323 
12324 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
12325 
12326 #ifdef PCIE_INB_DW
12327 		/* Add a delay if device_wake is posted */
12328 		if ((num_post == 2) && (i == 0)) {
12329 			OSL_DELAY(1000);
12330 		}
12331 #endif /* PCIE_INB_DW */
12332 	}
12333 	return 0;
12334 }
12335 
12336 /** Creates a flow ring and informs dongle of this event */
12337 int
12338 dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
12339 {
12340 	tx_flowring_create_request_t *flow_create_rqst;
12341 	msgbuf_ring_t *flow_ring;
12342 	dhd_prot_t *prot = dhd->prot;
12343 	unsigned long flags;
12344 	uint16 alloced = 0;
12345 	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
12346 	uint16 max_flowrings = dhd->bus->max_tx_flowrings;
12347 
12348 	/* Fetch a pre-initialized msgbuf_ring from the flowring pool */
12349 	flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
12350 	if (flow_ring == NULL) {
12351 		DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
12352 			__FUNCTION__, flow_ring_node->flowid));
12353 		return BCME_NOMEM;
12354 	}
12355 
12356 #ifdef PCIE_INB_DW
12357 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
12358 		return BCME_ERROR;
12359 #endif /* PCIE_INB_DW */
12360 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
12361 
12362 	/* Request for ctrl_ring buffer space */
12363 	flow_create_rqst = (tx_flowring_create_request_t *)
12364 		dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
12365 
12366 	if (flow_create_rqst == NULL) {
12367 		dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
12368 		DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n",
12369 			__FUNCTION__, flow_ring_node->flowid));
12370 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
12371 #ifdef PCIE_INB_DW
12372 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
12373 #endif
12374 		return BCME_NOMEM;
12375 	}
12376 
12377 	flow_ring_node->prot_info = (void *)flow_ring;
12378 
12379 	/* Common msg buf hdr */
12380 	flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE;
12381 	flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
12382 	flow_create_rqst->msg.request_id = htol32(0); /* TBD */
12383 	flow_create_rqst->msg.flags = ctrl_ring->current_phase;
12384 
12385 	flow_create_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
12386 	ctrl_ring->seqnum++;
12387 
12388 	/* Update flow create message */
12389 	flow_create_rqst->tid = flow_ring_node->flow_info.tid;
12390 	flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
12391 	memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa, sizeof(flow_create_rqst->sa));
12392 	memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da, sizeof(flow_create_rqst->da));
12393 	/* CAUTION: ring::base_addr already in Little Endian */
12394 	flow_create_rqst->flow_ring_ptr.low_addr = flow_ring->base_addr.low_addr;
12395 	flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr;
12396 	flow_create_rqst->max_items = htol16(flow_ring->max_items);
12397 	flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE);
12398 	flow_create_rqst->if_flags = 0;
12399 
12400 #ifdef DHD_HP2P
12401 	/* Create HPP flow ring if HP2P is enabled and TID=7  and AWDL interface */
12402 	/* and traffic is not multicast */
12403 	/* Allow infra interface only if user enabled hp2p_infra_enable thru iovar */
12404 	if (dhd->hp2p_capable && dhd->hp2p_ring_more &&
12405 		flow_ring_node->flow_info.tid == HP2P_PRIO &&
12406 		(dhd->hp2p_infra_enable || flow_create_rqst->msg.if_id) &&
12407 		!ETHER_ISMULTI(flow_create_rqst->da)) {
12408 		flow_create_rqst->if_flags |= BCMPCIE_FLOW_RING_INTF_HP2P;
12409 		flow_ring_node->hp2p_ring = TRUE;
12410 		/* Allow multiple HP2P Flow if mf override is enabled */
12411 		if (!dhd->hp2p_mf_enable) {
12412 			dhd->hp2p_ring_more = FALSE;
12413 		}
12414 
12415 		DHD_ERROR(("%s: flow ring for HP2P tid = %d flowid = %d\n",
12416 				__FUNCTION__, flow_ring_node->flow_info.tid,
12417 				flow_ring_node->flowid));
12418 	}
12419 #endif /* DHD_HP2P */
12420 
12421 	/* definition for ifrm mask : bit0:d11ac core, bit1:d11ad core
12422 	 * currently it is not used for priority. so uses solely for ifrm mask
12423 	 */
12424 	if (IFRM_ACTIVE(dhd))
12425 		flow_create_rqst->priority_ifrmmask = (1 << IFRM_DEV_0);
12426 
12427 	DHD_ERROR(("%s: Send Flow Create Req flow ID %d for peer " MACDBG
12428 		" prio %d ifindex %d items %d\n", __FUNCTION__, flow_ring_node->flowid,
12429 		MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
12430 		flow_ring_node->flow_info.ifindex, flow_ring->max_items));
12431 
12432 	/* Update the flow_ring's WRITE index */
12433 	if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
12434 		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
12435 		                      H2D_DMA_INDX_WR_UPD, flow_ring->idx);
12436 	} else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(flow_ring->idx, max_flowrings)) {
12437 		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
12438 			H2D_IFRM_INDX_WR_UPD, flow_ring->idx);
12439 	} else {
12440 		dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
12441 			sizeof(uint16), RING_WR_UPD, flow_ring->idx);
12442 	}
12443 
12444 	/* update control subn ring's WR index and ring doorbell to dongle */
12445 	dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_create_rqst, 1);
12446 
12447 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
12448 
12449 #ifdef PCIE_INB_DW
12450 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
12451 #endif
12452 	return BCME_OK;
12453 } /* dhd_prot_flow_ring_create */
12454 
12455 /** called on receiving MSG_TYPE_FLOW_RING_CREATE_CMPLT message from dongle */
12456 static void
12457 dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg)
12458 {
12459 	tx_flowring_create_response_t *flow_create_resp = (tx_flowring_create_response_t *)msg;
12460 
12461 	DHD_ERROR(("%s: Flow Create Response status = %d Flow %d\n", __FUNCTION__,
12462 		ltoh16(flow_create_resp->cmplt.status),
12463 		ltoh16(flow_create_resp->cmplt.flow_ring_id)));
12464 
12465 	dhd_bus_flow_ring_create_response(dhd->bus,
12466 		ltoh16(flow_create_resp->cmplt.flow_ring_id),
12467 		ltoh16(flow_create_resp->cmplt.status));
12468 }
12469 
12470 #if !defined(BCM_ROUTER_DHD)
12471 static void
12472 dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf)
12473 {
12474 	h2d_ring_create_response_t *resp = (h2d_ring_create_response_t *)buf;
12475 	DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
12476 		ltoh16(resp->cmplt.status),
12477 		ltoh16(resp->cmplt.ring_id),
12478 		ltoh32(resp->cmn_hdr.request_id)));
12479 	if ((ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_DBGRING_REQ_PKTID) &&
12480 		(ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_BTLOGRING_REQ_PKTID)) {
12481 		DHD_ERROR(("invalid request ID with h2d ring create complete\n"));
12482 		return;
12483 	}
12484 	if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id) &&
12485 		!dhd->prot->h2dring_info_subn->create_pending) {
12486 		DHD_ERROR(("info ring create status for not pending submit ring\n"));
12487 	}
12488 #ifdef BTLOG
12489 	if (dhd->prot->h2dring_btlog_subn &&
12490 		dhd->prot->h2dring_btlog_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id) &&
12491 		!dhd->prot->h2dring_btlog_subn->create_pending) {
12492 		DHD_ERROR(("btlog ring create status for not pending submit ring\n"));
12493 	}
12494 #endif	/* BTLOG */
12495 
12496 	if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
12497 		DHD_ERROR(("info/btlog ring create failed with status %d\n",
12498 			ltoh16(resp->cmplt.status)));
12499 		return;
12500 	}
12501 	if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id)) {
12502 		dhd->prot->h2dring_info_subn->create_pending = FALSE;
12503 		dhd->prot->h2dring_info_subn->inited = TRUE;
12504 		DHD_ERROR(("info buffer post after ring create\n"));
12505 		dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
12506 	}
12507 #ifdef BTLOG
12508 	if (dhd->prot->h2dring_btlog_subn &&
12509 		dhd->prot->h2dring_btlog_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id)) {
12510 		dhd->prot->h2dring_btlog_subn->create_pending = FALSE;
12511 		dhd->prot->h2dring_btlog_subn->inited = TRUE;
12512 		DHD_ERROR(("btlog buffer post after ring create\n"));
12513 		dhd_prot_infobufpost(dhd, dhd->prot->h2dring_btlog_subn);
12514 	}
12515 #endif	/* BTLOG */
12516 }
12517 #endif /* !BCM_ROUTER_DHD */
12518 
12519 static void
12520 dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf)
12521 {
12522 	d2h_ring_create_response_t *resp = (d2h_ring_create_response_t *)buf;
12523 	DHD_ERROR(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
12524 		ltoh16(resp->cmplt.status),
12525 		ltoh16(resp->cmplt.ring_id),
12526 		ltoh32(resp->cmn_hdr.request_id)));
12527 	if ((ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_DBGRING_REQ_PKTID) &&
12528 		(ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_BTLOGRING_REQ_PKTID) &&
12529 #ifdef DHD_HP2P
12530 		(ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_TXREQ_PKTID) &&
12531 		(ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_RXREQ_PKTID) &&
12532 #endif /* DHD_HP2P */
12533 		TRUE) {
12534 		DHD_ERROR(("invalid request ID with d2h ring create complete\n"));
12535 		return;
12536 	}
12537 	if (ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_DBGRING_REQ_PKTID) {
12538 #ifdef EWP_EDL
12539 		if (!dhd->dongle_edl_support)
12540 #endif
12541 		{
12542 
12543 			if (!dhd->prot->d2hring_info_cpln->create_pending) {
12544 				DHD_ERROR(("info ring create status for not pending cpl ring\n"));
12545 				return;
12546 			}
12547 
12548 			if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
12549 				DHD_ERROR(("info cpl ring create failed with status %d\n",
12550 					ltoh16(resp->cmplt.status)));
12551 				return;
12552 			}
12553 			dhd->prot->d2hring_info_cpln->create_pending = FALSE;
12554 			dhd->prot->d2hring_info_cpln->inited = TRUE;
12555 		}
12556 #ifdef EWP_EDL
12557 		else {
12558 			if (!dhd->prot->d2hring_edl->create_pending) {
12559 				DHD_ERROR(("edl ring create status for not pending cpl ring\n"));
12560 				return;
12561 			}
12562 
12563 			if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
12564 				DHD_ERROR(("edl cpl ring create failed with status %d\n",
12565 					ltoh16(resp->cmplt.status)));
12566 				return;
12567 			}
12568 			dhd->prot->d2hring_edl->create_pending = FALSE;
12569 			dhd->prot->d2hring_edl->inited = TRUE;
12570 		}
12571 #endif /* EWP_EDL */
12572 	}
12573 
12574 #ifdef BTLOG
12575 	if (ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_BTLOGRING_REQ_PKTID) {
12576 		if (!dhd->prot->d2hring_btlog_cpln->create_pending) {
12577 			DHD_ERROR(("btlog ring create status for not pending cpl ring\n"));
12578 			return;
12579 		}
12580 
12581 		if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
12582 			DHD_ERROR(("btlog cpl ring create failed with status %d\n",
12583 				ltoh16(resp->cmplt.status)));
12584 			return;
12585 		}
12586 		dhd->prot->d2hring_btlog_cpln->create_pending = FALSE;
12587 		dhd->prot->d2hring_btlog_cpln->inited = TRUE;
12588 	}
12589 #endif	/* BTLOG */
12590 #ifdef DHD_HP2P
12591 	if (dhd->prot->d2hring_hp2p_txcpl &&
12592 		ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_TXREQ_PKTID) {
12593 		if (!dhd->prot->d2hring_hp2p_txcpl->create_pending) {
12594 			DHD_ERROR(("HPP tx ring create status for not pending cpl ring\n"));
12595 			return;
12596 		}
12597 
12598 		if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
12599 			DHD_ERROR(("HPP tx cpl ring create failed with status %d\n",
12600 				ltoh16(resp->cmplt.status)));
12601 			return;
12602 		}
12603 		dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
12604 		dhd->prot->d2hring_hp2p_txcpl->inited = TRUE;
12605 	}
12606 	if (dhd->prot->d2hring_hp2p_rxcpl &&
12607 		ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_RXREQ_PKTID) {
12608 		if (!dhd->prot->d2hring_hp2p_rxcpl->create_pending) {
12609 			DHD_ERROR(("HPP rx ring create status for not pending cpl ring\n"));
12610 			return;
12611 		}
12612 
12613 		if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
12614 			DHD_ERROR(("HPP rx cpl ring create failed with status %d\n",
12615 				ltoh16(resp->cmplt.status)));
12616 			return;
12617 		}
12618 		dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
12619 		dhd->prot->d2hring_hp2p_rxcpl->inited = TRUE;
12620 	}
12621 #endif /* DHD_HP2P */
12622 }
12623 
12624 static void
12625 dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf)
12626 {
12627 	d2h_mailbox_data_t *d2h_data;
12628 
12629 	d2h_data = (d2h_mailbox_data_t *)buf;
12630 	DHD_MSGBUF_INFO(("%s dhd_prot_process_d2h_mb_data, 0x%04x\n", __FUNCTION__,
12631 		d2h_data->d2h_mailbox_data));
12632 	dhd_bus_handle_mb_data(dhd->bus, d2h_data->d2h_mailbox_data);
12633 }
12634 
12635 static void
12636 dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf)
12637 {
12638 #ifdef DHD_TIMESYNC
12639 	host_timestamp_msg_cpl_t  *host_ts_cpl;
12640 	uint32 pktid;
12641 	dhd_prot_t *prot = dhd->prot;
12642 
12643 	host_ts_cpl = (host_timestamp_msg_cpl_t *)buf;
12644 	DHD_INFO(("%s host TS cpl: status %d, req_ID: 0x%04x, xt_id %d \n", __FUNCTION__,
12645 		host_ts_cpl->cmplt.status, host_ts_cpl->msg.request_id, host_ts_cpl->xt_id));
12646 
12647 	pktid = ltoh32(host_ts_cpl->msg.request_id);
12648 	if (prot->hostts_req_buf_inuse == FALSE) {
12649 		DHD_ERROR(("No Pending Host TS req, but completion\n"));
12650 		return;
12651 	}
12652 	prot->hostts_req_buf_inuse = FALSE;
12653 	if (pktid != DHD_H2D_HOSTTS_REQ_PKTID) {
12654 		DHD_ERROR(("Host TS req CPL, but req ID different 0x%04x, exp 0x%04x\n",
12655 			pktid, DHD_H2D_HOSTTS_REQ_PKTID));
12656 		return;
12657 	}
12658 	dhd_timesync_handle_host_ts_complete(dhd->ts, host_ts_cpl->xt_id,
12659 		host_ts_cpl->cmplt.status);
12660 #else /* DHD_TIMESYNC */
12661 	DHD_ERROR(("Timesunc feature not compiled in but GOT HOST_TS_COMPLETE\n"));
12662 #endif /* DHD_TIMESYNC */
12663 
12664 }
12665 
12666 /** called on e.g. flow ring delete */
12667 void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info)
12668 {
12669 	msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
12670 	dhd_prot_ring_detach(dhd, flow_ring);
12671 	DHD_INFO(("%s Cleaning up Flow \n", __FUNCTION__));
12672 }
12673 
12674 void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info, bool h2d,
12675 	struct bcmstrbuf *strbuf, const char * fmt)
12676 {
12677 	const char *default_fmt =
12678 		"TRD:%d HLRD:%d HDRD:%d TWR:%d HLWR:%d HDWR:%d  BASE(VA) %p BASE(PA) %x:%x SIZE %d "
12679 		"WORK_ITEM_SIZE %d MAX_WORK_ITEMS %d TOTAL_SIZE %d\n";
12680 	msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
12681 	uint16 rd, wr, drd = 0, dwr = 0;
12682 	uint32 dma_buf_len = flow_ring->max_items * flow_ring->item_len;
12683 
12684 	if (fmt == NULL) {
12685 		fmt = default_fmt;
12686 	}
12687 
12688 	if (dhd->bus->is_linkdown) {
12689 		DHD_ERROR(("%s: Skip dumping flowring due to Link down\n", __FUNCTION__));
12690 		return;
12691 	}
12692 
12693 	dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
12694 	dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
12695 	if (dhd->dma_d2h_ring_upd_support) {
12696 		if (h2d) {
12697 			drd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, flow_ring->idx);
12698 			dwr = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, flow_ring->idx);
12699 		} else {
12700 			drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, flow_ring->idx);
12701 			dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, flow_ring->idx);
12702 		}
12703 	}
12704 	bcm_bprintf(strbuf, fmt, rd, flow_ring->rd, drd, wr, flow_ring->wr, dwr,
12705 		flow_ring->dma_buf.va,
12706 		ltoh32(flow_ring->base_addr.high_addr),
12707 		ltoh32(flow_ring->base_addr.low_addr),
12708 		flow_ring->item_len, flow_ring->max_items,
12709 		dma_buf_len);
12710 }
12711 
12712 void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
12713 {
12714 	dhd_prot_t *prot = dhd->prot;
12715 	bcm_bprintf(strbuf, "IPCrevs: Dev %d, \t Host %d, \tactive %d\n",
12716 		dhd->prot->device_ipc_version,
12717 		dhd->prot->host_ipc_version,
12718 		dhd->prot->active_ipc_version);
12719 
12720 	bcm_bprintf(strbuf, "max Host TS bufs to post: %d, \t posted %d \n",
12721 		dhd->prot->max_tsbufpost, dhd->prot->cur_ts_bufs_posted);
12722 	bcm_bprintf(strbuf, "max INFO bufs to post: %d, \t posted %d \n",
12723 		dhd->prot->max_infobufpost, dhd->prot->infobufpost);
12724 #ifdef BTLOG
12725 	bcm_bprintf(strbuf, "max BTLOG bufs to post: %d, \t posted %d \n",
12726 		dhd->prot->max_btlogbufpost, dhd->prot->btlogbufpost);
12727 #endif	/* BTLOG */
12728 	bcm_bprintf(strbuf, "max event bufs to post: %d, \t posted %d \n",
12729 		dhd->prot->max_eventbufpost, dhd->prot->cur_event_bufs_posted);
12730 	bcm_bprintf(strbuf, "max ioctlresp bufs to post: %d, \t posted %d \n",
12731 		dhd->prot->max_ioctlrespbufpost, dhd->prot->cur_ioctlresp_bufs_posted);
12732 	bcm_bprintf(strbuf, "max RX bufs to post: %d, \t posted %d \n",
12733 		dhd->prot->max_rxbufpost, dhd->prot->rxbufpost);
12734 
12735 	bcm_bprintf(strbuf, "Total RX bufs posted: %d, \t RX cpl got %d \n",
12736 		dhd->prot->tot_rxbufpost, dhd->prot->tot_rxcpl);
12737 
12738 	bcm_bprintf(strbuf, "Total TX packets: %lu, \t TX cpl got %lu \n",
12739 		dhd->actual_tx_pkts, dhd->tot_txcpl);
12740 
12741 	bcm_bprintf(strbuf,
12742 		"%14s %18s %18s %17s %17s %14s %14s %10s\n",
12743 		"Type", "TRD: HLRD: HDRD", "TWR: HLWR: HDWR", "BASE(VA)", "BASE(PA)",
12744 		"WORK_ITEM_SIZE", "MAX_WORK_ITEMS", "TOTAL_SIZE");
12745 	bcm_bprintf(strbuf, "%14s", "H2DCtrlPost");
12746 	dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, TRUE, strbuf,
12747 		" %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
12748 	bcm_bprintf(strbuf, "%14s", "D2HCtrlCpl");
12749 	dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, FALSE, strbuf,
12750 		" %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
12751 	bcm_bprintf(strbuf, "%14s", "H2DRxPost");
12752 	dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, TRUE, strbuf,
12753 		" %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
12754 	bcm_bprintf(strbuf, "%14s", "D2HRxCpl");
12755 	dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, FALSE, strbuf,
12756 		" %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
12757 	bcm_bprintf(strbuf, "%14s", "D2HTxCpl");
12758 	dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, FALSE, strbuf,
12759 		" %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
12760 	if (dhd->prot->h2dring_info_subn != NULL && dhd->prot->d2hring_info_cpln != NULL) {
12761 		bcm_bprintf(strbuf, "%14s", "H2DRingInfoSub");
12762 		dhd_prot_print_flow_ring(dhd, prot->h2dring_info_subn, TRUE, strbuf,
12763 			" %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
12764 		bcm_bprintf(strbuf, "%14s", "D2HRingInfoCpl");
12765 		dhd_prot_print_flow_ring(dhd, prot->d2hring_info_cpln, FALSE, strbuf,
12766 			" %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
12767 	}
12768 	if (dhd->prot->d2hring_edl != NULL) {
12769 		bcm_bprintf(strbuf, "%14s", "D2HRingEDL");
12770 		dhd_prot_print_flow_ring(dhd, prot->d2hring_edl, FALSE, strbuf,
12771 			" %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
12772 	}
12773 
12774 	bcm_bprintf(strbuf, "active_tx_count %d	 pktidmap_avail(ctrl/rx/tx) %d %d %d\n",
12775 		OSL_ATOMIC_READ(dhd->osh, &dhd->prot->active_tx_count),
12776 		DHD_PKTID_AVAIL(dhd->prot->pktid_ctrl_map),
12777 		DHD_PKTID_AVAIL(dhd->prot->pktid_rx_map),
12778 		DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map));
12779 
12780 #if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
12781 	dhd_prot_ioctl_dump(dhd->prot, strbuf);
12782 #endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */
12783 #ifdef DHD_MMIO_TRACE
12784 	dhd_dump_bus_mmio_trace(dhd->bus, strbuf);
12785 #endif /* DHD_MMIO_TRACE */
12786 	dhd_dump_bus_ds_trace(dhd->bus, strbuf);
12787 #ifdef DHD_FLOW_RING_STATUS_TRACE
12788 	dhd_dump_bus_flow_ring_status_isr_trace(dhd->bus, strbuf);
12789 	dhd_dump_bus_flow_ring_status_dpc_trace(dhd->bus, strbuf);
12790 #endif /* DHD_FLOW_RING_STATUS_TRACE */
12791 }
12792 
12793 int
12794 dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
12795 {
12796 	tx_flowring_delete_request_t *flow_delete_rqst;
12797 	dhd_prot_t *prot = dhd->prot;
12798 	unsigned long flags;
12799 	uint16 alloced = 0;
12800 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
12801 
12802 #ifdef PCIE_INB_DW
12803 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
12804 		return BCME_ERROR;
12805 #endif /* PCIE_INB_DW */
12806 
12807 	DHD_RING_LOCK(ring->ring_lock, flags);
12808 
12809 	/* Request for ring buffer space */
12810 	flow_delete_rqst = (tx_flowring_delete_request_t *)
12811 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
12812 
12813 	if (flow_delete_rqst == NULL) {
12814 		DHD_RING_UNLOCK(ring->ring_lock, flags);
12815 		DHD_ERROR(("%s: Flow Delete Req - failure ring space\n", __FUNCTION__));
12816 #ifdef PCIE_INB_DW
12817 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
12818 #endif
12819 		return BCME_NOMEM;
12820 	}
12821 
12822 	/* Common msg buf hdr */
12823 	flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE;
12824 	flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
12825 	flow_delete_rqst->msg.request_id = htol32(0); /* TBD */
12826 	flow_delete_rqst->msg.flags = ring->current_phase;
12827 
12828 	flow_delete_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
12829 	ring->seqnum++;
12830 
12831 	/* Update Delete info */
12832 	flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
12833 	flow_delete_rqst->reason = htol16(BCME_OK);
12834 
12835 	DHD_ERROR(("%s: Send Flow Delete Req RING ID %d for peer %pM"
12836 		" prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
12837 		flow_ring_node->flow_info.da, flow_ring_node->flow_info.tid,
12838 		flow_ring_node->flow_info.ifindex));
12839 
12840 	/* update ring's WR index and ring doorbell to dongle */
12841 	dhd_prot_ring_write_complete(dhd, ring, flow_delete_rqst, 1);
12842 
12843 	DHD_RING_UNLOCK(ring->ring_lock, flags);
12844 
12845 #ifdef PCIE_INB_DW
12846 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
12847 #endif
12848 	return BCME_OK;
12849 }
12850 
12851 static void
12852 BCMFASTPATH(dhd_prot_flow_ring_fastdelete)(dhd_pub_t *dhd, uint16 flowid, uint16 rd_idx)
12853 {
12854 	flow_ring_node_t *flow_ring_node = DHD_FLOW_RING(dhd, flowid);
12855 	msgbuf_ring_t *ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
12856 	host_txbuf_cmpl_t txstatus;
12857 	host_txbuf_post_t *txdesc;
12858 	uint16 wr_idx;
12859 
12860 	DHD_INFO(("%s: FAST delete ring, flowid=%d, rd_idx=%d, wr_idx=%d\n",
12861 		__FUNCTION__, flowid, rd_idx, ring->wr));
12862 
12863 	memset(&txstatus, 0, sizeof(txstatus));
12864 	txstatus.compl_hdr.flow_ring_id = flowid;
12865 	txstatus.cmn_hdr.if_id = flow_ring_node->flow_info.ifindex;
12866 	wr_idx = ring->wr;
12867 
12868 	while (wr_idx != rd_idx) {
12869 		if (wr_idx)
12870 			wr_idx--;
12871 		else
12872 			wr_idx = ring->max_items - 1;
12873 		txdesc = (host_txbuf_post_t *)((char *)DHD_RING_BGN_VA(ring) +
12874 			(wr_idx * ring->item_len));
12875 		txstatus.cmn_hdr.request_id = txdesc->cmn_hdr.request_id;
12876 		dhd_prot_txstatus_process(dhd, &txstatus);
12877 	}
12878 }
12879 
12880 static void
12881 dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg)
12882 {
12883 	tx_flowring_delete_response_t *flow_delete_resp = (tx_flowring_delete_response_t *)msg;
12884 
12885 	DHD_ERROR(("%s: Flow Delete Response status = %d Flow %d\n", __FUNCTION__,
12886 		flow_delete_resp->cmplt.status, flow_delete_resp->cmplt.flow_ring_id));
12887 
12888 	if (dhd->fast_delete_ring_support) {
12889 		dhd_prot_flow_ring_fastdelete(dhd, flow_delete_resp->cmplt.flow_ring_id,
12890 			flow_delete_resp->read_idx);
12891 	}
12892 	dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id,
12893 		flow_delete_resp->cmplt.status);
12894 }
12895 
12896 static void
12897 dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg)
12898 {
12899 #ifdef IDLE_TX_FLOW_MGMT
12900 	tx_idle_flowring_resume_response_t	*flow_resume_resp =
12901 		(tx_idle_flowring_resume_response_t *)msg;
12902 
12903 	DHD_ERROR(("%s Flow resume Response status = %d Flow %d\n", __FUNCTION__,
12904 		flow_resume_resp->cmplt.status, flow_resume_resp->cmplt.flow_ring_id));
12905 
12906 	dhd_bus_flow_ring_resume_response(dhd->bus, flow_resume_resp->cmplt.flow_ring_id,
12907 		flow_resume_resp->cmplt.status);
12908 #endif /* IDLE_TX_FLOW_MGMT */
12909 }
12910 
12911 static void
12912 dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg)
12913 {
12914 #ifdef IDLE_TX_FLOW_MGMT
12915 	int16 status;
12916 	tx_idle_flowring_suspend_response_t	*flow_suspend_resp =
12917 		(tx_idle_flowring_suspend_response_t *)msg;
12918 	status = flow_suspend_resp->cmplt.status;
12919 
12920 	DHD_ERROR(("%s Flow id %d suspend Response status = %d\n",
12921 		__FUNCTION__, flow_suspend_resp->cmplt.flow_ring_id,
12922 		status));
12923 
12924 	if (status != BCME_OK) {
12925 
12926 		DHD_ERROR(("%s Error in Suspending Flow rings!!"
12927 			"Dongle will still be polling idle rings!!Status = %d \n",
12928 			__FUNCTION__, status));
12929 	}
12930 #endif /* IDLE_TX_FLOW_MGMT */
12931 }
12932 
12933 int
12934 dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
12935 {
12936 	tx_flowring_flush_request_t *flow_flush_rqst;
12937 	dhd_prot_t *prot = dhd->prot;
12938 	unsigned long flags;
12939 	uint16 alloced = 0;
12940 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
12941 
12942 #ifdef PCIE_INB_DW
12943 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
12944 		return BCME_ERROR;
12945 #endif /* PCIE_INB_DW */
12946 
12947 	DHD_RING_LOCK(ring->ring_lock, flags);
12948 
12949 	/* Request for ring buffer space */
12950 	flow_flush_rqst = (tx_flowring_flush_request_t *)
12951 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
12952 	if (flow_flush_rqst == NULL) {
12953 		DHD_RING_UNLOCK(ring->ring_lock, flags);
12954 		DHD_ERROR(("%s: Flow Flush Req - failure ring space\n", __FUNCTION__));
12955 #ifdef PCIE_INB_DW
12956 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
12957 #endif
12958 		return BCME_NOMEM;
12959 	}
12960 
12961 	/* Common msg buf hdr */
12962 	flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH;
12963 	flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
12964 	flow_flush_rqst->msg.request_id = htol32(0); /* TBD */
12965 	flow_flush_rqst->msg.flags = ring->current_phase;
12966 	flow_flush_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
12967 	ring->seqnum++;
12968 
12969 	flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
12970 	flow_flush_rqst->reason = htol16(BCME_OK);
12971 
12972 	DHD_INFO(("%s: Send Flow Flush Req\n", __FUNCTION__));
12973 
12974 	/* update ring's WR index and ring doorbell to dongle */
12975 	dhd_prot_ring_write_complete(dhd, ring, flow_flush_rqst, 1);
12976 
12977 	DHD_RING_UNLOCK(ring->ring_lock, flags);
12978 
12979 #ifdef PCIE_INB_DW
12980 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
12981 #endif
12982 	return BCME_OK;
12983 } /* dhd_prot_flow_ring_flush */
12984 
12985 static void
12986 dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg)
12987 {
12988 	tx_flowring_flush_response_t *flow_flush_resp = (tx_flowring_flush_response_t *)msg;
12989 
12990 	DHD_INFO(("%s: Flow Flush Response status = %d\n", __FUNCTION__,
12991 		flow_flush_resp->cmplt.status));
12992 
12993 	dhd_bus_flow_ring_flush_response(dhd->bus, flow_flush_resp->cmplt.flow_ring_id,
12994 		flow_flush_resp->cmplt.status);
12995 }
12996 
12997 /**
12998  * Request dongle to configure soft doorbells for D2H rings. Host populated soft
12999  * doorbell information is transferred to dongle via the d2h ring config control
13000  * message.
13001  */
13002 void
13003 dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd)
13004 {
13005 #if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
13006 	uint16 ring_idx;
13007 	uint8 *msg_next;
13008 	void *msg_start;
13009 	uint16 alloced = 0;
13010 	unsigned long flags;
13011 	dhd_prot_t *prot = dhd->prot;
13012 	ring_config_req_t *ring_config_req;
13013 	bcmpcie_soft_doorbell_t *soft_doorbell;
13014 	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
13015 	const uint16 d2h_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
13016 
13017 #ifdef PCIE_INB_DW
13018 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
13019 		return BCME_ERROR;
13020 #endif /* PCIE_INB_DW */
13021 	/* Claim space for d2h_ring number of d2h_ring_config_req_t messages */
13022 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
13023 	msg_start = dhd_prot_alloc_ring_space(dhd, ctrl_ring, d2h_rings, &alloced, TRUE);
13024 
13025 	if (msg_start == NULL) {
13026 		DHD_ERROR(("%s Msgbuf no space for %d D2H ring config soft doorbells\n",
13027 			__FUNCTION__, d2h_rings));
13028 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
13029 #ifdef PCIE_INB_DW
13030 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
13031 #endif
13032 		return;
13033 	}
13034 
13035 	msg_next = (uint8*)msg_start;
13036 
13037 	for (ring_idx = 0; ring_idx < d2h_rings; ring_idx++) {
13038 
13039 		/* position the ring_config_req into the ctrl subm ring */
13040 		ring_config_req = (ring_config_req_t *)msg_next;
13041 
13042 		/* Common msg header */
13043 		ring_config_req->msg.msg_type = MSG_TYPE_D2H_RING_CONFIG;
13044 		ring_config_req->msg.if_id = 0;
13045 		ring_config_req->msg.flags = 0;
13046 
13047 		ring_config_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
13048 		ctrl_ring->seqnum++;
13049 
13050 		ring_config_req->msg.request_id = htol32(DHD_FAKE_PKTID); /* unused */
13051 
13052 		/* Ring Config subtype and d2h ring_id */
13053 		ring_config_req->subtype = htol16(D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL);
13054 		ring_config_req->ring_id = htol16(DHD_D2H_RINGID(ring_idx));
13055 
13056 		/* Host soft doorbell configuration */
13057 		soft_doorbell = &prot->soft_doorbell[ring_idx];
13058 
13059 		ring_config_req->soft_doorbell.value = htol32(soft_doorbell->value);
13060 		ring_config_req->soft_doorbell.haddr.high =
13061 			htol32(soft_doorbell->haddr.high);
13062 		ring_config_req->soft_doorbell.haddr.low =
13063 			htol32(soft_doorbell->haddr.low);
13064 		ring_config_req->soft_doorbell.items = htol16(soft_doorbell->items);
13065 		ring_config_req->soft_doorbell.msecs = htol16(soft_doorbell->msecs);
13066 
13067 		DHD_INFO(("%s: Soft doorbell haddr 0x%08x 0x%08x value 0x%08x\n",
13068 			__FUNCTION__, ring_config_req->soft_doorbell.haddr.high,
13069 			ring_config_req->soft_doorbell.haddr.low,
13070 			ring_config_req->soft_doorbell.value));
13071 
13072 		msg_next = msg_next + ctrl_ring->item_len;
13073 	}
13074 
13075 	/* update control subn ring's WR index and ring doorbell to dongle */
13076 	dhd_prot_ring_write_complete(dhd, ctrl_ring, msg_start, d2h_rings);
13077 
13078 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
13079 
13080 #ifdef PCIE_INB_DW
13081 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
13082 #endif
13083 #endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
13084 }
13085 
13086 static void
13087 dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg)
13088 {
13089 	DHD_INFO(("%s: Ring Config Response - status %d ringid %d\n",
13090 		__FUNCTION__, ltoh16(((ring_config_resp_t *)msg)->compl_hdr.status),
13091 		ltoh16(((ring_config_resp_t *)msg)->compl_hdr.flow_ring_id)));
13092 }
13093 
13094 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
13095 void
13096 copy_ext_trap_sig(dhd_pub_t *dhd, trap_t *tr)
13097 {
13098 	uint32 *ext_data = dhd->extended_trap_data;
13099 	hnd_ext_trap_hdr_t *hdr;
13100 	const bcm_tlv_t *tlv;
13101 
13102 	if (ext_data == NULL) {
13103 		return;
13104 	}
13105 	/* First word is original trap_data */
13106 	ext_data++;
13107 
13108 	/* Followed by the extended trap data header */
13109 	hdr = (hnd_ext_trap_hdr_t *)ext_data;
13110 
13111 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
13112 	if (tlv) {
13113 		memcpy(tr, &tlv->data, sizeof(struct _trap_struct));
13114 	}
13115 }
13116 #define TRAP_T_NAME_OFFSET(var) {#var, OFFSETOF(trap_t, var)}
13117 
13118 typedef struct {
13119 	char name[HANG_INFO_TRAP_T_NAME_MAX];
13120 	uint32 offset;
13121 } hang_info_trap_t;
13122 
13123 #ifdef DHD_EWPR_VER2
13124 static hang_info_trap_t hang_info_trap_tbl[] = {
13125 	{"reason", 0},
13126 	{"ver", VENDOR_SEND_HANG_EXT_INFO_VER},
13127 	{"stype", 0},
13128 	TRAP_T_NAME_OFFSET(type),
13129 	TRAP_T_NAME_OFFSET(epc),
13130 	{"resrvd", 0},
13131 	{"resrvd", 0},
13132 	{"resrvd", 0},
13133 	{"resrvd", 0},
13134 	{"", 0}
13135 };
13136 #else
13137 static hang_info_trap_t hang_info_trap_tbl[] = {
13138 	{"reason", 0},
13139 	{"ver", VENDOR_SEND_HANG_EXT_INFO_VER},
13140 	{"stype", 0},
13141 	TRAP_T_NAME_OFFSET(type),
13142 	TRAP_T_NAME_OFFSET(epc),
13143 	TRAP_T_NAME_OFFSET(cpsr),
13144 	TRAP_T_NAME_OFFSET(spsr),
13145 	TRAP_T_NAME_OFFSET(r0),
13146 	TRAP_T_NAME_OFFSET(r1),
13147 	TRAP_T_NAME_OFFSET(r2),
13148 	TRAP_T_NAME_OFFSET(r3),
13149 	TRAP_T_NAME_OFFSET(r4),
13150 	TRAP_T_NAME_OFFSET(r5),
13151 	TRAP_T_NAME_OFFSET(r6),
13152 	TRAP_T_NAME_OFFSET(r7),
13153 	TRAP_T_NAME_OFFSET(r8),
13154 	TRAP_T_NAME_OFFSET(r9),
13155 	TRAP_T_NAME_OFFSET(r10),
13156 	TRAP_T_NAME_OFFSET(r11),
13157 	TRAP_T_NAME_OFFSET(r12),
13158 	TRAP_T_NAME_OFFSET(r13),
13159 	TRAP_T_NAME_OFFSET(r14),
13160 	TRAP_T_NAME_OFFSET(pc),
13161 	{"", 0}
13162 };
13163 #endif /* DHD_EWPR_VER2 */
13164 
13165 #define TAG_TRAP_IS_STATE(tag) \
13166 	((tag == TAG_TRAP_MEMORY) || (tag == TAG_TRAP_PCIE_Q) || \
13167 	(tag == TAG_TRAP_WLC_STATE) || (tag == TAG_TRAP_LOG_DATA) || \
13168 	(tag == TAG_TRAP_CODE))
13169 
13170 static void
13171 copy_hang_info_head(char *dest, trap_t *src, int len, int field_name,
13172 		int *bytes_written, int *cnt, char *cookie)
13173 {
13174 	uint8 *ptr;
13175 	int remain_len;
13176 	int i;
13177 
13178 	ptr = (uint8 *)src;
13179 
13180 	memset(dest, 0, len);
13181 	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13182 
13183 	/* hang reason, hang info ver */
13184 	for (i = 0; (i < HANG_INFO_TRAP_T_SUBTYPE_IDX) && (*cnt < HANG_FIELD_CNT_MAX);
13185 			i++, (*cnt)++) {
13186 		if (field_name) {
13187 			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13188 			*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
13189 					hang_info_trap_tbl[i].name, HANG_KEY_DEL);
13190 		}
13191 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13192 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%d%c",
13193 				hang_info_trap_tbl[i].offset, HANG_KEY_DEL);
13194 
13195 	}
13196 
13197 	if (*cnt < HANG_FIELD_CNT_MAX) {
13198 		if (field_name) {
13199 			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13200 			*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
13201 					"cookie", HANG_KEY_DEL);
13202 		}
13203 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13204 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s%c",
13205 				cookie, HANG_KEY_DEL);
13206 		(*cnt)++;
13207 	}
13208 
13209 	if (*cnt < HANG_FIELD_CNT_MAX) {
13210 		if (field_name) {
13211 			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13212 			*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
13213 					hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].name,
13214 					HANG_KEY_DEL);
13215 		}
13216 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13217 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%08x%c",
13218 				hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].offset,
13219 				HANG_KEY_DEL);
13220 		(*cnt)++;
13221 	}
13222 
13223 	if (*cnt < HANG_FIELD_CNT_MAX) {
13224 		if (field_name) {
13225 			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13226 			*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
13227 					hang_info_trap_tbl[HANG_INFO_TRAP_T_EPC_IDX].name,
13228 					HANG_KEY_DEL);
13229 		}
13230 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13231 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%08x%c",
13232 				*(uint32 *)
13233 				(ptr + hang_info_trap_tbl[HANG_INFO_TRAP_T_EPC_IDX].offset),
13234 				HANG_KEY_DEL);
13235 		(*cnt)++;
13236 	}
13237 #ifdef DHD_EWPR_VER2
13238 	/* put 0 for HG03 ~ HG06 (reserved for future use) */
13239 	for (i = 0; (i < HANG_INFO_BIGDATA_EXTRA_KEY) && (*cnt < HANG_FIELD_CNT_MAX);
13240 			i++, (*cnt)++) {
13241 		if (field_name) {
13242 			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13243 			*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
13244 				hang_info_trap_tbl[HANG_INFO_TRAP_T_EXTRA_KEY_IDX+i].name,
13245 				HANG_KEY_DEL);
13246 		}
13247 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13248 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%d%c",
13249 			hang_info_trap_tbl[HANG_INFO_TRAP_T_EXTRA_KEY_IDX+i].offset,
13250 			HANG_KEY_DEL);
13251 	}
13252 #endif /* DHD_EWPR_VER2 */
13253 }
13254 #ifndef DHD_EWPR_VER2
13255 static void
13256 copy_hang_info_trap_t(char *dest, trap_t *src, int len, int field_name,
13257 		int *bytes_written, int *cnt, char *cookie)
13258 {
13259 	uint8 *ptr;
13260 	int remain_len;
13261 	int i;
13262 
13263 	ptr = (uint8 *)src;
13264 
13265 	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13266 
13267 	for (i = HANG_INFO_TRAP_T_OFFSET_IDX;
13268 			(hang_info_trap_tbl[i].name[0] != 0) && (*cnt < HANG_FIELD_CNT_MAX);
13269 			i++, (*cnt)++) {
13270 		if (field_name) {
13271 			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13272 			*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%s:",
13273 					HANG_RAW_DEL, hang_info_trap_tbl[i].name);
13274 		}
13275 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13276 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
13277 				HANG_RAW_DEL, *(uint32 *)(ptr + hang_info_trap_tbl[i].offset));
13278 	}
13279 }
13280 
13281 static void
13282 copy_hang_info_stack(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
13283 {
13284 	int remain_len;
13285 	int i = 0;
13286 	const uint32 *stack;
13287 	uint32 *ext_data = dhd->extended_trap_data;
13288 	hnd_ext_trap_hdr_t *hdr;
13289 	const bcm_tlv_t *tlv;
13290 	int remain_stack_cnt = 0;
13291 	uint32 dummy_data = 0;
13292 	int bigdata_key_stack_cnt = 0;
13293 
13294 	if (ext_data == NULL) {
13295 		return;
13296 	}
13297 	/* First word is original trap_data */
13298 	ext_data++;
13299 
13300 	/* Followed by the extended trap data header */
13301 	hdr = (hnd_ext_trap_hdr_t *)ext_data;
13302 
13303 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
13304 
13305 	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13306 
13307 	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
13308 	if (tlv) {
13309 		stack = (const uint32 *)tlv->data;
13310 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len,
13311 				"%08x", *(uint32 *)(stack++));
13312 		(*cnt)++;
13313 		if (*cnt >= HANG_FIELD_CNT_MAX) {
13314 			return;
13315 		}
13316 		for (i = 1; i < (uint32)(tlv->len / sizeof(uint32)); i++, bigdata_key_stack_cnt++) {
13317 			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13318 			/* Raw data for bigdata use '_' and Key data for bigdata use space */
13319 			*bytes_written += scnprintf(&dest[*bytes_written], remain_len,
13320 				"%c%08x",
13321 				i <= HANG_INFO_BIGDATA_KEY_STACK_CNT ? HANG_KEY_DEL : HANG_RAW_DEL,
13322 				*(uint32 *)(stack++));
13323 
13324 			(*cnt)++;
13325 			if ((*cnt >= HANG_FIELD_CNT_MAX) ||
13326 					(i >= HANG_FIELD_TRAP_T_STACK_CNT_MAX)) {
13327 				return;
13328 			}
13329 		}
13330 	}
13331 
13332 	remain_stack_cnt = HANG_FIELD_TRAP_T_STACK_CNT_MAX - i;
13333 
13334 	for (i = 0; i < remain_stack_cnt; i++) {
13335 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13336 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
13337 				HANG_RAW_DEL, dummy_data);
13338 		(*cnt)++;
13339 		if (*cnt >= HANG_FIELD_CNT_MAX) {
13340 			return;
13341 		}
13342 	}
13343 	GCC_DIAGNOSTIC_POP();
13344 
13345 }
13346 
13347 static void
13348 copy_hang_info_specific(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
13349 {
13350 	int remain_len;
13351 	int i;
13352 	const uint32 *data;
13353 	uint32 *ext_data = dhd->extended_trap_data;
13354 	hnd_ext_trap_hdr_t *hdr;
13355 	const bcm_tlv_t *tlv;
13356 	int remain_trap_data = 0;
13357 	uint8 buf_u8[sizeof(uint32)] = { 0, };
13358 	const uint8 *p_u8;
13359 
13360 	if (ext_data == NULL) {
13361 		return;
13362 	}
13363 	/* First word is original trap_data */
13364 	ext_data++;
13365 
13366 	/* Followed by the extended trap data header */
13367 	hdr = (hnd_ext_trap_hdr_t *)ext_data;
13368 
13369 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
13370 	if (tlv) {
13371 		/* header include tlv hader */
13372 		remain_trap_data = (hdr->len - tlv->len - sizeof(uint16));
13373 	}
13374 
13375 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
13376 	if (tlv) {
13377 		/* header include tlv hader */
13378 		remain_trap_data -= (tlv->len + sizeof(uint16));
13379 	}
13380 
13381 	data = (const uint32 *)(hdr->data + (hdr->len  - remain_trap_data));
13382 
13383 	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13384 
13385 	for (i = 0; i < (uint32)(remain_trap_data / sizeof(uint32)) && *cnt < HANG_FIELD_CNT_MAX;
13386 			i++, (*cnt)++) {
13387 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13388 		GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
13389 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
13390 				HANG_RAW_DEL, *(uint32 *)(data++));
13391 		GCC_DIAGNOSTIC_POP();
13392 	}
13393 
13394 	if (*cnt >= HANG_FIELD_CNT_MAX) {
13395 		return;
13396 	}
13397 
13398 	remain_trap_data -= (sizeof(uint32) * i);
13399 
13400 	if (remain_trap_data > sizeof(buf_u8)) {
13401 		DHD_ERROR(("%s: resize remain_trap_data\n", __FUNCTION__));
13402 		remain_trap_data =  sizeof(buf_u8);
13403 	}
13404 
13405 	if (remain_trap_data) {
13406 		p_u8 = (const uint8 *)data;
13407 		for (i = 0; i < remain_trap_data; i++) {
13408 			buf_u8[i] = *(const uint8 *)(p_u8++);
13409 		}
13410 
13411 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13412 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
13413 				HANG_RAW_DEL, ltoh32_ua(buf_u8));
13414 		(*cnt)++;
13415 	}
13416 }
13417 #endif /* DHD_EWPR_VER2 */
13418 
13419 static void
13420 get_hang_info_trap_subtype(dhd_pub_t *dhd, uint32 *subtype)
13421 {
13422 	uint32 i;
13423 	uint32 *ext_data = dhd->extended_trap_data;
13424 	hnd_ext_trap_hdr_t *hdr;
13425 	const bcm_tlv_t *tlv;
13426 
13427 	/* First word is original trap_data */
13428 	ext_data++;
13429 
13430 	/* Followed by the extended trap data header */
13431 	hdr = (hnd_ext_trap_hdr_t *)ext_data;
13432 
13433 	/* Dump a list of all tags found  before parsing data */
13434 	for (i = TAG_TRAP_DEEPSLEEP; i < TAG_TRAP_LAST; i++) {
13435 		tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
13436 		if (tlv) {
13437 			if (!TAG_TRAP_IS_STATE(i)) {
13438 				*subtype = i;
13439 				return;
13440 			}
13441 		}
13442 	}
13443 }
13444 #ifdef DHD_EWPR_VER2
13445 static void
13446 copy_hang_info_etd_base64(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
13447 {
13448 	int remain_len;
13449 	uint32 *ext_data = dhd->extended_trap_data;
13450 	hnd_ext_trap_hdr_t *hdr;
13451 	char *base64_out = NULL;
13452 	int base64_cnt;
13453 	int max_base64_len = HANG_INFO_BASE64_BUFFER_SIZE;
13454 
13455 	if (ext_data == NULL) {
13456 		return;
13457 	}
13458 	/* First word is original trap_data */
13459 	ext_data++;
13460 
13461 	/* Followed by the extended trap data header */
13462 	hdr = (hnd_ext_trap_hdr_t *)ext_data;
13463 
13464 	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13465 
13466 	if (remain_len <= 0) {
13467 		DHD_ERROR(("%s: no space to put etd\n", __FUNCTION__));
13468 		return;
13469 	}
13470 
13471 	if (remain_len < max_base64_len) {
13472 		DHD_ERROR(("%s: change max base64 length to remain length %d\n", __FUNCTION__,
13473 			remain_len));
13474 		max_base64_len = remain_len;
13475 	}
13476 
13477 	base64_out = MALLOCZ(dhd->osh, HANG_INFO_BASE64_BUFFER_SIZE);
13478 	if (base64_out == NULL) {
13479 		DHD_ERROR(("%s: MALLOC failed for size %d\n",
13480 			__FUNCTION__, HANG_INFO_BASE64_BUFFER_SIZE));
13481 		return;
13482 	}
13483 
13484 	if (hdr->len > 0) {
13485 		base64_cnt = dhd_base64_encode(hdr->data, hdr->len, base64_out, max_base64_len);
13486 		if (base64_cnt == 0) {
13487 			DHD_ERROR(("%s: base64 encoding error\n", __FUNCTION__));
13488 		}
13489 	}
13490 
13491 	*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s",
13492 			base64_out);
13493 	(*cnt)++;
13494 	MFREE(dhd->osh, base64_out, HANG_INFO_BASE64_BUFFER_SIZE);
13495 }
13496 #endif /* DHD_EWPR_VER2 */
13497 
13498 void
13499 copy_hang_info_trap(dhd_pub_t *dhd)
13500 {
13501 	trap_t tr;
13502 	int bytes_written;
13503 	int trap_subtype = 0;
13504 
13505 	if (!dhd || !dhd->hang_info) {
13506 		DHD_ERROR(("%s dhd=%p hang_info=%p\n", __FUNCTION__,
13507 			dhd, (dhd ? dhd->hang_info : NULL)));
13508 		return;
13509 	}
13510 
13511 	if (!dhd->dongle_trap_occured) {
13512 		DHD_ERROR(("%s: dongle_trap_occured is FALSE\n", __FUNCTION__));
13513 		return;
13514 	}
13515 
13516 	memset(&tr, 0x00, sizeof(struct _trap_struct));
13517 
13518 	copy_ext_trap_sig(dhd, &tr);
13519 	get_hang_info_trap_subtype(dhd, &trap_subtype);
13520 
13521 	hang_info_trap_tbl[HANG_INFO_TRAP_T_REASON_IDX].offset = HANG_REASON_DONGLE_TRAP;
13522 	hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].offset = trap_subtype;
13523 
13524 	bytes_written = 0;
13525 	dhd->hang_info_cnt = 0;
13526 	get_debug_dump_time(dhd->debug_dump_time_hang_str);
13527 	copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str);
13528 
13529 	copy_hang_info_head(dhd->hang_info, &tr, VENDOR_SEND_HANG_EXT_INFO_LEN, FALSE,
13530 			&bytes_written, &dhd->hang_info_cnt, dhd->debug_dump_time_hang_str);
13531 
13532 	DHD_INFO(("hang info head cnt: %d len: %d data: %s\n",
13533 		dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
13534 
13535 	clear_debug_dump_time(dhd->debug_dump_time_hang_str);
13536 
13537 #ifdef DHD_EWPR_VER2
13538 	/* stack info & trap info are included in etd data */
13539 
13540 	/* extended trap data dump */
13541 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
13542 		copy_hang_info_etd_base64(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
13543 		DHD_INFO(("hang info specific cnt: %d len: %d data: %s\n",
13544 			dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
13545 	}
13546 #else
13547 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
13548 		copy_hang_info_stack(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
13549 		DHD_INFO(("hang info stack cnt: %d len: %d data: %s\n",
13550 			dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
13551 	}
13552 
13553 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
13554 		copy_hang_info_trap_t(dhd->hang_info, &tr, VENDOR_SEND_HANG_EXT_INFO_LEN, FALSE,
13555 				&bytes_written, &dhd->hang_info_cnt, dhd->debug_dump_time_hang_str);
13556 		DHD_INFO(("hang info trap_t cnt: %d len: %d data: %s\n",
13557 			dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
13558 	}
13559 
13560 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
13561 		copy_hang_info_specific(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
13562 		DHD_INFO(("hang info specific cnt: %d len: %d data: %s\n",
13563 			dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
13564 	}
13565 #endif /* DHD_EWPR_VER2 */
13566 }
13567 
13568 void
13569 copy_hang_info_linkdown(dhd_pub_t *dhd)
13570 {
13571 	int bytes_written = 0;
13572 	int remain_len;
13573 
13574 	if (!dhd || !dhd->hang_info) {
13575 		DHD_ERROR(("%s dhd=%p hang_info=%p\n", __FUNCTION__,
13576 			dhd, (dhd ? dhd->hang_info : NULL)));
13577 		return;
13578 	}
13579 
13580 	if (!dhd->bus->is_linkdown) {
13581 		DHD_ERROR(("%s: link down is not happened\n", __FUNCTION__));
13582 		return;
13583 	}
13584 
13585 	dhd->hang_info_cnt = 0;
13586 
13587 	get_debug_dump_time(dhd->debug_dump_time_hang_str);
13588 	copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str);
13589 
13590 	/* hang reason code (0x8808) */
13591 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
13592 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
13593 		bytes_written += scnprintf(&dhd->hang_info[bytes_written], remain_len, "%d%c",
13594 				HANG_REASON_PCIE_LINK_DOWN_EP_DETECT, HANG_KEY_DEL);
13595 		dhd->hang_info_cnt++;
13596 	}
13597 
13598 	/* EWP version */
13599 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
13600 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
13601 		bytes_written += scnprintf(&dhd->hang_info[bytes_written], remain_len, "%d%c",
13602 				VENDOR_SEND_HANG_EXT_INFO_VER, HANG_KEY_DEL);
13603 		dhd->hang_info_cnt++;
13604 	}
13605 
13606 	/* cookie - dump time stamp */
13607 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
13608 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
13609 		bytes_written += scnprintf(&dhd->hang_info[bytes_written], remain_len, "%s%c",
13610 				dhd->debug_dump_time_hang_str, HANG_KEY_DEL);
13611 		dhd->hang_info_cnt++;
13612 	}
13613 
13614 	clear_debug_dump_time(dhd->debug_dump_time_hang_str);
13615 
13616 	/* dump PCIE RC registers */
13617 	dhd_dump_pcie_rc_regs_for_linkdown(dhd, &bytes_written);
13618 
13619 	DHD_INFO(("hang info haed cnt: %d len: %d data: %s\n",
13620 		dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
13621 
13622 }
13623 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
13624 
13625 int
13626 dhd_prot_debug_info_print(dhd_pub_t *dhd)
13627 {
13628 	dhd_prot_t *prot = dhd->prot;
13629 	msgbuf_ring_t *ring;
13630 	uint16 rd, wr, drd, dwr;
13631 	uint32 dma_buf_len;
13632 	uint64 current_time;
13633 	ulong ring_tcm_rd_addr; /* dongle address */
13634 	ulong ring_tcm_wr_addr; /* dongle address */
13635 
13636 	DHD_ERROR(("\n ------- DUMPING VERSION INFORMATION ------- \r\n"));
13637 	DHD_ERROR(("DHD: %s\n", dhd_version));
13638 	DHD_ERROR(("Firmware: %s\n", fw_version));
13639 
13640 #ifdef DHD_FW_COREDUMP
13641 	DHD_ERROR(("\n ------- DUMPING CONFIGURATION INFORMATION ------ \r\n"));
13642 	DHD_ERROR(("memdump mode: %d\n", dhd->memdump_enabled));
13643 #endif /* DHD_FW_COREDUMP */
13644 
13645 	DHD_ERROR(("\n ------- DUMPING PROTOCOL INFORMATION ------- \r\n"));
13646 	DHD_ERROR(("ICPrevs: Dev %d, Host %d, active %d\n",
13647 		prot->device_ipc_version,
13648 		prot->host_ipc_version,
13649 		prot->active_ipc_version));
13650 	DHD_ERROR(("d2h_intr_method -> %s d2h_intr_control -> %s\n",
13651 			dhd->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX",
13652 			dhd->bus->d2h_intr_control ? "HOST_IRQ" : "D2H_INTMASK"));
13653 	DHD_ERROR(("max Host TS bufs to post: %d, posted %d\n",
13654 		prot->max_tsbufpost, prot->cur_ts_bufs_posted));
13655 	DHD_ERROR(("max INFO bufs to post: %d, posted %d\n",
13656 		prot->max_infobufpost, prot->infobufpost));
13657 	DHD_ERROR(("max event bufs to post: %d, posted %d\n",
13658 		prot->max_eventbufpost, prot->cur_event_bufs_posted));
13659 	DHD_ERROR(("max ioctlresp bufs to post: %d, posted %d\n",
13660 		prot->max_ioctlrespbufpost, prot->cur_ioctlresp_bufs_posted));
13661 	DHD_ERROR(("max RX bufs to post: %d, posted %d\n",
13662 		prot->max_rxbufpost, prot->rxbufpost));
13663 	DHD_ERROR(("h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
13664 		h2d_max_txpost, prot->h2d_max_txpost));
13665 #if defined(DHD_HTPUT_TUNABLES)
13666 	DHD_ERROR(("h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
13667 		h2d_htput_max_txpost, prot->h2d_htput_max_txpost));
13668 #endif /* DHD_HTPUT_TUNABLES */
13669 
13670 	current_time = OSL_LOCALTIME_NS();
13671 	DHD_ERROR(("current_time="SEC_USEC_FMT"\n", GET_SEC_USEC(current_time)));
13672 	DHD_ERROR(("ioctl_fillup_time="SEC_USEC_FMT
13673 		" ioctl_ack_time="SEC_USEC_FMT
13674 		" ioctl_cmplt_time="SEC_USEC_FMT"\n",
13675 		GET_SEC_USEC(prot->ioctl_fillup_time),
13676 		GET_SEC_USEC(prot->ioctl_ack_time),
13677 		GET_SEC_USEC(prot->ioctl_cmplt_time)));
13678 
13679 	/* Check PCIe INT registers */
13680 	if (!dhd_pcie_dump_int_regs(dhd)) {
13681 		DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
13682 		dhd->bus->is_linkdown = TRUE;
13683 	}
13684 
13685 	DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n"));
13686 
13687 	ring = &prot->h2dring_ctrl_subn;
13688 	dma_buf_len = ring->max_items * ring->item_len;
13689 	ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13690 	ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13691 	DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
13692 		"SIZE %d \r\n",
13693 		ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13694 		ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len));
13695 	DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13696 	if (dhd->dma_d2h_ring_upd_support) {
13697 		drd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
13698 		dwr = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
13699 		DHD_ERROR(("CtrlPost: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
13700 	}
13701 	if (dhd->bus->is_linkdown) {
13702 		DHD_ERROR(("CtrlPost: From Shared Mem: RD and WR are invalid"
13703 			" due to PCIe link down\r\n"));
13704 	} else {
13705 		dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13706 		dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13707 		DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
13708 	}
13709 	DHD_ERROR(("CtrlPost: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
13710 
13711 	ring = &prot->d2hring_ctrl_cpln;
13712 	dma_buf_len = ring->max_items * ring->item_len;
13713 	ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13714 	ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13715 	DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
13716 		"SIZE %d \r\n",
13717 		ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13718 		ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len));
13719 	DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13720 	if (dhd->dma_d2h_ring_upd_support) {
13721 		drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
13722 		dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
13723 		DHD_ERROR(("CtrlCpl: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
13724 	}
13725 	if (dhd->bus->is_linkdown) {
13726 		DHD_ERROR(("CtrlCpl: From Shared Mem: RD and WR are invalid"
13727 			" due to PCIe link down\r\n"));
13728 	} else {
13729 		dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13730 		dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13731 		DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
13732 	}
13733 	DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
13734 
13735 	ring = prot->h2dring_info_subn;
13736 	if (ring) {
13737 		dma_buf_len = ring->max_items * ring->item_len;
13738 		ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13739 		ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13740 		DHD_ERROR(("InfoSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
13741 			"SIZE %d \r\n",
13742 			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13743 			ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
13744 			dma_buf_len));
13745 		DHD_ERROR(("InfoSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13746 		if (dhd->dma_d2h_ring_upd_support) {
13747 			drd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
13748 			dwr = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
13749 			DHD_ERROR(("InfoSub: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
13750 		}
13751 		if (dhd->bus->is_linkdown) {
13752 			DHD_ERROR(("InfoSub: From Shared Mem: RD and WR are invalid"
13753 				" due to PCIe link down\r\n"));
13754 		} else {
13755 			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13756 			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13757 			DHD_ERROR(("InfoSub: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
13758 		}
13759 		DHD_ERROR(("InfoSub: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
13760 	}
13761 	ring = prot->d2hring_info_cpln;
13762 	if (ring) {
13763 		dma_buf_len = ring->max_items * ring->item_len;
13764 		ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13765 		ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13766 		DHD_ERROR(("InfoCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
13767 			"SIZE %d \r\n",
13768 			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13769 			ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
13770 			dma_buf_len));
13771 		DHD_ERROR(("InfoCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13772 		if (dhd->dma_d2h_ring_upd_support) {
13773 			drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
13774 			dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
13775 			DHD_ERROR(("InfoCpl: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
13776 		}
13777 		if (dhd->bus->is_linkdown) {
13778 			DHD_ERROR(("InfoCpl: From Shared Mem: RD and WR are invalid"
13779 				" due to PCIe link down\r\n"));
13780 		} else {
13781 			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13782 			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13783 			DHD_ERROR(("InfoCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
13784 		}
13785 		DHD_ERROR(("InfoCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
13786 	}
13787 #ifdef EWP_EDL
13788 	ring = prot->d2hring_edl;
13789 	if (ring) {
13790 		ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13791 		ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13792 		dma_buf_len = ring->max_items * ring->item_len;
13793 		DHD_ERROR(("EdlRing: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
13794 			"SIZE %d \r\n",
13795 			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13796 			ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
13797 			dma_buf_len));
13798 		DHD_ERROR(("EdlRing: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13799 		if (dhd->dma_d2h_ring_upd_support) {
13800 			drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
13801 			dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
13802 			DHD_ERROR(("EdlRing: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
13803 		}
13804 		if (dhd->bus->is_linkdown) {
13805 			DHD_ERROR(("EdlRing: From Shared Mem: RD and WR are invalid"
13806 				" due to PCIe link down\r\n"));
13807 		} else {
13808 			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13809 			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13810 			DHD_ERROR(("EdlRing: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
13811 		}
13812 		DHD_ERROR(("EdlRing: Expected seq num: %d \r\n",
13813 			ring->seqnum % D2H_EPOCH_MODULO));
13814 	}
13815 #endif /* EWP_EDL */
13816 
13817 	ring = &prot->d2hring_tx_cpln;
13818 	if (ring) {
13819 		ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13820 		ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13821 		dma_buf_len = ring->max_items * ring->item_len;
13822 		DHD_ERROR(("TxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
13823 			"SIZE %d \r\n",
13824 			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13825 			ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
13826 			dma_buf_len));
13827 		DHD_ERROR(("TxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13828 		if (dhd->dma_d2h_ring_upd_support) {
13829 			drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
13830 			dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
13831 			DHD_ERROR(("TxCpl: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
13832 		}
13833 		if (dhd->bus->is_linkdown) {
13834 			DHD_ERROR(("TxCpl: From Shared Mem: RD and WR are invalid"
13835 				" due to PCIe link down\r\n"));
13836 		} else {
13837 			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13838 			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13839 			DHD_ERROR(("TxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
13840 		}
13841 		DHD_ERROR(("TxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
13842 	}
13843 
13844 	ring = &prot->d2hring_rx_cpln;
13845 	if (ring) {
13846 		ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13847 		ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13848 		dma_buf_len = ring->max_items * ring->item_len;
13849 		DHD_ERROR(("RxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
13850 			"SIZE %d \r\n",
13851 			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13852 			ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
13853 			dma_buf_len));
13854 		DHD_ERROR(("RxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13855 		if (dhd->dma_d2h_ring_upd_support) {
13856 			drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
13857 			dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
13858 			DHD_ERROR(("RxCpl: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
13859 		}
13860 		if (dhd->bus->is_linkdown) {
13861 			DHD_ERROR(("RxCpl: From Shared Mem: RD and WR are invalid"
13862 				" due to PCIe link down\r\n"));
13863 		} else {
13864 			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13865 			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13866 			DHD_ERROR(("RxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
13867 		}
13868 		DHD_ERROR(("RxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
13869 	}
13870 
13871 	ring = &prot->h2dring_rxp_subn;
13872 	if (ring) {
13873 		ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13874 		ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13875 		dma_buf_len = ring->max_items * ring->item_len;
13876 		DHD_ERROR(("RxSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
13877 			"SIZE %d \r\n",
13878 			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13879 			ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
13880 			dma_buf_len));
13881 		DHD_ERROR(("RxSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13882 		if (dhd->dma_d2h_ring_upd_support) {
13883 			drd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
13884 			dwr = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
13885 			DHD_ERROR(("RxSub: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
13886 		}
13887 		if (dhd->bus->is_linkdown) {
13888 			DHD_ERROR(("RxSub: From Shared Mem: RD and WR are invalid"
13889 				" due to PCIe link down\r\n"));
13890 		} else {
13891 			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13892 			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13893 			DHD_ERROR(("RxSub: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
13894 		}
13895 		DHD_ERROR(("RxSub: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
13896 	}
13897 
13898 	DHD_ERROR(("%s: cur_ioctlresp_bufs_posted %d cur_event_bufs_posted %d\n",
13899 		__FUNCTION__, prot->cur_ioctlresp_bufs_posted, prot->cur_event_bufs_posted));
13900 #ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
13901 	DHD_ERROR(("%s: multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n",
13902 		__FUNCTION__, dhd->multi_client_flow_rings, dhd->max_multi_client_flow_rings));
13903 #endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
13904 
13905 	DHD_ERROR(("pktid_txq_start_cnt: %d\n", prot->pktid_txq_start_cnt));
13906 	DHD_ERROR(("pktid_txq_stop_cnt: %d\n", prot->pktid_txq_stop_cnt));
13907 	DHD_ERROR(("pktid_depleted_cnt: %d\n", prot->pktid_depleted_cnt));
13908 	dhd_pcie_debug_info_dump(dhd);
13909 #ifdef DHD_LB_STATS
13910 	DHD_ERROR(("\nlb_rxp_stop_thr_hitcnt: %llu lb_rxp_strt_thr_hitcnt: %llu\n",
13911 		dhd->lb_rxp_stop_thr_hitcnt, dhd->lb_rxp_strt_thr_hitcnt));
13912 	DHD_ERROR(("\nlb_rxp_napi_sched_cnt: %llu lb_rxp_napi_complete_cnt: %llu\n",
13913 		dhd->lb_rxp_napi_sched_cnt, dhd->lb_rxp_napi_complete_cnt));
13914 #endif /* DHD_LB_STATS */
13915 #ifdef DHD_TIMESYNC
13916 	dhd_timesync_debug_info_print(dhd);
13917 #endif /* DHD_TIMESYNC */
13918 	return 0;
13919 }
13920 
13921 int
13922 dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
13923 {
13924 	uint32 *ptr;
13925 	uint32 value;
13926 
13927 	if (dhd->prot->d2h_dma_indx_wr_buf.va) {
13928 		uint32 i;
13929 		uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus);
13930 
13931 		OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va,
13932 			dhd->prot->d2h_dma_indx_wr_buf.len);
13933 
13934 		ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va);
13935 
13936 		bcm_bprintf(b, "\n max_tx_queues %d\n", max_h2d_queues);
13937 
13938 		bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%4p\n", ptr);
13939 		value = ltoh32(*ptr);
13940 		bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value);
13941 		ptr++;
13942 		value = ltoh32(*ptr);
13943 		bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value);
13944 
13945 		ptr++;
13946 		bcm_bprintf(b, "RPTR block Flow rings , 0x%4p\n", ptr);
13947 		for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
13948 			value = ltoh32(*ptr);
13949 			bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
13950 			ptr++;
13951 		}
13952 	}
13953 
13954 	if (dhd->prot->h2d_dma_indx_rd_buf.va) {
13955 		OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va,
13956 			dhd->prot->h2d_dma_indx_rd_buf.len);
13957 
13958 		ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va);
13959 
13960 		bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%4p\n", ptr);
13961 		value = ltoh32(*ptr);
13962 		bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value);
13963 		ptr++;
13964 		value = ltoh32(*ptr);
13965 		bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value);
13966 		ptr++;
13967 		value = ltoh32(*ptr);
13968 		bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value);
13969 	}
13970 
13971 	return 0;
13972 }
13973 
13974 uint32
13975 dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val)
13976 {
13977 	dhd_prot_t *prot = dhd->prot;
13978 #if DHD_DBG_SHOW_METADATA
13979 	prot->metadata_dbg = val;
13980 #endif
13981 	return (uint32)prot->metadata_dbg;
13982 }
13983 
13984 uint32
13985 dhd_prot_metadata_dbg_get(dhd_pub_t *dhd)
13986 {
13987 	dhd_prot_t *prot = dhd->prot;
13988 	return (uint32)prot->metadata_dbg;
13989 }
13990 
13991 uint32
13992 dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx)
13993 {
13994 #if !(defined(BCM_ROUTER_DHD))
13995 	dhd_prot_t *prot = dhd->prot;
13996 	if (rx)
13997 		prot->rx_metadata_offset = (uint16)val;
13998 	else
13999 		prot->tx_metadata_offset = (uint16)val;
14000 #endif /* ! BCM_ROUTER_DHD */
14001 	return dhd_prot_metadatalen_get(dhd, rx);
14002 }
14003 
14004 uint32
14005 dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx)
14006 {
14007 	dhd_prot_t *prot = dhd->prot;
14008 	if (rx)
14009 		return prot->rx_metadata_offset;
14010 	else
14011 		return prot->tx_metadata_offset;
14012 }
14013 
14014 /** optimization to write "n" tx items at a time to ring */
14015 uint32
14016 dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val)
14017 {
14018 	dhd_prot_t *prot = dhd->prot;
14019 	if (set)
14020 		prot->txp_threshold = (uint16)val;
14021 	val = prot->txp_threshold;
14022 	return val;
14023 }
14024 
14025 #ifdef DHD_RX_CHAINING
14026 
14027 static INLINE void
14028 BCMFASTPATH(dhd_rxchain_reset)(rxchain_info_t *rxchain)
14029 {
14030 	rxchain->pkt_count = 0;
14031 }
14032 
14033 static void
14034 BCMFASTPATH(dhd_rxchain_frame)(dhd_pub_t *dhd, void *pkt, uint ifidx)
14035 {
14036 	uint8 *eh;
14037 	uint8 prio;
14038 	dhd_prot_t *prot = dhd->prot;
14039 	rxchain_info_t *rxchain = &prot->rxchain;
14040 
14041 	ASSERT(!PKTISCHAINED(pkt));
14042 	ASSERT(PKTCLINK(pkt) == NULL);
14043 	ASSERT(PKTCGETATTR(pkt) == 0);
14044 
14045 	eh = PKTDATA(dhd->osh, pkt);
14046 	prio = IP_TOS46(eh + ETHER_HDR_LEN) >> IPV4_TOS_PREC_SHIFT;
14047 
14048 	if (rxchain->pkt_count && !(PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa,
14049 		rxchain->h_da, rxchain->h_prio))) {
14050 		/* Different flow - First release the existing chain */
14051 		dhd_rxchain_commit(dhd);
14052 	}
14053 
14054 	/* For routers, with HNDCTF, link the packets using PKTSETCLINK, */
14055 	/* so that the chain can be handed off to CTF bridge as is. */
14056 	if (rxchain->pkt_count == 0) {
14057 		/* First packet in chain */
14058 		rxchain->pkthead = rxchain->pkttail = pkt;
14059 
14060 		/* Keep a copy of ptr to ether_da, ether_sa and prio */
14061 		rxchain->h_da = ((struct ether_header *)eh)->ether_dhost;
14062 		rxchain->h_sa = ((struct ether_header *)eh)->ether_shost;
14063 		rxchain->h_prio = prio;
14064 		rxchain->ifidx = ifidx;
14065 		rxchain->pkt_count++;
14066 	} else {
14067 		/* Same flow - keep chaining */
14068 		PKTSETCLINK(rxchain->pkttail, pkt);
14069 		rxchain->pkttail = pkt;
14070 		rxchain->pkt_count++;
14071 	}
14072 
14073 	if ((dhd_rx_pkt_chainable(dhd, ifidx)) && (!ETHER_ISMULTI(rxchain->h_da)) &&
14074 		((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) ||
14075 		(((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IPV6)))) {
14076 		PKTSETCHAINED(dhd->osh, pkt);
14077 		PKTCINCRCNT(rxchain->pkthead);
14078 		PKTCADDLEN(rxchain->pkthead, PKTLEN(dhd->osh, pkt));
14079 	} else {
14080 		dhd_rxchain_commit(dhd);
14081 		return;
14082 	}
14083 
14084 	/* If we have hit the max chain length, dispatch the chain and reset */
14085 	if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN) {
14086 		dhd_rxchain_commit(dhd);
14087 	}
14088 }
14089 
14090 static void
14091 BCMFASTPATH(dhd_rxchain_commit)(dhd_pub_t *dhd)
14092 {
14093 	dhd_prot_t *prot = dhd->prot;
14094 	rxchain_info_t *rxchain = &prot->rxchain;
14095 
14096 	if (rxchain->pkt_count == 0)
14097 		return;
14098 
14099 	/* Release the packets to dhd_linux */
14100 	dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx, rxchain->pkt_count);
14101 
14102 	/* Reset the chain */
14103 	dhd_rxchain_reset(rxchain);
14104 }
14105 
14106 #endif /* DHD_RX_CHAINING */
14107 
14108 #ifdef IDLE_TX_FLOW_MGMT
14109 int
14110 dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
14111 {
14112 	tx_idle_flowring_resume_request_t *flow_resume_rqst;
14113 	msgbuf_ring_t *flow_ring;
14114 	dhd_prot_t *prot = dhd->prot;
14115 	unsigned long flags;
14116 	uint16 alloced = 0;
14117 	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
14118 
14119 	/* Fetch a pre-initialized msgbuf_ring from the flowring pool */
14120 	flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
14121 	if (flow_ring == NULL) {
14122 		DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
14123 			__FUNCTION__, flow_ring_node->flowid));
14124 		return BCME_NOMEM;
14125 	}
14126 
14127 #ifdef PCIE_INB_DW
14128 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
14129 		return BCME_ERROR;
14130 #endif /* PCIE_INB_DW */
14131 
14132 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
14133 
14134 	/* Request for ctrl_ring buffer space */
14135 	flow_resume_rqst = (tx_idle_flowring_resume_request_t *)
14136 		dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
14137 
14138 	if (flow_resume_rqst == NULL) {
14139 		dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
14140 		DHD_ERROR(("%s: Flow resume Req flowid %d - failure ring space\n",
14141 			__FUNCTION__, flow_ring_node->flowid));
14142 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
14143 #ifdef PCIE_INB_DW
14144 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
14145 #endif
14146 		return BCME_NOMEM;
14147 	}
14148 
14149 	flow_ring_node->prot_info = (void *)flow_ring;
14150 
14151 	/* Common msg buf hdr */
14152 	flow_resume_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_RESUME;
14153 	flow_resume_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
14154 	flow_resume_rqst->msg.request_id = htol32(0); /* TBD */
14155 
14156 	flow_resume_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
14157 	ctrl_ring->seqnum++;
14158 
14159 	flow_resume_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
14160 	DHD_ERROR(("%s Send Flow resume Req flow ID %d\n",
14161 		__FUNCTION__, flow_ring_node->flowid));
14162 
14163 	/* Update the flow_ring's WRITE index */
14164 	if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
14165 		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
14166 		                      H2D_DMA_INDX_WR_UPD, flow_ring->idx);
14167 	} else if (IFRM_ACTIVE(dhd) && (flow_ring->idx >= BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START)) {
14168 		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
14169 			H2D_IFRM_INDX_WR_UPD,
14170 			(flow_ring->idx - BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START));
14171 	} else {
14172 		dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
14173 			sizeof(uint16), RING_WR_UPD, flow_ring->idx);
14174 	}
14175 
14176 	/* update control subn ring's WR index and ring doorbell to dongle */
14177 	dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_resume_rqst, 1);
14178 
14179 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
14180 
14181 #ifdef PCIE_INB_DW
14182 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
14183 #endif
14184 	return BCME_OK;
14185 } /* dhd_prot_flow_ring_create */
14186 
14187 int
14188 dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t *dhd, uint16 *ringid, uint16 count)
14189 {
14190 	tx_idle_flowring_suspend_request_t *flow_suspend_rqst;
14191 	dhd_prot_t *prot = dhd->prot;
14192 	unsigned long flags;
14193 	uint16 index;
14194 	uint16 alloced = 0;
14195 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
14196 
14197 #ifdef PCIE_INB_DW
14198 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
14199 		return BCME_ERROR;
14200 #endif /* PCIE_INB_DW */
14201 
14202 	DHD_RING_LOCK(ring->ring_lock, flags);
14203 
14204 	/* Request for ring buffer space */
14205 	flow_suspend_rqst = (tx_idle_flowring_suspend_request_t *)
14206 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
14207 
14208 	if (flow_suspend_rqst == NULL) {
14209 		DHD_RING_UNLOCK(ring->ring_lock, flags);
14210 		DHD_ERROR(("%s: Flow suspend Req - failure ring space\n", __FUNCTION__));
14211 #ifdef PCIE_INB_DW
14212 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
14213 #endif
14214 		return BCME_NOMEM;
14215 	}
14216 
14217 	/* Common msg buf hdr */
14218 	flow_suspend_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_SUSPEND;
14219 	/* flow_suspend_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; */
14220 	flow_suspend_rqst->msg.request_id = htol32(0); /* TBD */
14221 
14222 	flow_suspend_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
14223 	ring->seqnum++;
14224 
14225 	/* Update flow id  info */
14226 	for (index = 0; index < count; index++)
14227 	{
14228 		flow_suspend_rqst->ring_id[index] = ringid[index];
14229 	}
14230 	flow_suspend_rqst->num = count;
14231 
14232 	DHD_ERROR(("%s sending batch suspend!! count is %d\n", __FUNCTION__, count));
14233 
14234 	/* update ring's WR index and ring doorbell to dongle */
14235 	dhd_prot_ring_write_complete(dhd, ring, flow_suspend_rqst, 1);
14236 
14237 	DHD_RING_UNLOCK(ring->ring_lock, flags);
14238 
14239 #ifdef PCIE_INB_DW
14240 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
14241 #endif
14242 
14243 	return BCME_OK;
14244 }
14245 #endif /* IDLE_TX_FLOW_MGMT */
14246 
14247 #if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
14248 static void
14249 dhd_prot_ioctl_trace(dhd_pub_t *dhd, ioctl_req_msg_t *ioct_rqst, uchar *buf, int len)
14250 {
14251 	struct dhd_prot *prot = dhd->prot;
14252 	uint32 cnt = prot->ioctl_trace_count % MAX_IOCTL_TRACE_SIZE;
14253 
14254 	prot->ioctl_trace[cnt].cmd = ioct_rqst->cmd;
14255 	prot->ioctl_trace[cnt].transid = ioct_rqst->trans_id;
14256 	if ((ioct_rqst->cmd == 262 || ioct_rqst->cmd == 263) && buf)
14257 		memcpy(prot->ioctl_trace[cnt].ioctl_buf, buf,
14258 			len > MAX_IOCTL_BUF_SIZE ? MAX_IOCTL_BUF_SIZE : len);
14259 	else
14260 		memset(prot->ioctl_trace[cnt].ioctl_buf, 0, MAX_IOCTL_BUF_SIZE);
14261 	prot->ioctl_trace[cnt].timestamp = OSL_SYSUPTIME_US();
14262 	prot->ioctl_trace_count ++;
14263 }
14264 
14265 static void
14266 dhd_prot_ioctl_dump(dhd_prot_t *prot, struct bcmstrbuf *strbuf)
14267 {
14268 	int dumpsz;
14269 	int i;
14270 
14271 	dumpsz = prot->ioctl_trace_count < MAX_IOCTL_TRACE_SIZE ?
14272 		prot->ioctl_trace_count : MAX_IOCTL_TRACE_SIZE;
14273 	if (dumpsz == 0) {
14274 		bcm_bprintf(strbuf, "\nEmpty IOCTL TRACE\n");
14275 		return;
14276 	}
14277 	bcm_bprintf(strbuf, "----------- IOCTL TRACE --------------\n");
14278 	bcm_bprintf(strbuf, "Timestamp us\t\tCMD\tTransID\tIOVAR\n");
14279 	for (i = 0; i < dumpsz; i ++) {
14280 		bcm_bprintf(strbuf, "%llu\t%d\t%d\t%s\n",
14281 			prot->ioctl_trace[i].timestamp,
14282 			prot->ioctl_trace[i].cmd,
14283 			prot->ioctl_trace[i].transid,
14284 			prot->ioctl_trace[i].ioctl_buf);
14285 	}
14286 }
14287 #endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */
14288 
14289 static void dump_psmwd_v1(const bcm_tlv_t *tlv, struct bcmstrbuf *b)
14290 {
14291 	const hnd_ext_trap_psmwd_v1_t* psmwd = NULL;
14292 	uint32 i;
14293 	psmwd = (const hnd_ext_trap_psmwd_v1_t *)tlv;
14294 	for (i = 0; i < PSMDBG_REG_READ_CNT_FOR_PSMWDTRAP_V1; i++) {
14295 		bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, psmwd->i32_psmdebug[i]);
14296 	}
14297 	bcm_bprintf(b, " gated clock en: 0x%x\n", psmwd->i16_0x1a8);
14298 	bcm_bprintf(b, " Rcv Fifo Ctrl: 0x%x\n", psmwd->i16_0x406);
14299 	bcm_bprintf(b, " Rx ctrl 1: 0x%x\n", psmwd->i16_0x408);
14300 	bcm_bprintf(b, " Rxe Status 1: 0x%x\n", psmwd->i16_0x41a);
14301 	bcm_bprintf(b, " Rxe Status 2: 0x%x\n", psmwd->i16_0x41c);
14302 	bcm_bprintf(b, " rcv wrd count 0: 0x%x\n", psmwd->i16_0x424);
14303 	bcm_bprintf(b, " rcv wrd count 1: 0x%x\n", psmwd->i16_0x426);
14304 	bcm_bprintf(b, " RCV_LFIFO_STS: 0x%x\n", psmwd->i16_0x456);
14305 	bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", psmwd->i16_0x480);
14306 	bcm_bprintf(b, " TXE CTRL: 0x%x\n", psmwd->i16_0x500);
14307 	bcm_bprintf(b, " TXE Status: 0x%x\n", psmwd->i16_0x50e);
14308 	bcm_bprintf(b, " TXE_xmtdmabusy: 0x%x\n", psmwd->i16_0x55e);
14309 	bcm_bprintf(b, " TXE_XMTfifosuspflush: 0x%x\n", psmwd->i16_0x566);
14310 	bcm_bprintf(b, " IFS Stat: 0x%x\n", psmwd->i16_0x690);
14311 	bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", psmwd->i16_0x692);
14312 	bcm_bprintf(b, " IFS_TX_DUR: 0x%x\n", psmwd->i16_0x694);
14313 	bcm_bprintf(b, " SLow_CTL: 0x%x\n", psmwd->i16_0x6a0);
14314 	bcm_bprintf(b, " PSM BRC: 0x%x\n", psmwd->i16_0x490);
14315 	bcm_bprintf(b, " TXE_AQM fifo Ready: 0x%x\n", psmwd->i16_0x838);
14316 	bcm_bprintf(b, " Dagg ctrl: 0x%x\n", psmwd->i16_0x8c0);
14317 	bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", psmwd->shm_prewds_cnt);
14318 	bcm_bprintf(b, " shm_txtplufl_cnt: 0x%x\n", psmwd->shm_txtplufl_cnt);
14319 	bcm_bprintf(b, " shm_txphyerr_cnt: 0x%x\n", psmwd->shm_txphyerr_cnt);
14320 
14321 }
14322 
14323 static void dump_psmwd_v2(const bcm_tlv_t *tlv, struct bcmstrbuf *b)
14324 {
14325 	const hnd_ext_trap_psmwd_t* psmwd = NULL;
14326 	uint32 i;
14327 	psmwd = (const hnd_ext_trap_psmwd_t *)tlv;
14328 	for (i = 0; i < PSMDBG_REG_READ_CNT_FOR_PSMWDTRAP_V2; i++) {
14329 		bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, psmwd->i32_psmdebug[i]);
14330 	}
14331 
14332 	bcm_bprintf(b, " psm_brwk0: 0x%x\n", psmwd->i16_0x4b8);
14333 	bcm_bprintf(b, " psm_brwk1: 0x%x\n", psmwd->i16_0x4ba);
14334 	bcm_bprintf(b, " psm_brwk2: 0x%x\n", psmwd->i16_0x4bc);
14335 	bcm_bprintf(b, " psm_brwk3: 0x%x\n", psmwd->i16_0x4be);
14336 	bcm_bprintf(b, " PSM BRC_1: 0x%x\n", psmwd->i16_0x4da);
14337 	bcm_bprintf(b, " gated clock en: 0x%x\n", psmwd->i16_0x1a8);
14338 	bcm_bprintf(b, " Rcv Fifo Ctrl: 0x%x\n", psmwd->i16_0x406);
14339 	bcm_bprintf(b, " Rx ctrl 1: 0x%x\n", psmwd->i16_0x408);
14340 	bcm_bprintf(b, " Rxe Status 1: 0x%x\n", psmwd->i16_0x41a);
14341 	bcm_bprintf(b, " Rxe Status 2: 0x%x\n", psmwd->i16_0x41c);
14342 	bcm_bprintf(b, " rcv wrd count 0: 0x%x\n", psmwd->i16_0x424);
14343 	bcm_bprintf(b, " rcv wrd count 1: 0x%x\n", psmwd->i16_0x426);
14344 	bcm_bprintf(b, " RCV_LFIFO_STS: 0x%x\n", psmwd->i16_0x456);
14345 	bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", psmwd->i16_0x480);
14346 	bcm_bprintf(b, " TXE CTRL: 0x%x\n", psmwd->i16_0x500);
14347 	bcm_bprintf(b, " TXE Status: 0x%x\n", psmwd->i16_0x50e);
14348 	bcm_bprintf(b, " TXE_xmtdmabusy: 0x%x\n", psmwd->i16_0x55e);
14349 	bcm_bprintf(b, " TXE_XMTfifosuspflush: 0x%x\n", psmwd->i16_0x566);
14350 	bcm_bprintf(b, " IFS Stat: 0x%x\n", psmwd->i16_0x690);
14351 	bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", psmwd->i16_0x692);
14352 	bcm_bprintf(b, " IFS_TX_DUR: 0x%x\n", psmwd->i16_0x694);
14353 	bcm_bprintf(b, " SLow_CTL: 0x%x\n", psmwd->i16_0x6a0);
14354 	bcm_bprintf(b, " PSM BRC: 0x%x\n", psmwd->i16_0x490);
14355 	bcm_bprintf(b, " TXE_AQM fifo Ready: 0x%x\n", psmwd->i16_0x838);
14356 	bcm_bprintf(b, " Dagg ctrl: 0x%x\n", psmwd->i16_0x8c0);
14357 	bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", psmwd->shm_prewds_cnt);
14358 	bcm_bprintf(b, " shm_txtplufl_cnt: 0x%x\n", psmwd->shm_txtplufl_cnt);
14359 	bcm_bprintf(b, " shm_txphyerr_cnt: 0x%x\n", psmwd->shm_txphyerr_cnt);
14360 }
14361 
14362 static const char* etd_trap_name(hnd_ext_tag_trap_t tag)
14363 {
14364 	switch (tag) {
14365 	case TAG_TRAP_SIGNATURE: return "TAG_TRAP_SIGNATURE";
14366 	case TAG_TRAP_STACK: return "TAG_TRAP_STACK";
14367 	case TAG_TRAP_MEMORY: return "TAG_TRAP_MEMORY";
14368 	case TAG_TRAP_DEEPSLEEP: return "TAG_TRAP_DEEPSLEEP";
14369 	case TAG_TRAP_PSM_WD: return "TAG_TRAP_PSM_WD";
14370 	case TAG_TRAP_PHY: return "TAG_TRAP_PHY";
14371 	case TAG_TRAP_BUS: return "TAG_TRAP_BUS";
14372 	case TAG_TRAP_MAC_SUSP: return "TAG_TRAP_MAC_SUSP";
14373 	case TAG_TRAP_BACKPLANE: return "TAG_TRAP_BACKPLANE";
14374 	case TAG_TRAP_PCIE_Q: return "TAG_TRAP_PCIE_Q";
14375 	case TAG_TRAP_WLC_STATE: return "TAG_TRAP_WLC_STATE";
14376 	case TAG_TRAP_MAC_WAKE: return "TAG_TRAP_MAC_WAKE";
14377 	case TAG_TRAP_HMAP: return "TAG_TRAP_HMAP";
14378 	case TAG_TRAP_PHYTXERR_THRESH: return "TAG_TRAP_PHYTXERR_THRESH";
14379 	case TAG_TRAP_HC_DATA: return "TAG_TRAP_HC_DATA";
14380 	case TAG_TRAP_LOG_DATA: return "TAG_TRAP_LOG_DATA";
14381 	case TAG_TRAP_CODE: return "TAG_TRAP_CODE";
14382 	case TAG_TRAP_MEM_BIT_FLIP: return "TAG_TRAP_MEM_BIT_FLIP";
14383 	case TAG_TRAP_LAST:
14384 	default:
14385 		return "Unknown";
14386 	}
14387 	return "Unknown";
14388 }
14389 
14390 int dhd_prot_dump_extended_trap(dhd_pub_t *dhdp, struct bcmstrbuf *b, bool raw)
14391 {
14392 	uint32 i;
14393 	uint32 *ext_data;
14394 	hnd_ext_trap_hdr_t *hdr;
14395 	const bcm_tlv_t *tlv;
14396 	const trap_t *tr;
14397 	const uint32 *stack;
14398 	const hnd_ext_trap_bp_err_t *bpe;
14399 	uint32 raw_len;
14400 
14401 	ext_data = dhdp->extended_trap_data;
14402 
14403 	/* return if there is no extended trap data */
14404 	if (!ext_data || !(dhdp->dongle_trap_data & D2H_DEV_EXT_TRAP_DATA)) {
14405 		bcm_bprintf(b, "%d (0x%x)", dhdp->dongle_trap_data, dhdp->dongle_trap_data);
14406 		return BCME_OK;
14407 	}
14408 
14409 	bcm_bprintf(b, "Extended trap data\n");
14410 
14411 	/* First word is original trap_data */
14412 	bcm_bprintf(b, "trap_data = 0x%08x\n", *ext_data);
14413 	ext_data++;
14414 
14415 	/* Followed by the extended trap data header */
14416 	hdr = (hnd_ext_trap_hdr_t *)ext_data;
14417 	bcm_bprintf(b, "version: %d, len: %d\n", hdr->version, hdr->len);
14418 
14419 	/* Dump a list of all tags found  before parsing data */
14420 	bcm_bprintf(b, "\nTags Found:\n");
14421 	for (i = 0; i < TAG_TRAP_LAST; i++) {
14422 		tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
14423 		if (tlv)
14424 			bcm_bprintf(b, "Tag: %d (%s), Length: %d\n", i, etd_trap_name(i), tlv->len);
14425 	}
14426 
14427 	/* XXX debug dump */
14428 	if (raw) {
14429 		raw_len = sizeof(hnd_ext_trap_hdr_t) + (hdr->len / 4) + (hdr->len % 4 ? 1 : 0);
14430 		for (i = 0; i < raw_len; i++)
14431 		{
14432 			bcm_bprintf(b, "0x%08x ", ext_data[i]);
14433 			if (i % 4 == 3)
14434 				bcm_bprintf(b, "\n");
14435 		}
14436 		return BCME_OK;
14437 	}
14438 
14439 	/* Extract the various supported TLVs from the extended trap data */
14440 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_CODE);
14441 	if (tlv) {
14442 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_CODE), tlv->len);
14443 		bcm_bprintf(b, "ETD TYPE: %d\n", tlv->data[0]);
14444 	}
14445 
14446 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
14447 	if (tlv) {
14448 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_SIGNATURE), tlv->len);
14449 		tr = (const trap_t *)tlv->data;
14450 
14451 		bcm_bprintf(b, "TRAP %x: pc %x, lr %x, sp %x, cpsr %x, spsr %x\n",
14452 		       tr->type, tr->pc, tr->r14, tr->r13, tr->cpsr, tr->spsr);
14453 		bcm_bprintf(b, "  r0 %x, r1 %x, r2 %x, r3 %x, r4 %x, r5 %x, r6 %x\n",
14454 		       tr->r0, tr->r1, tr->r2, tr->r3, tr->r4, tr->r5, tr->r6);
14455 		bcm_bprintf(b, "  r7 %x, r8 %x, r9 %x, r10 %x, r11 %x, r12 %x\n",
14456 		       tr->r7, tr->r8, tr->r9, tr->r10, tr->r11, tr->r12);
14457 	}
14458 
14459 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
14460 	if (tlv) {
14461 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_STACK), tlv->len);
14462 		stack = (const uint32 *)tlv->data;
14463 		for (i = 0; i < (uint32)(tlv->len / 4); i++)
14464 		{
14465 			bcm_bprintf(b, "  0x%08x\n", *stack);
14466 			stack++;
14467 		}
14468 	}
14469 
14470 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BACKPLANE);
14471 	if (tlv) {
14472 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BACKPLANE), tlv->len);
14473 		bpe = (const hnd_ext_trap_bp_err_t *)tlv->data;
14474 		bcm_bprintf(b, " error: %x\n", bpe->error);
14475 		bcm_bprintf(b, " coreid: %x\n", bpe->coreid);
14476 		bcm_bprintf(b, " baseaddr: %x\n", bpe->baseaddr);
14477 		bcm_bprintf(b, " ioctrl: %x\n", bpe->ioctrl);
14478 		bcm_bprintf(b, " iostatus: %x\n", bpe->iostatus);
14479 		bcm_bprintf(b, " resetctrl: %x\n", bpe->resetctrl);
14480 		bcm_bprintf(b, " resetstatus: %x\n", bpe->resetstatus);
14481 		bcm_bprintf(b, " errlogctrl: %x\n", bpe->errlogctrl);
14482 		bcm_bprintf(b, " errlogdone: %x\n", bpe->errlogdone);
14483 		bcm_bprintf(b, " errlogstatus: %x\n", bpe->errlogstatus);
14484 		bcm_bprintf(b, " errlogaddrlo: %x\n", bpe->errlogaddrlo);
14485 		bcm_bprintf(b, " errlogaddrhi: %x\n", bpe->errlogaddrhi);
14486 		bcm_bprintf(b, " errlogid: %x\n", bpe->errlogid);
14487 		bcm_bprintf(b, " errloguser: %x\n", bpe->errloguser);
14488 		bcm_bprintf(b, " errlogflags: %x\n", bpe->errlogflags);
14489 	}
14490 
14491 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MEMORY);
14492 	if (tlv) {
14493 		const hnd_ext_trap_heap_err_t* hme;
14494 
14495 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MEMORY), tlv->len);
14496 		hme = (const hnd_ext_trap_heap_err_t *)tlv->data;
14497 		bcm_bprintf(b, " arena total: %d\n", hme->arena_total);
14498 		bcm_bprintf(b, " heap free: %d\n", hme->heap_free);
14499 		bcm_bprintf(b, " heap in use: %d\n", hme->heap_inuse);
14500 		bcm_bprintf(b, " mf count: %d\n", hme->mf_count);
14501 		bcm_bprintf(b, " stack LWM: %x\n", hme->stack_lwm);
14502 
14503 		bcm_bprintf(b, " Histogram:\n");
14504 		for (i = 0; i < (HEAP_HISTOGRAM_DUMP_LEN * 2); i += 2) {
14505 			if (hme->heap_histogm[i] == 0xfffe)
14506 				bcm_bprintf(b, " Others\t%d\t?\n", hme->heap_histogm[i + 1]);
14507 			else if (hme->heap_histogm[i] == 0xffff)
14508 				bcm_bprintf(b, " >= 256K\t%d\t?\n", hme->heap_histogm[i + 1]);
14509 			else
14510 				bcm_bprintf(b, " %d\t%d\t%d\n", hme->heap_histogm[i] << 2,
14511 					hme->heap_histogm[i + 1], (hme->heap_histogm[i] << 2)
14512 					* hme->heap_histogm[i + 1]);
14513 		}
14514 
14515 		bcm_bprintf(b, " Max free block: %d\n", hme->max_sz_free_blk[0] << 2);
14516 		for (i = 1; i < HEAP_MAX_SZ_BLKS_LEN; i++) {
14517 			bcm_bprintf(b, " Next lgst free block: %d\n", hme->max_sz_free_blk[i] << 2);
14518 		}
14519 	}
14520 
14521 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PCIE_Q);
14522 	if (tlv) {
14523 		const hnd_ext_trap_pcie_mem_err_t* pqme;
14524 
14525 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PCIE_Q), tlv->len);
14526 		pqme = (const hnd_ext_trap_pcie_mem_err_t *)tlv->data;
14527 		bcm_bprintf(b, " d2h queue len: %x\n", pqme->d2h_queue_len);
14528 		bcm_bprintf(b, " d2h req queue len: %x\n", pqme->d2h_req_queue_len);
14529 	}
14530 
14531 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_WLC_STATE);
14532 	if (tlv) {
14533 		const hnd_ext_trap_wlc_mem_err_t* wsme;
14534 
14535 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_WLC_STATE), tlv->len);
14536 		wsme = (const hnd_ext_trap_wlc_mem_err_t *)tlv->data;
14537 		bcm_bprintf(b, " instance: %d\n", wsme->instance);
14538 		bcm_bprintf(b, " associated: %d\n", wsme->associated);
14539 		bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
14540 		bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
14541 		bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
14542 		bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
14543 		bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]);
14544 		bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]);
14545 
14546 		if (tlv->len >= (sizeof(*wsme) * 2)) {
14547 			wsme++;
14548 			bcm_bprintf(b, "\n instance: %d\n", wsme->instance);
14549 			bcm_bprintf(b, " associated: %d\n", wsme->associated);
14550 			bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
14551 			bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
14552 			bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
14553 			bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
14554 			bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]);
14555 			bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]);
14556 		}
14557 	}
14558 
14559 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PHY);
14560 	if (tlv) {
14561 		const hnd_ext_trap_phydbg_t* phydbg;
14562 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PHY), tlv->len);
14563 		phydbg = (const hnd_ext_trap_phydbg_t *)tlv->data;
14564 		bcm_bprintf(b, " err: 0x%x\n", phydbg->err);
14565 		bcm_bprintf(b, " RxFeStatus: 0x%x\n", phydbg->RxFeStatus);
14566 		bcm_bprintf(b, " TxFIFOStatus0: 0x%x\n", phydbg->TxFIFOStatus0);
14567 		bcm_bprintf(b, " TxFIFOStatus1: 0x%x\n", phydbg->TxFIFOStatus1);
14568 		bcm_bprintf(b, " RfseqMode: 0x%x\n", phydbg->RfseqMode);
14569 		bcm_bprintf(b, " RfseqStatus0: 0x%x\n", phydbg->RfseqStatus0);
14570 		bcm_bprintf(b, " RfseqStatus1: 0x%x\n", phydbg->RfseqStatus1);
14571 		bcm_bprintf(b, " RfseqStatus_Ocl: 0x%x\n", phydbg->RfseqStatus_Ocl);
14572 		bcm_bprintf(b, " RfseqStatus_Ocl1: 0x%x\n", phydbg->RfseqStatus_Ocl1);
14573 		bcm_bprintf(b, " OCLControl1: 0x%x\n", phydbg->OCLControl1);
14574 		bcm_bprintf(b, " TxError: 0x%x\n", phydbg->TxError);
14575 		bcm_bprintf(b, " bphyTxError: 0x%x\n", phydbg->bphyTxError);
14576 		bcm_bprintf(b, " TxCCKError: 0x%x\n", phydbg->TxCCKError);
14577 		bcm_bprintf(b, " TxCtrlWrd0: 0x%x\n", phydbg->TxCtrlWrd0);
14578 		bcm_bprintf(b, " TxCtrlWrd1: 0x%x\n", phydbg->TxCtrlWrd1);
14579 		bcm_bprintf(b, " TxCtrlWrd2: 0x%x\n", phydbg->TxCtrlWrd2);
14580 		bcm_bprintf(b, " TxLsig0: 0x%x\n", phydbg->TxLsig0);
14581 		bcm_bprintf(b, " TxLsig1: 0x%x\n", phydbg->TxLsig1);
14582 		bcm_bprintf(b, " TxVhtSigA10: 0x%x\n", phydbg->TxVhtSigA10);
14583 		bcm_bprintf(b, " TxVhtSigA11: 0x%x\n", phydbg->TxVhtSigA11);
14584 		bcm_bprintf(b, " TxVhtSigA20: 0x%x\n", phydbg->TxVhtSigA20);
14585 		bcm_bprintf(b, " TxVhtSigA21: 0x%x\n", phydbg->TxVhtSigA21);
14586 		bcm_bprintf(b, " txPktLength: 0x%x\n", phydbg->txPktLength);
14587 		bcm_bprintf(b, " txPsdulengthCtr: 0x%x\n", phydbg->txPsdulengthCtr);
14588 		bcm_bprintf(b, " gpioClkControl: 0x%x\n", phydbg->gpioClkControl);
14589 		bcm_bprintf(b, " gpioSel: 0x%x\n", phydbg->gpioSel);
14590 		bcm_bprintf(b, " pktprocdebug: 0x%x\n", phydbg->pktprocdebug);
14591 		for (i = 0; i < 3; i++)
14592 			bcm_bprintf(b, " gpioOut[%d]: 0x%x\n", i, phydbg->gpioOut[i]);
14593 	}
14594 
14595 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PSM_WD);
14596 	if (tlv) {
14597 		const hnd_ext_trap_psmwd_t* psmwd;
14598 
14599 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PSM_WD), tlv->len);
14600 		psmwd = (const hnd_ext_trap_psmwd_t *)tlv->data;
14601 		bcm_bprintf(b, " version: 0x%x\n", psmwd->version);
14602 		bcm_bprintf(b, " maccontrol: 0x%x\n", psmwd->i32_maccontrol);
14603 		bcm_bprintf(b, " maccommand: 0x%x\n", psmwd->i32_maccommand);
14604 		bcm_bprintf(b, " macintstatus: 0x%x\n", psmwd->i32_macintstatus);
14605 		bcm_bprintf(b, " phydebug: 0x%x\n", psmwd->i32_phydebug);
14606 		bcm_bprintf(b, " clk_ctl_st: 0x%x\n", psmwd->i32_clk_ctl_st);
14607 		if (psmwd->version == 1) {
14608 			dump_psmwd_v1(tlv, b);
14609 		}
14610 		if (psmwd->version == 2) {
14611 			dump_psmwd_v2(tlv, b);
14612 		}
14613 	}
14614 /* PHY TxErr MacDump */
14615 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PHYTXERR_THRESH);
14616 	if (tlv) {
14617 		const hnd_ext_trap_macphytxerr_t* phytxerr = NULL;
14618 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PHYTXERR_THRESH), tlv->len);
14619 		phytxerr = (const hnd_ext_trap_macphytxerr_t *)tlv->data;
14620 		bcm_bprintf(b, " version: 0x%x\n", phytxerr->version);
14621 		bcm_bprintf(b, " trap_reason: %d\n", phytxerr->trap_reason);
14622 		bcm_bprintf(b, " Tsf_rx_ts_0x63E: 0x%x\n", phytxerr->i16_0x63E);
14623 		bcm_bprintf(b, " Tsf_tx_ts_0x640: 0x%x\n", phytxerr->i16_0x640);
14624 		bcm_bprintf(b, " tsf_tmr_rx_end_ts_0x642: 0x%x\n", phytxerr->i16_0x642);
14625 		bcm_bprintf(b, " TDC_FrmLen0_0x846: 0x%x\n", phytxerr->i16_0x846);
14626 		bcm_bprintf(b, " TDC_FrmLen1_0x848: 0x%x\n", phytxerr->i16_0x848);
14627 		bcm_bprintf(b, " TDC_Txtime_0x84a: 0x%x\n", phytxerr->i16_0x84a);
14628 		bcm_bprintf(b, " TXE_BytCntInTxFrmLo_0xa5a: 0x%x\n", phytxerr->i16_0xa5a);
14629 		bcm_bprintf(b, " TXE_BytCntInTxFrmHi_0xa5c: 0x%x\n", phytxerr->i16_0xa5c);
14630 		bcm_bprintf(b, " TDC_VhtPsduLen0_0x856: 0x%x\n", phytxerr->i16_0x856);
14631 		bcm_bprintf(b, " TDC_VhtPsduLen1_0x858: 0x%x\n", phytxerr->i16_0x858);
14632 		bcm_bprintf(b, " PSM_BRC: 0x%x\n", phytxerr->i16_0x490);
14633 		bcm_bprintf(b, " PSM_BRC_1: 0x%x\n", phytxerr->i16_0x4d8);
14634 		bcm_bprintf(b, " shm_txerr_reason: 0x%x\n", phytxerr->shm_txerr_reason);
14635 		bcm_bprintf(b, " shm_pctl0: 0x%x\n", phytxerr->shm_pctl0);
14636 		bcm_bprintf(b, " shm_pctl1: 0x%x\n", phytxerr->shm_pctl1);
14637 		bcm_bprintf(b, " shm_pctl2: 0x%x\n", phytxerr->shm_pctl2);
14638 		bcm_bprintf(b, " shm_lsig0: 0x%x\n", phytxerr->shm_lsig0);
14639 		bcm_bprintf(b, " shm_lsig1: 0x%x\n", phytxerr->shm_lsig1);
14640 		bcm_bprintf(b, " shm_plcp0: 0x%x\n", phytxerr->shm_plcp0);
14641 		bcm_bprintf(b, " shm_plcp1: 0x%x\n", phytxerr->shm_plcp1);
14642 		bcm_bprintf(b, " shm_plcp2: 0x%x\n", phytxerr->shm_plcp2);
14643 		bcm_bprintf(b, " shm_vht_sigb0: 0x%x\n", phytxerr->shm_vht_sigb0);
14644 		bcm_bprintf(b, " shm_vht_sigb1: 0x%x\n", phytxerr->shm_vht_sigb1);
14645 		bcm_bprintf(b, " shm_tx_tst: 0x%x\n", phytxerr->shm_tx_tst);
14646 		bcm_bprintf(b, " shm_txerr_tm: 0x%x\n", phytxerr->shm_txerr_tm);
14647 		bcm_bprintf(b, " shm_curchannel: 0x%x\n", phytxerr->shm_curchannel);
14648 		bcm_bprintf(b, " shm_blk_crx_rxtsf_pos: 0x%x\n", phytxerr->shm_crx_rxtsf_pos);
14649 		bcm_bprintf(b, " shm_lasttx_tsf: 0x%x\n", phytxerr->shm_lasttx_tsf);
14650 		bcm_bprintf(b, " shm_s_rxtsftmrval: 0x%x\n", phytxerr->shm_s_rxtsftmrval);
14651 		bcm_bprintf(b, " Phy_0x29: 0x%x\n", phytxerr->i16_0x29);
14652 		bcm_bprintf(b, " Phy_0x2a: 0x%x\n", phytxerr->i16_0x2a);
14653 	}
14654 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_SUSP);
14655 	if (tlv) {
14656 		const hnd_ext_trap_macsusp_t* macsusp;
14657 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_SUSP), tlv->len);
14658 		macsusp = (const hnd_ext_trap_macsusp_t *)tlv->data;
14659 		bcm_bprintf(b, " version: %d\n", macsusp->version);
14660 		bcm_bprintf(b, " trap_reason: %d\n", macsusp->trap_reason);
14661 		bcm_bprintf(b, " maccontrol: 0x%x\n", macsusp->i32_maccontrol);
14662 		bcm_bprintf(b, " maccommand: 0x%x\n", macsusp->i32_maccommand);
14663 		bcm_bprintf(b, " macintstatus: 0x%x\n", macsusp->i32_macintstatus);
14664 		for (i = 0; i < 4; i++)
14665 			bcm_bprintf(b, " phydebug[%d]: 0x%x\n", i, macsusp->i32_phydebug[i]);
14666 		for (i = 0; i < 8; i++)
14667 			bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macsusp->i32_psmdebug[i]);
14668 		bcm_bprintf(b, " Rxe Status_1: 0x%x\n", macsusp->i16_0x41a);
14669 		bcm_bprintf(b, " Rxe Status_2: 0x%x\n", macsusp->i16_0x41c);
14670 		bcm_bprintf(b, " PSM BRC: 0x%x\n", macsusp->i16_0x490);
14671 		bcm_bprintf(b, " TXE Status: 0x%x\n", macsusp->i16_0x50e);
14672 		bcm_bprintf(b, " TXE xmtdmabusy: 0x%x\n", macsusp->i16_0x55e);
14673 		bcm_bprintf(b, " TXE XMTfifosuspflush: 0x%x\n", macsusp->i16_0x566);
14674 		bcm_bprintf(b, " IFS Stat: 0x%x\n", macsusp->i16_0x690);
14675 		bcm_bprintf(b, " IFS MEDBUSY CTR: 0x%x\n", macsusp->i16_0x692);
14676 		bcm_bprintf(b, " IFS TX DUR: 0x%x\n", macsusp->i16_0x694);
14677 		bcm_bprintf(b, " WEP CTL: 0x%x\n", macsusp->i16_0x7c0);
14678 		bcm_bprintf(b, " TXE AQM fifo Ready: 0x%x\n", macsusp->i16_0x838);
14679 		bcm_bprintf(b, " MHP status: 0x%x\n", macsusp->i16_0x880);
14680 		bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", macsusp->shm_prewds_cnt);
14681 		bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macsusp->shm_ucode_dbgst);
14682 	}
14683 
14684 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_WAKE);
14685 	if (tlv) {
14686 		const hnd_ext_trap_macenab_t* macwake;
14687 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_WAKE), tlv->len);
14688 		macwake = (const hnd_ext_trap_macenab_t *)tlv->data;
14689 		bcm_bprintf(b, " version: 0x%x\n", macwake->version);
14690 		bcm_bprintf(b, " trap_reason: 0x%x\n", macwake->trap_reason);
14691 		bcm_bprintf(b, " maccontrol: 0x%x\n", macwake->i32_maccontrol);
14692 		bcm_bprintf(b, " maccommand: 0x%x\n", macwake->i32_maccommand);
14693 		bcm_bprintf(b, " macintstatus: 0x%x\n", macwake->i32_macintstatus);
14694 		for (i = 0; i < 8; i++)
14695 			bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macwake->i32_psmdebug[i]);
14696 		bcm_bprintf(b, " clk_ctl_st: 0x%x\n", macwake->i32_clk_ctl_st);
14697 		bcm_bprintf(b, " powerctl: 0x%x\n", macwake->i32_powerctl);
14698 		bcm_bprintf(b, " gated clock en: 0x%x\n", macwake->i16_0x1a8);
14699 		bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", macwake->i16_0x480);
14700 		bcm_bprintf(b, " PSM BRC: 0x%x\n", macwake->i16_0x490);
14701 		bcm_bprintf(b, " TSF CTL: 0x%x\n", macwake->i16_0x600);
14702 		bcm_bprintf(b, " IFS Stat: 0x%x\n", macwake->i16_0x690);
14703 		bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", macwake->i16_0x692);
14704 		bcm_bprintf(b, " Slow_CTL: 0x%x\n", macwake->i16_0x6a0);
14705 		bcm_bprintf(b, " Slow_FRAC: 0x%x\n", macwake->i16_0x6a6);
14706 		bcm_bprintf(b, " fast power up delay: 0x%x\n", macwake->i16_0x6a8);
14707 		bcm_bprintf(b, " Slow_PER: 0x%x\n", macwake->i16_0x6aa);
14708 		bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macwake->shm_ucode_dbgst);
14709 	}
14710 
14711 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BUS);
14712 	if (tlv) {
14713 		const bcm_dngl_pcie_hc_t* hc;
14714 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BUS), tlv->len);
14715 		hc = (const bcm_dngl_pcie_hc_t *)tlv->data;
14716 		bcm_bprintf(b, " version: 0x%x\n", hc->version);
14717 		bcm_bprintf(b, " reserved: 0x%x\n", hc->reserved);
14718 		bcm_bprintf(b, " pcie_err_ind_type: 0x%x\n", hc->pcie_err_ind_type);
14719 		bcm_bprintf(b, " pcie_flag: 0x%x\n", hc->pcie_flag);
14720 		bcm_bprintf(b, " pcie_control_reg: 0x%x\n", hc->pcie_control_reg);
14721 		for (i = 0; i < HC_PCIEDEV_CONFIG_REGLIST_MAX; i++)
14722 			bcm_bprintf(b, " pcie_config_regs[%d]: 0x%x\n", i, hc->pcie_config_regs[i]);
14723 	}
14724 
14725 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_HMAP);
14726 	if (tlv) {
14727 		const pcie_hmapviolation_t* hmap;
14728 		hmap = (const pcie_hmapviolation_t *)tlv->data;
14729 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_HMAP), tlv->len);
14730 		bcm_bprintf(b, " HMAP Vio Addr Low: 0x%x\n", hmap->hmap_violationaddr_lo);
14731 		bcm_bprintf(b, " HMAP Vio Addr Hi: 0x%x\n", hmap->hmap_violationaddr_hi);
14732 		bcm_bprintf(b, " HMAP Vio Info: 0x%x\n", hmap->hmap_violation_info);
14733 	}
14734 
14735 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MEM_BIT_FLIP);
14736 	if (tlv) {
14737 		const hnd_ext_trap_fb_mem_err_t* fbit;
14738 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MEM_BIT_FLIP), tlv->len);
14739 		fbit = (const hnd_ext_trap_fb_mem_err_t *)tlv->data;
14740 		bcm_bprintf(b, " version: %d\n", fbit->version);
14741 		bcm_bprintf(b, " flip_bit_err_time: %d\n", fbit->flip_bit_err_time);
14742 	}
14743 
14744 	return BCME_OK;
14745 }
14746 
14747 #ifdef BCMPCIE
14748 int
14749 dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlvs, uint16 tlv_len,
14750 	uint16 seqnum, uint16 xt_id)
14751 {
14752 	dhd_prot_t *prot = dhdp->prot;
14753 	host_timestamp_msg_t *ts_req;
14754 	unsigned long flags;
14755 	uint16 alloced = 0;
14756 	uchar *ts_tlv_buf;
14757 	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
14758 
14759 	if ((tlvs == NULL) || (tlv_len == 0)) {
14760 		DHD_ERROR(("%s: argument error tlv: %p, tlv_len %d\n",
14761 			__FUNCTION__, tlvs, tlv_len));
14762 		return -1;
14763 	}
14764 
14765 #ifdef PCIE_INB_DW
14766 	if (dhd_prot_inc_hostactive_devwake_assert(dhdp->bus) != BCME_OK)
14767 		return BCME_ERROR;
14768 #endif /* PCIE_INB_DW */
14769 
14770 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
14771 
14772 	/* if Host TS req already pending go away */
14773 	if (prot->hostts_req_buf_inuse == TRUE) {
14774 		DHD_ERROR(("one host TS request already pending at device\n"));
14775 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
14776 #ifdef PCIE_INB_DW
14777 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
14778 #endif
14779 		return -1;
14780 	}
14781 
14782 	/* Request for cbuf space */
14783 	ts_req = (host_timestamp_msg_t*)dhd_prot_alloc_ring_space(dhdp, ctrl_ring,
14784 		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,	&alloced, FALSE);
14785 	if (ts_req == NULL) {
14786 		DHD_ERROR(("couldn't allocate space on msgring to send host TS request\n"));
14787 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
14788 #ifdef PCIE_INB_DW
14789 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
14790 #endif
14791 		return -1;
14792 	}
14793 
14794 	/* Common msg buf hdr */
14795 	ts_req->msg.msg_type = MSG_TYPE_HOSTTIMSTAMP;
14796 	ts_req->msg.if_id = 0;
14797 	ts_req->msg.flags =  ctrl_ring->current_phase;
14798 	ts_req->msg.request_id = DHD_H2D_HOSTTS_REQ_PKTID;
14799 
14800 	ts_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
14801 	ctrl_ring->seqnum++;
14802 
14803 	ts_req->xt_id = xt_id;
14804 	ts_req->seqnum = seqnum;
14805 	/* populate TS req buffer info */
14806 	ts_req->input_data_len = htol16(tlv_len);
14807 	ts_req->host_buf_addr.high = htol32(PHYSADDRHI(prot->hostts_req_buf.pa));
14808 	ts_req->host_buf_addr.low = htol32(PHYSADDRLO(prot->hostts_req_buf.pa));
14809 	/* copy ioct payload */
14810 	ts_tlv_buf = (void *) prot->hostts_req_buf.va;
14811 	prot->hostts_req_buf_inuse = TRUE;
14812 	memcpy(ts_tlv_buf, tlvs, tlv_len);
14813 
14814 	OSL_CACHE_FLUSH((void *) prot->hostts_req_buf.va, tlv_len);
14815 
14816 	if (ISALIGNED(ts_tlv_buf, DMA_ALIGN_LEN) == FALSE) {
14817 		DHD_ERROR(("host TS req buffer address unaligned !!!!! \n"));
14818 	}
14819 
14820 	DHD_CTL(("submitted Host TS request request_id %d, data_len %d, tx_id %d, seq %d\n",
14821 		ts_req->msg.request_id, ts_req->input_data_len,
14822 		ts_req->xt_id, ts_req->seqnum));
14823 
14824 	/* upd wrt ptr and raise interrupt */
14825 	dhd_prot_ring_write_complete(dhdp, ctrl_ring, ts_req,
14826 		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
14827 
14828 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
14829 
14830 #ifdef PCIE_INB_DW
14831 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
14832 #endif
14833 	return 0;
14834 } /* dhd_prot_send_host_timestamp */
14835 
14836 bool
14837 dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t *dhd,  bool enable, bool set)
14838 {
14839 	if (set)
14840 		dhd->prot->tx_ts_log_enabled = enable;
14841 
14842 	return dhd->prot->tx_ts_log_enabled;
14843 }
14844 
14845 bool
14846 dhd_prot_data_path_rx_timestamp_logging(dhd_pub_t *dhd,  bool enable, bool set)
14847 {
14848 	if (set)
14849 		dhd->prot->rx_ts_log_enabled = enable;
14850 
14851 	return dhd->prot->rx_ts_log_enabled;
14852 }
14853 
14854 bool
14855 dhd_prot_pkt_noretry(dhd_pub_t *dhd, bool enable, bool set)
14856 {
14857 	if (set)
14858 		dhd->prot->no_retry = enable;
14859 
14860 	return dhd->prot->no_retry;
14861 }
14862 
14863 bool
14864 dhd_prot_pkt_noaggr(dhd_pub_t *dhd, bool enable, bool set)
14865 {
14866 	if (set)
14867 		dhd->prot->no_aggr = enable;
14868 
14869 	return dhd->prot->no_aggr;
14870 }
14871 
14872 bool
14873 dhd_prot_pkt_fixed_rate(dhd_pub_t *dhd, bool enable, bool set)
14874 {
14875 	if (set)
14876 		dhd->prot->fixed_rate = enable;
14877 
14878 	return dhd->prot->fixed_rate;
14879 }
14880 #endif /* BCMPCIE */
14881 
14882 void
14883 dhd_prot_dma_indx_free(dhd_pub_t *dhd)
14884 {
14885 	dhd_prot_t *prot = dhd->prot;
14886 
14887 	dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
14888 	dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
14889 }
14890 
14891 void
14892 dhd_msgbuf_delay_post_ts_bufs(dhd_pub_t *dhd)
14893 {
14894 	if (dhd->prot->max_tsbufpost > 0)
14895 		dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
14896 }
14897 
14898 static void
14899 BCMFASTPATH(dhd_prot_process_fw_timestamp)(dhd_pub_t *dhd, void* buf)
14900 {
14901 #ifdef DHD_TIMESYNC
14902 	fw_timestamp_event_msg_t *resp;
14903 	uint32 pktid;
14904 	uint16 buflen, seqnum;
14905 	void * pkt;
14906 
14907 	resp = (fw_timestamp_event_msg_t *)buf;
14908 	pktid = ltoh32(resp->msg.request_id);
14909 	buflen = ltoh16(resp->buf_len);
14910 	seqnum = ltoh16(resp->seqnum);
14911 
14912 #if defined(DHD_PKTID_AUDIT_RING)
14913 	DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_ctrl_map, pktid,
14914 		DHD_DUPLICATE_FREE);
14915 #endif /* DHD_PKTID_AUDIT_RING */
14916 
14917 	DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d\n",
14918 		pktid, buflen, resp->msg.flags, ltoh16(resp->seqnum)));
14919 
14920 	if (!dhd->prot->cur_ts_bufs_posted) {
14921 		DHD_ERROR(("tsbuf posted are zero, but there is a completion\n"));
14922 		return;
14923 	}
14924 
14925 	dhd->prot->cur_ts_bufs_posted--;
14926 
14927 	if (!dhd_timesync_delay_post_bufs(dhd)) {
14928 		if (dhd->prot->max_tsbufpost > 0) {
14929 			dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
14930 		}
14931 	}
14932 
14933 	pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_TSBUF_RX, TRUE);
14934 
14935 	if (!pkt) {
14936 		DHD_ERROR(("no ts buffer associated with pktid 0x%04x\n", pktid));
14937 		return;
14938 	}
14939 
14940 	PKTSETLEN(dhd->osh, pkt, buflen);
14941 	dhd_timesync_handle_fw_timestamp(dhd->ts, PKTDATA(dhd->osh, pkt), buflen, seqnum);
14942 #ifdef DHD_USE_STATIC_CTRLBUF
14943 	PKTFREE_STATIC(dhd->osh, pkt, TRUE);
14944 #else
14945 	PKTFREE(dhd->osh, pkt, TRUE);
14946 #endif /* DHD_USE_STATIC_CTRLBUF */
14947 #else /* DHD_TIMESYNC */
14948 	DHD_ERROR(("Timesunc feature not compiled in but GOT FW TS message\n"));
14949 #endif /* DHD_TIMESYNC */
14950 
14951 }
14952 
14953 uint16
14954 dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp)
14955 {
14956 	return dhdp->prot->ioctl_trans_id;
14957 }
14958 
14959 #ifdef SNAPSHOT_UPLOAD
14960 /* send request to take snapshot */
14961 int
14962 dhd_prot_send_snapshot_request(dhd_pub_t *dhdp, uint8 snapshot_type, uint8 snapshot_param)
14963 {
14964 	dhd_prot_t *prot = dhdp->prot;
14965 	dhd_dma_buf_t *dma_buf = &prot->snapshot_upload_buf;
14966 	snapshot_upload_request_msg_t *snap_req;
14967 	unsigned long flags;
14968 	uint16 alloced = 0;
14969 	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
14970 
14971 #ifdef PCIE_INB_DW
14972 	if (dhd_prot_inc_hostactive_devwake_assert(dhdp->bus) != BCME_OK)
14973 		return BCME_ERROR;
14974 #endif /* PCIE_INB_DW */
14975 
14976 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
14977 
14978 	/* Request for cbuf space */
14979 	snap_req = (snapshot_upload_request_msg_t *)dhd_prot_alloc_ring_space(dhdp,
14980 		ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
14981 		&alloced, FALSE);
14982 	if (snap_req == NULL) {
14983 		DHD_ERROR(("couldn't allocate space on msgring to send snapshot request\n"));
14984 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
14985 #ifdef PCIE_INB_DW
14986 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
14987 #endif
14988 		return BCME_ERROR;
14989 	}
14990 
14991 	/* Common msg buf hdr */
14992 	snap_req->cmn_hdr.msg_type = MSG_TYPE_SNAPSHOT_UPLOAD;
14993 	snap_req->cmn_hdr.if_id = 0;
14994 	snap_req->cmn_hdr.flags =  ctrl_ring->current_phase;
14995 	snap_req->cmn_hdr.request_id = DHD_H2D_SNAPSHOT_UPLOAD_REQ_PKTID;
14996 	snap_req->cmn_hdr.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
14997 	ctrl_ring->seqnum++;
14998 
14999 	/* snapshot request msg */
15000 	snap_req->snapshot_buf_len = htol32(dma_buf->len);
15001 	snap_req->snapshot_type = snapshot_type;
15002 	snap_req->snapshot_param = snapshot_param;
15003 	snap_req->host_buf_addr.high = htol32(PHYSADDRHI(dma_buf->pa));
15004 	snap_req->host_buf_addr.low = htol32(PHYSADDRLO(dma_buf->pa));
15005 
15006 	if (ISALIGNED(dma_buf->va, DMA_ALIGN_LEN) == FALSE) {
15007 		DHD_ERROR(("snapshot req buffer address unaligned !!!!! \n"));
15008 	}
15009 
15010 	/* clear previous snapshot upload */
15011 	memset(dma_buf->va, 0, dma_buf->len);
15012 	prot->snapshot_upload_len = 0;
15013 	prot->snapshot_type = snapshot_type;
15014 	prot->snapshot_cmpl_pending = TRUE;
15015 
15016 	DHD_CTL(("submitted snapshot request request_id %d, buf_len %d, type %d, param %d\n",
15017 		snap_req->cmn_hdr.request_id, snap_req->snapshot_buf_len,
15018 		snap_req->snapshot_type, snap_req->snapshot_param));
15019 
15020 	/* upd wrt ptr and raise interrupt */
15021 	dhd_prot_ring_write_complete(dhdp, ctrl_ring, snap_req,
15022 		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
15023 
15024 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
15025 
15026 #ifdef PCIE_INB_DW
15027 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
15028 #endif
15029 
15030 	return BCME_OK;
15031 } /* dhd_prot_send_snapshot_request */
15032 
15033 /* get uploaded snapshot */
15034 int
15035 dhd_prot_get_snapshot(dhd_pub_t *dhdp, uint8 snapshot_type, uint32 offset,
15036 	uint32 dst_buf_size, uint8 *dst_buf, uint32 *dst_size, bool *is_more)
15037 {
15038 	dhd_prot_t *prot = dhdp->prot;
15039 	uint8 *buf = prot->snapshot_upload_buf.va;
15040 	uint8 *buf_end = buf + prot->snapshot_upload_len;
15041 	uint32 copy_size;
15042 
15043 	/* snapshot type must match */
15044 	if (prot->snapshot_type != snapshot_type) {
15045 		return BCME_DATA_NOTFOUND;
15046 	}
15047 
15048 	/* snapshot not completed */
15049 	if (prot->snapshot_cmpl_pending) {
15050 		return BCME_NOTREADY;
15051 	}
15052 
15053 	/* offset within the buffer */
15054 	if (buf + offset >= buf_end) {
15055 		return BCME_BADARG;
15056 	}
15057 
15058 	/* copy dst buf size or remaining size */
15059 	copy_size = MIN(dst_buf_size, buf_end - (buf + offset));
15060 	memcpy(dst_buf, buf + offset, copy_size);
15061 
15062 	/* return size and is_more */
15063 	*dst_size = copy_size;
15064 	*is_more = (offset + copy_size < prot->snapshot_upload_len) ?
15065 		TRUE : FALSE;
15066 	return BCME_OK;
15067 } /* dhd_prot_get_snapshot */
15068 
15069 #endif	/* SNAPSHOT_UPLOAD */
15070 
15071 int dhd_get_hscb_info(dhd_pub_t *dhd, void ** va, uint32 *len)
15072 {
15073 	if (!dhd->hscb_enable) {
15074 		if (len) {
15075 			/* prevent "Operation not supported" dhd message */
15076 			*len = 0;
15077 			return BCME_OK;
15078 		}
15079 		return BCME_UNSUPPORTED;
15080 	}
15081 
15082 	if (va) {
15083 		*va = dhd->prot->host_scb_buf.va;
15084 	}
15085 	if (len) {
15086 		*len = dhd->prot->host_scb_buf.len;
15087 	}
15088 
15089 	return BCME_OK;
15090 }
15091 
15092 #ifdef DHD_BUS_MEM_ACCESS
15093 int dhd_get_hscb_buff(dhd_pub_t *dhd, uint32 offset, uint32 length, void * buff)
15094 {
15095 	if (!dhd->hscb_enable) {
15096 		return BCME_UNSUPPORTED;
15097 	}
15098 
15099 	if (dhd->prot->host_scb_buf.va == NULL ||
15100 		((uint64)offset + length > (uint64)dhd->prot->host_scb_buf.len)) {
15101 		return BCME_BADADDR;
15102 	}
15103 
15104 	memcpy(buff, (char*)dhd->prot->host_scb_buf.va + offset, length);
15105 
15106 	return BCME_OK;
15107 }
15108 #endif /* DHD_BUS_MEM_ACCESS */
15109 
15110 #ifdef DHD_HP2P
15111 uint32
15112 dhd_prot_pkt_threshold(dhd_pub_t *dhd, bool set, uint32 val)
15113 {
15114 	if (set)
15115 		dhd->pkt_thresh = (uint16)val;
15116 
15117 	val = dhd->pkt_thresh;
15118 
15119 	return val;
15120 }
15121 
15122 uint32
15123 dhd_prot_time_threshold(dhd_pub_t *dhd, bool set, uint32 val)
15124 {
15125 	if (set)
15126 		dhd->time_thresh = (uint16)val;
15127 
15128 	val = dhd->time_thresh;
15129 
15130 	return val;
15131 }
15132 
15133 uint32
15134 dhd_prot_pkt_expiry(dhd_pub_t *dhd, bool set, uint32 val)
15135 {
15136 	if (set)
15137 		dhd->pkt_expiry = (uint16)val;
15138 
15139 	val = dhd->pkt_expiry;
15140 
15141 	return val;
15142 }
15143 
15144 uint8
15145 dhd_prot_hp2p_enable(dhd_pub_t *dhd, bool set, int enable)
15146 {
15147 	uint8 ret = 0;
15148 	if (set) {
15149 		dhd->hp2p_enable = (enable & 0xf) ? TRUE : FALSE;
15150 		dhd->hp2p_infra_enable = ((enable >> 4) & 0xf) ? TRUE : FALSE;
15151 
15152 		if (enable) {
15153 			dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_TID_MAP);
15154 		} else {
15155 			dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_AC_MAP);
15156 		}
15157 	}
15158 	ret = dhd->hp2p_infra_enable ? 0x1:0x0;
15159 	ret <<= 4;
15160 	ret |= dhd->hp2p_enable ? 0x1:0x0;
15161 
15162 	return ret;
15163 }
15164 
15165 static void
15166 dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus)
15167 {
15168 	ts_timestamp_t *ts = (ts_timestamp_t *)&rxstatus->ts;
15169 	hp2p_info_t *hp2p_info;
15170 	uint32 dur1;
15171 
15172 	hp2p_info = &dhd->hp2p_info[0];
15173 	dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 100;
15174 
15175 	if (dur1 > (MAX_RX_HIST_BIN - 1)) {
15176 		dur1 = MAX_RX_HIST_BIN - 1;
15177 		DHD_INFO(("%s: 0x%x 0x%x\n",
15178 			__FUNCTION__, ts->low, ts->high));
15179 	}
15180 
15181 	hp2p_info->rx_t0[dur1 % MAX_RX_HIST_BIN]++;
15182 	return;
15183 }
15184 
15185 static void
15186 dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus)
15187 {
15188 	ts_timestamp_t *ts = (ts_timestamp_t *)&txstatus->ts;
15189 	uint16 flowid = txstatus->compl_hdr.flow_ring_id;
15190 	uint32 hp2p_flowid, dur1, dur2;
15191 	hp2p_info_t *hp2p_info;
15192 
15193 	hp2p_flowid = dhd->bus->max_submission_rings -
15194 		dhd->bus->max_cmn_rings - flowid + 1;
15195 	hp2p_info = &dhd->hp2p_info[hp2p_flowid];
15196 	ts = (ts_timestamp_t *)&(txstatus->ts);
15197 
15198 	dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 1000;
15199 	if (dur1 > (MAX_TX_HIST_BIN - 1)) {
15200 		dur1 = MAX_TX_HIST_BIN - 1;
15201 		DHD_INFO(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
15202 	}
15203 	hp2p_info->tx_t0[dur1 % MAX_TX_HIST_BIN]++;
15204 
15205 	dur2 = (((ts->high >> 10) & 0x3FF) * HP2P_TIME_SCALE) / 1000;
15206 	if (dur2 > (MAX_TX_HIST_BIN - 1)) {
15207 		dur2 = MAX_TX_HIST_BIN - 1;
15208 		DHD_INFO(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
15209 	}
15210 
15211 	hp2p_info->tx_t1[dur2 % MAX_TX_HIST_BIN]++;
15212 	return;
15213 }
15214 
15215 enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer)
15216 {
15217 	hp2p_info_t *hp2p_info;
15218 	unsigned long flags;
15219 	dhd_pub_t *dhdp;
15220 
15221 	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
15222 	hp2p_info = container_of(timer, hp2p_info_t, timer);
15223 	GCC_DIAGNOSTIC_POP();
15224 
15225 	dhdp = hp2p_info->dhd_pub;
15226 	if (!dhdp) {
15227 		goto done;
15228 	}
15229 
15230 	DHD_INFO(("%s: pend_item = %d flowid = %d\n",
15231 		__FUNCTION__, ((msgbuf_ring_t *)hp2p_info->ring)->pend_items_count,
15232 		hp2p_info->flowid));
15233 
15234 	flags = dhd_os_hp2plock(dhdp);
15235 
15236 	dhd_prot_txdata_write_flush(dhdp, hp2p_info->flowid);
15237 	hp2p_info->hrtimer_init = FALSE;
15238 	hp2p_info->num_timer_limit++;
15239 
15240 	dhd_os_hp2punlock(dhdp, flags);
15241 done:
15242 	return HRTIMER_NORESTART;
15243 }
15244 
15245 static void
15246 dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid)
15247 {
15248 	hp2p_info_t *hp2p_info;
15249 	uint16 hp2p_flowid;
15250 
15251 	hp2p_flowid = dhd->bus->max_submission_rings -
15252 		dhd->bus->max_cmn_rings - flowid + 1;
15253 	hp2p_info = &dhd->hp2p_info[hp2p_flowid];
15254 
15255 	if (ring->pend_items_count == dhd->pkt_thresh) {
15256 		dhd_prot_txdata_write_flush(dhd, flowid);
15257 
15258 		hp2p_info->hrtimer_init = FALSE;
15259 		hp2p_info->ring = NULL;
15260 		hp2p_info->num_pkt_limit++;
15261 		hrtimer_cancel(&hp2p_info->timer);
15262 
15263 		DHD_INFO(("%s: cancel hrtimer for flowid = %d \n"
15264 			"hp2p_flowid = %d pkt_thresh = %d\n",
15265 			__FUNCTION__, flowid, hp2p_flowid, dhd->pkt_thresh));
15266 	} else {
15267 		if (hp2p_info->hrtimer_init == FALSE) {
15268 			hp2p_info->hrtimer_init = TRUE;
15269 			hp2p_info->flowid = flowid;
15270 			hp2p_info->dhd_pub = dhd;
15271 			hp2p_info->ring = ring;
15272 			hp2p_info->num_timer_start++;
15273 
15274 			hrtimer_start(&hp2p_info->timer,
15275 				ktime_set(0, dhd->time_thresh * 1000), HRTIMER_MODE_REL);
15276 
15277 			DHD_INFO(("%s: start hrtimer for flowid = %d hp2_flowid = %d\n",
15278 					__FUNCTION__, flowid, hp2p_flowid));
15279 		}
15280 	}
15281 	return;
15282 }
15283 
15284 static void
15285 dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc)
15286 {
15287 	uint64 ts;
15288 
15289 	ts = local_clock();
15290 	do_div(ts, 1000);
15291 
15292 	txdesc->metadata_buf_len = 0;
15293 	txdesc->metadata_buf_addr.high_addr = htol32((ts >> 32) & 0xFFFFFFFF);
15294 	txdesc->metadata_buf_addr.low_addr = htol32(ts & 0xFFFFFFFF);
15295 	txdesc->exp_time = dhd->pkt_expiry;
15296 
15297 	DHD_INFO(("%s: metadata_high = 0x%x metadata_low = 0x%x exp_time = %x\n",
15298 		__FUNCTION__, txdesc->metadata_buf_addr.high_addr,
15299 		txdesc->metadata_buf_addr.low_addr,
15300 		txdesc->exp_time));
15301 
15302 	return;
15303 }
15304 #endif /* DHD_HP2P */
15305 
15306 #ifdef DHD_MAP_LOGGING
15307 void
15308 dhd_prot_smmu_fault_dump(dhd_pub_t *dhdp)
15309 {
15310 	dhd_prot_debug_info_print(dhdp);
15311 	OSL_DMA_MAP_DUMP(dhdp->osh);
15312 #ifdef DHD_MAP_PKTID_LOGGING
15313 	dhd_pktid_logging_dump(dhdp);
15314 #endif /* DHD_MAP_PKTID_LOGGING */
15315 #ifdef DHD_FW_COREDUMP
15316 	dhdp->memdump_type = DUMP_TYPE_SMMU_FAULT;
15317 #ifdef DNGL_AXI_ERROR_LOGGING
15318 	dhdp->memdump_enabled = DUMP_MEMFILE;
15319 	dhd_bus_get_mem_dump(dhdp);
15320 #else
15321 	dhdp->memdump_enabled = DUMP_MEMONLY;
15322 	dhd_bus_mem_dump(dhdp);
15323 #endif /* DNGL_AXI_ERROR_LOGGING */
15324 #endif /* DHD_FW_COREDUMP */
15325 }
15326 #endif /* DHD_MAP_LOGGING */
15327 
15328 #ifdef DHD_FLOW_RING_STATUS_TRACE
15329 void
15330 dhd_dump_bus_flow_ring_status_trace(
15331 	dhd_bus_t *bus, struct bcmstrbuf *strbuf, dhd_frs_trace_t *frs_trace, int dumpsz, char *str)
15332 {
15333 	int i;
15334 	dhd_prot_t *prot = bus->dhd->prot;
15335 	uint32 isr_cnt = bus->frs_isr_count % FRS_TRACE_SIZE;
15336 	uint32 dpc_cnt = bus->frs_dpc_count % FRS_TRACE_SIZE;
15337 
15338 	bcm_bprintf(strbuf, "---- %s ------ isr_cnt: %d dpc_cnt %d\n",
15339 		str, isr_cnt, dpc_cnt);
15340 	bcm_bprintf(strbuf, "%s\t%s\t%s\t%s\t%s\t%s\t",
15341 		"Timestamp ns", "H2DCtrlPost", "D2HCtrlCpl",
15342 		"H2DRxPost", "D2HRxCpl", "D2HTxCpl");
15343 	if (prot->h2dring_info_subn != NULL && prot->d2hring_info_cpln != NULL) {
15344 		bcm_bprintf(strbuf, "%s\t%s\t", "H2DRingInfoPost", "D2HRingInfoCpl");
15345 	}
15346 	if (prot->d2hring_edl != NULL) {
15347 		bcm_bprintf(strbuf, "%s", "D2HRingEDL");
15348 	}
15349 	bcm_bprintf(strbuf, "\n");
15350 	for (i = 0; i < dumpsz; i ++) {
15351 		bcm_bprintf(strbuf, "%llu\t%6u-%u\t%6u-%u\t%6u-%u\t%6u-%u\t%6u-%u\t",
15352 				frs_trace[i].timestamp,
15353 				frs_trace[i].h2d_ctrl_post_drd,
15354 				frs_trace[i].h2d_ctrl_post_dwr,
15355 				frs_trace[i].d2h_ctrl_cpln_drd,
15356 				frs_trace[i].d2h_ctrl_cpln_dwr,
15357 				frs_trace[i].h2d_rx_post_drd,
15358 				frs_trace[i].h2d_rx_post_dwr,
15359 				frs_trace[i].d2h_rx_cpln_drd,
15360 				frs_trace[i].d2h_rx_cpln_dwr,
15361 				frs_trace[i].d2h_tx_cpln_drd,
15362 				frs_trace[i].d2h_tx_cpln_dwr);
15363 		if (prot->h2dring_info_subn != NULL && prot->d2hring_info_cpln != NULL) {
15364 			bcm_bprintf(strbuf, "%6u-%u\t%6u-%u\t",
15365 				frs_trace[i].h2d_info_post_drd,
15366 				frs_trace[i].h2d_info_post_dwr,
15367 				frs_trace[i].d2h_info_cpln_drd,
15368 				frs_trace[i].d2h_info_cpln_dwr);
15369 		}
15370 		if (prot->d2hring_edl != NULL) {
15371 			bcm_bprintf(strbuf, "%6u-%u",
15372 				frs_trace[i].d2h_ring_edl_drd,
15373 				frs_trace[i].d2h_ring_edl_dwr);
15374 
15375 		}
15376 		bcm_bprintf(strbuf, "\n");
15377 	}
15378 	bcm_bprintf(strbuf, "--------------------------\n");
15379 }
15380 
15381 void
15382 dhd_dump_bus_flow_ring_status_isr_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
15383 {
15384 	int dumpsz;
15385 
15386 	dumpsz = bus->frs_isr_count < FRS_TRACE_SIZE ?
15387 		bus->frs_isr_count : FRS_TRACE_SIZE;
15388 	if (dumpsz == 0) {
15389 		bcm_bprintf(strbuf, "\nEMPTY ISR FLOW RING TRACE\n");
15390 		return;
15391 	}
15392 	dhd_dump_bus_flow_ring_status_trace(bus, strbuf, bus->frs_isr_trace,
15393 		dumpsz, "ISR FLOW RING TRACE DRD-DWR");
15394 }
15395 
15396 void
15397 dhd_dump_bus_flow_ring_status_dpc_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
15398 {
15399 	int dumpsz;
15400 
15401 	dumpsz = bus->frs_dpc_count < FRS_TRACE_SIZE ?
15402 		bus->frs_dpc_count : FRS_TRACE_SIZE;
15403 	if (dumpsz == 0) {
15404 		bcm_bprintf(strbuf, "\nEMPTY ISR FLOW RING TRACE\n");
15405 		return;
15406 	}
15407 	dhd_dump_bus_flow_ring_status_trace(bus, strbuf, bus->frs_dpc_trace,
15408 		dumpsz, "DPC FLOW RING TRACE DRD-DWR");
15409 }
15410 static void
15411 dhd_bus_flow_ring_status_trace(dhd_pub_t *dhd, dhd_frs_trace_t *frs_trace)
15412 {
15413 	dhd_prot_t *prot = dhd->prot;
15414 	msgbuf_ring_t *ring;
15415 
15416 	ring = &prot->h2dring_ctrl_subn;
15417 	frs_trace->h2d_ctrl_post_drd =
15418 		dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
15419 	frs_trace->h2d_ctrl_post_dwr =
15420 		dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
15421 
15422 	ring = &prot->d2hring_ctrl_cpln;
15423 	frs_trace->d2h_ctrl_cpln_drd =
15424 		dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
15425 	frs_trace->d2h_ctrl_cpln_dwr =
15426 		dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
15427 
15428 	ring = &prot->h2dring_rxp_subn;
15429 	frs_trace->h2d_rx_post_drd =
15430 		dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
15431 	frs_trace->h2d_rx_post_dwr =
15432 		dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
15433 
15434 	ring = &prot->d2hring_rx_cpln;
15435 	frs_trace->d2h_rx_cpln_drd =
15436 		dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
15437 	frs_trace->d2h_rx_cpln_dwr =
15438 		dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
15439 
15440 	ring = &prot->d2hring_tx_cpln;
15441 	frs_trace->d2h_tx_cpln_drd =
15442 		dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
15443 	frs_trace->d2h_tx_cpln_dwr =
15444 		dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
15445 
15446 	if (dhd->prot->h2dring_info_subn != NULL && dhd->prot->d2hring_info_cpln != NULL) {
15447 		ring = prot->h2dring_info_subn;
15448 		frs_trace->h2d_info_post_drd =
15449 			dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
15450 		frs_trace->h2d_info_post_dwr =
15451 			dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
15452 
15453 		ring = prot->d2hring_info_cpln;
15454 		frs_trace->d2h_info_cpln_drd =
15455 			dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
15456 		frs_trace->d2h_info_cpln_dwr =
15457 			dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
15458 	}
15459 	if (prot->d2hring_edl != NULL) {
15460 		ring = prot->d2hring_edl;
15461 		frs_trace->d2h_ring_edl_drd =
15462 			dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
15463 		frs_trace->d2h_ring_edl_dwr =
15464 			dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
15465 	}
15466 
15467 }
15468 
15469 void
15470 dhd_bus_flow_ring_status_isr_trace(dhd_pub_t *dhd)
15471 {
15472 	uint32 cnt = dhd->bus->frs_isr_count % FRS_TRACE_SIZE;
15473 	dhd_frs_trace_t *frs_isr_trace = &dhd->bus->frs_isr_trace[cnt];
15474 	uint64 time_ns_prev = frs_isr_trace[cnt].timestamp;
15475 	uint64 time_ns_now = OSL_LOCALTIME_NS();
15476 
15477 	if ((time_ns_now - time_ns_prev) < 250000) { /* delta less than 250us */
15478 		return;
15479 	}
15480 
15481 	dhd_bus_flow_ring_status_trace(dhd, frs_isr_trace);
15482 
15483 	frs_isr_trace->timestamp = OSL_LOCALTIME_NS();
15484 	dhd->bus->frs_isr_count ++;
15485 }
15486 
15487 void
15488 dhd_bus_flow_ring_status_dpc_trace(dhd_pub_t *dhd)
15489 {
15490 	uint32 cnt = dhd->bus->frs_dpc_count % FRS_TRACE_SIZE;
15491 	dhd_frs_trace_t *frs_dpc_trace = &dhd->bus->frs_dpc_trace[cnt];
15492 	uint64 time_ns_prev = frs_dpc_trace[cnt].timestamp;
15493 	uint64 time_ns_now = OSL_LOCALTIME_NS();
15494 
15495 	if ((time_ns_now - time_ns_prev) < 250000) { /* delta less than 250us */
15496 		return;
15497 	}
15498 
15499 	dhd_bus_flow_ring_status_trace(dhd, frs_dpc_trace);
15500 
15501 	frs_dpc_trace->timestamp = OSL_LOCALTIME_NS();
15502 	dhd->bus->frs_dpc_count ++;
15503 }
15504 #endif /* DHD_FLOW_RING_STATUS_TRACE */
15505