1 /**
2 * @file definition of host message ring functionality
3 * Provides type definitions and function prototypes used to link the
4 * DHD OS, bus, and protocol modules.
5 *
6 * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
7 *
8 * Copyright (C) 1999-2017, Broadcom Corporation
9 *
10 * Unless you and Broadcom execute a separate written software license
11 * agreement governing use of this software, this software is licensed to you
12 * under the terms of the GNU General Public License version 2 (the "GPL"),
13 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
14 * following added to such license:
15 *
16 * As a special exception, the copyright holders of this software give you
17 * permission to link this software with independent modules, and to copy and
18 * distribute the resulting executable under terms of your choice, provided that
19 * you also meet, for each linked independent module, the terms and conditions of
20 * the license of that module. An independent module is a module which is not
21 * derived from this software. The special exception does not apply to any
22 * modifications of the software.
23 *
24 * Notwithstanding the above, under no circumstances may you combine this
25 * software in any way with any other Broadcom software provided under a license
26 * other than the GPL, without Broadcom's express prior written consent.
27 *
28 *
29 * <<Broadcom-WL-IPTag/Open:>>
30 *
31 * $Id: dhd_msgbuf.c 701962 2017-05-30 06:13:15Z $
32 */
33
34 #include <typedefs.h>
35 #include <osl.h>
36
37 #include <bcmutils.h>
38 #include <bcmmsgbuf.h>
39 #include <bcmendian.h>
40 #include <bcmstdlib_s.h>
41
42 #include <dngl_stats.h>
43 #include <dhd.h>
44 #include <dhd_proto.h>
45
46 #include <dhd_bus.h>
47
48 #include <dhd_dbg.h>
49 #include <siutils.h>
50 #include <dhd_debug.h>
51
52 #include <dhd_flowring.h>
53
54 #include <pcie_core.h>
55 #include <bcmpcie.h>
56 #include <dhd_pcie.h>
57
58 #if defined(DHD_LB)
59 #include <linux/cpu.h>
60 #include <bcm_ring.h>
61 #define DHD_LB_WORKQ_SZ (8192)
62 #define DHD_LB_WORKQ_SYNC (16)
63 #define DHD_LB_WORK_SCHED (DHD_LB_WORKQ_SYNC * 2)
64 #endif /* DHD_LB */
65
66 #include <etd.h>
67 #include <hnd_debug.h>
68 #include <bcmtlv.h>
69 #include <hnd_armtrap.h>
70 #include <dnglevent.h>
71
72 #ifdef DHD_PKT_LOGGING
73 #include <dhd_pktlog.h>
74 #include <dhd_linux_pktdump.h>
75 #endif /* DHD_PKT_LOGGING */
76 #ifdef DHD_EWPR_VER2
77 #include <dhd_bitpack.h>
78 #endif /* DHD_EWPR_VER2 */
79
80 extern char dhd_version[];
81 extern char fw_version[];
82
83 /**
84 * Host configures a soft doorbell for d2h rings by specifying a 32bit host
85 * address where a value must be written. Host may also request interrupt
86 * coalescing on this soft doorbell.
87 * Use Case: Hosts with network processors may register with the dongle the
88 * network processor's thread wakeup register and a value corresponding to the
89 * core/thread context. Dongle will issue a write transaction <address,value>
90 * to the PCIE RC, which the host will need to route to the mapped register
91 * space.
92 */
93 /* #define DHD_D2H_SOFT_DOORBELL_SUPPORT */
94
95 /* Dependency Check */
96 #if defined(IOCTLRESP_USE_CONSTMEM) && defined(DHD_USE_STATIC_CTRLBUF)
97 #error "DHD_USE_STATIC_CTRLBUF is NOT working with DHD_USE_OSLPKT_FOR_RESPBUF"
98 #endif /* IOCTLRESP_USE_CONSTMEM && DHD_USE_STATIC_CTRLBUF */
99
100 #define RETRIES 2 /* # of retries to retrieve matching ioctl response */
101
102 #define DEFAULT_RX_BUFFERS_TO_POST 256
103 #define RXBUFPOST_THRESHOLD 32
104 #define RX_BUF_BURST 32 /* Rx buffers for MSDU Data */
105
106 #define DHD_STOP_QUEUE_THRESHOLD 200
107 #define DHD_START_QUEUE_THRESHOLD 100
108
109 #define RX_DMA_OFFSET 8 /* Mem2mem DMA inserts an extra 8 bytes */
110 #define IOCT_RETBUF_SIZE (RX_DMA_OFFSET + WLC_IOCTL_MAXLEN)
111
112 /* flags for ioctl pending status */
113 #define MSGBUF_IOCTL_ACK_PENDING (1<<0)
114 #define MSGBUF_IOCTL_RESP_PENDING (1<<1)
115
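/*
 * Illustrative sketch (documentation only, not compiled): one plausible way the
 * MSGBUF_IOCTL_*_PENDING flags could be tracked in the protocol layer's
 * ioctl_state byte. The actual transitions live in the ioctl submit/ack/complete
 * handlers further below; this is an assumption-labelled example, not the
 * definitive flow.
 */
#if 0
	/* on ioctl submission: expect both an ACK and a response from the dongle */
	prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING;

	/* on MSG_TYPE_IOCTLPTR_REQ_ACK: the ACK has arrived, response still pending */
	prot->ioctl_state &= ~MSGBUF_IOCTL_ACK_PENDING;

	/* on MSG_TYPE_IOCTL_CMPLT: nothing pending any more */
	prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING;
#endif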
116 #define DHD_IOCTL_REQ_PKTBUFSZ 2048
117 #define MSGBUF_IOCTL_MAX_RQSTLEN (DHD_IOCTL_REQ_PKTBUFSZ - H2DRING_CTRL_SUB_ITEMSIZE)
118
119 #define DMA_ALIGN_LEN 4
120
121 #define DMA_D2H_SCRATCH_BUF_LEN 8
122 #define DMA_XFER_LEN_LIMIT 0x400000
123
124 #ifdef BCM_HOST_BUF
125 #ifndef DMA_HOST_BUFFER_LEN
126 #define DMA_HOST_BUFFER_LEN 0x200000
127 #endif // endif
128 #endif /* BCM_HOST_BUF */
129
130 #define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ 8192
131
132 #define DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D 1
133 #define DHD_FLOWRING_MAX_EVENTBUF_POST 32
134 #define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST 8
135 #define DHD_H2D_INFORING_MAX_BUF_POST 32
136 #define DHD_MAX_TSBUF_POST 8
137
138 #define DHD_PROT_FUNCS 43
139
140 /* Length of buffer in host for bus throughput measurement */
141 #define DHD_BUS_TPUT_BUF_LEN 2048
142
143 #define TXP_FLUSH_NITEMS
144
145 /* optimization to write "n" tx items at a time to ring */
146 #define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT 48
147
148 #define RING_NAME_MAX_LENGTH 24
149 #define CTRLSUB_HOSTTS_MEESAGE_SIZE 1024
150 /* Giving room before ioctl_trans_id rolls over. */
151 #define BUFFER_BEFORE_ROLLOVER 300
152
153 /* 512K memory + 32K registers */
154 #define SNAPSHOT_UPLOAD_BUF_SIZE ((512 + 32) * 1024)
155
156 struct msgbuf_ring; /* ring context for common and flow rings */
157
158 /**
159 * PCIE D2H DMA Complete Sync Modes
160 *
161 * Firmware may interrupt the host, prior to the D2H Mem2Mem DMA completes into
162 * Host system memory. A WAR using one of the following approaches is needed:
163 * 1. Dongle places a modulo-253 seqnum in the last word of each D2H message
164 * 2. XOR Checksum, with epoch# in each work item. Dongle builds an XOR checksum
165 * and writes it in the last word of each work item. Each work item also
166 * carries a seqnum = sequence number % 253.
167 *
168 * 3. Read Barrier: Dongle does a host memory read access prior to posting an
169 * interrupt, ensuring that D2H data transfer indeed completed.
170 * 4. Dongle DMA's all indices after producing items in the D2H ring, flushing
171 * ring contents before the indices.
172 *
173 * Host does not sync for DMA to complete with option #3 or #4, and a noop sync
174 * callback (see dhd_prot_d2h_sync_none) may be bound.
175 *
176 * Dongle advertizes host side sync mechanism requirements.
177 */
178
179 #define PCIE_D2H_SYNC_WAIT_TRIES (512U)
180 #define PCIE_D2H_SYNC_NUM_OF_STEPS (5U)
181 #define PCIE_D2H_SYNC_DELAY (100UL) /* in terms of usecs */
182
183 #define HWA_DB_TYPE_RXPOST (0x0050)
184 #define HWA_DB_TYPE_TXCPLT (0x0060)
185 #define HWA_DB_TYPE_RXCPLT (0x0170)
186 #define HWA_DB_INDEX_VALUE(val) ((uint32)(val) << 16)
187
188 #define HWA_ENAB_BITMAP_RXPOST (1U << 0) /* 1A */
189 #define HWA_ENAB_BITMAP_RXCPLT (1U << 1) /* 2B */
190 #define HWA_ENAB_BITMAP_TXCPLT (1U << 2) /* 4B */
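/*
 * Illustrative sketch (documentation only, not compiled): how a HWA doorbell
 * value might be composed from the macros above, assuming the new ring index is
 * placed in the upper 16 bits via HWA_DB_INDEX_VALUE() and the HWA_DB_TYPE_*
 * code selects the ring class. The real doorbell write path is in the ring
 * update code; treat this only as a reading aid for the macros.
 */
#if 0
	uint32 db_value;

	if (ring->hwa_db_type) { /* non-zero only for HWA-enabled data path rings */
		db_value = ring->hwa_db_type | HWA_DB_INDEX_VALUE(ring->wr);
		/* db_value would then be written to the dongle doorbell register */
	}
#endif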
191
192 /**
193 * Custom callback attached based upon D2H DMA Sync mode advertized by dongle.
194 *
195 * On success: return cmn_msg_hdr_t::msg_type
196 * On failure: return 0 (invalid msg_type)
197 */
198 typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
199 volatile cmn_msg_hdr_t *msg, int msglen);
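/*
 * Illustrative sketch (documentation only, not compiled): a D2H consumer calls
 * the bound sync callback before parsing a work item and treats a return of
 * MSG_TYPE_INVALID (0) as "skip this item". The callback is selected in
 * dhd_prot_d2h_sync_init() based on the mode the dongle advertizes. msg_addr
 * below is a hypothetical pointer to the current read location in the ring.
 */
#if 0
	uint8 msg_type;

	msg_type = dhd->prot->d2h_sync_cb(dhd, ring,
		(volatile cmn_msg_hdr_t *)msg_addr, ring->item_len);
	if (msg_type == MSG_TYPE_INVALID) {
		/* DMA did not complete in time; the sync handler already skipped the item */
	}
#endif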
200
201 /**
202 * Custom callback attached based upon D2H DMA Sync mode advertized by dongle.
203 * For EDL messages.
204 *
205 * On success: return cmn_msg_hdr_t::msg_type
206 * On failure: return 0 (invalid msg_type)
207 */
208 #ifdef EWP_EDL
209 typedef int (* d2h_edl_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
210 volatile cmn_msg_hdr_t *msg);
211 #endif /* EWP_EDL */
212
213 /*
214 * +----------------------------------------------------------------------------
215 *
216 * RingIds and FlowId are not equivalent as ringids include D2H rings whereas
217 * flowids do not.
218 *
219 * Dongle advertizes the max H2D rings, as max_sub_queues = 'N' which includes
220 * the H2D common rings as well as the (N-BCMPCIE_H2D_COMMON_MSGRINGS) flowrings
221 *
222 * Here is a sample mapping (based on PCIE Full Dongle Rev5) where
223 * BCMPCIE_H2D_COMMON_MSGRINGS = 2, i.e. 2 H2D common rings,
224 * BCMPCIE_COMMON_MSGRINGS = 5, i.e. the 2 H2D common rings plus 3 D2H common rings.
225 *
226 * H2D Control Submit RingId = 0 FlowId = 0 reserved never allocated
227 * H2D RxPost Submit RingId = 1 FlowId = 1 reserved never allocated
228 *
229 * D2H Control Complete RingId = 2
230 * D2H Transmit Complete RingId = 3
231 * D2H Receive Complete RingId = 4
232 *
233 * H2D TxPost FLOWRING RingId = 5 FlowId = 2 (1st flowring)
234 * H2D TxPost FLOWRING RingId = 6 FlowId = 3 (2nd flowring)
235 * H2D TxPost FLOWRING RingId = 5 + (X-1) FlowId = 2 + (X-1) (Xth flowring)
236 *
237 * When TxPost FlowId(s) are allocated, the FlowIds [0..FLOWID_RESERVED) are
238 * unused, where FLOWID_RESERVED is BCMPCIE_H2D_COMMON_MSGRINGS.
239 *
240 * Example: when a system supports 4 bc/mc and 128 uc flowrings, with
241 * BCMPCIE_H2D_COMMON_MSGRINGS = 2, and BCMPCIE_COMMON_MSGRINGS = 5, the
242 * FlowId values would be in the range [2..133] and the corresponding
243 * RingId values would be in the range [5..136].
244 *
245 * The flowId allocator may choose to allocate FlowIds:
246 * bc/mc (per virtual interface) in one consecutive range [2..(2+VIFS))
247 * X# of uc flowids in consecutive ranges (per station Id), where X is the
248 * packet's access category (e.g. 4 uc flowids per station).
249 *
250 * CAUTION:
251 * When DMA indices array feature is used, RingId=5, corresponding to the 0th
252 * FLOWRING, will actually use the FlowId as the index into the H2D DMA indices,
253 * since the FlowId truly represents the index in the H2D DMA indices array.
254 *
255 * Likewise, in the D2H direction, the RingId - BCMPCIE_H2D_COMMON_MSGRINGS,
256 * will represent the index in the D2H DMA indices array.
257 *
258 * +----------------------------------------------------------------------------
259 */
260
261 /* First TxPost Flowring Id */
262 #define DHD_FLOWRING_START_FLOWID BCMPCIE_H2D_COMMON_MSGRINGS
263
264 /* Determine whether a ringid belongs to a TxPost flowring */
265 #define DHD_IS_FLOWRING(ringid, max_flow_rings) \
266 ((ringid) >= BCMPCIE_COMMON_MSGRINGS && \
267 (ringid) < ((max_flow_rings) + BCMPCIE_COMMON_MSGRINGS))
268
269 /* Convert a H2D TxPost FlowId to a MsgBuf RingId */
270 #define DHD_FLOWID_TO_RINGID(flowid) \
271 (BCMPCIE_COMMON_MSGRINGS + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS))
272
273 /* Convert a MsgBuf RingId to a H2D TxPost FlowId */
274 #define DHD_RINGID_TO_FLOWID(ringid) \
275 (BCMPCIE_H2D_COMMON_MSGRINGS + ((ringid) - BCMPCIE_COMMON_MSGRINGS))
276
277 /* Convert a H2D MsgBuf RingId to an offset index into the H2D DMA indices array
278 * This may be used for the H2D DMA WR index array or H2D DMA RD index array or
279 * any array of H2D rings.
280 */
281 #define DHD_H2D_RING_OFFSET(ringid) \
282 (((ringid) >= BCMPCIE_COMMON_MSGRINGS) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid))
283
284 /* Convert a H2D MsgBuf Flowring Id to an offset index into the H2D DMA indices array
285 * This may be used for IFRM.
286 */
287 #define DHD_H2D_FRM_FLOW_RING_OFFSET(ringid) \
288 ((ringid) - BCMPCIE_COMMON_MSGRINGS)
289
290 /* Convert a D2H MsgBuf RingId to an offset index into the D2H DMA indices array
291 * This may be used for the D2H DMA WR index array or D2H DMA RD index array or
292 * any array of D2H rings.
293 * d2h debug ring is located at the end, i.e. after all the tx flow rings and h2d debug ring
294 * max_h2d_rings: total number of h2d rings
295 */
296 #define DHD_D2H_RING_OFFSET(ringid, max_h2d_rings) \
297 ((ringid) > (max_h2d_rings) ? \
298 ((ringid) - (max_h2d_rings)) : \
299 ((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS))
300
301 /* Convert a D2H DMA Indices Offset to a RingId */
302 #define DHD_D2H_RINGID(offset) \
303 ((offset) + BCMPCIE_H2D_COMMON_MSGRINGS)
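/*
 * Documentation-only example (not compiled): the conversion macros above,
 * evaluated against the sample mapping described earlier, i.e. assuming
 * BCMPCIE_H2D_COMMON_MSGRINGS == 2 and BCMPCIE_COMMON_MSGRINGS == 5.
 */
#if 0
	ASSERT(DHD_FLOWID_TO_RINGID(2) == 5);    /* 1st TxPost flowring */
	ASSERT(DHD_RINGID_TO_FLOWID(5) == 2);    /* and back */
	ASSERT(DHD_H2D_RING_OFFSET(1) == 1);     /* H2D RxPost common ring keeps its ringid */
	ASSERT(DHD_H2D_RING_OFFSET(5) == 2);     /* flowrings use their flowid as the offset */
	ASSERT(DHD_D2H_RINGID(1) == 3);          /* D2H offset 1 is the Tx Complete ring */
#endif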
304
305 #define DHD_DMAH_NULL ((void*)NULL)
306
307 /*
308 * Pad a DMA-able buffer by an additional cacheline. If the end of the DMA-able
309 * buffer does not occupy the entire cacheline, and another object is placed
310 * following the DMA-able buffer, data corruption may occur if the DMA-able
311 * buffer is DMAed into (e.g. D2H direction) when HW cache coherency
312 * is not available.
313 */
314 #if defined(L1_CACHE_BYTES)
315 #define DHD_DMA_PAD (L1_CACHE_BYTES)
316 #else
317 #define DHD_DMA_PAD (128)
318 #endif // endif
319
320 /*
321 * +----------------------------------------------------------------------------
322 * Flowring Pool
323 *
324 * Unlike common rings, which are attached very early on (dhd_prot_attach),
325 * flowrings are dynamically instantiated. Moreover, flowrings may require a
326 * larger DMA-able buffer. To avoid issues with fragmented cache coherent
327 * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
328 * The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
329 *
330 * Each DMA-able buffer may be allocated independently, or may be carved out
331 * of a single large contiguous region that is registered with the protocol
332 * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
333 * must not span across 0x00000000FFFFFFFF (to avoid dongle side 64bit ptr arithmetic).
334 *
335 * No flowring pool action is performed in dhd_prot_attach(), as the number
336 * of h2d rings is not yet known.
337 *
338 * In dhd_prot_init(), the dongle advertized number of h2d rings is used to
339 * determine the number of flowrings required, and a pool of msgbuf_rings are
340 * allocated and a DMA-able buffer (carved or allocated) is attached.
341 * See: dhd_prot_flowrings_pool_attach()
342 *
343 * A flowring msgbuf_ring object may be fetched from this pool during flowring
344 * creation, using the flowid. Likewise, flowrings may be freed back into the
345 * pool on flowring deletion.
346 * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
347 *
348 * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
349 * are detached (returned back to the carved region or freed), and the pool of
350 * msgbuf_ring and any objects allocated against it are freed.
351 * See: dhd_prot_flowrings_pool_detach()
352 *
353 * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a
354 * state as-if upon an attach. All DMA-able buffers are retained.
355 * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
356 * pool attach will notice that the pool persists and continue to use it. This
357 * will avoid the case of a fragmented DMA-able region.
358 *
359 * +----------------------------------------------------------------------------
360 */
361
362 /* Conversion of a flowid to a flowring pool index */
363 #define DHD_FLOWRINGS_POOL_OFFSET(flowid) \
364 ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)
365
366 /* Fetch the msgbuf_ring_t from the flowring pool given a flowid */
367 #define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \
368 (msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + \
369 DHD_FLOWRINGS_POOL_OFFSET(flowid)
370
371 /* Traverse each flowring in the flowring pool, assigning ring and flowid */
372 #define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) \
373 for ((flowid) = DHD_FLOWRING_START_FLOWID, \
374 (ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \
375 (flowid) < ((total_flowrings) + DHD_FLOWRING_START_FLOWID); \
376 (ring)++, (flowid)++)
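/*
 * Illustrative sketch (documentation only, not compiled): traversing the
 * flowring pool with the iterator above, assuming the number of flowrings is
 * h2d_rings_total minus the H2D common rings.
 */
#if 0
	msgbuf_ring_t *ring;
	uint16 flowid;
	uint16 h2d_flowrings_total =
		dhd->prot->h2d_rings_total - BCMPCIE_H2D_COMMON_MSGRINGS;

	FOREACH_RING_IN_FLOWRINGS_POOL(dhd->prot, ring, flowid, h2d_flowrings_total) {
		dhd_prot_ring_reset(dhd, ring); /* e.g. reset each pooled flowring */
	}
#endif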
377
378 /* Used in loopback tests */
379 typedef struct dhd_dmaxfer {
380 dhd_dma_buf_t srcmem;
381 dhd_dma_buf_t dstmem;
382 uint32 srcdelay;
383 uint32 destdelay;
384 uint32 len;
385 bool in_progress;
386 uint64 start_usec;
387 uint64 time_taken;
388 uint32 d11_lpbk;
389 int status;
390 } dhd_dmaxfer_t;
391
392 /**
393 * msgbuf_ring : This object manages the host side ring that includes a DMA-able
394 * buffer, the WR and RD indices, ring parameters such as the max number of
395 * items and the length of each item, and other miscellaneous runtime state.
396 * A msgbuf_ring may be used to represent a H2D or D2H common ring or a
397 * H2D TxPost ring as specified in the PCIE FullDongle Spec.
398 * Ring parameters are conveyed to the dongle, which maintains its own peer end
399 * ring state. Depending on whether the DMA Indices feature is supported, the
400 * host will update the WR/RD index in the DMA indices array in host memory or
401 * directly in dongle memory.
402 */
403 typedef struct msgbuf_ring {
404 bool inited;
405 uint16 idx; /* ring id */
406 uint16 rd; /* read index */
407 uint16 curr_rd; /* read index for debug */
408 uint16 wr; /* write index */
409 uint16 max_items; /* maximum number of items in ring */
410 uint16 item_len; /* length of each item in the ring */
411 sh_addr_t base_addr; /* LITTLE ENDIAN formatted: base address */
412 dhd_dma_buf_t dma_buf; /* DMA-able buffer: pa, va, len, dmah, secdma */
413 uint32 seqnum; /* next expected item's sequence number */
414 #ifdef TXP_FLUSH_NITEMS
415 void *start_addr;
416 /* # of messages on ring not yet announced to dongle */
417 uint16 pend_items_count;
418 #endif /* TXP_FLUSH_NITEMS */
419
420 uint8 ring_type;
421 uint16 hwa_db_type; /* hwa type non-zero for Data path rings */
422 uint8 n_completion_ids;
423 bool create_pending;
424 uint16 create_req_id;
425 uint8 current_phase;
426 uint16 compeltion_ring_ids[MAX_COMPLETION_RING_IDS_ASSOCIATED];
427 uchar name[RING_NAME_MAX_LENGTH];
428 uint32 ring_mem_allocated;
429 void *ring_lock;
430 } msgbuf_ring_t;
431
432 #define DHD_RING_BGN_VA(ring) ((ring)->dma_buf.va)
433 #define DHD_RING_END_VA(ring) \
434 ((uint8 *)(DHD_RING_BGN_VA((ring))) + \
435 (((ring)->max_items - 1) * (ring)->item_len))
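/*
 * Illustrative sketch (documentation only, not compiled): locating a work item
 * by index within the ring's DMA-able buffer. Note that DHD_RING_END_VA()
 * points at the start of the last item, not one past the end of the buffer.
 */
#if 0
	uint8 *item_va = (uint8 *)DHD_RING_BGN_VA(ring) +
		((uint32)ring->rd * ring->item_len);

	ASSERT(item_va <= (uint8 *)DHD_RING_END_VA(ring));
#endif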
436
437 /* This can be overwritten by module parameter defined in dhd_linux.c
438 * or by dhd iovar h2d_max_txpost.
439 */
440 int h2d_max_txpost = H2DRING_TXPOST_MAX_ITEM;
441
442 /** DHD protocol handle. Is an opaque type to other DHD software layers. */
443 typedef struct dhd_prot {
444 osl_t *osh; /* OSL handle */
445 uint16 rxbufpost_sz;
446 uint16 rxbufpost;
447 uint16 max_rxbufpost;
448 uint16 max_eventbufpost;
449 uint16 max_ioctlrespbufpost;
450 uint16 max_tsbufpost;
451 uint16 max_infobufpost;
452 uint16 infobufpost;
453 uint16 cur_event_bufs_posted;
454 uint16 cur_ioctlresp_bufs_posted;
455 uint16 cur_ts_bufs_posted;
456
457 /* Flow control mechanism based on active transmits pending */
458 osl_atomic_t active_tx_count; /* increments/decrements on every packet tx/tx_status */
459 uint16 h2d_max_txpost;
460 uint16 txp_threshold; /* optimization to write "n" tx items at a time to ring */
461
462 /* MsgBuf Ring info: has a dhd_dma_buf that is dynamically allocated */
463 msgbuf_ring_t h2dring_ctrl_subn; /* H2D ctrl message submission ring */
464 msgbuf_ring_t h2dring_rxp_subn; /* H2D RxBuf post ring */
465 msgbuf_ring_t d2hring_ctrl_cpln; /* D2H ctrl completion ring */
466 msgbuf_ring_t d2hring_tx_cpln; /* D2H Tx complete message ring */
467 msgbuf_ring_t d2hring_rx_cpln; /* D2H Rx complete message ring */
468 msgbuf_ring_t *h2dring_info_subn; /* H2D info submission ring */
469 msgbuf_ring_t *d2hring_info_cpln; /* D2H info completion ring */
470 msgbuf_ring_t *d2hring_edl; /* D2H Enhanced Debug Lane (EDL) ring */
471
472 msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowrings */
473 dhd_dma_buf_t flowrings_dma_buf; /* Contiguous DMA buffer for flowrings */
474 uint16 h2d_rings_total; /* total H2D (common rings + flowrings) */
475
476 uint32 rx_dataoffset;
477
478 dhd_mb_ring_t mb_ring_fn; /* called when dongle needs to be notified of new msg */
479 dhd_mb_ring_2_t mb_2_ring_fn; /* called when dongle needs to be notified of new msg */
480
481 /* ioctl related resources */
482 uint8 ioctl_state;
483 int16 ioctl_status; /* status returned from dongle */
484 uint16 ioctl_resplen;
485 dhd_ioctl_recieved_status_t ioctl_received;
486 uint curr_ioctl_cmd;
487 dhd_dma_buf_t retbuf; /* For holding ioctl response */
488 dhd_dma_buf_t ioctbuf; /* For holding ioctl request */
489
490 dhd_dma_buf_t d2h_dma_scratch_buf; /* For holding d2h scratch */
491
492 /* DMA-able arrays for holding WR and RD indices */
493 uint32 rw_index_sz; /* Size of a RD or WR index in dongle */
494 dhd_dma_buf_t h2d_dma_indx_wr_buf; /* Array of H2D WR indices */
495 dhd_dma_buf_t h2d_dma_indx_rd_buf; /* Array of H2D RD indices */
496 dhd_dma_buf_t d2h_dma_indx_wr_buf; /* Array of D2H WR indices */
497 dhd_dma_buf_t d2h_dma_indx_rd_buf; /* Array of D2H RD indices */
498 dhd_dma_buf_t h2d_ifrm_indx_wr_buf; /* Array of H2D WR indices for ifrm */
499
500 dhd_dma_buf_t host_bus_throughput_buf; /* bus throughput measure buffer */
501
502 dhd_dma_buf_t *flowring_buf; /* pool of flow ring buf */
503 uint32 flowring_num;
504
505 d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */
506 #ifdef EWP_EDL
507 d2h_edl_sync_cb_t d2h_edl_sync_cb; /* Sync on EDL D2H DMA done: SEQNUM or XORCSUM */
508 #endif /* EWP_EDL */
509 ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */
510 ulong d2h_sync_wait_tot; /* total wait loops */
511
512 dhd_dmaxfer_t dmaxfer; /* for test/DMA loopback */
513
514 uint16 ioctl_seq_no;
515 uint16 data_seq_no;
516 uint16 ioctl_trans_id;
517 void *pktid_ctrl_map; /* a pktid maps to a packet and its metadata */
518 void *pktid_rx_map; /* pktid map for rx path */
519 void *pktid_tx_map; /* pktid map for tx path */
520 bool metadata_dbg;
521 void *pktid_map_handle_ioctl;
522 #ifdef DHD_MAP_PKTID_LOGGING
523 void *pktid_dma_map; /* pktid map for DMA MAP */
524 void *pktid_dma_unmap; /* pktid map for DMA UNMAP */
525 #endif /* DHD_MAP_PKTID_LOGGING */
526 uint32 pktid_depleted_cnt; /* pktid depleted count */
527 /* netif tx queue stop count */
528 uint8 pktid_txq_stop_cnt;
529 /* netif tx queue start count */
530 uint8 pktid_txq_start_cnt;
531 uint64 ioctl_fillup_time; /* timestamp for ioctl fillup */
532 uint64 ioctl_ack_time; /* timestamp for ioctl ack */
533 uint64 ioctl_cmplt_time; /* timestamp for ioctl completion */
534
535 /* Applications/utilities can read tx and rx metadata using IOVARs */
536 uint16 rx_metadata_offset;
537 uint16 tx_metadata_offset;
538
539 #if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
540 /* Host's soft doorbell configuration */
541 bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS];
542 #endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
543
544 /* Work Queues to be used by the producer and the consumer, and the threshold
545 * at which the WRITE index must be synced to the consumer's workq
546 */
547 #if defined(DHD_LB_TXC)
548 uint32 tx_compl_prod_sync ____cacheline_aligned;
549 bcm_workq_t tx_compl_prod, tx_compl_cons;
550 #endif /* DHD_LB_TXC */
551 #if defined(DHD_LB_RXC)
552 uint32 rx_compl_prod_sync ____cacheline_aligned;
553 bcm_workq_t rx_compl_prod, rx_compl_cons;
554 #endif /* DHD_LB_RXC */
555
556 dhd_dma_buf_t fw_trap_buf; /* firmware trap buffer */
557
558 uint32 host_ipc_version; /* Host supported IPC rev */
559 uint32 device_ipc_version; /* FW supported IPC rev */
560 uint32 active_ipc_version; /* Host advertised IPC rev */
561 dhd_dma_buf_t hostts_req_buf; /* For holding host timestamp request buf */
562 bool hostts_req_buf_inuse;
563 bool rx_ts_log_enabled;
564 bool tx_ts_log_enabled;
565 bool no_retry;
566 bool no_aggr;
567 bool fixed_rate;
568 dhd_dma_buf_t host_scb_buf; /* scb host offload buffer */
569 #ifdef DHD_HP2P
570 msgbuf_ring_t *d2hring_hp2p_txcpl; /* D2H HPP Tx completion ring */
571 msgbuf_ring_t *d2hring_hp2p_rxcpl; /* D2H HPP Rx completion ring */
572 #endif /* DHD_HP2P */
573 bool no_tx_resource;
574 } dhd_prot_t;
575
576 #ifdef DHD_EWPR_VER2
577 #define HANG_INFO_BASE64_BUFFER_SIZE 640
578 #endif // endif
579
580 #ifdef DHD_DUMP_PCIE_RINGS
581 static
582 int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
583 const void *user_buf, unsigned long *file_posn);
584 #ifdef EWP_EDL
585 static
586 int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf,
587 unsigned long *file_posn);
588 #endif /* EWP_EDL */
589 #endif /* DHD_DUMP_PCIE_RINGS */
590
591 extern bool dhd_timesync_delay_post_bufs(dhd_pub_t *dhdp);
592 extern void dhd_schedule_dmaxfer_free(dhd_pub_t* dhdp, dmaxref_mem_map_t *dmmap);
593 /* Convert a dmaaddr_t to a base_addr with htol operations */
594 static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa);
595
596 /* APIs for managing a DMA-able buffer */
597 static int dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
598 static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
599
600 /* msgbuf ring management */
601 static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring,
602 const char *name, uint16 max_items, uint16 len_item, uint16 ringid);
603 static void dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring);
604 static void dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring);
605 static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring);
606 static void dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf);
607
608 /* Pool of pre-allocated msgbuf_ring_t with DMA-able buffers for Flowrings */
609 static int dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd);
610 static void dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd);
611 static void dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd);
612
613 /* Fetch and Release a flowring msgbuf_ring from flowring pool */
614 static msgbuf_ring_t *dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd,
615 uint16 flowid);
616 /* see also dhd_prot_flowrings_pool_release() in dhd_prot.h */
617
618 /* Producer: Allocate space in a msgbuf ring */
619 static void* dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
620 uint16 nitems, uint16 *alloced, bool exactly_nitems);
621 static void* dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems,
622 uint16 *alloced, bool exactly_nitems);
623
624 /* Consumer: Determine the location where the next message may be consumed */
625 static uint8* dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring,
626 uint32 *available_len);
627
628 /* Producer (WR index update) or Consumer (RD index update) indication */
629 static void dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t *ring,
630 void *p, uint16 len);
631 static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring);
632
633 static INLINE int dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
634 dhd_dma_buf_t *dma_buf, uint32 bufsz);
635
636 /* Set/Get a RD or WR index in the array of indices */
637 /* See also: dhd_prot_dma_indx_init() */
638 void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type,
639 uint16 ringid);
640 static uint16 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid);
641
642 /* Locate a packet given a pktid */
643 static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype,
644 bool free_pktid);
645 /* Locate a packet given a PktId and free it. */
646 static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send);
647
648 static int dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
649 void *buf, uint len, uint8 action);
650 static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
651 void *buf, uint len, uint8 action);
652 static int dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf);
653 static int dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd,
654 void *buf, int ifidx);
655
656 /* Post buffers for Rx, control ioctl response and events */
657 static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msgid, uint32 max_to_post);
658 static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub);
659 static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub);
660 static void dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid);
661 static int dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid);
662 static int dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *pub);
663
664 static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt);
665
666 /* D2H Message handling */
667 static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len);
668
669 /* D2H Message handlers */
670 static void dhd_prot_noop(dhd_pub_t *dhd, void *msg);
671 static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg);
672 static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg);
673 static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg);
674 static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg);
675 static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg);
676 static void dhd_prot_event_process(dhd_pub_t *dhd, void *msg);
677
678 /* Loopback test with dongle */
679 static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma);
680 static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay,
681 uint destdelay, dhd_dmaxfer_t *dma);
682 static void dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg);
683
684 /* Flowring management communication with dongle */
685 static void dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg);
686 static void dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg);
687 static void dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg);
688 static void dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg);
689 static void dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg);
690
691 /* Monitor Mode */
692 #ifdef WL_MONITOR
693 extern bool dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx);
694 extern void dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx);
695 #endif /* WL_MONITOR */
696
697 /* Configure a soft doorbell per D2H ring */
698 static void dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd);
699 static void dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg);
700 static void dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf);
701 static void dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf);
702 static void dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf);
703 static void dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf);
704 static void dhd_prot_detach_info_rings(dhd_pub_t *dhd);
705 #ifdef DHD_HP2P
706 static void dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd);
707 #endif /* DHD_HP2P */
708 #ifdef EWP_EDL
709 static void dhd_prot_detach_edl_rings(dhd_pub_t *dhd);
710 #endif // endif
711 static void dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf);
712 static void dhd_prot_process_snapshot_complete(dhd_pub_t *dhd, void *buf);
713
714 #ifdef DHD_HP2P
715 static void dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus);
716 static void dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus);
717 static void dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid);
718 static void dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc);
719 #endif // endif
720 typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg);
721
722 /** callback functions for messages generated by the dongle */
723 #define MSG_TYPE_INVALID 0
724
725 static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = {
726 dhd_prot_noop, /* 0 is MSG_TYPE_INVALID */
727 dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */
728 dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */
729 NULL,
730 dhd_prot_flow_ring_create_response_process, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */
731 NULL,
732 dhd_prot_flow_ring_delete_response_process, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */
733 NULL,
734 dhd_prot_flow_ring_flush_response_process, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */
735 NULL,
736 dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */
737 NULL,
738 dhd_prot_ioctcmplt_process, /* MSG_TYPE_IOCTL_CMPLT */
739 NULL,
740 dhd_prot_event_process, /* MSG_TYPE_WL_EVENT */
741 NULL,
742 dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */
743 NULL,
744 NULL, /* MSG_TYPE_RX_CMPLT use dedicated handler */
745 NULL,
746 dhd_msgbuf_dmaxfer_process, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */
747 NULL, /* MSG_TYPE_FLOW_RING_RESUME */
748 dhd_prot_process_flow_ring_resume_response, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */
749 NULL, /* MSG_TYPE_FLOW_RING_SUSPEND */
750 dhd_prot_process_flow_ring_suspend_response, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */
751 NULL, /* MSG_TYPE_INFO_BUF_POST */
752 dhd_prot_process_infobuf_complete, /* MSG_TYPE_INFO_BUF_CMPLT */
753 NULL, /* MSG_TYPE_H2D_RING_CREATE */
754 NULL, /* MSG_TYPE_D2H_RING_CREATE */
755 dhd_prot_process_h2d_ring_create_complete, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */
756 dhd_prot_process_d2h_ring_create_complete, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */
757 NULL, /* MSG_TYPE_H2D_RING_CONFIG */
758 NULL, /* MSG_TYPE_D2H_RING_CONFIG */
759 NULL, /* MSG_TYPE_H2D_RING_CONFIG_CMPLT */
760 dhd_prot_process_d2h_ring_config_complete, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT */
761 NULL, /* MSG_TYPE_H2D_MAILBOX_DATA */
762 dhd_prot_process_d2h_mb_data, /* MSG_TYPE_D2H_MAILBOX_DATA */
763 NULL, /* MSG_TYPE_TIMSTAMP_BUFPOST */
764 NULL, /* MSG_TYPE_HOSTTIMSTAMP */
765 dhd_prot_process_d2h_host_ts_complete, /* MSG_TYPE_HOSTTIMSTAMP_CMPLT */
766 dhd_prot_process_fw_timestamp, /* MSG_TYPE_FIRMWARE_TIMESTAMP */
767 NULL, /* MSG_TYPE_SNAPSHOT_UPLOAD */
768 dhd_prot_process_snapshot_complete, /* MSG_TYPE_SNAPSHOT_CMPLT */
769 };
770
771 #ifdef DHD_RX_CHAINING
772
773 #define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \
774 (dhd_wet_chainable(dhd) && \
775 dhd_rx_pkt_chainable((dhd), (ifidx)) && \
776 !ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \
777 !ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \
778 !eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \
779 !eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \
780 ((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \
781 ((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \
782 (((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6))))
783
784 static INLINE void BCMFASTPATH dhd_rxchain_reset(rxchain_info_t *rxchain);
785 static void BCMFASTPATH dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx);
786 static void BCMFASTPATH dhd_rxchain_commit(dhd_pub_t *dhd);
787
788 #define DHD_PKT_CTF_MAX_CHAIN_LEN 64
789
790 #endif /* DHD_RX_CHAINING */
791
792 #define DHD_LPBKDTDUMP_ON() (dhd_msg_level & DHD_LPBKDTDUMP_VAL)
793
794 static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd);
795
796 bool
797 dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info)
798 {
799 msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)prot_info;
800 uint16 rd, wr;
801 bool ret;
802
803 if (dhd->dma_d2h_ring_upd_support) {
804 wr = flow_ring->wr;
805 } else {
806 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
807 }
808 if (dhd->dma_h2d_ring_upd_support) {
809 rd = flow_ring->rd;
810 } else {
811 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
812 }
813 ret = (wr == rd) ? TRUE : FALSE;
814 return ret;
815 }
816
817 void
818 dhd_prot_dump_ring_ptrs(void *prot_info)
819 {
820 msgbuf_ring_t *ring = (msgbuf_ring_t *)prot_info;
821 DHD_ERROR(("%s curr_rd: %d rd: %d wr: %d \n", __FUNCTION__,
822 ring->curr_rd, ring->rd, ring->wr));
823 }
824
825 uint16
826 dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd)
827 {
828 return (uint16)h2d_max_txpost;
829 }
830 void
831 dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost)
832 {
833 h2d_max_txpost = max_txpost;
834 }
835 /**
836 * D2H DMA to completion callback handlers. Based on the mode advertised by the
837 * dongle through the PCIE shared region, the appropriate callback will be
838 * registered in the proto layer to be invoked prior to processing any message
839 * from a D2H DMA ring. If the dongle uses a read barrier or another mode that
840 * does not require host participation, then a noop callback handler will be
841 * bound that simply returns the msg_type.
842 */
843 static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring,
844 uint32 tries, volatile uchar *msg, int msglen);
845 static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
846 volatile cmn_msg_hdr_t *msg, int msglen);
847 static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
848 volatile cmn_msg_hdr_t *msg, int msglen);
849 static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
850 volatile cmn_msg_hdr_t *msg, int msglen);
851 static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd);
852 static int dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
853 uint16 ring_type, uint32 id);
854 static int dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
855 uint8 type, uint32 id);
856
857 /**
858 * dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has
859 * not completed, a livelock condition occurs. Host will avert this livelock by
860 * dropping this message and moving to the next. This dropped message can lead
861 * to a packet leak, or even something disastrous in the case the dropped
862 * message happens to be a control response.
863 * Here we will log this condition. One may choose to reboot the dongle.
864 *
865 */
866 static void
867 dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring, uint32 tries,
868 volatile uchar *msg, int msglen)
869 {
870 uint32 ring_seqnum = ring->seqnum;
871
872 if (dhd_query_bus_erros(dhd)) {
873 return;
874 }
875
876 DHD_ERROR((
877 "LIVELOCK DHD<%p> ring<%s> msg_seqnum<%u> ring_seqnum<%u:%u> tries<%u> max<%lu>"
878 " tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d> rd<%d> wr<%d>\n",
879 dhd, ring->name, msg_seqnum, ring_seqnum, ring_seqnum % D2H_EPOCH_MODULO, tries,
880 dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot,
881 ring->dma_buf.va, msg, ring->curr_rd, ring->rd, ring->wr));
882
883 dhd_prhex("D2H MsgBuf Failure", msg, msglen, DHD_ERROR_VAL);
884
885 /* Try to resume if already suspended or suspend in progress */
886 #ifdef DHD_PCIE_RUNTIMEPM
887 dhdpcie_runtime_bus_wake(dhd, CAN_SLEEP(), __builtin_return_address(0));
888 #endif /* DHD_PCIE_RUNTIMEPM */
889
890 /* Skip if still in suspended or suspend in progress */
891 if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) {
892 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
893 __FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state));
894 goto exit;
895 }
896
897 dhd_bus_dump_console_buffer(dhd->bus);
898 dhd_prot_debug_info_print(dhd);
899
900 #ifdef DHD_FW_COREDUMP
901 if (dhd->memdump_enabled) {
902 /* collect core dump */
903 dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK;
904 dhd_bus_mem_dump(dhd);
905 }
906 #endif /* DHD_FW_COREDUMP */
907
908 exit:
909 dhd_schedule_reset(dhd);
910
911 #ifdef OEM_ANDROID
912 #ifdef SUPPORT_LINKDOWN_RECOVERY
913 #ifdef CONFIG_ARCH_MSM
914 dhd->bus->no_cfg_restore = 1;
915 #endif /* CONFIG_ARCH_MSM */
916 dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK;
917 dhd_os_send_hang_message(dhd);
918 #endif /* SUPPORT_LINKDOWN_RECOVERY */
919 #endif /* OEM_ANDROID */
920 dhd->livelock_occured = TRUE;
921 }
922
923 /**
924 * dhd_prot_d2h_sync_seqnum - Sync on a D2H DMA completion using the SEQNUM
925 * mode. Sequence number is always in the last word of a message.
926 */
927 static uint8 BCMFASTPATH
928 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
929 volatile cmn_msg_hdr_t *msg, int msglen)
930 {
931 uint32 tries;
932 uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
933 int num_words = msglen / sizeof(uint32); /* num of 32bit words */
934 volatile uint32 *marker = (volatile uint32 *)msg + (num_words - 1); /* last word */
935 dhd_prot_t *prot = dhd->prot;
936 uint32 msg_seqnum;
937 uint32 step = 0;
938 uint32 delay = PCIE_D2H_SYNC_DELAY;
939 uint32 total_tries = 0;
940
941 ASSERT(msglen == ring->item_len);
942
943 BCM_REFERENCE(delay);
944 /*
945 * For retries we have to make some sort of stepper algorithm.
946 * We see that every time when the Dongle comes out of the D3
947 * Cold state, the first D2H mem2mem DMA takes more time to
948 * complete, leading to livelock issues.
949 *
950 * Case 1 - Apart from the Host CPU, some other bus master is
951 * accessing the DDR port, probably a page close to the ring,
952 * so PCIe does not get a chance to update the memory.
953 * Solution - Increase the number of tries.
954 *
955 * Case 2 - The 50usec delay given by the Host CPU is not
956 * sufficient for the PCIe RC to start its work.
957 * In this case the breathing time of 50usec given by
958 * the Host CPU is not sufficient.
959 * Solution: Increase the delay in a stepper fashion.
960 * This is done to ensure that no unwanted extra delay is
961 * introduced in normal conditions.
962 */
963 for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
964 for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
965 msg_seqnum = *marker;
966 if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma upto last word done */
967 ring->seqnum++; /* next expected sequence number */
968 /* Check for LIVELOCK induce flag, which is set by firing
969 * dhd iovar to induce LIVELOCK error. If flag is set,
970 * MSG_TYPE_INVALID is returned, which results in a LIVELOCK error.
971 */
972 if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
973 goto dma_completed;
974 }
975 }
976
977 total_tries = (uint32)(((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries);
978
979 if (total_tries > prot->d2h_sync_wait_max)
980 prot->d2h_sync_wait_max = total_tries;
981
982 OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
983 OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
984 OSL_DELAY(delay * step); /* Add stepper delay */
985
986 } /* for PCIE_D2H_SYNC_WAIT_TRIES */
987 } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
988
989 dhd_prot_d2h_sync_livelock(dhd, msg_seqnum, ring, total_tries,
990 (volatile uchar *) msg, msglen);
991
992 ring->seqnum++; /* skip this message ... leak of a pktid */
993 return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
994
995 dma_completed:
996
997 prot->d2h_sync_wait_tot += tries;
998 return msg->msg_type;
999 }
1000
1001 /**
1002 * dhd_prot_d2h_sync_xorcsum - Sync on a D2H DMA completion using the XORCSUM
1003 * mode. The xorcsum is placed in the last word of a message. Dongle will also
1004 * place a seqnum in the epoch field of the cmn_msg_hdr.
1005 */
1006 static uint8 BCMFASTPATH
1007 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1008 volatile cmn_msg_hdr_t *msg, int msglen)
1009 {
1010 uint32 tries;
1011 uint32 prot_checksum = 0; /* computed checksum */
1012 int num_words = msglen / sizeof(uint32); /* num of 32bit words */
1013 uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
1014 dhd_prot_t *prot = dhd->prot;
1015 uint32 step = 0;
1016 uint32 delay = PCIE_D2H_SYNC_DELAY;
1017 uint32 total_tries = 0;
1018
1019 ASSERT(msglen == ring->item_len);
1020
1021 BCM_REFERENCE(delay);
1022 /*
1023 * For retries we have to make some sort of stepper algorithm.
1024 * We see that every time when the Dongle comes out of the D3
1025 * Cold state, the first D2H mem2mem DMA takes more time to
1026 * complete, leading to livelock issues.
1027 *
1028 * Case 1 - Apart from the Host CPU, some other bus master is
1029 * accessing the DDR port, probably a page close to the ring,
1030 * so PCIe does not get a chance to update the memory.
1031 * Solution - Increase the number of tries.
1032 *
1033 * Case 2 - The 50usec delay given by the Host CPU is not
1034 * sufficient for the PCIe RC to start its work.
1035 * In this case the breathing time of 50usec given by
1036 * the Host CPU is not sufficient.
1037 * Solution: Increase the delay in a stepper fashion.
1038 * This is done to ensure that no unwanted extra delay is
1039 * introduced in normal conditions.
1040 */
1041 for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
1042 for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
1043 /* First verify that the seqnum has been updated;
1044 * only then check the xorcsum.
1045 * Once both the seqnum and xorcsum are correct,
1046 * the complete message has arrived.
1047 */
1048 if (msg->epoch == ring_seqnum) {
1049 prot_checksum = bcm_compute_xor32((volatile uint32 *)msg,
1050 num_words);
1051 if (prot_checksum == 0U) { /* checksum is OK */
1052 ring->seqnum++; /* next expected sequence number */
1053 /* Check for LIVELOCK induce flag, which is set by firing
1054 * dhd iovar to induce LIVELOCK error. If flag is set,
1055 * MSG_TYPE_INVALID is returned, which results in a
1056 * LIVELOCK error.
1057 */
1058 if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
1059 goto dma_completed;
1060 }
1061 }
1062 }
1063
1064 total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
1065
1066 if (total_tries > prot->d2h_sync_wait_max)
1067 prot->d2h_sync_wait_max = total_tries;
1068
1069 OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
1070 OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
1071 OSL_DELAY(delay * step); /* Add stepper delay */
1072
1073 } /* for PCIE_D2H_SYNC_WAIT_TRIES */
1074 } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
1075
1076 DHD_ERROR(("%s: prot_checksum = 0x%x\n", __FUNCTION__, prot_checksum));
1077 dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
1078 (volatile uchar *) msg, msglen);
1079
1080 ring->seqnum++; /* skip this message ... leak of a pktid */
1081 return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
1082
1083 dma_completed:
1084
1085 prot->d2h_sync_wait_tot += tries;
1086 return msg->msg_type;
1087 }
1088
1089 /**
1090 * dhd_prot_d2h_sync_none - Dongle ensures that the DMA will complete, so the host
1091 * does not need to sync. This noop sync handler will be bound when the dongle
1092 * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required.
1093 */
1094 static uint8 BCMFASTPATH
1095 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1096 volatile cmn_msg_hdr_t *msg, int msglen)
1097 {
1098 /* Check for LIVELOCK induce flag, which is set by firing
1099 * dhd iovar to induce LIVELOCK error. If flag is set,
1100 * MSG_TYPE_INVALID is returned, which results in a LIVELOCK error.
1101 */
1102 if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
1103 DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
1104 return MSG_TYPE_INVALID;
1105 } else {
1106 return msg->msg_type;
1107 }
1108 }
1109
1110 #ifdef EWP_EDL
1111 /**
1112 * dhd_prot_d2h_sync_edl - Sync on a D2H DMA completion by validating the cmn_msg_hdr_t
1113 * header values at both the beginning and end of the payload.
1114 * The cmn_msg_hdr_t is placed at the start and end of the payload
1115 * in each work item in the EDL ring.
1116 * Dongle will place a seqnum inside the cmn_msg_hdr_t 'epoch' field
1117 * and the length of the payload in the 'request_id' field.
1118 * Structure of each work item in the EDL ring:
1119 * | cmn_msg_hdr_t | payload (var len) | cmn_msg_hdr_t |
1120 * NOTE: - it was felt that calculating xorcsum for the entire payload (max length of 1648 bytes) is
1121 * too costly on the dongle side and might take up too many ARM cycles,
1122 * hence the xorcsum sync method is not being used for EDL ring.
1123 */
1124 static int
1125 BCMFASTPATH(dhd_prot_d2h_sync_edl)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1126 volatile cmn_msg_hdr_t *msg)
1127 {
1128 uint32 tries;
1129 int msglen = 0, len = 0;
1130 uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
1131 dhd_prot_t *prot = dhd->prot;
1132 uint32 step = 0;
1133 uint32 delay = PCIE_D2H_SYNC_DELAY;
1134 uint32 total_tries = 0;
1135 volatile cmn_msg_hdr_t *trailer = NULL;
1136 volatile uint8 *buf = NULL;
1137 bool valid_msg = FALSE;
1138
1139 BCM_REFERENCE(delay);
1140 /*
1141 * For retries we have to make some sort of stepper algorithm.
1142 * We see that every time when the Dongle comes out of the D3
1143 * Cold state, the first D2H mem2mem DMA takes more time to
1144 * complete, leading to livelock issues.
1145 *
1146 * Case 1 - Apart from the Host CPU, some other bus master is
1147 * accessing the DDR port, probably a page close to the ring,
1148 * so PCIe does not get a chance to update the memory.
1149 * Solution - Increase the number of tries.
1150 *
1151 * Case 2 - The 50usec delay given by the Host CPU is not
1152 * sufficient for the PCIe RC to start its work.
1153 * In this case the breathing time of 50usec given by
1154 * the Host CPU is not sufficient.
1155 * Solution: Increase the delay in a stepper fashion.
1156 * This is done to ensure that no unwanted extra delay is
1157 * introduced in normal conditions.
1158 */
1159 for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
1160 for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
1161 /* First verify if the seqnumber has been updated,
1162 * if yes, only then validate the header and trailer.
1163 * Once seqnum, header and trailer have been validated, it means
1164 * that the complete message has arrived.
1165 */
1166 valid_msg = FALSE;
1167 if (msg->epoch == ring_seqnum &&
1168 msg->msg_type == MSG_TYPE_INFO_PYLD &&
1169 msg->request_id > 0 &&
1170 msg->request_id <= ring->item_len) {
1171 /* proceed to check trailer only if header is valid */
1172 buf = (volatile uint8 *)msg;
1173 msglen = sizeof(cmn_msg_hdr_t) + msg->request_id;
1174 buf += msglen;
1175 if (msglen + sizeof(cmn_msg_hdr_t) <= ring->item_len) {
1176 trailer = (volatile cmn_msg_hdr_t *)buf;
1177 valid_msg = (trailer->epoch == ring_seqnum) &&
1178 (trailer->msg_type == msg->msg_type) &&
1179 (trailer->request_id == msg->request_id);
1180 if (!valid_msg) {
1181 DHD_TRACE(("%s:invalid trailer! seqnum=%u;reqid=%u"
1182 " expected, seqnum=%u; reqid=%u. Retrying... \n",
1183 __FUNCTION__, trailer->epoch, trailer->request_id,
1184 msg->epoch, msg->request_id));
1185 }
1186 } else {
1187 DHD_TRACE(("%s: invalid payload length (%u)! Retrying.. \n",
1188 __FUNCTION__, msg->request_id));
1189 }
1190
1191 if (valid_msg) {
1192 /* data is OK */
1193 ring->seqnum++; /* next expected sequence number */
1194 if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
1195 goto dma_completed;
1196 }
1197 }
1198 } else {
1199 DHD_TRACE(("%s: wrong hdr, seqnum expected %u, got %u."
1200 " msg_type=0x%x, request_id=%u."
1201 " Retrying...\n",
1202 __FUNCTION__, ring_seqnum, msg->epoch,
1203 msg->msg_type, msg->request_id));
1204 }
1205
1206 total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
1207
1208 if (total_tries > prot->d2h_sync_wait_max)
1209 prot->d2h_sync_wait_max = total_tries;
1210
1211 OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
1212 OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
1213 OSL_DELAY(delay * step); /* Add stepper delay */
1214
1215 } /* for PCIE_D2H_SYNC_WAIT_TRIES */
1216 } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
1217
1218 DHD_ERROR(("%s: EDL header check fails !\n", __FUNCTION__));
1219 DHD_ERROR(("%s: header: seqnum=%u; expected-seqnum=%u"
1220 " msgtype=0x%x; expected-msgtype=0x%x"
1221 " length=%u; expected-max-length=%u", __FUNCTION__,
1222 msg->epoch, ring_seqnum, msg->msg_type, MSG_TYPE_INFO_PYLD,
1223 msg->request_id, ring->item_len));
1224 dhd_prhex("msg header bytes: ", (volatile uchar *)msg, sizeof(*msg), DHD_ERROR_VAL);
1225 if (trailer && msglen > 0 &&
1226 (msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len) {
1227 DHD_ERROR(("%s: trailer: seqnum=%u; expected-seqnum=%u"
1228 " msgtype=0x%x; expected-msgtype=0x%x"
1229 " length=%u; expected-length=%u", __FUNCTION__,
1230 trailer->epoch, ring_seqnum, trailer->msg_type, MSG_TYPE_INFO_PYLD,
1231 trailer->request_id, msg->request_id));
1232 dhd_prhex("msg trailer bytes: ", (volatile uchar *)trailer,
1233 sizeof(*trailer), DHD_ERROR_VAL);
1234 }
1235
1236 if ((msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len)
1237 len = msglen + sizeof(cmn_msg_hdr_t);
1238 else
1239 len = ring->item_len;
1240
1241 dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
1242 (volatile uchar *) msg, len);
1243
1244 ring->seqnum++; /* skip this message */
1245 return BCME_ERROR; /* header/trailer validation failed; work item skipped */
1246
1247 dma_completed:
1248 DHD_TRACE(("%s: EDL header check pass, seqnum=%u; reqid=%u\n", __FUNCTION__,
1249 msg->epoch, msg->request_id));
1250
1251 prot->d2h_sync_wait_tot += tries;
1252 return BCME_OK;
1253 }
1254
1255 /**
1256 * dhd_prot_d2h_sync_edl_none - Dongle ensures that the DMA will complete, so the host
1257 * does not need to sync. This noop sync handler will be bound when the dongle
1258 * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required.
1259 */
1260 static int BCMFASTPATH
1261 dhd_prot_d2h_sync_edl_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1262 volatile cmn_msg_hdr_t *msg)
1263 {
1264 /* Check for LIVELOCK induce flag, which is set by firing
1265 * dhd iovar to induce LIVELOCK error. If flag is set,
1266 * MSG_TYPE_INVALID is returned, which results in a LIVELOCK error.
1267 */
1268 if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
1269 DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
1270 return BCME_ERROR;
1271 } else {
1272 if (msg->msg_type == MSG_TYPE_INFO_PYLD)
1273 return BCME_OK;
1274 else
1275 return msg->msg_type;
1276 }
1277 }
1278 #endif /* EWP_EDL */
1279
1280 INLINE void
1281 dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason)
1282 {
1283 /* To synchronize with the previous memory operations call wmb() */
1284 OSL_SMP_WMB();
1285 dhd->prot->ioctl_received = reason;
1286 /* Call another wmb() to make sure the event value update is visible before waking up the waiter */
1287 OSL_SMP_WMB();
1288 dhd_os_ioctl_resp_wake(dhd);
1289 }
1290
1291 /**
1292 * dhd_prot_d2h_sync_init - Setup the host side DMA sync mode based on what
1293 * dongle advertizes.
1294 */
1295 static void
1296 dhd_prot_d2h_sync_init(dhd_pub_t *dhd)
1297 {
1298 dhd_prot_t *prot = dhd->prot;
1299 prot->d2h_sync_wait_max = 0UL;
1300 prot->d2h_sync_wait_tot = 0UL;
1301
1302 prot->d2hring_ctrl_cpln.seqnum = D2H_EPOCH_INIT_VAL;
1303 prot->d2hring_ctrl_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
1304
1305 prot->d2hring_tx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
1306 prot->d2hring_tx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
1307
1308 prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
1309 prot->d2hring_rx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
1310
1311 if (HWA_ACTIVE(dhd)) {
1312 prot->d2hring_tx_cpln.hwa_db_type =
1313 (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_TXCPLT) ? HWA_DB_TYPE_TXCPLT : 0;
1314 prot->d2hring_rx_cpln.hwa_db_type =
1315 (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_RXCPLT) ? HWA_DB_TYPE_RXCPLT : 0;
1316 DHD_ERROR(("%s: TXCPLT hwa_db_type:0x%x RXCPLT hwa_db_type:0x%x\n",
1317 __FUNCTION__, prot->d2hring_tx_cpln.hwa_db_type,
1318 prot->d2hring_rx_cpln.hwa_db_type));
1319 }
1320
1321 if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) {
1322 prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum;
1323 #ifdef EWP_EDL
1324 prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
1325 #endif /* EWP_EDL */
1326 DHD_ERROR(("%s(): D2H sync mechanism is SEQNUM \r\n", __FUNCTION__));
1327 } else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) {
1328 prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
1329 #ifdef EWP_EDL
1330 prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
1331 #endif /* EWP_EDL */
1332 DHD_ERROR(("%s(): D2H sync mechanism is XORCSUM \r\n", __FUNCTION__));
1333 } else {
1334 prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
1335 #ifdef EWP_EDL
1336 prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl_none;
1337 #endif /* EWP_EDL */
1338 DHD_ERROR(("%s(): D2H sync mechanism is NONE \r\n", __FUNCTION__));
1339 }
1340 }
1341
1342 /**
1343 * dhd_prot_h2d_sync_init - Per H2D common ring, setup the msgbuf ring seqnum
1344 */
1345 static void
1346 dhd_prot_h2d_sync_init(dhd_pub_t *dhd)
1347 {
1348 dhd_prot_t *prot = dhd->prot;
1349 prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL;
1350
1351 if (HWA_ACTIVE(dhd)) {
1352 prot->h2dring_rxp_subn.hwa_db_type =
1353 (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_RXPOST) ? HWA_DB_TYPE_RXPOST : 0;
1354 DHD_ERROR(("%s: RXPOST hwa_db_type:0x%x\n",
1355 			__FUNCTION__, prot->h2dring_rxp_subn.hwa_db_type));
1356 }
1357
1358 prot->h2dring_rxp_subn.current_phase = 0;
1359
1360 prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL;
1361 prot->h2dring_ctrl_subn.current_phase = 0;
1362 }
1363
1364 /* +----------------- End of PCIE DHD H2D DMA SYNC ------------------------+ */
1365
1366 /*
1367 * +---------------------------------------------------------------------------+
1368 * PCIE DMA-able buffer. Sets up a dhd_dma_buf_t object, which includes the
1369  * virtual and physical address, the buffer length and the DMA handle.
1370  * A secdma handle is also included in the dhd_dma_buf object.
1371 * +---------------------------------------------------------------------------+
1372 */
1373
1374 static INLINE void
1375 dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa)
1376 {
1377 base_addr->low_addr = htol32(PHYSADDRLO(pa));
1378 base_addr->high_addr = htol32(PHYSADDRHI(pa));
1379 }
1380
1381 /**
1382 * dhd_dma_buf_audit - Any audits on a DHD DMA Buffer.
1383 */
1384 static int
1385 dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
1386 {
1387 uint32 pa_lowaddr, end; /* dongle uses 32bit ptr arithmetic */
1388 ASSERT(dma_buf);
1389 pa_lowaddr = PHYSADDRLO(dma_buf->pa);
1390 ASSERT(PHYSADDRLO(dma_buf->pa) || PHYSADDRHI(dma_buf->pa));
1391 ASSERT(ISALIGNED(pa_lowaddr, DMA_ALIGN_LEN));
1392 ASSERT(dma_buf->len != 0);
1393
1394 /* test 32bit offset arithmetic over dma buffer for loss of carry-over */
1395 end = (pa_lowaddr + dma_buf->len); /* end address */
1396
1397 if ((end & 0xFFFFFFFF) < (pa_lowaddr & 0xFFFFFFFF)) { /* exclude carryover */
1398 DHD_ERROR(("%s: dma_buf %x len %d spans dongle 32bit ptr arithmetic\n",
1399 __FUNCTION__, pa_lowaddr, dma_buf->len));
1400 return BCME_ERROR;
1401 }
1402
1403 return BCME_OK;
1404 }
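/*
 * Worked example of the 32bit wrap check above (illustrative values only):
 * with pa_lowaddr = 0xFFFFF000 and len = 0x2000, the truncated 32bit sum is
 * end = 0x00001000, which is smaller than pa_lowaddr, so the buffer would
 * straddle the dongle's 32bit pointer arithmetic and the audit returns
 * BCME_ERROR. A buffer at 0x7FFFF000 with the same length yields
 * end = 0x80001000 > pa_lowaddr and passes the audit.
 */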
1405
1406 /**
1407 * dhd_dma_buf_alloc - Allocate a cache coherent DMA-able buffer.
1408 * returns BCME_OK=0 on success
1409 * returns non-zero negative error value on failure.
1410 */
1411 int
1412 dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len)
1413 {
1414 uint32 dma_pad = 0;
1415 osl_t *osh = dhd->osh;
1416 uint16 dma_align = DMA_ALIGN_LEN;
1417 uint32 rem = 0;
1418
1419 ASSERT(dma_buf != NULL);
1420 ASSERT(dma_buf->va == NULL);
1421 ASSERT(dma_buf->len == 0);
1422
1423 /* Pad the buffer length to align to cacheline size. */
1424 rem = (buf_len % DHD_DMA_PAD);
1425 dma_pad = rem ? (DHD_DMA_PAD - rem) : 0;
1426
1427 dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad,
1428 dma_align, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah);
1429
1430 if (dma_buf->va == NULL) {
1431 DHD_ERROR(("%s: buf_len %d, no memory available\n",
1432 __FUNCTION__, buf_len));
1433 return BCME_NOMEM;
1434 }
1435
1436 dma_buf->len = buf_len; /* not including padded len */
1437
1438 if (dhd_dma_buf_audit(dhd, dma_buf) != BCME_OK) { /* audit dma buf */
1439 dhd_dma_buf_free(dhd, dma_buf);
1440 return BCME_ERROR;
1441 }
1442
1443 dhd_dma_buf_reset(dhd, dma_buf); /* zero out and cache flush */
1444
1445 return BCME_OK;
1446 }
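/*
 * Minimal usage sketch for the dhd_dma_buf_t helpers in this block. This is
 * illustrative only (not compiled into the driver); the 2048 byte length is
 * an arbitrary example and error handling is trimmed:
 *
 *	dhd_dma_buf_t scratch;
 *	memset(&scratch, 0, sizeof(scratch));	// alloc asserts va == NULL, len == 0
 *	if (dhd_dma_buf_alloc(dhd, &scratch, 2048) != BCME_OK)
 *		return BCME_NOMEM;
 *	// ... program scratch.pa into the dongle, access scratch.va on the host ...
 *	dhd_dma_buf_free(dhd, &scratch);	// safe even if the alloc had failed
 */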
1447
1448 /**
1449 * dhd_dma_buf_reset - Reset a cache coherent DMA-able buffer.
1450 */
1451 static void
1452 dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
1453 {
1454 if ((dma_buf == NULL) || (dma_buf->va == NULL))
1455 return;
1456
1457 (void)dhd_dma_buf_audit(dhd, dma_buf);
1458
1459 /* Zero out the entire buffer and cache flush */
1460 memset((void*)dma_buf->va, 0, dma_buf->len);
1461 OSL_CACHE_FLUSH((void *)dma_buf->va, dma_buf->len);
1462 }
1463
1464 /**
1465 * dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated using
1466 * dhd_dma_buf_alloc().
1467 */
1468 void
1469 dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
1470 {
1471 osl_t *osh = dhd->osh;
1472
1473 ASSERT(dma_buf);
1474
1475 if (dma_buf->va == NULL)
1476 return; /* Allow for free invocation, when alloc failed */
1477
1478 /* DEBUG: dhd_dma_buf_reset(dhd, dma_buf) */
1479 (void)dhd_dma_buf_audit(dhd, dma_buf);
1480
1481 /* dma buffer may have been padded at allocation */
1482 DMA_FREE_CONSISTENT(osh, dma_buf->va, dma_buf->_alloced,
1483 dma_buf->pa, dma_buf->dmah);
1484
1485 memset(dma_buf, 0, sizeof(dhd_dma_buf_t));
1486 }
1487
1488 /**
1489  * dhd_dma_buf_init - Initialize a dhd_dma_buf with specified values.
1490 * Do not use dhd_dma_buf_init to zero out a dhd_dma_buf_t object. Use memset 0.
1491 */
1492 void
1493 dhd_dma_buf_init(dhd_pub_t *dhd, void *dhd_dma_buf,
1494 void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma)
1495 {
1496 dhd_dma_buf_t *dma_buf;
1497 ASSERT(dhd_dma_buf);
1498 dma_buf = (dhd_dma_buf_t *)dhd_dma_buf;
1499 dma_buf->va = va;
1500 dma_buf->len = len;
1501 dma_buf->pa = pa;
1502 dma_buf->dmah = dmah;
1503 dma_buf->secdma = secdma;
1504
1505 /* Audit user defined configuration */
1506 (void)dhd_dma_buf_audit(dhd, dma_buf);
1507 }
1508
1509 /* +------------------ End of PCIE DHD DMA BUF ADT ------------------------+ */
1510
1511 /*
1512 * +---------------------------------------------------------------------------+
1513 * DHD_MAP_PKTID_LOGGING
1514  * Logs the PKTID and DMA map/unmap information to help debug SMMU fault
1515  * issues on customer platforms.
1516 * +---------------------------------------------------------------------------+
1517 */
1518
1519 #ifdef DHD_MAP_PKTID_LOGGING
1520 typedef struct dhd_pktid_log_item {
1521 dmaaddr_t pa; /* DMA bus address */
1522 uint64 ts_nsec; /* Timestamp: nsec */
1523 uint32 size; /* DMA map/unmap size */
1524 uint32 pktid; /* Packet ID */
1525 uint8 pkttype; /* Packet Type */
1526 uint8 rsvd[7]; /* Reserved for future use */
1527 } dhd_pktid_log_item_t;
1528
1529 typedef struct dhd_pktid_log {
1530 uint32 items; /* number of total items */
1531 uint32 index; /* index of pktid_log_item */
1532 dhd_pktid_log_item_t map[0]; /* metadata storage */
1533 } dhd_pktid_log_t;
1534
1535 typedef void * dhd_pktid_log_handle_t; /* opaque handle to pktid log */
1536
1537 #define MAX_PKTID_LOG (2048)
1538 #define DHD_PKTID_LOG_ITEM_SZ (sizeof(dhd_pktid_log_item_t))
1539 #define DHD_PKTID_LOG_SZ(items) (uint32)((sizeof(dhd_pktid_log_t)) + \
1540 ((DHD_PKTID_LOG_ITEM_SZ) * (items)))
1541
1542 #define DHD_PKTID_LOG_INIT(dhd, hdl) dhd_pktid_logging_init((dhd), (hdl))
1543 #define DHD_PKTID_LOG_FINI(dhd, hdl) dhd_pktid_logging_fini((dhd), (hdl))
1544 #define DHD_PKTID_LOG(dhd, hdl, pa, pktid, len, pkttype) \
1545 dhd_pktid_logging((dhd), (hdl), (pa), (pktid), (len), (pkttype))
1546 #define DHD_PKTID_LOG_DUMP(dhd) dhd_pktid_logging_dump((dhd))
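/*
 * Expected calling sequence for the PKTID DMA map/unmap logging macros above
 * (illustrative sketch only; the attach/map/unmap call sites are shown
 * schematically):
 *
 *	// at protocol attach
 *	prot->pktid_dma_map   = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
 *	prot->pktid_dma_unmap = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
 *
 *	// on every DMA map of a packet buffer
 *	DHD_PKTID_LOG(dhd, prot->pktid_dma_map, pa, pktid, len, pkttype);
 *	// on every DMA unmap
 *	DHD_PKTID_LOG(dhd, prot->pktid_dma_unmap, pa, pktid, len, pkttype);
 *
 *	// on an SMMU fault, dump both circular logs
 *	DHD_PKTID_LOG_DUMP(dhd);
 */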
1547
1548 static dhd_pktid_log_handle_t *
1549 dhd_pktid_logging_init(dhd_pub_t *dhd, uint32 num_items)
1550 {
1551 dhd_pktid_log_t *log;
1552 uint32 log_size;
1553
1554 log_size = DHD_PKTID_LOG_SZ(num_items);
1555 log = (dhd_pktid_log_t *)MALLOCZ(dhd->osh, log_size);
1556 if (log == NULL) {
1557 DHD_ERROR(("%s: MALLOC failed for size %d\n",
1558 __FUNCTION__, log_size));
1559 return (dhd_pktid_log_handle_t *)NULL;
1560 }
1561
1562 log->items = num_items;
1563 log->index = 0;
1564
1565 return (dhd_pktid_log_handle_t *)log; /* opaque handle */
1566 }
1567
1568 static void
1569 dhd_pktid_logging_fini(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle)
1570 {
1571 dhd_pktid_log_t *log;
1572 uint32 log_size;
1573
1574 if (handle == NULL) {
1575 DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
1576 return;
1577 }
1578
1579 log = (dhd_pktid_log_t *)handle;
1580 log_size = DHD_PKTID_LOG_SZ(log->items);
1581 MFREE(dhd->osh, handle, log_size);
1582 }
1583
1584 static void
1585 dhd_pktid_logging(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle, dmaaddr_t pa,
1586 uint32 pktid, uint32 len, uint8 pkttype)
1587 {
1588 dhd_pktid_log_t *log;
1589 uint32 idx;
1590
1591 if (handle == NULL) {
1592 DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
1593 return;
1594 }
1595
1596 log = (dhd_pktid_log_t *)handle;
1597 idx = log->index;
1598 log->map[idx].ts_nsec = OSL_LOCALTIME_NS();
1599 log->map[idx].pa = pa;
1600 log->map[idx].pktid = pktid;
1601 log->map[idx].size = len;
1602 log->map[idx].pkttype = pkttype;
1603 log->index = (idx + 1) % (log->items); /* update index */
1604 }
1605
1606 void
1607 dhd_pktid_logging_dump(dhd_pub_t *dhd)
1608 {
1609 dhd_prot_t *prot = dhd->prot;
1610 dhd_pktid_log_t *map_log, *unmap_log;
1611 uint64 ts_sec, ts_usec;
1612
1613 if (prot == NULL) {
1614 DHD_ERROR(("%s: prot is NULL\n", __FUNCTION__));
1615 return;
1616 }
1617
1618 map_log = (dhd_pktid_log_t *)(prot->pktid_dma_map);
1619 unmap_log = (dhd_pktid_log_t *)(prot->pktid_dma_unmap);
1620 OSL_GET_LOCALTIME(&ts_sec, &ts_usec);
1621 if (map_log && unmap_log) {
1622 DHD_ERROR(("%s: map_idx=%d unmap_idx=%d "
1623 "current time=[%5lu.%06lu]\n", __FUNCTION__,
1624 map_log->index, unmap_log->index,
1625 (unsigned long)ts_sec, (unsigned long)ts_usec));
1626 DHD_ERROR(("%s: pktid_map_log(pa)=0x%llx size=%d, "
1627 "pktid_unmap_log(pa)=0x%llx size=%d\n", __FUNCTION__,
1628 (uint64)__virt_to_phys((ulong)(map_log->map)),
1629 (uint32)(DHD_PKTID_LOG_ITEM_SZ * map_log->items),
1630 (uint64)__virt_to_phys((ulong)(unmap_log->map)),
1631 (uint32)(DHD_PKTID_LOG_ITEM_SZ * unmap_log->items)));
1632 }
1633 }
1634 #endif /* DHD_MAP_PKTID_LOGGING */
1635
1636 /* +----------------- End of DHD_MAP_PKTID_LOGGING -----------------------+ */
1637
1638 /*
1639 * +---------------------------------------------------------------------------+
1640 * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
1641  * Its main purpose is to save memory on the dongle, though it has other
1642  * uses as well. The packet id map also includes storage for some packet
1643  * parameters. A native packet pointer along with those parameters may be saved
1644 * and a unique 32bit pkt id will be returned. Later, the saved packet pointer
1645 * and the metadata may be retrieved using the previously allocated packet id.
1646 * +---------------------------------------------------------------------------+
1647 */
1648 #define DHD_PCIE_PKTID
1649 #define MAX_CTRL_PKTID (1024) /* Maximum number of pktids supported */
1650 #define MAX_RX_PKTID (1024)
1651 #define MAX_TX_PKTID (3072 * 12)
1652
1653 /* On Router, the pktptr serves as a pktid. */
1654
1655 #if defined(PROP_TXSTATUS) && !defined(DHD_PCIE_PKTID)
1656 #error "PKTIDMAP must be supported with PROP_TXSTATUS/WLFC"
1657 #endif // endif
1658
1659 /* Enum for marking the buffer color based on usage */
1660 typedef enum dhd_pkttype {
1661 PKTTYPE_DATA_TX = 0,
1662 PKTTYPE_DATA_RX,
1663 PKTTYPE_IOCTL_RX,
1664 PKTTYPE_EVENT_RX,
1665 PKTTYPE_INFO_RX,
1666 	/* dhd_prot_pkt_free skips the pkttype check, e.g. when a pktid is reserved but no space is available */
1667 PKTTYPE_NO_CHECK,
1668 PKTTYPE_TSBUF_RX
1669 } dhd_pkttype_t;
1670
1671 #define DHD_PKTID_MIN_AVAIL_COUNT 512U
1672 #define DHD_PKTID_DEPLETED_MAX_COUNT (DHD_PKTID_MIN_AVAIL_COUNT * 2U)
1673 #define DHD_PKTID_INVALID (0U)
1674 #define DHD_IOCTL_REQ_PKTID (0xFFFE)
1675 #define DHD_FAKE_PKTID (0xFACE)
1676 #define DHD_H2D_DBGRING_REQ_PKTID 0xFFFD
1677 #define DHD_D2H_DBGRING_REQ_PKTID 0xFFFC
1678 #define DHD_H2D_HOSTTS_REQ_PKTID 0xFFFB
1679 #define DHD_H2D_BTLOGRING_REQ_PKTID 0xFFFA
1680 #define DHD_D2H_BTLOGRING_REQ_PKTID 0xFFF9
1681 #define DHD_H2D_SNAPSHOT_UPLOAD_REQ_PKTID 0xFFF8
1682 #ifdef DHD_HP2P
1683 #define DHD_D2H_HPPRING_TXREQ_PKTID 0xFFF7
1684 #define DHD_D2H_HPPRING_RXREQ_PKTID 0xFFF6
1685 #endif /* DHD_HP2P */
1686
1687 #define IS_FLOWRING(ring) \
1688 ((strncmp(ring->name, "h2dflr", sizeof("h2dflr"))) == (0))
1689
1690 typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */
1691
1692 /* Construct a packet id mapping table, returning an opaque map handle */
1693 static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items);
1694
1695 /* Destroy a packet id mapping table, freeing all packets active in the table */
1696 static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map);
1697
1698 #define DHD_NATIVE_TO_PKTID_INIT(dhd, items) dhd_pktid_map_init((dhd), (items))
1699 #define DHD_NATIVE_TO_PKTID_RESET(dhd, map) dhd_pktid_map_reset((dhd), (map))
1700 #define DHD_NATIVE_TO_PKTID_FINI(dhd, map) dhd_pktid_map_fini((dhd), (map))
1701 #define DHD_NATIVE_TO_PKTID_FINI_IOCTL(osh, map) dhd_pktid_map_fini_ioctl((osh), (map))
1702
1703 #ifdef MACOSX_DHD
1704 #undef DHD_PCIE_PKTID
1705 #define DHD_PCIE_PKTID 1
1706 #endif /* MACOSX_DHD */
1707
1708 #if defined(DHD_PCIE_PKTID)
1709 #if defined(MACOSX_DHD)
1710 #define IOCTLRESP_USE_CONSTMEM
1711 static void free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
1712 static int alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
1713 #endif // endif
1714
1715 /* Determine number of pktids that are available */
1716 static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle);
1717
1718 /* Allocate a unique pktid against which a pkt and some metadata is saved */
1719 static INLINE uint32 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
1720 void *pkt, dhd_pkttype_t pkttype);
1721 static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
1722 void *pkt, uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dma,
1723 void *dmah, void *secdma, dhd_pkttype_t pkttype);
1724 static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
1725 void *pkt, dmaaddr_t pa, uint32 len, uint8 dma,
1726 void *dmah, void *secdma, dhd_pkttype_t pkttype);
1727 /* Return an allocated pktid, retrieving previously saved pkt and metadata */
1728 static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
1729 uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah,
1730 void **secdma, dhd_pkttype_t pkttype, bool rsv_locker);
1731
1732 /*
1733 * DHD_PKTID_AUDIT_ENABLED: Audit of PktIds in DHD for duplicate alloc and frees
1734 *
1735 * DHD_PKTID_AUDIT_MAP: Audit the LIFO or FIFO PktIdMap allocator
1736 * DHD_PKTID_AUDIT_RING: Audit the pktid during producer/consumer ring operation
1737 *
1738 * CAUTION: When DHD_PKTID_AUDIT_ENABLED is defined,
1739 * either DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be selected.
1740 */
1741 #if defined(DHD_PKTID_AUDIT_ENABLED)
1742 #define USE_DHD_PKTID_AUDIT_LOCK 1
1743 /* Audit the pktidmap allocator */
1744 /* #define DHD_PKTID_AUDIT_MAP */
1745
1746 /* Audit the pktid during production/consumption of workitems */
1747 #define DHD_PKTID_AUDIT_RING
1748
1749 #if defined(DHD_PKTID_AUDIT_MAP) && defined(DHD_PKTID_AUDIT_RING)
1750 #error "Only one of MAP or RING audit may be enabled at a time."
1751 #endif /* DHD_PKTID_AUDIT_MAP && DHD_PKTID_AUDIT_RING */
1752
1753 #define DHD_DUPLICATE_ALLOC 1
1754 #define DHD_DUPLICATE_FREE 2
1755 #define DHD_TEST_IS_ALLOC 3
1756 #define DHD_TEST_IS_FREE 4
1757
1758 typedef enum dhd_pktid_map_type {
1759 DHD_PKTID_MAP_TYPE_CTRL = 1,
1760 DHD_PKTID_MAP_TYPE_TX,
1761 DHD_PKTID_MAP_TYPE_RX,
1762 DHD_PKTID_MAP_TYPE_UNKNOWN
1763 } dhd_pktid_map_type_t;
1764
1765 #ifdef USE_DHD_PKTID_AUDIT_LOCK
1766 #define DHD_PKTID_AUDIT_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
1767 #define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
1768 #define DHD_PKTID_AUDIT_LOCK(lock) dhd_os_spin_lock(lock)
1769 #define DHD_PKTID_AUDIT_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags)
1770 #else
1771 #define DHD_PKTID_AUDIT_LOCK_INIT(osh) (void *)(1)
1772 #define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) do { /* noop */ } while (0)
1773 #define DHD_PKTID_AUDIT_LOCK(lock) 0
1774 #define DHD_PKTID_AUDIT_UNLOCK(lock, flags) do { /* noop */ } while (0)
1775 #endif /* !USE_DHD_PKTID_AUDIT_LOCK */
1776
1777 #endif /* DHD_PKTID_AUDIT_ENABLED */
1778
1779 #define USE_DHD_PKTID_LOCK 1
1780
1781 #ifdef USE_DHD_PKTID_LOCK
1782 #define DHD_PKTID_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
1783 #define DHD_PKTID_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
1784 #define DHD_PKTID_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
1785 #define DHD_PKTID_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags)
1786 #else
1787 #define DHD_PKTID_LOCK_INIT(osh) (void *)(1)
1788 #define DHD_PKTID_LOCK_DEINIT(osh, lock) \
1789 do { \
1790 BCM_REFERENCE(osh); \
1791 BCM_REFERENCE(lock); \
1792 } while (0)
1793 #define DHD_PKTID_LOCK(lock) 0
1794 #define DHD_PKTID_UNLOCK(lock, flags) \
1795 do { \
1796 BCM_REFERENCE(lock); \
1797 BCM_REFERENCE(flags); \
1798 } while (0)
1799 #endif /* !USE_DHD_PKTID_LOCK */
1800
1801 typedef enum dhd_locker_state {
1802 LOCKER_IS_FREE,
1803 LOCKER_IS_BUSY,
1804 LOCKER_IS_RSVD
1805 } dhd_locker_state_t;
1806
1807 /* Packet metadata saved in packet id mapper */
1808
1809 typedef struct dhd_pktid_item {
1810 dhd_locker_state_t state; /* tag a locker to be free, busy or reserved */
1811 uint8 dir; /* dma map direction (Tx=flush or Rx=invalidate) */
1812 dhd_pkttype_t pkttype; /* pktlists are maintained based on pkttype */
1813 uint16 len; /* length of mapped packet's buffer */
1814 void *pkt; /* opaque native pointer to a packet */
1815 dmaaddr_t pa; /* physical address of mapped packet's buffer */
1816 void *dmah; /* handle to OS specific DMA map */
1817 void *secdma;
1818 } dhd_pktid_item_t;
1819
1820 typedef uint32 dhd_pktid_key_t;
1821
1822 typedef struct dhd_pktid_map {
1823 uint32 items; /* total items in map */
1824 uint32 avail; /* total available items */
1825 int failures; /* lockers unavailable count */
1826 /* Spinlock to protect dhd_pktid_map in process/tasklet context */
1827 void *pktid_lock; /* Used when USE_DHD_PKTID_LOCK is defined */
1828
1829 #if defined(DHD_PKTID_AUDIT_ENABLED)
1830 void *pktid_audit_lock;
1831 struct bcm_mwbmap *pktid_audit; /* multi word bitmap based audit */
1832 #endif /* DHD_PKTID_AUDIT_ENABLED */
1833 dhd_pktid_key_t *keys; /* map_items +1 unique pkt ids */
1834 dhd_pktid_item_t lockers[0]; /* metadata storage */
1835 } dhd_pktid_map_t;
1836
1837 /*
1838 * PktId (Locker) #0 is never allocated and is considered invalid.
1839 *
1840 * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a
1841 * depleted pktid pool and must not be used by the caller.
1842 *
1843 * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID.
1844 */
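/*
 * Caller-side handling of a depleted pool, as a sketch (illustrative only;
 * it uses the DHD_NATIVE_TO_PKTID() wrapper defined below and elides the
 * surrounding posting logic):
 *
 *	pktid = DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir,
 *		dmah, secdma, PKTTYPE_DATA_TX);
 *	if (pktid == DHD_PKTID_INVALID) {
 *		// pool depleted: do not post the workitem; unmap/free the pkt
 *		// and retry later. Never hand DHD_PKTID_INVALID to the dongle.
 *	}
 */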
1845
1846 #define DHD_PKTID_FREE_LOCKER (FALSE)
1847 #define DHD_PKTID_RSV_LOCKER (TRUE)
1848
1849 #define DHD_PKTID_ITEM_SZ (sizeof(dhd_pktid_item_t))
1850 #define DHD_PKIDMAP_ITEMS(items) (items)
1851 #define DHD_PKTID_MAP_SZ(items) (sizeof(dhd_pktid_map_t) + \
1852 (DHD_PKTID_ITEM_SZ * ((items) + 1)))
1853 #define DHD_PKTIDMAP_KEYS_SZ(items) (sizeof(dhd_pktid_key_t) * ((items) + 1))
1854
1855 #define DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, map) dhd_pktid_map_reset_ioctl((dhd), (map))
1856
1857 /* Convert a packet to a pktid, and save pkt pointer in busy locker */
1858 #define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype) \
1859 dhd_pktid_map_reserve((dhd), (map), (pkt), (pkttype))
1860 /* Reuse a previously reserved locker to save packet params */
1861 #define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dir, dmah, secdma, pkttype) \
1862 dhd_pktid_map_save((dhd), (map), (void *)(pkt), (nkey), (pa), (uint32)(len), \
1863 (uint8)(dir), (void *)(dmah), (void *)(secdma), \
1864 (dhd_pkttype_t)(pkttype))
1865 /* Convert a packet to a pktid, and save packet params in locker */
1866 #define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir, dmah, secdma, pkttype) \
1867 dhd_pktid_map_alloc((dhd), (map), (void *)(pkt), (pa), (uint32)(len), \
1868 (uint8)(dir), (void *)(dmah), (void *)(secdma), \
1869 (dhd_pkttype_t)(pkttype))
1870
1871 /* Convert pktid to a packet, and free the locker */
1872 #define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
1873 dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
1874 (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
1875 (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER)
1876
1877 /* Convert the pktid to a packet, empty locker, but keep it reserved */
1878 #define DHD_PKTID_TO_NATIVE_RSV(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
1879 dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
1880 (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
1881 (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER)
1882
1883 #define DHD_PKTID_AVAIL(map) dhd_pktid_map_avail_cnt(map)
1884
1885 #if defined(DHD_PKTID_AUDIT_ENABLED)
1886
1887 static int
1888 dhd_get_pktid_map_type(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map)
1889 {
1890 dhd_prot_t *prot = dhd->prot;
1891 int pktid_map_type;
1892
1893 if (pktid_map == prot->pktid_ctrl_map) {
1894 pktid_map_type = DHD_PKTID_MAP_TYPE_CTRL;
1895 } else if (pktid_map == prot->pktid_tx_map) {
1896 pktid_map_type = DHD_PKTID_MAP_TYPE_TX;
1897 } else if (pktid_map == prot->pktid_rx_map) {
1898 pktid_map_type = DHD_PKTID_MAP_TYPE_RX;
1899 } else {
1900 pktid_map_type = DHD_PKTID_MAP_TYPE_UNKNOWN;
1901 }
1902
1903 return pktid_map_type;
1904 }
1905
1906 /**
1907 * __dhd_pktid_audit - Use the mwbmap to audit validity of a pktid.
1908 */
1909 static int
1910 __dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
1911 const int test_for, const char *errmsg)
1912 {
1913 #define DHD_PKT_AUDIT_STR "ERROR: %16s Host PktId Audit: "
1914 struct bcm_mwbmap *handle;
1915 uint32 flags;
1916 bool ignore_audit;
1917 int error = BCME_OK;
1918
1919 if (pktid_map == (dhd_pktid_map_t *)NULL) {
1920 DHD_ERROR((DHD_PKT_AUDIT_STR "Pkt id map NULL\n", errmsg));
1921 return BCME_OK;
1922 }
1923
1924 flags = DHD_PKTID_AUDIT_LOCK(pktid_map->pktid_audit_lock);
1925
1926 handle = pktid_map->pktid_audit;
1927 if (handle == (struct bcm_mwbmap *)NULL) {
1928 DHD_ERROR((DHD_PKT_AUDIT_STR "Handle NULL\n", errmsg));
1929 goto out;
1930 }
1931
1932 /* Exclude special pktids from audit */
1933 ignore_audit = (pktid == DHD_IOCTL_REQ_PKTID) | (pktid == DHD_FAKE_PKTID);
1934 if (ignore_audit) {
1935 goto out;
1936 }
1937
1938 if ((pktid == DHD_PKTID_INVALID) || (pktid > pktid_map->items)) {
1939 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> invalid\n", errmsg, pktid));
1940 error = BCME_ERROR;
1941 goto out;
1942 }
1943
1944 /* Perform audit */
1945 switch (test_for) {
1946 case DHD_DUPLICATE_ALLOC:
1947 if (!bcm_mwbmap_isfree(handle, pktid)) {
1948 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> alloc duplicate\n",
1949 errmsg, pktid));
1950 error = BCME_ERROR;
1951 } else {
1952 bcm_mwbmap_force(handle, pktid);
1953 }
1954 break;
1955
1956 case DHD_DUPLICATE_FREE:
1957 if (bcm_mwbmap_isfree(handle, pktid)) {
1958 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> free duplicate\n",
1959 errmsg, pktid));
1960 error = BCME_ERROR;
1961 } else {
1962 bcm_mwbmap_free(handle, pktid);
1963 }
1964 break;
1965
1966 case DHD_TEST_IS_ALLOC:
1967 if (bcm_mwbmap_isfree(handle, pktid)) {
1968 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not allocated\n",
1969 errmsg, pktid));
1970 error = BCME_ERROR;
1971 }
1972 break;
1973
1974 case DHD_TEST_IS_FREE:
1975 if (!bcm_mwbmap_isfree(handle, pktid)) {
1976 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not free",
1977 errmsg, pktid));
1978 error = BCME_ERROR;
1979 }
1980 break;
1981
1982 default:
1983 DHD_ERROR(("%s: Invalid test case: %d\n", __FUNCTION__, test_for));
1984 error = BCME_ERROR;
1985 break;
1986 }
1987
1988 out:
1989 DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
1990
1991 if (error != BCME_OK) {
1992 dhd->pktid_audit_failed = TRUE;
1993 }
1994
1995 return error;
1996 }
1997
1998 static int
1999 dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
2000 const int test_for, const char *errmsg)
2001 {
2002 int ret = BCME_OK;
2003 ret = __dhd_pktid_audit(dhd, pktid_map, pktid, test_for, errmsg);
2004 if (ret == BCME_ERROR) {
2005 DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
2006 __FUNCTION__, pktid, dhd_get_pktid_map_type(dhd, pktid_map)));
2007 dhd_pktid_error_handler(dhd);
2008 }
2009
2010 return ret;
2011 }
2012
2013 #define DHD_PKTID_AUDIT(dhdp, map, pktid, test_for) \
2014 dhd_pktid_audit((dhdp), (dhd_pktid_map_t *)(map), (pktid), (test_for), __FUNCTION__)
2015
2016 static int
2017 dhd_pktid_audit_ring_debug(dhd_pub_t *dhdp, dhd_pktid_map_t *map, uint32 pktid,
2018 const int test_for, void *msg, uint32 msg_len, const char *func)
2019 {
2020 int ret = BCME_OK;
2021
2022 if (dhd_query_bus_erros(dhdp)) {
2023 return BCME_ERROR;
2024 }
2025
2026 ret = __dhd_pktid_audit(dhdp, map, pktid, test_for, func);
2027 if (ret == BCME_ERROR) {
2028 DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
2029 __FUNCTION__, pktid, dhd_get_pktid_map_type(dhdp, map)));
2030 prhex(func, (uchar *)msg, msg_len);
2031 dhd_pktid_error_handler(dhdp);
2032 }
2033 return ret;
2034 }
2035 #define DHD_PKTID_AUDIT_RING_DEBUG(dhdp, map, pktid, test_for, msg, msg_len) \
2036 dhd_pktid_audit_ring_debug((dhdp), (dhd_pktid_map_t *)(map), \
2037 (pktid), (test_for), msg, msg_len, __FUNCTION__)
2038
2039 #endif /* DHD_PKTID_AUDIT_ENABLED */
2040
2041 /**
2042 * +---------------------------------------------------------------------------+
2043 * Packet to Packet Id mapper using a <numbered_key, locker> paradigm.
2044 *
2045 * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_xxx_PKTID].
2046 *
2047 * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique
2048 * packet id is returned. This unique packet id may be used to retrieve the
2049 * previously saved packet metadata, using dhd_pktid_map_free(). On invocation
2050 * of dhd_pktid_map_free(), the unique packet id is essentially freed. A
2051 * subsequent call to dhd_pktid_map_alloc() may reuse this packet id.
2052 *
2053 * Implementation Note:
2054 * Convert this into a <key,locker> abstraction and place into bcmutils !
2055 * Locker abstraction should treat contents as opaque storage, and a
2056 * callback should be registered to handle busy lockers on destructor.
2057 *
2058 * +---------------------------------------------------------------------------+
2059 */
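/*
 * End-to-end sketch of the alloc/free paradigm described above, using the
 * wrapper macros defined earlier in this file (illustrative only; the DMA_TX
 * direction constant is shown purely for illustration):
 *
 *	// producer path: map a native packet to a unique 32bit pktid
 *	pktid = DHD_NATIVE_TO_PKTID(dhd, prot->pktid_tx_map, pkt, pa, len,
 *		DMA_TX, dmah, secdma, PKTTYPE_DATA_TX);
 *
 *	// completion path: recover the packet and its DMA parameters and
 *	// free the locker, so the pktid may be reused by a later alloc
 *	pkt = DHD_PKTID_TO_NATIVE(dhd, prot->pktid_tx_map, pktid, pa, len,
 *		dmah, secdma, PKTTYPE_DATA_TX);
 */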
2060
2061 /** Allocate and initialize a mapper of num_items <numbered_key, locker> */
2062
2063 static dhd_pktid_map_handle_t *
2064 dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
2065 {
2066 void* osh;
2067 uint32 nkey;
2068 dhd_pktid_map_t *map;
2069 uint32 dhd_pktid_map_sz;
2070 uint32 map_items;
2071 uint32 map_keys_sz;
2072 osh = dhd->osh;
2073
2074 dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items);
2075
2076 map = (dhd_pktid_map_t *)VMALLOC(osh, dhd_pktid_map_sz);
2077 if (map == NULL) {
2078 		DHD_ERROR(("%s:%d: VMALLOC failed for size %d\n",
2079 __FUNCTION__, __LINE__, dhd_pktid_map_sz));
2080 return (dhd_pktid_map_handle_t *)NULL;
2081 }
2082
2083 map->items = num_items;
2084 map->avail = num_items;
2085
2086 map_items = DHD_PKIDMAP_ITEMS(map->items);
2087
2088 map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
2089
2090 /* Initialize the lock that protects this structure */
2091 map->pktid_lock = DHD_PKTID_LOCK_INIT(osh);
2092 if (map->pktid_lock == NULL) {
2093 DHD_ERROR(("%s:%d: Lock init failed \r\n", __FUNCTION__, __LINE__));
2094 goto error;
2095 }
2096
2097 map->keys = (dhd_pktid_key_t *)MALLOC(osh, map_keys_sz);
2098 if (map->keys == NULL) {
2099 DHD_ERROR(("%s:%d: MALLOC failed for map->keys size %d\n",
2100 __FUNCTION__, __LINE__, map_keys_sz));
2101 goto error;
2102 }
2103
2104 #if defined(DHD_PKTID_AUDIT_ENABLED)
2105 /* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */
2106 map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1);
2107 if (map->pktid_audit == (struct bcm_mwbmap *)NULL) {
2108 DHD_ERROR(("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__));
2109 goto error;
2110 } else {
2111 DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n",
2112 __FUNCTION__, __LINE__, map_items + 1));
2113 }
2114 map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh);
2115 #endif /* DHD_PKTID_AUDIT_ENABLED */
2116
2117 for (nkey = 1; nkey <= map_items; nkey++) { /* locker #0 is reserved */
2118 map->keys[nkey] = nkey; /* populate with unique keys */
2119 map->lockers[nkey].state = LOCKER_IS_FREE;
2120 map->lockers[nkey].pkt = NULL; /* bzero: redundant */
2121 map->lockers[nkey].len = 0;
2122 }
2123
2124 /* Reserve pktid #0, i.e. DHD_PKTID_INVALID to be inuse */
2125 map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY; /* tag locker #0 as inuse */
2126 map->lockers[DHD_PKTID_INVALID].pkt = NULL; /* bzero: redundant */
2127 map->lockers[DHD_PKTID_INVALID].len = 0;
2128
2129 #if defined(DHD_PKTID_AUDIT_ENABLED)
2130 /* do not use dhd_pktid_audit() here, use bcm_mwbmap_force directly */
2131 bcm_mwbmap_force(map->pktid_audit, DHD_PKTID_INVALID);
2132 #endif /* DHD_PKTID_AUDIT_ENABLED */
2133
2134 return (dhd_pktid_map_handle_t *)map; /* opaque handle */
2135
2136 error:
2137 if (map) {
2138 #if defined(DHD_PKTID_AUDIT_ENABLED)
2139 if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
2140 bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
2141 map->pktid_audit = (struct bcm_mwbmap *)NULL;
2142 if (map->pktid_audit_lock)
2143 DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
2144 }
2145 #endif /* DHD_PKTID_AUDIT_ENABLED */
2146
2147 if (map->keys) {
2148 MFREE(osh, map->keys, map_keys_sz);
2149 }
2150
2151 if (map->pktid_lock) {
2152 DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
2153 }
2154
2155 VMFREE(osh, map, dhd_pktid_map_sz);
2156 }
2157 return (dhd_pktid_map_handle_t *)NULL;
2158 }
2159
2160 /**
2161 * Retrieve all allocated keys and free all <numbered_key, locker>.
2162 * Freeing implies: unmapping the buffers and freeing the native packet
2163 * This could have been a callback registered with the pktid mapper.
2164 */
2165 static void
2166 dhd_pktid_map_reset(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
2167 {
2168 void *osh;
2169 uint32 nkey;
2170 dhd_pktid_map_t *map;
2171 dhd_pktid_item_t *locker;
2172 uint32 map_items;
2173 unsigned long flags;
2174 bool data_tx = FALSE;
2175
2176 map = (dhd_pktid_map_t *)handle;
2177 DHD_PKTID_LOCK(map->pktid_lock, flags);
2178 osh = dhd->osh;
2179
2180 map_items = DHD_PKIDMAP_ITEMS(map->items);
2181 /* skip reserved KEY #0, and start from 1 */
2182
2183 for (nkey = 1; nkey <= map_items; nkey++) {
2184 if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
2185 locker = &map->lockers[nkey];
2186 locker->state = LOCKER_IS_FREE;
2187 data_tx = (locker->pkttype == PKTTYPE_DATA_TX);
2188 if (data_tx) {
2189 OSL_ATOMIC_DEC(dhd->osh, &dhd->prot->active_tx_count);
2190 }
2191
2192 #ifdef DHD_PKTID_AUDIT_RING
2193 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
2194 #endif /* DHD_PKTID_AUDIT_RING */
2195 #ifdef DHD_MAP_PKTID_LOGGING
2196 DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap,
2197 locker->pa, nkey, locker->len,
2198 locker->pkttype);
2199 #endif /* DHD_MAP_PKTID_LOGGING */
2200
2201 {
2202 if (SECURE_DMA_ENAB(dhd->osh))
2203 SECURE_DMA_UNMAP(osh, locker->pa,
2204 locker->len, locker->dir, 0,
2205 locker->dmah, locker->secdma, 0);
2206 else
2207 DMA_UNMAP(osh, locker->pa, locker->len,
2208 locker->dir, 0, locker->dmah);
2209 }
2210 dhd_prot_packet_free(dhd, (ulong*)locker->pkt,
2211 locker->pkttype, data_tx);
2212 }
2213 else {
2214 #ifdef DHD_PKTID_AUDIT_RING
2215 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
2216 #endif /* DHD_PKTID_AUDIT_RING */
2217 }
2218 map->keys[nkey] = nkey; /* populate with unique keys */
2219 }
2220
2221 map->avail = map_items;
2222 memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
2223 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2224 }
2225
2226 #ifdef IOCTLRESP_USE_CONSTMEM
2227 /** Called in the detach path to release IOCTL buffers. */
2228 static void
2229 dhd_pktid_map_reset_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
2230 {
2231 uint32 nkey;
2232 dhd_pktid_map_t *map;
2233 dhd_pktid_item_t *locker;
2234 uint32 map_items;
2235 unsigned long flags;
2236
2237 map = (dhd_pktid_map_t *)handle;
2238 DHD_PKTID_LOCK(map->pktid_lock, flags);
2239
2240 map_items = DHD_PKIDMAP_ITEMS(map->items);
2241 /* skip reserved KEY #0, and start from 1 */
2242 for (nkey = 1; nkey <= map_items; nkey++) {
2243 if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
2244 dhd_dma_buf_t retbuf;
2245
2246 #ifdef DHD_PKTID_AUDIT_RING
2247 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
2248 #endif /* DHD_PKTID_AUDIT_RING */
2249
2250 locker = &map->lockers[nkey];
2251 retbuf.va = locker->pkt;
2252 retbuf.len = locker->len;
2253 retbuf.pa = locker->pa;
2254 retbuf.dmah = locker->dmah;
2255 retbuf.secdma = locker->secdma;
2256
2257 free_ioctl_return_buffer(dhd, &retbuf);
2258 }
2259 else {
2260 #ifdef DHD_PKTID_AUDIT_RING
2261 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
2262 #endif /* DHD_PKTID_AUDIT_RING */
2263 }
2264 map->keys[nkey] = nkey; /* populate with unique keys */
2265 }
2266
2267 map->avail = map_items;
2268 memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
2269 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2270 }
2271 #endif /* IOCTLRESP_USE_CONSTMEM */
2272
2273 /**
2274 * Free the pktid map.
2275 */
2276 static void
2277 dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
2278 {
2279 dhd_pktid_map_t *map;
2280 uint32 dhd_pktid_map_sz;
2281 uint32 map_keys_sz;
2282
2283 if (handle == NULL)
2284 return;
2285
2286 /* Free any pending packets */
2287 dhd_pktid_map_reset(dhd, handle);
2288
2289 map = (dhd_pktid_map_t *)handle;
2290 dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
2291 map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
2292
2293 DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock);
2294
2295 #if defined(DHD_PKTID_AUDIT_ENABLED)
2296 if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
2297 bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
2298 map->pktid_audit = (struct bcm_mwbmap *)NULL;
2299 if (map->pktid_audit_lock) {
2300 DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
2301 }
2302 }
2303 #endif /* DHD_PKTID_AUDIT_ENABLED */
2304 MFREE(dhd->osh, map->keys, map_keys_sz);
2305 VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
2306 }
2307 #ifdef IOCTLRESP_USE_CONSTMEM
2308 static void
2309 dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
2310 {
2311 dhd_pktid_map_t *map;
2312 uint32 dhd_pktid_map_sz;
2313 uint32 map_keys_sz;
2314
2315 if (handle == NULL)
2316 return;
2317
2318 /* Free any pending packets */
2319 dhd_pktid_map_reset_ioctl(dhd, handle);
2320
2321 map = (dhd_pktid_map_t *)handle;
2322 dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
2323 map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
2324
2325 DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock);
2326
2327 #if defined(DHD_PKTID_AUDIT_ENABLED)
2328 if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
2329 bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
2330 map->pktid_audit = (struct bcm_mwbmap *)NULL;
2331 if (map->pktid_audit_lock) {
2332 DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
2333 }
2334 }
2335 #endif /* DHD_PKTID_AUDIT_ENABLED */
2336
2337 MFREE(dhd->osh, map->keys, map_keys_sz);
2338 VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
2339 }
2340 #endif /* IOCTLRESP_USE_CONSTMEM */
2341
2342 /** Get the pktid free count */
2343 static INLINE uint32 BCMFASTPATH
2344 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle)
2345 {
2346 dhd_pktid_map_t *map;
2347 uint32 avail;
2348 unsigned long flags;
2349
2350 ASSERT(handle != NULL);
2351 map = (dhd_pktid_map_t *)handle;
2352
2353 DHD_PKTID_LOCK(map->pktid_lock, flags);
2354 avail = map->avail;
2355 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2356
2357 return avail;
2358 }
2359
2360 /**
2361 * dhd_pktid_map_reserve - reserve a unique numbered key. Reserved locker is not
2362 * yet populated. Invoke the pktid save api to populate the packet parameters
2363  * into the locker. This function is not reentrant; serializing calls is the
2364  * caller's responsibility. The caller must treat a returned value of
2365  * DHD_PKTID_INVALID as a failure case, implying a depleted pool of pktids.
2366 */
2367 static INLINE uint32
2368 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
2369 void *pkt, dhd_pkttype_t pkttype)
2370 {
2371 uint32 nkey;
2372 dhd_pktid_map_t *map;
2373 dhd_pktid_item_t *locker;
2374 unsigned long flags;
2375
2376 ASSERT(handle != NULL);
2377 map = (dhd_pktid_map_t *)handle;
2378
2379 DHD_PKTID_LOCK(map->pktid_lock, flags);
2380
2381 if ((int)(map->avail) <= 0) { /* no more pktids to allocate */
2382 map->failures++;
2383 DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__));
2384 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2385 return DHD_PKTID_INVALID; /* failed alloc request */
2386 }
2387
2388 ASSERT(map->avail <= map->items);
2389 nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */
2390
2391 if ((map->avail > map->items) || (nkey > map->items)) {
2392 map->failures++;
2393 DHD_ERROR(("%s:%d: failed to allocate a new pktid,"
2394 " map->avail<%u>, nkey<%u>, pkttype<%u>\n",
2395 __FUNCTION__, __LINE__, map->avail, nkey,
2396 pkttype));
2397 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2398 return DHD_PKTID_INVALID; /* failed alloc request */
2399 }
2400
2401 locker = &map->lockers[nkey]; /* save packet metadata in locker */
2402 map->avail--;
2403 locker->pkt = pkt; /* pkt is saved, other params not yet saved. */
2404 locker->len = 0;
2405 locker->state = LOCKER_IS_BUSY; /* reserve this locker */
2406
2407 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2408
2409 ASSERT(nkey != DHD_PKTID_INVALID);
2410
2411 return nkey; /* return locker's numbered key */
2412 }
2413
2414 /*
2415 * dhd_pktid_map_save - Save a packet's parameters into a locker
2416 * corresponding to a previously reserved unique numbered key.
2417 */
2418 static INLINE void
2419 dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
2420 uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
2421 dhd_pkttype_t pkttype)
2422 {
2423 dhd_pktid_map_t *map;
2424 dhd_pktid_item_t *locker;
2425 unsigned long flags;
2426
2427 ASSERT(handle != NULL);
2428 map = (dhd_pktid_map_t *)handle;
2429
2430 DHD_PKTID_LOCK(map->pktid_lock, flags);
2431
2432 if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
2433 DHD_ERROR(("%s:%d: Error! saving invalid pktid<%u> pkttype<%u>\n",
2434 __FUNCTION__, __LINE__, nkey, pkttype));
2435 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2436 #ifdef DHD_FW_COREDUMP
2437 if (dhd->memdump_enabled) {
2438 /* collect core dump */
2439 dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2440 dhd_bus_mem_dump(dhd);
2441 }
2442 #else
2443 ASSERT(0);
2444 #endif /* DHD_FW_COREDUMP */
2445 return;
2446 }
2447
2448 locker = &map->lockers[nkey];
2449
2450 ASSERT(((locker->state == LOCKER_IS_BUSY) && (locker->pkt == pkt)) ||
2451 ((locker->state == LOCKER_IS_RSVD) && (locker->pkt == NULL)));
2452
2453 /* store contents in locker */
2454 locker->dir = dir;
2455 locker->pa = pa;
2456 locker->len = (uint16)len; /* 16bit len */
2457 	locker->dmah = dmah; /* DMA mapping handle */
2458 locker->secdma = secdma;
2459 locker->pkttype = pkttype;
2460 locker->pkt = pkt;
2461 locker->state = LOCKER_IS_BUSY; /* make this locker busy */
2462 #ifdef DHD_MAP_PKTID_LOGGING
2463 DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_map, pa, nkey, len, pkttype);
2464 #endif /* DHD_MAP_PKTID_LOGGING */
2465 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2466 }
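/*
 * The reserve/save split above supports callers that need a pktid before the
 * buffer is DMA-mapped. A minimal sketch of the two-phase use (illustrative
 * only; the error code and the mapping step are placeholders):
 *
 *	nkey = DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, PKTTYPE_DATA_TX);
 *	if (nkey == DHD_PKTID_INVALID)
 *		return BCME_NORESOURCE;		// depleted pktid pool
 *	// ... DMA-map the packet buffer, obtaining pa, len and dmah ...
 *	DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len,
 *		DMA_TX, dmah, secdma, PKTTYPE_DATA_TX);
 */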
2467
2468 /**
2469 * dhd_pktid_map_alloc - Allocate a unique numbered key and save the packet
2470 * contents into the corresponding locker. Return the numbered key.
2471 */
2472 static uint32 BCMFASTPATH
2473 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
2474 dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
2475 dhd_pkttype_t pkttype)
2476 {
2477 uint32 nkey;
2478
2479 nkey = dhd_pktid_map_reserve(dhd, handle, pkt, pkttype);
2480 if (nkey != DHD_PKTID_INVALID) {
2481 dhd_pktid_map_save(dhd, handle, pkt, nkey, pa,
2482 len, dir, dmah, secdma, pkttype);
2483 }
2484
2485 return nkey;
2486 }
2487
2488 /**
2489 * dhd_pktid_map_free - Given a numbered key, return the locker contents.
2490 * dhd_pktid_map_free() is not reentrant, and is the caller's responsibility.
2491 * Caller may not free a pktid value DHD_PKTID_INVALID or an arbitrary pktid
2492 * value. Only a previously allocated pktid may be freed.
2493 */
2494 static void * BCMFASTPATH
2495 dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey,
2496 dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma, dhd_pkttype_t pkttype,
2497 bool rsv_locker)
2498 {
2499 dhd_pktid_map_t *map;
2500 dhd_pktid_item_t *locker;
2501 void * pkt;
2502 unsigned long long locker_addr;
2503 unsigned long flags;
2504
2505 ASSERT(handle != NULL);
2506
2507 map = (dhd_pktid_map_t *)handle;
2508
2509 DHD_PKTID_LOCK(map->pktid_lock, flags);
2510
2511 if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
2512 DHD_ERROR(("%s:%d: Error! Try to free invalid pktid<%u>, pkttype<%d>\n",
2513 __FUNCTION__, __LINE__, nkey, pkttype));
2514 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2515 #ifdef DHD_FW_COREDUMP
2516 if (dhd->memdump_enabled) {
2517 /* collect core dump */
2518 dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2519 dhd_bus_mem_dump(dhd);
2520 }
2521 #else
2522 ASSERT(0);
2523 #endif /* DHD_FW_COREDUMP */
2524 return NULL;
2525 }
2526
2527 locker = &map->lockers[nkey];
2528
2529 #if defined(DHD_PKTID_AUDIT_MAP)
2530 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* Audit duplicate FREE */
2531 #endif /* DHD_PKTID_AUDIT_MAP */
2532
2533 /* Debug check for cloned numbered key */
2534 if (locker->state == LOCKER_IS_FREE) {
2535 DHD_ERROR(("%s:%d: Error! freeing already freed invalid pktid<%u>\n",
2536 __FUNCTION__, __LINE__, nkey));
2537 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2538 #ifdef DHD_FW_COREDUMP
2539 if (dhd->memdump_enabled) {
2540 /* collect core dump */
2541 dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2542 dhd_bus_mem_dump(dhd);
2543 }
2544 #else
2545 ASSERT(0);
2546 #endif /* DHD_FW_COREDUMP */
2547 return NULL;
2548 }
2549
2550 	/* Check the colour of the buffer, i.e. a buffer posted for TX
2551 	 * should be freed on TX completion. Similarly, a buffer posted for
2552 	 * IOCTL should be freed on IOCTL completion, etc.
2553 */
2554 if ((pkttype != PKTTYPE_NO_CHECK) && (locker->pkttype != pkttype)) {
2555
2556 DHD_ERROR(("%s:%d: Error! Invalid Buffer Free for pktid<%u> \n",
2557 __FUNCTION__, __LINE__, nkey));
2558 #ifdef BCMDMA64OSL
2559 PHYSADDRTOULONG(locker->pa, locker_addr);
2560 #else
2561 locker_addr = PHYSADDRLO(locker->pa);
2562 #endif /* BCMDMA64OSL */
2563 DHD_ERROR(("%s:%d: locker->state <%d>, locker->pkttype <%d>,"
2564 "pkttype <%d> locker->pa <0x%llx> \n",
2565 __FUNCTION__, __LINE__, locker->state, locker->pkttype,
2566 pkttype, locker_addr));
2567 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2568 #ifdef DHD_FW_COREDUMP
2569 if (dhd->memdump_enabled) {
2570 /* collect core dump */
2571 dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2572 dhd_bus_mem_dump(dhd);
2573 }
2574 #else
2575 ASSERT(0);
2576 #endif /* DHD_FW_COREDUMP */
2577 return NULL;
2578 }
2579
2580 if (rsv_locker == DHD_PKTID_FREE_LOCKER) {
2581 map->avail++;
2582 map->keys[map->avail] = nkey; /* make this numbered key available */
2583 locker->state = LOCKER_IS_FREE; /* open and free Locker */
2584 } else {
2585 /* pktid will be reused, but the locker does not have a valid pkt */
2586 locker->state = LOCKER_IS_RSVD;
2587 }
2588
2589 #if defined(DHD_PKTID_AUDIT_MAP)
2590 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
2591 #endif /* DHD_PKTID_AUDIT_MAP */
2592 #ifdef DHD_MAP_PKTID_LOGGING
2593 DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap, locker->pa, nkey,
2594 (uint32)locker->len, pkttype);
2595 #endif /* DHD_MAP_PKTID_LOGGING */
2596
2597 *pa = locker->pa; /* return contents of locker */
2598 *len = (uint32)locker->len;
2599 *dmah = locker->dmah;
2600 *secdma = locker->secdma;
2601
2602 pkt = locker->pkt;
2603 locker->pkt = NULL; /* Clear pkt */
2604 locker->len = 0;
2605
2606 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2607
2608 return pkt;
2609 }
2610
2611 #else /* ! DHD_PCIE_PKTID */
2612
2613 typedef struct pktlist {
2614 PKT_LIST *tx_pkt_list; /* list for tx packets */
2615 PKT_LIST *rx_pkt_list; /* list for rx packets */
2616 PKT_LIST *ctrl_pkt_list; /* list for ioctl/event buf post */
2617 } pktlists_t;
2618
2619 /*
2620  * Given that each workitem only carries a 32bit pktid, only 32bit hosts can
2621  * use a one-to-one mapping between a 32bit pktptr and a 32bit pktid.
2622 *
2623 * - When PKTIDMAP is not used, DHD_NATIVE_TO_PKTID variants will never fail.
2624 * - Neither DHD_NATIVE_TO_PKTID nor DHD_PKTID_TO_NATIVE need to be protected by
2625 * a lock.
2626 * - Hence DHD_PKTID_INVALID is not defined when DHD_PCIE_PKTID is undefined.
2627 */
2628 #define DHD_PKTID32(pktptr32) ((uint32)(pktptr32))
2629 #define DHD_PKTPTR32(pktid32) ((void *)(pktid32))
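/*
 * On such 32bit hosts the mapping is the identity, so the round trip below
 * holds by construction (illustrative only):
 *
 *	uint32 pktid = DHD_PKTID32(pkt);	// pktid is the pointer value itself
 *	void *same = DHD_PKTPTR32(pktid);	// same == pkt
 */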
2630
2631 static INLINE uint32 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
2632 dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
2633 dhd_pkttype_t pkttype);
2634 static INLINE void * dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
2635 dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
2636 dhd_pkttype_t pkttype);
2637
2638 static dhd_pktid_map_handle_t *
2639 dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
2640 {
2641 osl_t *osh = dhd->osh;
2642 pktlists_t *handle = NULL;
2643
2644 if ((handle = (pktlists_t *) MALLOCZ(osh, sizeof(pktlists_t))) == NULL) {
2645 DHD_ERROR(("%s:%d: MALLOC failed for lists allocation, size=%d\n",
2646 __FUNCTION__, __LINE__, sizeof(pktlists_t)));
2647 goto error_done;
2648 }
2649
2650 if ((handle->tx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
2651 DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
2652 __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
2653 goto error;
2654 }
2655
2656 if ((handle->rx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
2657 DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
2658 __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
2659 goto error;
2660 }
2661
2662 if ((handle->ctrl_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
2663 DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
2664 __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
2665 goto error;
2666 }
2667
2668 PKTLIST_INIT(handle->tx_pkt_list);
2669 PKTLIST_INIT(handle->rx_pkt_list);
2670 PKTLIST_INIT(handle->ctrl_pkt_list);
2671
2672 return (dhd_pktid_map_handle_t *) handle;
2673
2674 error:
2675 if (handle->ctrl_pkt_list) {
2676 MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
2677 }
2678
2679 if (handle->rx_pkt_list) {
2680 MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
2681 }
2682
2683 if (handle->tx_pkt_list) {
2684 MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
2685 }
2686
2687 if (handle) {
2688 MFREE(osh, handle, sizeof(pktlists_t));
2689 }
2690
2691 error_done:
2692 return (dhd_pktid_map_handle_t *)NULL;
2693 }
2694
2695 static void
2696 dhd_pktid_map_reset(dhd_pub_t *dhd, pktlists_t *handle)
2697 {
2698 osl_t *osh = dhd->osh;
2699
2700 if (handle->ctrl_pkt_list) {
2701 PKTLIST_FINI(handle->ctrl_pkt_list);
2702 MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
2703 }
2704
2705 if (handle->rx_pkt_list) {
2706 PKTLIST_FINI(handle->rx_pkt_list);
2707 MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
2708 }
2709
2710 if (handle->tx_pkt_list) {
2711 PKTLIST_FINI(handle->tx_pkt_list);
2712 MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
2713 }
2714 }
2715
2716 static void
2717 dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map)
2718 {
2719 osl_t *osh = dhd->osh;
2720 pktlists_t *handle = (pktlists_t *) map;
2721
2722 ASSERT(handle != NULL);
2723 if (handle == (pktlists_t *)NULL) {
2724 return;
2725 }
2726
2727 dhd_pktid_map_reset(dhd, handle);
2728
2729 if (handle) {
2730 MFREE(osh, handle, sizeof(pktlists_t));
2731 }
2732 }
2733
2734 /** Save dma parameters into the packet's pkttag and convert a pktptr to pktid */
2735 static INLINE uint32
2736 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
2737 dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
2738 dhd_pkttype_t pkttype)
2739 {
2740 pktlists_t *handle = (pktlists_t *) map;
2741 ASSERT(pktptr32 != NULL);
2742 DHD_PKT_SET_DMA_LEN(pktptr32, dma_len);
2743 DHD_PKT_SET_DMAH(pktptr32, dmah);
2744 DHD_PKT_SET_PA(pktptr32, pa);
2745 DHD_PKT_SET_SECDMA(pktptr32, secdma);
2746
2747 if (pkttype == PKTTYPE_DATA_TX) {
2748 PKTLIST_ENQ(handle->tx_pkt_list, pktptr32);
2749 } else if (pkttype == PKTTYPE_DATA_RX) {
2750 PKTLIST_ENQ(handle->rx_pkt_list, pktptr32);
2751 } else {
2752 PKTLIST_ENQ(handle->ctrl_pkt_list, pktptr32);
2753 }
2754
2755 return DHD_PKTID32(pktptr32);
2756 }
2757
2758 /** Convert a pktid to pktptr and retrieve saved dma parameters from packet */
2759 static INLINE void *
2760 dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
2761 dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
2762 dhd_pkttype_t pkttype)
2763 {
2764 pktlists_t *handle = (pktlists_t *) map;
2765 void *pktptr32;
2766
2767 ASSERT(pktid32 != 0U);
2768 pktptr32 = DHD_PKTPTR32(pktid32);
2769 *dma_len = DHD_PKT_GET_DMA_LEN(pktptr32);
2770 *dmah = DHD_PKT_GET_DMAH(pktptr32);
2771 *pa = DHD_PKT_GET_PA(pktptr32);
2772 *secdma = DHD_PKT_GET_SECDMA(pktptr32);
2773
2774 if (pkttype == PKTTYPE_DATA_TX) {
2775 PKTLIST_UNLINK(handle->tx_pkt_list, pktptr32);
2776 } else if (pkttype == PKTTYPE_DATA_RX) {
2777 PKTLIST_UNLINK(handle->rx_pkt_list, pktptr32);
2778 } else {
2779 PKTLIST_UNLINK(handle->ctrl_pkt_list, pktptr32);
2780 }
2781
2782 return pktptr32;
2783 }
2784
2785 #define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype) DHD_PKTID32(pkt)
2786
2787 #define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dma_dir, dmah, secdma, pkttype) \
2788 ({ BCM_REFERENCE(dhd); BCM_REFERENCE(nkey); BCM_REFERENCE(dma_dir); \
2789 dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
2790 (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
2791 })
2792
2793 #define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dma_dir, dmah, secdma, pkttype) \
2794 ({ BCM_REFERENCE(dhd); BCM_REFERENCE(dma_dir); \
2795 dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
2796 (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
2797 })
2798
2799 #define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
2800 ({ BCM_REFERENCE(dhd); BCM_REFERENCE(pkttype); \
2801 dhd_pktid_to_native((dhd_pktid_map_handle_t *) map, (uint32)(pktid), \
2802 (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
2803 (void **)&secdma, (dhd_pkttype_t)(pkttype)); \
2804 })
2805
2806 #define DHD_PKTID_AVAIL(map) (~0)
2807
2808 #endif /* ! DHD_PCIE_PKTID */
2809
2810 /* +------------------ End of PCIE DHD PKTID MAPPER -----------------------+ */
2811
2812 /**
2813 * The PCIE FD protocol layer is constructed in two phases:
2814 * Phase 1. dhd_prot_attach()
2815 * Phase 2. dhd_prot_init()
2816 *
2817 * dhd_prot_attach() - Allocates a dhd_prot_t object and resets all its fields.
2818  * All Common rings are also attached (msgbuf_ring_t objects are allocated
2819 * with DMA-able buffers).
2820 * All dhd_dma_buf_t objects are also allocated here.
2821 *
2822  * As dhd_prot_attach is invoked before the pcie_shared object is read, any
2823 * initialization of objects that requires information advertized by the dongle
2824 * may not be performed here.
2825  * E.g. the number of TxPost flowrings is not known at this point, nor do
2826  * we know which form of D2H DMA sync mechanism is advertised by the dongle, or
2827 * whether the dongle supports DMA-ing of WR/RD indices for the H2D and/or D2H
2828 * rings (common + flow).
2829 *
2830 * dhd_prot_init() is invoked after the bus layer has fetched the information
2831 * advertized by the dongle in the pcie_shared_t.
2832 */
2833 int
2834 dhd_prot_attach(dhd_pub_t *dhd)
2835 {
2836 osl_t *osh = dhd->osh;
2837 dhd_prot_t *prot;
2838
2839 	/* FW is going to DMA extended trap data,
2840 	 * so allocate a buffer for the maximum extended trap data.
2841 */
2842 uint32 trap_buf_len = BCMPCIE_EXT_TRAP_DATA_MAXLEN;
2843
2844 /* Allocate prot structure */
2845 if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT,
2846 sizeof(dhd_prot_t)))) {
2847 DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
2848 goto fail;
2849 }
2850 memset(prot, 0, sizeof(*prot));
2851
2852 prot->osh = osh;
2853 dhd->prot = prot;
2854
2855 	/* DMAing ring completions supported? FALSE by default */
2856 dhd->dma_d2h_ring_upd_support = FALSE;
2857 dhd->dma_h2d_ring_upd_support = FALSE;
2858 dhd->dma_ring_upd_overwrite = FALSE;
2859
2860 dhd->hwa_inited = 0;
2861 dhd->idma_inited = 0;
2862 dhd->ifrm_inited = 0;
2863 dhd->dar_inited = 0;
2864
2865 /* Common Ring Allocations */
2866
2867 /* Ring 0: H2D Control Submission */
2868 if (dhd_prot_ring_attach(dhd, &prot->h2dring_ctrl_subn, "h2dctrl",
2869 H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE,
2870 BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) != BCME_OK) {
2871 DHD_ERROR(("%s: dhd_prot_ring_attach H2D Ctrl Submission failed\n",
2872 __FUNCTION__));
2873 goto fail;
2874 }
2875
2876 /* Ring 1: H2D Receive Buffer Post */
2877 if (dhd_prot_ring_attach(dhd, &prot->h2dring_rxp_subn, "h2drxp",
2878 H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE,
2879 BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT) != BCME_OK) {
2880 DHD_ERROR(("%s: dhd_prot_ring_attach H2D RxPost failed\n",
2881 __FUNCTION__));
2882 goto fail;
2883 }
2884
2885 /* Ring 2: D2H Control Completion */
2886 if (dhd_prot_ring_attach(dhd, &prot->d2hring_ctrl_cpln, "d2hctrl",
2887 D2HRING_CTRL_CMPLT_MAX_ITEM, D2HRING_CTRL_CMPLT_ITEMSIZE,
2888 BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE) != BCME_OK) {
2889 DHD_ERROR(("%s: dhd_prot_ring_attach D2H Ctrl Completion failed\n",
2890 __FUNCTION__));
2891 goto fail;
2892 }
2893
2894 /* Ring 3: D2H Transmit Complete */
2895 if (dhd_prot_ring_attach(dhd, &prot->d2hring_tx_cpln, "d2htxcpl",
2896 D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE,
2897 BCMPCIE_D2H_MSGRING_TX_COMPLETE) != BCME_OK) {
2898 DHD_ERROR(("%s: dhd_prot_ring_attach D2H Tx Completion failed\n",
2899 __FUNCTION__));
2900 goto fail;
2901
2902 }
2903
2904 /* Ring 4: D2H Receive Complete */
2905 if (dhd_prot_ring_attach(dhd, &prot->d2hring_rx_cpln, "d2hrxcpl",
2906 D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE,
2907 BCMPCIE_D2H_MSGRING_RX_COMPLETE) != BCME_OK) {
2908 DHD_ERROR(("%s: dhd_prot_ring_attach D2H Rx Completion failed\n",
2909 __FUNCTION__));
2910 goto fail;
2911
2912 }
2913
2914 /*
2915 	 * The max number of flowrings is not yet known. msgbuf_ring_t objects with
2916 	 * DMA-able buffers for flowrings will be instantiated in dhd_prot_init().
2917 * See dhd_prot_flowrings_pool_attach()
2918 */
2919 /* ioctl response buffer */
2920 if (dhd_dma_buf_alloc(dhd, &prot->retbuf, IOCT_RETBUF_SIZE)) {
2921 goto fail;
2922 }
2923
2924 /* IOCTL request buffer */
2925 if (dhd_dma_buf_alloc(dhd, &prot->ioctbuf, IOCT_RETBUF_SIZE)) {
2926 goto fail;
2927 }
2928
2929 	/* Host TS request buffer (one buffer for now) */
2930 if (dhd_dma_buf_alloc(dhd, &prot->hostts_req_buf, CTRLSUB_HOSTTS_MEESAGE_SIZE)) {
2931 goto fail;
2932 }
2933 prot->hostts_req_buf_inuse = FALSE;
2934
2935 /* Scratch buffer for dma rx offset */
2936 #ifdef BCM_HOST_BUF
2937 if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf,
2938 ROUNDUP(DMA_D2H_SCRATCH_BUF_LEN, 16) + DMA_HOST_BUFFER_LEN)) {
2939 #else
2940 if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN)) {
2941
2942 #endif /* BCM_HOST_BUF */
2943
2944 goto fail;
2945 }
2946
2947 /* scratch buffer bus throughput measurement */
2948 if (dhd_dma_buf_alloc(dhd, &prot->host_bus_throughput_buf, DHD_BUS_TPUT_BUF_LEN)) {
2949 goto fail;
2950 }
2951
2952 #ifdef DHD_RX_CHAINING
2953 dhd_rxchain_reset(&prot->rxchain);
2954 #endif // endif
2955
2956 prot->pktid_ctrl_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_CTRL_PKTID);
2957 if (prot->pktid_ctrl_map == NULL) {
2958 goto fail;
2959 }
2960
2961 prot->pktid_rx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_RX_PKTID);
2962 if (prot->pktid_rx_map == NULL)
2963 goto fail;
2964
2965 prot->pktid_tx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_TX_PKTID);
2966 if (prot->pktid_tx_map == NULL)
2967 goto fail;
2968
2969 #ifdef IOCTLRESP_USE_CONSTMEM
2970 prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd,
2971 DHD_FLOWRING_MAX_IOCTLRESPBUF_POST);
2972 if (prot->pktid_map_handle_ioctl == NULL) {
2973 goto fail;
2974 }
2975 #endif /* IOCTLRESP_USE_CONSTMEM */
2976
2977 #ifdef DHD_MAP_PKTID_LOGGING
2978 prot->pktid_dma_map = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
2979 if (prot->pktid_dma_map == NULL) {
2980 DHD_ERROR(("%s: failed to allocate pktid_dma_map\n",
2981 __FUNCTION__));
2982 }
2983
2984 prot->pktid_dma_unmap = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
2985 if (prot->pktid_dma_unmap == NULL) {
2986 DHD_ERROR(("%s: failed to allocate pktid_dma_unmap\n",
2987 __FUNCTION__));
2988 }
2989 #endif /* DHD_MAP_PKTID_LOGGING */
2990
2991 /* Initialize the work queues to be used by the Load Balancing logic */
2992 #if defined(DHD_LB_TXC)
2993 {
2994 void *buffer;
2995 buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
2996 if (buffer == NULL) {
2997 			DHD_ERROR(("%s: failed to allocate TXC work buffer\n", __FUNCTION__));
2998 goto fail;
2999 }
3000 bcm_workq_init(&prot->tx_compl_prod, &prot->tx_compl_cons,
3001 buffer, DHD_LB_WORKQ_SZ);
3002 prot->tx_compl_prod_sync = 0;
3003 DHD_INFO(("%s: created tx_compl_workq <%p,%d>\n",
3004 __FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
3005 }
3006 #endif /* DHD_LB_TXC */
3007
3008 #if defined(DHD_LB_RXC)
3009 {
3010 void *buffer;
3011 buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
3012 if (buffer == NULL) {
3013 DHD_ERROR(("%s: failed to allocate RXC work buffer\n", __FUNCTION__));
3014 goto fail;
3015 }
3016 bcm_workq_init(&prot->rx_compl_prod, &prot->rx_compl_cons,
3017 buffer, DHD_LB_WORKQ_SZ);
3018 prot->rx_compl_prod_sync = 0;
3019 DHD_INFO(("%s: created rx_compl_workq <%p,%d>\n",
3020 __FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
3021 }
3022 #endif /* DHD_LB_RXC */
3023
3024 /* Initialize trap buffer */
3025 if (dhd_dma_buf_alloc(dhd, &dhd->prot->fw_trap_buf, trap_buf_len)) {
3026 		DHD_ERROR(("%s: dhd_init_trap_buffer failed\n", __FUNCTION__));
3027 goto fail;
3028 }
3029
3030 return BCME_OK;
3031
3032 fail:
3033
3034 if (prot) {
3035 /* Free up all allocated memories */
3036 dhd_prot_detach(dhd);
3037 }
3038
3039 return BCME_NOMEM;
3040 } /* dhd_prot_attach */
3041
3042 static int
3043 dhd_alloc_host_scbs(dhd_pub_t *dhd)
3044 {
3045 int ret = BCME_OK;
3046 sh_addr_t base_addr;
3047 dhd_prot_t *prot = dhd->prot;
3048 uint32 host_scb_size = 0;
3049
3050 if (dhd->hscb_enable) {
3051 /* read number of bytes to allocate from F/W */
3052 dhd_bus_cmn_readshared(dhd->bus, &host_scb_size, HOST_SCB_ADDR, 0);
3053 if (host_scb_size) {
3054 /* alloc array of host scbs */
3055 ret = dhd_dma_buf_alloc(dhd, &prot->host_scb_buf, host_scb_size);
3056 /* write host scb address to F/W */
3057 if (ret == BCME_OK) {
3058 dhd_base_addr_htolpa(&base_addr, prot->host_scb_buf.pa);
3059 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3060 HOST_SCB_ADDR, 0);
3061 } else {
3062 DHD_TRACE(("dhd_alloc_host_scbs: dhd_dma_buf_alloc error\n"));
3063 }
3064 } else {
3065 DHD_TRACE(("dhd_alloc_host_scbs: host_scb_size is 0.\n"));
3066 }
3067 } else {
3068 DHD_TRACE(("dhd_alloc_host_scbs: Host scb not supported in F/W.\n"));
3069 }
3070
3071 return ret;
3072 }
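/*
 * The host SCB handshake implemented above, summarized (no new behaviour):
 *   1. FW advertizes the required SCB size via the HOST_SCB_ADDR shared entry.
 *   2. The host allocates a DMA-able buffer of that size (dhd_dma_buf_alloc).
 *   3. The host writes the buffer's bus address back through HOST_SCB_ADDR so
 *      the dongle can DMA its software control blocks into host memory.
 */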
3073
3074 void
3075 dhd_set_host_cap(dhd_pub_t *dhd)
3076 {
3077 uint32 data = 0;
3078 dhd_prot_t *prot = dhd->prot;
3079
3080 if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
3081 if (dhd->h2d_phase_supported) {
3082 data |= HOSTCAP_H2D_VALID_PHASE;
3083 if (dhd->force_dongletrap_on_bad_h2d_phase)
3084 data |= HOSTCAP_H2D_ENABLE_TRAP_ON_BADPHASE;
3085 }
3086 if (prot->host_ipc_version > prot->device_ipc_version)
3087 prot->active_ipc_version = prot->device_ipc_version;
3088 else
3089 prot->active_ipc_version = prot->host_ipc_version;
3090
3091 data |= prot->active_ipc_version;
3092
3093 if (dhdpcie_bus_get_pcie_hostready_supported(dhd->bus)) {
3094 DHD_INFO(("Advertise Hostready Capability\n"));
3095 data |= HOSTCAP_H2D_ENABLE_HOSTRDY;
3096 }
3097 {
3098 /* Disable DS altogether */
3099 data |= HOSTCAP_DS_NO_OOB_DW;
3100 dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_NONE);
3101 }
3102
3103 /* Indicate support for extended trap data */
3104 data |= HOSTCAP_EXTENDED_TRAP_DATA;
3105
3106 /* Indicate support for TX status metadata */
3107 if (dhd->pcie_txs_metadata_enable != 0)
3108 data |= HOSTCAP_TXSTATUS_METADATA;
3109
3110 /* Enable fast delete ring in firmware if supported */
3111 if (dhd->fast_delete_ring_support) {
3112 data |= HOSTCAP_FAST_DELETE_RING;
3113 }
3114
3115 if (dhdpcie_bus_get_pcie_hwa_supported(dhd->bus)) {
3116 DHD_ERROR(("HWA inited\n"));
3117 /* TODO: Is hostcap needed? */
3118 dhd->hwa_inited = TRUE;
3119 }
3120
3121 if (dhdpcie_bus_get_pcie_idma_supported(dhd->bus)) {
3122 DHD_ERROR(("IDMA inited\n"));
3123 data |= HOSTCAP_H2D_IDMA;
3124 dhd->idma_inited = TRUE;
3125 }
3126
3127 if (dhdpcie_bus_get_pcie_ifrm_supported(dhd->bus)) {
3128 DHD_ERROR(("IFRM Inited\n"));
3129 data |= HOSTCAP_H2D_IFRM;
3130 dhd->ifrm_inited = TRUE;
3131 dhd->dma_h2d_ring_upd_support = FALSE;
3132 dhd_prot_dma_indx_free(dhd);
3133 }
3134
3135 if (dhdpcie_bus_get_pcie_dar_supported(dhd->bus)) {
3136 DHD_ERROR(("DAR doorbell Use\n"));
3137 data |= HOSTCAP_H2D_DAR;
3138 dhd->dar_inited = TRUE;
3139 }
3140
3141 data |= HOSTCAP_UR_FW_NO_TRAP;
3142
3143 if (dhd->hscb_enable) {
3144 data |= HOSTCAP_HSCB;
3145 }
3146
3147 #ifdef EWP_EDL
3148 if (dhd->dongle_edl_support) {
3149 data |= HOSTCAP_EDL_RING;
3150 DHD_ERROR(("Enable EDL host cap\n"));
3151 } else {
3152 DHD_ERROR(("DO NOT SET EDL host cap\n"));
3153 }
3154 #endif /* EWP_EDL */
3155
3156 #ifdef DHD_HP2P
3157 if (dhd->hp2p_capable) {
3158 data |= HOSTCAP_PKT_TIMESTAMP;
3159 data |= HOSTCAP_PKT_HP2P;
3160 DHD_ERROR(("Enable HP2P in host cap\n"));
3161 } else {
3162 DHD_ERROR(("HP2P not enabled in host cap\n"));
3163 }
3164 #endif // endif
3165
3166 #ifdef DHD_DB0TS
3167 if (dhd->db0ts_capable) {
3168 data |= HOSTCAP_DB0_TIMESTAMP;
3169 DHD_ERROR(("Enable DB0 TS in host cap\n"));
3170 } else {
3171 DHD_ERROR(("DB0 TS not enabled in host cap\n"));
3172 }
3173 #endif /* DHD_DB0TS */
3174 if (dhd->extdtxs_in_txcpl) {
3175 DHD_ERROR(("Enable hostcap: EXTD TXS in txcpl\n"));
3176 data |= HOSTCAP_PKT_TXSTATUS;
3177 }
3178 else {
3179 			DHD_ERROR(("Do not enable hostcap: EXTD TXS in txcpl\n"));
3180 }
3181
3182 DHD_INFO(("%s:Active Ver:%d, Host Ver:%d, FW Ver:%d\n",
3183 __FUNCTION__,
3184 prot->active_ipc_version, prot->host_ipc_version,
3185 prot->device_ipc_version));
3186
3187 dhd_bus_cmn_writeshared(dhd->bus, &data, sizeof(uint32), HOST_API_VERSION, 0);
3188 dhd_bus_cmn_writeshared(dhd->bus, &prot->fw_trap_buf.pa,
3189 sizeof(prot->fw_trap_buf.pa), DNGL_TO_HOST_TRAP_ADDR, 0);
3190 }
3191
3192 }
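/*
 * Illustrative composition of the hostcap word written to HOST_API_VERSION
 * above (a hypothetical configuration; the real value depends on the build
 * options and the dongle-advertized features):
 *
 *   data  = prot->active_ipc_version;     // negotiated IPC revision
 *   data |= HOSTCAP_DS_NO_OOB_DW;         // deep-sleep OOB device-wake disabled
 *   data |= HOSTCAP_EXTENDED_TRAP_DATA;   // always set above
 *   data |= HOSTCAP_H2D_ENABLE_HOSTRDY;   // only if hostready is supported
 */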
3193
3194 /**
3195 * dhd_prot_init - second stage of dhd_prot_attach. Now that the dongle has
3196 * completed its initialization of the pcie_shared structure, we may now fetch
3197 * the dongle advertized features and adjust the protocol layer accordingly.
3198 *
3199 * dhd_prot_init() may be invoked again after a dhd_prot_reset().
3200 */
3201 int
3202 dhd_prot_init(dhd_pub_t *dhd)
3203 {
3204 sh_addr_t base_addr;
3205 dhd_prot_t *prot = dhd->prot;
3206 int ret = 0;
3207 uint32 idmacontrol;
3208 uint32 waitcount = 0;
3209
3210 #ifdef WL_MONITOR
3211 dhd->monitor_enable = FALSE;
3212 #endif /* WL_MONITOR */
3213
3214 /**
3215 * A user defined value can be assigned to global variable h2d_max_txpost via
3216 * 1. DHD IOVAR h2d_max_txpost, before firmware download
3217 * 2. module parameter h2d_max_txpost
3218 	 * prot->h2d_max_txpost defaults to H2DRING_TXPOST_MAX_ITEM
3219 	 * if the user has not set a value by one of the above methods.
3220 */
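	/*
	 * Example (hypothetical values; the exact invocation depends on the
	 * platform build and tooling, so treat this as a sketch only):
	 *   - module parameter at load time:  insmod dhd.ko h2d_max_txpost=512
	 *   - DHD IOVAR before FW download:   dhd -i wlan0 h2d_max_txpost 512
	 */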
3221 prot->h2d_max_txpost = (uint16)h2d_max_txpost;
3222
3223 DHD_ERROR(("%s:%d: h2d_max_txpost = %d\n", __FUNCTION__, __LINE__, prot->h2d_max_txpost));
3224
3225 /* Read max rx packets supported by dongle */
3226 dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0);
3227 if (prot->max_rxbufpost == 0) {
3228 /* This would happen if the dongle firmware is not */
3229 /* using the latest shared structure template */
3230 prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST;
3231 }
3232 DHD_ERROR(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));
3233
3234 	/* Initialize fields individually; a bzero() here would blow away the dma pointers */
3235 prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST;
3236 prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST;
3237 prot->max_infobufpost = DHD_H2D_INFORING_MAX_BUF_POST;
3238 prot->max_tsbufpost = DHD_MAX_TSBUF_POST;
3239
3240 prot->cur_ioctlresp_bufs_posted = 0;
3241 OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
3242 prot->data_seq_no = 0;
3243 prot->ioctl_seq_no = 0;
3244 prot->rxbufpost = 0;
3245 prot->cur_event_bufs_posted = 0;
3246 prot->ioctl_state = 0;
3247 prot->curr_ioctl_cmd = 0;
3248 prot->cur_ts_bufs_posted = 0;
3249 prot->infobufpost = 0;
3250
3251 prot->dmaxfer.srcmem.va = NULL;
3252 prot->dmaxfer.dstmem.va = NULL;
3253 prot->dmaxfer.in_progress = FALSE;
3254
3255 prot->metadata_dbg = FALSE;
3256 prot->rx_metadata_offset = 0;
3257 prot->tx_metadata_offset = 0;
3258 prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT;
3259
3260 /* To catch any rollover issues fast, starting with higher ioctl_trans_id */
3261 prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
3262 prot->ioctl_state = 0;
3263 prot->ioctl_status = 0;
3264 prot->ioctl_resplen = 0;
3265 prot->ioctl_received = IOCTL_WAIT;
3266
3267 /* Initialize Common MsgBuf Rings */
3268
3269 prot->device_ipc_version = dhd->bus->api.fw_rev;
3270 prot->host_ipc_version = PCIE_SHARED_VERSION;
3271 prot->no_tx_resource = FALSE;
3272
3273 /* Init the host API version */
3274 dhd_set_host_cap(dhd);
3275
3276 /* alloc and configure scb host address for dongle */
3277 if ((ret = dhd_alloc_host_scbs(dhd))) {
3278 return ret;
3279 }
3280
3281 /* Register the interrupt function upfront */
3282 /* remove corerev checks in data path */
3283 /* do this after host/fw negotiation for DAR */
3284 prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus);
3285 prot->mb_2_ring_fn = dhd_bus_get_mbintr_2_fn(dhd->bus);
3286
3287 dhd->bus->_dar_war = (dhd->bus->sih->buscorerev < 64) ? TRUE : FALSE;
3288
3289 dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn);
3290 dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn);
3291 dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln);
3292
3293 	/* Make it compatible with pre-rev7 firmware */
3294 if (prot->active_ipc_version < PCIE_SHARED_VERSION_7) {
3295 prot->d2hring_tx_cpln.item_len =
3296 D2HRING_TXCMPLT_ITEMSIZE_PREREV7;
3297 prot->d2hring_rx_cpln.item_len =
3298 D2HRING_RXCMPLT_ITEMSIZE_PREREV7;
3299 }
3300 dhd_prot_ring_init(dhd, &prot->d2hring_tx_cpln);
3301 dhd_prot_ring_init(dhd, &prot->d2hring_rx_cpln);
3302
3303 dhd_prot_d2h_sync_init(dhd);
3304
3305 dhd_prot_h2d_sync_init(dhd);
3306
3307 /* init the scratch buffer */
3308 dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_scratch_buf.pa);
3309 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3310 D2H_DMA_SCRATCH_BUF, 0);
3311 dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.len,
3312 sizeof(prot->d2h_dma_scratch_buf.len), D2H_DMA_SCRATCH_BUF_LEN, 0);
3313
3314 /* If supported by the host, indicate the memory block
3315 * for completion writes / submission reads to shared space
3316 */
3317 if (dhd->dma_d2h_ring_upd_support) {
3318 dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_wr_buf.pa);
3319 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3320 D2H_DMA_INDX_WR_BUF, 0);
3321 dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_rd_buf.pa);
3322 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3323 H2D_DMA_INDX_RD_BUF, 0);
3324 }
3325
3326 if (dhd->dma_h2d_ring_upd_support || IDMA_ENAB(dhd)) {
3327 dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_wr_buf.pa);
3328 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3329 H2D_DMA_INDX_WR_BUF, 0);
3330 dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_rd_buf.pa);
3331 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3332 D2H_DMA_INDX_RD_BUF, 0);
3333 }
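	/*
	 * Summary of the index DMA buffers programmed above (a reading of the two
	 * blocks above, not additional behaviour):
	 *   d2h_dma_indx_wr_buf / h2d_dma_indx_rd_buf : written by the dongle when
	 *       dma_d2h_ring_upd_support is set (D2H WR and H2D RD indices).
	 *   h2d_dma_indx_wr_buf / d2h_dma_indx_rd_buf : updated by the host and
	 *       fetched by the dongle when dma_h2d_ring_upd_support or IDMA is set
	 *       (H2D WR and D2H RD indices).
	 */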
3334 /* Signal to the dongle that common ring init is complete */
3335 if (dhd->hostrdy_after_init)
3336 dhd_bus_hostready(dhd->bus);
3337
3338 /*
3339 * If the DMA-able buffers for flowring needs to come from a specific
3340 * contiguous memory region, then setup prot->flowrings_dma_buf here.
3341 * dhd_prot_flowrings_pool_attach() will carve out DMA-able buffers from
3342 * this contiguous memory region, for each of the flowrings.
3343 */
3344
3345 /* Pre-allocate pool of msgbuf_ring for flowrings */
3346 if (dhd_prot_flowrings_pool_attach(dhd) != BCME_OK) {
3347 return BCME_ERROR;
3348 }
3349
3350 /* If IFRM is enabled, wait for FW to setup the DMA channel */
3351 if (IFRM_ENAB(dhd)) {
3352 dhd_base_addr_htolpa(&base_addr, prot->h2d_ifrm_indx_wr_buf.pa);
3353 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3354 H2D_IFRM_INDX_WR_BUF, 0);
3355 }
3356
3357 	/* If IDMA is enabled and initialized, wait for the FW to set up the IDMA
3358 	 * descriptors. Wait just before configuring the doorbell.
3359 	 */
3360 #ifdef BCMQT
3361 #define IDMA_ENABLE_WAIT 100
3362 #else
3363 #define IDMA_ENABLE_WAIT 10
3364 #endif // endif
3365 if (IDMA_ACTIVE(dhd)) {
3366 		/* wait for the idma_en bit in the IDMAControl register to be set */
3367 		/* loop until idma_en is set or the wait count expires */
3368 uint buscorerev = dhd->bus->sih->buscorerev;
3369 idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
3370 IDMAControl(buscorerev), 0, 0);
3371 while (!(idmacontrol & PCIE_IDMA_MODE_EN(buscorerev)) &&
3372 (waitcount++ < IDMA_ENABLE_WAIT)) {
3373
3374 			DHD_ERROR(("iDMA not enabled yet, waiting 1 ms c=%d IDMAControl = %08x\n",
3375 waitcount, idmacontrol));
3376 #ifdef BCMQT
3377 OSL_DELAY(200000); /* 200msec for BCMQT */
3378 #else
3379 OSL_DELAY(1000); /* 1ms as its onetime only */
3380 #endif // endif
3381 idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
3382 IDMAControl(buscorerev), 0, 0);
3383 }
3384
3385 if (waitcount < IDMA_ENABLE_WAIT) {
3386 DHD_ERROR(("iDMA enabled PCIEControl = %08x\n", idmacontrol));
3387 } else {
3388 DHD_ERROR(("Error: wait for iDMA timed out wait=%d IDMAControl = %08x\n",
3389 waitcount, idmacontrol));
3390 return BCME_ERROR;
3391 }
3392 }
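	/*
	 * Worked example of the wait budget above: with the default
	 * IDMA_ENABLE_WAIT of 10 and a 1 ms OSL_DELAY per iteration this polls
	 * IDMAControl for roughly 10 ms; on BCMQT emulation it is 100 iterations
	 * of 200 ms, i.e. up to about 20 s, before giving up with BCME_ERROR.
	 */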
3393
3394 /* Host should configure soft doorbells if needed ... here */
3395
3396 /* Post to dongle host configured soft doorbells */
3397 dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd);
3398
3399 dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
3400 dhd_msgbuf_rxbuf_post_event_bufs(dhd);
3401
3402 prot->no_retry = FALSE;
3403 prot->no_aggr = FALSE;
3404 prot->fixed_rate = FALSE;
3405
3406 /*
3407 * Note that any communication with the Dongle should be added
3408 	 * below this point. Any other host data structure initialization that
3409 	 * needs to be done before the DPC starts executing should be done
3410 	 * before this point.
3411 	 * Once we start sending H2D requests to the Dongle, the Dongle may
3412 	 * respond immediately, so the DPC context that handles the
3413 	 * D2H response could preempt the context in which dhd_prot_init is running.
3414 	 * We want to ensure that all the host-side part of dhd_prot_init is
3415 * done before that.
3416 */
3417
3418 	/* See if info rings can be created; info rings should be created
3419 	 * only if the dongle does not support EDL
3420 */
3421 #ifdef EWP_EDL
3422 if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support)
3423 #else
3424 if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
3425 #endif /* EWP_EDL */
3426 {
3427 if ((ret = dhd_prot_init_info_rings(dhd)) != BCME_OK) {
3428 /* For now log and proceed, further clean up action maybe necessary
3429 * when we have more clarity.
3430 */
3431 			DHD_ERROR(("%s: Info rings couldn't be created: Err Code %d\n",
3432 __FUNCTION__, ret));
3433 }
3434 }
3435
3436 #ifdef EWP_EDL
3437 /* Create Enhanced Debug Lane rings (EDL) if dongle supports it */
3438 if (dhd->dongle_edl_support) {
3439 if ((ret = dhd_prot_init_edl_rings(dhd)) != BCME_OK) {
3440 			DHD_ERROR(("%s: EDL rings couldn't be created: Err Code %d\n",
3441 __FUNCTION__, ret));
3442 }
3443 }
3444 #endif /* EWP_EDL */
3445
3446 #ifdef DHD_HP2P
3447 /* create HPP txcmpl/rxcmpl rings */
3448 if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_7 && dhd->hp2p_capable) {
3449 if ((ret = dhd_prot_init_hp2p_rings(dhd)) != BCME_OK) {
3450 /* For now log and proceed, further clean up action maybe necessary
3451 * when we have more clarity.
3452 */
3453 			DHD_ERROR(("%s: HP2P rings couldn't be created: Err Code %d\n",
3454 __FUNCTION__, ret));
3455 }
3456 }
3457 #endif /* DHD_HP2P */
3458
3459 return BCME_OK;
3460 } /* dhd_prot_init */
3461
3462 /**
3463 * dhd_prot_detach - PCIE FD protocol layer destructor.
3464 * Unlinks and frees all allocated protocol memory (including the dhd_prot object).
3465 */
3466 void dhd_prot_detach(dhd_pub_t *dhd)
3467 {
3468 dhd_prot_t *prot = dhd->prot;
3469
3470 /* Stop the protocol module */
3471 if (prot) {
3472
3473 /* free up all DMA-able buffers allocated during prot attach/init */
3474
3475 dhd_dma_buf_free(dhd, &prot->d2h_dma_scratch_buf);
3476 dhd_dma_buf_free(dhd, &prot->retbuf);
3477 dhd_dma_buf_free(dhd, &prot->ioctbuf);
3478 dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf);
3479 dhd_dma_buf_free(dhd, &prot->hostts_req_buf);
3480 dhd_dma_buf_free(dhd, &prot->fw_trap_buf);
3481 dhd_dma_buf_free(dhd, &prot->host_scb_buf);
3482
3483 /* DMA-able buffers for DMAing H2D/D2H WR/RD indices */
3484 dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
3485 dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_rd_buf);
3486 dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_wr_buf);
3487 dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
3488
3489 dhd_dma_buf_free(dhd, &prot->h2d_ifrm_indx_wr_buf);
3490
3491 /* Common MsgBuf Rings */
3492 dhd_prot_ring_detach(dhd, &prot->h2dring_ctrl_subn);
3493 dhd_prot_ring_detach(dhd, &prot->h2dring_rxp_subn);
3494 dhd_prot_ring_detach(dhd, &prot->d2hring_ctrl_cpln);
3495 dhd_prot_ring_detach(dhd, &prot->d2hring_tx_cpln);
3496 dhd_prot_ring_detach(dhd, &prot->d2hring_rx_cpln);
3497
3498 /* Detach each DMA-able buffer and free the pool of msgbuf_ring_t */
3499 dhd_prot_flowrings_pool_detach(dhd);
3500
3501 /* detach info rings */
3502 dhd_prot_detach_info_rings(dhd);
3503
3504 #ifdef EWP_EDL
3505 dhd_prot_detach_edl_rings(dhd);
3506 #endif // endif
3507 #ifdef DHD_HP2P
3508 /* detach HPP rings */
3509 dhd_prot_detach_hp2p_rings(dhd);
3510 #endif /* DHD_HP2P */
3511
3512 		/* If IOCTLRESP_USE_CONSTMEM is defined, IOCTL PKTs use the pktid_map_handle_ioctl
3513 		 * handler and PKT memory is allocated using alloc_ioctl_return_buffer(). Otherwise
3514 		 * they are part of the pktid_ctrl_map handler and PKT memory is allocated using
3515 		 * PKTGET_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) or PKTGET.
3516 		 * Similarly, for freeing PKT buffers DHD_NATIVE_TO_PKTID_FINI is used,
3517 		 * which calls PKTFREE_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) or PKTFREE.
3518 		 * If IOCTLRESP_USE_CONSTMEM is defined, IOCTL PKTs are instead freed using
3519 		 * DHD_NATIVE_TO_PKTID_FINI_IOCTL, which calls free_ioctl_return_buffer.
3520 */
3521 DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_ctrl_map);
3522 DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_rx_map);
3523 DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_tx_map);
3524 #ifdef IOCTLRESP_USE_CONSTMEM
3525 DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl);
3526 #endif // endif
3527 #ifdef DHD_MAP_PKTID_LOGGING
3528 DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_map);
3529 DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_unmap);
3530 #endif /* DHD_MAP_PKTID_LOGGING */
3531
3532 #if defined(DHD_LB_TXC)
3533 if (prot->tx_compl_prod.buffer)
3534 MFREE(dhd->osh, prot->tx_compl_prod.buffer,
3535 sizeof(void*) * DHD_LB_WORKQ_SZ);
3536 #endif /* DHD_LB_TXC */
3537 #if defined(DHD_LB_RXC)
3538 if (prot->rx_compl_prod.buffer)
3539 MFREE(dhd->osh, prot->rx_compl_prod.buffer,
3540 sizeof(void*) * DHD_LB_WORKQ_SZ);
3541 #endif /* DHD_LB_RXC */
3542
3543 DHD_OS_PREFREE(dhd, dhd->prot, sizeof(dhd_prot_t));
3544
3545 dhd->prot = NULL;
3546 }
3547 } /* dhd_prot_detach */
3548
3549 /**
3550 * dhd_prot_reset - Reset the protocol layer without freeing any objects.
3551 * This may be invoked to soft reboot the dongle, without having to
3552 * detach and attach the entire protocol layer.
3553 *
3554 * After dhd_prot_reset(), dhd_prot_init() may be invoked
3555 * without going through a dhd_prot_attach() phase.
3556 */
3557 void
3558 dhd_prot_reset(dhd_pub_t *dhd)
3559 {
3560 struct dhd_prot *prot = dhd->prot;
3561
3562 DHD_TRACE(("%s\n", __FUNCTION__));
3563
3564 if (prot == NULL) {
3565 return;
3566 }
3567
3568 dhd_prot_flowrings_pool_reset(dhd);
3569
3570 /* Reset Common MsgBuf Rings */
3571 dhd_prot_ring_reset(dhd, &prot->h2dring_ctrl_subn);
3572 dhd_prot_ring_reset(dhd, &prot->h2dring_rxp_subn);
3573 dhd_prot_ring_reset(dhd, &prot->d2hring_ctrl_cpln);
3574 dhd_prot_ring_reset(dhd, &prot->d2hring_tx_cpln);
3575 dhd_prot_ring_reset(dhd, &prot->d2hring_rx_cpln);
3576
3577 /* Reset info rings */
3578 if (prot->h2dring_info_subn) {
3579 dhd_prot_ring_reset(dhd, prot->h2dring_info_subn);
3580 }
3581
3582 if (prot->d2hring_info_cpln) {
3583 dhd_prot_ring_reset(dhd, prot->d2hring_info_cpln);
3584 }
3585 #ifdef EWP_EDL
3586 if (prot->d2hring_edl) {
3587 dhd_prot_ring_reset(dhd, prot->d2hring_edl);
3588 }
3589 #endif /* EWP_EDL */
3590
3591 /* Reset all DMA-able buffers allocated during prot attach */
3592 dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf);
3593 dhd_dma_buf_reset(dhd, &prot->retbuf);
3594 dhd_dma_buf_reset(dhd, &prot->ioctbuf);
3595 dhd_dma_buf_reset(dhd, &prot->host_bus_throughput_buf);
3596 dhd_dma_buf_reset(dhd, &prot->hostts_req_buf);
3597 dhd_dma_buf_reset(dhd, &prot->fw_trap_buf);
3598 dhd_dma_buf_reset(dhd, &prot->host_scb_buf);
3599
3600 dhd_dma_buf_reset(dhd, &prot->h2d_ifrm_indx_wr_buf);
3601
3602 	/* Reset all DMA-able buffers for DMAing H2D/D2H WR/RD indices */
3603 dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf);
3604 dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf);
3605 dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf);
3606 dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_wr_buf);
3607
3608 prot->rx_metadata_offset = 0;
3609 prot->tx_metadata_offset = 0;
3610
3611 prot->rxbufpost = 0;
3612 prot->cur_event_bufs_posted = 0;
3613 prot->cur_ioctlresp_bufs_posted = 0;
3614
3615 OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
3616 prot->data_seq_no = 0;
3617 prot->ioctl_seq_no = 0;
3618 prot->ioctl_state = 0;
3619 prot->curr_ioctl_cmd = 0;
3620 prot->ioctl_received = IOCTL_WAIT;
3621 /* To catch any rollover issues fast, starting with higher ioctl_trans_id */
3622 prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
3623
3624 	/* dhd_flow_rings_init is invoked from dhd_bus_start,
3625 	 * so flowrings are deleted here when the bus is being stopped
3626 */
3627 if (dhd->flow_rings_inited) {
3628 dhd_flow_rings_deinit(dhd);
3629 }
3630
3631 #ifdef DHD_HP2P
3632 if (prot->d2hring_hp2p_txcpl) {
3633 dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_txcpl);
3634 }
3635 if (prot->d2hring_hp2p_rxcpl) {
3636 dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_rxcpl);
3637 }
3638 #endif /* DHD_HP2P */
3639
3640 /* Reset PKTID map */
3641 DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_ctrl_map);
3642 DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_rx_map);
3643 DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_tx_map);
3644 #ifdef IOCTLRESP_USE_CONSTMEM
3645 DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, prot->pktid_map_handle_ioctl);
3646 #endif /* IOCTLRESP_USE_CONSTMEM */
3647 #ifdef DMAMAP_STATS
3648 dhd->dma_stats.txdata = dhd->dma_stats.txdata_sz = 0;
3649 dhd->dma_stats.rxdata = dhd->dma_stats.rxdata_sz = 0;
3650 #ifndef IOCTLRESP_USE_CONSTMEM
3651 dhd->dma_stats.ioctl_rx = dhd->dma_stats.ioctl_rx_sz = 0;
3652 #endif /* IOCTLRESP_USE_CONSTMEM */
3653 dhd->dma_stats.event_rx = dhd->dma_stats.event_rx_sz = 0;
3654 dhd->dma_stats.info_rx = dhd->dma_stats.info_rx_sz = 0;
3655 dhd->dma_stats.tsbuf_rx = dhd->dma_stats.tsbuf_rx_sz = 0;
3656 #endif /* DMAMAP_STATS */
3657 } /* dhd_prot_reset */
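/*
 * A minimal soft-reboot sketch based on the description above (the real reset
 * path is driven from the bus layer and involves more steps, e.g. firmware
 * re-download):
 *
 *   dhd_prot_reset(dhd);   // clear rings, pktid maps and counters, keep memory
 *   ... re-download firmware / re-read pcie_shared_t ...
 *   dhd_prot_init(dhd);    // re-initialize against the new dongle state
 */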
3658
3659 #if defined(DHD_LB_RXP)
3660 #define DHD_LB_DISPATCH_RX_PROCESS(dhdp) dhd_lb_dispatch_rx_process(dhdp)
3661 #else /* !DHD_LB_RXP */
3662 #define DHD_LB_DISPATCH_RX_PROCESS(dhdp) do { /* noop */ } while (0)
3663 #endif /* !DHD_LB_RXP */
3664
3665 #if defined(DHD_LB_RXC)
3666 #define DHD_LB_DISPATCH_RX_COMPL(dhdp) dhd_lb_dispatch_rx_compl(dhdp)
3667 #else /* !DHD_LB_RXC */
3668 #define DHD_LB_DISPATCH_RX_COMPL(dhdp) do { /* noop */ } while (0)
3669 #endif /* !DHD_LB_RXC */
3670
3671 #if defined(DHD_LB_TXC)
3672 #define DHD_LB_DISPATCH_TX_COMPL(dhdp) dhd_lb_dispatch_tx_compl(dhdp)
3673 #else /* !DHD_LB_TXC */
3674 #define DHD_LB_DISPATCH_TX_COMPL(dhdp) do { /* noop */ } while (0)
3675 #endif /* !DHD_LB_TXC */
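/*
 * The DHD_LB_DISPATCH_* macros above let fast-path code invoke the load
 * balancing dispatchers unconditionally; when a feature is compiled out the
 * macro collapses to a no-op, e.g. (illustrative caller):
 *
 *   DHD_LB_DISPATCH_TX_COMPL(dhd);   // dispatch, or a no-op without DHD_LB_TXC
 */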
3676
3677 #if defined(DHD_LB)
3678 /* DHD load balancing: deferral of work to another online CPU */
3679 /* DHD_LB_TXC DHD_LB_RXC DHD_LB_RXP dispatchers, in dhd_linux.c */
3680 extern void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp);
3681 extern void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp);
3682 extern void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
3683 extern void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
3684
3685 #if defined(DHD_LB_RXP)
3686 /**
3687 * dhd_lb_dispatch_rx_process - load balance by dispatching Rx processing work
3688 * to other CPU cores
3689 */
3690 static INLINE void
3691 dhd_lb_dispatch_rx_process(dhd_pub_t *dhdp)
3692 {
3693 dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */
3694 }
3695 #endif /* DHD_LB_RXP */
3696
3697 #if defined(DHD_LB_TXC)
3698 /**
3699 * dhd_lb_dispatch_tx_compl - load balance by dispatching Tx completion work
3700 * to other CPU cores
3701 */
3702 static INLINE void
3703 dhd_lb_dispatch_tx_compl(dhd_pub_t *dhdp, uint16 ring_idx)
3704 {
3705 bcm_workq_prod_sync(&dhdp->prot->tx_compl_prod); /* flush WR index */
3706 dhd_lb_tx_compl_dispatch(dhdp); /* dispatch tx_compl_tasklet */
3707 }
3708
3709 /**
3710 * DHD load balanced tx completion tasklet handler, that will perform the
3711 * freeing of packets on the selected CPU. Packet pointers are delivered to
3712 * this tasklet via the tx complete workq.
3713 */
3714 void
3715 dhd_lb_tx_compl_handler(unsigned long data)
3716 {
3717 int elem_ix;
3718 void *pkt, **elem;
3719 dmaaddr_t pa;
3720 uint32 pa_len;
3721 dhd_pub_t *dhd = (dhd_pub_t *)data;
3722 dhd_prot_t *prot = dhd->prot;
3723 bcm_workq_t *workq = &prot->tx_compl_cons;
3724 uint32 count = 0;
3725
3726 int curr_cpu;
3727 curr_cpu = get_cpu();
3728 put_cpu();
3729
3730 DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhd);
3731
3732 while (1) {
3733 elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
3734
3735 if (elem_ix == BCM_RING_EMPTY) {
3736 break;
3737 }
3738
3739 elem = WORKQ_ELEMENT(void *, workq, elem_ix);
3740 pkt = *elem;
3741
3742 DHD_INFO(("%s: tx_compl_cons pkt<%p>\n", __FUNCTION__, pkt));
3743
3744 OSL_PREFETCH(PKTTAG(pkt));
3745 OSL_PREFETCH(pkt);
3746
3747 pa = DHD_PKTTAG_PA((dhd_pkttag_fr_t *)PKTTAG(pkt));
3748 pa_len = DHD_PKTTAG_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt));
3749
3750 DMA_UNMAP(dhd->osh, pa, pa_len, DMA_RX, 0, 0);
3751 #if defined(BCMPCIE)
3752 dhd_txcomplete(dhd, pkt, true);
3753 #ifdef DHD_4WAYM4_FAIL_DISCONNECT
3754 dhd_eap_txcomplete(dhd, pkt, TRUE, txstatus->cmn_hdr.if_id);
3755 #endif /* DHD_4WAYM4_FAIL_DISCONNECT */
3756 #endif // endif
3757
3758 PKTFREE(dhd->osh, pkt, TRUE);
3759 count++;
3760 }
3761
3762 /* smp_wmb(); */
3763 bcm_workq_cons_sync(workq);
3764 DHD_LB_STATS_UPDATE_TXC_HISTO(dhd, count);
3765 }
3766 #endif /* DHD_LB_TXC */
3767
3768 #if defined(DHD_LB_RXC)
3769
3770 /**
3771 * dhd_lb_dispatch_rx_compl - load balance by dispatching rx completion work
3772 * to other CPU cores
3773 */
3774 static INLINE void
3775 dhd_lb_dispatch_rx_compl(dhd_pub_t *dhdp)
3776 {
3777 dhd_prot_t *prot = dhdp->prot;
3778 	/* Schedule the tasklet only if we have to */
3779 if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
3780 /* flush WR index */
3781 bcm_workq_prod_sync(&dhdp->prot->rx_compl_prod);
3782 dhd_lb_rx_compl_dispatch(dhdp); /* dispatch rx_compl_tasklet */
3783 }
3784 }
3785
3786 void
3787 dhd_lb_rx_compl_handler(unsigned long data)
3788 {
3789 dhd_pub_t *dhd = (dhd_pub_t *)data;
3790 bcm_workq_t *workq = &dhd->prot->rx_compl_cons;
3791
3792 DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhd);
3793
3794 dhd_msgbuf_rxbuf_post(dhd, TRUE); /* re-use pktids */
3795 bcm_workq_cons_sync(workq);
3796 }
3797 #endif /* DHD_LB_RXC */
3798 #endif /* DHD_LB */
3799
3800 void
3801 dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset)
3802 {
3803 dhd_prot_t *prot = dhd->prot;
3804 prot->rx_dataoffset = rx_offset;
3805 }
3806
3807 static int
3808 dhd_check_create_info_rings(dhd_pub_t *dhd)
3809 {
3810 dhd_prot_t *prot = dhd->prot;
3811 int ret = BCME_ERROR;
3812 uint16 ringid;
3813
3814 {
3815 /* dongle may increase max_submission_rings so keep
3816 * ringid at end of dynamic rings
3817 */
3818 ringid = dhd->bus->max_tx_flowrings +
3819 (dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
3820 BCMPCIE_H2D_COMMON_MSGRINGS;
3821 }
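	/*
	 * Note: the expression above reduces algebraically to
	 *   ringid = dhd->bus->max_submission_rings + BCMPCIE_H2D_COMMON_MSGRINGS
	 * i.e. the first ring id past all H2D submission rings; the expanded form
	 * presumably keeps the flowring/dynamic-ring split explicit.
	 */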
3822
3823 if (prot->d2hring_info_cpln) {
3824 /* for d2hring re-entry case, clear inited flag */
3825 prot->d2hring_info_cpln->inited = FALSE;
3826 }
3827
3828 if (prot->h2dring_info_subn && prot->d2hring_info_cpln) {
3829 		return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
3830 }
3831
3832 if (prot->h2dring_info_subn == NULL) {
3833 prot->h2dring_info_subn = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
3834
3835 if (prot->h2dring_info_subn == NULL) {
3836 DHD_ERROR(("%s: couldn't alloc memory for h2dring_info_subn\n",
3837 __FUNCTION__));
3838 return BCME_NOMEM;
3839 }
3840
3841 DHD_INFO(("%s: about to create debug submit ring\n", __FUNCTION__));
3842 ret = dhd_prot_ring_attach(dhd, prot->h2dring_info_subn, "h2dinfo",
3843 H2DRING_DYNAMIC_INFO_MAX_ITEM, H2DRING_INFO_BUFPOST_ITEMSIZE,
3844 ringid);
3845 if (ret != BCME_OK) {
3846 DHD_ERROR(("%s: couldn't alloc resources for dbg submit ring\n",
3847 __FUNCTION__));
3848 goto err;
3849 }
3850 }
3851
3852 if (prot->d2hring_info_cpln == NULL) {
3853 prot->d2hring_info_cpln = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
3854
3855 if (prot->d2hring_info_cpln == NULL) {
3856 			DHD_ERROR(("%s: couldn't alloc memory for d2hring_info_cpln\n",
3857 __FUNCTION__));
3858 return BCME_NOMEM;
3859 }
3860
3861 /* create the debug info completion ring next to debug info submit ring
3862 * ringid = id next to debug info submit ring
3863 */
3864 ringid = ringid + 1;
3865
3866 DHD_INFO(("%s: about to create debug cpl ring\n", __FUNCTION__));
3867 ret = dhd_prot_ring_attach(dhd, prot->d2hring_info_cpln, "d2hinfo",
3868 D2HRING_DYNAMIC_INFO_MAX_ITEM, D2HRING_INFO_BUFCMPLT_ITEMSIZE,
3869 ringid);
3870 if (ret != BCME_OK) {
3871 DHD_ERROR(("%s: couldn't alloc resources for dbg cpl ring\n",
3872 __FUNCTION__));
3873 dhd_prot_ring_detach(dhd, prot->h2dring_info_subn);
3874 goto err;
3875 }
3876 }
3877
3878 return ret;
3879 err:
3880 MFREE(prot->osh, prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
3881 prot->h2dring_info_subn = NULL;
3882
3883 if (prot->d2hring_info_cpln) {
3884 MFREE(prot->osh, prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
3885 prot->d2hring_info_cpln = NULL;
3886 }
3887 return ret;
3888 } /* dhd_check_create_info_rings */
3889
3890 int
3891 dhd_prot_init_info_rings(dhd_pub_t *dhd)
3892 {
3893 dhd_prot_t *prot = dhd->prot;
3894 int ret = BCME_OK;
3895
3896 if ((ret = dhd_check_create_info_rings(dhd)) != BCME_OK) {
3897 DHD_ERROR(("%s: info rings aren't created! \n",
3898 __FUNCTION__));
3899 return ret;
3900 }
3901
3902 if ((prot->d2hring_info_cpln->inited) || (prot->d2hring_info_cpln->create_pending)) {
3903 DHD_INFO(("Info completion ring was created!\n"));
3904 return ret;
3905 }
3906
3907 DHD_TRACE(("trying to send create d2h info ring: id %d\n", prot->d2hring_info_cpln->idx));
3908 ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_info_cpln,
3909 BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL, DHD_D2H_DBGRING_REQ_PKTID);
3910 if (ret != BCME_OK)
3911 return ret;
3912
3913 prot->h2dring_info_subn->seqnum = H2D_EPOCH_INIT_VAL;
3914 prot->h2dring_info_subn->current_phase = 0;
3915 prot->d2hring_info_cpln->seqnum = D2H_EPOCH_INIT_VAL;
3916 prot->d2hring_info_cpln->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
3917
3918 DHD_TRACE(("trying to send create h2d info ring id %d\n", prot->h2dring_info_subn->idx));
3919 prot->h2dring_info_subn->n_completion_ids = 1;
3920 prot->h2dring_info_subn->compeltion_ring_ids[0] = prot->d2hring_info_cpln->idx;
3921
3922 ret = dhd_send_h2d_ringcreate(dhd, prot->h2dring_info_subn,
3923 BCMPCIE_H2D_RING_TYPE_DBGBUF_SUBMIT, DHD_H2D_DBGRING_REQ_PKTID);
3924
3925 	/* Note that there is no way to delete a d2h or h2d ring once created, so if
3926 	 * either creation fails we cannot clean up the ring that was already created
3927 */
3928 return ret;
3929 } /* dhd_prot_init_info_rings */
3930
3931 static void
3932 dhd_prot_detach_info_rings(dhd_pub_t *dhd)
3933 {
3934 if (dhd->prot->h2dring_info_subn) {
3935 dhd_prot_ring_detach(dhd, dhd->prot->h2dring_info_subn);
3936 MFREE(dhd->prot->osh, dhd->prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
3937 dhd->prot->h2dring_info_subn = NULL;
3938 }
3939 if (dhd->prot->d2hring_info_cpln) {
3940 dhd_prot_ring_detach(dhd, dhd->prot->d2hring_info_cpln);
3941 MFREE(dhd->prot->osh, dhd->prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
3942 dhd->prot->d2hring_info_cpln = NULL;
3943 }
3944 }
3945
3946 #ifdef DHD_HP2P
3947 static int
3948 dhd_check_create_hp2p_rings(dhd_pub_t *dhd)
3949 {
3950 dhd_prot_t *prot = dhd->prot;
3951 int ret = BCME_ERROR;
3952 uint16 ringid;
3953
3954 /* Last 2 dynamic ring indices are used by hp2p rings */
3955 ringid = dhd->bus->max_submission_rings + dhd->bus->max_completion_rings - 2;
3956
3957 if (prot->d2hring_hp2p_txcpl == NULL) {
3958 prot->d2hring_hp2p_txcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
3959
3960 if (prot->d2hring_hp2p_txcpl == NULL) {
3961 DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_txcpl\n",
3962 __FUNCTION__));
3963 return BCME_NOMEM;
3964 }
3965
3966 DHD_INFO(("%s: about to create hp2p txcpl ring\n", __FUNCTION__));
3967 ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_txcpl, "d2hhp2p_txcpl",
3968 dhd_bus_get_hp2p_ring_max_size(dhd->bus, TRUE), D2HRING_TXCMPLT_ITEMSIZE,
3969 ringid);
3970 if (ret != BCME_OK) {
3971 DHD_ERROR(("%s: couldn't alloc resources for hp2p txcpl ring\n",
3972 __FUNCTION__));
3973 goto err2;
3974 }
3975 } else {
3976 /* for re-entry case, clear inited flag */
3977 prot->d2hring_hp2p_txcpl->inited = FALSE;
3978 }
3979 if (prot->d2hring_hp2p_rxcpl == NULL) {
3980 prot->d2hring_hp2p_rxcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
3981
3982 if (prot->d2hring_hp2p_rxcpl == NULL) {
3983 DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_rxcpl\n",
3984 __FUNCTION__));
3985 return BCME_NOMEM;
3986 }
3987
3988 /* create the hp2p rx completion ring next to hp2p tx compl ring
3989 * ringid = id next to hp2p tx compl ring
3990 */
3991 ringid = ringid + 1;
3992
3993 DHD_INFO(("%s: about to create hp2p rxcpl ring\n", __FUNCTION__));
3994 ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_rxcpl, "d2hhp2p_rxcpl",
3995 dhd_bus_get_hp2p_ring_max_size(dhd->bus, FALSE), D2HRING_RXCMPLT_ITEMSIZE,
3996 ringid);
3997 if (ret != BCME_OK) {
3998 DHD_ERROR(("%s: couldn't alloc resources for hp2p rxcpl ring\n",
3999 __FUNCTION__));
4000 goto err1;
4001 }
4002 } else {
4003 /* for re-entry case, clear inited flag */
4004 prot->d2hring_hp2p_rxcpl->inited = FALSE;
4005 }
4006
4007 return ret;
4008 err1:
4009 MFREE(prot->osh, prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t));
4010 prot->d2hring_hp2p_rxcpl = NULL;
4011
4012 err2:
4013 MFREE(prot->osh, prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t));
4014 prot->d2hring_hp2p_txcpl = NULL;
4015 return ret;
4016 } /* dhd_check_create_hp2p_rings */
4017
4018 int
4019 dhd_prot_init_hp2p_rings(dhd_pub_t *dhd)
4020 {
4021 dhd_prot_t *prot = dhd->prot;
4022 int ret = BCME_OK;
4023
4024 dhd->hp2p_ring_active = FALSE;
4025
4026 if ((ret = dhd_check_create_hp2p_rings(dhd)) != BCME_OK) {
4027 DHD_ERROR(("%s: hp2p rings aren't created! \n",
4028 __FUNCTION__));
4029 return ret;
4030 }
4031
4032 if ((prot->d2hring_hp2p_txcpl->inited) || (prot->d2hring_hp2p_txcpl->create_pending)) {
4033 DHD_INFO(("hp2p tx completion ring was created!\n"));
4034 return ret;
4035 }
4036
4037 DHD_TRACE(("trying to send create d2h hp2p txcpl ring: id %d\n",
4038 prot->d2hring_hp2p_txcpl->idx));
4039 ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_txcpl,
4040 BCMPCIE_D2H_RING_TYPE_HPP_TX_CPL, DHD_D2H_HPPRING_TXREQ_PKTID);
4041 if (ret != BCME_OK)
4042 return ret;
4043
4044 prot->d2hring_hp2p_txcpl->seqnum = D2H_EPOCH_INIT_VAL;
4045 prot->d2hring_hp2p_txcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4046
4047 if ((prot->d2hring_hp2p_rxcpl->inited) || (prot->d2hring_hp2p_rxcpl->create_pending)) {
4048 DHD_INFO(("hp2p rx completion ring was created!\n"));
4049 return ret;
4050 }
4051
4052 DHD_TRACE(("trying to send create d2h hp2p rxcpl ring: id %d\n",
4053 prot->d2hring_hp2p_rxcpl->idx));
4054 ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_rxcpl,
4055 BCMPCIE_D2H_RING_TYPE_HPP_RX_CPL, DHD_D2H_HPPRING_RXREQ_PKTID);
4056 if (ret != BCME_OK)
4057 return ret;
4058
4059 prot->d2hring_hp2p_rxcpl->seqnum = D2H_EPOCH_INIT_VAL;
4060 prot->d2hring_hp2p_rxcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4061
4062 	/* Note that there is no way to delete a d2h or h2d ring once created, so if
4063 	 * either creation fails we cannot clean up the ring that was already created
4064 */
4065 return BCME_OK;
4066 } /* dhd_prot_init_hp2p_rings */
4067
4068 static void
4069 dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd)
4070 {
4071 if (dhd->prot->d2hring_hp2p_txcpl) {
4072 dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_txcpl);
4073 MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t));
4074 dhd->prot->d2hring_hp2p_txcpl = NULL;
4075 }
4076 if (dhd->prot->d2hring_hp2p_rxcpl) {
4077 dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_rxcpl);
4078 MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t));
4079 dhd->prot->d2hring_hp2p_rxcpl = NULL;
4080 }
4081 }
4082 #endif /* DHD_HP2P */
4083
4084 #ifdef EWP_EDL
4085 static int
4086 dhd_check_create_edl_rings(dhd_pub_t *dhd)
4087 {
4088 dhd_prot_t *prot = dhd->prot;
4089 int ret = BCME_ERROR;
4090 uint16 ringid;
4091
4092 {
4093 /* dongle may increase max_submission_rings so keep
4094 * ringid at end of dynamic rings (re-use info ring cpl ring id)
4095 */
4096 ringid = dhd->bus->max_tx_flowrings +
4097 (dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
4098 BCMPCIE_H2D_COMMON_MSGRINGS + 1;
4099 }
4100
4101 if (prot->d2hring_edl) {
4102 prot->d2hring_edl->inited = FALSE;
4103 		return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
4104 }
4105
4106 if (prot->d2hring_edl == NULL) {
4107 prot->d2hring_edl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
4108
4109 if (prot->d2hring_edl == NULL) {
4110 DHD_ERROR(("%s: couldn't alloc memory for d2hring_edl\n",
4111 __FUNCTION__));
4112 return BCME_NOMEM;
4113 }
4114
4115 DHD_ERROR(("%s: about to create EDL ring, ringid: %u \n", __FUNCTION__,
4116 ringid));
4117 ret = dhd_prot_ring_attach(dhd, prot->d2hring_edl, "d2hring_edl",
4118 D2HRING_EDL_MAX_ITEM, D2HRING_EDL_ITEMSIZE,
4119 ringid);
4120 if (ret != BCME_OK) {
4121 DHD_ERROR(("%s: couldn't alloc resources for EDL ring\n",
4122 __FUNCTION__));
4123 goto err;
4124 }
4125 }
4126
4127 return ret;
4128 err:
4129 MFREE(prot->osh, prot->d2hring_edl, sizeof(msgbuf_ring_t));
4130 prot->d2hring_edl = NULL;
4131
4132 return ret;
4133 } /* dhd_check_create_edl_rings */
4134
4135 int
4136 dhd_prot_init_edl_rings(dhd_pub_t *dhd)
4137 {
4138 dhd_prot_t *prot = dhd->prot;
4139 int ret = BCME_ERROR;
4140
4141 if ((ret = dhd_check_create_edl_rings(dhd)) != BCME_OK) {
4142 DHD_ERROR(("%s: EDL rings aren't created! \n",
4143 __FUNCTION__));
4144 return ret;
4145 }
4146
4147 if ((prot->d2hring_edl->inited) || (prot->d2hring_edl->create_pending)) {
4148 DHD_INFO(("EDL completion ring was created!\n"));
4149 return ret;
4150 }
4151
4152 DHD_ERROR(("trying to send create d2h edl ring: idx %d\n", prot->d2hring_edl->idx));
4153 ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_edl,
4154 BCMPCIE_D2H_RING_TYPE_EDL, DHD_D2H_DBGRING_REQ_PKTID);
4155 if (ret != BCME_OK)
4156 return ret;
4157
4158 prot->d2hring_edl->seqnum = D2H_EPOCH_INIT_VAL;
4159 prot->d2hring_edl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4160
4161 return BCME_OK;
4162 } /* dhd_prot_init_edl_rings */
4163
4164 static void
4165 dhd_prot_detach_edl_rings(dhd_pub_t *dhd)
4166 {
4167 if (dhd->prot->d2hring_edl) {
4168 dhd_prot_ring_detach(dhd, dhd->prot->d2hring_edl);
4169 MFREE(dhd->prot->osh, dhd->prot->d2hring_edl, sizeof(msgbuf_ring_t));
4170 dhd->prot->d2hring_edl = NULL;
4171 }
4172 }
4173 #endif /* EWP_EDL */
4174
4175 /**
4176 * Initialize protocol: sync w/dongle state.
4177 * Sets dongle media info (iswl, drv_version, mac address).
4178 */
4179 int dhd_sync_with_dongle(dhd_pub_t *dhd)
4180 {
4181 int ret = 0;
4182 wlc_rev_info_t revinfo;
4183 char buf[128];
4184 dhd_prot_t *prot = dhd->prot;
4185
4186 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4187
4188 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
4189
4190 /* Post ts buffer after shim layer is attached */
4191 ret = dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
4192
4193 #ifndef OEM_ANDROID
4194 /* Get the device MAC address */
4195 memset(buf, 0, sizeof(buf));
4196 strncpy(buf, "cur_etheraddr", sizeof(buf) - 1);
4197 ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
4198 if (ret < 0) {
4199 DHD_ERROR(("%s: GET iovar cur_etheraddr FAILED\n", __FUNCTION__));
4200 goto done;
4201 }
4202 memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
4203 if (dhd_msg_level & DHD_INFO_VAL) {
4204 bcm_print_bytes("CUR_ETHERADDR ", (uchar *)buf, ETHER_ADDR_LEN);
4205 }
4206 #endif /* OEM_ANDROID */
4207
4208 #ifdef DHD_FW_COREDUMP
4209 /* Check the memdump capability */
4210 dhd_get_memdump_info(dhd);
4211 #endif /* DHD_FW_COREDUMP */
4212 #ifdef BCMASSERT_LOG
4213 dhd_get_assert_info(dhd);
4214 #endif /* BCMASSERT_LOG */
4215
4216 /* Get the device rev info */
4217 memset(&revinfo, 0, sizeof(revinfo));
4218 ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
4219 if (ret < 0) {
4220 DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__));
4221 goto done;
4222 }
4223 DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
4224 revinfo.deviceid, revinfo.vendorid, revinfo.chipnum));
4225
4226 /* Get the RxBuf post size */
4227 memset(buf, 0, sizeof(buf));
4228 bcm_mkiovar("rxbufpost_sz", NULL, 0, buf, sizeof(buf));
4229 ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
4230 if (ret < 0) {
4231 DHD_ERROR(("%s: GET RxBuf post FAILED, default to %d\n",
4232 __FUNCTION__, DHD_FLOWRING_RX_BUFPOST_PKTSZ));
4233 prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
4234 } else {
4235 memcpy_s(&(prot->rxbufpost_sz), sizeof(prot->rxbufpost_sz), buf, sizeof(uint16));
4236 if (prot->rxbufpost_sz > DHD_FLOWRING_RX_BUFPOST_PKTSZ_MAX) {
4237 DHD_ERROR(("%s: Invalid RxBuf post size : %d, default to %d\n",
4238 __FUNCTION__, prot->rxbufpost_sz, DHD_FLOWRING_RX_BUFPOST_PKTSZ));
4239 prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
4240 } else {
4241 DHD_ERROR(("%s: RxBuf Post : %d\n", __FUNCTION__, prot->rxbufpost_sz));
4242 }
4243 }
4244
4245 /* Post buffers for packet reception */
4246 dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
4247
4248 DHD_SSSR_DUMP_INIT(dhd);
4249
4250 dhd_process_cid_mac(dhd, TRUE);
4251 ret = dhd_preinit_ioctls(dhd);
4252 dhd_process_cid_mac(dhd, FALSE);
4253
4254 #if defined(DHD_H2D_LOG_TIME_SYNC)
4255 #ifdef DHD_HP2P
4256 if (FW_SUPPORTED(dhd, h2dlogts) || dhd->hp2p_capable) {
4257 if (dhd->hp2p_enable) {
4258 dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH / 40;
4259 } else {
4260 dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH;
4261 }
4262 #else
4263 if (FW_SUPPORTED(dhd, h2dlogts)) {
4264 dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH;
4265 #endif // endif
4266 dhd->bus->dhd_rte_time_sync_count = OSL_SYSUPTIME_US();
4267 /* This is during initialization. */
4268 dhd_h2d_log_time_sync(dhd);
4269 } else {
4270 dhd->dhd_rte_time_sync_ms = 0;
4271 }
4272 #endif /* DHD_H2D_LOG_TIME_SYNC || DHD_HP2P */
4273 /* Always assumes wl for now */
4274 dhd->iswl = TRUE;
4275 done:
4276 return ret;
4277 } /* dhd_sync_with_dongle */
4278
4279 #define DHD_DBG_SHOW_METADATA 0
4280
4281 #if DHD_DBG_SHOW_METADATA
4282 static void BCMFASTPATH
4283 dhd_prot_print_metadata(dhd_pub_t *dhd, void *ptr, int len)
4284 {
4285 uint8 tlv_t;
4286 uint8 tlv_l;
4287 uint8 *tlv_v = (uint8 *)ptr;
4288
4289 if (len <= BCMPCIE_D2H_METADATA_HDRLEN)
4290 return;
4291
4292 len -= BCMPCIE_D2H_METADATA_HDRLEN;
4293 tlv_v += BCMPCIE_D2H_METADATA_HDRLEN;
4294
4295 while (len > TLV_HDR_LEN) {
4296 tlv_t = tlv_v[TLV_TAG_OFF];
4297 tlv_l = tlv_v[TLV_LEN_OFF];
4298
4299 len -= TLV_HDR_LEN;
4300 tlv_v += TLV_HDR_LEN;
4301 if (len < tlv_l)
4302 break;
4303 if ((tlv_t == 0) || (tlv_t == WLFC_CTL_TYPE_FILLER))
4304 break;
4305
4306 switch (tlv_t) {
4307 case WLFC_CTL_TYPE_TXSTATUS: {
4308 uint32 txs;
4309 memcpy(&txs, tlv_v, sizeof(uint32));
4310 if (tlv_l < (sizeof(wl_txstatus_additional_info_t) + sizeof(uint32))) {
4311 printf("METADATA TX_STATUS: %08x\n", txs);
4312 } else {
4313 wl_txstatus_additional_info_t tx_add_info;
4314 memcpy(&tx_add_info, tlv_v + sizeof(uint32),
4315 sizeof(wl_txstatus_additional_info_t));
4316 printf("METADATA TX_STATUS: %08x WLFCTS[%04x | %08x - %08x - %08x]"
4317 " rate = %08x tries = %d - %d\n", txs,
4318 tx_add_info.seq, tx_add_info.entry_ts,
4319 tx_add_info.enq_ts, tx_add_info.last_ts,
4320 tx_add_info.rspec, tx_add_info.rts_cnt,
4321 tx_add_info.tx_cnt);
4322 }
4323 } break;
4324
4325 case WLFC_CTL_TYPE_RSSI: {
4326 if (tlv_l == 1)
4327 printf("METADATA RX_RSSI: rssi = %d\n", *tlv_v);
4328 else
4329 printf("METADATA RX_RSSI[%04x]: rssi = %d snr = %d\n",
4330 (*(tlv_v + 3) << 8) | *(tlv_v + 2),
4331 (int8)(*tlv_v), *(tlv_v + 1));
4332 } break;
4333
4334 case WLFC_CTL_TYPE_FIFO_CREDITBACK:
4335 bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l);
4336 break;
4337
4338 case WLFC_CTL_TYPE_TX_ENTRY_STAMP:
4339 bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l);
4340 break;
4341
4342 case WLFC_CTL_TYPE_RX_STAMP: {
4343 struct {
4344 uint32 rspec;
4345 uint32 bus_time;
4346 uint32 wlan_time;
4347 } rx_tmstamp;
4348 memcpy(&rx_tmstamp, tlv_v, sizeof(rx_tmstamp));
4349 			printf("METADATA RX TIMESTAMP: WLFCTS[%08x - %08x] rate = %08x\n",
4350 rx_tmstamp.wlan_time, rx_tmstamp.bus_time, rx_tmstamp.rspec);
4351 } break;
4352
4353 case WLFC_CTL_TYPE_TRANS_ID:
4354 bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l);
4355 break;
4356
4357 case WLFC_CTL_TYPE_COMP_TXSTATUS:
4358 bcm_print_bytes("METADATA COMP_TXSTATUS", tlv_v, tlv_l);
4359 break;
4360
4361 default:
4362 bcm_print_bytes("METADATA UNKNOWN", tlv_v, tlv_l);
4363 break;
4364 }
4365
4366 len -= tlv_l;
4367 tlv_v += tlv_l;
4368 }
4369 }
4370 #endif /* DHD_DBG_SHOW_METADATA */
4371
4372 static INLINE void BCMFASTPATH
4373 dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send)
4374 {
4375 if (pkt) {
4376 if (pkttype == PKTTYPE_IOCTL_RX ||
4377 pkttype == PKTTYPE_EVENT_RX ||
4378 pkttype == PKTTYPE_INFO_RX ||
4379 pkttype == PKTTYPE_TSBUF_RX) {
4380 #ifdef DHD_USE_STATIC_CTRLBUF
4381 PKTFREE_STATIC(dhd->osh, pkt, send);
4382 #else
4383 PKTFREE(dhd->osh, pkt, send);
4384 #endif /* DHD_USE_STATIC_CTRLBUF */
4385 } else {
4386 PKTFREE(dhd->osh, pkt, send);
4387 }
4388 }
4389 }
4390
4391 /**
4392 * dhd_prot_packet_get should be called only for items that use the pktid_ctrl_map handle.
4393 * All the bottom-most functions, like dhd_pktid_map_free, hold their own DHD_PKTID_LOCK
4394 * to ensure thread safety, so there is no need to hold any locks in this function.
4395 */
4396 static INLINE void * BCMFASTPATH
4397 dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, bool free_pktid)
4398 {
4399 void *PKTBUF;
4400 dmaaddr_t pa;
4401 uint32 len;
4402 void *dmah;
4403 void *secdma;
4404
4405 #ifdef DHD_PCIE_PKTID
4406 if (free_pktid) {
4407 PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map,
4408 pktid, pa, len, dmah, secdma, pkttype);
4409 } else {
4410 PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_ctrl_map,
4411 pktid, pa, len, dmah, secdma, pkttype);
4412 }
4413 #else
4414 PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map, pktid, pa,
4415 len, dmah, secdma, pkttype);
4416 #endif /* DHD_PCIE_PKTID */
4417 if (PKTBUF) {
4418 {
4419 if (SECURE_DMA_ENAB(dhd->osh))
4420 SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah,
4421 secdma, 0);
4422 else
4423 DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
4424 #ifdef DMAMAP_STATS
4425 switch (pkttype) {
4426 #ifndef IOCTLRESP_USE_CONSTMEM
4427 case PKTTYPE_IOCTL_RX:
4428 dhd->dma_stats.ioctl_rx--;
4429 dhd->dma_stats.ioctl_rx_sz -= len;
4430 break;
4431 #endif /* IOCTLRESP_USE_CONSTMEM */
4432 case PKTTYPE_EVENT_RX:
4433 dhd->dma_stats.event_rx--;
4434 dhd->dma_stats.event_rx_sz -= len;
4435 break;
4436 case PKTTYPE_INFO_RX:
4437 dhd->dma_stats.info_rx--;
4438 dhd->dma_stats.info_rx_sz -= len;
4439 break;
4440 case PKTTYPE_TSBUF_RX:
4441 dhd->dma_stats.tsbuf_rx--;
4442 dhd->dma_stats.tsbuf_rx_sz -= len;
4443 break;
4444 }
4445 #endif /* DMAMAP_STATS */
4446 }
4447 }
4448
4449 return PKTBUF;
4450 }
4451
4452 #ifdef IOCTLRESP_USE_CONSTMEM
4453 static INLINE void BCMFASTPATH
4454 dhd_prot_ioctl_ret_buffer_get(dhd_pub_t *dhd, uint32 pktid, dhd_dma_buf_t *retbuf)
4455 {
4456 memset(retbuf, 0, sizeof(dhd_dma_buf_t));
4457 retbuf->va = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
4458 retbuf->pa, retbuf->len, retbuf->dmah, retbuf->secdma, PKTTYPE_IOCTL_RX);
4459
4460 return;
4461 }
4462 #endif // endif
4463
4464 static void BCMFASTPATH
4465 dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid)
4466 {
4467 dhd_prot_t *prot = dhd->prot;
4468 int16 fillbufs;
4469 uint16 cnt = 256;
4470 int retcount = 0;
4471
4472 fillbufs = prot->max_rxbufpost - prot->rxbufpost;
4473 while (fillbufs >= RX_BUF_BURST) {
4474 cnt--;
4475 if (cnt == 0) {
4476 /* find a better way to reschedule rx buf post if space not available */
4477 DHD_ERROR(("h2d rx post ring not available to post host buffers \n"));
4478 DHD_ERROR(("Current posted host buf count %d \n", prot->rxbufpost));
4479 break;
4480 }
4481
4482 		/* Post in bursts of at most RX_BUF_BURST buffers at a time */
4483 fillbufs = MIN(fillbufs, RX_BUF_BURST);
4484
4485 /* Post buffers */
4486 retcount = dhd_prot_rxbuf_post(dhd, fillbufs, use_rsv_pktid);
4487
4488 if (retcount >= 0) {
4489 prot->rxbufpost += (uint16)retcount;
4490 #ifdef DHD_LB_RXC
4491 /* dhd_prot_rxbuf_post returns the number of buffers posted */
4492 DHD_LB_STATS_UPDATE_RXC_HISTO(dhd, retcount);
4493 #endif /* DHD_LB_RXC */
4494 /* how many more to post */
4495 fillbufs = prot->max_rxbufpost - prot->rxbufpost;
4496 } else {
4497 /* Make sure we don't run loop any further */
4498 fillbufs = 0;
4499 }
4500 }
4501 }
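/*
 * Worked example of the accounting above (numbers are illustrative, assuming
 * RX_BUF_BURST is 32): with max_rxbufpost = 256 and rxbufpost = 100, fillbufs
 * starts at 156, so the loop posts bursts of up to 32 buffers until fewer than
 * RX_BUF_BURST remain to be topped up, the ring runs out of space, or the
 * retry budget (cnt) is exhausted.
 */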
4502
4503 /** Post 'count' no of rx buffers to dongle */
4504 static int BCMFASTPATH
4505 dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid)
4506 {
4507 void *p, **pktbuf;
4508 uint8 *rxbuf_post_tmp;
4509 host_rxbuf_post_t *rxbuf_post;
4510 void *msg_start;
4511 dmaaddr_t pa, *pktbuf_pa;
4512 uint32 *pktlen;
4513 uint16 i = 0, alloced = 0;
4514 unsigned long flags;
4515 uint32 pktid;
4516 dhd_prot_t *prot = dhd->prot;
4517 msgbuf_ring_t *ring = &prot->h2dring_rxp_subn;
4518 void *lcl_buf;
4519 uint16 lcl_buf_size;
4520 uint16 pktsz = prot->rxbufpost_sz;
4521
4522 /* allocate a local buffer to store pkt buffer va, pa and length */
4523 lcl_buf_size = (sizeof(void *) + sizeof(dmaaddr_t) + sizeof(uint32)) *
4524 RX_BUF_BURST;
4525 lcl_buf = MALLOC(dhd->osh, lcl_buf_size);
4526 if (!lcl_buf) {
4527 DHD_ERROR(("%s: local scratch buffer allocation failed\n", __FUNCTION__));
4528 return 0;
4529 }
4530 pktbuf = lcl_buf;
4531 pktbuf_pa = (dmaaddr_t *)((uint8 *)pktbuf + sizeof(void *) * RX_BUF_BURST);
4532 pktlen = (uint32 *)((uint8 *)pktbuf_pa + sizeof(dmaaddr_t) * RX_BUF_BURST);
4533
4534 for (i = 0; i < count; i++) {
4535 if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
4536 DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__));
4537 dhd->rx_pktgetfail++;
4538 break;
4539 }
4540
4541 pktlen[i] = PKTLEN(dhd->osh, p);
4542 if (SECURE_DMA_ENAB(dhd->osh)) {
4543 pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i],
4544 DMA_RX, p, 0, ring->dma_buf.secdma, 0);
4545 }
4546 #ifndef BCM_SECURE_DMA
4547 else
4548 pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i], DMA_RX, p, 0);
4549 #endif /* #ifndef BCM_SECURE_DMA */
4550
4551 if (PHYSADDRISZERO(pa)) {
4552 PKTFREE(dhd->osh, p, FALSE);
4553 DHD_ERROR(("Invalid phyaddr 0\n"));
4554 ASSERT(0);
4555 break;
4556 }
4557 #ifdef DMAMAP_STATS
4558 dhd->dma_stats.rxdata++;
4559 dhd->dma_stats.rxdata_sz += pktlen[i];
4560 #endif /* DMAMAP_STATS */
4561
4562 PKTPULL(dhd->osh, p, prot->rx_metadata_offset);
4563 pktlen[i] = PKTLEN(dhd->osh, p);
4564 pktbuf[i] = p;
4565 pktbuf_pa[i] = pa;
4566 }
4567
4568 /* only post what we have */
4569 count = i;
4570
4571 /* grab the ring lock to allocate pktid and post on ring */
4572 DHD_RING_LOCK(ring->ring_lock, flags);
4573
4574 /* Claim space for exactly 'count' no of messages, for mitigation purpose */
4575 msg_start = (void *)
4576 dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE);
4577 if (msg_start == NULL) {
4578 DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
4579 DHD_RING_UNLOCK(ring->ring_lock, flags);
4580 goto cleanup;
4581 }
4582 	/* if msg_start != NULL, we should have alloced space for at least 1 item */
4583 ASSERT(alloced > 0);
4584
4585 rxbuf_post_tmp = (uint8*)msg_start;
4586
4587 for (i = 0; i < alloced; i++) {
4588 rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp;
4589 p = pktbuf[i];
4590 pa = pktbuf_pa[i];
4591
4592 #if defined(DHD_LB_RXC)
4593 if (use_rsv_pktid == TRUE) {
4594 bcm_workq_t *workq = &prot->rx_compl_cons;
4595 int elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
4596
4597 if (elem_ix == BCM_RING_EMPTY) {
4598 DHD_INFO(("%s rx_compl_cons ring is empty\n", __FUNCTION__));
4599 pktid = DHD_PKTID_INVALID;
4600 goto alloc_pkt_id;
4601 } else {
4602 uint32 *elem = WORKQ_ELEMENT(uint32, workq, elem_ix);
4603 pktid = *elem;
4604 }
4605
4606 rxbuf_post->cmn_hdr.request_id = htol32(pktid);
4607
4608 /* Now populate the previous locker with valid information */
4609 if (pktid != DHD_PKTID_INVALID) {
4610 DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_rx_map,
4611 p, pktid, pa, pktlen[i], DMA_RX, NULL, NULL,
4612 PKTTYPE_DATA_RX);
4613 }
4614 } else
#endif /* DHD_LB_RXC */
4616 {
4617 #if defined(DHD_LB_RXC)
4618 alloc_pkt_id:
4619 #endif /* DHD_LB_RXC */
4620 pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_rx_map, p, pa,
4621 pktlen[i], DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX);
4622 #if defined(DHD_PCIE_PKTID)
4623 if (pktid == DHD_PKTID_INVALID) {
4624 break;
4625 }
4626 #endif /* DHD_PCIE_PKTID */
4627 }
4628
4629 /* Common msg header */
4630 rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST;
4631 rxbuf_post->cmn_hdr.if_id = 0;
4632 rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
4633 rxbuf_post->cmn_hdr.flags = ring->current_phase;
4634 ring->seqnum++;
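		/*
		 * The epoch is the submission sequence number modulo H2D_EPOCH_MODULO,
		 * and current_phase is toggled whenever the write index wraps (see the
		 * rollback below); together they presumably let the consumer tell
		 * freshly written work items apart from stale ring contents.
		 */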
4635 rxbuf_post->data_buf_len = htol16((uint16)pktlen[i]);
4636 rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
4637 rxbuf_post->data_buf_addr.low_addr =
4638 htol32(PHYSADDRLO(pa) + prot->rx_metadata_offset);
4639
4640 if (prot->rx_metadata_offset) {
4641 rxbuf_post->metadata_buf_len = prot->rx_metadata_offset;
4642 rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
4643 rxbuf_post->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
4644 } else {
4645 rxbuf_post->metadata_buf_len = 0;
4646 rxbuf_post->metadata_buf_addr.high_addr = 0;
4647 rxbuf_post->metadata_buf_addr.low_addr = 0;
4648 }
4649
4650 #ifdef DHD_PKTID_AUDIT_RING
4651 DHD_PKTID_AUDIT(dhd, prot->pktid_rx_map, pktid, DHD_DUPLICATE_ALLOC);
4652 #endif /* DHD_PKTID_AUDIT_RING */
4653
4654 rxbuf_post->cmn_hdr.request_id = htol32(pktid);
4655
4656 /* Move rxbuf_post_tmp to next item */
4657 rxbuf_post_tmp = rxbuf_post_tmp + ring->item_len;
4658
4659 #ifdef DHD_LBUF_AUDIT
4660 PKTAUDIT(dhd->osh, p);
4661 #endif // endif
4662 }
4663
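	/*
	 * Fewer items may have been populated (i) than ring slots claimed (alloced),
	 * e.g. when the rx pktid pool runs out. Roll the write index back by the
	 * difference, wrapping around max_items, and flip the phase bit if the
	 * rollback lands the index back on slot 0.
	 */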
4664 if (i < alloced) {
4665 if (ring->wr < (alloced - i))
4666 ring->wr = ring->max_items - (alloced - i);
4667 else
4668 ring->wr -= (alloced - i);
4669
4670 if (ring->wr == 0) {
4671 DHD_INFO(("%s: flipping the phase now\n", ring->name));
4672 ring->current_phase = ring->current_phase ?
4673 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4674 }
4675
4676 alloced = i;
4677 }
4678
4679 /* update ring's WR index and ring doorbell to dongle */
4680 if (alloced > 0) {
4681 dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
4682 }
4683
4684 DHD_RING_UNLOCK(ring->ring_lock, flags);
4685
4686 cleanup:
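	/*
	 * Packets that were allocated and DMA-mapped above but never made it onto
	 * the ring (indices [alloced, count)) are unmapped and freed here.
	 */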
4687 for (i = alloced; i < count; i++) {
4688 p = pktbuf[i];
4689 pa = pktbuf_pa[i];
4690
4691 if (SECURE_DMA_ENAB(dhd->osh))
4692 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0,
4693 DHD_DMAH_NULL, ring->dma_buf.secdma, 0);
4694 else
4695 DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0, DHD_DMAH_NULL);
4696 PKTFREE(dhd->osh, p, FALSE);
4697 }
4698
4699 MFREE(dhd->osh, lcl_buf, lcl_buf_size);
4700
4701 return alloced;
} /* dhd_prot_rxbuf_post */
4703
4704 static int
4705 dhd_prot_infobufpost(dhd_pub_t *dhd, msgbuf_ring_t *ring)
4706 {
4707 unsigned long flags;
4708 uint32 pktid;
4709 dhd_prot_t *prot = dhd->prot;
4710 uint16 alloced = 0;
4711 uint16 pktsz = DHD_INFOBUF_RX_BUFPOST_PKTSZ;
4712 uint32 pktlen;
4713 info_buf_post_msg_t *infobuf_post;
4714 uint8 *infobuf_post_tmp;
4715 void *p;
4716 void* msg_start;
4717 uint8 i = 0;
4718 dmaaddr_t pa;
4719 int16 count = 0;
4720
4721 if (ring == NULL)
4722 return 0;
4723
4724 if (ring->inited != TRUE)
4725 return 0;
4726 if (ring == dhd->prot->h2dring_info_subn) {
4727 if (prot->max_infobufpost == 0)
4728 return 0;
4729
4730 count = prot->max_infobufpost - prot->infobufpost;
4731 }
4732 else {
4733 DHD_ERROR(("Unknown ring\n"));
4734 return 0;
4735 }
4736
4737 if (count <= 0) {
4738 DHD_INFO(("%s: Cannot post more than max info resp buffers\n",
4739 __FUNCTION__));
4740 return 0;
4741 }
4742
4743 /* grab the ring lock to allocate pktid and post on ring */
4744 DHD_RING_LOCK(ring->ring_lock, flags);
4745
	/* Claim space for exactly 'count' messages, for mitigation purposes */
4747 msg_start = (void *) dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, FALSE);
4748
4749 if (msg_start == NULL) {
4750 DHD_INFO(("%s:%d: infobufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
4751 DHD_RING_UNLOCK(ring->ring_lock, flags);
4752 return -1;
4753 }
4754
	/* if msg_start != NULL, we should have alloced space for at least 1 item */
4756 ASSERT(alloced > 0);
4757
4758 infobuf_post_tmp = (uint8*) msg_start;
4759
4760 /* loop through each allocated message in the host ring */
4761 for (i = 0; i < alloced; i++) {
4762 infobuf_post = (info_buf_post_msg_t *) infobuf_post_tmp;
4763 /* Create a rx buffer */
4764 #ifdef DHD_USE_STATIC_CTRLBUF
4765 p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
4766 #else
4767 p = PKTGET(dhd->osh, pktsz, FALSE);
4768 #endif /* DHD_USE_STATIC_CTRLBUF */
4769 if (p == NULL) {
4770 DHD_ERROR(("%s:%d: PKTGET for infobuf failed\n", __FUNCTION__, __LINE__));
4771 dhd->rx_pktgetfail++;
4772 break;
4773 }
4774 pktlen = PKTLEN(dhd->osh, p);
4775 if (SECURE_DMA_ENAB(dhd->osh)) {
4776 pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
4777 DMA_RX, p, 0, ring->dma_buf.secdma, 0);
4778 }
4779 #ifndef BCM_SECURE_DMA
4780 else
4781 pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
4782 #endif /* #ifndef BCM_SECURE_DMA */
4783 if (PHYSADDRISZERO(pa)) {
4784 if (SECURE_DMA_ENAB(dhd->osh)) {
4785 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
4786 ring->dma_buf.secdma, 0);
4787 }
4788 else
4789 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
4790 #ifdef DHD_USE_STATIC_CTRLBUF
4791 PKTFREE_STATIC(dhd->osh, p, FALSE);
4792 #else
4793 PKTFREE(dhd->osh, p, FALSE);
4794 #endif /* DHD_USE_STATIC_CTRLBUF */
4795 DHD_ERROR(("Invalid phyaddr 0\n"));
4796 ASSERT(0);
4797 break;
4798 }
4799 #ifdef DMAMAP_STATS
4800 dhd->dma_stats.info_rx++;
4801 dhd->dma_stats.info_rx_sz += pktlen;
4802 #endif /* DMAMAP_STATS */
4803 pktlen = PKTLEN(dhd->osh, p);
4804
4805 /* Common msg header */
4806 infobuf_post->cmn_hdr.msg_type = MSG_TYPE_INFO_BUF_POST;
4807 infobuf_post->cmn_hdr.if_id = 0;
4808 infobuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
4809 infobuf_post->cmn_hdr.flags = ring->current_phase;
4810 ring->seqnum++;
4811
4812 pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_ctrl_map, p, pa,
4813 pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_INFO_RX);
4814
4815 #if defined(DHD_PCIE_PKTID)
4816 if (pktid == DHD_PKTID_INVALID) {
4817 if (SECURE_DMA_ENAB(dhd->osh)) {
4818 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0,
4819 ring->dma_buf.secdma, 0);
4820 } else
4821 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0);
4822
4823 #ifdef DHD_USE_STATIC_CTRLBUF
4824 PKTFREE_STATIC(dhd->osh, p, FALSE);
4825 #else
4826 PKTFREE(dhd->osh, p, FALSE);
4827 #endif /* DHD_USE_STATIC_CTRLBUF */
4828 DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
4829 break;
4830 }
4831 #endif /* DHD_PCIE_PKTID */
4832
4833 infobuf_post->host_buf_len = htol16((uint16)pktlen);
4834 infobuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
4835 infobuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
4836
4837 #ifdef DHD_PKTID_AUDIT_RING
4838 DHD_PKTID_AUDIT(dhd, prot->pktid_ctrl_map, pktid, DHD_DUPLICATE_ALLOC);
4839 #endif /* DHD_PKTID_AUDIT_RING */
4840
		infobuf_post->cmn_hdr.request_id = htol32(pktid);

		DHD_INFO(("ID %d, low_addr 0x%08x, high_addr 0x%08x\n",
			pktid, infobuf_post->host_buf_addr.low_addr,
			infobuf_post->host_buf_addr.high_addr));

		/* Move infobuf_post_tmp to next item */
4847 infobuf_post_tmp = infobuf_post_tmp + ring->item_len;
4848 #ifdef DHD_LBUF_AUDIT
4849 PKTAUDIT(dhd->osh, p);
4850 #endif // endif
4851 }
4852
4853 if (i < alloced) {
4854 if (ring->wr < (alloced - i))
4855 ring->wr = ring->max_items - (alloced - i);
4856 else
4857 ring->wr -= (alloced - i);
4858
4859 alloced = i;
4860 if (alloced && ring->wr == 0) {
4861 DHD_INFO(("%s: flipping the phase now\n", ring->name));
4862 ring->current_phase = ring->current_phase ?
4863 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4864 }
4865 }
4866
4867 /* Update the write pointer in TCM & ring bell */
4868 if (alloced > 0) {
4869 if (ring == dhd->prot->h2dring_info_subn) {
4870 prot->infobufpost += alloced;
4871 }
4872 dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
4873 }
4874
4875 DHD_RING_UNLOCK(ring->ring_lock, flags);
4876
4877 return alloced;
4878 } /* dhd_prot_infobufpost */
4879
4880 #ifdef IOCTLRESP_USE_CONSTMEM
4881 static int
4882 alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
4883 {
4884 int err;
4885 memset(retbuf, 0, sizeof(dhd_dma_buf_t));
4886
4887 if ((err = dhd_dma_buf_alloc(dhd, retbuf, IOCT_RETBUF_SIZE)) != BCME_OK) {
4888 DHD_ERROR(("%s: dhd_dma_buf_alloc err %d\n", __FUNCTION__, err));
4889 ASSERT(0);
4890 return BCME_NOMEM;
4891 }
4892
4893 return BCME_OK;
4894 }
4895
4896 static void
4897 free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
4898 {
	/* retbuf (declared on the caller's stack) may not be fully populated here,
	 * e.g. only va/pa/dmah; restore len and _alloced (including the DMA pad
	 * applied at allocation time) so the buffer is freed with the correct size.
	 */
4900 if (retbuf->va) {
4901 uint32 dma_pad;
4902 dma_pad = (IOCT_RETBUF_SIZE % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
4903 retbuf->len = IOCT_RETBUF_SIZE;
4904 retbuf->_alloced = retbuf->len + dma_pad;
4905 }
4906
4907 dhd_dma_buf_free(dhd, retbuf);
4908 return;
4909 }
4910 #endif /* IOCTLRESP_USE_CONSTMEM */
4911
4912 static int
4913 dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, uint8 msg_type)
4914 {
4915 void *p;
4916 uint16 pktsz;
4917 ioctl_resp_evt_buf_post_msg_t *rxbuf_post;
4918 dmaaddr_t pa;
4919 uint32 pktlen;
4920 dhd_prot_t *prot = dhd->prot;
4921 uint16 alloced = 0;
4922 unsigned long flags;
4923 dhd_dma_buf_t retbuf;
4924 void *dmah = NULL;
4925 uint32 pktid;
4926 void *map_handle;
4927 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
4928 bool non_ioctl_resp_buf = 0;
4929 dhd_pkttype_t buf_type;
4930
4931 if (dhd->busstate == DHD_BUS_DOWN) {
4932 DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
4933 return -1;
4934 }
4935 memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
4936
4937 if (msg_type == MSG_TYPE_IOCTLRESP_BUF_POST)
4938 buf_type = PKTTYPE_IOCTL_RX;
4939 else if (msg_type == MSG_TYPE_EVENT_BUF_POST)
4940 buf_type = PKTTYPE_EVENT_RX;
4941 else if (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST)
4942 buf_type = PKTTYPE_TSBUF_RX;
4943 else {
4944 DHD_ERROR(("invalid message type to be posted to Ctrl ring %d\n", msg_type));
4945 return -1;
4946 }
4947
4948 if ((msg_type == MSG_TYPE_EVENT_BUF_POST) || (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST))
4949 non_ioctl_resp_buf = TRUE;
4950 else
4951 non_ioctl_resp_buf = FALSE;
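	/*
	 * Event and timestamp buffer posts always use regular packet allocation;
	 * ioctl response buffers may instead come from a pre-allocated DMA buffer
	 * (alloc_ioctl_return_buffer()) when IOCTLRESP_USE_CONSTMEM is defined,
	 * see the path below.
	 */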
4952
4953 if (non_ioctl_resp_buf) {
		/* Allocate packet for non-ioctl (event/timestamp) buffer post */
4955 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
4956 } else {
4957 /* Allocate packet for ctrl/ioctl buffer post */
4958 pktsz = DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ;
4959 }
4960
4961 #ifdef IOCTLRESP_USE_CONSTMEM
4962 if (!non_ioctl_resp_buf) {
4963 if (alloc_ioctl_return_buffer(dhd, &retbuf) != BCME_OK) {
4964 DHD_ERROR(("Could not allocate IOCTL response buffer\n"));
4965 return -1;
4966 }
4967 ASSERT(retbuf.len == IOCT_RETBUF_SIZE);
4968 p = retbuf.va;
4969 pktlen = retbuf.len;
4970 pa = retbuf.pa;
4971 dmah = retbuf.dmah;
4972 } else
4973 #endif /* IOCTLRESP_USE_CONSTMEM */
4974 {
4975 #ifdef DHD_USE_STATIC_CTRLBUF
4976 p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
4977 #else
4978 p = PKTGET(dhd->osh, pktsz, FALSE);
4979 #endif /* DHD_USE_STATIC_CTRLBUF */
4980 if (p == NULL) {
4981 DHD_ERROR(("%s:%d: PKTGET for %s buf failed\n",
4982 __FUNCTION__, __LINE__, non_ioctl_resp_buf ?
4983 "EVENT" : "IOCTL RESP"));
4984 dhd->rx_pktgetfail++;
4985 return -1;
4986 }
4987
4988 pktlen = PKTLEN(dhd->osh, p);
4989
4990 if (SECURE_DMA_ENAB(dhd->osh)) {
4991 pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
4992 DMA_RX, p, 0, ring->dma_buf.secdma, 0);
4993 }
4994 #ifndef BCM_SECURE_DMA
4995 else
4996 pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
4997 #endif /* #ifndef BCM_SECURE_DMA */
4998
4999 if (PHYSADDRISZERO(pa)) {
5000 DHD_ERROR(("Invalid physaddr 0\n"));
5001 ASSERT(0);
5002 goto free_pkt_return;
5003 }
5004
5005 #ifdef DMAMAP_STATS
5006 switch (buf_type) {
5007 #ifndef IOCTLRESP_USE_CONSTMEM
5008 case PKTTYPE_IOCTL_RX:
5009 dhd->dma_stats.ioctl_rx++;
5010 dhd->dma_stats.ioctl_rx_sz += pktlen;
5011 break;
5012 #endif /* !IOCTLRESP_USE_CONSTMEM */
5013 case PKTTYPE_EVENT_RX:
5014 dhd->dma_stats.event_rx++;
5015 dhd->dma_stats.event_rx_sz += pktlen;
5016 break;
5017 case PKTTYPE_TSBUF_RX:
5018 dhd->dma_stats.tsbuf_rx++;
5019 dhd->dma_stats.tsbuf_rx_sz += pktlen;
5020 break;
5021 default:
5022 break;
5023 }
5024 #endif /* DMAMAP_STATS */
5025
5026 }
5027
5028 /* grab the ring lock to allocate pktid and post on ring */
5029 DHD_RING_LOCK(ring->ring_lock, flags);
5030
5031 rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *)
5032 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
5033
5034 if (rxbuf_post == NULL) {
5035 DHD_RING_UNLOCK(ring->ring_lock, flags);
5036 DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n",
5037 __FUNCTION__, __LINE__));
5038
5039 #ifdef IOCTLRESP_USE_CONSTMEM
5040 if (non_ioctl_resp_buf)
5041 #endif /* IOCTLRESP_USE_CONSTMEM */
5042 {
5043 if (SECURE_DMA_ENAB(dhd->osh)) {
5044 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
5045 ring->dma_buf.secdma, 0);
5046 } else {
5047 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
5048 }
5049 }
5050 goto free_pkt_return;
5051 }
5052
5053 /* CMN msg header */
5054 rxbuf_post->cmn_hdr.msg_type = msg_type;
5055
5056 #ifdef IOCTLRESP_USE_CONSTMEM
5057 if (!non_ioctl_resp_buf) {
5058 map_handle = dhd->prot->pktid_map_handle_ioctl;
5059 pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen, DMA_RX, dmah,
5060 ring->dma_buf.secdma, buf_type);
5061 } else
5062 #endif /* IOCTLRESP_USE_CONSTMEM */
5063 {
5064 map_handle = dhd->prot->pktid_ctrl_map;
5065 pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle,
5066 p, pa, pktlen, DMA_RX, dmah, ring->dma_buf.secdma,
5067 buf_type);
5068 }
5069
5070 if (pktid == DHD_PKTID_INVALID) {
5071 if (ring->wr == 0) {
5072 ring->wr = ring->max_items - 1;
5073 } else {
5074 ring->wr--;
5075 if (ring->wr == 0) {
5076 ring->current_phase = ring->current_phase ? 0 :
5077 BCMPCIE_CMNHDR_PHASE_BIT_INIT;
5078 }
5079 }
5080 DHD_RING_UNLOCK(ring->ring_lock, flags);
5081 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
5082 DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
5083 goto free_pkt_return;
5084 }
5085
5086 #ifdef DHD_PKTID_AUDIT_RING
5087 DHD_PKTID_AUDIT(dhd, map_handle, pktid, DHD_DUPLICATE_ALLOC);
5088 #endif /* DHD_PKTID_AUDIT_RING */
5089
5090 rxbuf_post->cmn_hdr.request_id = htol32(pktid);
5091 rxbuf_post->cmn_hdr.if_id = 0;
5092 rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
5093 ring->seqnum++;
5094 rxbuf_post->cmn_hdr.flags = ring->current_phase;
5095
5096 #if defined(DHD_PCIE_PKTID)
5097 if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
		if (ring->wr == 0) {
			ring->wr = ring->max_items - 1;
		} else {
			ring->wr--;
			if (ring->wr == 0) {
				ring->current_phase = ring->current_phase ? 0 :
					BCMPCIE_CMNHDR_PHASE_BIT_INIT;
			}
		}
5106 DHD_RING_UNLOCK(ring->ring_lock, flags);
5107 #ifdef IOCTLRESP_USE_CONSTMEM
5108 if (non_ioctl_resp_buf)
5109 #endif /* IOCTLRESP_USE_CONSTMEM */
5110 {
5111 if (SECURE_DMA_ENAB(dhd->osh)) {
5112 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
5113 ring->dma_buf.secdma, 0);
5114 } else
5115 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
5116 }
5117 goto free_pkt_return;
5118 }
5119 #endif /* DHD_PCIE_PKTID */
5120
5121 #ifndef IOCTLRESP_USE_CONSTMEM
5122 rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p));
5123 #else
5124 rxbuf_post->host_buf_len = htol16((uint16)pktlen);
5125 #endif /* IOCTLRESP_USE_CONSTMEM */
5126 rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
5127 rxbuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
5128
5129 #ifdef DHD_LBUF_AUDIT
5130 if (non_ioctl_resp_buf)
5131 PKTAUDIT(dhd->osh, p);
5132 #endif // endif
5133
5134 /* update ring's WR index and ring doorbell to dongle */
5135 dhd_prot_ring_write_complete(dhd, ring, rxbuf_post, 1);
5136
5137 DHD_RING_UNLOCK(ring->ring_lock, flags);
5138
5139 return 1;
5140
5141 free_pkt_return:
5142 if (!non_ioctl_resp_buf) {
5143 #ifdef IOCTLRESP_USE_CONSTMEM
5144 free_ioctl_return_buffer(dhd, &retbuf);
5145 #else
5146 dhd_prot_packet_free(dhd, p, buf_type, FALSE);
5147 #endif /* IOCTLRESP_USE_CONSTMEM */
5148 } else {
5149 dhd_prot_packet_free(dhd, p, buf_type, FALSE);
5150 }
5151
5152 return -1;
5153 } /* dhd_prot_rxbufpost_ctrl */
5154
5155 static uint16
5156 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msg_type, uint32 max_to_post)
5157 {
5158 uint32 i = 0;
5159 int32 ret_val;
5160
	DHD_INFO(("max to post %d, msg_type %d\n", max_to_post, msg_type));
5162
5163 if (dhd->busstate == DHD_BUS_DOWN) {
5164 DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
5165 return 0;
5166 }
5167
5168 while (i < max_to_post) {
5169 ret_val = dhd_prot_rxbufpost_ctrl(dhd, msg_type);
5170 if (ret_val < 0)
5171 break;
5172 i++;
5173 }
5174 DHD_INFO(("posted %d buffers of type %d\n", i, msg_type));
5175 return (uint16)i;
5176 }
5177
5178 static void
5179 dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd)
5180 {
5181 dhd_prot_t *prot = dhd->prot;
5182 int max_to_post;
5183
5184 DHD_INFO(("ioctl resp buf post\n"));
5185 max_to_post = prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted;
5186 if (max_to_post <= 0) {
5187 DHD_INFO(("%s: Cannot post more than max IOCTL resp buffers\n",
5188 __FUNCTION__));
5189 return;
5190 }
5191 prot->cur_ioctlresp_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
5192 MSG_TYPE_IOCTLRESP_BUF_POST, max_to_post);
5193 }
5194
5195 static void
5196 dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd)
5197 {
5198 dhd_prot_t *prot = dhd->prot;
5199 int max_to_post;
5200
5201 max_to_post = prot->max_eventbufpost - prot->cur_event_bufs_posted;
5202 if (max_to_post <= 0) {
5203 DHD_ERROR(("%s: Cannot post more than max event buffers\n",
5204 __FUNCTION__));
5205 return;
5206 }
5207 prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
5208 MSG_TYPE_EVENT_BUF_POST, max_to_post);
5209 }
5210
5211 static int
5212 dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *dhd)
5213 {
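	/* Timestamp buffer posting is currently a no-op in this build; no
	 * MSG_TYPE_TIMSTAMP_BUFPOST buffers are posted from this path.
	 */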
5214 return 0;
5215 }
5216
5217 bool BCMFASTPATH
5218 dhd_prot_process_msgbuf_infocpl(dhd_pub_t *dhd, uint bound)
5219 {
5220 dhd_prot_t *prot = dhd->prot;
5221 bool more = TRUE;
5222 uint n = 0;
5223 msgbuf_ring_t *ring = prot->d2hring_info_cpln;
5224 unsigned long flags;
5225
5226 if (ring == NULL)
5227 return FALSE;
5228 if (ring->inited != TRUE)
5229 return FALSE;
5230
5231 /* Process all the messages - DTOH direction */
5232 while (!dhd_is_device_removed(dhd)) {
5233 uint8 *msg_addr;
5234 uint32 msg_len;
5235
5236 if (dhd_query_bus_erros(dhd)) {
5237 more = FALSE;
5238 break;
5239 }
5240
5241 if (dhd->hang_was_sent) {
5242 more = FALSE;
5243 break;
5244 }
5245
5246 if (dhd->smmu_fault_occurred) {
5247 more = FALSE;
5248 break;
5249 }
5250
5251 DHD_RING_LOCK(ring->ring_lock, flags);
5252 /* Get the message from ring */
5253 msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
5254 DHD_RING_UNLOCK(ring->ring_lock, flags);
5255 if (msg_addr == NULL) {
5256 more = FALSE;
5257 break;
5258 }
5259
5260 /* Prefetch data to populate the cache */
5261 OSL_PREFETCH(msg_addr);
5262
5263 if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
5264 DHD_ERROR(("%s: Error at process rxpl msgbuf of len %d\n",
5265 __FUNCTION__, msg_len));
5266 }
5267
5268 /* Update read pointer */
5269 dhd_prot_upd_read_idx(dhd, ring);
5270
5271 /* After batch processing, check RX bound */
5272 n += msg_len / ring->item_len;
5273 if (n >= bound) {
5274 break;
5275 }
5276 }
5277
5278 return more;
5279 }
5280
5281 #ifdef EWP_EDL
5282 bool
5283 dhd_prot_process_msgbuf_edl(dhd_pub_t *dhd)
5284 {
5285 dhd_prot_t *prot = dhd->prot;
5286 msgbuf_ring_t *ring = prot->d2hring_edl;
5287 unsigned long flags = 0;
5288 uint32 items = 0;
5289 uint16 rd = 0;
5290 uint16 depth = 0;
5291
5292 if (ring == NULL)
5293 return FALSE;
5294 if (ring->inited != TRUE)
5295 return FALSE;
5296 if (ring->item_len == 0) {
5297 DHD_ERROR(("%s: Bad ring ! ringidx %d, item_len %d \n",
5298 __FUNCTION__, ring->idx, ring->item_len));
5299 return FALSE;
5300 }
5301
5302 if (dhd_query_bus_erros(dhd)) {
5303 return FALSE;
5304 }
5305
5306 if (dhd->hang_was_sent) {
5307 return FALSE;
5308 }
5309
5310 /* in this DPC context just check if wr index has moved
5311 * and schedule deferred context to actually process the
5312 * work items.
5313 */
5314 /* update the write index */
5315 DHD_RING_LOCK(ring->ring_lock, flags);
5316 if (dhd->dma_d2h_ring_upd_support) {
5317 /* DMAing write/read indices supported */
5318 ring->wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
5319 } else {
5320 dhd_bus_cmn_readshared(dhd->bus, &ring->wr, RING_WR_UPD, ring->idx);
5321 }
5322 rd = ring->rd;
5323 DHD_RING_UNLOCK(ring->ring_lock, flags);
5324
5325 depth = ring->max_items;
5326 /* check for avail space, in number of ring items */
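	/*
	 * READ_AVAIL_SPACE() counts only the items between rd and wr without
	 * wrapping; if wr < rd, the items from rd to the ring end are reported now
	 * and the wrapped remainder is picked up on the next doorbell.
	 */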
5327 items = READ_AVAIL_SPACE(ring->wr, rd, depth);
5328 if (items == 0) {
5329 /* no work items in edl ring */
5330 return FALSE;
5331 }
5332 if (items > ring->max_items) {
5333 DHD_ERROR(("\r\n======================= \r\n"));
5334 DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
5335 __FUNCTION__, ring, ring->name, ring->max_items, items));
5336 DHD_ERROR(("wr: %d, rd: %d, depth: %d \r\n",
5337 ring->wr, ring->rd, depth));
5338 DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
5339 dhd->busstate, dhd->bus->wait_for_d3_ack));
5340 DHD_ERROR(("\r\n======================= \r\n"));
5341 #ifdef SUPPORT_LINKDOWN_RECOVERY
5342 if (ring->wr >= ring->max_items) {
5343 dhd->bus->read_shm_fail = TRUE;
5344 }
5345 #else
5346 #ifdef DHD_FW_COREDUMP
5347 if (dhd->memdump_enabled) {
5348 /* collect core dump */
5349 dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
5350 dhd_bus_mem_dump(dhd);
5351
5352 }
5353 #endif /* DHD_FW_COREDUMP */
5354 #endif /* SUPPORT_LINKDOWN_RECOVERY */
5355 dhd_schedule_reset(dhd);
5356
5357 return FALSE;
5358 }
5359
5360 if (items > D2HRING_EDL_WATERMARK) {
5361 DHD_ERROR_RLMT(("%s: WARNING! EDL watermark hit, num items=%u;"
5362 " rd=%u; wr=%u; depth=%u;\n", __FUNCTION__, items,
5363 ring->rd, ring->wr, depth));
5364 }
5365
5366 dhd_schedule_logtrace(dhd->info);
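	/*
	 * The work items themselves are consumed later by
	 * dhd_prot_process_edl_complete(), which runs from the logtrace deferred
	 * context scheduled above.
	 */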
5367
5368 return FALSE;
5369 }
5370
5371 /* This is called either from work queue context of 'event_log_dispatcher_work' or
5372 * from the kthread context of dhd_logtrace_thread
5373 */
5374 int
5375 dhd_prot_process_edl_complete(dhd_pub_t *dhd, void *evt_decode_data)
5376 {
5377 dhd_prot_t *prot = NULL;
5378 msgbuf_ring_t *ring = NULL;
5379 int err = 0;
5380 unsigned long flags = 0;
5381 cmn_msg_hdr_t *msg = NULL;
5382 uint8 *msg_addr = NULL;
5383 uint32 max_items_to_process = 0, n = 0;
5384 uint32 num_items = 0, new_items = 0;
5385 uint16 depth = 0;
5386 volatile uint16 wr = 0;
5387
5388 if (!dhd || !dhd->prot)
5389 return 0;
5390
5391 prot = dhd->prot;
5392 ring = prot->d2hring_edl;
5393 if (!ring || !evt_decode_data) {
5394 return 0;
5395 }
5396
5397 if (dhd->hang_was_sent) {
5398 return FALSE;
5399 }
5400
5401 DHD_RING_LOCK(ring->ring_lock, flags);
5402 ring->curr_rd = ring->rd;
5403 wr = ring->wr;
5404 depth = ring->max_items;
5405 /* check for avail space, in number of ring items
5406 * Note, that this will only give the # of items
5407 * from rd to wr if wr>=rd, or from rd to ring end
5408 * if wr < rd. So in the latter case strictly speaking
5409 * not all the items are read. But this is OK, because
5410 * these will be processed in the next doorbell as rd
5411 * would have wrapped around. Processing in the next
5412 * doorbell is acceptable since EDL only contains debug data
5413 */
5414 num_items = READ_AVAIL_SPACE(wr, ring->rd, depth);
5415
5416 if (num_items == 0) {
5417 /* no work items in edl ring */
5418 DHD_RING_UNLOCK(ring->ring_lock, flags);
5419 return 0;
5420 }
5421
5422 DHD_INFO(("%s: EDL work items [%u] available \n",
5423 __FUNCTION__, num_items));
5424
5425 /* if space is available, calculate address to be read */
	msg_addr = (uint8 *)ring->dma_buf.va + (ring->rd * ring->item_len);
5427
5428 max_items_to_process = MIN(num_items, DHD_EVENT_LOGTRACE_BOUND);
5429
5430 DHD_RING_UNLOCK(ring->ring_lock, flags);
5431
5432 /* Prefetch data to populate the cache */
5433 OSL_PREFETCH(msg_addr);
5434
5435 n = max_items_to_process;
5436 while (n > 0) {
5437 msg = (cmn_msg_hdr_t *)msg_addr;
5438 /* wait for DMA of work item to complete */
5439 if ((err = prot->d2h_edl_sync_cb(dhd, ring, msg)) != BCME_OK) {
5440 DHD_ERROR(("%s: Error waiting for DMA to cmpl in EDL "
5441 "ring; err = %d\n", __FUNCTION__, err));
5442 }
5443
5444 /*
5445 * Update the curr_rd to the current index in the ring, from where
5446 * the work item is fetched. This way if the fetched work item
5447 * fails in LIVELOCK, we can print the exact read index in the ring
5448 * that shows up the corrupted work item.
5449 */
5450 if ((ring->curr_rd + 1) >= ring->max_items) {
5451 ring->curr_rd = 0;
5452 } else {
5453 ring->curr_rd += 1;
5454 }
5455
5456 if (err != BCME_OK) {
5457 return 0;
5458 }
5459
5460 /* process the edl work item, i.e, the event log */
5461 err = dhd_event_logtrace_process_edl(dhd, msg_addr, evt_decode_data);
5462
5463 /* Dummy sleep so that scheduler kicks in after processing any logprints */
5464 OSL_SLEEP(0);
5465
5466 /* Prefetch data to populate the cache */
5467 OSL_PREFETCH(msg_addr + ring->item_len);
5468
5469 msg_addr += ring->item_len;
5470 --n;
5471 }
5472
5473 DHD_RING_LOCK(ring->ring_lock, flags);
5474 /* update host ring read pointer */
5475 if ((ring->rd + max_items_to_process) >= ring->max_items)
5476 ring->rd = 0;
5477 else
5478 ring->rd += max_items_to_process;
5479 DHD_RING_UNLOCK(ring->ring_lock, flags);
5480
5481 /* Now after processing max_items_to_process update dongle rd index.
5482 * The TCM rd index is updated only if bus is not
5483 * in D3. Else, the rd index is updated from resume
5484 * context in - 'dhdpcie_bus_suspend'
5485 */
5486 DHD_GENERAL_LOCK(dhd, flags);
5487 if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) {
5488 DHD_INFO(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
5489 __FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state));
5490 DHD_GENERAL_UNLOCK(dhd, flags);
5491 } else {
5492 DHD_GENERAL_UNLOCK(dhd, flags);
5493 DHD_EDL_RING_TCM_RD_UPDATE(dhd);
5494 }
5495
	/* If num_items > bound, this function will be rescheduled and run again
	 * anyway, so a wr index update made by the DPC in the meantime is picked
	 * up on that pass. But if num_items <= bound and the DPC updates the wr
	 * index while the loop above is running, the updated 'wr' must be re-read
	 * here; otherwise those event logs would not be processed until the next
	 * time this function is scheduled.
	 */
5505 if (num_items <= DHD_EVENT_LOGTRACE_BOUND) {
5506 /* read the updated wr index if reqd. and update num_items */
5507 DHD_RING_LOCK(ring->ring_lock, flags);
5508 if (wr != (volatile uint16)ring->wr) {
5509 wr = (volatile uint16)ring->wr;
5510 new_items = READ_AVAIL_SPACE(wr, ring->rd, depth);
5511 DHD_INFO(("%s: new items [%u] avail in edl\n",
5512 __FUNCTION__, new_items));
5513 num_items += new_items;
5514 }
5515 DHD_RING_UNLOCK(ring->ring_lock, flags);
5516 }
5517
5518 /* if # of items processed is less than num_items, need to re-schedule
5519 * the deferred ctx
5520 */
5521 if (max_items_to_process < num_items) {
5522 DHD_INFO(("%s: EDL bound hit / new items found, "
5523 "items processed=%u; remaining=%u, "
5524 "resched deferred ctx...\n",
5525 __FUNCTION__, max_items_to_process,
5526 num_items - max_items_to_process));
5527 return (num_items - max_items_to_process);
5528 }
5529
5530 return 0;
5531
5532 }
5533
5534 void
5535 dhd_prot_edl_ring_tcm_rd_update(dhd_pub_t *dhd)
5536 {
5537 dhd_prot_t *prot = NULL;
5538 unsigned long flags = 0;
5539 msgbuf_ring_t *ring = NULL;
5540
5541 if (!dhd)
5542 return;
5543
5544 prot = dhd->prot;
5545 if (!prot || !prot->d2hring_edl)
5546 return;
5547
5548 ring = prot->d2hring_edl;
5549 DHD_RING_LOCK(ring->ring_lock, flags);
5550 dhd_prot_upd_read_idx(dhd, ring);
5551 DHD_RING_UNLOCK(ring->ring_lock, flags);
5552 }
5553 #endif /* EWP_EDL */
5554
5555 /* called when DHD needs to check for 'receive complete' messages from the dongle */
5556 bool BCMFASTPATH
5557 dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound, int ringtype)
5558 {
5559 bool more = FALSE;
5560 uint n = 0;
5561 dhd_prot_t *prot = dhd->prot;
5562 msgbuf_ring_t *ring;
5563 uint16 item_len;
5564 host_rxbuf_cmpl_t *msg = NULL;
5565 uint8 *msg_addr;
5566 uint32 msg_len;
5567 uint16 pkt_cnt, pkt_cnt_newidx;
5568 unsigned long flags;
5569 dmaaddr_t pa;
5570 uint32 len;
5571 void *dmah;
5572 void *secdma;
5573 int ifidx = 0, if_newidx = 0;
5574 void *pkt, *pktqhead = NULL, *prevpkt = NULL, *pkt_newidx, *nextpkt;
5575 uint32 pktid;
5576 int i;
5577 uint8 sync;
5578 ts_timestamp_t *ts;
5579
5580 BCM_REFERENCE(ts);
5581 #ifdef DHD_HP2P
5582 if (ringtype == DHD_HP2P_RING && prot->d2hring_hp2p_rxcpl)
5583 ring = prot->d2hring_hp2p_rxcpl;
5584 else
5585 #endif /* DHD_HP2P */
5586 ring = &prot->d2hring_rx_cpln;
5587 item_len = ring->item_len;
5588 while (1) {
5589 if (dhd_is_device_removed(dhd))
5590 break;
5591
5592 if (dhd_query_bus_erros(dhd))
5593 break;
5594
5595 if (dhd->hang_was_sent)
5596 break;
5597
5598 if (dhd->smmu_fault_occurred) {
5599 break;
5600 }
5601
5602 pkt_cnt = 0;
5603 pktqhead = pkt_newidx = NULL;
5604 pkt_cnt_newidx = 0;
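		/*
		 * Completions for the same interface are chained into one packet list
		 * (pktqhead) and handed up in a single batch; a completion for a
		 * different interface ends the inner loop and is forwarded separately
		 * via pkt_newidx/if_newidx.
		 */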
5605
5606 DHD_RING_LOCK(ring->ring_lock, flags);
5607
5608 /* Get the address of the next message to be read from ring */
5609 msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
5610 if (msg_addr == NULL) {
5611 DHD_RING_UNLOCK(ring->ring_lock, flags);
5612 break;
5613 }
5614
5615 while (msg_len > 0) {
5616 msg = (host_rxbuf_cmpl_t *)msg_addr;
5617
5618 /* Wait until DMA completes, then fetch msg_type */
5619 sync = prot->d2h_sync_cb(dhd, ring, &msg->cmn_hdr, item_len);
5620 /*
5621 * Update the curr_rd to the current index in the ring, from where
5622 * the work item is fetched. This way if the fetched work item
5623 * fails in LIVELOCK, we can print the exact read index in the ring
5624 * that shows up the corrupted work item.
5625 */
5626 if ((ring->curr_rd + 1) >= ring->max_items) {
5627 ring->curr_rd = 0;
5628 } else {
5629 ring->curr_rd += 1;
5630 }
5631
5632 if (!sync) {
5633 msg_len -= item_len;
5634 msg_addr += item_len;
5635 continue;
5636 }
5637
5638 pktid = ltoh32(msg->cmn_hdr.request_id);
5639
5640 #ifdef DHD_PKTID_AUDIT_RING
5641 DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_rx_map, pktid,
5642 DHD_DUPLICATE_FREE, msg, D2HRING_RXCMPLT_ITEMSIZE);
5643 #endif /* DHD_PKTID_AUDIT_RING */
5644
5645 pkt = DHD_PKTID_TO_NATIVE(dhd, prot->pktid_rx_map, pktid, pa,
5646 len, dmah, secdma, PKTTYPE_DATA_RX);
5647 if (!pkt) {
5648 msg_len -= item_len;
5649 msg_addr += item_len;
5650 continue;
5651 }
5652
5653 if (SECURE_DMA_ENAB(dhd->osh))
5654 SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0,
5655 dmah, secdma, 0);
5656 else
5657 DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
5658
5659 #ifdef DMAMAP_STATS
5660 dhd->dma_stats.rxdata--;
5661 dhd->dma_stats.rxdata_sz -= len;
5662 #endif /* DMAMAP_STATS */
5663 DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, "
5664 "pktdata %p, metalen %d\n",
5665 ltoh32(msg->cmn_hdr.request_id),
5666 ltoh16(msg->data_offset),
5667 ltoh16(msg->data_len), msg->cmn_hdr.if_id,
5668 msg->cmn_hdr.flags, PKTDATA(dhd->osh, pkt),
5669 ltoh16(msg->metadata_len)));
5670
5671 pkt_cnt++;
5672 msg_len -= item_len;
5673 msg_addr += item_len;
5674
5675 #if DHD_DBG_SHOW_METADATA
5676 if (prot->metadata_dbg && prot->rx_metadata_offset &&
5677 msg->metadata_len) {
5678 uchar *ptr;
5679 ptr = PKTDATA(dhd->osh, pkt) - (prot->rx_metadata_offset);
5680 /* header followed by data */
5681 bcm_print_bytes("rxmetadata", ptr, msg->metadata_len);
5682 dhd_prot_print_metadata(dhd, ptr, msg->metadata_len);
5683 }
5684 #endif /* DHD_DBG_SHOW_METADATA */
5685
5686 /* data_offset from buf start */
5687 if (ltoh16(msg->data_offset)) {
5688 /* data offset given from dongle after split rx */
5689 PKTPULL(dhd->osh, pkt, ltoh16(msg->data_offset));
5690 }
5691 else if (prot->rx_dataoffset) {
5692 /* DMA RX offset updated through shared area */
5693 PKTPULL(dhd->osh, pkt, prot->rx_dataoffset);
5694 }
5695 /* Actual length of the packet */
5696 PKTSETLEN(dhd->osh, pkt, ltoh16(msg->data_len));
5697
5698 #if defined(WL_MONITOR)
5699 if (dhd_monitor_enabled(dhd, ifidx)) {
5700 if (msg->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11) {
5701 dhd_rx_mon_pkt(dhd, msg, pkt, ifidx);
5702 continue;
5703 } else {
5704 DHD_ERROR(("Received non 802.11 packet, "
5705 "when monitor mode is enabled\n"));
5706 }
5707 }
5708 #endif /* WL_MONITOR */
5709
5710 if (msg->flags & BCMPCIE_PKT_FLAGS_NO_FORWARD) {
5711 DHD_PKT_FLAGS_SET_NO_FWD(pkt);
5712 }
5713
5714 if (!pktqhead) {
5715 pktqhead = prevpkt = pkt;
5716 ifidx = msg->cmn_hdr.if_id;
5717 } else {
5718 if (ifidx != msg->cmn_hdr.if_id) {
5719 pkt_newidx = pkt;
5720 if_newidx = msg->cmn_hdr.if_id;
5721 pkt_cnt--;
5722 pkt_cnt_newidx = 1;
5723 break;
5724 } else {
5725 PKTSETNEXT(dhd->osh, prevpkt, pkt);
5726 prevpkt = pkt;
5727 }
5728 }
5729
5730 #ifdef DHD_HP2P
5731 if (dhd->hp2p_capable && ring == prot->d2hring_hp2p_rxcpl) {
5732 #ifdef DHD_HP2P_DEBUG
5733 bcm_print_bytes("Rxcpl", (uchar *)msg, sizeof(host_rxbuf_cmpl_t));
5734 #endif /* DHD_HP2P_DEBUG */
5735 dhd_update_hp2p_rxstats(dhd, msg);
5736 }
5737 #endif /* DHD_HP2P */
5738
5739 #ifdef DHD_LBUF_AUDIT
5740 PKTAUDIT(dhd->osh, pkt);
5741 #endif // endif
5742 }
5743
		/* Roll back the read pointer for any unprocessed messages (e.g. when the
		 * inner loop broke out early on an interface switch), so they are picked
		 * up again on the next pass.
		 */
5745 if (msg_len > 0) {
5746 if (ring->rd < msg_len / item_len)
5747 ring->rd = ring->max_items - msg_len / item_len;
5748 else
5749 ring->rd -= msg_len / item_len;
5750 }
5751
5752 /* Update read pointer */
5753 dhd_prot_upd_read_idx(dhd, ring);
5754
5755 DHD_RING_UNLOCK(ring->ring_lock, flags);
5756
5757 pkt = pktqhead;
5758 for (i = 0; pkt && i < pkt_cnt; i++, pkt = nextpkt) {
5759 nextpkt = PKTNEXT(dhd->osh, pkt);
5760 PKTSETNEXT(dhd->osh, pkt, NULL);
5761 #ifdef DHD_LB_RXP
5762 dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx);
5763 #elif defined(DHD_RX_CHAINING)
5764 dhd_rxchain_frame(dhd, pkt, ifidx);
5765 #else
5766 dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
5767 #endif /* DHD_LB_RXP */
5768 }
5769
5770 if (pkt_newidx) {
5771 #ifdef DHD_LB_RXP
5772 dhd_lb_rx_pkt_enqueue(dhd, pkt_newidx, if_newidx);
5773 #elif defined(DHD_RX_CHAINING)
5774 dhd_rxchain_frame(dhd, pkt_newidx, if_newidx);
5775 #else
5776 dhd_bus_rx_frame(dhd->bus, pkt_newidx, if_newidx, 1);
5777 #endif /* DHD_LB_RXP */
5778 }
5779
5780 pkt_cnt += pkt_cnt_newidx;
5781
5782 /* Post another set of rxbufs to the device */
5783 dhd_prot_return_rxbuf(dhd, 0, pkt_cnt);
5784
5785 #ifdef DHD_RX_CHAINING
5786 dhd_rxchain_commit(dhd);
5787 #endif // endif
5788
5789 /* After batch processing, check RX bound */
5790 n += pkt_cnt;
5791 if (n >= bound) {
5792 more = TRUE;
5793 break;
5794 }
5795 }
5796
5797 /* Call lb_dispatch only if packets are queued */
5798 if (n &&
5799 #ifdef WL_MONITOR
5800 !(dhd_monitor_enabled(dhd, ifidx)) &&
5801 #endif /* WL_MONITOR */
5802 TRUE) {
5803 DHD_LB_DISPATCH_RX_COMPL(dhd);
5804 DHD_LB_DISPATCH_RX_PROCESS(dhd);
5805 }
5806
5807 return more;
5808
5809 }
5810
5811 /**
5812 * Hands transmit packets (with a caller provided flow_id) over to dongle territory (the flow ring)
5813 */
5814 void
5815 dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flowid, void *msgring)
5816 {
5817 msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring;
5818
5819 if (ring == NULL) {
5820 DHD_ERROR(("%s: NULL txflowring. exiting...\n", __FUNCTION__));
5821 return;
5822 }
5823 /* Update read pointer */
5824 if (dhd->dma_d2h_ring_upd_support) {
5825 ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
5826 }
5827
5828 DHD_TRACE(("ringid %d flowid %d write %d read %d \n\n",
5829 ring->idx, flowid, ring->wr, ring->rd));
5830
5831 /* Need more logic here, but for now use it directly */
5832 dhd_bus_schedule_queue(dhd->bus, flowid, TRUE); /* from queue to flowring */
5833 }
5834
5835 /** called when DHD needs to check for 'transmit complete' messages from the dongle */
5836 bool BCMFASTPATH
5837 dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound, int ringtype)
5838 {
5839 bool more = TRUE;
5840 uint n = 0;
5841 msgbuf_ring_t *ring;
5842 unsigned long flags;
5843
5844 #ifdef DHD_HP2P
5845 if (ringtype == DHD_HP2P_RING && dhd->prot->d2hring_hp2p_txcpl)
5846 ring = dhd->prot->d2hring_hp2p_txcpl;
5847 else
5848 #endif /* DHD_HP2P */
5849 ring = &dhd->prot->d2hring_tx_cpln;
5850
5851 /* Process all the messages - DTOH direction */
5852 while (!dhd_is_device_removed(dhd)) {
5853 uint8 *msg_addr;
5854 uint32 msg_len;
5855
5856 if (dhd_query_bus_erros(dhd)) {
5857 more = FALSE;
5858 break;
5859 }
5860
5861 if (dhd->hang_was_sent) {
5862 more = FALSE;
5863 break;
5864 }
5865
5866 if (dhd->smmu_fault_occurred) {
5867 more = FALSE;
5868 break;
5869 }
5870
5871 DHD_RING_LOCK(ring->ring_lock, flags);
5872 /* Get the address of the next message to be read from ring */
5873 msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
5874 DHD_RING_UNLOCK(ring->ring_lock, flags);
5875
5876 if (msg_addr == NULL) {
5877 more = FALSE;
5878 break;
5879 }
5880
5881 /* Prefetch data to populate the cache */
5882 OSL_PREFETCH(msg_addr);
5883
5884 if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
5885 DHD_ERROR(("%s: process %s msg addr %p len %d\n",
5886 __FUNCTION__, ring->name, msg_addr, msg_len));
5887 }
5888
5889 /* Write to dngl rd ptr */
5890 dhd_prot_upd_read_idx(dhd, ring);
5891
5892 /* After batch processing, check bound */
5893 n += msg_len / ring->item_len;
5894 if (n >= bound) {
5895 break;
5896 }
5897 }
5898
5899 DHD_LB_DISPATCH_TX_COMPL(dhd);
5900
5901 return more;
5902 }
5903
5904 int BCMFASTPATH
5905 dhd_prot_process_trapbuf(dhd_pub_t *dhd)
5906 {
5907 uint32 data;
5908 dhd_dma_buf_t *trap_addr = &dhd->prot->fw_trap_buf;
5909
5910 /* Interrupts can come in before this struct
5911 * has been initialized.
5912 */
5913 if (trap_addr->va == NULL) {
5914 DHD_ERROR(("%s: trap_addr->va is NULL\n", __FUNCTION__));
5915 return 0;
5916 }
5917
5918 OSL_CACHE_INV((void *)trap_addr->va, sizeof(uint32));
5919 data = *(uint32 *)(trap_addr->va);
5920
5921 if (data & D2H_DEV_FWHALT) {
5922 DHD_ERROR(("Firmware trapped and trap_data is 0x%04x\n", data));
5923
5924 if (data & D2H_DEV_EXT_TRAP_DATA)
5925 {
5926 if (dhd->extended_trap_data) {
5927 OSL_CACHE_INV((void *)trap_addr->va,
5928 BCMPCIE_EXT_TRAP_DATA_MAXLEN);
5929 memcpy(dhd->extended_trap_data, (uint32 *)trap_addr->va,
5930 BCMPCIE_EXT_TRAP_DATA_MAXLEN);
5931 }
5932 DHD_ERROR(("Extended trap data available\n"));
5933 }
5934 return data;
5935 }
5936 return 0;
5937 }
5938
5939 /** called when DHD needs to check for 'ioctl complete' messages from the dongle */
5940 int BCMFASTPATH
5941 dhd_prot_process_ctrlbuf(dhd_pub_t *dhd)
5942 {
5943 dhd_prot_t *prot = dhd->prot;
5944 msgbuf_ring_t *ring = &prot->d2hring_ctrl_cpln;
5945 unsigned long flags;
5946
5947 /* Process all the messages - DTOH direction */
5948 while (!dhd_is_device_removed(dhd)) {
5949 uint8 *msg_addr;
5950 uint32 msg_len;
5951
5952 if (dhd_query_bus_erros(dhd)) {
5953 break;
5954 }
5955
5956 if (dhd->hang_was_sent) {
5957 break;
5958 }
5959
5960 if (dhd->smmu_fault_occurred) {
5961 break;
5962 }
5963
5964 DHD_RING_LOCK(ring->ring_lock, flags);
5965 /* Get the address of the next message to be read from ring */
5966 msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
5967 DHD_RING_UNLOCK(ring->ring_lock, flags);
5968
5969 if (msg_addr == NULL) {
5970 break;
5971 }
5972
5973 /* Prefetch data to populate the cache */
5974 OSL_PREFETCH(msg_addr);
5975 if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
5976 DHD_ERROR(("%s: process %s msg addr %p len %d\n",
5977 __FUNCTION__, ring->name, msg_addr, msg_len));
5978 }
5979
5980 /* Write to dngl rd ptr */
5981 dhd_prot_upd_read_idx(dhd, ring);
5982 }
5983
5984 return 0;
5985 }
5986
5987 /**
5988 * Consume messages out of the D2H ring. Ensure that the message's DMA to host
5989 * memory has completed, before invoking the message handler via a table lookup
5990 * of the cmn_msg_hdr::msg_type.
5991 */
5992 static int BCMFASTPATH
5993 dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len)
5994 {
5995 uint32 buf_len = len;
5996 uint16 item_len;
5997 uint8 msg_type;
5998 cmn_msg_hdr_t *msg = NULL;
5999 int ret = BCME_OK;
6000
6001 ASSERT(ring);
6002 item_len = ring->item_len;
6003 if (item_len == 0) {
6004 DHD_ERROR(("%s: ringidx %d, item_len %d buf_len %d \n",
6005 __FUNCTION__, ring->idx, item_len, buf_len));
6006 return BCME_ERROR;
6007 }
6008
6009 while (buf_len > 0) {
6010 if (dhd->hang_was_sent) {
6011 ret = BCME_ERROR;
6012 goto done;
6013 }
6014
6015 if (dhd->smmu_fault_occurred) {
6016 ret = BCME_ERROR;
6017 goto done;
6018 }
6019
6020 msg = (cmn_msg_hdr_t *)buf;
6021
6022 /* Wait until DMA completes, then fetch msg_type */
6023 msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, item_len);
6024
6025 /*
6026 * Update the curr_rd to the current index in the ring, from where
6027 * the work item is fetched. This way if the fetched work item
6028 * fails in LIVELOCK, we can print the exact read index in the ring
6029 * that shows up the corrupted work item.
6030 */
6031 if ((ring->curr_rd + 1) >= ring->max_items) {
6032 ring->curr_rd = 0;
6033 } else {
6034 ring->curr_rd += 1;
6035 }
6036
6037 /* Prefetch data to populate the cache */
6038 OSL_PREFETCH(buf + item_len);
6039
6040 DHD_INFO(("msg_type %d item_len %d buf_len %d\n",
6041 msg_type, item_len, buf_len));
6042
6043 if (msg_type == MSG_TYPE_LOOPBACK) {
6044 bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, item_len);
6045 DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", item_len));
6046 }
6047
6048 ASSERT(msg_type < DHD_PROT_FUNCS);
6049 if (msg_type >= DHD_PROT_FUNCS) {
6050 DHD_ERROR(("%s: msg_type %d, item_len %d buf_len %d\n",
6051 __FUNCTION__, msg_type, item_len, buf_len));
6052 ret = BCME_ERROR;
6053 goto done;
6054 }
6055
6056 if (msg_type == MSG_TYPE_INFO_BUF_CMPLT) {
6057 if (ring == dhd->prot->d2hring_info_cpln) {
6058 if (!dhd->prot->infobufpost) {
					DHD_ERROR(("infobuf post count is zero, "
						"but a completion was received\n"));
6061 goto done;
6062 }
6063 dhd->prot->infobufpost--;
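				/* Re-post an info buffer for the one just consumed to keep
				 * the info submission ring replenished.
				 */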
6064 dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
6065 dhd_prot_process_infobuf_complete(dhd, buf);
6066 }
6067 } else
6068 if (table_lookup[msg_type]) {
6069 table_lookup[msg_type](dhd, buf);
6070 }
6071
6072 if (buf_len < item_len) {
6073 ret = BCME_ERROR;
6074 goto done;
6075 }
6076 buf_len = buf_len - item_len;
6077 buf = buf + item_len;
6078 }
6079
6080 done:
6081
6082 #ifdef DHD_RX_CHAINING
6083 dhd_rxchain_commit(dhd);
6084 #endif // endif
6085
6086 return ret;
6087 } /* dhd_prot_process_msgtype */
6088
6089 static void
6090 dhd_prot_noop(dhd_pub_t *dhd, void *msg)
6091 {
6092 return;
6093 }
6094
6095 /** called on MSG_TYPE_RING_STATUS message received from dongle */
6096 static void
6097 dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg)
6098 {
6099 pcie_ring_status_t *ring_status = (pcie_ring_status_t *) msg;
6100 uint32 request_id = ltoh32(ring_status->cmn_hdr.request_id);
6101 uint16 status = ltoh16(ring_status->compl_hdr.status);
6102 uint16 ring_id = ltoh16(ring_status->compl_hdr.flow_ring_id);
6103
6104 DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, write_idx %d \n",
6105 request_id, status, ring_id, ltoh16(ring_status->write_idx)));
6106
6107 if (ltoh16(ring_status->compl_hdr.ring_id) != BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT)
6108 return;
6109 if (status == BCMPCIE_BAD_PHASE) {
		/* bad phase reported by the dongle */
6111 DHD_ERROR(("Bad phase\n"));
6112 }
6113 if (status != BCMPCIE_BADOPTION)
6114 return;
6115
6116 if (request_id == DHD_H2D_DBGRING_REQ_PKTID) {
6117 if (dhd->prot->h2dring_info_subn != NULL) {
6118 if (dhd->prot->h2dring_info_subn->create_pending == TRUE) {
6119 DHD_ERROR(("H2D ring create failed for info ring\n"));
6120 dhd->prot->h2dring_info_subn->create_pending = FALSE;
6121 }
6122 else
			DHD_ERROR(("info ring create response received, but create not pending\n"));
6124 } else {
6125 DHD_ERROR(("%s info submit ring doesn't exist\n", __FUNCTION__));
6126 }
6127 }
6128 else if (request_id == DHD_D2H_DBGRING_REQ_PKTID) {
6129 if (dhd->prot->d2hring_info_cpln != NULL) {
6130 if (dhd->prot->d2hring_info_cpln->create_pending == TRUE) {
6131 DHD_ERROR(("D2H ring create failed for info ring\n"));
6132 dhd->prot->d2hring_info_cpln->create_pending = FALSE;
6133 }
6134 else
6135 DHD_ERROR(("ring create ID for info ring, create not pending\n"));
6136 } else {
6137 DHD_ERROR(("%s info cpl ring doesn't exist\n", __FUNCTION__));
6138 }
6139 }
6140 #ifdef DHD_HP2P
6141 else if (request_id == DHD_D2H_HPPRING_TXREQ_PKTID) {
6142 if (dhd->prot->d2hring_hp2p_txcpl != NULL) {
6143 if (dhd->prot->d2hring_hp2p_txcpl->create_pending == TRUE) {
				DHD_ERROR(("D2H ring create failed for hp2p txcmpl ring\n"));
				dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
			}
			else
				DHD_ERROR(("ring create response for hp2p txcmpl ring, but create not pending\n"));
6149 } else {
6150 DHD_ERROR(("%s hp2p txcmpl ring doesn't exist\n", __FUNCTION__));
6151 }
6152 }
6153 else if (request_id == DHD_D2H_HPPRING_RXREQ_PKTID) {
6154 if (dhd->prot->d2hring_hp2p_rxcpl != NULL) {
6155 if (dhd->prot->d2hring_hp2p_rxcpl->create_pending == TRUE) {
6156 DHD_ERROR(("D2H ring create failed for hp2p rxcmpl ring\n"));
6157 dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
6158 }
6159 else
6160 DHD_ERROR(("ring create ID for hp2p rxcmpl ring, not pending\n"));
6161 } else {
6162 DHD_ERROR(("%s hp2p rxcpl ring doesn't exist\n", __FUNCTION__));
6163 }
6164 }
6165 #endif /* DHD_HP2P */
6166 else {
		DHD_ERROR(("don't know how to pair with original request\n"));
6168 }
6169 /* How do we track this to pair it with ??? */
6170 return;
6171 }
6172
6173 /** called on MSG_TYPE_GEN_STATUS ('general status') message received from dongle */
6174 static void
6175 dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg)
6176 {
6177 pcie_gen_status_t *gen_status = (pcie_gen_status_t *)msg;
6178 DHD_ERROR(("ERROR: gen status: request_id %d, STATUS 0x%04x, flow ring %d \n",
6179 gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status,
6180 gen_status->compl_hdr.flow_ring_id));
6181
6182 /* How do we track this to pair it with ??? */
6183 return;
6184 }
6185
6186 /**
6187 * Called on MSG_TYPE_IOCTLPTR_REQ_ACK ('ioctl ack') message received from dongle, meaning that the
6188 * dongle received the ioctl message in dongle memory.
6189 */
6190 static void
6191 dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg)
6192 {
6193 ioctl_req_ack_msg_t *ioct_ack = (ioctl_req_ack_msg_t *)msg;
6194 unsigned long flags;
6195 #if defined(DHD_PKTID_AUDIT_RING)
6196 uint32 pktid = ltoh32(ioct_ack->cmn_hdr.request_id);
6197 #endif // endif
6198
6199 #if defined(DHD_PKTID_AUDIT_RING)
	/* Skip audit for DHD_IOCTL_REQ_PKTID = 0xFFFE */
6201 if (pktid != DHD_IOCTL_REQ_PKTID) {
6202 #ifndef IOCTLRESP_USE_CONSTMEM
6203 DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
6204 DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
6205 #else
6206 DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
6207 DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
6208 #endif /* !IOCTLRESP_USE_CONSTMEM */
6209 }
6210 #endif // endif
6211
6212 dhd->prot->ioctl_ack_time = OSL_LOCALTIME_NS();
6213
6214 DHD_GENERAL_LOCK(dhd, flags);
6215 if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) &&
6216 (dhd->prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
6217 dhd->prot->ioctl_state &= ~MSGBUF_IOCTL_ACK_PENDING;
6218 } else {
6219 DHD_ERROR(("%s: received ioctl ACK with state %02x trans_id = %d\n",
6220 __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
6221 prhex("dhd_prot_ioctack_process:",
6222 (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
6223 }
6224 DHD_GENERAL_UNLOCK(dhd, flags);
6225
6226 DHD_CTL(("ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n",
6227 ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status,
6228 ioct_ack->compl_hdr.flow_ring_id));
6229 if (ioct_ack->compl_hdr.status != 0) {
6230 DHD_ERROR(("got an error status for the ioctl request...need to handle that\n"));
6231 }
6232 }
6233
6234 /** called on MSG_TYPE_IOCTL_CMPLT message received from dongle */
6235 static void
6236 dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg)
6237 {
6238 dhd_prot_t *prot = dhd->prot;
6239 uint32 pkt_id, xt_id;
6240 ioctl_comp_resp_msg_t *ioct_resp = (ioctl_comp_resp_msg_t *)msg;
6241 void *pkt;
6242 unsigned long flags;
6243 dhd_dma_buf_t retbuf;
6244
6245 /* Check for ioctl timeout induce flag, which is set by firing
6246 * dhd iovar to induce IOCTL timeout. If flag is set,
6247 * return from here, which results in to IOCTL timeout.
6248 */
6249 if (dhd->dhd_induce_error == DHD_INDUCE_IOCTL_TIMEOUT) {
6250 DHD_ERROR(("%s: Inducing resumed on timeout\n", __FUNCTION__));
6251 return;
6252 }
6253
6254 memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
6255
6256 pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id);
6257
6258 #if defined(DHD_PKTID_AUDIT_RING)
6259 #ifndef IOCTLRESP_USE_CONSTMEM
6260 DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_ctrl_map, pkt_id,
6261 DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
6262 #else
6263 DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_map_handle_ioctl, pkt_id,
6264 DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
6265 #endif /* !IOCTLRESP_USE_CONSTMEM */
6266 #endif // endif
6267
6268 DHD_GENERAL_LOCK(dhd, flags);
6269 if ((prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) ||
6270 !(prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
6271 DHD_ERROR(("%s: received ioctl response with state %02x trans_id = %d\n",
6272 __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
6273 prhex("dhd_prot_ioctcmplt_process:",
6274 (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
6275 DHD_GENERAL_UNLOCK(dhd, flags);
6276 return;
6277 }
6278
6279 dhd->prot->ioctl_cmplt_time = OSL_LOCALTIME_NS();
6280
6281 /* Clear Response pending bit */
6282 prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING;
6283 DHD_GENERAL_UNLOCK(dhd, flags);
6284
6285 #ifndef IOCTLRESP_USE_CONSTMEM
6286 pkt = dhd_prot_packet_get(dhd, pkt_id, PKTTYPE_IOCTL_RX, TRUE);
6287 #else
6288 dhd_prot_ioctl_ret_buffer_get(dhd, pkt_id, &retbuf);
6289 pkt = retbuf.va;
6290 #endif /* !IOCTLRESP_USE_CONSTMEM */
6291 if (!pkt) {
6292 DHD_ERROR(("%s: received ioctl response with NULL pkt\n", __FUNCTION__));
6293 prhex("dhd_prot_ioctcmplt_process:",
6294 (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
6295 return;
6296 }
6297
6298 prot->ioctl_resplen = ltoh16(ioct_resp->resp_len);
6299 prot->ioctl_status = ltoh16(ioct_resp->compl_hdr.status);
6300 xt_id = ltoh16(ioct_resp->trans_id);
6301
6302 if (xt_id != prot->ioctl_trans_id || prot->curr_ioctl_cmd != ioct_resp->cmd) {
6303 DHD_ERROR(("%s: transaction id(%d %d) or cmd(%d %d) mismatch\n",
6304 __FUNCTION__, xt_id, prot->ioctl_trans_id,
6305 prot->curr_ioctl_cmd, ioct_resp->cmd));
6306 dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_ERROR);
6307 dhd_prot_debug_info_print(dhd);
6308 #ifdef DHD_FW_COREDUMP
6309 if (dhd->memdump_enabled) {
6310 /* collect core dump */
6311 dhd->memdump_type = DUMP_TYPE_TRANS_ID_MISMATCH;
6312 dhd_bus_mem_dump(dhd);
6313 }
6314 #else
6315 ASSERT(0);
6316 #endif /* DHD_FW_COREDUMP */
6317 dhd_schedule_reset(dhd);
6318 goto exit;
6319 }
6320 DHD_CTL(("IOCTL_COMPLETE: req_id %x transid %d status %x resplen %d\n",
6321 pkt_id, xt_id, prot->ioctl_status, prot->ioctl_resplen));
6322
6323 if (prot->ioctl_resplen > 0) {
6324 #ifndef IOCTLRESP_USE_CONSTMEM
6325 bcopy(PKTDATA(dhd->osh, pkt), prot->retbuf.va, prot->ioctl_resplen);
6326 #else
6327 bcopy(pkt, prot->retbuf.va, prot->ioctl_resplen);
6328 #endif /* !IOCTLRESP_USE_CONSTMEM */
6329 }
6330
6331 /* wake up any dhd_os_ioctl_resp_wait() */
6332 dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_SUCCESS);
6333
6334 exit:
6335 #ifndef IOCTLRESP_USE_CONSTMEM
6336 dhd_prot_packet_free(dhd, pkt,
6337 PKTTYPE_IOCTL_RX, FALSE);
6338 #else
6339 free_ioctl_return_buffer(dhd, &retbuf);
6340 #endif /* !IOCTLRESP_USE_CONSTMEM */
6341
6342 /* Post another ioctl buf to the device */
6343 if (prot->cur_ioctlresp_bufs_posted > 0) {
6344 prot->cur_ioctlresp_bufs_posted--;
6345 }
6346
6347 dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
6348 }
6349
6350 int
6351 dhd_prot_check_tx_resource(dhd_pub_t *dhd)
6352 {
6353 return dhd->prot->no_tx_resource;
6354 }
6355
6356 void
6357 dhd_prot_update_pktid_txq_stop_cnt(dhd_pub_t *dhd)
6358 {
6359 dhd->prot->pktid_txq_stop_cnt++;
6360 }
6361
6362 void
6363 dhd_prot_update_pktid_txq_start_cnt(dhd_pub_t *dhd)
6364 {
6365 dhd->prot->pktid_txq_start_cnt++;
6366 }
6367
6368 /** called on MSG_TYPE_TX_STATUS message received from dongle */
6369 static void BCMFASTPATH
6370 dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg)
6371 {
6372 dhd_prot_t *prot = dhd->prot;
6373 host_txbuf_cmpl_t * txstatus;
6374 unsigned long flags;
6375 uint32 pktid;
6376 void *pkt;
6377 dmaaddr_t pa;
6378 uint32 len;
6379 void *dmah;
6380 void *secdma;
6381 bool pkt_fate;
6382 msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;
6383 #if defined(TX_STATUS_LATENCY_STATS) || defined(DHD_HP2P)
6384 flow_info_t *flow_info;
6385 uint64 tx_status_latency;
6386 #endif /* TX_STATUS_LATENCY_STATS || DHD_HP2P */
6387 #if defined(TX_STATUS_LATENCY_STATS)
6388 flow_ring_node_t *flow_ring_node;
6389 uint16 flowid;
6390 #endif // endif
6391 ts_timestamp_t *ts;
6392
6393 BCM_REFERENCE(ts);
6394 txstatus = (host_txbuf_cmpl_t *)msg;
6395 #if defined(TX_STATUS_LATENCY_STATS)
6396 flowid = txstatus->compl_hdr.flow_ring_id;
6397 flow_ring_node = DHD_FLOW_RING(dhd, flowid);
6398 #endif // endif
6399
6400 /* locks required to protect circular buffer accesses */
6401 DHD_RING_LOCK(ring->ring_lock, flags);
6402 pktid = ltoh32(txstatus->cmn_hdr.request_id);
6403 pkt_fate = TRUE;
6404
6405 #if defined(DHD_PKTID_AUDIT_RING)
6406 DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_tx_map, pktid,
6407 DHD_DUPLICATE_FREE, msg, D2HRING_TXCMPLT_ITEMSIZE);
6408 #endif // endif
6409
6410 DHD_INFO(("txstatus for pktid 0x%04x\n", pktid));
6411 if (OSL_ATOMIC_DEC_RETURN(dhd->osh, &prot->active_tx_count) < 0) {
6412 DHD_ERROR(("Extra packets are freed\n"));
6413 }
6414 ASSERT(pktid != 0);
6415
6416 pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
6417 pa, len, dmah, secdma, PKTTYPE_DATA_TX);
6418 if (!pkt) {
6419 DHD_RING_UNLOCK(ring->ring_lock, flags);
6420 DHD_ERROR(("%s: received txstatus with NULL pkt\n", __FUNCTION__));
6421 prhex("dhd_prot_txstatus_process:", (uchar *)msg, D2HRING_TXCMPLT_ITEMSIZE);
6422 #ifdef DHD_FW_COREDUMP
6423 if (dhd->memdump_enabled) {
6424 /* collect core dump */
6425 dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
6426 dhd_bus_mem_dump(dhd);
6427 }
6428 #else
6429 ASSERT(0);
6430 #endif /* DHD_FW_COREDUMP */
6431 return;
6432 }
6433
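	/*
	 * If the free pktid count has recovered to the low-water mark, tx was
	 * presumably throttled earlier for lack of tx pktids; clear the
	 * no_tx_resource flag and restart the bus tx queue.
	 */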
6434 if (DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map) == DHD_PKTID_MIN_AVAIL_COUNT) {
6435 dhd->prot->no_tx_resource = FALSE;
6436 dhd_bus_start_queue(dhd->bus);
6437 }
6438
6439 if (SECURE_DMA_ENAB(dhd->osh)) {
6440 int offset = 0;
6441 BCM_REFERENCE(offset);
6442
6443 if (dhd->prot->tx_metadata_offset)
6444 offset = dhd->prot->tx_metadata_offset + ETHER_HDR_LEN;
6445 SECURE_DMA_UNMAP(dhd->osh, (uint) pa,
6446 (uint) dhd->prot->tx_metadata_offset, DMA_RX, 0, dmah,
6447 secdma, offset);
6448 } else {
6449 DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
6450 }
6451
6452 #ifdef TX_STATUS_LATENCY_STATS
6453 /* update the tx status latency for flowid */
6454 flow_info = &flow_ring_node->flow_info;
6455 tx_status_latency = OSL_SYSUPTIME_US() - DHD_PKT_GET_QTIME(pkt);
6456 flow_info->cum_tx_status_latency += tx_status_latency;
6457 flow_info->num_tx_status++;
6458 #endif /* TX_STATUS_LATENCY_STATS */
6459 #if defined(DHD_LB_TXC) && !defined(BCM_SECURE_DMA)
6460 {
6461 int elem_ix;
6462 void **elem;
6463 bcm_workq_t *workq;
6464
6465 workq = &prot->tx_compl_prod;
6466 /*
6467 * Produce the packet into the tx_compl workq for the tx compl tasklet
6468 * to consume.
6469 */
6470 OSL_PREFETCH(PKTTAG(pkt));
6471
6472 /* fetch next available slot in workq */
6473 elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
6474
6475 DHD_PKTTAG_SET_PA((dhd_pkttag_fr_t *)PKTTAG(pkt), pa);
6476 DHD_PKTTAG_SET_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt), len);
6477
6478 if (elem_ix == BCM_RING_FULL) {
6479 DHD_ERROR(("tx_compl_prod BCM_RING_FULL\n"));
6480 goto workq_ring_full;
6481 }
6482
6483 elem = WORKQ_ELEMENT(void *, &prot->tx_compl_prod, elem_ix);
6484 *elem = pkt;
6485
6486 smp_wmb();
6487
6488 /* Sync WR index to consumer if the SYNC threshold has been reached */
6489 if (++prot->tx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) {
6490 bcm_workq_prod_sync(workq);
6491 prot->tx_compl_prod_sync = 0;
6492 }
6493
6494 DHD_INFO(("%s: tx_compl_prod pkt<%p> sync<%d>\n",
6495 __FUNCTION__, pkt, prot->tx_compl_prod_sync));
6496
6497 DHD_RING_UNLOCK(ring->ring_lock, flags);
6498 return;
6499 }
6500
6501 workq_ring_full:
6502
#endif /* DHD_LB_TXC && !BCM_SECURE_DMA */
6504
6505 #ifdef DMAMAP_STATS
6506 dhd->dma_stats.txdata--;
6507 dhd->dma_stats.txdata_sz -= len;
6508 #endif /* DMAMAP_STATS */
6509 pkt_fate = dhd_dbg_process_tx_status(dhd, pkt, pktid,
6510 ltoh16(txstatus->compl_hdr.status) & WLFC_CTL_PKTFLAG_MASK);
6511 #ifdef DHD_PKT_LOGGING
6512 if (dhd->d11_tx_status) {
6513 uint16 status = ltoh16(txstatus->compl_hdr.status) &
6514 WLFC_CTL_PKTFLAG_MASK;
6515 uint32 pkthash = __dhd_dbg_pkt_hash((uintptr_t)pkt, pktid);
6516 DHD_PKTLOG_TXS(dhd, pkt, pktid, status);
6517 dhd_dump_pkt(dhd, ltoh32(txstatus->cmn_hdr.if_id),
6518 (uint8 *)PKTDATA(dhd->osh, pkt), len, TRUE,
6519 &pkthash, &status);
6520 }
6521 #endif /* DHD_PKT_LOGGING */
6522
6523 #if defined(BCMPCIE)
6524 dhd_txcomplete(dhd, pkt, pkt_fate);
6525 #ifdef DHD_4WAYM4_FAIL_DISCONNECT
6526 dhd_eap_txcomplete(dhd, pkt, pkt_fate, txstatus->cmn_hdr.if_id);
6527 #endif /* DHD_4WAYM4_FAIL_DISCONNECT */
6528 #endif // endif
6529
6530 #if DHD_DBG_SHOW_METADATA
6531 if (dhd->prot->metadata_dbg &&
6532 dhd->prot->tx_metadata_offset && txstatus->metadata_len) {
6533 uchar *ptr;
6534 /* The Ethernet header of TX frame was copied and removed.
6535 * Here, move the data pointer forward by Ethernet header size.
6536 */
6537 PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN);
6538 ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->tx_metadata_offset);
6539 bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len);
6540 dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len);
6541 }
6542 #endif /* DHD_DBG_SHOW_METADATA */
6543
6544 #ifdef DHD_HP2P
6545 if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
6546 #ifdef DHD_HP2P_DEBUG
6547 bcm_print_bytes("txcpl", (uint8 *)txstatus, sizeof(host_txbuf_cmpl_t));
6548 #endif /* DHD_HP2P_DEBUG */
6549 dhd_update_hp2p_txstats(dhd, txstatus);
6550 }
6551 #endif /* DHD_HP2P */
6552
6553 #ifdef DHD_LBUF_AUDIT
6554 PKTAUDIT(dhd->osh, pkt);
6555 #endif // endif
6556
6557 DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id,
6558 txstatus->tx_status);
6559 DHD_RING_UNLOCK(ring->ring_lock, flags);
6560 PKTFREE(dhd->osh, pkt, TRUE);
6561 return;
6562 } /* dhd_prot_txstatus_process */
6563
6564 /** called on MSG_TYPE_WL_EVENT message received from dongle */
6565 static void
6566 dhd_prot_event_process(dhd_pub_t *dhd, void *msg)
6567 {
6568 wlevent_req_msg_t *evnt;
6569 uint32 bufid;
6570 uint16 buflen;
6571 int ifidx = 0;
6572 void* pkt;
6573 dhd_prot_t *prot = dhd->prot;
6574
6575 /* Event complete header */
6576 evnt = (wlevent_req_msg_t *)msg;
6577 bufid = ltoh32(evnt->cmn_hdr.request_id);
6578
6579 #if defined(DHD_PKTID_AUDIT_RING)
6580 DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, bufid,
6581 DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
6582 #endif // endif
6583
6584 buflen = ltoh16(evnt->event_data_len);
6585
6586 ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr);
6587
6588 /* Post another rxbuf to the device */
6589 if (prot->cur_event_bufs_posted)
6590 prot->cur_event_bufs_posted--;
6591 dhd_msgbuf_rxbuf_post_event_bufs(dhd);
6592
6593 pkt = dhd_prot_packet_get(dhd, bufid, PKTTYPE_EVENT_RX, TRUE);
6594
6595 if (!pkt) {
6596 DHD_ERROR(("%s: pkt is NULL for pktid %d\n", __FUNCTION__, bufid));
6597 return;
6598 }
6599
6600 /* DMA RX offset updated through shared area */
6601 if (dhd->prot->rx_dataoffset)
6602 PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
6603
6604 PKTSETLEN(dhd->osh, pkt, buflen);
6605 #ifdef DHD_LBUF_AUDIT
6606 PKTAUDIT(dhd->osh, pkt);
6607 #endif // endif
6608 dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
6609 }
6610
6611 /** called on MSG_TYPE_INFO_BUF_CMPLT message received from dongle */
6612 static void BCMFASTPATH
6613 dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf)
6614 {
6615 info_buf_resp_t *resp;
6616 uint32 pktid;
6617 uint16 buflen;
6618 void * pkt;
6619
6620 resp = (info_buf_resp_t *)buf;
6621 pktid = ltoh32(resp->cmn_hdr.request_id);
6622 buflen = ltoh16(resp->info_data_len);
6623
6624 #ifdef DHD_PKTID_AUDIT_RING
6625 DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
6626 DHD_DUPLICATE_FREE, buf, D2HRING_INFO_BUFCMPLT_ITEMSIZE);
6627 #endif /* DHD_PKTID_AUDIT_RING */
6628
6629 DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d, rx_dataoffset %d\n",
6630 pktid, buflen, resp->cmn_hdr.flags, ltoh16(resp->seqnum),
6631 dhd->prot->rx_dataoffset));
6632
6633 if (dhd->debug_buf_dest_support) {
6634 if (resp->dest < DEBUG_BUF_DEST_MAX) {
6635 dhd->debug_buf_dest_stat[resp->dest]++;
6636 }
6637 }
6638
6639 pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_INFO_RX, TRUE);
6640 if (!pkt)
6641 return;
6642
6643 /* DMA RX offset updated through shared area */
6644 if (dhd->prot->rx_dataoffset)
6645 PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
6646
6647 PKTSETLEN(dhd->osh, pkt, buflen);
6648
6649 #ifdef DHD_LBUF_AUDIT
6650 PKTAUDIT(dhd->osh, pkt);
6651 #endif // endif
6652
/* Info ring "debug" data, which is not an 802.3 frame, is passed up with a
 * special dummy ifidx (DHD_DUMMY_INFO_IF). This is internal to dhd and only
 * serves to route the data from here to dhd_linux.c:dhd_rx_frame().
 */
6657 dhd_bus_rx_frame(dhd->bus, pkt, DHD_DUMMY_INFO_IF /* ifidx HACK */, 1);
6658 }
6659
6660 /** called on MSG_TYPE_SNAPSHOT_CMPLT message received from dongle */
6661 static void BCMFASTPATH
6662 dhd_prot_process_snapshot_complete(dhd_pub_t *dhd, void *buf)
6663 {
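/* Nothing to do: snapshot upload completions are not consumed by this driver build. */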
6664 }
6665
6666 /** Stop protocol: sync w/dongle state. */
6667 void dhd_prot_stop(dhd_pub_t *dhd)
6668 {
6669 ASSERT(dhd);
6670 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
6671
6672 }
6673
6674 /* Add any protocol-specific data header.
6675 * Caller must reserve prot_hdrlen prepend space.
6676 */
6677 void BCMFASTPATH
6678 dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF)
6679 {
6680 return;
6681 }
6682
6683 uint
6684 dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF)
6685 {
6686 return 0;
6687 }
6688
6689 #define MAX_MTU_SZ (1600u)
6690
6691 #define PKTBUF pktbuf
6692
6693 /**
6694 * Called when a tx ethernet packet has been dequeued from a flow queue, and has to be inserted in
6695 * the corresponding flow ring.
6696 */
6697 int BCMFASTPATH
6698 dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx)
6699 {
6700 unsigned long flags;
6701 dhd_prot_t *prot = dhd->prot;
6702 host_txbuf_post_t *txdesc = NULL;
6703 dmaaddr_t pa, meta_pa;
6704 uint8 *pktdata;
6705 uint32 pktlen;
6706 uint32 pktid;
6707 uint8 prio;
6708 uint16 flowid = 0;
6709 uint16 alloced = 0;
6710 uint16 headroom;
6711 msgbuf_ring_t *ring;
6712 flow_ring_table_t *flow_ring_table;
6713 flow_ring_node_t *flow_ring_node;
6714 #ifdef DHD_PKT_LOGGING
6715 uint32 pkthash;
6716 #endif /* DHD_PKT_LOGGING */
6717
6718 if (dhd->flow_ring_table == NULL) {
6719 DHD_ERROR(("dhd flow_ring_table is NULL\n"));
6720 return BCME_NORESOURCE;
6721 }
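/* Backpressure on pktid exhaustion: if tx pktids stay unavailable for
 * DHD_PKTID_DEPLETED_MAX_COUNT consecutive attempts, stop the bus tx queue.
 * It is restarted from dhd_prot_txstatus_process() once completions free
 * enough pktids.
 */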
6722 #ifdef DHD_PCIE_PKTID
6723 if (!DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map)) {
6724 if (dhd->prot->pktid_depleted_cnt == DHD_PKTID_DEPLETED_MAX_COUNT) {
6725 dhd_bus_stop_queue(dhd->bus);
6726 dhd->prot->no_tx_resource = TRUE;
6727 }
6728 dhd->prot->pktid_depleted_cnt++;
6729 goto err_no_res;
6730 } else {
6731 dhd->prot->pktid_depleted_cnt = 0;
6732 }
6733 #endif /* DHD_PCIE_PKTID */
6734
6735 flowid = DHD_PKT_GET_FLOWID(PKTBUF);
6736 flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
6737 flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
6738
6739 ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
6740
6741 DHD_RING_LOCK(ring->ring_lock, flags);
6742
6743 /* Create a unique 32-bit packet id */
6744 pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_tx_map,
6745 PKTBUF, PKTTYPE_DATA_TX);
6746 #if defined(DHD_PCIE_PKTID)
6747 if (pktid == DHD_PKTID_INVALID) {
6748 DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
/*
 * If we return an error here, the caller will requeue the packet,
 * so just free any skb allocated in the DMA zone. Since the original
 * SKB has not been freed, the caller can safely resubmit it.
 */
6755 goto err_no_res_pktfree;
6756 }
6757 #endif /* DHD_PCIE_PKTID */
6758
6759 /* Reserve space in the circular buffer */
6760 txdesc = (host_txbuf_post_t *)
6761 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
6762 if (txdesc == NULL) {
6763 DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n",
6764 __FUNCTION__, __LINE__, OSL_ATOMIC_READ(dhd->osh, &prot->active_tx_count)));
6765 goto err_free_pktid;
6766 }
6767
6768 /* Extract the data pointer and length information */
6769 pktdata = PKTDATA(dhd->osh, PKTBUF);
6770 pktlen = PKTLEN(dhd->osh, PKTBUF);
6771
6772 DHD_DBG_PKT_MON_TX(dhd, PKTBUF, pktid);
6773 #ifdef DHD_PKT_LOGGING
6774 DHD_PKTLOG_TX(dhd, PKTBUF, pktid);
6775 /* Dump TX packet */
6776 pkthash = __dhd_dbg_pkt_hash((uintptr_t)PKTBUF, pktid);
6777 dhd_dump_pkt(dhd, ifidx, pktdata, pktlen, TRUE, &pkthash, NULL);
6778 #endif /* DHD_PKT_LOGGING */
6779
6780 /* Ethernet header: Copy before we cache flush packet using DMA_MAP */
6781 bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN);
6782
6783 /* Extract the ethernet header and adjust the data pointer and length */
6784 pktdata = PKTPULL(dhd->osh, PKTBUF, ETHER_HDR_LEN);
6785 pktlen -= ETHER_HDR_LEN;
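/* The Ethernet header now travels inside the tx post work item itself
 * (txdesc->txhdr); only the remaining payload is DMA-mapped below. The
 * header is pushed back onto the packet later so the host copy stays intact.
 */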
6786
6787 /* Map the data pointer to a DMA-able address */
6788 if (SECURE_DMA_ENAB(dhd->osh)) {
6789 int offset = 0;
6790 BCM_REFERENCE(offset);
6791
6792 if (prot->tx_metadata_offset)
6793 offset = prot->tx_metadata_offset + ETHER_HDR_LEN;
6794
6795 pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen,
6796 DMA_TX, PKTBUF, 0, ring->dma_buf.secdma, offset);
6797 }
6798 #ifndef BCM_SECURE_DMA
6799 else
6800 pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0);
6801 #endif /* #ifndef BCM_SECURE_DMA */
6802
6803 if (PHYSADDRISZERO(pa)) {
DHD_ERROR(("%s: DMA mapping returned a zero physical address for pa\n",
__FUNCTION__));
6806 ASSERT(0);
6807 goto err_rollback_idx;
6808 }
6809
6810 #ifdef DMAMAP_STATS
6811 dhd->dma_stats.txdata++;
6812 dhd->dma_stats.txdata_sz += pktlen;
6813 #endif /* DMAMAP_STATS */
6814 /* No need to lock. Save the rest of the packet's metadata */
6815 DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_tx_map, PKTBUF, pktid,
6816 pa, pktlen, DMA_TX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_TX);
6817
6818 #ifdef TXP_FLUSH_NITEMS
6819 if (ring->pend_items_count == 0)
6820 ring->start_addr = (void *)txdesc;
6821 ring->pend_items_count++;
6822 #endif // endif
6823
6824 /* Form the Tx descriptor message buffer */
6825
6826 /* Common message hdr */
6827 txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST;
6828 txdesc->cmn_hdr.if_id = ifidx;
6829 txdesc->cmn_hdr.flags = ring->current_phase;
6830
6831 txdesc->flags = BCMPCIE_PKT_FLAGS_FRAME_802_3;
6832 prio = (uint8)PKTPRIO(PKTBUF);
6833
6834 txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT;
6835 txdesc->seg_cnt = 1;
6836
6837 txdesc->data_len = htol16((uint16) pktlen);
6838 txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
6839 txdesc->data_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
6840
/* Push the Ethernet header back onto the packet so the host-side PKTBUF retains it for later reference */
6842 PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN);
6843
6844 /* Handle Tx metadata */
6845 headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF);
6846 if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset))
6847 DHD_ERROR(("No headroom for Metadata tx %d %d\n",
6848 prot->tx_metadata_offset, headroom));
6849
6850 if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) {
6851 DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset));
6852
6853 /* Adjust the data pointer to account for meta data in DMA_MAP */
6854 PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset);
6855
6856 if (SECURE_DMA_ENAB(dhd->osh)) {
6857 meta_pa = SECURE_DMA_MAP_TXMETA(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
6858 prot->tx_metadata_offset + ETHER_HDR_LEN, DMA_RX, PKTBUF,
6859 0, ring->dma_buf.secdma);
6860 }
6861 #ifndef BCM_SECURE_DMA
6862 else
6863 meta_pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
6864 prot->tx_metadata_offset, DMA_RX, PKTBUF, 0);
6865 #endif /* #ifndef BCM_SECURE_DMA */
6866
6867 if (PHYSADDRISZERO(meta_pa)) {
6868 /* Unmap the data pointer to a DMA-able address */
6869 if (SECURE_DMA_ENAB(dhd->osh)) {
6870 int offset = 0;
6871 BCM_REFERENCE(offset);
6872
6873 if (prot->tx_metadata_offset) {
6874 offset = prot->tx_metadata_offset + ETHER_HDR_LEN;
6875 }
6876
6877 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen,
6878 DMA_TX, 0, DHD_DMAH_NULL, ring->dma_buf.secdma, offset);
6879 }
6880 #ifndef BCM_SECURE_DMA
6881 else {
6882 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_TX, 0, DHD_DMAH_NULL);
6883 }
6884 #endif /* #ifndef BCM_SECURE_DMA */
6885 #ifdef TXP_FLUSH_NITEMS
6886 /* update pend_items_count */
6887 ring->pend_items_count--;
6888 #endif /* TXP_FLUSH_NITEMS */
6889
DHD_ERROR(("%s: DMA mapping returned a zero physical address for meta_pa\n",
__FUNCTION__));
6892 ASSERT(0);
6893 goto err_rollback_idx;
6894 }
6895
6896 /* Adjust the data pointer back to original value */
6897 PKTPULL(dhd->osh, PKTBUF, prot->tx_metadata_offset);
6898
6899 txdesc->metadata_buf_len = prot->tx_metadata_offset;
6900 txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa));
6901 txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa));
6902 } else {
6903 #ifdef DHD_HP2P
6904 if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
6905 dhd_update_hp2p_txdesc(dhd, txdesc);
6906 } else
6907 #endif /* DHD_HP2P */
6908 if (1)
6909 {
6910 txdesc->metadata_buf_len = htol16(0);
6911 txdesc->metadata_buf_addr.high_addr = 0;
6912 txdesc->metadata_buf_addr.low_addr = 0;
6913 }
6914 }
6915
6916 #ifdef DHD_PKTID_AUDIT_RING
6917 DHD_PKTID_AUDIT(dhd, prot->pktid_tx_map, pktid, DHD_DUPLICATE_ALLOC);
6918 #endif /* DHD_PKTID_AUDIT_RING */
6919
6920 txdesc->cmn_hdr.request_id = htol32(pktid);
6921
6922 DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len,
6923 txdesc->cmn_hdr.request_id));
6924
6925 #ifdef DHD_LBUF_AUDIT
6926 PKTAUDIT(dhd->osh, PKTBUF);
6927 #endif // endif
6928
6929 if (pktlen > MAX_MTU_SZ) {
6930 DHD_ERROR(("%s: ######## pktlen(%d) > MAX_MTU_SZ(%d) #######\n",
6931 __FUNCTION__, pktlen, MAX_MTU_SZ));
6932 dhd_prhex("txringitem", (volatile uchar*)txdesc,
6933 sizeof(host_txbuf_post_t), DHD_ERROR_VAL);
6934 }
6935
6936 /* Update the write pointer in TCM & ring bell */
6937 #if defined(DHD_HP2P) && defined(TXP_FLUSH_NITEMS)
6938 if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
6939 dhd_calc_hp2p_burst(dhd, ring, flowid);
6940 } else {
6941 if ((ring->pend_items_count == prot->txp_threshold) ||
6942 ((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
6943 dhd_prot_txdata_write_flush(dhd, flowid);
6944 }
6945 }
6946 #elif defined(TXP_FLUSH_NITEMS)
6947 /* Flush if we have either hit the txp_threshold or if this msg is */
6948 /* occupying the last slot in the flow_ring - before wrap around. */
6949 if ((ring->pend_items_count == prot->txp_threshold) ||
6950 ((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
6951 dhd_prot_txdata_write_flush(dhd, flowid);
6952 }
6953 #else
6954 /* update ring's WR index and ring doorbell to dongle */
6955 dhd_prot_ring_write_complete(dhd, ring, txdesc, 1);
6956 #endif /* DHD_HP2P && TXP_FLUSH_NITEMS */
6957
6958 #if defined(TX_STATUS_LATENCY_STATS)
6959 /* set the time when pkt is queued to flowring */
6960 DHD_PKT_SET_QTIME(PKTBUF, OSL_SYSUPTIME_US());
6961 #endif // endif
6962
6963 OSL_ATOMIC_INC(dhd->osh, &prot->active_tx_count);
6964 /*
* Take a wake lock; do not sleep while we have at least one packet
* pending completion.
6967 */
6968 DHD_TXFL_WAKE_LOCK_TIMEOUT(dhd, MAX_TX_TIMEOUT);
6969
6970 DHD_RING_UNLOCK(ring->ring_lock, flags);
6971
6972 #ifdef TX_STATUS_LATENCY_STATS
6973 flow_ring_node->flow_info.num_tx_pkts++;
6974 #endif /* TX_STATUS_LATENCY_STATS */
6975 return BCME_OK;
6976
6977 err_rollback_idx:
6978 /* roll back write pointer for unprocessed message */
6979 if (ring->wr == 0) {
6980 ring->wr = ring->max_items - 1;
6981 } else {
6982 ring->wr--;
6983 if (ring->wr == 0) {
6984 DHD_INFO(("%s: flipping the phase now\n", ring->name));
6985 ring->current_phase = ring->current_phase ?
6986 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
6987 }
6988 }
6989
6990 err_free_pktid:
6991 #if defined(DHD_PCIE_PKTID)
6992 {
6993 void *dmah;
6994 void *secdma;
6995 /* Free up the PKTID. physaddr and pktlen will be garbage. */
6996 DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
6997 pa, pktlen, dmah, secdma, PKTTYPE_NO_CHECK);
6998 }
6999
7000 err_no_res_pktfree:
7001 #endif /* DHD_PCIE_PKTID */
7002
7003 DHD_RING_UNLOCK(ring->ring_lock, flags);
7004 err_no_res:
7005 return BCME_NORESOURCE;
7006 } /* dhd_prot_txdata */
7007
7008 /* called with a ring_lock */
7009 /** optimization to write "n" tx items at a time to ring */
7010 void BCMFASTPATH
7011 dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid)
7012 {
7013 #ifdef TXP_FLUSH_NITEMS
7014 flow_ring_table_t *flow_ring_table;
7015 flow_ring_node_t *flow_ring_node;
7016 msgbuf_ring_t *ring;
7017
7018 if (dhd->flow_ring_table == NULL) {
7019 return;
7020 }
7021
7022 flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
7023 flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
7024 ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
7025
7026 if (ring->pend_items_count) {
7027 /* update ring's WR index and ring doorbell to dongle */
7028 dhd_prot_ring_write_complete(dhd, ring, ring->start_addr,
7029 ring->pend_items_count);
7030 ring->pend_items_count = 0;
7031 ring->start_addr = NULL;
7032 }
7033 #endif /* TXP_FLUSH_NITEMS */
7034 }
7035
7036 #undef PKTBUF /* Only defined in the above routine */
7037
7038 int BCMFASTPATH
7039 dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pkt, uchar *buf, uint *len)
7040 {
7041 return 0;
7042 }
7043
/** Account for rx buffers handed back by the dongle and repost fresh buffers as needed */
7045 static void BCMFASTPATH
7046 dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt)
7047 {
7048 dhd_prot_t *prot = dhd->prot;
7049 #if defined(DHD_LB_RXC)
7050 int elem_ix;
7051 uint32 *elem;
7052 bcm_workq_t *workq;
7053
7054 workq = &prot->rx_compl_prod;
7055
7056 /* Produce the work item */
7057 elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
7058 if (elem_ix == BCM_RING_FULL) {
7059 DHD_ERROR(("%s LB RxCompl workQ is full\n", __FUNCTION__));
7060 ASSERT(0);
7061 return;
7062 }
7063
7064 elem = WORKQ_ELEMENT(uint32, workq, elem_ix);
7065 *elem = pktid;
7066
7067 smp_wmb();
7068
7069 /* Sync WR index to consumer if the SYNC threshold has been reached */
7070 if (++prot->rx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) {
7071 bcm_workq_prod_sync(workq);
7072 prot->rx_compl_prod_sync = 0;
7073 }
7074
7075 DHD_INFO(("%s: rx_compl_prod pktid<%u> sync<%d>\n",
7076 __FUNCTION__, pktid, prot->rx_compl_prod_sync));
7077
7078 #endif /* DHD_LB_RXC */
7079
7080 if (prot->rxbufpost >= rxcnt) {
7081 prot->rxbufpost -= (uint16)rxcnt;
7082 } else {
7083 /* ASSERT(0); */
7084 prot->rxbufpost = 0;
7085 }
7086
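/* Top up the rx buffer pool once the number of outstanding posted buffers
 * drops RXBUFPOST_THRESHOLD below the maximum.
 */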
7087 #if !defined(DHD_LB_RXC)
7088 if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD))
7089 dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
7090 #endif /* !DHD_LB_RXC */
7091 return;
7092 }
7093
7094 /* called before an ioctl is sent to the dongle */
7095 static void
7096 dhd_prot_wlioctl_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf)
7097 {
7098 dhd_prot_t *prot = dhd->prot;
7099 int slen = 0;
7100
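/* The "pcie_bus_tput" iovar needs the host-side DMA buffer address and
 * length patched into its parameter block before the request is sent to
 * the dongle.
 */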
7101 if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "pcie_bus_tput")) {
7102 pcie_bus_tput_params_t *tput_params;
7103
7104 slen = strlen("pcie_bus_tput") + 1;
7105 tput_params = (pcie_bus_tput_params_t*)((char *)buf + slen);
7106 bcopy(&prot->host_bus_throughput_buf.pa, &tput_params->host_buf_addr,
7107 sizeof(tput_params->host_buf_addr));
7108 tput_params->host_buf_len = DHD_BUS_TPUT_BUF_LEN;
7109 }
7110
7111 }
7112
7113 /* called after an ioctl returns from dongle */
7114 static void
7115 dhd_prot_wl_ioctl_ret_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf,
7116 int ifidx, int ret, int len)
7117 {
7118
7119 if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) {
7120 /* Intercept the wme_dp ioctl here */
7121 if (!strcmp(buf, "wme_dp")) {
7122 int slen, val = 0;
7123
7124 slen = strlen("wme_dp") + 1;
7125 if (len >= (int)(slen + sizeof(int)))
7126 bcopy(((char *)buf + slen), &val, sizeof(int));
7127 dhd->wme_dp = (uint8) ltoh32(val);
7128 }
7129
7130 }
7131
7132 }
7133
7134 #ifdef DHD_PM_CONTROL_FROM_FILE
7135 extern bool g_pm_control;
7136 #endif /* DHD_PM_CONTROL_FROM_FILE */
7137
7138 /** Use protocol to issue ioctl to dongle. Only one ioctl may be in transit. */
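/* Illustrative caller sketch (hypothetical, for documentation only -- no
 * wl_ioctl_t fields beyond cmd/set/used, which are used below, are assumed):
 *
 *   wl_ioctl_t ioc;
 *   memset(&ioc, 0, sizeof(ioc));
 *   ioc.cmd = WLC_GET_VAR;   // iovar query
 *   ioc.set = 0;             // GET action
 *   ret = dhd_prot_ioctl(dhd, ifidx, &ioc, buf, len);
 *   // ret == 0 on success; for queries, ioc.used holds the response length
 */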
7139 int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
7140 {
7141 int ret = -1;
7142 uint8 action;
7143
7144 if (dhd->bus->is_linkdown) {
7145 DHD_ERROR_RLMT(("%s : PCIe link is down. we have nothing to do\n", __FUNCTION__));
7146 goto done;
7147 }
7148
7149 if (dhd_query_bus_erros(dhd)) {
7150 DHD_ERROR_RLMT(("%s : some BUS error. we have nothing to do\n", __FUNCTION__));
7151 goto done;
7152 }
7153
7154 if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
7155 DHD_ERROR_RLMT(("%s : bus is down. we have nothing to do -"
7156 " bus state: %d, sent hang: %d\n", __FUNCTION__,
7157 dhd->busstate, dhd->hang_was_sent));
7158 goto done;
7159 }
7160
7161 if (dhd->busstate == DHD_BUS_SUSPEND) {
7162 DHD_ERROR(("%s : bus is suspended\n", __FUNCTION__));
7163 goto done;
7164 }
7165
7166 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7167
7168 if (ioc->cmd == WLC_SET_PM) {
7169 #ifdef DHD_PM_CONTROL_FROM_FILE
7170 if (g_pm_control == TRUE) {
7171 DHD_ERROR(("%s: SET PM ignored!(Requested:%d)\n",
7172 __FUNCTION__, buf ? *(char *)buf : 0));
7173 goto done;
7174 }
7175 #endif /* DHD_PM_CONTROL_FROM_FILE */
7176 DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, buf ? *(char *)buf : 0));
7177 }
7178
7179 ASSERT(len <= WLC_IOCTL_MAXLEN);
7180
7181 if (len > WLC_IOCTL_MAXLEN)
7182 goto done;
7183
7184 action = ioc->set;
7185
7186 dhd_prot_wlioctl_intercept(dhd, ioc, buf);
7187
7188 if (action & WL_IOCTL_ACTION_SET) {
7189 ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
7190 } else {
7191 ret = dhd_msgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
7192 if (ret > 0)
7193 ioc->used = ret;
7194 }
7195
7196 /* Too many programs assume ioctl() returns 0 on success */
7197 if (ret >= 0) {
7198 ret = 0;
7199 } else {
7200 DHD_INFO(("%s: status ret value is %d \n", __FUNCTION__, ret));
7201 dhd->dongle_error = ret;
7202 }
7203
7204 dhd_prot_wl_ioctl_ret_intercept(dhd, ioc, buf, ifidx, ret, len);
7205
7206 done:
7207 return ret;
7208
7209 } /* dhd_prot_ioctl */
7210
7211 /** test / loopback */
7212
7213 int
7214 dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len)
7215 {
7216 unsigned long flags;
7217 dhd_prot_t *prot = dhd->prot;
7218 uint16 alloced = 0;
7219
7220 ioct_reqst_hdr_t *ioct_rqst;
7221
7222 uint16 hdrlen = sizeof(ioct_reqst_hdr_t);
7223 uint16 msglen = len + hdrlen;
7224 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
7225
7226 msglen = ALIGN_SIZE(msglen, DMA_ALIGN_LEN);
7227 msglen = LIMIT_TO_MAX(msglen, MSGBUF_MAX_MSG_SIZE);
7228
7229 DHD_RING_LOCK(ring->ring_lock, flags);
7230
7231 ioct_rqst = (ioct_reqst_hdr_t *)
7232 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
7233
7234 if (ioct_rqst == NULL) {
7235 DHD_RING_UNLOCK(ring->ring_lock, flags);
7236 return 0;
7237 }
7238
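/* Fill the request body with an incrementing byte pattern; MSG_TYPE_LOOPBACK
 * asks the dongle to echo the message back so the control path can be
 * verified.
 */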
7239 {
7240 uint8 *ptr;
7241 uint16 i;
7242
7243 ptr = (uint8 *)ioct_rqst;
7244 for (i = 0; i < msglen; i++) {
7245 ptr[i] = i % 256;
7246 }
7247 }
7248
7249 /* Common msg buf hdr */
7250 ioct_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
7251 ring->seqnum++;
7252
7253 ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK;
7254 ioct_rqst->msg.if_id = 0;
7255 ioct_rqst->msg.flags = ring->current_phase;
7256
7257 bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen);
7258
7259 /* update ring's WR index and ring doorbell to dongle */
7260 dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
7261
7262 DHD_RING_UNLOCK(ring->ring_lock, flags);
7263
7264 return 0;
7265 }
7266
7267 /** test / loopback */
7268 void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dmaxfer)
7269 {
7270 if (dmaxfer == NULL)
7271 return;
7272
7273 dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
7274 dhd_dma_buf_free(dhd, &dmaxfer->dstmem);
7275 }
7276
7277 /** test / loopback */
7278 int
7279 dhd_prepare_schedule_dmaxfer_free(dhd_pub_t *dhdp)
7280 {
7281 dhd_prot_t *prot = dhdp->prot;
7282 dhd_dmaxfer_t *dmaxfer = &prot->dmaxfer;
7283 dmaxref_mem_map_t *dmap = NULL;
7284
7285 dmap = MALLOCZ(dhdp->osh, sizeof(dmaxref_mem_map_t));
7286 if (!dmap) {
7287 DHD_ERROR(("%s: dmap alloc failed\n", __FUNCTION__));
7288 goto mem_alloc_fail;
7289 }
7290 dmap->srcmem = &(dmaxfer->srcmem);
7291 dmap->dstmem = &(dmaxfer->dstmem);
7292
7293 DMAXFER_FREE(dhdp, dmap);
7294 return BCME_OK;
7295
7296 mem_alloc_fail:
7297 if (dmap) {
7298 MFREE(dhdp->osh, dmap, sizeof(dmaxref_mem_map_t));
7299 dmap = NULL;
7300 }
7301 return BCME_NOMEM;
7302 } /* dhd_prepare_schedule_dmaxfer_free */
7303
7304 /** test / loopback */
7305 void
7306 dmaxfer_free_prev_dmaaddr(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
7307 {
7308
7309 dhd_dma_buf_free(dhdp, dmmap->srcmem);
7310 dhd_dma_buf_free(dhdp, dmmap->dstmem);
7311
7312 MFREE(dhdp->osh, dmmap, sizeof(dmaxref_mem_map_t));
7313
7314 dhdp->bus->dmaxfer_complete = TRUE;
7315 dhd_os_dmaxfer_wake(dhdp);
7316
7317 dmmap = NULL;
7318
7319 } /* dmaxfer_free_prev_dmaaddr */
7320
7321 /** test / loopback */
7322 int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len,
7323 uint srcdelay, uint destdelay, dhd_dmaxfer_t *dmaxfer)
7324 {
7325 uint i = 0, j = 0;
7326 if (!dmaxfer)
7327 return BCME_ERROR;
7328
7329 /* First free up existing buffers */
7330 dmaxfer_free_dmaaddr(dhd, dmaxfer);
7331
7332 if (dhd_dma_buf_alloc(dhd, &dmaxfer->srcmem, len)) {
7333 return BCME_NOMEM;
7334 }
7335
7336 if (dhd_dma_buf_alloc(dhd, &dmaxfer->dstmem, len + 8)) {
7337 dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
7338 return BCME_NOMEM;
7339 }
7340
7341 dmaxfer->len = len;
7342
7343 /* Populate source with a pattern like below
7344 * 0x00000000
7345 * 0x01010101
7346 * 0x02020202
7347 * 0x03030303
7348 * 0x04040404
7349 * 0x05050505
7350 * ...
7351 * 0xFFFFFFFF
7352 */
7353 while (i < dmaxfer->len) {
7354 ((uint8*)dmaxfer->srcmem.va)[i] = j % 256;
7355 i++;
7356 if (i % 4 == 0) {
7357 j++;
7358 }
7359 }
7360
7361 OSL_CACHE_FLUSH(dmaxfer->srcmem.va, dmaxfer->len);
7362
7363 dmaxfer->srcdelay = srcdelay;
7364 dmaxfer->destdelay = destdelay;
7365
7366 return BCME_OK;
7367 } /* dmaxfer_prepare_dmaaddr */
7368
7369 static void
7370 dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg)
7371 {
7372 dhd_prot_t *prot = dhd->prot;
7373 uint64 end_usec;
7374 pcie_dmaxfer_cmplt_t *cmplt = (pcie_dmaxfer_cmplt_t *)msg;
7375 int buf_free_scheduled;
7376
7377 BCM_REFERENCE(cmplt);
7378 end_usec = OSL_SYSUPTIME_US();
7379
7380 DHD_ERROR(("DMA loopback status: %d\n", cmplt->compl_hdr.status));
7381 prot->dmaxfer.status = cmplt->compl_hdr.status;
7382 OSL_CACHE_INV(prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
7383 if (prot->dmaxfer.srcmem.va && prot->dmaxfer.dstmem.va) {
7384 if (memcmp(prot->dmaxfer.srcmem.va,
7385 prot->dmaxfer.dstmem.va, prot->dmaxfer.len) ||
7386 cmplt->compl_hdr.status != BCME_OK) {
7387 DHD_ERROR(("DMA loopback failed\n"));
/* It has been observed that sometimes the completion header
 * status is OK even though the memcmp fails, so always
 * explicitly mark the dmaxfer status as an error when the
 * buffers do not match.
 */
7393 prot->dmaxfer.status = BCME_ERROR;
7394 prhex("XFER SRC: ",
7395 prot->dmaxfer.srcmem.va, prot->dmaxfer.len);
7396 prhex("XFER DST: ",
7397 prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
7398 }
7399 else {
7400 switch (prot->dmaxfer.d11_lpbk) {
7401 case M2M_DMA_LPBK: {
7402 DHD_ERROR(("DMA successful pcie m2m DMA loopback\n"));
7403 } break;
7404 case D11_LPBK: {
7405 DHD_ERROR(("DMA successful with d11 loopback\n"));
7406 } break;
7407 case BMC_LPBK: {
7408 DHD_ERROR(("DMA successful with bmc loopback\n"));
7409 } break;
7410 case M2M_NON_DMA_LPBK: {
7411 DHD_ERROR(("DMA successful pcie m2m NON DMA loopback\n"));
7412 } break;
7413 case D11_HOST_MEM_LPBK: {
7414 DHD_ERROR(("DMA successful d11 host mem loopback\n"));
7415 } break;
7416 case BMC_HOST_MEM_LPBK: {
7417 DHD_ERROR(("DMA successful bmc host mem loopback\n"));
7418 } break;
7419 default: {
7420 DHD_ERROR(("Invalid loopback option\n"));
7421 } break;
7422 }
7423
7424 if (DHD_LPBKDTDUMP_ON()) {
7425 /* debug info print of the Tx and Rx buffers */
7426 dhd_prhex("XFER SRC: ", prot->dmaxfer.srcmem.va,
7427 prot->dmaxfer.len, DHD_INFO_VAL);
7428 dhd_prhex("XFER DST: ", prot->dmaxfer.dstmem.va,
7429 prot->dmaxfer.len, DHD_INFO_VAL);
7430 }
7431 }
7432 }
7433
7434 buf_free_scheduled = dhd_prepare_schedule_dmaxfer_free(dhd);
7435 end_usec -= prot->dmaxfer.start_usec;
7436 if (end_usec) {
7437 prot->dmaxfer.time_taken = end_usec;
7438 DHD_ERROR(("DMA loopback %d bytes in %lu usec, %u kBps\n",
7439 prot->dmaxfer.len, (unsigned long)end_usec,
7440 (prot->dmaxfer.len * (1000 * 1000 / 1024) / (uint32)end_usec)));
7441 }
7442 dhd->prot->dmaxfer.in_progress = FALSE;
7443
7444 if (buf_free_scheduled != BCME_OK) {
7445 dhd->bus->dmaxfer_complete = TRUE;
7446 dhd_os_dmaxfer_wake(dhd);
7447 }
7448 }
7449
7450 /** Test functionality.
7451 * Transfers bytes from host to dongle and to host again using DMA
7452 * This function is not reentrant, as prot->dmaxfer.in_progress is not protected
7453 * by a spinlock.
7454 */
7455 int
7456 dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay,
7457 uint d11_lpbk, uint core_num)
7458 {
7459 unsigned long flags;
7460 int ret = BCME_OK;
7461 dhd_prot_t *prot = dhd->prot;
7462 pcie_dma_xfer_params_t *dmap;
7463 uint32 xferlen = LIMIT_TO_MAX(len, DMA_XFER_LEN_LIMIT);
7464 uint16 alloced = 0;
7465 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
7466
7467 if (prot->dmaxfer.in_progress) {
7468 DHD_ERROR(("DMA is in progress...\n"));
7469 return BCME_ERROR;
7470 }
7471
7472 if (d11_lpbk >= MAX_LPBK) {
7473 DHD_ERROR(("loopback mode should be either"
7474 " 0-PCIE_M2M_DMA, 1-D11, 2-BMC or 3-PCIE_M2M_NonDMA\n"));
7475 return BCME_ERROR;
7476 }
7477
7478 DHD_RING_LOCK(ring->ring_lock, flags);
7479
7480 prot->dmaxfer.in_progress = TRUE;
7481 if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay,
7482 &prot->dmaxfer)) != BCME_OK) {
7483 prot->dmaxfer.in_progress = FALSE;
7484 DHD_RING_UNLOCK(ring->ring_lock, flags);
7485 return ret;
7486 }
7487
7488 dmap = (pcie_dma_xfer_params_t *)
7489 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
7490
7491 if (dmap == NULL) {
7492 dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
7493 prot->dmaxfer.in_progress = FALSE;
7494 DHD_RING_UNLOCK(ring->ring_lock, flags);
7495 return BCME_NOMEM;
7496 }
7497
7498 /* Common msg buf hdr */
7499 dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER;
7500 dmap->cmn_hdr.request_id = htol32(DHD_FAKE_PKTID);
7501 dmap->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
7502 dmap->cmn_hdr.flags = ring->current_phase;
7503 ring->seqnum++;
7504
7505 dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa));
7506 dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa));
7507 dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.dstmem.pa));
7508 dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.dstmem.pa));
7509 dmap->xfer_len = htol32(prot->dmaxfer.len);
7510 dmap->srcdelay = htol32(prot->dmaxfer.srcdelay);
7511 dmap->destdelay = htol32(prot->dmaxfer.destdelay);
7512 prot->dmaxfer.d11_lpbk = d11_lpbk;
7513 dmap->flags = (((core_num & PCIE_DMA_XFER_FLG_CORE_NUMBER_MASK)
7514 << PCIE_DMA_XFER_FLG_CORE_NUMBER_SHIFT) |
7515 ((prot->dmaxfer.d11_lpbk & PCIE_DMA_XFER_FLG_D11_LPBK_MASK)
7516 << PCIE_DMA_XFER_FLG_D11_LPBK_SHIFT));
7517 prot->dmaxfer.start_usec = OSL_SYSUPTIME_US();
7518
7519 /* update ring's WR index and ring doorbell to dongle */
7520 dhd_prot_ring_write_complete(dhd, ring, dmap, 1);
7521
7522 DHD_RING_UNLOCK(ring->ring_lock, flags);
7523
7524 DHD_ERROR(("DMA loopback Started...\n"));
7525
7526 return BCME_OK;
7527 } /* dhdmsgbuf_dmaxfer_req */
7528
7529 int
7530 dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd, dma_xfer_info_t *result)
7531 {
7532 dhd_prot_t *prot = dhd->prot;
7533
7534 if (prot->dmaxfer.in_progress)
7535 result->status = DMA_XFER_IN_PROGRESS;
7536 else if (prot->dmaxfer.status == 0)
7537 result->status = DMA_XFER_SUCCESS;
7538 else
7539 result->status = DMA_XFER_FAILED;
7540
7541 result->type = prot->dmaxfer.d11_lpbk;
7542 result->error_code = prot->dmaxfer.status;
7543 result->num_bytes = prot->dmaxfer.len;
7544 result->time_taken = prot->dmaxfer.time_taken;
7545 if (prot->dmaxfer.time_taken) {
7546 /* throughput in kBps */
7547 result->tput =
7548 (prot->dmaxfer.len * (1000 * 1000 / 1024)) /
7549 (uint32)prot->dmaxfer.time_taken;
7550 }
7551
7552 return BCME_OK;
7553 }
7554
7555 /** Called in the process of submitting an ioctl to the dongle */
7556 static int
7557 dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
7558 {
7559 int ret = 0;
7560 uint copylen = 0;
7561
7562 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7563
7564 if (dhd->bus->is_linkdown) {
7565 DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n",
7566 __FUNCTION__));
7567 return -EIO;
7568 }
7569
7570 if (dhd->busstate == DHD_BUS_DOWN) {
7571 DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
7572 return -EIO;
7573 }
7574
7575 /* don't talk to the dongle if fw is about to be reloaded */
7576 if (dhd->hang_was_sent) {
7577 DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
7578 __FUNCTION__));
7579 return -EIO;
7580 }
7581
7582 if (cmd == WLC_GET_VAR && buf)
7583 {
7584 if (!len || !*(uint8 *)buf) {
7585 DHD_ERROR(("%s(): Zero length bailing\n", __FUNCTION__));
7586 ret = BCME_BADARG;
7587 goto done;
7588 }
7589
7590 /* Respond "bcmerror" and "bcmerrorstr" with local cache */
7591 copylen = MIN(len, BCME_STRLEN);
7592
7593 if ((len >= strlen("bcmerrorstr")) &&
7594 (!strcmp((char *)buf, "bcmerrorstr"))) {
7595 strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), copylen);
7596 *(uint8 *)((uint8 *)buf + (copylen - 1)) = '\0';
7597 goto done;
7598 } else if ((len >= strlen("bcmerror")) &&
7599 !strcmp((char *)buf, "bcmerror")) {
*(uint32 *)buf = dhd->dongle_error;
7601 goto done;
7602 }
7603 }
7604
DHD_CTL(("query_ioctl: ACTION %d ifidx %d cmd %d len %d \n",
7606 action, ifidx, cmd, len));
7607
7608 ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
7609
7610 if (ret < 0) {
7611 DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
7612 goto done;
7613 }
7614
7615 /* wait for IOCTL completion message from dongle and get first fragment */
7616 ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
7617
7618 done:
7619 return ret;
7620 }
7621
7622 void
7623 dhd_msgbuf_iovar_timeout_dump(dhd_pub_t *dhd)
7624 {
7625 uint32 intstatus;
7626 dhd_prot_t *prot = dhd->prot;
7627 dhd->rxcnt_timeout++;
7628 dhd->rx_ctlerrs++;
7629 dhd->iovar_timeout_occured = TRUE;
7630 DHD_ERROR(("%s: resumed on timeout rxcnt_timeout%s %d ioctl_cmd %d "
7631 "trans_id %d state %d busstate=%d ioctl_received=%d\n", __FUNCTION__,
7632 dhd->is_sched_error ? " due to scheduling problem" : "",
7633 dhd->rxcnt_timeout, prot->curr_ioctl_cmd, prot->ioctl_trans_id,
7634 prot->ioctl_state, dhd->busstate, prot->ioctl_received));
7635 #if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
7636 if (dhd->is_sched_error && dhd->memdump_enabled == DUMP_MEMFILE_BUGON) {
7637 /* change g_assert_type to trigger Kernel panic */
7638 g_assert_type = 2;
7639 /* use ASSERT() to trigger panic */
7640 ASSERT(0);
7641 }
7642 #endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
7643
7644 if (prot->curr_ioctl_cmd == WLC_SET_VAR ||
7645 prot->curr_ioctl_cmd == WLC_GET_VAR) {
7646 char iovbuf[32];
7647 int i;
7648 int dump_size = 128;
7649 uint8 *ioctl_buf = (uint8 *)prot->ioctbuf.va;
7650 memset(iovbuf, 0, sizeof(iovbuf));
7651 strncpy(iovbuf, ioctl_buf, sizeof(iovbuf) - 1);
7652 iovbuf[sizeof(iovbuf) - 1] = '\0';
7653 DHD_ERROR(("Current IOVAR (%s): %s\n",
7654 prot->curr_ioctl_cmd == WLC_SET_VAR ?
7655 "WLC_SET_VAR" : "WLC_GET_VAR", iovbuf));
7656 DHD_ERROR(("========== START IOCTL REQBUF DUMP ==========\n"));
7657 for (i = 0; i < dump_size; i++) {
7658 DHD_ERROR(("%02X ", ioctl_buf[i]));
7659 if ((i % 32) == 31) {
7660 DHD_ERROR(("\n"));
7661 }
7662 }
7663 DHD_ERROR(("\n========== END IOCTL REQBUF DUMP ==========\n"));
7664 }
7665
7666 /* Check the PCIe link status by reading intstatus register */
7667 intstatus = si_corereg(dhd->bus->sih,
7668 dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
7669 if (intstatus == (uint32)-1) {
7670 DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
7671 dhd->bus->is_linkdown = TRUE;
7672 }
7673
7674 dhd_bus_dump_console_buffer(dhd->bus);
7675 dhd_prot_debug_info_print(dhd);
7676 }
7677
7678 /**
7679 * Waits for IOCTL completion message from the dongle, copies this into caller
7680 * provided parameter 'buf'.
7681 */
7682 static int
7683 dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf)
7684 {
7685 dhd_prot_t *prot = dhd->prot;
7686 int timeleft;
7687 unsigned long flags;
7688 int ret = 0;
7689
7690 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7691
7692 if (dhd_query_bus_erros(dhd)) {
7693 ret = -EIO;
7694 goto out;
7695 }
7696
7697 timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
7698
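/* Recovery path: if the wait timed out but the PCIe interrupt status shows
 * pending work (e.g. a missed or disabled host IRQ), drain the control
 * completion ring once more and retry the wait before declaring a timeout.
 */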
7699 #ifdef DHD_RECOVER_TIMEOUT
7700 if (prot->ioctl_received == 0) {
7701 uint32 intstatus = si_corereg(dhd->bus->sih,
7702 dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
int host_irq_disabled = dhdpcie_irq_disabled(dhd->bus);
if ((intstatus) && (intstatus != (uint32)-1) &&
(timeleft == 0) && (!dhd_query_bus_erros(dhd))) {
DHD_ERROR(("%s: iovar timeout trying again intstatus=%x"
" host_irq_disabled=%d\n",
__FUNCTION__, intstatus, host_irq_disabled));
7709 dhd_pcie_intr_count_dump(dhd);
7710 dhd_print_tasklet_status(dhd);
7711 dhd_prot_process_ctrlbuf(dhd);
7712 timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
7713 /* Clear Interrupts */
7714 dhdpcie_bus_clear_intstatus(dhd->bus);
7715 }
7716 }
7717 #endif /* DHD_RECOVER_TIMEOUT */
7718
7719 if (timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
7720 /* check if resumed on time out related to scheduling issue */
7721 dhd->is_sched_error = FALSE;
7722 if (dhd->bus->isr_entry_time > prot->ioctl_fillup_time) {
7723 dhd->is_sched_error = dhd_bus_query_dpc_sched_errors(dhd);
7724 }
7725
7726 dhd_msgbuf_iovar_timeout_dump(dhd);
7727
7728 #ifdef DHD_FW_COREDUMP
7729 /* Collect socram dump */
7730 if (dhd->memdump_enabled) {
7731 /* collect core dump */
7732 dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT;
7733 dhd_bus_mem_dump(dhd);
7734 }
7735 #endif /* DHD_FW_COREDUMP */
7736
7737 #ifdef SUPPORT_LINKDOWN_RECOVERY
7738 #ifdef CONFIG_ARCH_MSM
7739 dhd->bus->no_cfg_restore = 1;
7740 #endif /* CONFIG_ARCH_MSM */
7741 #endif /* SUPPORT_LINKDOWN_RECOVERY */
7742 ret = -ETIMEDOUT;
7743 goto out;
7744 } else {
7745 if (prot->ioctl_received != IOCTL_RETURN_ON_SUCCESS) {
7746 DHD_ERROR(("%s: IOCTL failure due to ioctl_received = %d\n",
7747 __FUNCTION__, prot->ioctl_received));
7748 ret = -EINVAL;
7749 goto out;
7750 }
7751 dhd->rxcnt_timeout = 0;
7752 dhd->rx_ctlpkts++;
7753 DHD_CTL(("%s: ioctl resp resumed, got %d\n",
7754 __FUNCTION__, prot->ioctl_resplen));
7755 }
7756
7757 if (dhd->prot->ioctl_resplen > len)
7758 dhd->prot->ioctl_resplen = (uint16)len;
7759 if (buf)
7760 bcopy(dhd->prot->retbuf.va, buf, dhd->prot->ioctl_resplen);
7761
7762 ret = (int)(dhd->prot->ioctl_status);
7763
7764 out:
7765 DHD_GENERAL_LOCK(dhd, flags);
7766 dhd->prot->ioctl_state = 0;
7767 dhd->prot->ioctl_resplen = 0;
7768 dhd->prot->ioctl_received = IOCTL_WAIT;
7769 dhd->prot->curr_ioctl_cmd = 0;
7770 DHD_GENERAL_UNLOCK(dhd, flags);
7771
7772 return ret;
7773 } /* dhd_msgbuf_wait_ioctl_cmplt */
7774
7775 static int
7776 dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
7777 {
7778 int ret = 0;
7779
7780 DHD_TRACE(("%s: Enter \n", __FUNCTION__));
7781
7782 if (dhd->bus->is_linkdown) {
7783 DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n",
7784 __FUNCTION__));
7785 return -EIO;
7786 }
7787
7788 if (dhd->busstate == DHD_BUS_DOWN) {
7789 DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
7790 return -EIO;
7791 }
7792
7793 /* don't talk to the dongle if fw is about to be reloaded */
7794 if (dhd->hang_was_sent) {
7795 DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
7796 __FUNCTION__));
7797 return -EIO;
7798 }
7799
DHD_CTL(("ACTION %d ifidx %d cmd %d len %d \n",
7801 action, ifidx, cmd, len));
7802
7803 /* Fill up msgbuf for ioctl req */
7804 ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
7805
7806 if (ret < 0) {
7807 DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
7808 goto done;
7809 }
7810
7811 ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
7812
7813 done:
7814 return ret;
7815 }
7816
7817 /** Called by upper DHD layer. Handles a protocol control response asynchronously. */
7818 int dhd_prot_ctl_complete(dhd_pub_t *dhd)
7819 {
7820 return 0;
7821 }
7822
7823 /** Called by upper DHD layer. Check for and handle local prot-specific iovar commands */
7824 int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name,
7825 void *params, int plen, void *arg, int len, bool set)
7826 {
7827 return BCME_UNSUPPORTED;
7828 }
7829
7830 #ifdef DHD_DUMP_PCIE_RINGS
7831 int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, const void *user_buf,
7832 unsigned long *file_posn, bool file_write)
7833 {
7834 dhd_prot_t *prot;
7835 msgbuf_ring_t *ring;
7836 int ret = 0;
7837 uint16 h2d_flowrings_total;
7838 uint16 flowid;
7839
7840 if (!(dhd) || !(dhd->prot)) {
7841 goto exit;
7842 }
7843 prot = dhd->prot;
7844
7845 /* Below is the same ring dump sequence followed in parser as well. */
7846 ring = &prot->h2dring_ctrl_subn;
7847 if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
7848 goto exit;
7849
7850 ring = &prot->h2dring_rxp_subn;
7851 if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
7852 goto exit;
7853
7854 ring = &prot->d2hring_ctrl_cpln;
7855 if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
7856 goto exit;
7857
7858 ring = &prot->d2hring_tx_cpln;
7859 if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
7860 goto exit;
7861
7862 ring = &prot->d2hring_rx_cpln;
7863 if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
7864 goto exit;
7865
7866 h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
7867 FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
7868 if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) {
7869 goto exit;
7870 }
7871 }
7872
7873 #ifdef EWP_EDL
7874 if (dhd->dongle_edl_support) {
7875 ring = prot->d2hring_edl;
7876 if ((ret = dhd_edl_ring_hdr_write(dhd, ring, file, user_buf, file_posn)) < 0)
7877 goto exit;
7878 }
7879 else if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support)
7880 #else
7881 if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
7882 #endif /* EWP_EDL */
7883 {
7884 ring = prot->h2dring_info_subn;
7885 if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
7886 goto exit;
7887
7888 ring = prot->d2hring_info_cpln;
7889 if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
7890 goto exit;
7891 }
7892
exit:
7894 return ret;
7895 }
7896
7897 /* Write to file */
7898 static
7899 int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
7900 const void *user_buf, unsigned long *file_posn)
7901 {
7902 int ret = 0;
7903
7904 if (ring == NULL) {
7905 DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n",
7906 __FUNCTION__));
7907 return BCME_ERROR;
7908 }
7909 if (file) {
7910 ret = dhd_os_write_file_posn(file, file_posn, (char *)(ring->dma_buf.va),
7911 ((unsigned long)(ring->max_items) * (ring->item_len)));
7912 if (ret < 0) {
7913 DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
7914 ret = BCME_ERROR;
7915 }
7916 } else if (user_buf) {
7917 ret = dhd_export_debug_data((char *)(ring->dma_buf.va), NULL, user_buf,
7918 ((unsigned long)(ring->max_items) * (ring->item_len)), (int *)file_posn);
7919 }
7920 return ret;
7921 }
7922 #endif /* DHD_DUMP_PCIE_RINGS */
7923
7924 #ifdef EWP_EDL
7925 /* Write to file */
7926 static
7927 int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf,
7928 unsigned long *file_posn)
7929 {
7930 int ret = 0, nitems = 0;
7931 char *buf = NULL, *ptr = NULL;
7932 uint8 *msg_addr = NULL;
7933 uint16 rd = 0;
7934
7935 if (ring == NULL) {
7936 DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n",
7937 __FUNCTION__));
7938 ret = BCME_ERROR;
7939 goto done;
7940 }
7941
7942 buf = MALLOCZ(dhd->osh, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
7943 if (buf == NULL) {
7944 DHD_ERROR(("%s: buffer allocation failed\n", __FUNCTION__));
7945 ret = BCME_ERROR;
7946 goto done;
7947 }
7948 ptr = buf;
7949
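/* Capture only the fixed-size EDL header of every ring item; the item
 * payloads are not part of this dump.
 */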
7950 for (; nitems < D2HRING_EDL_MAX_ITEM; nitems++, rd++) {
7951 msg_addr = (uint8 *)ring->dma_buf.va + (rd * ring->item_len);
7952 memcpy(ptr, (char *)msg_addr, D2HRING_EDL_HDR_SIZE);
7953 ptr += D2HRING_EDL_HDR_SIZE;
7954 }
7955 if (file) {
7956 ret = dhd_os_write_file_posn(file, file_posn, buf,
7957 (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM));
7958 if (ret < 0) {
7959 DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
7960 goto done;
7961 }
7962 }
7963 else {
7964 ret = dhd_export_debug_data(buf, NULL, user_buf,
7965 (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM), file_posn);
7966 }
7967
7968 done:
7969 if (buf) {
7970 MFREE(dhd->osh, buf, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
7971 }
7972 return ret;
7973 }
7974 #endif /* EWP_EDL */
7975
7976 /** Add prot dump output to a buffer */
7977 void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
7978 {
7979
7980 if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM)
7981 bcm_bprintf(b, "\nd2h_sync: SEQNUM:");
7982 else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM)
7983 bcm_bprintf(b, "\nd2h_sync: XORCSUM:");
7984 else
7985 bcm_bprintf(b, "\nd2h_sync: NONE:");
7986 bcm_bprintf(b, " d2h_sync_wait max<%lu> tot<%lu>\n",
7987 dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot);
7988
7989 bcm_bprintf(b, "\nDongle DMA Indices: h2d %d d2h %d index size %d bytes\n",
7990 dhd->dma_h2d_ring_upd_support,
7991 dhd->dma_d2h_ring_upd_support,
7992 dhd->prot->rw_index_sz);
7993 bcm_bprintf(b, "h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
7994 h2d_max_txpost, dhd->prot->h2d_max_txpost);
7995 bcm_bprintf(b, "pktid_txq_start_cnt: %d\n", dhd->prot->pktid_txq_start_cnt);
7996 bcm_bprintf(b, "pktid_txq_stop_cnt: %d\n", dhd->prot->pktid_txq_stop_cnt);
7997 bcm_bprintf(b, "pktid_depleted_cnt: %d\n", dhd->prot->pktid_depleted_cnt);
7998 }
7999
8000 /* Update local copy of dongle statistics */
8001 void dhd_prot_dstats(dhd_pub_t *dhd)
8002 {
8003 return;
8004 }
8005
8006 /** Called by upper DHD layer */
8007 int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
8008 uint reorder_info_len, void **pkt, uint32 *free_buf_count)
8009 {
8010 return 0;
8011 }
8012
8013 /** Debug related, post a dummy message to interrupt dongle. Used to process cons commands. */
8014 int
8015 dhd_post_dummy_msg(dhd_pub_t *dhd)
8016 {
8017 unsigned long flags;
8018 hostevent_hdr_t *hevent = NULL;
8019 uint16 alloced = 0;
8020
8021 dhd_prot_t *prot = dhd->prot;
8022 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
8023
8024 DHD_RING_LOCK(ring->ring_lock, flags);
8025
8026 hevent = (hostevent_hdr_t *)
8027 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
8028
8029 if (hevent == NULL) {
8030 DHD_RING_UNLOCK(ring->ring_lock, flags);
8031 return -1;
8032 }
8033
8034 /* CMN msg header */
8035 hevent->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
8036 ring->seqnum++;
8037 hevent->msg.msg_type = MSG_TYPE_HOST_EVNT;
8038 hevent->msg.if_id = 0;
8039 hevent->msg.flags = ring->current_phase;
8040
8041 /* Event payload */
8042 hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD);
8043
8044 /* Since, we are filling the data directly into the bufptr obtained
8045 * from the msgbuf, we can directly call the write_complete
8046 */
8047 dhd_prot_ring_write_complete(dhd, ring, hevent, 1);
8048
8049 DHD_RING_UNLOCK(ring->ring_lock, flags);
8050
8051 return 0;
8052 }
8053
8054 /**
8055 * If exactly_nitems is true, this function will allocate space for nitems or fail
8056 * If exactly_nitems is false, this function will allocate space for nitems or less
8057 */
8058 static void * BCMFASTPATH
8059 dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
8060 uint16 nitems, uint16 * alloced, bool exactly_nitems)
8061 {
8062 void * ret_buf;
8063
8064 /* Alloc space for nitems in the ring */
8065 ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
8066
8067 if (ret_buf == NULL) {
8068 /* HWA TODO, need to get RD pointer from different array
8069 * which HWA will directly write into host memory
8070 */
8071 /* if alloc failed , invalidate cached read ptr */
8072 if (dhd->dma_d2h_ring_upd_support) {
8073 ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
8074 } else {
8075 dhd_bus_cmn_readshared(dhd->bus, &(ring->rd), RING_RD_UPD, ring->idx);
8076 #ifdef SUPPORT_LINKDOWN_RECOVERY
8077 /* Check if ring->rd is valid */
8078 if (ring->rd >= ring->max_items) {
8079 DHD_ERROR(("%s: Invalid rd idx=%d\n", ring->name, ring->rd));
8080 dhd->bus->read_shm_fail = TRUE;
8081 return NULL;
8082 }
8083 #endif /* SUPPORT_LINKDOWN_RECOVERY */
8084 }
8085
8086 /* Try allocating once more */
8087 ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
8088
8089 if (ret_buf == NULL) {
8090 DHD_INFO(("%s: Ring space not available \n", ring->name));
8091 return NULL;
8092 }
8093 }
8094
8095 if (ret_buf == HOST_RING_BASE(ring)) {
8096 DHD_INFO(("%s: setting the phase now\n", ring->name));
8097 ring->current_phase = ring->current_phase ? 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
8098 }
8099
8100 /* Return alloced space */
8101 return ret_buf;
8102 }
8103
/**
 * Non-inline ioctl request.
 * First forms an ioctl request in the circular buffer as per the
 * ioctptr_reqst_hdr_t header, then uses a separate request buffer with a
 * 4-byte common header prepended; the caller's buf contents are copied into
 * the remainder of that buffer.
 */
8110 static int
8111 dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx)
8112 {
8113 dhd_prot_t *prot = dhd->prot;
8114 ioctl_req_msg_t *ioct_rqst;
8115 void * ioct_buf; /* For ioctl payload */
8116 uint16 rqstlen, resplen;
8117 unsigned long flags;
8118 uint16 alloced = 0;
8119 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
8120
8121 if (dhd_query_bus_erros(dhd)) {
8122 return -EIO;
8123 }
8124
8125 rqstlen = len;
8126 resplen = len;
8127
/* Limit the ioctl request to MSGBUF_MAX_MSG_SIZE bytes including headers:
 * an 8K dongle buffer allocation fails, and dhd does not track separate
 * input and output buffer lengths, so assume the input length never
 * exceeds 2K.
 */
8132 rqstlen = MIN(rqstlen, MSGBUF_IOCTL_MAX_RQSTLEN);
8133
8134 DHD_RING_LOCK(ring->ring_lock, flags);
8135
8136 if (prot->ioctl_state) {
8137 DHD_ERROR(("%s: pending ioctl %02x\n", __FUNCTION__, prot->ioctl_state));
8138 DHD_RING_UNLOCK(ring->ring_lock, flags);
8139 return BCME_BUSY;
8140 } else {
8141 prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING;
8142 }
8143
8144 /* Request for cbuf space */
8145 ioct_rqst = (ioctl_req_msg_t*)
8146 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
8147 if (ioct_rqst == NULL) {
8148 DHD_ERROR(("couldn't allocate space on msgring to send ioctl request\n"));
8149 prot->ioctl_state = 0;
8150 prot->curr_ioctl_cmd = 0;
8151 prot->ioctl_received = IOCTL_WAIT;
8152 DHD_RING_UNLOCK(ring->ring_lock, flags);
8153 return -1;
8154 }
8155
8156 /* Common msg buf hdr */
8157 ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ;
8158 ioct_rqst->cmn_hdr.if_id = (uint8)ifidx;
8159 ioct_rqst->cmn_hdr.flags = ring->current_phase;
8160 ioct_rqst->cmn_hdr.request_id = htol32(DHD_IOCTL_REQ_PKTID);
8161 ioct_rqst->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
8162 ring->seqnum++;
8163
8164 ioct_rqst->cmd = htol32(cmd);
8165 prot->curr_ioctl_cmd = cmd;
8166 ioct_rqst->output_buf_len = htol16(resplen);
8167 prot->ioctl_trans_id++;
8168 ioct_rqst->trans_id = prot->ioctl_trans_id;
8169
8170 /* populate ioctl buffer info */
8171 ioct_rqst->input_buf_len = htol16(rqstlen);
8172 ioct_rqst->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->ioctbuf.pa));
8173 ioct_rqst->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->ioctbuf.pa));
8174 /* copy ioct payload */
8175 ioct_buf = (void *) prot->ioctbuf.va;
8176
8177 prot->ioctl_fillup_time = OSL_LOCALTIME_NS();
8178
8179 if (buf)
8180 memcpy(ioct_buf, buf, len);
8181
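/* Flush the payload from the CPU cache so the dongle's DMA read of ioctbuf
 * sees the data written above before the doorbell is rung.
 */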
8182 OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len);
8183
8184 if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN))
8185 DHD_ERROR(("host ioct address unaligned !!!!! \n"));
8186
DHD_CTL(("submitted IOCTL request: request_id %d, cmd %d, output_buf_len %d, tx_id %d\n",
8188 ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len,
8189 ioct_rqst->trans_id));
8190
8191 /* update ring's WR index and ring doorbell to dongle */
8192 dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
8193
8194 DHD_RING_UNLOCK(ring->ring_lock, flags);
8195
8196 return 0;
8197 } /* dhd_fillup_ioct_reqst */
8198
8199 /**
8200 * dhd_prot_ring_attach - Initialize the msgbuf_ring object and attach a
8201 * DMA-able buffer to it. The ring is NOT tagged as inited until all the ring
8202 * information is posted to the dongle.
8203 *
8204 * Invoked in dhd_prot_attach for the common rings, and in dhd_prot_init for
8205 * each flowring in pool of flowrings.
8206 *
8207 * returns BCME_OK=0 on success
8208 * returns non-zero negative error value on failure.
8209 */
8210 static int
8211 dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, const char *name,
8212 uint16 max_items, uint16 item_len, uint16 ringid)
8213 {
8214 int dma_buf_alloced = BCME_NOMEM;
8215 uint32 dma_buf_len = max_items * item_len;
8216 dhd_prot_t *prot = dhd->prot;
8217 uint16 max_flowrings = dhd->bus->max_tx_flowrings;
8218 dhd_dma_buf_t *dma_buf = NULL;
8219
8220 ASSERT(ring);
8221 ASSERT(name);
8222 ASSERT((max_items < 0xFFFF) && (item_len < 0xFFFF) && (ringid < 0xFFFF));
8223
8224 /* Init name */
8225 strncpy(ring->name, name, RING_NAME_MAX_LENGTH);
8226 ring->name[RING_NAME_MAX_LENGTH - 1] = '\0';
8227
8228 ring->idx = ringid;
8229
8230 ring->max_items = max_items;
8231 ring->item_len = item_len;
8232
8233 /* A contiguous space may be reserved for all flowrings */
8234 if (DHD_IS_FLOWRING(ringid, max_flowrings) && (prot->flowrings_dma_buf.va)) {
8235 /* Carve out from the contiguous DMA-able flowring buffer */
8236 uint16 flowid;
8237 uint32 base_offset;
8238
8239 dhd_dma_buf_t *rsv_buf = &prot->flowrings_dma_buf;
8240 dma_buf = &ring->dma_buf;
8241
8242 flowid = DHD_RINGID_TO_FLOWID(ringid);
8243 base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len;
8244
8245 ASSERT(base_offset + dma_buf_len <= rsv_buf->len);
8246
8247 dma_buf->len = dma_buf_len;
8248 dma_buf->va = (void *)((uintptr)rsv_buf->va + base_offset);
8249 PHYSADDRHISET(dma_buf->pa, PHYSADDRHI(rsv_buf->pa));
8250 PHYSADDRLOSET(dma_buf->pa, PHYSADDRLO(rsv_buf->pa) + base_offset);
8251
8252 /* On 64bit, contiguous space may not span across 0x00000000FFFFFFFF */
8253 ASSERT(PHYSADDRLO(dma_buf->pa) >= PHYSADDRLO(rsv_buf->pa));
8254
8255 dma_buf->dmah = rsv_buf->dmah;
8256 dma_buf->secdma = rsv_buf->secdma;
8257
8258 (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
8259 } else {
8260 #ifdef EWP_EDL
8261 if (ring == dhd->prot->d2hring_edl) {
/* For the EDL ring, memory is allocated during attach,
 * so we just need to copy that dma_buf into the ring's dma_buf
 */
8265 memcpy(&ring->dma_buf, &dhd->edl_ring_mem, sizeof(ring->dma_buf));
8266 dma_buf = &ring->dma_buf;
8267 if (dma_buf->va == NULL) {
8268 return BCME_NOMEM;
8269 }
8270 } else
8271 #endif /* EWP_EDL */
8272 {
8273 /* Allocate a dhd_dma_buf */
8274 dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len);
8275 if (dma_buf_alloced != BCME_OK) {
8276 return BCME_NOMEM;
8277 }
8278 }
8279 }
8280
8281 /* CAUTION: Save ring::base_addr in little endian format! */
8282 dhd_base_addr_htolpa(&ring->base_addr, ring->dma_buf.pa);
8283
8284 #ifdef BCM_SECURE_DMA
8285 if (SECURE_DMA_ENAB(prot->osh)) {
8286 ring->dma_buf.secdma = MALLOCZ(prot->osh, sizeof(sec_cma_info_t));
8287 if (ring->dma_buf.secdma == NULL) {
8288 goto free_dma_buf;
8289 }
8290 }
8291 #endif /* BCM_SECURE_DMA */
8292
8293 ring->ring_lock = dhd_os_spin_lock_init(dhd->osh);
8294
8295 DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d "
8296 "ring start %p buf phys addr %x:%x \n",
8297 ring->name, ring->max_items, ring->item_len,
8298 dma_buf_len, ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
8299 ltoh32(ring->base_addr.low_addr)));
8300
8301 return BCME_OK;
8302
8303 #ifdef BCM_SECURE_DMA
8304 free_dma_buf:
8305 if (dma_buf_alloced == BCME_OK) {
8306 dhd_dma_buf_free(dhd, &ring->dma_buf);
8307 }
8308 #endif /* BCM_SECURE_DMA */
8309
8310 return BCME_NOMEM;
8311
8312 } /* dhd_prot_ring_attach */
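/*
 * Illustrative sketch of the flowring carve-out arithmetic above (the numbers
 * are hypothetical): if item_len were 48, max_items were 512 and
 * BCMPCIE_H2D_COMMON_MSGRINGS were 2, then dma_buf_len = 512 * 48 = 24576
 * bytes, and within the pre-reserved flowrings_dma_buf the ring for flowid 2
 * would start at byte offset 0, flowid 3 at 24576, flowid 4 at 49152, etc.:
 *
 *   base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len;
 *   dma_buf->va = (void *)((uintptr)rsv_buf->va + base_offset);
 */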
8313
8314 /**
8315 * dhd_prot_ring_init - Post the common ring information to dongle.
8316 *
8317 * Used only for common rings.
8318 *
8319 * The flowrings information is passed via the create flowring control message
8320 * (tx_flowring_create_request_t) sent over the H2D control submission common
8321 * ring.
8322 */
8323 static void
8324 dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring)
8325 {
8326 ring->wr = 0;
8327 ring->rd = 0;
8328 ring->curr_rd = 0;
/* Reset hwa_db_type for all rings; for data path rings it will be assigned
 * separately, post init, from dhd_prot_d2h_sync_init and dhd_prot_h2d_sync_init
 */
8333 ring->hwa_db_type = 0;
8334
8335 /* CAUTION: ring::base_addr already in Little Endian */
8336 dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr,
8337 sizeof(sh_addr_t), RING_BUF_ADDR, ring->idx);
8338 dhd_bus_cmn_writeshared(dhd->bus, &ring->max_items,
8339 sizeof(uint16), RING_MAX_ITEMS, ring->idx);
8340 dhd_bus_cmn_writeshared(dhd->bus, &ring->item_len,
8341 sizeof(uint16), RING_ITEM_LEN, ring->idx);
8342
8343 dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
8344 sizeof(uint16), RING_WR_UPD, ring->idx);
8345 dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
8346 sizeof(uint16), RING_RD_UPD, ring->idx);
8347
8348 /* ring inited */
8349 ring->inited = TRUE;
8350
8351 } /* dhd_prot_ring_init */
8352
8353 /**
 * dhd_prot_ring_reset - bzero a ring's DMA-able buffer and flush the cache.
 * Reset the WR and RD indices to 0.
8356 */
8357 static void
8358 dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring)
8359 {
8360 DHD_TRACE(("%s\n", __FUNCTION__));
8361
8362 dhd_dma_buf_reset(dhd, &ring->dma_buf);
8363
8364 ring->rd = ring->wr = 0;
8365 ring->curr_rd = 0;
8366 ring->inited = FALSE;
8367 ring->create_pending = FALSE;
8368 }
8369
8370 /**
8371 * dhd_prot_ring_detach - Detach the DMA-able buffer and any other objects
8372 * hanging off the msgbuf_ring.
8373 */
8374 static void
8375 dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring)
8376 {
8377 dhd_prot_t *prot = dhd->prot;
8378 uint16 max_flowrings = dhd->bus->max_tx_flowrings;
8379 ASSERT(ring);
8380
8381 ring->inited = FALSE;
8382 /* rd = ~0, wr = ring->rd - 1, max_items = 0, len_item = ~0 */
8383
8384 #ifdef BCM_SECURE_DMA
8385 if (SECURE_DMA_ENAB(prot->osh)) {
8386 if (ring->dma_buf.secdma) {
8387 SECURE_DMA_UNMAP_ALL(prot->osh, ring->dma_buf.secdma);
8388 MFREE(prot->osh, ring->dma_buf.secdma, sizeof(sec_cma_info_t));
8389 ring->dma_buf.secdma = NULL;
8390 }
8391 }
8392 #endif /* BCM_SECURE_DMA */
8393
8394 /* If the DMA-able buffer was carved out of a pre-reserved contiguous
8395 * memory, then simply stop using it.
8396 */
8397 if (DHD_IS_FLOWRING(ring->idx, max_flowrings) && (prot->flowrings_dma_buf.va)) {
8398 (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
8399 memset(&ring->dma_buf, 0, sizeof(dhd_dma_buf_t));
8400 } else {
8401 dhd_dma_buf_free(dhd, &ring->dma_buf);
8402 }
8403
8404 dhd_os_spin_lock_deinit(dhd->osh, ring->ring_lock);
8405
8406 } /* dhd_prot_ring_detach */
8407
8408 /* Fetch number of H2D flowrings given the total number of h2d rings */
8409 uint16
8410 dhd_get_max_flow_rings(dhd_pub_t *dhd)
8411 {
8412 if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
8413 return dhd->bus->max_tx_flowrings;
8414 else
8415 return (dhd->bus->max_tx_flowrings - BCMPCIE_H2D_COMMON_MSGRINGS);
8416 }
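/*
 * Usage sketch (illustrative only): callers size the flowring pool from this
 * value, e.g.
 *
 *   uint16 h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
 *   pool = MALLOCZ(prot->osh, h2d_flowrings_total * sizeof(msgbuf_ring_t));
 *
 * For fw_rev >= PCIE_SHARED_VERSION_6 the advertized max_tx_flowrings already
 * excludes the H2D common rings; for older revisions the common rings are
 * subtracted here.
 */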
8417
8418 /**
8419 * dhd_prot_flowrings_pool_attach - Initialize a pool of flowring msgbuf_ring_t.
8420 *
8421 * Allocate a pool of msgbuf_ring along with DMA-able buffers for flowrings.
8422 * Dongle includes common rings when it advertizes the number of H2D rings.
8423 * Allocates a pool of msgbuf_ring_t and invokes dhd_prot_ring_attach to
8424 * allocate the DMA-able buffer and initialize each msgbuf_ring_t object.
8425 *
8426 * dhd_prot_ring_attach is invoked to perform the actual initialization and
8427 * attaching the DMA-able buffer.
8428 *
8429 * Later dhd_prot_flowrings_pool_fetch() may be used to fetch a preallocated and
8430 * initialized msgbuf_ring_t object.
8431 *
8432 * returns BCME_OK=0 on success
8433 * returns non-zero negative error value on failure.
8434 */
8435 static int
8436 dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd)
8437 {
8438 uint16 flowid;
8439 msgbuf_ring_t *ring;
8440 uint16 h2d_flowrings_total; /* exclude H2D common rings */
8441 dhd_prot_t *prot = dhd->prot;
8442 char ring_name[RING_NAME_MAX_LENGTH];
8443
8444 if (prot->h2d_flowrings_pool != NULL)
return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
8446
8447 ASSERT(prot->h2d_rings_total == 0);
8448
8449 /* h2d_rings_total includes H2D common rings: ctrl and rxbuf subn */
8450 prot->h2d_rings_total = (uint16)dhd_bus_max_h2d_queues(dhd->bus);
8451
8452 if (prot->h2d_rings_total < BCMPCIE_H2D_COMMON_MSGRINGS) {
8453 DHD_ERROR(("%s: h2d_rings_total advertized as %u\n",
8454 __FUNCTION__, prot->h2d_rings_total));
8455 return BCME_ERROR;
8456 }
8457
8458 /* Subtract number of H2D common rings, to determine number of flowrings */
8459 h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
8460
8461 DHD_ERROR(("Attach flowrings pool for %d rings\n", h2d_flowrings_total));
8462
8463 /* Allocate pool of msgbuf_ring_t objects for all flowrings */
8464 prot->h2d_flowrings_pool = (msgbuf_ring_t *)MALLOCZ(prot->osh,
8465 (h2d_flowrings_total * sizeof(msgbuf_ring_t)));
8466
8467 if (prot->h2d_flowrings_pool == NULL) {
8468 DHD_ERROR(("%s: flowrings pool for %d flowrings, alloc failure\n",
8469 __FUNCTION__, h2d_flowrings_total));
8470 goto fail;
8471 }
8472
8473 /* Setup & Attach a DMA-able buffer to each flowring in the flowring pool */
8474 FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
8475 snprintf(ring_name, sizeof(ring_name), "h2dflr_%03u", flowid);
8476 if (dhd_prot_ring_attach(dhd, ring, ring_name,
8477 prot->h2d_max_txpost, H2DRING_TXPOST_ITEMSIZE,
8478 DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) {
8479 goto attach_fail;
8480 }
/*
 * TODO: flowring HWA is currently disabled; it can be enabled like below:
 * (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_TXPOSTS) ? HWA_DB_TYPE_TXPOSTS : 0;
 */
8485 ring->hwa_db_type = 0;
8486 }
8487
8488 return BCME_OK;
8489
8490 attach_fail:
8491 dhd_prot_flowrings_pool_detach(dhd); /* Free entire pool of flowrings */
8492
8493 fail:
8494 prot->h2d_rings_total = 0;
8495 return BCME_NOMEM;
8496
8497 } /* dhd_prot_flowrings_pool_attach */
8498
8499 /**
8500 * dhd_prot_flowrings_pool_reset - Reset all msgbuf_ring_t objects in the pool.
8501 * Invokes dhd_prot_ring_reset to perform the actual reset.
8502 *
8503 * The DMA-able buffer is not freed during reset and neither is the flowring
8504 * pool freed.
8505 *
8506 * dhd_prot_flowrings_pool_reset will be invoked in dhd_prot_reset. Following
8507 * the dhd_prot_reset, dhd_prot_init will be re-invoked, and the flowring pool
8508 * from a previous flowring pool instantiation will be reused.
8509 *
8510 * This will avoid a fragmented DMA-able memory condition, if multiple
8511 * dhd_prot_reset were invoked to reboot the dongle without a full detach/attach
8512 * cycle.
8513 */
8514 static void
8515 dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd)
8516 {
8517 uint16 flowid, h2d_flowrings_total;
8518 msgbuf_ring_t *ring;
8519 dhd_prot_t *prot = dhd->prot;
8520
8521 if (prot->h2d_flowrings_pool == NULL) {
8522 ASSERT(prot->h2d_rings_total == 0);
8523 return;
8524 }
8525 h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
8526 /* Reset each flowring in the flowring pool */
8527 FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
8528 dhd_prot_ring_reset(dhd, ring);
8529 ring->inited = FALSE;
8530 }
8531
/* The flowring pool state must now be as if dhd_prot_flowrings_pool_attach had just run */
8533 }
8534
8535 /**
8536 * dhd_prot_flowrings_pool_detach - Free pool of msgbuf_ring along with
8537 * DMA-able buffers for flowrings.
8538 * dhd_prot_ring_detach is invoked to free the DMA-able buffer and perform any
8539 * de-initialization of each msgbuf_ring_t.
8540 */
8541 static void
8542 dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd)
8543 {
8544 int flowid;
8545 msgbuf_ring_t *ring;
8546 uint16 h2d_flowrings_total; /* exclude H2D common rings */
8547 dhd_prot_t *prot = dhd->prot;
8548
8549 if (prot->h2d_flowrings_pool == NULL) {
8550 ASSERT(prot->h2d_rings_total == 0);
8551 return;
8552 }
8553
8554 h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
8555 /* Detach the DMA-able buffer for each flowring in the flowring pool */
8556 FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
8557 dhd_prot_ring_detach(dhd, ring);
8558 }
8559
8560 MFREE(prot->osh, prot->h2d_flowrings_pool,
8561 (h2d_flowrings_total * sizeof(msgbuf_ring_t)));
8562
8563 prot->h2d_flowrings_pool = (msgbuf_ring_t*)NULL;
8564 prot->h2d_rings_total = 0;
8565
8566 } /* dhd_prot_flowrings_pool_detach */
8567
8568 /**
8569 * dhd_prot_flowrings_pool_fetch - Fetch a preallocated and initialized
8570 * msgbuf_ring from the flowring pool, and assign it.
8571 *
 * Unlike common rings, which use dhd_prot_ring_init() to pass the common
8573 * ring information to the dongle, a flowring's information is passed via a
8574 * flowring create control message.
8575 *
8576 * Only the ring state (WR, RD) index are initialized.
8577 */
8578 static msgbuf_ring_t *
8579 dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd, uint16 flowid)
8580 {
8581 msgbuf_ring_t *ring;
8582 dhd_prot_t *prot = dhd->prot;
8583
8584 ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
8585 ASSERT(flowid < prot->h2d_rings_total);
8586 ASSERT(prot->h2d_flowrings_pool != NULL);
8587
8588 ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
8589
8590 /* ASSERT flow_ring->inited == FALSE */
8591
8592 ring->wr = 0;
8593 ring->rd = 0;
8594 ring->curr_rd = 0;
8595 ring->inited = TRUE;
8596 /**
8597 * Every time a flowring starts dynamically, initialize current_phase with 0
8598 * then flip to BCMPCIE_CMNHDR_PHASE_BIT_INIT
8599 */
8600 ring->current_phase = 0;
8601 return ring;
8602 }
8603
8604 /**
8605 * dhd_prot_flowrings_pool_release - release a previously fetched flowring's
8606 * msgbuf_ring back to the flow_ring pool.
8607 */
8608 void
8609 dhd_prot_flowrings_pool_release(dhd_pub_t *dhd, uint16 flowid, void *flow_ring)
8610 {
8611 msgbuf_ring_t *ring;
8612 dhd_prot_t *prot = dhd->prot;
8613
8614 ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
8615 ASSERT(flowid < prot->h2d_rings_total);
8616 ASSERT(prot->h2d_flowrings_pool != NULL);
8617
8618 ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
8619
8620 ASSERT(ring == (msgbuf_ring_t*)flow_ring);
8621 /* ASSERT flow_ring->inited == TRUE */
8622
8623 (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
8624
8625 ring->wr = 0;
8626 ring->rd = 0;
8627 ring->inited = FALSE;
8628
8629 ring->curr_rd = 0;
8630 }
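/*
 * Flowring pool lifecycle sketch (illustrative; error handling omitted):
 *
 *   dhd_prot_flowrings_pool_attach(dhd);                 // at prot attach/init
 *   ring = dhd_prot_flowrings_pool_fetch(dhd, flowid);   // on flow ring create
 *   ...                                                  // TX traffic on ring
 *   dhd_prot_flowrings_pool_release(dhd, flowid, ring);  // on flow ring delete
 *   dhd_prot_flowrings_pool_reset(dhd);                  // on dhd_prot_reset
 *   dhd_prot_flowrings_pool_detach(dhd);                 // at prot detach
 */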
8631
/* Assumes only one index is updated at a time.
 * If exactly_nitems is TRUE, this function allocates space for nitems or fails,
 * except when a wrap-around is encountered (the last nitems of the ring buffer), to prevent a hang-up.
 * If exactly_nitems is FALSE, this function allocates space for nitems or less.
 */
8636 static void *BCMFASTPATH
8637 dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced,
8638 bool exactly_nitems)
8639 {
8640 void *ret_ptr = NULL;
8641 uint16 ring_avail_cnt;
8642
8643 ASSERT(nitems <= ring->max_items);
8644
8645 ring_avail_cnt = CHECK_WRITE_SPACE(ring->rd, ring->wr, ring->max_items);
8646
8647 if ((ring_avail_cnt == 0) ||
8648 (exactly_nitems && (ring_avail_cnt < nitems) &&
8649 ((ring->max_items - ring->wr) >= nitems))) {
8650 DHD_INFO(("Space not available: ring %s items %d write %d read %d\n",
8651 ring->name, nitems, ring->wr, ring->rd));
8652 return NULL;
8653 }
8654 *alloced = MIN(nitems, ring_avail_cnt);
8655
8656 /* Return next available space */
8657 ret_ptr = (char *)DHD_RING_BGN_VA(ring) + (ring->wr * ring->item_len);
8658
8659 /* Update write index */
8660 if ((ring->wr + *alloced) == ring->max_items)
8661 ring->wr = 0;
8662 else if ((ring->wr + *alloced) < ring->max_items)
8663 ring->wr += *alloced;
8664 else {
8665 /* Should never hit this */
8666 ASSERT(0);
8667 return NULL;
8668 }
8669
8670 return ret_ptr;
8671 } /* dhd_prot_get_ring_space */
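/*
 * Worked example for the write-index update above (hypothetical ring state;
 * the exact count returned by CHECK_WRITE_SPACE() depends on its definition):
 * with max_items = 8 and wr = 6, an allocation of alloced = 2 items starts at
 * DHD_RING_BGN_VA(ring) + 6 * item_len and, since 6 + 2 == max_items, wraps
 * wr back to 0; an allocation of a single item would instead advance wr to 7.
 */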
8672
8673 /**
8674 * dhd_prot_ring_write_complete - Host updates the new WR index on producing
8675 * new messages in a H2D ring. The messages are flushed from cache prior to
8676 * posting the new WR index. The new WR index will be updated in the DMA index
8677 * array or directly in the dongle's ring state memory.
8678 * A PCIE doorbell will be generated to wake up the dongle.
 * This is a non-atomic function; make sure the callers
 * always hold the appropriate locks.
8681 */
8682 static void BCMFASTPATH
8683 __dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
8684 uint16 nitems)
8685 {
8686 dhd_prot_t *prot = dhd->prot;
8687 uint32 db_index;
8688 uint16 max_flowrings = dhd->bus->max_tx_flowrings;
8689 uint corerev;
8690
8691 /* cache flush */
8692 OSL_CACHE_FLUSH(p, ring->item_len * nitems);
8693
8694 /* For HWA, update db_index and ring mb2 DB and return */
8695 if (HWA_ACTIVE(dhd) && ring->hwa_db_type) {
8696 db_index = HWA_DB_INDEX_VALUE(ring->wr) | ring->hwa_db_type;
8697 DHD_TRACE(("%s: ring(%s) wr(%d) hwa_db_type(0x%x) db_index(0x%x)\n",
8698 __FUNCTION__, ring->name, ring->wr, ring->hwa_db_type, db_index));
8699 prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
8700 return;
8701 }
8702
8703 if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
8704 dhd_prot_dma_indx_set(dhd, ring->wr,
8705 H2D_DMA_INDX_WR_UPD, ring->idx);
8706 } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings)) {
8707 dhd_prot_dma_indx_set(dhd, ring->wr,
8708 H2D_IFRM_INDX_WR_UPD, ring->idx);
8709 } else {
8710 dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
8711 sizeof(uint16), RING_WR_UPD, ring->idx);
8712 }
8713
8714 /* raise h2d interrupt */
8715 if (IDMA_ACTIVE(dhd) ||
8716 (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings))) {
8717 db_index = IDMA_IDX0;
/* This API is called in the wl down path; in that case sih has already been freed */
8719 if (dhd->bus->sih) {
8720 corerev = dhd->bus->sih->buscorerev;
/* We need to explicitly configure the type of DMA for core rev >= 24 */
8722 if (corerev >= 24) {
8723 db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
8724 }
8725 }
8726 prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
8727 } else {
8728 prot->mb_ring_fn(dhd->bus, ring->wr);
8729 }
8730 }
8731
8732 static void BCMFASTPATH
8733 dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
8734 uint16 nitems)
8735 {
8736 unsigned long flags_bus;
8737 DHD_BUS_LOCK(dhd->bus->bus_lock, flags_bus);
8738 __dhd_prot_ring_write_complete(dhd, ring, p, nitems);
8739 DHD_BUS_UNLOCK(dhd->bus->bus_lock, flags_bus);
8740 }
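/*
 * Summary of the index-update/doorbell paths taken by
 * __dhd_prot_ring_write_complete() above:
 *
 *   HWA ring (hwa_db_type set)  -> mb_2_ring_fn() with the HWA db_index
 *   IDMA or DMA index support   -> WR index written to the host DMA index array
 *   IFRM on a flowring          -> WR index written to the IFRM index array
 *   otherwise                   -> WR index written to dongle TCM
 *
 * followed by mb_2_ring_fn() (IDMA/IFRM) or mb_ring_fn() (legacy doorbell) to
 * interrupt the dongle.
 */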
8741
8742 /**
 * dhd_prot_ring_write_complete_mbdata - called from dhd_prot_h2d_mbdata_send_ctrlmsg.
 * It holds DHD_BUS_LOCK while updating the WR pointer and ringing the DB, and also updates
 * bus_low_power_state under the same BUS_LOCK to indicate that D3_INFORM was sent.
8746 */
8747 static void BCMFASTPATH
8748 dhd_prot_ring_write_complete_mbdata(dhd_pub_t *dhd, msgbuf_ring_t * ring, void *p,
8749 uint16 nitems, uint32 mb_data)
8750 {
8751 unsigned long flags_bus;
8752
8753 DHD_BUS_LOCK(dhd->bus->bus_lock, flags_bus);
8754
8755 __dhd_prot_ring_write_complete(dhd, ring, p, nitems);
8756
8757 /* Mark D3_INFORM in the same context to skip ringing H2D DB after D3_INFORM */
8758 if (mb_data == H2D_HOST_D3_INFORM) {
8759 dhd->bus->bus_low_power_state = DHD_BUS_D3_INFORM_SENT;
8760 }
8761
8762 DHD_BUS_UNLOCK(dhd->bus->bus_lock, flags_bus);
8763 }
8764
8765 /**
8766 * dhd_prot_upd_read_idx - Host updates the new RD index on consuming messages
8767 * from a D2H ring. The new RD index will be updated in the DMA Index array or
8768 * directly in dongle's ring state memory.
8769 */
8770 static void
8771 dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
8772 {
8773 dhd_prot_t *prot = dhd->prot;
8774 uint32 db_index;
8775 uint corerev;
8776
8777 /* For HWA, update db_index and ring mb2 DB and return */
8778 if (HWA_ACTIVE(dhd) && ring->hwa_db_type) {
8779 db_index = HWA_DB_INDEX_VALUE(ring->rd) | ring->hwa_db_type;
8780 DHD_TRACE(("%s: ring(%s) rd(0x%x) hwa_db_type(0x%x) db_index(0x%x)\n",
8781 __FUNCTION__, ring->name, ring->rd, ring->hwa_db_type, db_index));
8782 prot->mb_2_ring_fn(dhd->bus, db_index, FALSE);
8783 return;
8784 }
8785
/* Update the read index: if DMA'ing of ring indices is supported, update the
 * RD index in host memory; otherwise write it to dongle TCM.
 */
8791 if (IDMA_ACTIVE(dhd)) {
8792 dhd_prot_dma_indx_set(dhd, ring->rd,
8793 D2H_DMA_INDX_RD_UPD, ring->idx);
8794 db_index = IDMA_IDX1;
8795 if (dhd->bus->sih) {
8796 corerev = dhd->bus->sih->buscorerev;
/* We need to explicitly configure the type of DMA for core rev >= 24 */
8798 if (corerev >= 24) {
8799 db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
8800 }
8801 }
8802 prot->mb_2_ring_fn(dhd->bus, db_index, FALSE);
8803 } else if (dhd->dma_h2d_ring_upd_support) {
8804 dhd_prot_dma_indx_set(dhd, ring->rd,
8805 D2H_DMA_INDX_RD_UPD, ring->idx);
8806 } else {
8807 dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
8808 sizeof(uint16), RING_RD_UPD, ring->idx);
8809 }
8810 }
8811
8812 static int
8813 dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
8814 uint16 ring_type, uint32 req_id)
8815 {
8816 unsigned long flags;
8817 d2h_ring_create_req_t *d2h_ring;
8818 uint16 alloced = 0;
8819 int ret = BCME_OK;
8820 uint16 max_h2d_rings = dhd->bus->max_submission_rings;
8821 msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
8822
8823 DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
8824
8825 DHD_TRACE(("%s trying to send D2H ring create Req\n", __FUNCTION__));
8826
8827 if (ring_to_create == NULL) {
8828 DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
8829 ret = BCME_ERROR;
8830 goto err;
8831 }
8832
8833 /* Request for ring buffer space */
8834 d2h_ring = (d2h_ring_create_req_t *) dhd_prot_alloc_ring_space(dhd,
8835 ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
8836 &alloced, FALSE);
8837
8838 if (d2h_ring == NULL) {
8839 DHD_ERROR(("%s: FATAL: No space in control ring to send D2H ring create\n",
8840 __FUNCTION__));
8841 ret = BCME_NOMEM;
8842 goto err;
8843 }
8844 ring_to_create->create_req_id = (uint16)req_id;
8845 ring_to_create->create_pending = TRUE;
8846
8847 /* Common msg buf hdr */
8848 d2h_ring->msg.msg_type = MSG_TYPE_D2H_RING_CREATE;
8849 d2h_ring->msg.if_id = 0;
8850 d2h_ring->msg.flags = ctrl_ring->current_phase;
8851 d2h_ring->msg.request_id = htol32(ring_to_create->create_req_id);
8852 d2h_ring->ring_id = htol16(DHD_D2H_RING_OFFSET(ring_to_create->idx, max_h2d_rings));
8853 DHD_ERROR(("%s ringid: %d idx: %d max_h2d: %d\n", __FUNCTION__, d2h_ring->ring_id,
8854 ring_to_create->idx, max_h2d_rings));
8855
8856 d2h_ring->ring_type = ring_type;
8857 d2h_ring->max_items = htol16(ring_to_create->max_items);
8858 d2h_ring->len_item = htol16(ring_to_create->item_len);
8859 d2h_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
8860 d2h_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
8861
8862 d2h_ring->flags = 0;
8863 d2h_ring->msg.epoch =
8864 ctrl_ring->seqnum % H2D_EPOCH_MODULO;
8865 ctrl_ring->seqnum++;
8866 #ifdef EWP_EDL
8867 if (ring_type == BCMPCIE_D2H_RING_TYPE_EDL) {
8868 DHD_ERROR(("%s: sending d2h EDL ring create: "
8869 "\n max items=%u; len_item=%u; ring_id=%u; low_addr=0x%x; high_addr=0x%x\n",
8870 __FUNCTION__, ltoh16(d2h_ring->max_items),
8871 ltoh16(d2h_ring->len_item),
8872 ltoh16(d2h_ring->ring_id),
8873 d2h_ring->ring_ptr.low_addr,
8874 d2h_ring->ring_ptr.high_addr));
8875 }
8876 #endif /* EWP_EDL */
8877
8878 /* Update the flow_ring's WRITE index */
8879 dhd_prot_ring_write_complete(dhd, ctrl_ring, d2h_ring,
8880 DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
8881
8882 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
8883
8884 return ret;
8885 err:
8886 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
8887
8888 return ret;
8889 }
8890
8891 static int
8892 dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create, uint8 ring_type, uint32 id)
8893 {
8894 unsigned long flags;
8895 h2d_ring_create_req_t *h2d_ring;
8896 uint16 alloced = 0;
8897 uint8 i = 0;
8898 int ret = BCME_OK;
8899 msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
8900
8901 DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
8902
8903 DHD_TRACE(("%s trying to send H2D ring create Req\n", __FUNCTION__));
8904
8905 if (ring_to_create == NULL) {
8906 DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
8907 ret = BCME_ERROR;
8908 goto err;
8909 }
8910
8911 /* Request for ring buffer space */
8912 h2d_ring = (h2d_ring_create_req_t *)dhd_prot_alloc_ring_space(dhd,
8913 ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
8914 &alloced, FALSE);
8915
8916 if (h2d_ring == NULL) {
8917 DHD_ERROR(("%s: FATAL: No space in control ring to send H2D ring create\n",
8918 __FUNCTION__));
8919 ret = BCME_NOMEM;
8920 goto err;
8921 }
8922 ring_to_create->create_req_id = (uint16)id;
8923 ring_to_create->create_pending = TRUE;
8924
8925 /* Common msg buf hdr */
8926 h2d_ring->msg.msg_type = MSG_TYPE_H2D_RING_CREATE;
8927 h2d_ring->msg.if_id = 0;
8928 h2d_ring->msg.request_id = htol32(ring_to_create->create_req_id);
8929 h2d_ring->msg.flags = ctrl_ring->current_phase;
8930 h2d_ring->ring_id = htol16(DHD_H2D_RING_OFFSET(ring_to_create->idx));
8931 h2d_ring->ring_type = ring_type;
8932 h2d_ring->max_items = htol16(H2DRING_DYNAMIC_INFO_MAX_ITEM);
8933 h2d_ring->n_completion_ids = ring_to_create->n_completion_ids;
8934 h2d_ring->len_item = htol16(H2DRING_INFO_BUFPOST_ITEMSIZE);
8935 h2d_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
8936 h2d_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
8937
8938 for (i = 0; i < ring_to_create->n_completion_ids; i++) {
8939 h2d_ring->completion_ring_ids[i] = htol16(ring_to_create->compeltion_ring_ids[i]);
8940 }
8941
8942 h2d_ring->flags = 0;
8943 h2d_ring->msg.epoch =
8944 ctrl_ring->seqnum % H2D_EPOCH_MODULO;
8945 ctrl_ring->seqnum++;
8946
8947 /* Update the flow_ring's WRITE index */
8948 dhd_prot_ring_write_complete(dhd, ctrl_ring, h2d_ring,
8949 DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
8950
8951 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
8952
8953 return ret;
8954 err:
8955 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
8956
8957 return ret;
8958 }
8959
8960 /**
8961 * dhd_prot_dma_indx_set - set a new WR or RD index in the DMA index array.
8962 * Dongle will DMA the entire array (if DMA_INDX feature is enabled).
8963 * See dhd_prot_dma_indx_init()
8964 */
8965 void
8966 dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, uint16 ringid)
8967 {
8968 uint8 *ptr;
8969 uint16 offset;
8970 dhd_prot_t *prot = dhd->prot;
8971 uint16 max_h2d_rings = dhd->bus->max_submission_rings;
8972
8973 switch (type) {
8974 case H2D_DMA_INDX_WR_UPD:
8975 ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
8976 offset = DHD_H2D_RING_OFFSET(ringid);
8977 break;
8978
8979 case D2H_DMA_INDX_RD_UPD:
8980 ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
8981 offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
8982 break;
8983
8984 case H2D_IFRM_INDX_WR_UPD:
8985 ptr = (uint8 *)(prot->h2d_ifrm_indx_wr_buf.va);
8986 offset = DHD_H2D_FRM_FLOW_RING_OFFSET(ringid);
8987 break;
8988
8989 default:
8990 DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
8991 __FUNCTION__));
8992 return;
8993 }
8994
8995 ASSERT(prot->rw_index_sz != 0);
8996 ptr += offset * prot->rw_index_sz;
8997
8998 *(uint16*)ptr = htol16(new_index);
8999
9000 OSL_CACHE_FLUSH((void *)ptr, prot->rw_index_sz);
9001
9002 DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
9003 __FUNCTION__, new_index, type, ringid, ptr, offset));
9004
9005 } /* dhd_prot_dma_indx_set */
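/*
 * Illustrative offset computation (hypothetical sizes): if the dongle
 * advertized rw_index_sz = 2 (see dhd_prot_dma_indx_init) and
 * DHD_H2D_RING_OFFSET(ringid) evaluated to 5, the WR index for that ring would
 * live at h2d_dma_indx_wr_buf.va + 5 * 2, i.e. 10 bytes into the shared array,
 * stored little-endian via htol16() and cache-flushed so the dongle can DMA it.
 */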
9006
9007 /**
9008 * dhd_prot_dma_indx_get - Fetch a WR or RD index from the dongle DMA-ed index
9009 * array.
9010 * Dongle DMAes an entire array to host memory (if the feature is enabled).
9011 * See dhd_prot_dma_indx_init()
9012 */
9013 static uint16
9014 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid)
9015 {
9016 uint8 *ptr;
9017 uint16 data;
9018 uint16 offset;
9019 dhd_prot_t *prot = dhd->prot;
9020 uint16 max_h2d_rings = dhd->bus->max_submission_rings;
9021
9022 switch (type) {
9023 case H2D_DMA_INDX_WR_UPD:
9024 ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
9025 offset = DHD_H2D_RING_OFFSET(ringid);
9026 break;
9027
9028 case H2D_DMA_INDX_RD_UPD:
9029 ptr = (uint8 *)(prot->h2d_dma_indx_rd_buf.va);
9030 offset = DHD_H2D_RING_OFFSET(ringid);
9031 break;
9032
9033 case D2H_DMA_INDX_WR_UPD:
9034 ptr = (uint8 *)(prot->d2h_dma_indx_wr_buf.va);
9035 offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
9036 break;
9037
9038 case D2H_DMA_INDX_RD_UPD:
9039 ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
9040 offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
9041 break;
9042
9043 default:
9044 DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
9045 __FUNCTION__));
9046 return 0;
9047 }
9048
9049 ASSERT(prot->rw_index_sz != 0);
9050 ptr += offset * prot->rw_index_sz;
9051
9052 OSL_CACHE_INV((void *)ptr, prot->rw_index_sz);
9053
9054 data = LTOH16(*((uint16*)ptr));
9055
9056 DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
9057 __FUNCTION__, data, type, ringid, ptr, offset));
9058
9059 return (data);
9060
9061 } /* dhd_prot_dma_indx_get */
9062
9063 /**
9064 * An array of DMA read/write indices, containing information about host rings, can be maintained
 * either in host memory or in device memory, depending on preprocessor options. Depending on these
 * options, this function is called during driver initialization. It reserves and initializes
 * blocks of DMA'able host memory containing an array of DMA read or DMA write indices. The physical
 * addresses of these host memory blocks are communicated to the dongle later on. By reading this host
9069 * memory, the dongle learns about the state of the host rings.
9070 */
9071
9072 static INLINE int
9073 dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
9074 dhd_dma_buf_t *dma_buf, uint32 bufsz)
9075 {
9076 int rc;
9077
9078 if ((dma_buf->len == bufsz) || (dma_buf->va != NULL))
9079 return BCME_OK;
9080
9081 rc = dhd_dma_buf_alloc(dhd, dma_buf, bufsz);
9082
9083 return rc;
9084 }
9085
9086 int
9087 dhd_prot_dma_indx_init(dhd_pub_t *dhd, uint32 rw_index_sz, uint8 type, uint32 length)
9088 {
9089 uint32 bufsz;
9090 dhd_prot_t *prot = dhd->prot;
9091 dhd_dma_buf_t *dma_buf;
9092
9093 if (prot == NULL) {
9094 DHD_ERROR(("prot is not inited\n"));
9095 return BCME_ERROR;
9096 }
9097
9098 /* Dongle advertizes 2B or 4B RW index size */
9099 ASSERT(rw_index_sz != 0);
9100 prot->rw_index_sz = rw_index_sz;
9101
9102 bufsz = rw_index_sz * length;
9103
9104 switch (type) {
9105 case H2D_DMA_INDX_WR_BUF:
9106 dma_buf = &prot->h2d_dma_indx_wr_buf;
9107 if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
9108 goto ret_no_mem;
9109 DHD_ERROR(("H2D DMA WR INDX : array size %d = %d * %d\n",
9110 dma_buf->len, rw_index_sz, length));
9111 break;
9112
9113 case H2D_DMA_INDX_RD_BUF:
9114 dma_buf = &prot->h2d_dma_indx_rd_buf;
9115 if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
9116 goto ret_no_mem;
9117 DHD_ERROR(("H2D DMA RD INDX : array size %d = %d * %d\n",
9118 dma_buf->len, rw_index_sz, length));
9119 break;
9120
9121 case D2H_DMA_INDX_WR_BUF:
9122 dma_buf = &prot->d2h_dma_indx_wr_buf;
9123 if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
9124 goto ret_no_mem;
9125 DHD_ERROR(("D2H DMA WR INDX : array size %d = %d * %d\n",
9126 dma_buf->len, rw_index_sz, length));
9127 break;
9128
9129 case D2H_DMA_INDX_RD_BUF:
9130 dma_buf = &prot->d2h_dma_indx_rd_buf;
9131 if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
9132 goto ret_no_mem;
9133 DHD_ERROR(("D2H DMA RD INDX : array size %d = %d * %d\n",
9134 dma_buf->len, rw_index_sz, length));
9135 break;
9136
9137 case H2D_IFRM_INDX_WR_BUF:
9138 dma_buf = &prot->h2d_ifrm_indx_wr_buf;
9139 if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
9140 goto ret_no_mem;
9141 DHD_ERROR(("H2D IFRM WR INDX : array size %d = %d * %d\n",
9142 dma_buf->len, rw_index_sz, length));
9143 break;
9144
9145 default:
9146 DHD_ERROR(("%s: Unexpected option\n", __FUNCTION__));
9147 return BCME_BADOPTION;
9148 }
9149
9150 return BCME_OK;
9151
9152 ret_no_mem:
9153 DHD_ERROR(("%s: dhd_prot_dma_indx_alloc type %d buf_sz %d failure\n",
9154 __FUNCTION__, type, bufsz));
9155 return BCME_NOMEM;
9156
9157 } /* dhd_prot_dma_indx_init */
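/*
 * Usage sketch (values are illustrative): to set up a 2-byte-per-ring H2D WR
 * index array covering, say, 16 submission rings, the bus layer could call
 *
 *   dhd_prot_dma_indx_init(dhd, 2, H2D_DMA_INDX_WR_BUF, 16);
 *
 * which allocates bufsz = 2 * 16 = 32 bytes of DMA'able host memory; the
 * physical address of this block is conveyed to the dongle later on.
 */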
9158
9159 /**
9160 * Called on checking for 'completion' messages from the dongle. Returns next host buffer to read
9161 * from, or NULL if there are no more messages to read.
9162 */
9163 static uint8*
9164 dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 *available_len)
9165 {
9166 uint16 wr;
9167 uint16 rd;
9168 uint16 depth;
9169 uint16 items;
9170 void *read_addr = NULL; /* address of next msg to be read in ring */
9171 uint16 d2h_wr = 0;
9172
9173 DHD_TRACE(("%s: d2h_dma_indx_rd_buf %p, d2h_dma_indx_wr_buf %p\n",
9174 __FUNCTION__, (uint32 *)(dhd->prot->d2h_dma_indx_rd_buf.va),
9175 (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va)));
9176
/* Remember the current read index in curr_rd, because ring->rd gets updated at
 * the end of this function; without this, the exact read index from which a
 * message was read could not be reported.
 */
9182 ring->curr_rd = ring->rd;
9183
9184 /* update write pointer */
9185 if (dhd->dma_d2h_ring_upd_support) {
9186 /* DMAing write/read indices supported */
9187 d2h_wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
9188 ring->wr = d2h_wr;
9189 } else {
9190 dhd_bus_cmn_readshared(dhd->bus, &(ring->wr), RING_WR_UPD, ring->idx);
9191 }
9192
9193 wr = ring->wr;
9194 rd = ring->rd;
9195 depth = ring->max_items;
9196
9197 /* check for avail space, in number of ring items */
9198 items = READ_AVAIL_SPACE(wr, rd, depth);
9199 if (items == 0)
9200 return NULL;
9201
/*
 * Note that there are builds where ASSERT translates to just a printk, so even
 * if this condition were hit we would never halt; dhd_prot_process_msgtype
 * could then get into a very long loop.
 */
9208 if (items > ring->max_items) {
9209 DHD_ERROR(("\r\n======================= \r\n"));
9210 DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
9211 __FUNCTION__, ring, ring->name, ring->max_items, items));
9212 DHD_ERROR(("wr: %d, rd: %d, depth: %d \r\n", wr, rd, depth));
9213 DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
9214 dhd->busstate, dhd->bus->wait_for_d3_ack));
9215 DHD_ERROR(("\r\n======================= \r\n"));
9216 #ifdef SUPPORT_LINKDOWN_RECOVERY
9217 if (wr >= ring->max_items) {
9218 dhd->bus->read_shm_fail = TRUE;
9219 }
9220 #else
9221 #ifdef DHD_FW_COREDUMP
9222 if (dhd->memdump_enabled) {
9223 /* collect core dump */
9224 dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
9225 dhd_bus_mem_dump(dhd);
9226
9227 }
9228 #endif /* DHD_FW_COREDUMP */
9229 #endif /* SUPPORT_LINKDOWN_RECOVERY */
9230
9231 *available_len = 0;
9232 dhd_schedule_reset(dhd);
9233
9234 return NULL;
9235 }
9236
9237 /* if space is available, calculate address to be read */
9238 read_addr = (char*)ring->dma_buf.va + (rd * ring->item_len);
9239
9240 /* update read pointer */
9241 if ((ring->rd + items) >= ring->max_items)
9242 ring->rd = 0;
9243 else
9244 ring->rd += items;
9245
9246 ASSERT(ring->rd < ring->max_items);
9247
9248 /* convert items to bytes : available_len must be 32bits */
9249 *available_len = (uint32)(items * ring->item_len);
9250
9251 OSL_CACHE_INV(read_addr, *available_len);
9252
9253 /* return read address */
9254 return read_addr;
9255
9256 } /* dhd_prot_get_read_addr */
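/*
 * Worked example (hypothetical ring state): with max_items = 16, item_len = 32,
 * rd = 14 and a freshly fetched wr = 2, READ_AVAIL_SPACE() yields the number of
 * completed items that can be consumed (its wrap handling depends on the macro
 * definition); the returned read_addr is dma_buf.va + 14 * 32, *available_len
 * is items * 32 bytes, and ring->rd wraps to 0 once rd + items reaches or
 * exceeds max_items.
 */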
9257
9258 /**
9259 * dhd_prot_h2d_mbdata_send_ctrlmsg is a non-atomic function,
9260 * make sure the callers always hold appropriate locks.
9261 */
9262 int dhd_prot_h2d_mbdata_send_ctrlmsg(dhd_pub_t *dhd, uint32 mb_data)
9263 {
9264 h2d_mailbox_data_t *h2d_mb_data;
9265 uint16 alloced = 0;
9266 msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
9267 unsigned long flags;
9268 int num_post = 1;
9269 int i;
9270
9271 DHD_INFO(("%s Sending H2D MB data Req data 0x%04x\n",
9272 __FUNCTION__, mb_data));
9273 if (!ctrl_ring->inited) {
9274 DHD_ERROR(("%s: Ctrl Submit Ring: not inited\n", __FUNCTION__));
9275 return BCME_ERROR;
9276 }
9277
9278 for (i = 0; i < num_post; i ++) {
9279 DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
9280 /* Request for ring buffer space */
9281 h2d_mb_data = (h2d_mailbox_data_t *)dhd_prot_alloc_ring_space(dhd,
9282 ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
9283 &alloced, FALSE);
9284
9285 if (h2d_mb_data == NULL) {
9286 DHD_ERROR(("%s: FATAL: No space in control ring to send H2D Mb data\n",
9287 __FUNCTION__));
9288 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9289 return BCME_NOMEM;
9290 }
9291
9292 memset(h2d_mb_data, 0, sizeof(h2d_mailbox_data_t));
9293 /* Common msg buf hdr */
9294 h2d_mb_data->msg.msg_type = MSG_TYPE_H2D_MAILBOX_DATA;
9295 h2d_mb_data->msg.flags = ctrl_ring->current_phase;
9296
9297 h2d_mb_data->msg.epoch =
9298 ctrl_ring->seqnum % H2D_EPOCH_MODULO;
9299 ctrl_ring->seqnum++;
9300
/* Update the mailbox data */
h2d_mb_data->mail_box_data = htol32(mb_data);
9306
9307 DHD_INFO(("%s Send H2D MB data Req data 0x%04x\n", __FUNCTION__, mb_data));
9308
9309 /* upd wrt ptr and raise interrupt */
9310 dhd_prot_ring_write_complete_mbdata(dhd, ctrl_ring, h2d_mb_data,
9311 DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, mb_data);
9312
9313 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9314
9315 }
9316 return 0;
9317 }
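/*
 * Example usage (illustrative; the calling context is an assumption): the bus
 * layer can signal D3 entry to the dongle with
 *
 *   dhd_prot_h2d_mbdata_send_ctrlmsg(dhd, H2D_HOST_D3_INFORM);
 *
 * As noted above, this function is non-atomic, so the caller must hold the
 * appropriate locks; for D3_INFORM, bus_low_power_state is additionally updated
 * inside dhd_prot_ring_write_complete_mbdata().
 */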
9318
9319 /** Creates a flow ring and informs dongle of this event */
9320 int
9321 dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
9322 {
9323 tx_flowring_create_request_t *flow_create_rqst;
9324 msgbuf_ring_t *flow_ring;
9325 dhd_prot_t *prot = dhd->prot;
9326 unsigned long flags;
9327 uint16 alloced = 0;
9328 msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
9329 uint16 max_flowrings = dhd->bus->max_tx_flowrings;
9330
9331 /* Fetch a pre-initialized msgbuf_ring from the flowring pool */
9332 flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
9333 if (flow_ring == NULL) {
9334 DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
9335 __FUNCTION__, flow_ring_node->flowid));
9336 return BCME_NOMEM;
9337 }
9338
9339 DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
9340
9341 /* Request for ctrl_ring buffer space */
9342 flow_create_rqst = (tx_flowring_create_request_t *)
9343 dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
9344
9345 if (flow_create_rqst == NULL) {
9346 dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
9347 DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n",
9348 __FUNCTION__, flow_ring_node->flowid));
DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9350 return BCME_NOMEM;
9351 }
9352
9353 flow_ring_node->prot_info = (void *)flow_ring;
9354
9355 /* Common msg buf hdr */
9356 flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE;
9357 flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
9358 flow_create_rqst->msg.request_id = htol32(0); /* TBD */
9359 flow_create_rqst->msg.flags = ctrl_ring->current_phase;
9360
9361 flow_create_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
9362 ctrl_ring->seqnum++;
9363
9364 /* Update flow create message */
9365 flow_create_rqst->tid = flow_ring_node->flow_info.tid;
9366 flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
9367 memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa, sizeof(flow_create_rqst->sa));
9368 memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da, sizeof(flow_create_rqst->da));
9369 /* CAUTION: ring::base_addr already in Little Endian */
9370 flow_create_rqst->flow_ring_ptr.low_addr = flow_ring->base_addr.low_addr;
9371 flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr;
9372 flow_create_rqst->max_items = htol16(prot->h2d_max_txpost);
9373 flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE);
9374 flow_create_rqst->if_flags = 0;
9375
9376 #ifdef DHD_HP2P
/* Create an HP2P flow ring only if HP2P is enabled, TID == HP2P_PRIO, the interface is
 * AWDL (or infra, if the user enabled hp2p_infra_enable through the iovar), the traffic
 * is not multicast, and no other HP2P flow is currently active.
 */
9381 if (dhd->hp2p_capable && !dhd->hp2p_ring_active &&
9382 flow_ring_node->flow_info.tid == HP2P_PRIO &&
9383 (dhd->hp2p_infra_enable || flow_create_rqst->msg.if_id) &&
9384 !ETHER_ISMULTI(flow_create_rqst->da)) {
9385 flow_create_rqst->if_flags |= BCMPCIE_FLOW_RING_INTF_HP2P;
9386 flow_ring_node->hp2p_ring = TRUE;
9387 dhd->hp2p_ring_active = TRUE;
9388
9389 DHD_ERROR(("%s: flow ring for HP2P tid = %d flowid = %d\n",
9390 __FUNCTION__, flow_ring_node->flow_info.tid,
9391 flow_ring_node->flowid));
9392 }
9393 #endif /* DHD_HP2P */
9394
/* Definition of the ifrm mask: bit0 = d11ac core, bit1 = d11ad core.
 * The field is currently not used for priority, so it is used solely as the ifrm mask.
 */
9398 if (IFRM_ACTIVE(dhd))
9399 flow_create_rqst->priority_ifrmmask = (1 << IFRM_DEV_0);
9400
9401 DHD_ERROR(("%s: Send Flow Create Req flow ID %d for peer " MACDBG
9402 " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
9403 MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
9404 flow_ring_node->flow_info.ifindex));
9405
9406 /* Update the flow_ring's WRITE index */
9407 if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
9408 dhd_prot_dma_indx_set(dhd, flow_ring->wr,
9409 H2D_DMA_INDX_WR_UPD, flow_ring->idx);
9410 } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(flow_ring->idx, max_flowrings)) {
9411 dhd_prot_dma_indx_set(dhd, flow_ring->wr,
9412 H2D_IFRM_INDX_WR_UPD, flow_ring->idx);
9413 } else {
9414 dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
9415 sizeof(uint16), RING_WR_UPD, flow_ring->idx);
9416 }
9417
9418 /* update control subn ring's WR index and ring doorbell to dongle */
9419 dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_create_rqst, 1);
9420
9421 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9422
9423 return BCME_OK;
9424 } /* dhd_prot_flow_ring_create */
9425
9426 /** called on receiving MSG_TYPE_FLOW_RING_CREATE_CMPLT message from dongle */
9427 static void
9428 dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg)
9429 {
9430 tx_flowring_create_response_t *flow_create_resp = (tx_flowring_create_response_t *)msg;
9431
9432 DHD_ERROR(("%s: Flow Create Response status = %d Flow %d\n", __FUNCTION__,
9433 ltoh16(flow_create_resp->cmplt.status),
9434 ltoh16(flow_create_resp->cmplt.flow_ring_id)));
9435
9436 dhd_bus_flow_ring_create_response(dhd->bus,
9437 ltoh16(flow_create_resp->cmplt.flow_ring_id),
9438 ltoh16(flow_create_resp->cmplt.status));
9439 }
9440
9441 static void
9442 dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf)
9443 {
9444 h2d_ring_create_response_t *resp = (h2d_ring_create_response_t *)buf;
9445 DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
9446 ltoh16(resp->cmplt.status),
9447 ltoh16(resp->cmplt.ring_id),
9448 ltoh32(resp->cmn_hdr.request_id)));
9449 if ((ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_DBGRING_REQ_PKTID) &&
9450 (ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_BTLOGRING_REQ_PKTID)) {
9451 DHD_ERROR(("invalid request ID with h2d ring create complete\n"));
9452 return;
9453 }
9454 if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id) &&
9455 !dhd->prot->h2dring_info_subn->create_pending) {
9456 DHD_ERROR(("info ring create status for not pending submit ring\n"));
9457 }
9458
9459 if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
9460 DHD_ERROR(("info/btlog ring create failed with status %d\n",
9461 ltoh16(resp->cmplt.status)));
9462 return;
9463 }
9464 if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id)) {
9465 dhd->prot->h2dring_info_subn->create_pending = FALSE;
9466 dhd->prot->h2dring_info_subn->inited = TRUE;
9467 DHD_ERROR(("info buffer post after ring create\n"));
9468 dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
9469 }
9470 }
9471
9472 static void
9473 dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf)
9474 {
9475 d2h_ring_create_response_t *resp = (d2h_ring_create_response_t *)buf;
9476 DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
9477 ltoh16(resp->cmplt.status),
9478 ltoh16(resp->cmplt.ring_id),
9479 ltoh32(resp->cmn_hdr.request_id)));
9480 if ((ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_DBGRING_REQ_PKTID) &&
9481 (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_BTLOGRING_REQ_PKTID) &&
9482 #ifdef DHD_HP2P
9483 (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_TXREQ_PKTID) &&
9484 (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_RXREQ_PKTID) &&
9485 #endif /* DHD_HP2P */
9486 TRUE) {
9487 DHD_ERROR(("invalid request ID with d2h ring create complete\n"));
9488 return;
9489 }
9490 if (ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_DBGRING_REQ_PKTID) {
9491 #ifdef EWP_EDL
9492 if (!dhd->dongle_edl_support)
9493 #endif // endif
9494 {
9495 if (!dhd->prot->d2hring_info_cpln->create_pending) {
9496 DHD_ERROR(("info ring create status for not pending cpl ring\n"));
9497 return;
9498 }
9499
9500 if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
9501 DHD_ERROR(("info cpl ring create failed with status %d\n",
9502 ltoh16(resp->cmplt.status)));
9503 return;
9504 }
9505 dhd->prot->d2hring_info_cpln->create_pending = FALSE;
9506 dhd->prot->d2hring_info_cpln->inited = TRUE;
9507 }
9508 #ifdef EWP_EDL
9509 else {
9510 if (!dhd->prot->d2hring_edl->create_pending) {
9511 DHD_ERROR(("edl ring create status for not pending cpl ring\n"));
9512 return;
9513 }
9514
9515 if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
9516 DHD_ERROR(("edl cpl ring create failed with status %d\n",
9517 ltoh16(resp->cmplt.status)));
9518 return;
9519 }
9520 dhd->prot->d2hring_edl->create_pending = FALSE;
9521 dhd->prot->d2hring_edl->inited = TRUE;
9522 }
9523 #endif /* EWP_EDL */
9524 }
9525
9526 #ifdef DHD_HP2P
9527 if (dhd->prot->d2hring_hp2p_txcpl &&
9528 ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_TXREQ_PKTID) {
9529 if (!dhd->prot->d2hring_hp2p_txcpl->create_pending) {
9530 DHD_ERROR(("HPP tx ring create status for not pending cpl ring\n"));
9531 return;
9532 }
9533
9534 if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
9535 DHD_ERROR(("HPP tx cpl ring create failed with status %d\n",
9536 ltoh16(resp->cmplt.status)));
9537 return;
9538 }
9539 dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
9540 dhd->prot->d2hring_hp2p_txcpl->inited = TRUE;
9541 }
9542 if (dhd->prot->d2hring_hp2p_rxcpl &&
9543 ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_RXREQ_PKTID) {
9544 if (!dhd->prot->d2hring_hp2p_rxcpl->create_pending) {
9545 DHD_ERROR(("HPP rx ring create status for not pending cpl ring\n"));
9546 return;
9547 }
9548
9549 if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
9550 DHD_ERROR(("HPP rx cpl ring create failed with status %d\n",
9551 ltoh16(resp->cmplt.status)));
9552 return;
9553 }
9554 dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
9555 dhd->prot->d2hring_hp2p_rxcpl->inited = TRUE;
9556 }
9557 #endif /* DHD_HP2P */
9558 }
9559
9560 static void
9561 dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf)
9562 {
9563 d2h_mailbox_data_t *d2h_data;
9564
9565 d2h_data = (d2h_mailbox_data_t *)buf;
9566 DHD_INFO(("%s dhd_prot_process_d2h_mb_data, 0x%04x\n", __FUNCTION__,
9567 d2h_data->d2h_mailbox_data));
9568 dhd_bus_handle_mb_data(dhd->bus, d2h_data->d2h_mailbox_data);
9569 }
9570
9571 static void
9572 dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf)
9573 {
9574 DHD_ERROR(("Timesunc feature not compiled in but GOT HOST_TS_COMPLETE\n"));
9575
9576 }
9577
9578 /** called on e.g. flow ring delete */
9579 void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info)
9580 {
9581 msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
9582 dhd_prot_ring_detach(dhd, flow_ring);
9583 DHD_INFO(("%s Cleaning up Flow \n", __FUNCTION__));
9584 }
9585
9586 void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info,
9587 struct bcmstrbuf *strbuf, const char * fmt)
9588 {
const char *default_fmt =
"RD %d WR %d BASE(VA) %p BASE(PA) %x:%x "
"WORK_ITEM_SIZE %d MAX_WORK_ITEMS %d TOTAL_SIZE %d\n";
9592 msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
9593 uint16 rd, wr;
9594 uint32 dma_buf_len = flow_ring->max_items * flow_ring->item_len;
9595
9596 if (fmt == NULL) {
9597 fmt = default_fmt;
9598 }
9599
9600 if (dhd->bus->is_linkdown) {
9601 DHD_ERROR(("%s: Skip dumping flowring due to Link down\n", __FUNCTION__));
9602 return;
9603 }
9604
9605 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
9606 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
9607 bcm_bprintf(strbuf, fmt, rd, wr, flow_ring->dma_buf.va,
9608 ltoh32(flow_ring->base_addr.high_addr),
9609 ltoh32(flow_ring->base_addr.low_addr),
9610 flow_ring->item_len, flow_ring->max_items,
9611 dma_buf_len);
9612 }
9613
9614 void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
9615 {
9616 dhd_prot_t *prot = dhd->prot;
9617 bcm_bprintf(strbuf, "IPCrevs: Dev %d, \t Host %d, \tactive %d\n",
9618 dhd->prot->device_ipc_version,
9619 dhd->prot->host_ipc_version,
9620 dhd->prot->active_ipc_version);
9621
9622 bcm_bprintf(strbuf, "max Host TS bufs to post: %d, \t posted %d \n",
9623 dhd->prot->max_tsbufpost, dhd->prot->cur_ts_bufs_posted);
9624 bcm_bprintf(strbuf, "max INFO bufs to post: %d, \t posted %d \n",
9625 dhd->prot->max_infobufpost, dhd->prot->infobufpost);
9626 bcm_bprintf(strbuf, "max event bufs to post: %d, \t posted %d \n",
9627 dhd->prot->max_eventbufpost, dhd->prot->cur_event_bufs_posted);
9628 bcm_bprintf(strbuf, "max ioctlresp bufs to post: %d, \t posted %d \n",
9629 dhd->prot->max_ioctlrespbufpost, dhd->prot->cur_ioctlresp_bufs_posted);
9630 bcm_bprintf(strbuf, "max RX bufs to post: %d, \t posted %d \n",
9631 dhd->prot->max_rxbufpost, dhd->prot->rxbufpost);
9632
9633 bcm_bprintf(strbuf,
9634 "%14s %5s %5s %17s %17s %14s %14s %10s\n",
9635 "Type", "RD", "WR", "BASE(VA)", "BASE(PA)",
9636 "WORK_ITEM_SIZE", "MAX_WORK_ITEMS", "TOTAL_SIZE");
9637 bcm_bprintf(strbuf, "%14s", "H2DCtrlPost");
9638 dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, strbuf,
9639 " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9640 bcm_bprintf(strbuf, "%14s", "D2HCtrlCpl");
9641 dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, strbuf,
9642 " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
bcm_bprintf(strbuf, "%14s", "H2DRxPost");
9644 dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, strbuf,
9645 " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9646 bcm_bprintf(strbuf, "%14s", "D2HRxCpl");
9647 dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, strbuf,
9648 " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9649 bcm_bprintf(strbuf, "%14s", "D2HTxCpl");
9650 dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, strbuf,
9651 " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9652 if (dhd->prot->h2dring_info_subn != NULL && dhd->prot->d2hring_info_cpln != NULL) {
9653 bcm_bprintf(strbuf, "%14s", "H2DRingInfoSub");
9654 dhd_prot_print_flow_ring(dhd, prot->h2dring_info_subn, strbuf,
9655 " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9656 bcm_bprintf(strbuf, "%14s", "D2HRingInfoCpl");
9657 dhd_prot_print_flow_ring(dhd, prot->d2hring_info_cpln, strbuf,
9658 " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9659 }
9660 if (dhd->prot->d2hring_edl != NULL) {
9661 bcm_bprintf(strbuf, "%14s", "D2HRingEDL");
9662 dhd_prot_print_flow_ring(dhd, prot->d2hring_edl, strbuf,
9663 " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9664 }
9665
9666 bcm_bprintf(strbuf, "active_tx_count %d pktidmap_avail(ctrl/rx/tx) %d %d %d\n",
9667 OSL_ATOMIC_READ(dhd->osh, &dhd->prot->active_tx_count),
9668 DHD_PKTID_AVAIL(dhd->prot->pktid_ctrl_map),
9669 DHD_PKTID_AVAIL(dhd->prot->pktid_rx_map),
9670 DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map));
9671
9672 }
9673
9674 int
9675 dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
9676 {
9677 tx_flowring_delete_request_t *flow_delete_rqst;
9678 dhd_prot_t *prot = dhd->prot;
9679 unsigned long flags;
9680 uint16 alloced = 0;
9681 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
9682
9683 DHD_RING_LOCK(ring->ring_lock, flags);
9684
9685 /* Request for ring buffer space */
9686 flow_delete_rqst = (tx_flowring_delete_request_t *)
9687 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
9688
9689 if (flow_delete_rqst == NULL) {
9690 DHD_RING_UNLOCK(ring->ring_lock, flags);
9691 DHD_ERROR(("%s: Flow Delete Req - failure ring space\n", __FUNCTION__));
9692 return BCME_NOMEM;
9693 }
9694
9695 /* Common msg buf hdr */
9696 flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE;
9697 flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
9698 flow_delete_rqst->msg.request_id = htol32(0); /* TBD */
9699 flow_delete_rqst->msg.flags = ring->current_phase;
9700
9701 flow_delete_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
9702 ring->seqnum++;
9703
9704 /* Update Delete info */
9705 flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
9706 flow_delete_rqst->reason = htol16(BCME_OK);
9707
9708 DHD_ERROR(("%s: Send Flow Delete Req RING ID %d for peer " MACDBG
9709 " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
9710 MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
9711 flow_ring_node->flow_info.ifindex));
9712
9713 /* update ring's WR index and ring doorbell to dongle */
9714 dhd_prot_ring_write_complete(dhd, ring, flow_delete_rqst, 1);
9715
9716 DHD_RING_UNLOCK(ring->ring_lock, flags);
9717
9718 return BCME_OK;
9719 }
9720
9721 static void BCMFASTPATH
9722 dhd_prot_flow_ring_fastdelete(dhd_pub_t *dhd, uint16 flowid, uint16 rd_idx)
9723 {
9724 flow_ring_node_t *flow_ring_node = DHD_FLOW_RING(dhd, flowid);
9725 msgbuf_ring_t *ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
9726 host_txbuf_cmpl_t txstatus;
9727 host_txbuf_post_t *txdesc;
9728 uint16 wr_idx;
9729
9730 DHD_INFO(("%s: FAST delete ring, flowid=%d, rd_idx=%d, wr_idx=%d\n",
9731 __FUNCTION__, flowid, rd_idx, ring->wr));
9732
9733 memset(&txstatus, 0, sizeof(txstatus));
9734 txstatus.compl_hdr.flow_ring_id = flowid;
9735 txstatus.cmn_hdr.if_id = flow_ring_node->flow_info.ifindex;
9736 wr_idx = ring->wr;
9737
9738 while (wr_idx != rd_idx) {
9739 if (wr_idx)
9740 wr_idx--;
9741 else
9742 wr_idx = ring->max_items - 1;
9743 txdesc = (host_txbuf_post_t *)((char *)DHD_RING_BGN_VA(ring) +
9744 (wr_idx * ring->item_len));
9745 txstatus.cmn_hdr.request_id = txdesc->cmn_hdr.request_id;
9746 dhd_prot_txstatus_process(dhd, &txstatus);
9747 }
9748 }
9749
9750 static void
9751 dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg)
9752 {
9753 tx_flowring_delete_response_t *flow_delete_resp = (tx_flowring_delete_response_t *)msg;
9754
9755 DHD_ERROR(("%s: Flow Delete Response status = %d Flow %d\n", __FUNCTION__,
9756 flow_delete_resp->cmplt.status, flow_delete_resp->cmplt.flow_ring_id));
9757
9758 if (dhd->fast_delete_ring_support) {
9759 dhd_prot_flow_ring_fastdelete(dhd, flow_delete_resp->cmplt.flow_ring_id,
9760 flow_delete_resp->read_idx);
9761 }
9762 dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id,
9763 flow_delete_resp->cmplt.status);
9764 }
9765
9766 static void
9767 dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg)
9768 {
9769 #ifdef IDLE_TX_FLOW_MGMT
9770 tx_idle_flowring_resume_response_t *flow_resume_resp =
9771 (tx_idle_flowring_resume_response_t *)msg;
9772
9773 DHD_ERROR(("%s Flow resume Response status = %d Flow %d\n", __FUNCTION__,
9774 flow_resume_resp->cmplt.status, flow_resume_resp->cmplt.flow_ring_id));
9775
9776 dhd_bus_flow_ring_resume_response(dhd->bus, flow_resume_resp->cmplt.flow_ring_id,
9777 flow_resume_resp->cmplt.status);
9778 #endif /* IDLE_TX_FLOW_MGMT */
9779 }
9780
9781 static void
9782 dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg)
9783 {
9784 #ifdef IDLE_TX_FLOW_MGMT
9785 int16 status;
9786 tx_idle_flowring_suspend_response_t *flow_suspend_resp =
9787 (tx_idle_flowring_suspend_response_t *)msg;
9788 status = flow_suspend_resp->cmplt.status;
9789
9790 DHD_ERROR(("%s Flow id %d suspend Response status = %d\n",
9791 __FUNCTION__, flow_suspend_resp->cmplt.flow_ring_id,
9792 status));
9793
9794 if (status != BCME_OK) {
9795
9796 DHD_ERROR(("%s Error in Suspending Flow rings!!"
9797 "Dongle will still be polling idle rings!!Status = %d \n",
9798 __FUNCTION__, status));
9799 }
9800 #endif /* IDLE_TX_FLOW_MGMT */
9801 }
9802
9803 int
9804 dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
9805 {
9806 tx_flowring_flush_request_t *flow_flush_rqst;
9807 dhd_prot_t *prot = dhd->prot;
9808 unsigned long flags;
9809 uint16 alloced = 0;
9810 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
9811
9812 DHD_RING_LOCK(ring->ring_lock, flags);
9813
9814 /* Request for ring buffer space */
9815 flow_flush_rqst = (tx_flowring_flush_request_t *)
9816 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
9817 if (flow_flush_rqst == NULL) {
9818 DHD_RING_UNLOCK(ring->ring_lock, flags);
9819 DHD_ERROR(("%s: Flow Flush Req - failure ring space\n", __FUNCTION__));
9820 return BCME_NOMEM;
9821 }
9822
9823 /* Common msg buf hdr */
9824 flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH;
9825 flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
9826 flow_flush_rqst->msg.request_id = htol32(0); /* TBD */
9827 flow_flush_rqst->msg.flags = ring->current_phase;
9828 flow_flush_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
9829 ring->seqnum++;
9830
9831 flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
9832 flow_flush_rqst->reason = htol16(BCME_OK);
9833
9834 DHD_INFO(("%s: Send Flow Flush Req\n", __FUNCTION__));
9835
9836 /* update ring's WR index and ring doorbell to dongle */
9837 dhd_prot_ring_write_complete(dhd, ring, flow_flush_rqst, 1);
9838
9839 DHD_RING_UNLOCK(ring->ring_lock, flags);
9840
9841 return BCME_OK;
9842 } /* dhd_prot_flow_ring_flush */
9843
9844 static void
9845 dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg)
9846 {
9847 tx_flowring_flush_response_t *flow_flush_resp = (tx_flowring_flush_response_t *)msg;
9848
9849 DHD_INFO(("%s: Flow Flush Response status = %d\n", __FUNCTION__,
9850 flow_flush_resp->cmplt.status));
9851
9852 dhd_bus_flow_ring_flush_response(dhd->bus, flow_flush_resp->cmplt.flow_ring_id,
9853 flow_flush_resp->cmplt.status);
9854 }
9855
9856 /**
9857 * Request dongle to configure soft doorbells for D2H rings. Host populated soft
9858 * doorbell information is transferred to dongle via the d2h ring config control
9859 * message.
9860 */
9861 void
9862 dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd)
9863 {
9864 #if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
9865 uint16 ring_idx;
9866 uint8 *msg_next;
9867 void *msg_start;
9868 uint16 alloced = 0;
9869 unsigned long flags;
9870 dhd_prot_t *prot = dhd->prot;
9871 ring_config_req_t *ring_config_req;
9872 bcmpcie_soft_doorbell_t *soft_doorbell;
9873 msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
9874 const uint16 d2h_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
9875
9876 /* Claim space for d2h_ring number of d2h_ring_config_req_t messages */
9877 DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
9878 msg_start = dhd_prot_alloc_ring_space(dhd, ctrl_ring, d2h_rings, &alloced, TRUE);
9879
9880 if (msg_start == NULL) {
9881 DHD_ERROR(("%s Msgbuf no space for %d D2H ring config soft doorbells\n",
9882 __FUNCTION__, d2h_rings));
9883 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9884 return;
9885 }
9886
9887 msg_next = (uint8*)msg_start;
9888
9889 for (ring_idx = 0; ring_idx < d2h_rings; ring_idx++) {
9890
9891 /* position the ring_config_req into the ctrl subm ring */
9892 ring_config_req = (ring_config_req_t *)msg_next;
9893
9894 /* Common msg header */
9895 ring_config_req->msg.msg_type = MSG_TYPE_D2H_RING_CONFIG;
9896 ring_config_req->msg.if_id = 0;
9897 ring_config_req->msg.flags = 0;
9898
9899 ring_config_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
9900 ctrl_ring->seqnum++;
9901
9902 ring_config_req->msg.request_id = htol32(DHD_FAKE_PKTID); /* unused */
9903
9904 /* Ring Config subtype and d2h ring_id */
9905 ring_config_req->subtype = htol16(D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL);
9906 ring_config_req->ring_id = htol16(DHD_D2H_RINGID(ring_idx));
9907
9908 /* Host soft doorbell configuration */
9909 soft_doorbell = &prot->soft_doorbell[ring_idx];
9910
9911 ring_config_req->soft_doorbell.value = htol32(soft_doorbell->value);
9912 ring_config_req->soft_doorbell.haddr.high =
9913 htol32(soft_doorbell->haddr.high);
9914 ring_config_req->soft_doorbell.haddr.low =
9915 htol32(soft_doorbell->haddr.low);
9916 ring_config_req->soft_doorbell.items = htol16(soft_doorbell->items);
9917 ring_config_req->soft_doorbell.msecs = htol16(soft_doorbell->msecs);
9918
9919 DHD_INFO(("%s: Soft doorbell haddr 0x%08x 0x%08x value 0x%08x\n",
9920 __FUNCTION__, ring_config_req->soft_doorbell.haddr.high,
9921 ring_config_req->soft_doorbell.haddr.low,
9922 ring_config_req->soft_doorbell.value));
9923
9924 msg_next = msg_next + ctrl_ring->item_len;
9925 }
9926
9927 /* update control subn ring's WR index and ring doorbell to dongle */
9928 dhd_prot_ring_write_complete(dhd, ctrl_ring, msg_start, d2h_rings);
9929
9930 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9931
9932 #endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
9933 }
9934
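/** Handler for the D2H ring config completion message; only logs the status and ring id. */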
9935 static void
9936 dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg)
9937 {
9938 DHD_INFO(("%s: Ring Config Response - status %d ringid %d\n",
9939 __FUNCTION__, ltoh16(((ring_config_resp_t *)msg)->compl_hdr.status),
9940 ltoh16(((ring_config_resp_t *)msg)->compl_hdr.flow_ring_id)));
9941 }
9942
9943 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
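/** Extract the trap signature TLV (trap_t register snapshot) from the extended trap data. */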
9944 void
9945 copy_ext_trap_sig(dhd_pub_t *dhd, trap_t *tr)
9946 {
9947 uint32 *ext_data = dhd->extended_trap_data;
9948 hnd_ext_trap_hdr_t *hdr;
9949 const bcm_tlv_t *tlv;
9950
9951 if (ext_data == NULL) {
9952 return;
9953 }
9954 /* First word is original trap_data */
9955 ext_data++;
9956
9957 /* Followed by the extended trap data header */
9958 hdr = (hnd_ext_trap_hdr_t *)ext_data;
9959
9960 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
9961 if (tlv) {
9962 memcpy(tr, &tlv->data, sizeof(struct _trap_struct));
9963 }
9964 }
9965 #define TRAP_T_NAME_OFFSET(var) {#var, OFFSETOF(trap_t, var)}
9966
9967 typedef struct {
9968 char name[HANG_INFO_TRAP_T_NAME_MAX];
9969 uint32 offset;
9970 } hang_info_trap_t;
9971
9972 #ifdef DHD_EWPR_VER2
9973 static hang_info_trap_t hang_info_trap_tbl[] = {
9974 {"reason", 0},
9975 {"ver", VENDOR_SEND_HANG_EXT_INFO_VER},
9976 {"stype", 0},
9977 TRAP_T_NAME_OFFSET(type),
9978 TRAP_T_NAME_OFFSET(epc),
9979 {"resrvd", 0},
9980 {"resrvd", 0},
9981 {"resrvd", 0},
9982 {"resrvd", 0},
9983 {"", 0}
9984 };
9985 #else
9986 static hang_info_trap_t hang_info_trap_tbl[] = {
9987 {"reason", 0},
9988 {"ver", VENDOR_SEND_HANG_EXT_INFO_VER},
9989 {"stype", 0},
9990 TRAP_T_NAME_OFFSET(type),
9991 TRAP_T_NAME_OFFSET(epc),
9992 TRAP_T_NAME_OFFSET(cpsr),
9993 TRAP_T_NAME_OFFSET(spsr),
9994 TRAP_T_NAME_OFFSET(r0),
9995 TRAP_T_NAME_OFFSET(r1),
9996 TRAP_T_NAME_OFFSET(r2),
9997 TRAP_T_NAME_OFFSET(r3),
9998 TRAP_T_NAME_OFFSET(r4),
9999 TRAP_T_NAME_OFFSET(r5),
10000 TRAP_T_NAME_OFFSET(r6),
10001 TRAP_T_NAME_OFFSET(r7),
10002 TRAP_T_NAME_OFFSET(r8),
10003 TRAP_T_NAME_OFFSET(r9),
10004 TRAP_T_NAME_OFFSET(r10),
10005 TRAP_T_NAME_OFFSET(r11),
10006 TRAP_T_NAME_OFFSET(r12),
10007 TRAP_T_NAME_OFFSET(r13),
10008 TRAP_T_NAME_OFFSET(r14),
10009 TRAP_T_NAME_OFFSET(pc),
10010 {"", 0}
10011 };
10012 #endif /* DHD_EWPR_VER2 */
10013
10014 #define TAG_TRAP_IS_STATE(tag) \
10015 ((tag == TAG_TRAP_MEMORY) || (tag == TAG_TRAP_PCIE_Q) || \
10016 (tag == TAG_TRAP_WLC_STATE) || (tag == TAG_TRAP_LOG_DATA) || \
10017 (tag == TAG_TRAP_CODE))
10018
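/**
 * Write the leading hang info fields (hang reason, hang info version, cookie,
 * trap subtype and trap EPC) into the hang info string, delimited by HANG_KEY_DEL.
 */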
10019 static void
10020 copy_hang_info_head(char *dest, trap_t *src, int len, int field_name,
10021 int *bytes_written, int *cnt, char *cookie)
10022 {
10023 uint8 *ptr;
10024 int remain_len;
10025 int i;
10026
10027 ptr = (uint8 *)src;
10028
10029 memset(dest, 0, len);
10030 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10031
10032 /* hang reason, hang info ver */
10033 for (i = 0; (i < HANG_INFO_TRAP_T_SUBTYPE_IDX) && (*cnt < HANG_FIELD_CNT_MAX);
10034 i++, (*cnt)++) {
10035 if (field_name) {
10036 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10037 *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
10038 hang_info_trap_tbl[i].name, HANG_KEY_DEL);
10039 }
10040 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10041 *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%d%c",
10042 hang_info_trap_tbl[i].offset, HANG_KEY_DEL);
	}
10045
10046 if (*cnt < HANG_FIELD_CNT_MAX) {
10047 if (field_name) {
10048 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10049 *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
10050 "cookie", HANG_KEY_DEL);
10051 }
10052 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10053 *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s%c",
10054 cookie, HANG_KEY_DEL);
10055 (*cnt)++;
10056 }
10057
10058 if (*cnt < HANG_FIELD_CNT_MAX) {
10059 if (field_name) {
10060 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10061 *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
10062 hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].name,
10063 HANG_KEY_DEL);
10064 }
10065 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10066 *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%08x%c",
10067 hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].offset,
10068 HANG_KEY_DEL);
10069 (*cnt)++;
10070 }
10071
10072 if (*cnt < HANG_FIELD_CNT_MAX) {
10073 if (field_name) {
10074 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10075 *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
10076 hang_info_trap_tbl[HANG_INFO_TRAP_T_EPC_IDX].name,
10077 HANG_KEY_DEL);
10078 }
10079 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10080 *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%08x%c",
10081 *(uint32 *)
10082 (ptr + hang_info_trap_tbl[HANG_INFO_TRAP_T_EPC_IDX].offset),
10083 HANG_KEY_DEL);
10084 (*cnt)++;
10085 }
10086 #ifdef DHD_EWPR_VER2
10087 /* put 0 for HG03 ~ HG06 (reserved for future use) */
10088 for (i = 0; (i < HANG_INFO_BIGDATA_EXTRA_KEY) && (*cnt < HANG_FIELD_CNT_MAX);
10089 i++, (*cnt)++) {
10090 if (field_name) {
10091 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10092 *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
10093 hang_info_trap_tbl[HANG_INFO_TRAP_T_EXTRA_KEY_IDX+i].name,
10094 HANG_KEY_DEL);
10095 }
10096 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10097 *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%d%c",
10098 hang_info_trap_tbl[HANG_INFO_TRAP_T_EXTRA_KEY_IDX+i].offset,
10099 HANG_KEY_DEL);
10100 }
10101 #endif /* DHD_EWPR_VER2 */
10102 }
10103 #ifndef DHD_EWPR_VER2
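/**
 * Append the raw trap_t register values (type, epc, cpsr, spsr, r0-r14, pc) to the
 * hang info string, using HANG_RAW_DEL as the delimiter.
 */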
10104 static void
10105 copy_hang_info_trap_t(char *dest, trap_t *src, int len, int field_name,
10106 int *bytes_written, int *cnt, char *cookie)
10107 {
10108 uint8 *ptr;
10109 int remain_len;
10110 int i;
10111
10112 ptr = (uint8 *)src;
10113
10114 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10115
10116 for (i = HANG_INFO_TRAP_T_OFFSET_IDX;
10117 (hang_info_trap_tbl[i].name[0] != 0) && (*cnt < HANG_FIELD_CNT_MAX);
10118 i++, (*cnt)++) {
10119 if (field_name) {
10120 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10121 *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%s:",
10122 HANG_RAW_DEL, hang_info_trap_tbl[i].name);
10123 }
10124 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10125 *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
10126 HANG_RAW_DEL, *(uint32 *)(ptr + hang_info_trap_tbl[i].offset));
10127 }
10128 }
10129
10130 /* Ignore compiler warnings due to -Werror=cast-qual */
10131 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
10132 #pragma GCC diagnostic push
10133 #pragma GCC diagnostic ignored "-Wcast-qual"
10134 #endif // endif
10135
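/**
 * Append the trapped stack dump (TAG_TRAP_STACK TLV) to the hang info string,
 * padding with zeros up to HANG_FIELD_TRAP_T_STACK_CNT_MAX entries.
 */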
10136 static void
10137 copy_hang_info_stack(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
10138 {
10139 int remain_len;
10140 int i = 0;
10141 const uint32 *stack;
10142 uint32 *ext_data = dhd->extended_trap_data;
10143 hnd_ext_trap_hdr_t *hdr;
10144 const bcm_tlv_t *tlv;
10145 int remain_stack_cnt = 0;
10146 uint32 dummy_data = 0;
10147 int bigdata_key_stack_cnt = 0;
10148
10149 if (ext_data == NULL) {
10150 return;
10151 }
10152 /* First word is original trap_data */
10153 ext_data++;
10154
10155 /* Followed by the extended trap data header */
10156 hdr = (hnd_ext_trap_hdr_t *)ext_data;
10157
10158 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
10159
10160 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10161
10162 if (tlv) {
10163 stack = (const uint32 *)tlv->data;
10164
10165 *bytes_written += scnprintf(&dest[*bytes_written], remain_len,
10166 "%08x", *(uint32 *)(stack++));
10167 (*cnt)++;
10168 if (*cnt >= HANG_FIELD_CNT_MAX) {
10169 return;
10170 }
10171 for (i = 1; i < (uint32)(tlv->len / sizeof(uint32)); i++, bigdata_key_stack_cnt++) {
10172 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
			/* Key data for bigdata uses HANG_KEY_DEL (space); raw data uses HANG_RAW_DEL ('_') */
10174 *bytes_written += scnprintf(&dest[*bytes_written], remain_len,
10175 "%c%08x",
10176 i <= HANG_INFO_BIGDATA_KEY_STACK_CNT ? HANG_KEY_DEL : HANG_RAW_DEL,
10177 *(uint32 *)(stack++));
10178
10179 (*cnt)++;
10180 if ((*cnt >= HANG_FIELD_CNT_MAX) ||
10181 (i >= HANG_FIELD_TRAP_T_STACK_CNT_MAX)) {
10182 return;
10183 }
10184 }
10185 }
10186
10187 remain_stack_cnt = HANG_FIELD_TRAP_T_STACK_CNT_MAX - i;
10188
10189 for (i = 0; i < remain_stack_cnt; i++) {
10190 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10191 *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
10192 HANG_RAW_DEL, dummy_data);
10193 (*cnt)++;
10194 if (*cnt >= HANG_FIELD_CNT_MAX) {
10195 return;
10196 }
10197 }
10198
10199 }
10200
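/**
 * Append the remaining extended trap data (everything after the signature and
 * stack TLVs) to the hang info string as raw 32-bit words.
 */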
10201 static void
10202 copy_hang_info_specific(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
10203 {
10204 int remain_len;
10205 int i;
10206 const uint32 *data;
10207 uint32 *ext_data = dhd->extended_trap_data;
10208 hnd_ext_trap_hdr_t *hdr;
10209 const bcm_tlv_t *tlv;
10210 int remain_trap_data = 0;
10211 uint8 buf_u8[sizeof(uint32)] = { 0, };
10212 const uint8 *p_u8;
10213
10214 if (ext_data == NULL) {
10215 return;
10216 }
10217 /* First word is original trap_data */
10218 ext_data++;
10219
10220 /* Followed by the extended trap data header */
10221 hdr = (hnd_ext_trap_hdr_t *)ext_data;
10222
10223 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
10224 if (tlv) {
		/* hdr->len includes the TLV header (tag + len) */
10226 remain_trap_data = (hdr->len - tlv->len - sizeof(uint16));
10227 }
10228
10229 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
10230 if (tlv) {
		/* hdr->len includes the TLV header (tag + len) */
10232 remain_trap_data -= (tlv->len + sizeof(uint16));
10233 }
10234
10235 data = (const uint32 *)(hdr->data + (hdr->len - remain_trap_data));
10236
10237 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10238
10239 for (i = 0; i < (uint32)(remain_trap_data / sizeof(uint32)) && *cnt < HANG_FIELD_CNT_MAX;
10240 i++, (*cnt)++) {
10241 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10242 *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
10243 HANG_RAW_DEL, *(uint32 *)(data++));
10244 }
10245
10246 if (*cnt >= HANG_FIELD_CNT_MAX) {
10247 return;
10248 }
10249
10250 remain_trap_data -= (sizeof(uint32) * i);
10251
10252 if (remain_trap_data > sizeof(buf_u8)) {
		DHD_ERROR(("%s: clamping remain_trap_data to buffer size\n", __FUNCTION__));
10254 remain_trap_data = sizeof(buf_u8);
10255 }
10256
10257 if (remain_trap_data) {
10258 p_u8 = (const uint8 *)data;
10259 for (i = 0; i < remain_trap_data; i++) {
10260 buf_u8[i] = *(const uint8 *)(p_u8++);
10261 }
10262
10263 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10264 *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
10265 HANG_RAW_DEL, ltoh32_ua(buf_u8));
10266 (*cnt)++;
10267 }
10268 }
10269 #endif /* DHD_EWPR_VER2 */
10270
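/** Scan the extended trap data TLVs and report the first non-state tag as the trap subtype. */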
10271 static void
10272 get_hang_info_trap_subtype(dhd_pub_t *dhd, uint32 *subtype)
10273 {
10274 uint32 i;
10275 uint32 *ext_data = dhd->extended_trap_data;
10276 hnd_ext_trap_hdr_t *hdr;
10277 const bcm_tlv_t *tlv;
10278
10279 /* First word is original trap_data */
10280 ext_data++;
10281
10282 /* Followed by the extended trap data header */
10283 hdr = (hnd_ext_trap_hdr_t *)ext_data;
10284
	/* Report the first trap tag that is not generic state info as the subtype */
10286 for (i = TAG_TRAP_DEEPSLEEP; i < TAG_TRAP_LAST; i++) {
10287 tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
10288 if (tlv) {
10289 if (!TAG_TRAP_IS_STATE(i)) {
10290 *subtype = i;
10291 return;
10292 }
10293 }
10294 }
10295 }
10296 #ifdef DHD_EWPR_VER2
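/**
 * Base64 encode the whole extended trap data blob and append it to the hang info
 * string (EWP reporting version 2).
 */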
10297 static void
10298 copy_hang_info_etd_base64(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
10299 {
10300 int remain_len;
10301 uint32 *ext_data = dhd->extended_trap_data;
10302 hnd_ext_trap_hdr_t *hdr;
10303 char *base64_out = NULL;
10304 int base64_cnt;
10305 int max_base64_len = HANG_INFO_BASE64_BUFFER_SIZE;
10306
10307 if (ext_data == NULL) {
10308 return;
10309 }
10310 /* First word is original trap_data */
10311 ext_data++;
10312
10313 /* Followed by the extended trap data header */
10314 hdr = (hnd_ext_trap_hdr_t *)ext_data;
10315
10316 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
10317
10318 if (remain_len <= 0) {
10319 DHD_ERROR(("%s: no space to put etd\n", __FUNCTION__));
10320 return;
10321 }
10322
10323 if (remain_len < max_base64_len) {
		DHD_ERROR(("%s: clamping max base64 length to remaining length %d\n", __FUNCTION__,
10325 remain_len));
10326 max_base64_len = remain_len;
10327 }
10328
10329 base64_out = MALLOCZ(dhd->osh, HANG_INFO_BASE64_BUFFER_SIZE);
10330 if (base64_out == NULL) {
10331 DHD_ERROR(("%s: MALLOC failed for size %d\n",
10332 __FUNCTION__, HANG_INFO_BASE64_BUFFER_SIZE));
10333 return;
10334 }
10335
10336 if (hdr->len > 0) {
10337 base64_cnt = dhd_base64_encode(hdr->data, hdr->len, base64_out, max_base64_len);
10338 if (base64_cnt == 0) {
10339 DHD_ERROR(("%s: base64 encoding error\n", __FUNCTION__));
10340 }
10341 }
10342
10343 *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s",
10344 base64_out);
10345 (*cnt)++;
10346 MFREE(dhd->osh, base64_out, HANG_INFO_BASE64_BUFFER_SIZE);
10347 }
10348 #endif /* DHD_EWPR_VER2 */
10349
10350 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
10351 #pragma GCC diagnostic pop
10352 #endif // endif
10353
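/**
 * Build the hang info string reported with the vendor HANG event: common head fields
 * first, then either the base64 encoded extended trap data (DHD_EWPR_VER2) or the
 * stack dump, trap_t registers and remaining trap data.
 */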
10354 void
10355 copy_hang_info_trap(dhd_pub_t *dhd)
10356 {
10357 trap_t tr;
10358 int bytes_written;
10359 int trap_subtype = 0;
10360
10361 if (!dhd || !dhd->hang_info) {
10362 DHD_ERROR(("%s dhd=%p hang_info=%p\n", __FUNCTION__,
10363 dhd, (dhd ? dhd->hang_info : NULL)));
10364 return;
10365 }
10366
10367 if (!dhd->dongle_trap_occured) {
10368 DHD_ERROR(("%s: dongle_trap_occured is FALSE\n", __FUNCTION__));
10369 return;
10370 }
10371
10372 memset(&tr, 0x00, sizeof(struct _trap_struct));
10373
10374 copy_ext_trap_sig(dhd, &tr);
10375 get_hang_info_trap_subtype(dhd, &trap_subtype);
10376
10377 hang_info_trap_tbl[HANG_INFO_TRAP_T_REASON_IDX].offset = HANG_REASON_DONGLE_TRAP;
10378 hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].offset = trap_subtype;
10379
10380 bytes_written = 0;
10381 dhd->hang_info_cnt = 0;
10382 get_debug_dump_time(dhd->debug_dump_time_hang_str);
10383 copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str);
10384
10385 copy_hang_info_head(dhd->hang_info, &tr, VENDOR_SEND_HANG_EXT_INFO_LEN, FALSE,
10386 &bytes_written, &dhd->hang_info_cnt, dhd->debug_dump_time_hang_str);
10387
	DHD_INFO(("hang info head cnt: %d len: %d data: %s\n",
10389 dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
10390
10391 clear_debug_dump_time(dhd->debug_dump_time_hang_str);
10392
10393 #ifdef DHD_EWPR_VER2
10394 /* stack info & trap info are included in etd data */
10395
10396 /* extended trap data dump */
10397 if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
10398 copy_hang_info_etd_base64(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
10399 DHD_INFO(("hang info specific cnt: %d len: %d data: %s\n",
10400 dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
10401 }
10402 #else
10403 if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
10404 copy_hang_info_stack(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
10405 DHD_INFO(("hang info stack cnt: %d len: %d data: %s\n",
10406 dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
10407 }
10408
10409 if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
10410 copy_hang_info_trap_t(dhd->hang_info, &tr, VENDOR_SEND_HANG_EXT_INFO_LEN, FALSE,
10411 &bytes_written, &dhd->hang_info_cnt, dhd->debug_dump_time_hang_str);
10412 DHD_INFO(("hang info trap_t cnt: %d len: %d data: %s\n",
10413 dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
10414 }
10415
10416 if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
10417 copy_hang_info_specific(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
10418 DHD_INFO(("hang info specific cnt: %d len: %d data: %s\n",
10419 dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
10420 }
10421 #endif /* DHD_EWPR_VER2 */
10422 }
10423 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
10424
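/**
 * Dump protocol debug state: driver/firmware versions, IPC revisions, buffer post
 * counters and the RD/WR state of every common msgbuf ring (host copy, TCM ring
 * state addresses and, when the PCIe link is up, the shared memory copy).
 */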
10425 int
10426 dhd_prot_debug_info_print(dhd_pub_t *dhd)
10427 {
10428 dhd_prot_t *prot = dhd->prot;
10429 msgbuf_ring_t *ring;
10430 uint16 rd, wr;
10431 uint32 dma_buf_len;
10432 uint64 current_time;
10433 ulong ring_tcm_rd_addr; /* dongle address */
10434 ulong ring_tcm_wr_addr; /* dongle address */
10435
10436 DHD_ERROR(("\n ------- DUMPING VERSION INFORMATION ------- \r\n"));
10437 DHD_ERROR(("DHD: %s\n", dhd_version));
10438 DHD_ERROR(("Firmware: %s\n", fw_version));
10439
10440 DHD_ERROR(("\n ------- DUMPING PROTOCOL INFORMATION ------- \r\n"));
10441 DHD_ERROR(("ICPrevs: Dev %d, Host %d, active %d\n",
10442 prot->device_ipc_version,
10443 prot->host_ipc_version,
10444 prot->active_ipc_version));
10445 DHD_ERROR(("d2h_intr_method -> %s\n",
10446 dhd->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX"));
10447 DHD_ERROR(("max Host TS bufs to post: %d, posted %d\n",
10448 prot->max_tsbufpost, prot->cur_ts_bufs_posted));
10449 DHD_ERROR(("max INFO bufs to post: %d, posted %d\n",
10450 prot->max_infobufpost, prot->infobufpost));
10451 DHD_ERROR(("max event bufs to post: %d, posted %d\n",
10452 prot->max_eventbufpost, prot->cur_event_bufs_posted));
10453 DHD_ERROR(("max ioctlresp bufs to post: %d, posted %d\n",
10454 prot->max_ioctlrespbufpost, prot->cur_ioctlresp_bufs_posted));
10455 DHD_ERROR(("max RX bufs to post: %d, posted %d\n",
10456 prot->max_rxbufpost, prot->rxbufpost));
10457 DHD_ERROR(("h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
10458 h2d_max_txpost, prot->h2d_max_txpost));
10459
10460 current_time = OSL_LOCALTIME_NS();
10461 DHD_ERROR(("current_time="SEC_USEC_FMT"\n", GET_SEC_USEC(current_time)));
10462 DHD_ERROR(("ioctl_fillup_time="SEC_USEC_FMT
10463 " ioctl_ack_time="SEC_USEC_FMT
10464 " ioctl_cmplt_time="SEC_USEC_FMT"\n",
10465 GET_SEC_USEC(prot->ioctl_fillup_time),
10466 GET_SEC_USEC(prot->ioctl_ack_time),
10467 GET_SEC_USEC(prot->ioctl_cmplt_time)));
10468
10469 /* Check PCIe INT registers */
10470 if (!dhd_pcie_dump_int_regs(dhd)) {
10471 DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
10472 dhd->bus->is_linkdown = TRUE;
10473 }
10474
10475 DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n"));
10476
10477 ring = &prot->h2dring_ctrl_subn;
10478 dma_buf_len = ring->max_items * ring->item_len;
10479 ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10480 ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10481 DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
10482 "SIZE %d \r\n",
10483 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10484 ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len));
10485 DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10486 if (dhd->bus->is_linkdown) {
10487 DHD_ERROR(("CtrlPost: From Shared Mem: RD and WR are invalid"
10488 " due to PCIe link down\r\n"));
10489 } else {
10490 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10491 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10492 DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10493 }
10494 DHD_ERROR(("CtrlPost: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
10495
10496 ring = &prot->d2hring_ctrl_cpln;
10497 dma_buf_len = ring->max_items * ring->item_len;
10498 ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10499 ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10500 DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
10501 "SIZE %d \r\n",
10502 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10503 ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len));
10504 DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10505 if (dhd->bus->is_linkdown) {
10506 DHD_ERROR(("CtrlCpl: From Shared Mem: RD and WR are invalid"
10507 " due to PCIe link down\r\n"));
10508 } else {
10509 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10510 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10511 DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10512 }
10513 DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
10514
10515 ring = prot->h2dring_info_subn;
10516 if (ring) {
10517 dma_buf_len = ring->max_items * ring->item_len;
10518 ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10519 ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10520 DHD_ERROR(("InfoSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
10521 "SIZE %d \r\n",
10522 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10523 ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
10524 dma_buf_len));
10525 DHD_ERROR(("InfoSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10526 if (dhd->bus->is_linkdown) {
10527 DHD_ERROR(("InfoSub: From Shared Mem: RD and WR are invalid"
10528 " due to PCIe link down\r\n"));
10529 } else {
10530 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10531 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10532 DHD_ERROR(("InfoSub: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10533 }
10534 DHD_ERROR(("InfoSub: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
10535 }
10536 ring = prot->d2hring_info_cpln;
10537 if (ring) {
10538 dma_buf_len = ring->max_items * ring->item_len;
10539 ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10540 ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10541 DHD_ERROR(("InfoCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
10542 "SIZE %d \r\n",
10543 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10544 ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
10545 dma_buf_len));
10546 DHD_ERROR(("InfoCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10547 if (dhd->bus->is_linkdown) {
10548 DHD_ERROR(("InfoCpl: From Shared Mem: RD and WR are invalid"
10549 " due to PCIe link down\r\n"));
10550 } else {
10551 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10552 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10553 DHD_ERROR(("InfoCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10554 }
10555 DHD_ERROR(("InfoCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
10556 }
10557
10558 ring = &prot->d2hring_tx_cpln;
10559 if (ring) {
10560 ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10561 ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10562 dma_buf_len = ring->max_items * ring->item_len;
10563 DHD_ERROR(("TxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
10564 "SIZE %d \r\n",
10565 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10566 ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
10567 dma_buf_len));
10568 DHD_ERROR(("TxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10569 if (dhd->bus->is_linkdown) {
10570 DHD_ERROR(("TxCpl: From Shared Mem: RD and WR are invalid"
10571 " due to PCIe link down\r\n"));
10572 } else {
10573 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10574 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10575 DHD_ERROR(("TxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10576 }
10577 DHD_ERROR(("TxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
10578 }
10579
10580 ring = &prot->d2hring_rx_cpln;
10581 if (ring) {
10582 ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10583 ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10584 dma_buf_len = ring->max_items * ring->item_len;
10585 DHD_ERROR(("RxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
10586 "SIZE %d \r\n",
10587 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10588 ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
10589 dma_buf_len));
10590 DHD_ERROR(("RxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10591 if (dhd->bus->is_linkdown) {
10592 DHD_ERROR(("RxCpl: From Shared Mem: RD and WR are invalid"
10593 " due to PCIe link down\r\n"));
10594 } else {
10595 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10596 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10597 DHD_ERROR(("RxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10598 }
10599 DHD_ERROR(("RxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
10600 }
10601 #ifdef EWP_EDL
10602 ring = prot->d2hring_edl;
10603 if (ring) {
10604 ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10605 ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10606 dma_buf_len = ring->max_items * ring->item_len;
10607 DHD_ERROR(("EdlRing: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
10608 "SIZE %d \r\n",
10609 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10610 ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
10611 dma_buf_len));
10612 DHD_ERROR(("EdlRing: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10613 if (dhd->bus->is_linkdown) {
10614 DHD_ERROR(("EdlRing: From Shared Mem: RD and WR are invalid"
10615 " due to PCIe link down\r\n"));
10616 } else {
10617 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10618 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10619 DHD_ERROR(("EdlRing: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10620 }
10621 DHD_ERROR(("EdlRing: Expected seq num: %d \r\n",
10622 ring->seqnum % D2H_EPOCH_MODULO));
10623 }
10624 #endif /* EWP_EDL */
10625
10626 DHD_ERROR(("%s: cur_ioctlresp_bufs_posted %d cur_event_bufs_posted %d\n",
10627 __FUNCTION__, prot->cur_ioctlresp_bufs_posted, prot->cur_event_bufs_posted));
10628 #ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
10629 DHD_ERROR(("%s: multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n",
10630 __FUNCTION__, dhd->multi_client_flow_rings, dhd->max_multi_client_flow_rings));
10631 #endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
10632
10633 DHD_ERROR(("pktid_txq_start_cnt: %d\n", prot->pktid_txq_start_cnt));
10634 DHD_ERROR(("pktid_txq_stop_cnt: %d\n", prot->pktid_txq_stop_cnt));
10635 DHD_ERROR(("pktid_depleted_cnt: %d\n", prot->pktid_depleted_cnt));
10636
10637 dhd_pcie_debug_info_dump(dhd);
10638
10639 return 0;
10640 }
10641
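/**
 * Dump the host resident DMA index buffers: the RPTR block covering the H2D common
 * and flow rings, and the WPTR block covering the D2H completion rings.
 */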
10642 int
10643 dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
10644 {
10645 uint32 *ptr;
10646 uint32 value;
10647
10648 if (dhd->prot->d2h_dma_indx_wr_buf.va) {
10649 uint32 i;
10650 uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus);
10651
10652 OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va,
10653 dhd->prot->d2h_dma_indx_wr_buf.len);
10654
10655 ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va);
10656
10657 bcm_bprintf(b, "\n max_tx_queues %d\n", max_h2d_queues);
10658
10659 bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%04x\n", ptr);
10660 value = ltoh32(*ptr);
10661 bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value);
10662 ptr++;
10663 value = ltoh32(*ptr);
10664 bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value);
10665
10666 ptr++;
10667 bcm_bprintf(b, "RPTR block Flow rings , 0x%04x\n", ptr);
10668 for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
10669 value = ltoh32(*ptr);
10670 bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
10671 ptr++;
10672 }
10673 }
10674
10675 if (dhd->prot->h2d_dma_indx_rd_buf.va) {
10676 OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va,
10677 dhd->prot->h2d_dma_indx_rd_buf.len);
10678
10679 ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va);
10680
10681 bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%04x\n", ptr);
10682 value = ltoh32(*ptr);
10683 bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value);
10684 ptr++;
10685 value = ltoh32(*ptr);
10686 bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value);
10687 ptr++;
10688 value = ltoh32(*ptr);
10689 bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value);
10690 }
10691
10692 return 0;
10693 }
10694
10695 uint32
10696 dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val)
10697 {
10698 dhd_prot_t *prot = dhd->prot;
10699 #if DHD_DBG_SHOW_METADATA
10700 prot->metadata_dbg = val;
10701 #endif // endif
10702 return (uint32)prot->metadata_dbg;
10703 }
10704
10705 uint32
10706 dhd_prot_metadata_dbg_get(dhd_pub_t *dhd)
10707 {
10708 dhd_prot_t *prot = dhd->prot;
10709 return (uint32)prot->metadata_dbg;
10710 }
10711
10712 uint32
10713 dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx)
10714 {
10715 dhd_prot_t *prot = dhd->prot;
10716 if (rx)
10717 prot->rx_metadata_offset = (uint16)val;
10718 else
10719 prot->tx_metadata_offset = (uint16)val;
10720 return dhd_prot_metadatalen_get(dhd, rx);
10721 }
10722
10723 uint32
10724 dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx)
10725 {
10726 dhd_prot_t *prot = dhd->prot;
10727 if (rx)
10728 return prot->rx_metadata_offset;
10729 else
10730 return prot->tx_metadata_offset;
10731 }
10732
10733 /** optimization to write "n" tx items at a time to ring */
10734 uint32
10735 dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val)
10736 {
10737 dhd_prot_t *prot = dhd->prot;
10738 if (set)
10739 prot->txp_threshold = (uint16)val;
10740 val = prot->txp_threshold;
10741 return val;
10742 }
10743
10744 #ifdef DHD_RX_CHAINING
10745
10746 static INLINE void BCMFASTPATH
10747 dhd_rxchain_reset(rxchain_info_t *rxchain)
10748 {
10749 rxchain->pkt_count = 0;
10750 }
10751
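/**
 * Accumulate received packets of the same flow (same DA/SA/priority) into a chain;
 * the chain is committed when the flow changes, the packet is not chainable, or
 * DHD_PKT_CTF_MAX_CHAIN_LEN packets have been collected.
 */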
10752 static void BCMFASTPATH
10753 dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx)
10754 {
10755 uint8 *eh;
10756 uint8 prio;
10757 dhd_prot_t *prot = dhd->prot;
10758 rxchain_info_t *rxchain = &prot->rxchain;
10759
10760 ASSERT(!PKTISCHAINED(pkt));
10761 ASSERT(PKTCLINK(pkt) == NULL);
10762 ASSERT(PKTCGETATTR(pkt) == 0);
10763
10764 eh = PKTDATA(dhd->osh, pkt);
10765 prio = IP_TOS46(eh + ETHER_HDR_LEN) >> IPV4_TOS_PREC_SHIFT;
10766
10767 if (rxchain->pkt_count && !(PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa,
10768 rxchain->h_da, rxchain->h_prio))) {
10769 /* Different flow - First release the existing chain */
10770 dhd_rxchain_commit(dhd);
10771 }
10772
10773 /* For routers, with HNDCTF, link the packets using PKTSETCLINK, */
10774 /* so that the chain can be handed off to CTF bridge as is. */
10775 if (rxchain->pkt_count == 0) {
10776 /* First packet in chain */
10777 rxchain->pkthead = rxchain->pkttail = pkt;
10778
10779 /* Keep a copy of ptr to ether_da, ether_sa and prio */
10780 rxchain->h_da = ((struct ether_header *)eh)->ether_dhost;
10781 rxchain->h_sa = ((struct ether_header *)eh)->ether_shost;
10782 rxchain->h_prio = prio;
10783 rxchain->ifidx = ifidx;
10784 rxchain->pkt_count++;
10785 } else {
10786 /* Same flow - keep chaining */
10787 PKTSETCLINK(rxchain->pkttail, pkt);
10788 rxchain->pkttail = pkt;
10789 rxchain->pkt_count++;
10790 }
10791
10792 if ((dhd_rx_pkt_chainable(dhd, ifidx)) && (!ETHER_ISMULTI(rxchain->h_da)) &&
10793 ((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) ||
10794 (((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IPV6)))) {
10795 PKTSETCHAINED(dhd->osh, pkt);
10796 PKTCINCRCNT(rxchain->pkthead);
10797 PKTCADDLEN(rxchain->pkthead, PKTLEN(dhd->osh, pkt));
10798 } else {
10799 dhd_rxchain_commit(dhd);
10800 return;
10801 }
10802
10803 /* If we have hit the max chain length, dispatch the chain and reset */
10804 if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN) {
10805 dhd_rxchain_commit(dhd);
10806 }
10807 }
10808
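/** Hand the accumulated rx packet chain up to dhd_linux and reset the chain state. */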
10809 static void BCMFASTPATH
10810 dhd_rxchain_commit(dhd_pub_t *dhd)
10811 {
10812 dhd_prot_t *prot = dhd->prot;
10813 rxchain_info_t *rxchain = &prot->rxchain;
10814
10815 if (rxchain->pkt_count == 0)
10816 return;
10817
10818 /* Release the packets to dhd_linux */
10819 dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx, rxchain->pkt_count);
10820
10821 /* Reset the chain */
10822 dhd_rxchain_reset(rxchain);
10823 }
10824
10825 #endif /* DHD_RX_CHAINING */
10826
10827 #ifdef IDLE_TX_FLOW_MGMT
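/**
 * Resume an idle TX flow ring: re-attach a pre-initialized msgbuf ring from the
 * flowring pool, publish its WRITE index and send MSG_TYPE_FLOW_RING_RESUME to the
 * dongle over the control submission ring.
 */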
10828 int
10829 dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
10830 {
10831 tx_idle_flowring_resume_request_t *flow_resume_rqst;
10832 msgbuf_ring_t *flow_ring;
10833 dhd_prot_t *prot = dhd->prot;
10834 unsigned long flags;
10835 uint16 alloced = 0;
10836 msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
10837
10838 /* Fetch a pre-initialized msgbuf_ring from the flowring pool */
10839 flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
10840 if (flow_ring == NULL) {
10841 DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
10842 __FUNCTION__, flow_ring_node->flowid));
10843 return BCME_NOMEM;
10844 }
10845
10846 DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
10847
10848 /* Request for ctrl_ring buffer space */
10849 flow_resume_rqst = (tx_idle_flowring_resume_request_t *)
10850 dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
10851
10852 if (flow_resume_rqst == NULL) {
10853 dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
		DHD_ERROR(("%s: Flow resume Req flowid %d - failed to allocate ring space\n",
10855 __FUNCTION__, flow_ring_node->flowid));
10856 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
10857 return BCME_NOMEM;
10858 }
10859
10860 flow_ring_node->prot_info = (void *)flow_ring;
10861
10862 /* Common msg buf hdr */
10863 flow_resume_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_RESUME;
10864 flow_resume_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
10865 flow_resume_rqst->msg.request_id = htol32(0); /* TBD */
10866
10867 flow_resume_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
10868 ctrl_ring->seqnum++;
10869
10870 flow_resume_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
10871 DHD_ERROR(("%s Send Flow resume Req flow ID %d\n",
10872 __FUNCTION__, flow_ring_node->flowid));
10873
10874 /* Update the flow_ring's WRITE index */
10875 if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
10876 dhd_prot_dma_indx_set(dhd, flow_ring->wr,
10877 H2D_DMA_INDX_WR_UPD, flow_ring->idx);
10878 } else if (IFRM_ACTIVE(dhd) && (flow_ring->idx >= BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START)) {
10879 dhd_prot_dma_indx_set(dhd, flow_ring->wr,
10880 H2D_IFRM_INDX_WR_UPD,
10881 (flow_ring->idx - BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START));
10882 } else {
10883 dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
10884 sizeof(uint16), RING_WR_UPD, flow_ring->idx);
10885 }
10886
10887 /* update control subn ring's WR index and ring doorbell to dongle */
10888 dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_resume_rqst, 1);
10889
10890 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
10891
10892 return BCME_OK;
} /* dhd_prot_flow_ring_resume */
10894
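/** Request the dongle to suspend a batch of idle TX flow rings with a single control message. */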
10895 int
10896 dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t *dhd, uint16 *ringid, uint16 count)
10897 {
10898 tx_idle_flowring_suspend_request_t *flow_suspend_rqst;
10899 dhd_prot_t *prot = dhd->prot;
10900 unsigned long flags;
10901 uint16 index;
10902 uint16 alloced = 0;
10903 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
10904
10905 DHD_RING_LOCK(ring->ring_lock, flags);
10906
10907 /* Request for ring buffer space */
10908 flow_suspend_rqst = (tx_idle_flowring_suspend_request_t *)
10909 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
10910
10911 if (flow_suspend_rqst == NULL) {
10912 DHD_RING_UNLOCK(ring->ring_lock, flags);
		DHD_ERROR(("%s: Flow suspend Req - failed to allocate ring space\n", __FUNCTION__));
10914 return BCME_NOMEM;
10915 }
10916
10917 /* Common msg buf hdr */
10918 flow_suspend_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_SUSPEND;
10919 /* flow_suspend_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; */
10920 flow_suspend_rqst->msg.request_id = htol32(0); /* TBD */
10921
10922 flow_suspend_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
10923 ring->seqnum++;
10924
10925 /* Update flow id info */
10926 for (index = 0; index < count; index++)
10927 {
10928 flow_suspend_rqst->ring_id[index] = ringid[index];
10929 }
10930 flow_suspend_rqst->num = count;
10931
	DHD_ERROR(("%s: sending batch suspend request for %d flow rings\n", __FUNCTION__, count));
10933
10934 /* update ring's WR index and ring doorbell to dongle */
10935 dhd_prot_ring_write_complete(dhd, ring, flow_suspend_rqst, 1);
10936
10937 DHD_RING_UNLOCK(ring->ring_lock, flags);
10938
10939 return BCME_OK;
10940 }
10941 #endif /* IDLE_TX_FLOW_MGMT */
10942
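/** Map an extended trap data TLV tag to a printable name. */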
10943 static const char* etd_trap_name(hnd_ext_tag_trap_t tag)
10944 {
10945 switch (tag)
10946 {
10947 case TAG_TRAP_SIGNATURE: return "TAG_TRAP_SIGNATURE";
10948 case TAG_TRAP_STACK: return "TAG_TRAP_STACK";
10949 case TAG_TRAP_MEMORY: return "TAG_TRAP_MEMORY";
10950 case TAG_TRAP_DEEPSLEEP: return "TAG_TRAP_DEEPSLEEP";
10951 case TAG_TRAP_PSM_WD: return "TAG_TRAP_PSM_WD";
10952 case TAG_TRAP_PHY: return "TAG_TRAP_PHY";
10953 case TAG_TRAP_BUS: return "TAG_TRAP_BUS";
10954 case TAG_TRAP_MAC_SUSP: return "TAG_TRAP_MAC_SUSP";
10955 case TAG_TRAP_BACKPLANE: return "TAG_TRAP_BACKPLANE";
10956 case TAG_TRAP_PCIE_Q: return "TAG_TRAP_PCIE_Q";
10957 case TAG_TRAP_WLC_STATE: return "TAG_TRAP_WLC_STATE";
10958 case TAG_TRAP_MAC_WAKE: return "TAG_TRAP_MAC_WAKE";
10959 case TAG_TRAP_HMAP: return "TAG_TRAP_HMAP";
10960 case TAG_TRAP_PHYTXERR_THRESH: return "TAG_TRAP_PHYTXERR_THRESH";
10961 case TAG_TRAP_HC_DATA: return "TAG_TRAP_HC_DATA";
10962 case TAG_TRAP_LOG_DATA: return "TAG_TRAP_LOG_DATA";
10963 case TAG_TRAP_CODE: return "TAG_TRAP_CODE";
10964 case TAG_TRAP_LAST:
10965 default:
10966 return "Unknown";
10967 }
10968 return "Unknown";
10969 }
10970
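/**
 * Pretty print the extended trap data: list the tags present, then decode each
 * supported TLV (trap code, signature, stack, backplane, memory, PCIe queue, WLC
 * state, PHY, PSM watchdog, MAC suspend/wake, bus, HMAP). With 'raw' set, the blob
 * is hex dumped instead.
 */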
10971 int dhd_prot_dump_extended_trap(dhd_pub_t *dhdp, struct bcmstrbuf *b, bool raw)
10972 {
10973 uint32 i;
10974 uint32 *ext_data;
10975 hnd_ext_trap_hdr_t *hdr;
10976 const bcm_tlv_t *tlv;
10977 const trap_t *tr;
10978 const uint32 *stack;
10979 const hnd_ext_trap_bp_err_t *bpe;
10980 uint32 raw_len;
10981
10982 ext_data = dhdp->extended_trap_data;
10983
10984 /* return if there is no extended trap data */
10985 if (!ext_data || !(dhdp->dongle_trap_data & D2H_DEV_EXT_TRAP_DATA))
10986 {
10987 bcm_bprintf(b, "%d (0x%x)", dhdp->dongle_trap_data, dhdp->dongle_trap_data);
10988 return BCME_OK;
10989 }
10990
10991 bcm_bprintf(b, "Extended trap data\n");
10992
10993 /* First word is original trap_data */
10994 bcm_bprintf(b, "trap_data = 0x%08x\n", *ext_data);
10995 ext_data++;
10996
10997 /* Followed by the extended trap data header */
10998 hdr = (hnd_ext_trap_hdr_t *)ext_data;
10999 bcm_bprintf(b, "version: %d, len: %d\n", hdr->version, hdr->len);
11000
11001 /* Dump a list of all tags found before parsing data */
11002 bcm_bprintf(b, "\nTags Found:\n");
11003 for (i = 0; i < TAG_TRAP_LAST; i++) {
11004 tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
11005 if (tlv)
11006 bcm_bprintf(b, "Tag: %d (%s), Length: %d\n", i, etd_trap_name(i), tlv->len);
11007 }
11008
11009 if (raw)
11010 {
11011 raw_len = sizeof(hnd_ext_trap_hdr_t) + (hdr->len / 4) + (hdr->len % 4 ? 1 : 0);
11012 for (i = 0; i < raw_len; i++)
11013 {
11014 bcm_bprintf(b, "0x%08x ", ext_data[i]);
11015 if (i % 4 == 3)
11016 bcm_bprintf(b, "\n");
11017 }
11018 return BCME_OK;
11019 }
11020
11021 /* Extract the various supported TLVs from the extended trap data */
11022 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_CODE);
11023 if (tlv)
11024 {
11025 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_CODE), tlv->len);
11026 bcm_bprintf(b, "ETD TYPE: %d\n", tlv->data[0]);
11027 }
11028
11029 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
11030 if (tlv)
11031 {
11032 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_SIGNATURE), tlv->len);
11033 tr = (const trap_t *)tlv->data;
11034
11035 bcm_bprintf(b, "TRAP %x: pc %x, lr %x, sp %x, cpsr %x, spsr %x\n",
11036 tr->type, tr->pc, tr->r14, tr->r13, tr->cpsr, tr->spsr);
11037 bcm_bprintf(b, " r0 %x, r1 %x, r2 %x, r3 %x, r4 %x, r5 %x, r6 %x\n",
11038 tr->r0, tr->r1, tr->r2, tr->r3, tr->r4, tr->r5, tr->r6);
11039 bcm_bprintf(b, " r7 %x, r8 %x, r9 %x, r10 %x, r11 %x, r12 %x\n",
11040 tr->r7, tr->r8, tr->r9, tr->r10, tr->r11, tr->r12);
11041 }
11042
11043 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
11044 if (tlv)
11045 {
11046 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_STACK), tlv->len);
11047 stack = (const uint32 *)tlv->data;
11048 for (i = 0; i < (uint32)(tlv->len / 4); i++)
11049 {
11050 bcm_bprintf(b, " 0x%08x\n", *stack);
11051 stack++;
11052 }
11053 }
11054
11055 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BACKPLANE);
11056 if (tlv)
11057 {
11058 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BACKPLANE), tlv->len);
11059 bpe = (const hnd_ext_trap_bp_err_t *)tlv->data;
11060 bcm_bprintf(b, " error: %x\n", bpe->error);
11061 bcm_bprintf(b, " coreid: %x\n", bpe->coreid);
11062 bcm_bprintf(b, " baseaddr: %x\n", bpe->baseaddr);
11063 bcm_bprintf(b, " ioctrl: %x\n", bpe->ioctrl);
11064 bcm_bprintf(b, " iostatus: %x\n", bpe->iostatus);
11065 bcm_bprintf(b, " resetctrl: %x\n", bpe->resetctrl);
11066 bcm_bprintf(b, " resetstatus: %x\n", bpe->resetstatus);
11067 bcm_bprintf(b, " errlogctrl: %x\n", bpe->errlogctrl);
11068 bcm_bprintf(b, " errlogdone: %x\n", bpe->errlogdone);
11069 bcm_bprintf(b, " errlogstatus: %x\n", bpe->errlogstatus);
11070 bcm_bprintf(b, " errlogaddrlo: %x\n", bpe->errlogaddrlo);
11071 bcm_bprintf(b, " errlogaddrhi: %x\n", bpe->errlogaddrhi);
11072 bcm_bprintf(b, " errlogid: %x\n", bpe->errlogid);
11073 bcm_bprintf(b, " errloguser: %x\n", bpe->errloguser);
11074 bcm_bprintf(b, " errlogflags: %x\n", bpe->errlogflags);
11075 }
11076
11077 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MEMORY);
11078 if (tlv)
11079 {
11080 const hnd_ext_trap_heap_err_t* hme;
11081
11082 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MEMORY), tlv->len);
11083 hme = (const hnd_ext_trap_heap_err_t *)tlv->data;
11084 bcm_bprintf(b, " arena total: %d\n", hme->arena_total);
11085 bcm_bprintf(b, " heap free: %d\n", hme->heap_free);
11086 bcm_bprintf(b, " heap in use: %d\n", hme->heap_inuse);
11087 bcm_bprintf(b, " mf count: %d\n", hme->mf_count);
11088 bcm_bprintf(b, " stack LWM: %x\n", hme->stack_lwm);
11089
11090 bcm_bprintf(b, " Histogram:\n");
11091 for (i = 0; i < (HEAP_HISTOGRAM_DUMP_LEN * 2); i += 2) {
11092 if (hme->heap_histogm[i] == 0xfffe)
11093 bcm_bprintf(b, " Others\t%d\t?\n", hme->heap_histogm[i + 1]);
11094 else if (hme->heap_histogm[i] == 0xffff)
11095 bcm_bprintf(b, " >= 256K\t%d\t?\n", hme->heap_histogm[i + 1]);
11096 else
11097 bcm_bprintf(b, " %d\t%d\t%d\n", hme->heap_histogm[i] << 2,
11098 hme->heap_histogm[i + 1], (hme->heap_histogm[i] << 2)
11099 * hme->heap_histogm[i + 1]);
11100 }
11101
11102 bcm_bprintf(b, " Max free block: %d\n", hme->max_sz_free_blk[0] << 2);
11103 for (i = 1; i < HEAP_MAX_SZ_BLKS_LEN; i++) {
11104 bcm_bprintf(b, " Next lgst free block: %d\n", hme->max_sz_free_blk[i] << 2);
11105 }
11106 }
11107
11108 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PCIE_Q);
11109 if (tlv)
11110 {
11111 const hnd_ext_trap_pcie_mem_err_t* pqme;
11112
11113 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PCIE_Q), tlv->len);
11114 pqme = (const hnd_ext_trap_pcie_mem_err_t *)tlv->data;
11115 bcm_bprintf(b, " d2h queue len: %x\n", pqme->d2h_queue_len);
11116 bcm_bprintf(b, " d2h req queue len: %x\n", pqme->d2h_req_queue_len);
11117 }
11118
11119 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_WLC_STATE);
11120 if (tlv)
11121 {
11122 const hnd_ext_trap_wlc_mem_err_t* wsme;
11123
11124 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_WLC_STATE), tlv->len);
11125 wsme = (const hnd_ext_trap_wlc_mem_err_t *)tlv->data;
11126 bcm_bprintf(b, " instance: %d\n", wsme->instance);
11127 bcm_bprintf(b, " associated: %d\n", wsme->associated);
11128 bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
11129 bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
11130 bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
11131 bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
11132 bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]);
11133 bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]);
11134
11135 if (tlv->len >= (sizeof(*wsme) * 2)) {
11136 wsme++;
11137 bcm_bprintf(b, "\n instance: %d\n", wsme->instance);
11138 bcm_bprintf(b, " associated: %d\n", wsme->associated);
11139 bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
11140 bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
11141 bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
11142 bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
11143 bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]);
11144 bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]);
11145 }
11146 }
11147
11148 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PHY);
11149 if (tlv)
11150 {
11151 const hnd_ext_trap_phydbg_t* phydbg;
11152 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PHY), tlv->len);
11153 phydbg = (const hnd_ext_trap_phydbg_t *)tlv->data;
11154 bcm_bprintf(b, " err: 0x%x\n", phydbg->err);
11155 bcm_bprintf(b, " RxFeStatus: 0x%x\n", phydbg->RxFeStatus);
11156 bcm_bprintf(b, " TxFIFOStatus0: 0x%x\n", phydbg->TxFIFOStatus0);
11157 bcm_bprintf(b, " TxFIFOStatus1: 0x%x\n", phydbg->TxFIFOStatus1);
11158 bcm_bprintf(b, " RfseqMode: 0x%x\n", phydbg->RfseqMode);
11159 bcm_bprintf(b, " RfseqStatus0: 0x%x\n", phydbg->RfseqStatus0);
11160 bcm_bprintf(b, " RfseqStatus1: 0x%x\n", phydbg->RfseqStatus1);
11161 bcm_bprintf(b, " RfseqStatus_Ocl: 0x%x\n", phydbg->RfseqStatus_Ocl);
11162 bcm_bprintf(b, " RfseqStatus_Ocl1: 0x%x\n", phydbg->RfseqStatus_Ocl1);
11163 bcm_bprintf(b, " OCLControl1: 0x%x\n", phydbg->OCLControl1);
11164 bcm_bprintf(b, " TxError: 0x%x\n", phydbg->TxError);
11165 bcm_bprintf(b, " bphyTxError: 0x%x\n", phydbg->bphyTxError);
11166 bcm_bprintf(b, " TxCCKError: 0x%x\n", phydbg->TxCCKError);
11167 bcm_bprintf(b, " TxCtrlWrd0: 0x%x\n", phydbg->TxCtrlWrd0);
11168 bcm_bprintf(b, " TxCtrlWrd1: 0x%x\n", phydbg->TxCtrlWrd1);
11169 bcm_bprintf(b, " TxCtrlWrd2: 0x%x\n", phydbg->TxCtrlWrd2);
11170 bcm_bprintf(b, " TxLsig0: 0x%x\n", phydbg->TxLsig0);
11171 bcm_bprintf(b, " TxLsig1: 0x%x\n", phydbg->TxLsig1);
11172 bcm_bprintf(b, " TxVhtSigA10: 0x%x\n", phydbg->TxVhtSigA10);
11173 bcm_bprintf(b, " TxVhtSigA11: 0x%x\n", phydbg->TxVhtSigA11);
11174 bcm_bprintf(b, " TxVhtSigA20: 0x%x\n", phydbg->TxVhtSigA20);
11175 bcm_bprintf(b, " TxVhtSigA21: 0x%x\n", phydbg->TxVhtSigA21);
11176 bcm_bprintf(b, " txPktLength: 0x%x\n", phydbg->txPktLength);
11177 bcm_bprintf(b, " txPsdulengthCtr: 0x%x\n", phydbg->txPsdulengthCtr);
11178 bcm_bprintf(b, " gpioClkControl: 0x%x\n", phydbg->gpioClkControl);
11179 bcm_bprintf(b, " gpioSel: 0x%x\n", phydbg->gpioSel);
11180 bcm_bprintf(b, " pktprocdebug: 0x%x\n", phydbg->pktprocdebug);
11181 for (i = 0; i < 3; i++)
11182 bcm_bprintf(b, " gpioOut[%d]: 0x%x\n", i, phydbg->gpioOut[i]);
11183 }
11184
11185 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PSM_WD);
11186 if (tlv)
11187 {
11188 const hnd_ext_trap_psmwd_t* psmwd;
11189 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PSM_WD), tlv->len);
11190 psmwd = (const hnd_ext_trap_psmwd_t *)tlv;
11191 bcm_bprintf(b, " version: 0x%x\n", psmwd->version);
11192 bcm_bprintf(b, " maccontrol: 0x%x\n", psmwd->i32_maccontrol);
11193 bcm_bprintf(b, " maccommand: 0x%x\n", psmwd->i32_maccommand);
11194 bcm_bprintf(b, " macintstatus: 0x%x\n", psmwd->i32_macintstatus);
11195 bcm_bprintf(b, " phydebug: 0x%x\n", psmwd->i32_phydebug);
11196 bcm_bprintf(b, " clk_ctl_st: 0x%x\n", psmwd->i32_clk_ctl_st);
11197 for (i = 0; i < 3; i++)
11198 bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, psmwd->i32_psmdebug[i]);
11199 bcm_bprintf(b, " gated clock en: 0x%x\n", psmwd->i16_0x1a8);
11200 bcm_bprintf(b, " Rcv Fifo Ctrl: 0x%x\n", psmwd->i16_0x406);
11201 bcm_bprintf(b, " Rx ctrl 1: 0x%x\n", psmwd->i16_0x408);
11202 bcm_bprintf(b, " Rxe Status 1: 0x%x\n", psmwd->i16_0x41a);
11203 bcm_bprintf(b, " Rxe Status 2: 0x%x\n", psmwd->i16_0x41c);
11204 bcm_bprintf(b, " rcv wrd count 0: 0x%x\n", psmwd->i16_0x424);
11205 bcm_bprintf(b, " rcv wrd count 1: 0x%x\n", psmwd->i16_0x426);
11206 bcm_bprintf(b, " RCV_LFIFO_STS: 0x%x\n", psmwd->i16_0x456);
11207 bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", psmwd->i16_0x480);
11208 bcm_bprintf(b, " PSM BRC: 0x%x\n", psmwd->i16_0x490);
11209 bcm_bprintf(b, " TXE CTRL: 0x%x\n", psmwd->i16_0x500);
11210 bcm_bprintf(b, " TXE Status: 0x%x\n", psmwd->i16_0x50e);
11211 bcm_bprintf(b, " TXE_xmtdmabusy: 0x%x\n", psmwd->i16_0x55e);
11212 bcm_bprintf(b, " TXE_XMTfifosuspflush: 0x%x\n", psmwd->i16_0x566);
11213 bcm_bprintf(b, " IFS Stat: 0x%x\n", psmwd->i16_0x690);
11214 bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", psmwd->i16_0x692);
11215 bcm_bprintf(b, " IFS_TX_DUR: 0x%x\n", psmwd->i16_0x694);
11216 bcm_bprintf(b, " SLow_CTL: 0x%x\n", psmwd->i16_0x6a0);
11217 bcm_bprintf(b, " TXE_AQM fifo Ready: 0x%x\n", psmwd->i16_0x838);
11218 bcm_bprintf(b, " Dagg ctrl: 0x%x\n", psmwd->i16_0x8c0);
11219 bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", psmwd->shm_prewds_cnt);
11220 bcm_bprintf(b, " shm_txtplufl_cnt: 0x%x\n", psmwd->shm_txtplufl_cnt);
11221 bcm_bprintf(b, " shm_txphyerr_cnt: 0x%x\n", psmwd->shm_txphyerr_cnt);
11222 }
11223
11224 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_SUSP);
11225 if (tlv)
11226 {
11227 const hnd_ext_trap_macsusp_t* macsusp;
11228 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_SUSP), tlv->len);
11229 macsusp = (const hnd_ext_trap_macsusp_t *)tlv;
11230 bcm_bprintf(b, " version: %d\n", macsusp->version);
11231 bcm_bprintf(b, " trap_reason: %d\n", macsusp->trap_reason);
11232 bcm_bprintf(b, " maccontrol: 0x%x\n", macsusp->i32_maccontrol);
11233 bcm_bprintf(b, " maccommand: 0x%x\n", macsusp->i32_maccommand);
11234 bcm_bprintf(b, " macintstatus: 0x%x\n", macsusp->i32_macintstatus);
11235 for (i = 0; i < 4; i++)
11236 bcm_bprintf(b, " phydebug[%d]: 0x%x\n", i, macsusp->i32_phydebug[i]);
11237 for (i = 0; i < 8; i++)
11238 bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macsusp->i32_psmdebug[i]);
11239 bcm_bprintf(b, " Rxe Status_1: 0x%x\n", macsusp->i16_0x41a);
11240 bcm_bprintf(b, " Rxe Status_2: 0x%x\n", macsusp->i16_0x41c);
11241 bcm_bprintf(b, " PSM BRC: 0x%x\n", macsusp->i16_0x490);
11242 bcm_bprintf(b, " TXE Status: 0x%x\n", macsusp->i16_0x50e);
11243 bcm_bprintf(b, " TXE xmtdmabusy: 0x%x\n", macsusp->i16_0x55e);
11244 bcm_bprintf(b, " TXE XMTfifosuspflush: 0x%x\n", macsusp->i16_0x566);
11245 bcm_bprintf(b, " IFS Stat: 0x%x\n", macsusp->i16_0x690);
11246 bcm_bprintf(b, " IFS MEDBUSY CTR: 0x%x\n", macsusp->i16_0x692);
11247 bcm_bprintf(b, " IFS TX DUR: 0x%x\n", macsusp->i16_0x694);
11248 bcm_bprintf(b, " WEP CTL: 0x%x\n", macsusp->i16_0x7c0);
11249 bcm_bprintf(b, " TXE AQM fifo Ready: 0x%x\n", macsusp->i16_0x838);
11250 bcm_bprintf(b, " MHP status: 0x%x\n", macsusp->i16_0x880);
11251 bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", macsusp->shm_prewds_cnt);
11252 bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macsusp->shm_ucode_dbgst);
11253 }
11254
11255 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_WAKE);
11256 if (tlv)
11257 {
11258 const hnd_ext_trap_macenab_t* macwake;
11259 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_WAKE), tlv->len);
11260 macwake = (const hnd_ext_trap_macenab_t *)tlv;
11261 bcm_bprintf(b, " version: 0x%x\n", macwake->version);
11262 bcm_bprintf(b, " trap_reason: 0x%x\n", macwake->trap_reason);
11263 bcm_bprintf(b, " maccontrol: 0x%x\n", macwake->i32_maccontrol);
11264 bcm_bprintf(b, " maccommand: 0x%x\n", macwake->i32_maccommand);
11265 bcm_bprintf(b, " macintstatus: 0x%x\n", macwake->i32_macintstatus);
11266 for (i = 0; i < 8; i++)
11267 bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macwake->i32_psmdebug[i]);
11268 bcm_bprintf(b, " clk_ctl_st: 0x%x\n", macwake->i32_clk_ctl_st);
11269 bcm_bprintf(b, " powerctl: 0x%x\n", macwake->i32_powerctl);
11270 bcm_bprintf(b, " gated clock en: 0x%x\n", macwake->i16_0x1a8);
11271 bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", macwake->i16_0x480);
11272 bcm_bprintf(b, " PSM BRC: 0x%x\n", macwake->i16_0x490);
11273 bcm_bprintf(b, " TSF CTL: 0x%x\n", macwake->i16_0x600);
11274 bcm_bprintf(b, " IFS Stat: 0x%x\n", macwake->i16_0x690);
11275 bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", macwake->i16_0x692);
11276 bcm_bprintf(b, " Slow_CTL: 0x%x\n", macwake->i16_0x6a0);
11277 bcm_bprintf(b, " Slow_FRAC: 0x%x\n", macwake->i16_0x6a6);
11278 bcm_bprintf(b, " fast power up delay: 0x%x\n", macwake->i16_0x6a8);
11279 bcm_bprintf(b, " Slow_PER: 0x%x\n", macwake->i16_0x6aa);
11280 bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macwake->shm_ucode_dbgst);
11281 }
11282
11283 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BUS);
11284 if (tlv)
11285 {
11286 const bcm_dngl_pcie_hc_t* hc;
11287 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BUS), tlv->len);
11288 hc = (const bcm_dngl_pcie_hc_t *)tlv->data;
11289 bcm_bprintf(b, " version: 0x%x\n", hc->version);
11290 bcm_bprintf(b, " reserved: 0x%x\n", hc->reserved);
11291 bcm_bprintf(b, " pcie_err_ind_type: 0x%x\n", hc->pcie_err_ind_type);
11292 bcm_bprintf(b, " pcie_flag: 0x%x\n", hc->pcie_flag);
11293 bcm_bprintf(b, " pcie_control_reg: 0x%x\n", hc->pcie_control_reg);
11294 for (i = 0; i < HC_PCIEDEV_CONFIG_REGLIST_MAX; i++)
11295 bcm_bprintf(b, " pcie_config_regs[%d]: 0x%x\n", i, hc->pcie_config_regs[i]);
11296 }
11297
11298 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_HMAP);
11299 if (tlv)
11300 {
11301 const pcie_hmapviolation_t* hmap;
11302 hmap = (const pcie_hmapviolation_t *)tlv->data;
11303 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_HMAP), tlv->len);
11304 bcm_bprintf(b, " HMAP Vio Addr Low: 0x%x\n", hmap->hmap_violationaddr_lo);
11305 bcm_bprintf(b, " HMAP Vio Addr Hi: 0x%x\n", hmap->hmap_violationaddr_hi);
11306 bcm_bprintf(b, " HMAP Vio Info: 0x%x\n", hmap->hmap_violation_info);
11307 }
11308
11309 return BCME_OK;
11310 }
11311
11312 #ifdef BCMPCIE
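/**
 * dhd_prot_send_host_timestamp() - submit a host timestamp request to the dongle.
 *
 * Copies the caller supplied TLV payload into the hostts_req_buf DMA buffer and
 * posts a MSG_TYPE_HOSTTIMSTAMP work item on the H2D control submission ring.
 * Only one host TS request may be outstanding: a new request is rejected while
 * hostts_req_buf_inuse is still TRUE.
 */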
11313 int
11314 dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlvs, uint16 tlv_len,
11315 uint16 seqnum, uint16 xt_id)
11316 {
11317 dhd_prot_t *prot = dhdp->prot;
11318 host_timestamp_msg_t *ts_req;
11319 unsigned long flags;
11320 uint16 alloced = 0;
11321 uchar *ts_tlv_buf;
11322 msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
11323
11324 if ((tlvs == NULL) || (tlv_len == 0)) {
11325 DHD_ERROR(("%s: argument error tlv: %p, tlv_len %d\n",
11326 __FUNCTION__, tlvs, tlv_len));
11327 return -1;
11328 }
11329
11330 DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
11331
11332 /* if a host TS request is already pending, bail out */
11333 if (prot->hostts_req_buf_inuse == TRUE) {
11334 DHD_ERROR(("one host TS request already pending at device\n"));
11335 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
11336 return -1;
11337 }
11338
11339 /* Request space on the control submission ring */
11340 ts_req = (host_timestamp_msg_t*)dhd_prot_alloc_ring_space(dhdp, ctrl_ring,
11341 DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced, FALSE);
11342 if (ts_req == NULL) {
11343 DHD_ERROR(("couldn't allocate space on msgring to send host TS request\n"));
11344 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
11345 return -1;
11346 }
11347
11348 /* Common msg buf hdr */
11349 ts_req->msg.msg_type = MSG_TYPE_HOSTTIMSTAMP;
11350 ts_req->msg.if_id = 0;
11351 ts_req->msg.flags = ctrl_ring->current_phase;
11352 ts_req->msg.request_id = DHD_H2D_HOSTTS_REQ_PKTID;
11353
11354 ts_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
11355 ctrl_ring->seqnum++;
11356
11357 ts_req->xt_id = xt_id;
11358 ts_req->seqnum = seqnum;
11359 /* populate TS req buffer info */
11360 ts_req->input_data_len = htol16(tlv_len);
11361 ts_req->host_buf_addr.high = htol32(PHYSADDRHI(prot->hostts_req_buf.pa));
11362 ts_req->host_buf_addr.low = htol32(PHYSADDRLO(prot->hostts_req_buf.pa));
11363 /* copy the timestamp TLV payload into the host TS request buffer */
11364 ts_tlv_buf = (void *) prot->hostts_req_buf.va;
11365 prot->hostts_req_buf_inuse = TRUE;
11366 memcpy(ts_tlv_buf, tlvs, tlv_len);
11367
11368 OSL_CACHE_FLUSH((void *) prot->hostts_req_buf.va, tlv_len);
11369
11370 if (ISALIGNED(ts_tlv_buf, DMA_ALIGN_LEN) == FALSE) {
11371 DHD_ERROR(("host TS request buffer address is not DMA aligned\n"));
11372 }
11373
11374 DHD_CTL(("submitted host TS request request_id %d, data_len %d, xt_id %d, seqnum %d\n",
11375 ts_req->msg.request_id, ts_req->input_data_len,
11376 ts_req->xt_id, ts_req->seqnum));
11377
11378 /* update the ring write pointer and raise the doorbell interrupt */
11379 dhd_prot_ring_write_complete(dhdp, ctrl_ring, ts_req,
11380 DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
11381
11382 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
11383
11384 return 0;
11385 } /* dhd_prot_send_host_timestamp */
11386
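/*
 * Get/set helpers for protocol level flags (TX/RX timestamp logging, no_retry,
 * no_aggr, fixed_rate): when 'set' is TRUE the flag is updated to 'enable',
 * and the current value is always returned.
 */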
11387 bool
11388 dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set)
11389 {
11390 if (set)
11391 dhd->prot->tx_ts_log_enabled = enable;
11392
11393 return dhd->prot->tx_ts_log_enabled;
11394 }
11395
11396 bool
11397 dhd_prot_data_path_rx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set)
11398 {
11399 if (set)
11400 dhd->prot->rx_ts_log_enabled = enable;
11401
11402 return dhd->prot->rx_ts_log_enabled;
11403 }
11404
11405 bool
11406 dhd_prot_pkt_noretry(dhd_pub_t *dhd, bool enable, bool set)
11407 {
11408 if (set)
11409 dhd->prot->no_retry = enable;
11410
11411 return dhd->prot->no_retry;
11412 }
11413
11414 bool
11415 dhd_prot_pkt_noaggr(dhd_pub_t *dhd, bool enable, bool set)
11416 {
11417 if (set)
11418 dhd->prot->no_aggr = enable;
11419
11420 return dhd->prot->no_aggr;
11421 }
11422
11423 bool
11424 dhd_prot_pkt_fixed_rate(dhd_pub_t *dhd, bool enable, bool set)
11425 {
11426 if (set)
11427 dhd->prot->fixed_rate = enable;
11428
11429 return dhd->prot->fixed_rate;
11430 }
11431 #endif /* BCMPCIE */
11432
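/** Free the DMA buffers backing the H2D write-index and D2H read-index arrays */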
11433 void
11434 dhd_prot_dma_indx_free(dhd_pub_t *dhd)
11435 {
11436 dhd_prot_t *prot = dhd->prot;
11437
11438 dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
11439 dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
11440 }
11441
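/** Post timestamp buffers to the dongle, but only if TS buffer posting is configured (max_tsbufpost > 0) */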
11442 void
11443 dhd_msgbuf_delay_post_ts_bufs(dhd_pub_t *dhd)
11444 {
11445 if (dhd->prot->max_tsbufpost > 0)
11446 dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
11447 }
11448
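/*
 * Stub handler used when the timesync feature is not compiled in: a firmware
 * timestamp message is logged as an error and dropped.
 */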
11449 static void BCMFASTPATH
11450 dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf)
11451 {
11452 DHD_ERROR(("Timesync feature not compiled in, but received a FW TS message\n"));
11453
11454 }
11455
11456 uint16
11457 dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp)
11458 {
11459 return dhdp->prot->ioctl_trans_id;
11460 }
11461
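/*
 * Return the virtual address and length of the host SCB buffer. When HSCB is
 * disabled, report a zero length (if requested) rather than an error, to avoid
 * an "Operation not supported" message in the caller.
 */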
11462 int dhd_get_hscb_info(dhd_pub_t *dhd, void ** va, uint32 *len)
11463 {
11464 if (!dhd->hscb_enable) {
11465 if (len) {
11466 /* prevent "Operation not supported" dhd message */
11467 *len = 0;
11468 return BCME_OK;
11469 }
11470 return BCME_UNSUPPORTED;
11471 }
11472
11473 if (va) {
11474 *va = dhd->prot->host_scb_buf.va;
11475 }
11476 if (len) {
11477 *len = dhd->prot->host_scb_buf.len;
11478 }
11479
11480 return BCME_OK;
11481 }
11482
11483 #ifdef DHD_BUS_MEM_ACCESS
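/** Bounds-checked copy of 'length' bytes from the host SCB buffer, starting at 'offset', into 'buff' */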
11484 int dhd_get_hscb_buff(dhd_pub_t *dhd, uint32 offset, uint32 length, void * buff)
11485 {
11486 if (!dhd->hscb_enable) {
11487 return BCME_UNSUPPORTED;
11488 }
11489
11490 if (dhd->prot->host_scb_buf.va == NULL ||
11491 ((uint64)offset + length > (uint64)dhd->prot->host_scb_buf.len)) {
11492 return BCME_BADADDR;
11493 }
11494
11495 memcpy(buff, (char*)dhd->prot->host_scb_buf.va + offset, length);
11496
11497 return BCME_OK;
11498 }
11499 #endif /* DHD_BUS_MEM_ACCESS */
11500
11501 #ifdef DHD_HP2P
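/*
 * HP2P tunables (packet threshold, time threshold, packet expiry): each
 * accessor updates the value when 'set' is TRUE and returns the current
 * setting, truncated to 16 bits.
 */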
11502 uint32
11503 dhd_prot_pkt_threshold(dhd_pub_t *dhd, bool set, uint32 val)
11504 {
11505 if (set)
11506 dhd->pkt_thresh = (uint16)val;
11507
11508 val = dhd->pkt_thresh;
11509
11510 return val;
11511 }
11512
11513 uint32
11514 dhd_prot_time_threshold(dhd_pub_t *dhd, bool set, uint32 val)
11515 {
11516 if (set)
11517 dhd->time_thresh = (uint16)val;
11518
11519 val = dhd->time_thresh;
11520
11521 return val;
11522 }
11523
11524 uint32
11525 dhd_prot_pkt_expiry(dhd_pub_t *dhd, bool set, uint32 val)
11526 {
11527 if (set)
11528 dhd->pkt_expiry = (uint16)val;
11529
11530 val = dhd->pkt_expiry;
11531
11532 return val;
11533 }
11534
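/**
 * Enable/disable HP2P. The low nibble of 'enable' controls peer-to-peer HP2P,
 * the next nibble controls infrastructure HP2P; any non-zero 'enable' switches
 * the flow priority map to TID based mapping, zero restores AC based mapping.
 * The return value packs the current state in the same nibble layout.
 */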
11535 uint8
11536 dhd_prot_hp2p_enable(dhd_pub_t *dhd, bool set, int enable)
11537 {
11538 uint8 ret = 0;
11539 if (set) {
11540 dhd->hp2p_enable = (enable & 0xf) ? TRUE : FALSE;
11541 dhd->hp2p_infra_enable = ((enable >> 4) & 0xf) ? TRUE : FALSE;
11542
11543 if (enable) {
11544 dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_TID_MAP);
11545 } else {
11546 dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_AC_MAP);
11547 }
11548 }
11549 ret = dhd->hp2p_infra_enable ? 0x1:0x0;
11550 ret <<= 4;
11551 ret |= dhd->hp2p_enable ? 0x1:0x0;
11552
11553 return ret;
11554 }
11555
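/*
 * Derive an RX duration bucket from the low 10 bits of the completion
 * timestamp high word and update the rx_t0 histogram of flow 0; out-of-range
 * values are clamped to the last bin.
 */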
11556 static void
11557 dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus)
11558 {
11559 ts_timestamp_t *ts = (ts_timestamp_t *)&rxstatus->ts;
11560 hp2p_info_t *hp2p_info;
11561 uint32 dur1;
11562
11563 hp2p_info = &dhd->hp2p_info[0];
11564 dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 100;
11565
11566 if (dur1 > (MAX_RX_HIST_BIN - 1)) {
11567 dur1 = MAX_RX_HIST_BIN - 1;
11568 DHD_ERROR(("%s: rx duration out of range: ts 0x%x 0x%x\n",
11569 __FUNCTION__, ts->low, ts->high));
11570 }
11571
11572 hp2p_info->rx_t0[dur1 % MAX_RX_HIST_BIN]++;
11573 return;
11574 }
11575
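/*
 * Derive two TX duration buckets (bits 0..9 and 10..19 of the completion
 * timestamp high word) and update the flow's tx_t0/tx_t1 histograms;
 * out-of-range values are clamped to the last bin.
 */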
11576 static void
11577 dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus)
11578 {
11579 ts_timestamp_t *ts = (ts_timestamp_t *)&txstatus->ts;
11580 uint16 flowid = txstatus->compl_hdr.flow_ring_id;
11581 uint32 hp2p_flowid, dur1, dur2;
11582 hp2p_info_t *hp2p_info;
11583
11584 hp2p_flowid = dhd->bus->max_submission_rings -
11585 dhd->bus->max_cmn_rings - flowid + 1;
11586 hp2p_info = &dhd->hp2p_info[hp2p_flowid];
11587 ts = (ts_timestamp_t *)&(txstatus->ts);
11588
11589 dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 1000;
11590 if (dur1 > (MAX_TX_HIST_BIN - 1)) {
11591 dur1 = MAX_TX_HIST_BIN - 1;
11592 DHD_ERROR(("%s: tx duration (t0) out of range: ts 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
11593 }
11594 hp2p_info->tx_t0[dur1 % MAX_TX_HIST_BIN]++;
11595
11596 dur2 = (((ts->high >> 10) & 0x3FF) * HP2P_TIME_SCALE) / 1000;
11597 if (dur2 > (MAX_TX_HIST_BIN - 1)) {
11598 dur2 = MAX_TX_HIST_BIN - 1;
11599 DHD_ERROR(("%s: tx duration (t1) out of range: ts 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
11600 }
11601
11602 hp2p_info->tx_t1[dur2 % MAX_TX_HIST_BIN]++;
11603 return;
11604 }
11605
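/**
 * HP2P hrtimer callback: flush the pending TX work items on the flow ring
 * associated with this timer and account the expiry in num_timer_limit.
 */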
11606 enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer)
11607 {
11608 hp2p_info_t *hp2p_info;
11609 unsigned long flags;
11610 dhd_pub_t *dhdp;
11611
11612 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
11613 #pragma GCC diagnostic push
11614 #pragma GCC diagnostic ignored "-Wcast-qual"
11615 #endif /* STRICT_GCC_WARNINGS && __GNUC__ */
11616 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
11617 hp2p_info = container_of(timer, hp2p_info_t, timer.timer);
11618 #else
11619 hp2p_info = container_of(timer, hp2p_info_t, timer);
11620 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) */
11621 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
11622 #pragma GCC diagnostic pop
11623 #endif /* STRICT_GCC_WARNINGS && __GNUC__ */
11624 dhdp = hp2p_info->dhd_pub;
11625 if (!dhdp) {
11626 goto done;
11627 }
11628
11629 DHD_INFO(("%s: pend_item = %d flowid = %d\n",
11630 __FUNCTION__, ((msgbuf_ring_t *)hp2p_info->ring)->pend_items_count,
11631 hp2p_info->flowid));
11632
11633 flags = dhd_os_hp2plock(dhdp);
11634
11635 dhd_prot_txdata_write_flush(dhdp, hp2p_info->flowid);
11636 hp2p_info->hrtimer_init = FALSE;
11637 hp2p_info->num_timer_limit++;
11638
11639 dhd_os_hp2punlock(dhdp, flags);
11640 done:
11641 return HRTIMER_NORESTART;
11642 }
11643
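/*
 * HP2P burst handling: once pkt_thresh items are pending on the ring, flush
 * them immediately and cancel the hrtimer; otherwise arm the hrtimer so the
 * pending items are flushed after at most time_thresh microseconds.
 */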
11644 static void
11645 dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid)
11646 {
11647 hp2p_info_t *hp2p_info;
11648 uint16 hp2p_flowid;
11649
11650 hp2p_flowid = dhd->bus->max_submission_rings -
11651 dhd->bus->max_cmn_rings - flowid + 1;
11652 hp2p_info = &dhd->hp2p_info[hp2p_flowid];
11653
11654 if (ring->pend_items_count == dhd->pkt_thresh) {
11655 dhd_prot_txdata_write_flush(dhd, flowid);
11656
11657 hp2p_info->hrtimer_init = FALSE;
11658 hp2p_info->ring = NULL;
11659 hp2p_info->num_pkt_limit++;
11660 #if LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21)
11661 tasklet_hrtimer_cancel(&hp2p_info->timer);
11662 #else
11663 hrtimer_cancel(&hp2p_info->timer);
11664 #endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21) */
11665 DHD_INFO(("%s: cancel hrtimer for flowid = %d "
11666 "hp2p_flowid = %d pkt_thresh = %d\n",
11667 __FUNCTION__, flowid, hp2p_flowid, dhd->pkt_thresh));
11668 } else {
11669 if (hp2p_info->hrtimer_init == FALSE) {
11670 hp2p_info->hrtimer_init = TRUE;
11671 hp2p_info->flowid = flowid;
11672 hp2p_info->dhd_pub = dhd;
11673 hp2p_info->ring = ring;
11674 hp2p_info->num_timer_start++;
11675 #if LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21)
11676 tasklet_hrtimer_start(&hp2p_info->timer,
11677 ktime_set(0, dhd->time_thresh * 1000), HRTIMER_MODE_REL);
11678 #else
11679 hrtimer_start(&hp2p_info->timer,
11680 ktime_set(0, dhd->time_thresh * 1000), HRTIMER_MODE_REL_SOFT);
11681 #endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21) */
11682
11683 DHD_INFO(("%s: start hrtimer for flowid = %d hp2p_flowid = %d\n",
11684 __FUNCTION__, flowid, hp2p_flowid));
11685 }
11686 }
11687 return;
11688 }
11689
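/*
 * Stamp an HP2P TX descriptor: the metadata buffer address fields carry the
 * current host time in microseconds (local_clock() / 1000) and exp_time
 * carries the configured packet expiry.
 */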
11690 static void
11691 dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc)
11692 {
11693 uint64 ts;
11694
11695 ts = local_clock();
11696 do_div(ts, 1000);
11697
11698 txdesc->metadata_buf_len = 0;
11699 txdesc->metadata_buf_addr.high_addr = htol32((ts >> 32) & 0xFFFFFFFF);
11700 txdesc->metadata_buf_addr.low_addr = htol32(ts & 0xFFFFFFFF);
11701 txdesc->exp_time = dhd->pkt_expiry;
11702
11703 DHD_INFO(("%s: metadata_high = 0x%x metadata_low = 0x%x exp_time = %x\n",
11704 __FUNCTION__, txdesc->metadata_buf_addr.high_addr,
11705 txdesc->metadata_buf_addr.low_addr,
11706 txdesc->exp_time));
11707
11708 return;
11709 }
11710 #endif /* DHD_HP2P */
11711
11712 #ifdef DHD_MAP_LOGGING
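/*
 * On an SMMU fault, dump protocol debug state, the DMA map log and (when
 * enabled) the PKTID log, then trigger a firmware memory dump tagged
 * DUMP_TYPE_SMMU_FAULT.
 */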
11713 void
11714 dhd_prot_smmu_fault_dump(dhd_pub_t *dhdp)
11715 {
11716 dhd_prot_debug_info_print(dhdp);
11717 OSL_DMA_MAP_DUMP(dhdp->osh);
11718 #ifdef DHD_MAP_PKTID_LOGGING
11719 dhd_pktid_logging_dump(dhdp);
11720 #endif /* DHD_MAP_PKTID_LOGGING */
11721 #ifdef DHD_FW_COREDUMP
11722 dhdp->memdump_type = DUMP_TYPE_SMMU_FAULT;
11723 #ifdef DNGL_AXI_ERROR_LOGGING
11724 dhdp->memdump_enabled = DUMP_MEMFILE;
11725 dhd_bus_get_mem_dump(dhdp);
11726 #else
11727 dhdp->memdump_enabled = DUMP_MEMONLY;
11728 dhd_bus_mem_dump(dhdp);
11729 #endif /* DNGL_AXI_ERROR_LOGGING */
11730 #endif /* DHD_FW_COREDUMP */
11731 }
11732 #endif /* DHD_MAP_LOGGING */
11733