Lines Matching +full:db0 +full:- +full:db7
23 * <<Broadcom-WL-IPTag/Open:>>
126 #define DHD_RDPTR_UPDATE_H2D_DB_MAGIC(ring) (0xDD000000 | (ring->idx << 16u) | ring->rd)
128 #define DHD_WRPTR_UPDATE_H2D_DB_MAGIC(ring) (0xFF000000 | (ring->idx << 16u) | ring->wr)
142 #define MSGBUF_IOCTL_MAX_RQSTLEN (DHD_IOCTL_REQ_PKTBUFSZ - H2DRING_CTRL_SUB_ITEMSIZE)
146 * - as align bits: in DMA_ALLOC_CONSISTENT 1 << 4
147 * - in ensuring that a buffer's va is 4 Byte aligned
148 * - in rounding up a buffer length to 4 Bytes.
220 * 1. Dongle places a modulo-253 seqnum in last word of each D2H message
268 * +----------------------------------------------------------------------------
274 * the H2D common rings as well as the (N-BCMPCIE_H2D_COMMON_MSGRINGS) flowrings
289 * H2D TxPost FLOWRING RingId = 5 + (N-1) FlowId = (N-1) (Nth flowring)
309 * Likewise, in the D2H direction, the RingId - BCMPCIE_H2D_COMMON_MSGRINGS,
312 * +----------------------------------------------------------------------------
325 (BCMPCIE_COMMON_MSGRINGS + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS))
329 (BCMPCIE_H2D_COMMON_MSGRINGS + ((ringid) - BCMPCIE_COMMON_MSGRINGS))
342 ((ringid) - BCMPCIE_COMMON_MSGRINGS)
352 ((ringid) - max_h2d_rings) : \
353 ((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS))
373 * Pad a DMA-able buffer by an additional cachline. If the end of the DMA-able
375 * following the DMA-able buffer, data corruption may occur if the DMA-able
386 * +----------------------------------------------------------------------------
391 * larger DMA-able buffer. To avoid issues with fragmented cache coherent
392 * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
393 * The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
395 * Each DMA-able buffer may be allocated independently, or may be carved out
405 * allocated and a DMA-able buffer (carved or allocated) is attached.
413 * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
419 * state as-if upon an attach. All DMA-able buffers are retained.
422 * will avoid the case of a fragmented DMA-able region.
424 * +----------------------------------------------------------------------------
429 ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)
433 (msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + \
470 * msgbuf_ring : This object manages the host side ring that includes a DMA-able
489 dhd_dma_buf_t dma_buf; /* DMA-able buffer: pa, va, len, dmah, secdma */
511 #define DHD_RING_BGN_VA(ring) ((ring)->dma_buf.va)
514 (((ring)->max_items - 1) * (ring)->item_len))
646 /* DMA-able arrays for holding WR and RD indices */
787 /* APIs for managing a DMA-able buffer */
800 /* Pool of pre-allocated msgbuf_ring_t with DMA-able buffers for Flowrings */
1014 !ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \
1015 !ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \
1016 !eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \
1017 !eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \
1019 ((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \
1020 (((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6))))
1042 return &dhd->prot->fw_trap_buf; in dhd_prot_get_minidump_buf()
1049 return dhd->prot->rxbufpost_sz; in dhd_prot_get_rxbufpost_sz()
1055 dhd_prot_t *prot = dhd->prot; in dhd_prot_get_h2d_rx_post_active()
1056 msgbuf_ring_t *flow_ring = &prot->h2dring_rxp_subn; in dhd_prot_get_h2d_rx_post_active()
1060 wr = flow_ring->wr; in dhd_prot_get_h2d_rx_post_active()
1062 if (dhd->dma_d2h_ring_upd_support) { in dhd_prot_get_h2d_rx_post_active()
1063 rd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, flow_ring->idx); in dhd_prot_get_h2d_rx_post_active()
1065 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx); in dhd_prot_get_h2d_rx_post_active()
1067 return NTXPACTIVE(rd, wr, flow_ring->max_items); in dhd_prot_get_h2d_rx_post_active()
1073 dhd_prot_t *prot = dhd->prot; in dhd_prot_get_d2h_rx_cpln_active()
1074 msgbuf_ring_t *flow_ring = &prot->d2hring_rx_cpln; in dhd_prot_get_d2h_rx_cpln_active()
1077 if (dhd->dma_d2h_ring_upd_support) { in dhd_prot_get_d2h_rx_cpln_active()
1078 wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, flow_ring->idx); in dhd_prot_get_d2h_rx_cpln_active()
1080 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx); in dhd_prot_get_d2h_rx_cpln_active()
1084 rd = flow_ring->rd; in dhd_prot_get_d2h_rx_cpln_active()
1086 return NTXPACTIVE(rd, wr, flow_ring->max_items); in dhd_prot_get_d2h_rx_cpln_active()
1096 if (dhd->dma_d2h_ring_upd_support) { in dhd_prot_is_cmpl_ring_empty()
1097 wr = flow_ring->wr; in dhd_prot_is_cmpl_ring_empty()
1099 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx); in dhd_prot_is_cmpl_ring_empty()
1101 if (dhd->dma_h2d_ring_upd_support) { in dhd_prot_is_cmpl_ring_empty()
1102 rd = flow_ring->rd; in dhd_prot_is_cmpl_ring_empty()
1104 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx); in dhd_prot_is_cmpl_ring_empty()
1115 ring->curr_rd, ring->rd, ring->wr)); in dhd_prot_dump_ring_ptrs()
1164 * dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has
1176 uint32 ring_seqnum = ring->seqnum; in dhd_prot_d2h_sync_livelock()
1185 dhd, ring->name, msg_seqnum, ring_seqnum, ring_seqnum% D2H_EPOCH_MODULO, tries, in dhd_prot_d2h_sync_livelock()
1186 dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot, in dhd_prot_d2h_sync_livelock()
1187 ring->dma_buf.va, msg, ring->curr_rd, ring->rd, ring->wr)); in dhd_prot_d2h_sync_livelock()
1199 __FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state)); in dhd_prot_d2h_sync_livelock()
1203 dhd_bus_dump_console_buffer(dhd->bus); in dhd_prot_d2h_sync_livelock()
1207 if (dhd->memdump_enabled) { in dhd_prot_d2h_sync_livelock()
1209 dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK; in dhd_prot_d2h_sync_livelock()
1220 dhd->bus->no_cfg_restore = 1; in dhd_prot_d2h_sync_livelock()
1223 dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK; in dhd_prot_d2h_sync_livelock()
1227 dhd->livelock_occured = TRUE; in dhd_prot_d2h_sync_livelock()
1231 * dhd_prot_d2h_sync_seqnum - Sync on a D2H DMA completion using the SEQNUM
1239 uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO; in BCMFASTPATH()
1241 volatile uint32 *marker = (volatile uint32 *)msg + (num_words - 1); /* last word */ in BCMFASTPATH()
1242 dhd_prot_t *prot = dhd->prot; in BCMFASTPATH()
1248 ASSERT(msglen == ring->item_len); in BCMFASTPATH()
1257 * Case 1 - Apart from Host CPU some other bus master is in BCMFASTPATH()
1260 * Solution - Increase the number of tries. in BCMFASTPATH()
1262 * Case 2 - The 50usec delay given by the Host CPU is not in BCMFASTPATH()
1274 ring->seqnum++; /* next expected sequence number */ in BCMFASTPATH()
1279 if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) { in BCMFASTPATH()
1284 total_tries = (uint32)(((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries); in BCMFASTPATH()
1286 if (total_tries > prot->d2h_sync_wait_max) in BCMFASTPATH()
1287 prot->d2h_sync_wait_max = total_tries; in BCMFASTPATH()
1299 ring->seqnum++; /* skip this message ... leak of a pktid */ in BCMFASTPATH()
1300 return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */ in BCMFASTPATH()
1304 prot->d2h_sync_wait_tot += tries; in BCMFASTPATH()
1305 return msg->msg_type; in BCMFASTPATH()
1309 * dhd_prot_d2h_sync_xorcsum - Sync on a D2H DMA completion using the XORCSUM
1320 uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO; in BCMFASTPATH()
1321 dhd_prot_t *prot = dhd->prot; in BCMFASTPATH()
1326 ASSERT(msglen == ring->item_len); in BCMFASTPATH()
1335 * Case 1 - Apart from Host CPU some other bus master is in BCMFASTPATH()
1338 * Solution - Increase the number of tries. in BCMFASTPATH()
1340 * Case 2 - The 50usec delay given by the Host CPU is not in BCMFASTPATH()
1355 if (msg->epoch == ring_seqnum) { in BCMFASTPATH()
1359 ring->seqnum++; /* next expected sequence number */ in BCMFASTPATH()
1365 if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) { in BCMFASTPATH()
1371 total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries; in BCMFASTPATH()
1373 if (total_tries > prot->d2h_sync_wait_max) in BCMFASTPATH()
1374 prot->d2h_sync_wait_max = total_tries; in BCMFASTPATH()
1384 dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries, in BCMFASTPATH()
1387 ring->seqnum++; /* skip this message ... leak of a pktid */ in BCMFASTPATH()
1388 return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */ in BCMFASTPATH()
1392 prot->d2h_sync_wait_tot += tries; in BCMFASTPATH()
1393 return msg->msg_type; in BCMFASTPATH()
1397 * dhd_prot_d2h_sync_none - Dongle ensure that the DMA will complete and host
1409 if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) { in BCMFASTPATH()
1413 return msg->msg_type; in BCMFASTPATH()
1419 * dhd_prot_d2h_sync_edl - Sync on a D2H DMA completion by validating the cmn_msg_hdr_t
1427 * NOTE: - it was felt that calculating xorcsum for the entire payload (max length of 1648 bytes) is
1437 uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO; in BCMFASTPATH()
1438 dhd_prot_t *prot = dhd->prot; in BCMFASTPATH()
1453 * Case 1 - Apart from Host CPU some other bus master is in BCMFASTPATH()
1456 * Solution - Increase the number of tries. in BCMFASTPATH()
1458 * Case 2 - The 50usec delay given by the Host CPU is not in BCMFASTPATH()
1474 if (msg->epoch == ring_seqnum && in BCMFASTPATH()
1475 msg->msg_type == MSG_TYPE_INFO_PYLD && in BCMFASTPATH()
1476 msg->request_id > 0 && in BCMFASTPATH()
1477 msg->request_id <= ring->item_len) { in BCMFASTPATH()
1480 msglen = sizeof(cmn_msg_hdr_t) + msg->request_id; in BCMFASTPATH()
1482 if (msglen + sizeof(cmn_msg_hdr_t) <= ring->item_len) { in BCMFASTPATH()
1484 valid_msg = (trailer->epoch == ring_seqnum) && in BCMFASTPATH()
1485 (trailer->msg_type == msg->msg_type) && in BCMFASTPATH()
1486 (trailer->request_id == msg->request_id); in BCMFASTPATH()
1490 __FUNCTION__, trailer->epoch, trailer->request_id, in BCMFASTPATH()
1491 msg->epoch, msg->request_id)); in BCMFASTPATH()
1495 __FUNCTION__, msg->request_id)); in BCMFASTPATH()
1500 ring->seqnum++; /* next expected sequence number */ in BCMFASTPATH()
1501 if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) { in BCMFASTPATH()
1509 __FUNCTION__, ring_seqnum, msg->epoch, in BCMFASTPATH()
1510 msg->msg_type, msg->request_id)); in BCMFASTPATH()
1513 total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries; in BCMFASTPATH()
1515 if (total_tries > prot->d2h_sync_wait_max) in BCMFASTPATH()
1516 prot->d2h_sync_wait_max = total_tries; in BCMFASTPATH()
1528 DHD_ERROR(("%s: header: seqnum=%u; expected-seqnum=%u" in BCMFASTPATH()
1529 " msgtype=0x%x; expected-msgtype=0x%x" in BCMFASTPATH()
1530 " length=%u; expected-max-length=%u", __FUNCTION__, in BCMFASTPATH()
1531 msg->epoch, ring_seqnum, msg->msg_type, MSG_TYPE_INFO_PYLD, in BCMFASTPATH()
1532 msg->request_id, ring->item_len)); in BCMFASTPATH()
1535 (msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len) { in BCMFASTPATH()
1536 DHD_ERROR(("%s: trailer: seqnum=%u; expected-seqnum=%u" in BCMFASTPATH()
1537 " msgtype=0x%x; expected-msgtype=0x%x" in BCMFASTPATH()
1538 " length=%u; expected-length=%u", __FUNCTION__, in BCMFASTPATH()
1539 trailer->epoch, ring_seqnum, trailer->msg_type, MSG_TYPE_INFO_PYLD, in BCMFASTPATH()
1540 trailer->request_id, msg->request_id)); in BCMFASTPATH()
1545 if ((msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len) in BCMFASTPATH()
1548 len = ring->item_len; in BCMFASTPATH()
1550 dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries, in BCMFASTPATH()
1553 ring->seqnum++; /* skip this message */ in BCMFASTPATH()
1554 return BCME_ERROR; /* invalid msg_type 0 -> noop callback */ in BCMFASTPATH()
1558 msg->epoch, msg->request_id)); in BCMFASTPATH()
1560 prot->d2h_sync_wait_tot += tries; in BCMFASTPATH()
1565 * dhd_prot_d2h_sync_edl_none - Dongle ensure that the DMA will complete and host
1577 if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) { in BCMFASTPATH()
1581 if (msg->msg_type == MSG_TYPE_INFO_PYLD) in BCMFASTPATH()
1584 return msg->msg_type; in BCMFASTPATH()
1594 dhd->prot->ioctl_received = reason; in dhd_wakeup_ioctl_event()
1601 * dhd_prot_d2h_sync_init - Setup the host side DMA sync mode based on what
1607 dhd_prot_t *prot = dhd->prot; in dhd_prot_d2h_sync_init()
1608 prot->d2h_sync_wait_max = 0UL; in dhd_prot_d2h_sync_init()
1609 prot->d2h_sync_wait_tot = 0UL; in dhd_prot_d2h_sync_init()
1611 prot->d2hring_ctrl_cpln.seqnum = D2H_EPOCH_INIT_VAL; in dhd_prot_d2h_sync_init()
1612 prot->d2hring_ctrl_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT; in dhd_prot_d2h_sync_init()
1614 prot->d2hring_tx_cpln.seqnum = D2H_EPOCH_INIT_VAL; in dhd_prot_d2h_sync_init()
1615 prot->d2hring_tx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT; in dhd_prot_d2h_sync_init()
1617 prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL; in dhd_prot_d2h_sync_init()
1618 prot->d2hring_rx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT; in dhd_prot_d2h_sync_init()
1620 if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) { in dhd_prot_d2h_sync_init()
1621 prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum; in dhd_prot_d2h_sync_init()
1623 prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl; in dhd_prot_d2h_sync_init()
1626 } else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) { in dhd_prot_d2h_sync_init()
1627 prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum; in dhd_prot_d2h_sync_init()
1629 prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl; in dhd_prot_d2h_sync_init()
1633 prot->d2h_sync_cb = dhd_prot_d2h_sync_none; in dhd_prot_d2h_sync_init()
1635 prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl_none; in dhd_prot_d2h_sync_init()
1642 * dhd_prot_h2d_sync_init - Per H2D common ring, setup the msgbuf ring seqnum
1647 dhd_prot_t *prot = dhd->prot; in dhd_prot_h2d_sync_init()
1648 prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL; in dhd_prot_h2d_sync_init()
1650 prot->h2dring_rxp_subn.current_phase = 0; in dhd_prot_h2d_sync_init()
1652 prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL; in dhd_prot_h2d_sync_init()
1653 prot->h2dring_ctrl_subn.current_phase = 0; in dhd_prot_h2d_sync_init()
1656 /* +----------------- End of PCIE DHD H2D DMA SYNC ------------------------+ */
1659 * +---------------------------------------------------------------------------+
1660 * PCIE DMA-able buffer. Sets up a dhd_dma_buf_t object, which includes the
1663 * +---------------------------------------------------------------------------+
1669 base_addr->low_addr = htol32(PHYSADDRLO(pa)); in dhd_base_addr_htolpa()
1670 base_addr->high_addr = htol32(PHYSADDRHI(pa)); in dhd_base_addr_htolpa()
1674 * dhd_dma_buf_audit - Any audits on a DHD DMA Buffer.
1681 pa_lowaddr = PHYSADDRLO(dma_buf->pa); in dhd_dma_buf_audit()
1682 ASSERT(PHYSADDRLO(dma_buf->pa) || PHYSADDRHI(dma_buf->pa)); in dhd_dma_buf_audit()
1684 ASSERT(dma_buf->len != 0); in dhd_dma_buf_audit()
1686 /* test 32bit offset arithmetic over dma buffer for loss of carry-over */ in dhd_dma_buf_audit()
1687 end = (pa_lowaddr + dma_buf->len); /* end address */ in dhd_dma_buf_audit()
1691 __FUNCTION__, pa_lowaddr, dma_buf->len)); in dhd_dma_buf_audit()
1699 * dhd_dma_buf_alloc - Allocate a cache coherent DMA-able buffer.
1701 * returns non-zero negative error value on failure.
1707 osl_t *osh = dhd->osh; in dhd_dma_buf_alloc()
1712 ASSERT(dma_buf->va == NULL); in dhd_dma_buf_alloc()
1713 ASSERT(dma_buf->len == 0); in dhd_dma_buf_alloc()
1717 dma_pad = rem ? (DHD_DMA_PAD - rem) : 0; in dhd_dma_buf_alloc()
1719 dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad, in dhd_dma_buf_alloc()
1720 dma_align, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah); in dhd_dma_buf_alloc()
1722 if (dma_buf->va == NULL) { in dhd_dma_buf_alloc()
1728 dma_buf->len = buf_len; /* not including padded len */ in dhd_dma_buf_alloc()
1741 * dhd_dma_buf_reset - Reset a cache coherent DMA-able buffer.
1746 if ((dma_buf == NULL) || (dma_buf->va == NULL)) in dhd_dma_buf_reset()
1752 memset((void*)dma_buf->va, 0, dma_buf->len); in dhd_dma_buf_reset()
1753 OSL_CACHE_FLUSH((void *)dma_buf->va, dma_buf->len); in dhd_dma_buf_reset()
1765 * dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated using
1771 osl_t *osh = dhd->osh; in dhd_dma_buf_free()
1775 if (dma_buf->va == NULL) in dhd_dma_buf_free()
1782 DMA_FREE_CONSISTENT(osh, dma_buf->va, dma_buf->_alloced, in dhd_dma_buf_free()
1783 dma_buf->pa, dma_buf->dmah); in dhd_dma_buf_free()
1789 * dhd_dma_buf_init - Initialize a dhd_dma_buf with speicifed values.
1799 dma_buf->va = va; in dhd_dma_buf_init()
1800 dma_buf->len = len; in dhd_dma_buf_init()
1801 dma_buf->pa = pa; in dhd_dma_buf_init()
1802 dma_buf->dmah = dmah; in dhd_dma_buf_init()
1803 dma_buf->secdma = secdma; in dhd_dma_buf_init()
1809 /* +------------------ End of PCIE DHD DMA BUF ADT ------------------------+ */
1812 * +---------------------------------------------------------------------------+
1816 * +---------------------------------------------------------------------------+
1855 log = (dhd_pktid_log_t *)MALLOCZ(dhd->osh, log_size); in dhd_pktid_logging_init()
1862 log->items = num_items; in dhd_pktid_logging_init()
1863 log->index = 0; in dhd_pktid_logging_init()
1880 log_size = DHD_PKTID_LOG_SZ(log->items); in dhd_pktid_logging_fini()
1881 MFREE(dhd->osh, handle, log_size); in dhd_pktid_logging_fini()
1897 idx = log->index; in dhd_pktid_logging()
1898 log->map[idx].ts_nsec = OSL_LOCALTIME_NS(); in dhd_pktid_logging()
1899 log->map[idx].pa = pa; in dhd_pktid_logging()
1900 log->map[idx].pktid = pktid; in dhd_pktid_logging()
1901 log->map[idx].size = len; in dhd_pktid_logging()
1902 log->map[idx].pkttype = pkttype; in dhd_pktid_logging()
1903 log->index = (idx + 1) % (log->items); /* update index */ in dhd_pktid_logging()
1909 dhd_prot_t *prot = dhd->prot; in dhd_pktid_logging_dump()
1918 map_log = (dhd_pktid_log_t *)(prot->pktid_dma_map); in dhd_pktid_logging_dump()
1919 unmap_log = (dhd_pktid_log_t *)(prot->pktid_dma_unmap); in dhd_pktid_logging_dump()
1924 map_log->index, unmap_log->index, in dhd_pktid_logging_dump()
1928 (uint64)__virt_to_phys((ulong)(map_log->map)), in dhd_pktid_logging_dump()
1929 (uint32)(DHD_PKTID_LOG_ITEM_SZ * map_log->items), in dhd_pktid_logging_dump()
1930 (uint64)__virt_to_phys((ulong)(unmap_log->map)), in dhd_pktid_logging_dump()
1931 (uint32)(DHD_PKTID_LOG_ITEM_SZ * unmap_log->items))); in dhd_pktid_logging_dump()
1936 /* +----------------- End of DHD_MAP_PKTID_LOGGING -----------------------+ */
1939 * +---------------------------------------------------------------------------+
1946 * +---------------------------------------------------------------------------+
1957 #define MAX_TX_PKTID ((36 * 1024) - 1) /* Extend for 64 clients support. */
1997 ((strncmp(ring->name, "h2dflr", sizeof("h2dflr"))) == (0))
2226 dhd_prot_t *prot = dhd->prot; in dhd_get_pktid_map_type()
2229 if (pktid_map == prot->pktid_ctrl_map) { in dhd_get_pktid_map_type()
2231 } else if (pktid_map == prot->pktid_tx_map) { in dhd_get_pktid_map_type()
2233 } else if (pktid_map == prot->pktid_rx_map) { in dhd_get_pktid_map_type()
2243 * __dhd_pktid_audit - Use the mwbmap to audit validity of a pktid.
2260 flags = DHD_PKTID_AUDIT_LOCK(pktid_map->pktid_audit_lock); in __dhd_pktid_audit()
2262 handle = pktid_map->pktid_audit; in __dhd_pktid_audit()
2274 if ((pktid == DHD_PKTID_INVALID) || (pktid > pktid_map->items)) { in __dhd_pktid_audit()
2325 DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags); in __dhd_pktid_audit()
2328 dhd->pktid_audit_failed = TRUE; in __dhd_pktid_audit()
2384 * +---------------------------------------------------------------------------+
2400 * +---------------------------------------------------------------------------+
2414 osh = dhd->osh; in dhd_pktid_map_init()
2425 map->items = num_items; in dhd_pktid_map_init()
2426 map->avail = num_items; in dhd_pktid_map_init()
2428 map_items = DHD_PKIDMAP_ITEMS(map->items); in dhd_pktid_map_init()
2430 map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items); in dhd_pktid_map_init()
2433 map->pktid_lock = DHD_PKTID_LOCK_INIT(osh); in dhd_pktid_map_init()
2434 if (map->pktid_lock == NULL) { in dhd_pktid_map_init()
2439 map->keys = (dhd_pktid_key_t *)MALLOC(osh, map_keys_sz); in dhd_pktid_map_init()
2440 if (map->keys == NULL) { in dhd_pktid_map_init()
2441 DHD_ERROR(("%s:%d: MALLOC failed for map->keys size %d\n", in dhd_pktid_map_init()
2448 map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1); in dhd_pktid_map_init()
2449 if (map->pktid_audit == (struct bcm_mwbmap *)NULL) { in dhd_pktid_map_init()
2456 map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh); in dhd_pktid_map_init()
2460 map->keys[nkey] = nkey; /* populate with unique keys */ in dhd_pktid_map_init()
2461 map->lockers[nkey].state = LOCKER_IS_FREE; in dhd_pktid_map_init()
2462 map->lockers[nkey].pkt = NULL; /* bzero: redundant */ in dhd_pktid_map_init()
2463 map->lockers[nkey].len = 0; in dhd_pktid_map_init()
2467 map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY; /* tag locker #0 as inuse */ in dhd_pktid_map_init()
2468 map->lockers[DHD_PKTID_INVALID].pkt = NULL; /* bzero: redundant */ in dhd_pktid_map_init()
2469 map->lockers[DHD_PKTID_INVALID].len = 0; in dhd_pktid_map_init()
2473 bcm_mwbmap_force(map->pktid_audit, DHD_PKTID_INVALID); in dhd_pktid_map_init()
2481 if (map->pktid_audit != (struct bcm_mwbmap *)NULL) { in dhd_pktid_map_init()
2482 bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */ in dhd_pktid_map_init()
2483 map->pktid_audit = (struct bcm_mwbmap *)NULL; in dhd_pktid_map_init()
2484 if (map->pktid_audit_lock) in dhd_pktid_map_init()
2485 DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock); in dhd_pktid_map_init()
2489 if (map->keys) { in dhd_pktid_map_init()
2490 MFREE(osh, map->keys, map_keys_sz); in dhd_pktid_map_init()
2493 if (map->pktid_lock) { in dhd_pktid_map_init()
2494 DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock); in dhd_pktid_map_init()
2519 DHD_PKTID_LOCK(map->pktid_lock, flags); in dhd_pktid_map_reset()
2520 osh = dhd->osh; in dhd_pktid_map_reset()
2522 map_items = DHD_PKIDMAP_ITEMS(map->items); in dhd_pktid_map_reset()
2526 if (map->lockers[nkey].state == LOCKER_IS_BUSY) { in dhd_pktid_map_reset()
2527 locker = &map->lockers[nkey]; in dhd_pktid_map_reset()
2528 locker->state = LOCKER_IS_FREE; in dhd_pktid_map_reset()
2529 data_tx = (locker->pkttype == PKTTYPE_DATA_TX); in dhd_pktid_map_reset()
2531 OSL_ATOMIC_DEC(dhd->osh, &dhd->prot->active_tx_count); in dhd_pktid_map_reset()
2538 DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap, in dhd_pktid_map_reset()
2539 locker->pa, nkey, locker->len, in dhd_pktid_map_reset()
2540 locker->pkttype); in dhd_pktid_map_reset()
2543 DMA_UNMAP(osh, locker->pa, locker->len, locker->dir, 0, locker->dmah); in dhd_pktid_map_reset()
2544 dhd_prot_packet_free(dhd, (ulong*)locker->pkt, in dhd_pktid_map_reset()
2545 locker->pkttype, data_tx); in dhd_pktid_map_reset()
2552 map->keys[nkey] = nkey; /* populate with unique keys */ in dhd_pktid_map_reset()
2555 map->avail = map_items; in dhd_pktid_map_reset()
2556 memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items); in dhd_pktid_map_reset()
2557 DHD_PKTID_UNLOCK(map->pktid_lock, flags); in dhd_pktid_map_reset()
2572 DHD_PKTID_LOCK(map->pktid_lock, flags); in dhd_pktid_map_reset_ioctl()
2574 map_items = DHD_PKIDMAP_ITEMS(map->items); in dhd_pktid_map_reset_ioctl()
2577 if (map->lockers[nkey].state == LOCKER_IS_BUSY) { in dhd_pktid_map_reset_ioctl()
2584 locker = &map->lockers[nkey]; in dhd_pktid_map_reset_ioctl()
2585 retbuf.va = locker->pkt; in dhd_pktid_map_reset_ioctl()
2586 retbuf.len = locker->len; in dhd_pktid_map_reset_ioctl()
2587 retbuf.pa = locker->pa; in dhd_pktid_map_reset_ioctl()
2588 retbuf.dmah = locker->dmah; in dhd_pktid_map_reset_ioctl()
2589 retbuf.secdma = locker->secdma; in dhd_pktid_map_reset_ioctl()
2598 map->keys[nkey] = nkey; /* populate with unique keys */ in dhd_pktid_map_reset_ioctl()
2601 map->avail = map_items; in dhd_pktid_map_reset_ioctl()
2602 memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items); in dhd_pktid_map_reset_ioctl()
2603 DHD_PKTID_UNLOCK(map->pktid_lock, flags); in dhd_pktid_map_reset_ioctl()
2624 dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items); in dhd_pktid_map_fini()
2625 map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items); in dhd_pktid_map_fini()
2627 DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock); in dhd_pktid_map_fini()
2630 if (map->pktid_audit != (struct bcm_mwbmap *)NULL) { in dhd_pktid_map_fini()
2631 bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */ in dhd_pktid_map_fini()
2632 map->pktid_audit = (struct bcm_mwbmap *)NULL; in dhd_pktid_map_fini()
2633 if (map->pktid_audit_lock) { in dhd_pktid_map_fini()
2634 DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock); in dhd_pktid_map_fini()
2638 MFREE(dhd->osh, map->keys, map_keys_sz); in dhd_pktid_map_fini()
2639 VMFREE(dhd->osh, handle, dhd_pktid_map_sz); in dhd_pktid_map_fini()
2657 dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items); in dhd_pktid_map_fini_ioctl()
2658 map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items); in dhd_pktid_map_fini_ioctl()
2660 DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock); in dhd_pktid_map_fini_ioctl()
2663 if (map->pktid_audit != (struct bcm_mwbmap *)NULL) { in dhd_pktid_map_fini_ioctl()
2664 bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */ in dhd_pktid_map_fini_ioctl()
2665 map->pktid_audit = (struct bcm_mwbmap *)NULL; in dhd_pktid_map_fini_ioctl()
2666 if (map->pktid_audit_lock) { in dhd_pktid_map_fini_ioctl()
2667 DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock); in dhd_pktid_map_fini_ioctl()
2672 MFREE(dhd->osh, map->keys, map_keys_sz); in dhd_pktid_map_fini_ioctl()
2673 VMFREE(dhd->osh, handle, dhd_pktid_map_sz); in dhd_pktid_map_fini_ioctl()
2688 DHD_PKTID_LOCK(map->pktid_lock, flags); in BCMFASTPATH()
2689 avail = map->avail; in BCMFASTPATH()
2690 DHD_PKTID_UNLOCK(map->pktid_lock, flags); in BCMFASTPATH()
2696 * dhd_pktid_map_reserve - reserve a unique numbered key. Reserved locker is not
2714 DHD_PKTID_LOCK(map->pktid_lock, flags); in dhd_pktid_map_reserve()
2716 if ((int)(map->avail) <= 0) { /* no more pktids to allocate */ in dhd_pktid_map_reserve()
2717 map->failures++; in dhd_pktid_map_reserve()
2719 DHD_PKTID_UNLOCK(map->pktid_lock, flags); in dhd_pktid_map_reserve()
2723 ASSERT(map->avail <= map->items); in dhd_pktid_map_reserve()
2724 nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */ in dhd_pktid_map_reserve()
2726 if ((map->avail > map->items) || (nkey > map->items)) { in dhd_pktid_map_reserve()
2727 map->failures++; in dhd_pktid_map_reserve()
2729 " map->avail<%u>, nkey<%u>, pkttype<%u>\n", in dhd_pktid_map_reserve()
2730 __FUNCTION__, __LINE__, map->avail, nkey, in dhd_pktid_map_reserve()
2732 DHD_PKTID_UNLOCK(map->pktid_lock, flags); in dhd_pktid_map_reserve()
2736 locker = &map->lockers[nkey]; /* save packet metadata in locker */ in dhd_pktid_map_reserve()
2737 map->avail--; in dhd_pktid_map_reserve()
2738 locker->pkt = pkt; /* pkt is saved, other params not yet saved. */ in dhd_pktid_map_reserve()
2739 locker->len = 0; in dhd_pktid_map_reserve()
2740 locker->state = LOCKER_IS_BUSY; /* reserve this locker */ in dhd_pktid_map_reserve()
2742 DHD_PKTID_UNLOCK(map->pktid_lock, flags); in dhd_pktid_map_reserve()
2751 * dhd_pktid_map_save_metadata - Save metadata information in a locker
2768 DHD_PKTID_LOCK(map->pktid_lock, flags); in dhd_pktid_map_save_metadata()
2770 if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) { in dhd_pktid_map_save_metadata()
2773 DHD_PKTID_UNLOCK(map->pktid_lock, flags); in dhd_pktid_map_save_metadata()
2775 if (dhd->memdump_enabled) { in dhd_pktid_map_save_metadata()
2777 dhd->memdump_type = DUMP_TYPE_PKTID_INVALID; in dhd_pktid_map_save_metadata()
2786 locker = &map->lockers[nkey]; in dhd_pktid_map_save_metadata()
2794 locker->mpkt = mpkt; in dhd_pktid_map_save_metadata()
2795 locker->mpkt_pa = mpkt_pa; in dhd_pktid_map_save_metadata()
2796 locker->mpkt_len = mpkt_len; in dhd_pktid_map_save_metadata()
2797 locker->dmah = dmah; in dhd_pktid_map_save_metadata()
2799 DHD_PKTID_UNLOCK(map->pktid_lock, flags); in dhd_pktid_map_save_metadata()
2804 * dhd_pktid_map_save - Save a packet's parameters into a locker
2819 DHD_PKTID_LOCK(map->pktid_lock, flags); in dhd_pktid_map_save()
2821 if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) { in dhd_pktid_map_save()
2824 DHD_PKTID_UNLOCK(map->pktid_lock, flags); in dhd_pktid_map_save()
2826 if (dhd->memdump_enabled) { in dhd_pktid_map_save()
2828 dhd->memdump_type = DUMP_TYPE_PKTID_INVALID; in dhd_pktid_map_save()
2837 locker = &map->lockers[nkey]; in dhd_pktid_map_save()
2839 ASSERT(((locker->state == LOCKER_IS_BUSY) && (locker->pkt == pkt)) || in dhd_pktid_map_save()
2840 ((locker->state == LOCKER_IS_RSVD) && (locker->pkt == NULL))); in dhd_pktid_map_save()
2843 locker->dir = dir; in dhd_pktid_map_save()
2844 locker->pa = pa; in dhd_pktid_map_save()
2845 locker->len = (uint16)len; /* 16bit len */ in dhd_pktid_map_save()
2846 locker->dmah = dmah; /* 16bit len */ in dhd_pktid_map_save()
2847 locker->secdma = secdma; in dhd_pktid_map_save()
2848 locker->pkttype = pkttype; in dhd_pktid_map_save()
2849 locker->pkt = pkt; in dhd_pktid_map_save()
2850 locker->state = LOCKER_IS_BUSY; /* make this locker busy */ in dhd_pktid_map_save()
2852 DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_map, pa, nkey, len, pkttype); in dhd_pktid_map_save()
2854 DHD_PKTID_UNLOCK(map->pktid_lock, flags); in dhd_pktid_map_save()
2858 * dhd_pktid_map_alloc - Allocate a unique numbered key and save the packet
2894 DHD_PKTID_LOCK(map->pktid_lock, flags); in BCMFASTPATH()
2897 if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) { in BCMFASTPATH()
2900 DHD_PKTID_UNLOCK(map->pktid_lock, flags); in BCMFASTPATH()
2902 if (dhd->memdump_enabled) { in BCMFASTPATH()
2904 dhd->memdump_type = DUMP_TYPE_PKTID_INVALID; in BCMFASTPATH()
2913 locker = &map->lockers[nkey]; in BCMFASTPATH()
2914 mpkt = locker->mpkt; in BCMFASTPATH()
2915 *pmpkt_pa = locker->mpkt_pa; in BCMFASTPATH()
2916 *pmpkt_len = locker->mpkt_len; in BCMFASTPATH()
2918 *pdmah = locker->dmah; in BCMFASTPATH()
2919 locker->mpkt = NULL; in BCMFASTPATH()
2920 locker->mpkt_len = 0; in BCMFASTPATH()
2921 locker->dmah = NULL; in BCMFASTPATH()
2923 DHD_PKTID_UNLOCK(map->pktid_lock, flags); in BCMFASTPATH()
2929 * dhd_pktid_map_free - Given a numbered key, return the locker contents.
2949 DHD_PKTID_LOCK(map->pktid_lock, flags); in BCMFASTPATH()
2952 if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) { in BCMFASTPATH()
2955 DHD_PKTID_UNLOCK(map->pktid_lock, flags); in BCMFASTPATH()
2957 if (dhd->memdump_enabled) { in BCMFASTPATH()
2959 dhd->memdump_type = DUMP_TYPE_PKTID_INVALID; in BCMFASTPATH()
2968 locker = &map->lockers[nkey]; in BCMFASTPATH()
2975 if (locker->state == LOCKER_IS_FREE) { in BCMFASTPATH()
2978 DHD_PKTID_UNLOCK(map->pktid_lock, flags); in BCMFASTPATH()
2981 if (dhd->memdump_enabled) { in BCMFASTPATH()
2983 dhd->memdump_type = DUMP_TYPE_PKTID_INVALID; in BCMFASTPATH()
2996 if ((pkttype != PKTTYPE_NO_CHECK) && (locker->pkttype != pkttype)) { in BCMFASTPATH()
3001 PHYSADDRTOULONG(locker->pa, locker_addr); in BCMFASTPATH()
3003 locker_addr = PHYSADDRLO(locker->pa); in BCMFASTPATH()
3005 DHD_ERROR(("%s:%d: locker->state <%d>, locker->pkttype <%d>," in BCMFASTPATH()
3006 "pkttype <%d> locker->pa <0x%llx> \n", in BCMFASTPATH()
3007 __FUNCTION__, __LINE__, locker->state, locker->pkttype, in BCMFASTPATH()
3009 DHD_PKTID_UNLOCK(map->pktid_lock, flags); in BCMFASTPATH()
3011 if (dhd->memdump_enabled) { in BCMFASTPATH()
3013 dhd->memdump_type = DUMP_TYPE_PKTID_INVALID; in BCMFASTPATH()
3023 map->avail++; in BCMFASTPATH()
3024 map->keys[map->avail] = nkey; /* make this numbered key available */ in BCMFASTPATH()
3025 locker->state = LOCKER_IS_FREE; /* open and free Locker */ in BCMFASTPATH()
3028 locker->state = LOCKER_IS_RSVD; in BCMFASTPATH()
3035 DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap, locker->pa, nkey, in BCMFASTPATH()
3036 (uint32)locker->len, pkttype); in BCMFASTPATH()
3039 *pa = locker->pa; /* return contents of locker */ in BCMFASTPATH()
3040 *len = (uint32)locker->len; in BCMFASTPATH()
3041 *dmah = locker->dmah; in BCMFASTPATH()
3042 *secdma = locker->secdma; in BCMFASTPATH()
3044 pkt = locker->pkt; in BCMFASTPATH()
3045 locker->pkt = NULL; /* Clear pkt */ in BCMFASTPATH()
3046 locker->len = 0; in BCMFASTPATH()
3048 DHD_PKTID_UNLOCK(map->pktid_lock, flags); in BCMFASTPATH()
3056 #error "DHD_PCIE_PKTID has to be defined for non-linux/android platforms"
3069 * - When PKTIDMAP is not used, DHD_NATIVE_TO_PKTID variants will never fail.
3070 * - Neither DHD_NATIVE_TO_PKTID nor DHD_PKTID_TO_NATIVE need to be protected by
3072 * - Hence DHD_PKTID_INVALID is not defined when DHD_PCIE_PKTID is undefined.
3087 osl_t *osh = dhd->osh; in dhd_pktid_map_init()
3096 if ((handle->tx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) { in dhd_pktid_map_init()
3102 if ((handle->rx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) { in dhd_pktid_map_init()
3108 if ((handle->ctrl_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) { in dhd_pktid_map_init()
3114 PKTLIST_INIT(handle->tx_pkt_list); in dhd_pktid_map_init()
3115 PKTLIST_INIT(handle->rx_pkt_list); in dhd_pktid_map_init()
3116 PKTLIST_INIT(handle->ctrl_pkt_list); in dhd_pktid_map_init()
3121 if (handle->ctrl_pkt_list) { in dhd_pktid_map_init()
3122 MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST)); in dhd_pktid_map_init()
3125 if (handle->rx_pkt_list) { in dhd_pktid_map_init()
3126 MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST)); in dhd_pktid_map_init()
3129 if (handle->tx_pkt_list) { in dhd_pktid_map_init()
3130 MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST)); in dhd_pktid_map_init()
3144 osl_t *osh = dhd->osh; in dhd_pktid_map_reset()
3146 if (handle->ctrl_pkt_list) { in dhd_pktid_map_reset()
3147 PKTLIST_FINI(handle->ctrl_pkt_list); in dhd_pktid_map_reset()
3148 MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST)); in dhd_pktid_map_reset()
3151 if (handle->rx_pkt_list) { in dhd_pktid_map_reset()
3152 PKTLIST_FINI(handle->rx_pkt_list); in dhd_pktid_map_reset()
3153 MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST)); in dhd_pktid_map_reset()
3156 if (handle->tx_pkt_list) { in dhd_pktid_map_reset()
3157 PKTLIST_FINI(handle->tx_pkt_list); in dhd_pktid_map_reset()
3158 MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST)); in dhd_pktid_map_reset()
3165 osl_t *osh = dhd->osh; in dhd_pktid_map_fini()
3195 PKTLIST_ENQ(handle->tx_pkt_list, pktptr32); in dhd_native_to_pktid()
3197 PKTLIST_ENQ(handle->rx_pkt_list, pktptr32); in dhd_native_to_pktid()
3199 PKTLIST_ENQ(handle->ctrl_pkt_list, pktptr32); in dhd_native_to_pktid()
3223 PKTLIST_UNLINK(handle->tx_pkt_list, pktptr32); in dhd_pktid_to_native()
3225 PKTLIST_UNLINK(handle->rx_pkt_list, pktptr32); in dhd_pktid_to_native()
3227 PKTLIST_UNLINK(handle->ctrl_pkt_list, pktptr32); in dhd_pktid_to_native()
3258 /* +------------------ End of PCIE DHD PKTID MAPPER -----------------------+ */
3272 if (dhd_prot_ring_attach(dhd, &prot->h2dring_ctrl_subn, "h2dctrl", in dhd_prot_allocate_bufs()
3281 if (dhd_prot_ring_attach(dhd, &prot->h2dring_rxp_subn, "h2drxp", in dhd_prot_allocate_bufs()
3290 if (dhd_prot_ring_attach(dhd, &prot->d2hring_ctrl_cpln, "d2hctrl", in dhd_prot_allocate_bufs()
3299 if (dhd_prot_ring_attach(dhd, &prot->d2hring_tx_cpln, "d2htxcpl", in dhd_prot_allocate_bufs()
3309 if (dhd_prot_ring_attach(dhd, &prot->d2hring_rx_cpln, "d2hrxcpl", in dhd_prot_allocate_bufs()
3319 * Max number of flowrings is not yet known. msgbuf_ring_t with DMA-able in dhd_prot_allocate_bufs()
3324 if (dhd_dma_buf_alloc(dhd, &prot->retbuf, IOCT_RETBUF_SIZE)) { in dhd_prot_allocate_bufs()
3329 if (dhd_dma_buf_alloc(dhd, &prot->ioctbuf, IOCT_RETBUF_SIZE)) { in dhd_prot_allocate_bufs()
3334 if (dhd_dma_buf_alloc(dhd, &prot->hostts_req_buf, CTRLSUB_HOSTTS_MEESAGE_SIZE)) { in dhd_prot_allocate_bufs()
3337 prot->hostts_req_buf_inuse = FALSE; in dhd_prot_allocate_bufs()
3341 if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, in dhd_prot_allocate_bufs()
3344 if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN)) in dhd_prot_allocate_bufs()
3354 if (dhd_dma_buf_alloc(dhd, &prot->hmaptest.mem, HMAP_SANDBOX_BUFFER_LEN)) { in dhd_prot_allocate_bufs()
3362 scratch_pa = prot->hmaptest.mem.pa; in dhd_prot_allocate_bufs()
3363 scratch_len = prot->hmaptest.mem.len; in dhd_prot_allocate_bufs()
3373 if (dhd_dma_buf_alloc(dhd, &prot->host_bus_throughput_buf, DHD_BUS_TPUT_BUF_LEN)) { in dhd_prot_allocate_bufs()
3379 if (dhd_dma_buf_alloc(dhd, &prot->snapshot_upload_buf, SNAPSHOT_UPLOAD_BUF_SIZE)) { in dhd_prot_allocate_bufs()
3395 * dhd_prot_attach() - Allocates a dhd_prot_t object and resets all its fields.
3397 * with DMA-able buffers).
3405 * whether the dongle supports DMA-ing of WR/RD indices for the H2D and/or D2H
3414 osl_t *osh = dhd->osh; in dhd_prot_attach()
3426 prot->osh = osh; in dhd_prot_attach()
3427 dhd->prot = prot; in dhd_prot_attach()
3430 dhd->dma_d2h_ring_upd_support = FALSE; in dhd_prot_attach()
3431 dhd->dma_h2d_ring_upd_support = FALSE; in dhd_prot_attach()
3432 dhd->dma_ring_upd_overwrite = FALSE; in dhd_prot_attach()
3434 dhd->idma_inited = 0; in dhd_prot_attach()
3435 dhd->ifrm_inited = 0; in dhd_prot_attach()
3436 dhd->dar_inited = 0; in dhd_prot_attach()
3443 dhd_rxchain_reset(&prot->rxchain); in dhd_prot_attach()
3446 prot->pktid_ctrl_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_CTRL); in dhd_prot_attach()
3447 if (prot->pktid_ctrl_map == NULL) { in dhd_prot_attach()
3451 prot->pktid_rx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_RX); in dhd_prot_attach()
3452 if (prot->pktid_rx_map == NULL) in dhd_prot_attach()
3455 prot->pktid_tx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_TX); in dhd_prot_attach()
3456 if (prot->pktid_rx_map == NULL) in dhd_prot_attach()
3460 prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd, in dhd_prot_attach()
3462 if (prot->pktid_map_handle_ioctl == NULL) { in dhd_prot_attach()
3468 prot->pktid_dma_map = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG); in dhd_prot_attach()
3469 if (prot->pktid_dma_map == NULL) { in dhd_prot_attach()
3474 prot->pktid_dma_unmap = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG); in dhd_prot_attach()
3475 if (prot->pktid_dma_unmap == NULL) { in dhd_prot_attach()
3482 if (dhd->bus->sih->buscorerev < 71) { in dhd_prot_attach()
3496 if (dhd_dma_buf_alloc(dhd, &dhd->prot->fw_trap_buf, trap_buf_len)) { in dhd_prot_attach()
3518 dhd_prot_t *prot = dhd->prot; in dhd_alloc_host_scbs()
3521 if (dhd->hscb_enable) { in dhd_alloc_host_scbs()
3523 dhd_bus_cmn_readshared(dhd->bus, &host_scb_size, HOST_SCB_ADDR, 0); in dhd_alloc_host_scbs()
3529 if (prot->host_scb_buf.va) { in dhd_alloc_host_scbs()
3530 if (prot->host_scb_buf.len >= host_scb_size) { in dhd_alloc_host_scbs()
3531 prot->host_scb_buf.len = host_scb_size; in dhd_alloc_host_scbs()
3533 dhd_dma_buf_free(dhd, &prot->host_scb_buf); in dhd_alloc_host_scbs()
3537 if (prot->host_scb_buf.va == NULL) { in dhd_alloc_host_scbs()
3538 ret = dhd_dma_buf_alloc(dhd, &prot->host_scb_buf, host_scb_size); in dhd_alloc_host_scbs()
3542 dhd_base_addr_htolpa(&base_addr, prot->host_scb_buf.pa); in dhd_alloc_host_scbs()
3543 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), in dhd_alloc_host_scbs()
3562 dhd_prot_t *prot = dhd->prot; in dhd_set_host_cap()
3567 if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) { in dhd_set_host_cap()
3568 if (dhd->h2d_phase_supported) { in dhd_set_host_cap()
3570 if (dhd->force_dongletrap_on_bad_h2d_phase) in dhd_set_host_cap()
3573 if (prot->host_ipc_version > prot->device_ipc_version) in dhd_set_host_cap()
3574 prot->active_ipc_version = prot->device_ipc_version; in dhd_set_host_cap()
3576 prot->active_ipc_version = prot->host_ipc_version; in dhd_set_host_cap()
3578 data |= prot->active_ipc_version; in dhd_set_host_cap()
3580 if (dhdpcie_bus_get_pcie_hostready_supported(dhd->bus)) { in dhd_set_host_cap()
3585 if (dhdpcie_bus_get_pcie_inband_dw_supported(dhd->bus)) { in dhd_set_host_cap()
3586 DHD_INFO(("Advertise Inband-DW Capability\n")); in dhd_set_host_cap()
3589 dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_INB); in dhd_set_host_cap()
3590 if (!dhd->dma_h2d_ring_upd_support || !dhd->dma_d2h_ring_upd_support) { in dhd_set_host_cap()
3591 dhd_init_dongle_ds_lock(dhd->bus); in dhd_set_host_cap()
3592 dhdpcie_set_dongle_deepsleep(dhd->bus, FALSE); in dhd_set_host_cap()
3597 if (dhdpcie_bus_get_pcie_oob_dw_supported(dhd->bus)) { in dhd_set_host_cap()
3598 dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_OOB); in dhd_set_host_cap()
3604 dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_NONE); in dhd_set_host_cap()
3611 if (dhd->pcie_txs_metadata_enable != 0) in dhd_set_host_cap()
3616 if (dhd->bt_logging) { in dhd_set_host_cap()
3617 if (dhd->bt_logging_enabled) { in dhd_set_host_cap()
3631 if (dhd->fast_delete_ring_support) { in dhd_set_host_cap()
3635 if (dhdpcie_bus_get_pcie_idma_supported(dhd->bus)) { in dhd_set_host_cap()
3638 dhd->idma_inited = TRUE; in dhd_set_host_cap()
3641 dhd->idma_inited = FALSE; in dhd_set_host_cap()
3644 if (dhdpcie_bus_get_pcie_ifrm_supported(dhd->bus)) { in dhd_set_host_cap()
3647 dhd->ifrm_inited = TRUE; in dhd_set_host_cap()
3648 dhd->dma_h2d_ring_upd_support = FALSE; in dhd_set_host_cap()
3652 dhd->ifrm_inited = FALSE; in dhd_set_host_cap()
3655 if (dhdpcie_bus_get_pcie_dar_supported(dhd->bus)) { in dhd_set_host_cap()
3658 dhd->dar_inited = TRUE; in dhd_set_host_cap()
3661 dhd->dar_inited = FALSE; in dhd_set_host_cap()
3665 * Radar 36403220 JIRA SWWLAN-182145 in dhd_set_host_cap()
3671 if (dhd->snapshot_upload) { in dhd_set_host_cap()
3677 if (dhd->hscb_enable) { in dhd_set_host_cap()
3682 if (dhd->dongle_edl_support) { in dhd_set_host_cap()
3697 if (dhdpcie_bus_get_hp2p_supported(dhd->bus)) { in dhd_set_host_cap()
3707 if (dhd->db0ts_capable) { in dhd_set_host_cap()
3709 DHD_ERROR(("Enable DB0 TS in host cap\n")); in dhd_set_host_cap()
3711 DHD_ERROR(("DB0 TS not enabled in host cap\n")); in dhd_set_host_cap()
3714 if (dhd->extdtxs_in_txcpl) { in dhd_set_host_cap()
3724 prot->active_ipc_version, prot->host_ipc_version, in dhd_set_host_cap()
3725 prot->device_ipc_version)); in dhd_set_host_cap()
3727 dhd_bus_cmn_writeshared(dhd->bus, &data, sizeof(uint32), HOST_API_VERSION, 0); in dhd_set_host_cap()
3728 dhd_bus_cmn_writeshared(dhd->bus, &prot->fw_trap_buf.pa, in dhd_set_host_cap()
3729 sizeof(prot->fw_trap_buf.pa), DNGL_TO_HOST_TRAP_ADDR, 0); in dhd_set_host_cap()
3733 host_trap_addr_len = prot->fw_trap_buf.len / 4; in dhd_set_host_cap()
3734 dhd_bus_cmn_writeshared(dhd->bus, &host_trap_addr_len, in dhd_set_host_cap()
3741 dhd_timesync_notify_ipc_rev(dhd->ts, prot->active_ipc_version); in dhd_set_host_cap()
3748 uint64 *inflight_histo = dhd->prot->agg_h2d_db_info.inflight_histo; in dhd_agg_inflight_stats_dump()
3762 uint64 *bin = dhd->prot->agg_h2d_db_info.inflight_histo; in dhd_agg_inflights_stats_update()
3784 ASSERT((p - bin) < DHD_NUM_INFLIGHT_HISTO_ROWS); in dhd_agg_inflights_stats_update()
3809 dhd = agg_db_info->dhd; in dhd_msgbuf_agg_h2d_db_timer_fn()
3810 prot = dhd->prot; in dhd_msgbuf_agg_h2d_db_timer_fn()
3812 prot->agg_h2d_db_info.timer_db_cnt++; in dhd_msgbuf_agg_h2d_db_timer_fn()
3815 if (dhd->bus->sih) { in dhd_msgbuf_agg_h2d_db_timer_fn()
3816 corerev = dhd->bus->sih->buscorerev; in dhd_msgbuf_agg_h2d_db_timer_fn()
3821 prot->mb_2_ring_fn(dhd->bus, db_index, TRUE); in dhd_msgbuf_agg_h2d_db_timer_fn()
3823 prot->mb_ring_fn(dhd->bus, DHD_AGGR_H2D_DB_MAGIC); in dhd_msgbuf_agg_h2d_db_timer_fn()
3832 agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info; in dhd_msgbuf_agg_h2d_db_timer_start()
3835 if (!hrtimer_active(&agg_db_info->timer)) { in dhd_msgbuf_agg_h2d_db_timer_start()
3836 hrtimer_start(&agg_db_info->timer, ns_to_ktime(agg_h2d_db_timeout * NSEC_PER_USEC), in dhd_msgbuf_agg_h2d_db_timer_start()
3844 dhd_prot_t *prot = dhd->prot; in dhd_msgbuf_agg_h2d_db_timer_init()
3845 agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info; in dhd_msgbuf_agg_h2d_db_timer_init()
3847 agg_db_info->dhd = dhd; in dhd_msgbuf_agg_h2d_db_timer_init()
3848 hrtimer_init(&agg_db_info->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in dhd_msgbuf_agg_h2d_db_timer_init()
3850 agg_db_info->timer.function = &dhd_msgbuf_agg_h2d_db_timer_fn; in dhd_msgbuf_agg_h2d_db_timer_init()
3851 agg_db_info->init = TRUE; in dhd_msgbuf_agg_h2d_db_timer_init()
3852 agg_db_info->timer_db_cnt = 0; in dhd_msgbuf_agg_h2d_db_timer_init()
3853 agg_db_info->direct_db_cnt = 0; in dhd_msgbuf_agg_h2d_db_timer_init()
3854 agg_db_info->inflight_histo = (uint64 *)MALLOCZ(dhd->osh, DHD_INFLIGHT_HISTO_SIZE); in dhd_msgbuf_agg_h2d_db_timer_init()
3860 dhd_prot_t *prot = dhd->prot; in dhd_msgbuf_agg_h2d_db_timer_reset()
3861 agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info; in dhd_msgbuf_agg_h2d_db_timer_reset()
3862 if (agg_db_info->init) { in dhd_msgbuf_agg_h2d_db_timer_reset()
3863 if (agg_db_info->inflight_histo) { in dhd_msgbuf_agg_h2d_db_timer_reset()
3864 MFREE(dhd->osh, agg_db_info->inflight_histo, DHD_INFLIGHT_HISTO_SIZE); in dhd_msgbuf_agg_h2d_db_timer_reset()
3866 hrtimer_try_to_cancel(&agg_db_info->timer); in dhd_msgbuf_agg_h2d_db_timer_reset()
3867 agg_db_info->init = FALSE; in dhd_msgbuf_agg_h2d_db_timer_reset()
3874 dhd_prot_t *prot = dhd->prot; in dhd_msgbuf_agg_h2d_db_timer_cancel()
3875 agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info; in dhd_msgbuf_agg_h2d_db_timer_cancel()
3876 hrtimer_try_to_cancel(&agg_db_info->timer); in dhd_msgbuf_agg_h2d_db_timer_cancel()
3883 dhd_prot_t *prot = dhd->prot; in dhd_prot_clearcounts()
3885 agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info; in dhd_prot_clearcounts()
3886 if (agg_db_info->inflight_histo) { in dhd_prot_clearcounts()
3887 memset(agg_db_info->inflight_histo, 0, DHD_INFLIGHT_HISTO_SIZE); in dhd_prot_clearcounts()
3889 agg_db_info->direct_db_cnt = 0; in dhd_prot_clearcounts()
3890 agg_db_info->timer_db_cnt = 0; in dhd_prot_clearcounts()
3892 prot->txcpl_db_cnt = 0; in dhd_prot_clearcounts()
3893 prot->tx_h2d_db_cnt = 0; in dhd_prot_clearcounts()
3897 * dhd_prot_init - second stage of dhd_prot_attach. Now that the dongle has
3907 dhd_prot_t *prot = dhd->prot; in dhd_prot_init()
3917 * prot->h2d_max_txpost is assigned with DHD_H2DRING_TXPOST_MAX_ITEM, in dhd_prot_init()
3920 prot->h2d_max_txpost = (uint16)h2d_max_txpost; in dhd_prot_init()
3921 DHD_ERROR(("%s:%d: h2d_max_txpost = %d\n", __FUNCTION__, __LINE__, prot->h2d_max_txpost)); in dhd_prot_init()
3924 prot->h2d_htput_max_txpost = (uint16)h2d_htput_max_txpost; in dhd_prot_init()
3926 __FUNCTION__, __LINE__, prot->h2d_htput_max_txpost)); in dhd_prot_init()
3930 dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0); in dhd_prot_init()
3931 if (prot->max_rxbufpost == 0) { in dhd_prot_init()
3934 prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST; in dhd_prot_init()
3936 DHD_ERROR(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost)); in dhd_prot_init()
3939 max_eventbufpost = (uint16)dhdpcie_get_max_eventbufpost(dhd->bus); in dhd_prot_init()
3940 prot->max_eventbufpost = (((max_eventbufpost + DHD_FLOWRING_MAX_IOCTLRESPBUF_POST)) >= in dhd_prot_init()
3942 prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST; in dhd_prot_init()
3943 prot->max_infobufpost = DHD_H2D_INFORING_MAX_BUF_POST; in dhd_prot_init()
3945 prot->max_btlogbufpost = DHD_H2D_BTLOGRING_MAX_BUF_POST; in dhd_prot_init()
3947 prot->max_tsbufpost = DHD_MAX_TSBUF_POST; in dhd_prot_init()
3949 prot->cur_ioctlresp_bufs_posted = 0; in dhd_prot_init()
3950 OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count); in dhd_prot_init()
3951 prot->data_seq_no = 0; in dhd_prot_init()
3952 prot->ioctl_seq_no = 0; in dhd_prot_init()
3953 prot->rxbufpost = 0; in dhd_prot_init()
3954 prot->tot_rxbufpost = 0; in dhd_prot_init()
3955 prot->tot_rxcpl = 0; in dhd_prot_init()
3956 prot->cur_event_bufs_posted = 0; in dhd_prot_init()
3957 prot->ioctl_state = 0; in dhd_prot_init()
3958 prot->curr_ioctl_cmd = 0; in dhd_prot_init()
3959 prot->cur_ts_bufs_posted = 0; in dhd_prot_init()
3960 prot->infobufpost = 0; in dhd_prot_init()
3962 prot->btlogbufpost = 0; in dhd_prot_init()
3965 prot->dmaxfer.srcmem.va = NULL; in dhd_prot_init()
3966 prot->dmaxfer.dstmem.va = NULL; in dhd_prot_init()
3967 prot->dmaxfer.in_progress = FALSE; in dhd_prot_init()
3970 prot->hmaptest.in_progress = FALSE; in dhd_prot_init()
3972 prot->metadata_dbg = FALSE; in dhd_prot_init()
3973 prot->rx_metadata_offset = 0; in dhd_prot_init()
3974 prot->tx_metadata_offset = 0; in dhd_prot_init()
3975 prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT; in dhd_prot_init()
3978 prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER; in dhd_prot_init()
3979 prot->ioctl_state = 0; in dhd_prot_init()
3980 prot->ioctl_status = 0; in dhd_prot_init()
3981 prot->ioctl_resplen = 0; in dhd_prot_init()
3982 prot->ioctl_received = IOCTL_WAIT; in dhd_prot_init()
3986 prot->device_ipc_version = dhd->bus->api.fw_rev; in dhd_prot_init()
3987 prot->host_ipc_version = PCIE_SHARED_VERSION; in dhd_prot_init()
3988 prot->no_tx_resource = FALSE; in dhd_prot_init()
4001 prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus); in dhd_prot_init()
4002 prot->mb_2_ring_fn = dhd_bus_get_mbintr_2_fn(dhd->bus); in dhd_prot_init()
4004 prot->tx_h2d_db_cnt = 0; in dhd_prot_init()
4009 dhd->bus->_dar_war = (dhd->bus->sih->buscorerev < 64) ? TRUE : FALSE; in dhd_prot_init()
4014 if (dhd->dma_d2h_ring_upd_support) { in dhd_prot_init()
4015 dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_wr_buf.pa); in dhd_prot_init()
4016 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), in dhd_prot_init()
4018 dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_rd_buf.pa); in dhd_prot_init()
4019 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), in dhd_prot_init()
4023 if (dhd->dma_h2d_ring_upd_support || IDMA_ENAB(dhd)) { in dhd_prot_init()
4024 dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_wr_buf.pa); in dhd_prot_init()
4025 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), in dhd_prot_init()
4027 dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_rd_buf.pa); in dhd_prot_init()
4028 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), in dhd_prot_init()
4032 dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn); in dhd_prot_init()
4033 dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn); in dhd_prot_init()
4034 dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln); in dhd_prot_init()
4036 /* Make it compatibile with pre-rev7 Firmware */ in dhd_prot_init()
4037 if (prot->active_ipc_version < PCIE_SHARED_VERSION_7) { in dhd_prot_init()
4038 prot->d2hring_tx_cpln.item_len = in dhd_prot_init()
4040 prot->d2hring_rx_cpln.item_len = in dhd_prot_init()
4043 dhd_prot_ring_init(dhd, &prot->d2hring_tx_cpln); in dhd_prot_init()
4044 dhd_prot_ring_init(dhd, &prot->d2hring_rx_cpln); in dhd_prot_init()
4052 if (INBAND_DW_ENAB(dhd->bus)) { in dhd_prot_init()
4053 dhdpcie_bus_set_pcie_inband_dw_state(dhd->bus, in dhd_prot_init()
4059 dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_scratch_buf.pa); in dhd_prot_init()
4060 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), in dhd_prot_init()
4062 dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.len, in dhd_prot_init()
4063 sizeof(prot->d2h_dma_scratch_buf.len), D2H_DMA_SCRATCH_BUF_LEN, 0); in dhd_prot_init()
4065 prot->host_seqnum = D2H_EPOCH_INIT_VAL % D2H_EPOCH_MODULO; in dhd_prot_init()
4068 if (dhd->hostrdy_after_init) in dhd_prot_init()
4069 dhd_bus_hostready(dhd->bus); in dhd_prot_init()
4072 * If the DMA-able buffers for flowring needs to come from a specific in dhd_prot_init()
4073 * contiguous memory region, then setup prot->flowrings_dma_buf here. in dhd_prot_init()
4074 * dhd_prot_flowrings_pool_attach() will carve out DMA-able buffers from in dhd_prot_init()
4078 /* Pre-allocate pool of msgbuf_ring for flowrings */ in dhd_prot_init()
4083 dhd->ring_attached = TRUE; in dhd_prot_init()
4087 dhd_base_addr_htolpa(&base_addr, prot->h2d_ifrm_indx_wr_buf.pa); in dhd_prot_init()
4088 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), in dhd_prot_init()
4103 uint buscorerev = dhd->bus->sih->buscorerev; in dhd_prot_init()
4104 idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, in dhd_prot_init()
4112 idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, in dhd_prot_init()
4135 prot->no_retry = FALSE; in dhd_prot_init()
4136 prot->no_aggr = FALSE; in dhd_prot_init()
4137 prot->fixed_rate = FALSE; in dhd_prot_init()
4155 if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support) in dhd_prot_init()
4157 if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) in dhd_prot_init()
4171 if (dhd->dongle_edl_support) { in dhd_prot_init()
4181 if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_7 && dhd->bt_logging) { in dhd_prot_init()
4194 if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_7 && dhd->hp2p_capable) { in dhd_prot_init()
4207 dhd->lb_rxp_stop_thr = (D2HRING_RXCMPLT_MAX_ITEM * LB_RXP_STOP_THR); in dhd_prot_init()
4208 dhd->lb_rxp_strt_thr = (D2HRING_RXCMPLT_MAX_ITEM * LB_RXP_STRT_THR); in dhd_prot_init()
4209 atomic_set(&dhd->lb_rxp_flow_ctrl, FALSE); in dhd_prot_init()
4215 * dhd_prot_detach - PCIE FD protocol layer destructor.
4220 dhd_prot_t *prot = dhd->prot; in dhd_prot_detach()
4224 /* For non-android platforms, devreset will not be called, in dhd_prot_detach()
4229 /* free up all DMA-able buffers allocated during prot attach/init */ in dhd_prot_detach()
4231 dhd_dma_buf_free(dhd, &prot->d2h_dma_scratch_buf); in dhd_prot_detach()
4233 dhd_dma_buf_free(dhd, &prot->hmaptest.mem); in dhd_prot_detach()
4235 dhd_dma_buf_free(dhd, &prot->retbuf); in dhd_prot_detach()
4236 dhd_dma_buf_free(dhd, &prot->ioctbuf); in dhd_prot_detach()
4237 dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf); in dhd_prot_detach()
4238 dhd_dma_buf_free(dhd, &prot->hostts_req_buf); in dhd_prot_detach()
4239 dhd_dma_buf_free(dhd, &prot->fw_trap_buf); in dhd_prot_detach()
4240 dhd_dma_buf_free(dhd, &prot->host_scb_buf); in dhd_prot_detach()
4242 dhd_dma_buf_free(dhd, &prot->snapshot_upload_buf); in dhd_prot_detach()
4245 /* DMA-able buffers for DMAing H2D/D2H WR/RD indices */ in dhd_prot_detach()
4246 dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf); in dhd_prot_detach()
4247 dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_rd_buf); in dhd_prot_detach()
4248 dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_wr_buf); in dhd_prot_detach()
4249 dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf); in dhd_prot_detach()
4251 dhd_dma_buf_free(dhd, &prot->h2d_ifrm_indx_wr_buf); in dhd_prot_detach()
4254 dhd_prot_ring_detach(dhd, &prot->h2dring_ctrl_subn); in dhd_prot_detach()
4255 dhd_prot_ring_detach(dhd, &prot->h2dring_rxp_subn); in dhd_prot_detach()
4256 dhd_prot_ring_detach(dhd, &prot->d2hring_ctrl_cpln); in dhd_prot_detach()
4257 dhd_prot_ring_detach(dhd, &prot->d2hring_tx_cpln); in dhd_prot_detach()
4258 dhd_prot_ring_detach(dhd, &prot->d2hring_rx_cpln); in dhd_prot_detach()
4260 /* Detach each DMA-able buffer and free the pool of msgbuf_ring_t */ in dhd_prot_detach()
4288 DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_ctrl_map); in dhd_prot_detach()
4289 DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_rx_map); in dhd_prot_detach()
4290 DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_tx_map); in dhd_prot_detach()
4292 DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl); in dhd_prot_detach()
4295 DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_map); in dhd_prot_detach()
4296 DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_unmap); in dhd_prot_detach()
4299 if (prot->h2d_dma_indx_rd_copy_buf) { in dhd_prot_detach()
4300 MFREE(dhd->osh, prot->h2d_dma_indx_rd_copy_buf, in dhd_prot_detach()
4301 prot->h2d_dma_indx_rd_copy_bufsz); in dhd_prot_detach()
4303 if (prot->d2h_dma_indx_wr_copy_buf) { in dhd_prot_detach()
4304 MFREE(dhd->osh, prot->d2h_dma_indx_wr_copy_buf, in dhd_prot_detach()
4305 prot->d2h_dma_indx_wr_copy_bufsz); in dhd_prot_detach()
4308 DHD_OS_PREFREE(dhd, dhd->prot, sizeof(dhd_prot_t)); in dhd_prot_detach()
4310 dhd->prot = NULL; in dhd_prot_detach()
4315 * dhd_prot_reset - Reset the protocol layer without freeing any objects.
4325 struct dhd_prot *prot = dhd->prot; in dhd_prot_reset()
4333 dhd->ring_attached = FALSE; in dhd_prot_reset()
4338 dhd_prot_ring_reset(dhd, &prot->h2dring_ctrl_subn); in dhd_prot_reset()
4339 dhd_prot_ring_reset(dhd, &prot->h2dring_rxp_subn); in dhd_prot_reset()
4340 dhd_prot_ring_reset(dhd, &prot->d2hring_ctrl_cpln); in dhd_prot_reset()
4341 dhd_prot_ring_reset(dhd, &prot->d2hring_tx_cpln); in dhd_prot_reset()
4342 dhd_prot_ring_reset(dhd, &prot->d2hring_rx_cpln); in dhd_prot_reset()
4345 if (prot->h2dring_info_subn) { in dhd_prot_reset()
4346 dhd_prot_ring_reset(dhd, prot->h2dring_info_subn); in dhd_prot_reset()
4349 if (prot->d2hring_info_cpln) { in dhd_prot_reset()
4350 dhd_prot_ring_reset(dhd, prot->d2hring_info_cpln); in dhd_prot_reset()
4354 if (prot->d2hring_edl) { in dhd_prot_reset()
4355 dhd_prot_ring_reset(dhd, prot->d2hring_edl); in dhd_prot_reset()
4359 /* Reset all DMA-able buffers allocated during prot attach */ in dhd_prot_reset()
4360 dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf); in dhd_prot_reset()
4362 dhd_dma_buf_reset(dhd, &prot->hmaptest.mem); in dhd_prot_reset()
4364 dhd_dma_buf_reset(dhd, &prot->retbuf); in dhd_prot_reset()
4365 dhd_dma_buf_reset(dhd, &prot->ioctbuf); in dhd_prot_reset()
4366 dhd_dma_buf_reset(dhd, &prot->host_bus_throughput_buf); in dhd_prot_reset()
4367 dhd_dma_buf_reset(dhd, &prot->hostts_req_buf); in dhd_prot_reset()
4368 dhd_dma_buf_reset(dhd, &prot->fw_trap_buf); in dhd_prot_reset()
4369 dhd_dma_buf_reset(dhd, &prot->host_scb_buf); in dhd_prot_reset()
4371 dhd_dma_buf_reset(dhd, &prot->snapshot_upload_buf); in dhd_prot_reset()
4374 dhd_dma_buf_reset(dhd, &prot->h2d_ifrm_indx_wr_buf); in dhd_prot_reset()
4376 /* Rest all DMA-able buffers for DMAing H2D/D2H WR/RD indices */ in dhd_prot_reset()
4377 dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf); in dhd_prot_reset()
4378 dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf); in dhd_prot_reset()
4379 dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf); in dhd_prot_reset()
4380 dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_wr_buf); in dhd_prot_reset()
4383 if (prot->d2h_dma_indx_wr_copy_buf) { in dhd_prot_reset()
4384 dhd_local_buf_reset(prot->h2d_dma_indx_rd_copy_buf, in dhd_prot_reset()
4385 prot->h2d_dma_indx_rd_copy_bufsz); in dhd_prot_reset()
4386 dhd_local_buf_reset(prot->d2h_dma_indx_wr_copy_buf, in dhd_prot_reset()
4387 prot->d2h_dma_indx_wr_copy_bufsz); in dhd_prot_reset()
4393 prot->rx_metadata_offset = 0; in dhd_prot_reset()
4394 prot->tx_metadata_offset = 0; in dhd_prot_reset()
4396 prot->rxbufpost = 0; in dhd_prot_reset()
4397 prot->cur_event_bufs_posted = 0; in dhd_prot_reset()
4398 prot->cur_ioctlresp_bufs_posted = 0; in dhd_prot_reset()
4400 OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count); in dhd_prot_reset()
4401 prot->data_seq_no = 0; in dhd_prot_reset()
4402 prot->ioctl_seq_no = 0; in dhd_prot_reset()
4403 prot->ioctl_state = 0; in dhd_prot_reset()
4404 prot->curr_ioctl_cmd = 0; in dhd_prot_reset()
4405 prot->ioctl_received = IOCTL_WAIT; in dhd_prot_reset()
4407 prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER; in dhd_prot_reset()
4408 prot->txcpl_db_cnt = 0; in dhd_prot_reset()
4413 if (dhd->flow_rings_inited) { in dhd_prot_reset()
4419 if (prot->h2dring_btlog_subn) { in dhd_prot_reset()
4420 dhd_prot_ring_reset(dhd, prot->h2dring_btlog_subn); in dhd_prot_reset()
4423 if (prot->d2hring_btlog_cpln) { in dhd_prot_reset()
4424 dhd_prot_ring_reset(dhd, prot->d2hring_btlog_cpln); in dhd_prot_reset()
4428 if (prot->d2hring_hp2p_txcpl) { in dhd_prot_reset()
4429 dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_txcpl); in dhd_prot_reset()
4431 if (prot->d2hring_hp2p_rxcpl) { in dhd_prot_reset()
4432 dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_rxcpl); in dhd_prot_reset()
4437 DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_ctrl_map); in dhd_prot_reset()
4438 DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_rx_map); in dhd_prot_reset()
4439 DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_tx_map); in dhd_prot_reset()
4441 DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, prot->pktid_map_handle_ioctl); in dhd_prot_reset()
4444 dhd->dma_stats.txdata = dhd->dma_stats.txdata_sz = 0; in dhd_prot_reset()
4445 dhd->dma_stats.rxdata = dhd->dma_stats.rxdata_sz = 0; in dhd_prot_reset()
4447 dhd->dma_stats.ioctl_rx = dhd->dma_stats.ioctl_rx_sz = 0; in dhd_prot_reset()
4449 dhd->dma_stats.event_rx = dhd->dma_stats.event_rx_sz = 0; in dhd_prot_reset()
4450 dhd->dma_stats.info_rx = dhd->dma_stats.info_rx_sz = 0; in dhd_prot_reset()
4451 dhd->dma_stats.tsbuf_rx = dhd->dma_stats.tsbuf_rx_sz = 0; in dhd_prot_reset()
4475 * dhd_lb_dispatch_rx_process - load balance by dispatch Rx processing work
4489 dhd_prot_t *prot = dhd->prot; in dhd_prot_rx_dataoffset()
4490 prot->rx_dataoffset = rx_offset; in dhd_prot_rx_dataoffset()
4496 dhd_prot_t *prot = dhd->prot; in dhd_check_create_info_rings()
4501 if (dhd->submit_count_WAR) { in dhd_check_create_info_rings()
4502 ringid = dhd->bus->max_tx_flowrings + BCMPCIE_COMMON_MSGRINGS; in dhd_check_create_info_rings()
4509 ringid = dhd->bus->max_tx_flowrings + in dhd_check_create_info_rings()
4510 (dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) + in dhd_check_create_info_rings()
4514 if (prot->d2hring_info_cpln) { in dhd_check_create_info_rings()
4515 /* for d2hring re-entry case, clear inited flag */ in dhd_check_create_info_rings()
4516 prot->d2hring_info_cpln->inited = FALSE; in dhd_check_create_info_rings()
4519 if (prot->h2dring_info_subn && prot->d2hring_info_cpln) { in dhd_check_create_info_rings()
4523 if (prot->h2dring_info_subn == NULL) { in dhd_check_create_info_rings()
4524 prot->h2dring_info_subn = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t)); in dhd_check_create_info_rings()
4526 if (prot->h2dring_info_subn == NULL) { in dhd_check_create_info_rings()
4533 ret = dhd_prot_ring_attach(dhd, prot->h2dring_info_subn, "h2dinfo", in dhd_check_create_info_rings()
4543 if (prot->d2hring_info_cpln == NULL) { in dhd_check_create_info_rings()
4544 prot->d2hring_info_cpln = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t)); in dhd_check_create_info_rings()
4546 if (prot->d2hring_info_cpln == NULL) { in dhd_check_create_info_rings()
4558 ret = dhd_prot_ring_attach(dhd, prot->d2hring_info_cpln, "d2hinfo", in dhd_check_create_info_rings()
4564 dhd_prot_ring_detach(dhd, prot->h2dring_info_subn); in dhd_check_create_info_rings()
4571 MFREE(prot->osh, prot->h2dring_info_subn, sizeof(msgbuf_ring_t)); in dhd_check_create_info_rings()
4573 if (prot->d2hring_info_cpln) { in dhd_check_create_info_rings()
4574 MFREE(prot->osh, prot->d2hring_info_cpln, sizeof(msgbuf_ring_t)); in dhd_check_create_info_rings()
4582 dhd_prot_t *prot = dhd->prot; in dhd_prot_init_info_rings()
4591 if ((prot->d2hring_info_cpln->inited) || (prot->d2hring_info_cpln->create_pending)) { in dhd_prot_init_info_rings()
4596 DHD_TRACE(("trying to send create d2h info ring: id %d\n", prot->d2hring_info_cpln->idx)); in dhd_prot_init_info_rings()
4597 ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_info_cpln, in dhd_prot_init_info_rings()
4602 prot->h2dring_info_subn->seqnum = H2D_EPOCH_INIT_VAL; in dhd_prot_init_info_rings()
4603 prot->h2dring_info_subn->current_phase = 0; in dhd_prot_init_info_rings()
4604 prot->d2hring_info_cpln->seqnum = D2H_EPOCH_INIT_VAL; in dhd_prot_init_info_rings()
4605 prot->d2hring_info_cpln->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT; in dhd_prot_init_info_rings()
4607 DHD_TRACE(("trying to send create h2d info ring id %d\n", prot->h2dring_info_subn->idx)); in dhd_prot_init_info_rings()
4608 prot->h2dring_info_subn->n_completion_ids = 1; in dhd_prot_init_info_rings()
4609 prot->h2dring_info_subn->compeltion_ring_ids[0] = prot->d2hring_info_cpln->idx; in dhd_prot_init_info_rings()
4611 ret = dhd_send_h2d_ringcreate(dhd, prot->h2dring_info_subn, in dhd_prot_init_info_rings()
4623 if (dhd->prot->h2dring_info_subn) { in dhd_prot_detach_info_rings()
4624 dhd_prot_ring_detach(dhd, dhd->prot->h2dring_info_subn); in dhd_prot_detach_info_rings()
4625 MFREE(dhd->prot->osh, dhd->prot->h2dring_info_subn, sizeof(msgbuf_ring_t)); in dhd_prot_detach_info_rings()
4627 if (dhd->prot->d2hring_info_cpln) { in dhd_prot_detach_info_rings()
4628 dhd_prot_ring_detach(dhd, dhd->prot->d2hring_info_cpln); in dhd_prot_detach_info_rings()
4629 MFREE(dhd->prot->osh, dhd->prot->d2hring_info_cpln, sizeof(msgbuf_ring_t)); in dhd_prot_detach_info_rings()
4637 dhd_prot_t *prot = dhd->prot; in dhd_check_create_hp2p_rings()
4642 ringid = dhd->bus->max_submission_rings + dhd->bus->max_completion_rings - 2; in dhd_check_create_hp2p_rings()
4644 if (prot->d2hring_hp2p_txcpl == NULL) { in dhd_check_create_hp2p_rings()
4645 prot->d2hring_hp2p_txcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t)); in dhd_check_create_hp2p_rings()
4647 if (prot->d2hring_hp2p_txcpl == NULL) { in dhd_check_create_hp2p_rings()
4654 ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_txcpl, "d2hhp2p_txcpl", in dhd_check_create_hp2p_rings()
4655 dhd_bus_get_hp2p_ring_max_size(dhd->bus, TRUE), D2HRING_TXCMPLT_ITEMSIZE, in dhd_check_create_hp2p_rings()
4663 /* for re-entry case, clear inited flag */ in dhd_check_create_hp2p_rings()
4664 prot->d2hring_hp2p_txcpl->inited = FALSE; in dhd_check_create_hp2p_rings()
4666 if (prot->d2hring_hp2p_rxcpl == NULL) { in dhd_check_create_hp2p_rings()
4667 prot->d2hring_hp2p_rxcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t)); in dhd_check_create_hp2p_rings()
4669 if (prot->d2hring_hp2p_rxcpl == NULL) { in dhd_check_create_hp2p_rings()
4681 ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_rxcpl, "d2hhp2p_rxcpl", in dhd_check_create_hp2p_rings()
4682 dhd_bus_get_hp2p_ring_max_size(dhd->bus, FALSE), D2HRING_RXCMPLT_ITEMSIZE, in dhd_check_create_hp2p_rings()
4690 /* for re-entry case, clear inited flag */ in dhd_check_create_hp2p_rings()
4691 prot->d2hring_hp2p_rxcpl->inited = FALSE; in dhd_check_create_hp2p_rings()
4694 if (prot->d2hring_hp2p_rxcpl != NULL && in dhd_check_create_hp2p_rings()
4695 prot->d2hring_hp2p_txcpl != NULL) { in dhd_check_create_hp2p_rings()
4702 MFREE(prot->osh, prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t)); in dhd_check_create_hp2p_rings()
4703 prot->d2hring_hp2p_rxcpl = NULL; in dhd_check_create_hp2p_rings()
4706 MFREE(prot->osh, prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t)); in dhd_check_create_hp2p_rings()
4707 prot->d2hring_hp2p_txcpl = NULL; in dhd_check_create_hp2p_rings()
4714 dhd_prot_t *prot = dhd->prot; in dhd_prot_init_hp2p_rings()
4717 dhd->hp2p_ring_more = TRUE; in dhd_prot_init_hp2p_rings()
4719 dhd->hp2p_mf_enable = FALSE; in dhd_prot_init_hp2p_rings()
4727 if ((prot->d2hring_hp2p_txcpl->inited) || (prot->d2hring_hp2p_txcpl->create_pending)) { in dhd_prot_init_hp2p_rings()
4733 prot->d2hring_hp2p_txcpl->idx)); in dhd_prot_init_hp2p_rings()
4734 ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_txcpl, in dhd_prot_init_hp2p_rings()
4739 prot->d2hring_hp2p_txcpl->seqnum = D2H_EPOCH_INIT_VAL; in dhd_prot_init_hp2p_rings()
4740 prot->d2hring_hp2p_txcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT; in dhd_prot_init_hp2p_rings()
4742 if ((prot->d2hring_hp2p_rxcpl->inited) || (prot->d2hring_hp2p_rxcpl->create_pending)) { in dhd_prot_init_hp2p_rings()
4748 prot->d2hring_hp2p_rxcpl->idx)); in dhd_prot_init_hp2p_rings()
4749 ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_rxcpl, in dhd_prot_init_hp2p_rings()
4754 prot->d2hring_hp2p_rxcpl->seqnum = D2H_EPOCH_INIT_VAL; in dhd_prot_init_hp2p_rings()
4755 prot->d2hring_hp2p_rxcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT; in dhd_prot_init_hp2p_rings()
4766 if (dhd->prot->d2hring_hp2p_txcpl) { in dhd_prot_detach_hp2p_rings()
4767 dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_txcpl); in dhd_prot_detach_hp2p_rings()
4768 MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t)); in dhd_prot_detach_hp2p_rings()
4769 dhd->prot->d2hring_hp2p_txcpl = NULL; in dhd_prot_detach_hp2p_rings()
4771 if (dhd->prot->d2hring_hp2p_rxcpl) { in dhd_prot_detach_hp2p_rings()
4772 dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_rxcpl); in dhd_prot_detach_hp2p_rings()
4773 MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t)); in dhd_prot_detach_hp2p_rings()
4774 dhd->prot->d2hring_hp2p_rxcpl = NULL; in dhd_prot_detach_hp2p_rings()
4783 dhd_prot_t *prot = dhd->prot; in dhd_check_create_btlog_rings()
4787 if (dhd->submit_count_WAR) { in dhd_check_create_btlog_rings()
4788 ringid = dhd->bus->max_tx_flowrings + BCMPCIE_COMMON_MSGRINGS + 2; in dhd_check_create_btlog_rings()
4791 ringid = dhd->bus->max_tx_flowrings + in dhd_check_create_btlog_rings()
4792 (dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) + in dhd_check_create_btlog_rings()
4793 BCMPCIE_H2D_COMMON_MSGRINGS - 1; in dhd_check_create_btlog_rings()
4796 if (prot->d2hring_btlog_cpln) { in dhd_check_create_btlog_rings()
4797 /* for re-entry case, clear inited flag */ in dhd_check_create_btlog_rings()
4798 prot->d2hring_btlog_cpln->inited = FALSE; in dhd_check_create_btlog_rings()
4801 if (prot->h2dring_btlog_subn && prot->d2hring_btlog_cpln) { in dhd_check_create_btlog_rings()
4805 if (prot->h2dring_btlog_subn == NULL) { in dhd_check_create_btlog_rings()
4806 prot->h2dring_btlog_subn = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t)); in dhd_check_create_btlog_rings()
4808 if (prot->h2dring_btlog_subn == NULL) { in dhd_check_create_btlog_rings()
4815 ret = dhd_prot_ring_attach(dhd, prot->h2dring_btlog_subn, "h2dbtlog", in dhd_check_create_btlog_rings()
4825 if (prot->d2hring_btlog_cpln == NULL) { in dhd_check_create_btlog_rings()
4826 prot->d2hring_btlog_cpln = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t)); in dhd_check_create_btlog_rings()
4828 if (prot->d2hring_btlog_cpln == NULL) { in dhd_check_create_btlog_rings()
4834 if (dhd->submit_count_WAR) { in dhd_check_create_btlog_rings()
4842 ret = dhd_prot_ring_attach(dhd, prot->d2hring_btlog_cpln, "d2hbtlog", in dhd_check_create_btlog_rings()
4848 dhd_prot_ring_detach(dhd, prot->h2dring_btlog_subn); in dhd_check_create_btlog_rings()
4855 MFREE(prot->osh, prot->h2dring_btlog_subn, sizeof(msgbuf_ring_t)); in dhd_check_create_btlog_rings()
4857 if (prot->d2hring_btlog_cpln) { in dhd_check_create_btlog_rings()
4858 MFREE(prot->osh, prot->d2hring_btlog_cpln, sizeof(msgbuf_ring_t)); in dhd_check_create_btlog_rings()
4866 dhd_prot_t *prot = dhd->prot; in dhd_prot_init_btlog_rings()
4875 if ((prot->d2hring_btlog_cpln->inited) || (prot->d2hring_btlog_cpln->create_pending)) { in dhd_prot_init_btlog_rings()
4880 DHD_ERROR(("trying to send create d2h btlog ring: id %d\n", prot->d2hring_btlog_cpln->idx)); in dhd_prot_init_btlog_rings()
4881 ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_btlog_cpln, in dhd_prot_init_btlog_rings()
4886 prot->h2dring_btlog_subn->seqnum = H2D_EPOCH_INIT_VAL; in dhd_prot_init_btlog_rings()
4887 prot->h2dring_btlog_subn->current_phase = 0; in dhd_prot_init_btlog_rings()
4888 prot->d2hring_btlog_cpln->seqnum = D2H_EPOCH_INIT_VAL; in dhd_prot_init_btlog_rings()
4889 prot->d2hring_btlog_cpln->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT; in dhd_prot_init_btlog_rings()
4891 DHD_ERROR(("trying to send create h2d btlog ring id %d\n", prot->h2dring_btlog_subn->idx)); in dhd_prot_init_btlog_rings()
4892 prot->h2dring_btlog_subn->n_completion_ids = 1; in dhd_prot_init_btlog_rings()
4893 prot->h2dring_btlog_subn->compeltion_ring_ids[0] = prot->d2hring_btlog_cpln->idx; in dhd_prot_init_btlog_rings()
4895 ret = dhd_send_h2d_ringcreate(dhd, prot->h2dring_btlog_subn, in dhd_prot_init_btlog_rings()
4907 if (dhd->prot->h2dring_btlog_subn) { in dhd_prot_detach_btlog_rings()
4908 dhd_prot_ring_detach(dhd, dhd->prot->h2dring_btlog_subn); in dhd_prot_detach_btlog_rings()
4909 MFREE(dhd->prot->osh, dhd->prot->h2dring_btlog_subn, sizeof(msgbuf_ring_t)); in dhd_prot_detach_btlog_rings()
4911 if (dhd->prot->d2hring_btlog_cpln) { in dhd_prot_detach_btlog_rings()
4912 dhd_prot_ring_detach(dhd, dhd->prot->d2hring_btlog_cpln); in dhd_prot_detach_btlog_rings()
4913 MFREE(dhd->prot->osh, dhd->prot->d2hring_btlog_cpln, sizeof(msgbuf_ring_t)); in dhd_prot_detach_btlog_rings()
4922 dhd_prot_t *prot = dhd->prot; in dhd_check_create_edl_rings()
4927 if (dhd->submit_count_WAR) { in dhd_check_create_edl_rings()
4928 ringid = dhd->bus->max_tx_flowrings + BCMPCIE_COMMON_MSGRINGS; in dhd_check_create_edl_rings()
4933 * ringid at end of dynamic rings (re-use info ring cpl ring id) in dhd_check_create_edl_rings()
4935 ringid = dhd->bus->max_tx_flowrings + in dhd_check_create_edl_rings()
4936 (dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) + in dhd_check_create_edl_rings()
4940 if (prot->d2hring_edl) { in dhd_check_create_edl_rings()
4941 prot->d2hring_edl->inited = FALSE; in dhd_check_create_edl_rings()
4945 if (prot->d2hring_edl == NULL) { in dhd_check_create_edl_rings()
4946 prot->d2hring_edl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t)); in dhd_check_create_edl_rings()
4948 if (prot->d2hring_edl == NULL) { in dhd_check_create_edl_rings()
4956 ret = dhd_prot_ring_attach(dhd, prot->d2hring_edl, "d2hring_edl", in dhd_check_create_edl_rings()
4968 MFREE(prot->osh, prot->d2hring_edl, sizeof(msgbuf_ring_t)); in dhd_check_create_edl_rings()
4969 prot->d2hring_edl = NULL; in dhd_check_create_edl_rings()
4977 dhd_prot_t *prot = dhd->prot; in dhd_prot_init_edl_rings()
4986 if ((prot->d2hring_edl->inited) || (prot->d2hring_edl->create_pending)) { in dhd_prot_init_edl_rings()
4991 DHD_ERROR(("trying to send create d2h edl ring: idx %d\n", prot->d2hring_edl->idx)); in dhd_prot_init_edl_rings()
4992 ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_edl, in dhd_prot_init_edl_rings()
4997 prot->d2hring_edl->seqnum = D2H_EPOCH_INIT_VAL; in dhd_prot_init_edl_rings()
4998 prot->d2hring_edl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT; in dhd_prot_init_edl_rings()
5006 if (dhd->prot->d2hring_edl) { in dhd_prot_detach_edl_rings()
5007 dhd_prot_ring_detach(dhd, dhd->prot->d2hring_edl); in dhd_prot_detach_edl_rings()
5008 MFREE(dhd->prot->osh, dhd->prot->d2hring_edl, sizeof(msgbuf_ring_t)); in dhd_prot_detach_edl_rings()
5009 dhd->prot->d2hring_edl = NULL; in dhd_prot_detach_edl_rings()
5024 dhd_prot_t *prot = dhd->prot; in dhd_sync_with_dongle()
5046 dhd->wlc_ver_major = ((wl_wlc_version_t*)buf)->wlc_ver_major; in dhd_sync_with_dongle()
5047 dhd->wlc_ver_minor = ((wl_wlc_version_t*)buf)->wlc_ver_minor; in dhd_sync_with_dongle()
5050 DHD_ERROR(("wlc_ver_major %d, wlc_ver_minor %d\n", dhd->wlc_ver_major, dhd->wlc_ver_minor)); in dhd_sync_with_dongle()
5060 memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN); in dhd_sync_with_dongle()
5086 prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ; in dhd_sync_with_dongle()
5097 if (memcpy_s(&(prot->rxbufpost_sz), sizeof(prot->rxbufpost_sz), in dhd_sync_with_dongle()
5102 if (prot->rxbufpost_sz > DHD_FLOWRING_RX_BUFPOST_PKTSZ_MAX) { in dhd_sync_with_dongle()
5104 __FUNCTION__, prot->rxbufpost_sz, in dhd_sync_with_dongle()
5106 prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ; in dhd_sync_with_dongle()
5109 __FUNCTION__, prot->rxbufpost_sz)); in dhd_sync_with_dongle()
5127 if (FW_SUPPORTED(dhd, h2dlogts) || dhd->hp2p_capable) in dhd_sync_with_dongle()
5133 if (dhd->hp2p_enable) { in dhd_sync_with_dongle()
5134 dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH / 40; in dhd_sync_with_dongle()
5136 dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH; in dhd_sync_with_dongle()
5139 dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH; in dhd_sync_with_dongle()
5141 dhd->bus->dhd_rte_time_sync_count = OSL_SYSUPTIME_US(); in dhd_sync_with_dongle()
5145 dhd->dhd_rte_time_sync_ms = 0; in dhd_sync_with_dongle()
5151 dhd->host_sfhllc_supported = TRUE; in dhd_sync_with_dongle()
5153 dhd->host_sfhllc_supported = FALSE; in dhd_sync_with_dongle()
5158 dhd->iswl = TRUE; in dhd_sync_with_dongle()
5176 len -= BCMPCIE_D2H_METADATA_HDRLEN; in BCMFASTPATH()
5183 len -= TLV_HDR_LEN; in BCMFASTPATH()
5200 printf("METADATA TX_STATUS: %08x WLFCTS[%04x | %08x - %08x - %08x]" in BCMFASTPATH()
5201 " rate = %08x tries = %d - %d\n", txs, in BCMFASTPATH()
5233 printf("METADATA RX TIMESTMAP: WLFCTS[%08x - %08x] rate = %08x\n", in BCMFASTPATH()
5250 len -= tlv_l; in BCMFASTPATH()
5265 PKTFREE_STATIC(dhd->osh, pkt, send); in BCMFASTPATH()
5267 PKTFREE(dhd->osh, pkt, send); in BCMFASTPATH()
5270 PKTFREE(dhd->osh, pkt, send); in BCMFASTPATH()
5291 PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map, in BCMFASTPATH()
5294 PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_ctrl_map, in BCMFASTPATH()
5298 PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map, pktid, pa, in BCMFASTPATH()
5302 DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah); in BCMFASTPATH()
5307 dhd->dma_stats.ioctl_rx--; in BCMFASTPATH()
5308 dhd->dma_stats.ioctl_rx_sz -= len; in BCMFASTPATH()
5312 dhd->dma_stats.event_rx--; in BCMFASTPATH()
5313 dhd->dma_stats.event_rx_sz -= len; in BCMFASTPATH()
5316 dhd->dma_stats.info_rx--; in BCMFASTPATH()
5317 dhd->dma_stats.info_rx_sz -= len; in BCMFASTPATH()
5320 dhd->dma_stats.tsbuf_rx--; in BCMFASTPATH()
5321 dhd->dma_stats.tsbuf_rx_sz -= len; in BCMFASTPATH()
5335 retbuf->va = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle_ioctl, pktid, in BCMFASTPATH()
5336 retbuf->pa, retbuf->len, retbuf->dmah, retbuf->secdma, PKTTYPE_IOCTL_RX); in BCMFASTPATH()
5349 DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags); in dhd_prot_inc_hostactive_devwake_assert()
5350 bus->host_active_cnt++; in dhd_prot_inc_hostactive_devwake_assert()
5351 DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); in dhd_prot_inc_hostactive_devwake_assert()
5353 DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags); in dhd_prot_inc_hostactive_devwake_assert()
5354 bus->host_active_cnt--; in dhd_prot_inc_hostactive_devwake_assert()
5356 DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); in dhd_prot_inc_hostactive_devwake_assert()
5369 DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags); in dhd_prot_dec_hostactive_ack_pending_dsreq()
5370 bus->host_active_cnt--; in dhd_prot_dec_hostactive_ack_pending_dsreq()
5372 DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); in dhd_prot_dec_hostactive_ack_pending_dsreq()
5380 dhd_prot_t *prot = dhd->prot; in BCMFASTPATH()
5384 fillbufs = prot->max_rxbufpost - prot->rxbufpost; in BCMFASTPATH()
5393 prot->rxbufpost += (uint16)retcount; in BCMFASTPATH()
5395 fillbufs = prot->max_rxbufpost - prot->rxbufpost; in BCMFASTPATH()
5416 dhd_prot_t *prot = dhd->prot; in BCMFASTPATH()
5417 msgbuf_ring_t *ring = &prot->h2dring_rxp_subn; in BCMFASTPATH()
5423 uint16 pktsz = prot->rxbufpost_sz; in BCMFASTPATH()
5427 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) in BCMFASTPATH()
5433 lcl_buf = MALLOC(dhd->osh, lcl_buf_size); in BCMFASTPATH()
5437 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); in BCMFASTPATH()
5446 if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) { in BCMFASTPATH()
5448 dhd->rx_pktgetfail++; in BCMFASTPATH()
5454 PKTPULL(dhd->osh, p, BCMEXTRAHDROOM); in BCMFASTPATH()
5456 pktlen[i] = PKTLEN(dhd->osh, p); in BCMFASTPATH()
5457 pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i], DMA_RX, p, 0); in BCMFASTPATH()
5460 PKTFREE(dhd->osh, p, FALSE); in BCMFASTPATH()
5466 dhd->dma_stats.rxdata++; in BCMFASTPATH()
5467 dhd->dma_stats.rxdata_sz += pktlen[i]; in BCMFASTPATH()
5470 PKTPULL(dhd->osh, p, prot->rx_metadata_offset); in BCMFASTPATH()
5471 pktlen[i] = PKTLEN(dhd->osh, p); in BCMFASTPATH()
5480 DHD_RING_LOCK(ring->ring_lock, flags); in BCMFASTPATH()
5487 DHD_RING_UNLOCK(ring->ring_lock, flags); in BCMFASTPATH()
5500 pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_rx_map, p, pa, in BCMFASTPATH()
5501 pktlen[i], DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX); in BCMFASTPATH()
5509 if (dhd->prot->hmaptest_rx_active == HMAPTEST_D11_RX_ACTIVE) { in BCMFASTPATH()
5511 dhd->prot->hmap_rx_buf_va = (char *)dhd->prot->hmaptest.mem.va in BCMFASTPATH()
5512 + dhd->prot->hmaptest.offset; in BCMFASTPATH()
5514 dhd->prot->hmap_rx_buf_len = pktlen[i] + prot->rx_metadata_offset; in BCMFASTPATH()
5515 if ((dhd->prot->hmap_rx_buf_va + dhd->prot->hmap_rx_buf_len) > in BCMFASTPATH()
5516 ((char *)dhd->prot->hmaptest.mem.va + dhd->prot->hmaptest.mem.len)) { in BCMFASTPATH()
5519 dhd->prot->hmaptest_rx_active = HMAPTEST_D11_RX_INACTIVE; in BCMFASTPATH()
5520 dhd->prot->hmaptest.in_progress = FALSE; in BCMFASTPATH()
5522 pa = DMA_MAP(dhd->osh, dhd->prot->hmap_rx_buf_va, in BCMFASTPATH()
5523 dhd->prot->hmap_rx_buf_len, DMA_RX, p, 0); in BCMFASTPATH()
5525 dhd->prot->hmap_rx_buf_pa = pa; in BCMFASTPATH()
5526 dhd->prot->hmaptest_rx_pktid = pktid; in BCMFASTPATH()
5527 dhd->prot->hmaptest_rx_active = HMAPTEST_D11_RX_POSTED; in BCMFASTPATH()
5531 dhd->prot->hmap_rx_buf_va, (uint32)PHYSADDRLO(pa))); in BCMFASTPATH()
5533 PKTDATA(dhd->osh, p), (uint32)PHYSADDRLO(pktbuf_pa[i]))); in BCMFASTPATH()
5537 dhd->prot->tot_rxbufpost++; in BCMFASTPATH()
5539 rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST; in BCMFASTPATH()
5540 rxbuf_post->cmn_hdr.if_id = 0; in BCMFASTPATH()
5541 rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; in BCMFASTPATH()
5542 rxbuf_post->cmn_hdr.flags = ring->current_phase; in BCMFASTPATH()
5543 ring->seqnum++; in BCMFASTPATH()
5544 rxbuf_post->data_buf_len = htol16((uint16)pktlen[i]); in BCMFASTPATH()
5545 rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); in BCMFASTPATH()
5546 rxbuf_post->data_buf_addr.low_addr = in BCMFASTPATH()
5547 htol32(PHYSADDRLO(pa) + prot->rx_metadata_offset); in BCMFASTPATH()
5549 if (prot->rx_metadata_offset) { in BCMFASTPATH()
5550 rxbuf_post->metadata_buf_len = prot->rx_metadata_offset; in BCMFASTPATH()
5551 rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); in BCMFASTPATH()
5552 rxbuf_post->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(pa)); in BCMFASTPATH()
5554 rxbuf_post->metadata_buf_len = 0; in BCMFASTPATH()
5555 rxbuf_post->metadata_buf_addr.high_addr = 0; in BCMFASTPATH()
5556 rxbuf_post->metadata_buf_addr.low_addr = 0; in BCMFASTPATH()
5560 DHD_PKTID_AUDIT(dhd, prot->pktid_rx_map, pktid, DHD_DUPLICATE_ALLOC); in BCMFASTPATH()
5563 rxbuf_post->cmn_hdr.request_id = htol32(pktid); in BCMFASTPATH()
5566 rxbuf_post_tmp = rxbuf_post_tmp + ring->item_len; in BCMFASTPATH()
5568 PKTAUDIT(dhd->osh, p); in BCMFASTPATH()
5573 if (ring->wr < (alloced - i)) in BCMFASTPATH()
5574 ring->wr = ring->max_items - (alloced - i); in BCMFASTPATH()
5576 ring->wr -= (alloced - i); in BCMFASTPATH()
5578 if (ring->wr == 0) { in BCMFASTPATH()
5579 DHD_INFO(("%s: flipping the phase now\n", ring->name)); in BCMFASTPATH()
5580 ring->current_phase = ring->current_phase ? in BCMFASTPATH()
5592 DHD_RING_UNLOCK(ring->ring_lock, flags); in BCMFASTPATH()
5599 DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0, DHD_DMAH_NULL); in BCMFASTPATH()
5600 PKTFREE(dhd->osh, p, FALSE); in BCMFASTPATH()
5603 MFREE(dhd->osh, lcl_buf, lcl_buf_size); in BCMFASTPATH()
5605 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); in BCMFASTPATH()
5617 dhd_prot_t *prot = dhd->prot; in dhd_prot_infobufpost()
5632 if (ring->inited != TRUE) in dhd_prot_infobufpost()
5634 if (ring == dhd->prot->h2dring_info_subn) { in dhd_prot_infobufpost()
5635 if (prot->max_infobufpost == 0) in dhd_prot_infobufpost()
5638 count = prot->max_infobufpost - prot->infobufpost; in dhd_prot_infobufpost()
5641 else if (ring == dhd->prot->h2dring_btlog_subn) { in dhd_prot_infobufpost()
5642 if (prot->max_btlogbufpost == 0) in dhd_prot_infobufpost()
5646 count = prot->max_btlogbufpost - prot->btlogbufpost; in dhd_prot_infobufpost()
5661 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) in dhd_prot_infobufpost()
5666 DHD_RING_LOCK(ring->ring_lock, flags); in dhd_prot_infobufpost()
5673 DHD_RING_UNLOCK(ring->ring_lock, flags); in dhd_prot_infobufpost()
5675 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); in dhd_prot_infobufpost()
5677 return -1; in dhd_prot_infobufpost()
5690 p = PKTGET_STATIC(dhd->osh, pktsz, FALSE); in dhd_prot_infobufpost()
5692 p = PKTGET(dhd->osh, pktsz, FALSE); in dhd_prot_infobufpost()
5696 dhd->rx_pktgetfail++; in dhd_prot_infobufpost()
5699 pktlen = PKTLEN(dhd->osh, p); in dhd_prot_infobufpost()
5700 pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0); in dhd_prot_infobufpost()
5702 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); in dhd_prot_infobufpost()
5704 PKTFREE_STATIC(dhd->osh, p, FALSE); in dhd_prot_infobufpost()
5706 PKTFREE(dhd->osh, p, FALSE); in dhd_prot_infobufpost()
5713 dhd->dma_stats.info_rx++; in dhd_prot_infobufpost()
5714 dhd->dma_stats.info_rx_sz += pktlen; in dhd_prot_infobufpost()
5716 pktlen = PKTLEN(dhd->osh, p); in dhd_prot_infobufpost()
5719 infobuf_post->cmn_hdr.msg_type = MSG_TYPE_INFO_BUF_POST; in dhd_prot_infobufpost()
5720 infobuf_post->cmn_hdr.if_id = 0; in dhd_prot_infobufpost()
5721 infobuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; in dhd_prot_infobufpost()
5722 infobuf_post->cmn_hdr.flags = ring->current_phase; in dhd_prot_infobufpost()
5723 ring->seqnum++; in dhd_prot_infobufpost()
5725 pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_ctrl_map, p, pa, in dhd_prot_infobufpost()
5726 pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_INFO_RX); in dhd_prot_infobufpost()
5730 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0); in dhd_prot_infobufpost()
5733 PKTFREE_STATIC(dhd->osh, p, FALSE); in dhd_prot_infobufpost()
5735 PKTFREE(dhd->osh, p, FALSE); in dhd_prot_infobufpost()
5742 infobuf_post->host_buf_len = htol16((uint16)pktlen); in dhd_prot_infobufpost()
5743 infobuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); in dhd_prot_infobufpost()
5744 infobuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa)); in dhd_prot_infobufpost()
5747 DHD_PKTID_AUDIT(dhd, prot->pktid_ctrl_map, pktid, DHD_DUPLICATE_ALLOC); in dhd_prot_infobufpost()
5751 infobuf_post->cmn_hdr.request_id, infobuf_post->host_buf_addr.low_addr, in dhd_prot_infobufpost()
5752 infobuf_post->host_buf_addr.high_addr)); in dhd_prot_infobufpost()
5754 infobuf_post->cmn_hdr.request_id = htol32(pktid); in dhd_prot_infobufpost()
5756 infobuf_post_tmp = infobuf_post_tmp + ring->item_len; in dhd_prot_infobufpost()
5758 PKTAUDIT(dhd->osh, p); in dhd_prot_infobufpost()
5763 if (ring->wr < (alloced - i)) in dhd_prot_infobufpost()
5764 ring->wr = ring->max_items - (alloced - i); in dhd_prot_infobufpost()
5766 ring->wr -= (alloced - i); in dhd_prot_infobufpost()
5769 if (alloced && ring->wr == 0) { in dhd_prot_infobufpost()
5770 DHD_INFO(("%s: flipping the phase now\n", ring->name)); in dhd_prot_infobufpost()
5771 ring->current_phase = ring->current_phase ? in dhd_prot_infobufpost()
5778 if (ring == dhd->prot->h2dring_info_subn) { in dhd_prot_infobufpost()
5779 prot->infobufpost += alloced; in dhd_prot_infobufpost()
5782 if (ring == dhd->prot->h2dring_btlog_subn) { in dhd_prot_infobufpost()
5783 prot->btlogbufpost += alloced; in dhd_prot_infobufpost()
5789 DHD_RING_UNLOCK(ring->ring_lock, flags); in dhd_prot_infobufpost()
5792 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); in dhd_prot_infobufpost()
5818 if (retbuf->va) { in free_ioctl_return_buffer()
5821 retbuf->len = IOCT_RETBUF_SIZE; in free_ioctl_return_buffer()
5822 retbuf->_alloced = retbuf->len + dma_pad; in free_ioctl_return_buffer()
5838 dhd_prot_t *prot = dhd->prot; in dhd_prot_rxbufpost_ctrl()
5845 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; in dhd_prot_rxbufpost_ctrl()
5849 if (dhd->busstate == DHD_BUS_DOWN) { in dhd_prot_rxbufpost_ctrl()
5851 return -1; in dhd_prot_rxbufpost_ctrl()
5864 return -1; in dhd_prot_rxbufpost_ctrl()
5867 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) { in dhd_prot_rxbufpost_ctrl()
5900 p = PKTGET_STATIC(dhd->osh, pktsz, FALSE); in dhd_prot_rxbufpost_ctrl()
5902 p = PKTGET(dhd->osh, pktsz, FALSE); in dhd_prot_rxbufpost_ctrl()
5908 dhd->rx_pktgetfail++; in dhd_prot_rxbufpost_ctrl()
5912 pktlen = PKTLEN(dhd->osh, p); in dhd_prot_rxbufpost_ctrl()
5913 pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0); in dhd_prot_rxbufpost_ctrl()
5925 dhd->dma_stats.ioctl_rx++; in dhd_prot_rxbufpost_ctrl()
5926 dhd->dma_stats.ioctl_rx_sz += pktlen; in dhd_prot_rxbufpost_ctrl()
5930 dhd->dma_stats.event_rx++; in dhd_prot_rxbufpost_ctrl()
5931 dhd->dma_stats.event_rx_sz += pktlen; in dhd_prot_rxbufpost_ctrl()
5934 dhd->dma_stats.tsbuf_rx++; in dhd_prot_rxbufpost_ctrl()
5935 dhd->dma_stats.tsbuf_rx_sz += pktlen; in dhd_prot_rxbufpost_ctrl()
5945 DHD_RING_LOCK(ring->ring_lock, flags); in dhd_prot_rxbufpost_ctrl()
5951 DHD_RING_UNLOCK(ring->ring_lock, flags); in dhd_prot_rxbufpost_ctrl()
5959 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); in dhd_prot_rxbufpost_ctrl()
5965 rxbuf_post->cmn_hdr.msg_type = msg_type; in dhd_prot_rxbufpost_ctrl()
5969 map_handle = dhd->prot->pktid_map_handle_ioctl; in dhd_prot_rxbufpost_ctrl()
5971 ring->dma_buf.secdma, buf_type); in dhd_prot_rxbufpost_ctrl()
5975 map_handle = dhd->prot->pktid_ctrl_map; in dhd_prot_rxbufpost_ctrl()
5977 p, pa, pktlen, DMA_RX, dmah, ring->dma_buf.secdma, in dhd_prot_rxbufpost_ctrl()
5982 if (ring->wr == 0) { in dhd_prot_rxbufpost_ctrl()
5983 ring->wr = ring->max_items - 1; in dhd_prot_rxbufpost_ctrl()
5985 ring->wr--; in dhd_prot_rxbufpost_ctrl()
5986 if (ring->wr == 0) { in dhd_prot_rxbufpost_ctrl()
5987 ring->current_phase = ring->current_phase ? 0 : in dhd_prot_rxbufpost_ctrl()
5991 DHD_RING_UNLOCK(ring->ring_lock, flags); in dhd_prot_rxbufpost_ctrl()
5992 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); in dhd_prot_rxbufpost_ctrl()
6001 rxbuf_post->cmn_hdr.request_id = htol32(pktid); in dhd_prot_rxbufpost_ctrl()
6002 rxbuf_post->cmn_hdr.if_id = 0; in dhd_prot_rxbufpost_ctrl()
6003 rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; in dhd_prot_rxbufpost_ctrl()
6004 ring->seqnum++; in dhd_prot_rxbufpost_ctrl()
6005 rxbuf_post->cmn_hdr.flags = ring->current_phase; in dhd_prot_rxbufpost_ctrl()
6008 if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) { in dhd_prot_rxbufpost_ctrl()
6009 if (ring->wr == 0) { in dhd_prot_rxbufpost_ctrl()
6010 ring->wr = ring->max_items - 1; in dhd_prot_rxbufpost_ctrl()
6012 if (ring->wr == 0) { in dhd_prot_rxbufpost_ctrl()
6013 ring->current_phase = ring->current_phase ? 0 : in dhd_prot_rxbufpost_ctrl()
6017 DHD_RING_UNLOCK(ring->ring_lock, flags); in dhd_prot_rxbufpost_ctrl()
6022 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); in dhd_prot_rxbufpost_ctrl()
6029 rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p)); in dhd_prot_rxbufpost_ctrl()
6031 rxbuf_post->host_buf_len = htol16((uint16)pktlen); in dhd_prot_rxbufpost_ctrl()
6033 rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); in dhd_prot_rxbufpost_ctrl()
6034 rxbuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa)); in dhd_prot_rxbufpost_ctrl()
6037 PKTAUDIT(dhd->osh, p); in dhd_prot_rxbufpost_ctrl()
6042 DHD_RING_UNLOCK(ring->ring_lock, flags); in dhd_prot_rxbufpost_ctrl()
6045 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); in dhd_prot_rxbufpost_ctrl()
6062 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); in dhd_prot_rxbufpost_ctrl()
6064 return -1; in dhd_prot_rxbufpost_ctrl()
6075 if (dhd->busstate == DHD_BUS_DOWN) { in dhd_msgbuf_rxbuf_post_ctrlpath()
6093 dhd_prot_t *prot = dhd->prot; in dhd_msgbuf_rxbuf_post_ioctlresp_bufs()
6097 max_to_post = prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted; in dhd_msgbuf_rxbuf_post_ioctlresp_bufs()
6103 prot->cur_ioctlresp_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd, in dhd_msgbuf_rxbuf_post_ioctlresp_bufs()
6110 dhd_prot_t *prot = dhd->prot; in dhd_msgbuf_rxbuf_post_event_bufs()
6113 max_to_post = prot->max_eventbufpost - prot->cur_event_bufs_posted; in dhd_msgbuf_rxbuf_post_event_bufs()
6119 prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd, in dhd_msgbuf_rxbuf_post_event_bufs()
6127 dhd_prot_t *prot = dhd->prot; in dhd_msgbuf_rxbuf_post_ts_bufs()
6130 if (prot->active_ipc_version < 7) { in dhd_msgbuf_rxbuf_post_ts_bufs()
6132 prot->active_ipc_version)); in dhd_msgbuf_rxbuf_post_ts_bufs()
6136 max_to_post = prot->max_tsbufpost - prot->cur_ts_bufs_posted; in dhd_msgbuf_rxbuf_post_ts_bufs()
6143 prot->cur_ts_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd, in dhd_msgbuf_rxbuf_post_ts_bufs()
6152 dhd_prot_t *prot = dhd->prot; in BCMFASTPATH()
6155 msgbuf_ring_t *ring = prot->d2hring_info_cpln; in BCMFASTPATH()
6160 if (ring->inited != TRUE) in BCMFASTPATH()
6163 /* Process all the messages - DTOH direction */ in BCMFASTPATH()
6168 if (dhd->hang_was_sent) { in BCMFASTPATH()
6173 if (dhd->smmu_fault_occurred) { in BCMFASTPATH()
6178 DHD_RING_LOCK(ring->ring_lock, flags); in BCMFASTPATH()
6181 DHD_RING_UNLOCK(ring->ring_lock, flags); in BCMFASTPATH()
6199 n += msg_len / ring->item_len; in BCMFASTPATH()
6212 dhd_prot_t *prot = dhd->prot; in BCMFASTPATH()
6215 msgbuf_ring_t *ring = prot->d2hring_btlog_cpln; in BCMFASTPATH()
6219 if (ring->inited != TRUE) in BCMFASTPATH()
6222 /* Process all the messages - DTOH direction */ in BCMFASTPATH()
6232 if (dhd->hang_was_sent) { in BCMFASTPATH()
6237 if (dhd->smmu_fault_occurred) { in BCMFASTPATH()
6261 n += msg_len / ring->item_len; in BCMFASTPATH()
6275 dhd_prot_t *prot = dhd->prot; in dhd_prot_process_msgbuf_edl()
6276 msgbuf_ring_t *ring = prot->d2hring_edl; in dhd_prot_process_msgbuf_edl()
6284 if (ring->inited != TRUE) in dhd_prot_process_msgbuf_edl()
6286 if (ring->item_len == 0) { in dhd_prot_process_msgbuf_edl()
6288 __FUNCTION__, ring->idx, ring->item_len)); in dhd_prot_process_msgbuf_edl()
6296 if (dhd->hang_was_sent) { in dhd_prot_process_msgbuf_edl()
6306 DHD_RING_LOCK(ring->ring_lock, flags); in dhd_prot_process_msgbuf_edl()
6307 if (dhd->dma_d2h_ring_upd_support) { in dhd_prot_process_msgbuf_edl()
6309 ring->wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx); in dhd_prot_process_msgbuf_edl()
6311 dhd_bus_cmn_readshared(dhd->bus, &ring->wr, RING_WR_UPD, ring->idx); in dhd_prot_process_msgbuf_edl()
6313 rd = ring->rd; in dhd_prot_process_msgbuf_edl()
6314 DHD_RING_UNLOCK(ring->ring_lock, flags); in dhd_prot_process_msgbuf_edl()
6316 depth = ring->max_items; in dhd_prot_process_msgbuf_edl()
6318 items = READ_AVAIL_SPACE(ring->wr, rd, depth); in dhd_prot_process_msgbuf_edl()
6323 if (items > ring->max_items) { in dhd_prot_process_msgbuf_edl()
6325 DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n", in dhd_prot_process_msgbuf_edl()
6326 __FUNCTION__, ring, ring->name, ring->max_items, items)); in dhd_prot_process_msgbuf_edl()
6328 ring->wr, ring->rd, depth)); in dhd_prot_process_msgbuf_edl()
6329 DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n", in dhd_prot_process_msgbuf_edl()
6330 dhd->busstate, dhd->bus->wait_for_d3_ack)); in dhd_prot_process_msgbuf_edl()
6333 if (ring->wr >= ring->max_items) { in dhd_prot_process_msgbuf_edl()
6334 dhd->bus->read_shm_fail = TRUE; in dhd_prot_process_msgbuf_edl()
6338 if (dhd->memdump_enabled) { in dhd_prot_process_msgbuf_edl()
6340 dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR; in dhd_prot_process_msgbuf_edl()
6354 ring->rd, ring->wr, depth)); in dhd_prot_process_msgbuf_edl()
6357 dhd_schedule_logtrace(dhd->info); in dhd_prot_process_msgbuf_edl()
6380 if (!dhd || !dhd->prot) in dhd_prot_process_edl_complete()
6383 prot = dhd->prot; in dhd_prot_process_edl_complete()
6384 ring = prot->d2hring_edl; in dhd_prot_process_edl_complete()
6390 if (dhd->hang_was_sent) { in dhd_prot_process_edl_complete()
6394 DHD_RING_LOCK(ring->ring_lock, flags); in dhd_prot_process_edl_complete()
6395 ring->curr_rd = ring->rd; in dhd_prot_process_edl_complete()
6396 wr = ring->wr; in dhd_prot_process_edl_complete()
6397 depth = ring->max_items; in dhd_prot_process_edl_complete()
6407 num_items = READ_AVAIL_SPACE(wr, ring->rd, depth); in dhd_prot_process_edl_complete()
6411 DHD_RING_UNLOCK(ring->ring_lock, flags); in dhd_prot_process_edl_complete()
6419 msg_addr = (char*)ring->dma_buf.va + (ring->rd * ring->item_len); in dhd_prot_process_edl_complete()
6423 DHD_RING_UNLOCK(ring->ring_lock, flags); in dhd_prot_process_edl_complete()
6432 if ((err = dhd->prot->d2h_edl_sync_cb(dhd, ring, msg)) != BCME_OK) { in dhd_prot_process_edl_complete()
6442 if ((ring->curr_rd + 1) >= ring->max_items) { in dhd_prot_process_edl_complete()
6443 ring->curr_rd = 0; in dhd_prot_process_edl_complete()
6445 ring->curr_rd += 1; in dhd_prot_process_edl_complete()
6459 OSL_PREFETCH(msg_addr + ring->item_len); in dhd_prot_process_edl_complete()
6461 msg_addr += ring->item_len; in dhd_prot_process_edl_complete()
6462 --n; in dhd_prot_process_edl_complete()
6465 DHD_RING_LOCK(ring->ring_lock, flags); in dhd_prot_process_edl_complete()
6467 if ((ring->rd + max_items_to_process) >= ring->max_items) in dhd_prot_process_edl_complete()
6468 ring->rd = 0; in dhd_prot_process_edl_complete()
6470 ring->rd += max_items_to_process; in dhd_prot_process_edl_complete()
6471 DHD_RING_UNLOCK(ring->ring_lock, flags); in dhd_prot_process_edl_complete()
6476 * context in - 'dhdpcie_bus_suspend' in dhd_prot_process_edl_complete()
6481 __FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state)); in dhd_prot_process_edl_complete()
6493 * needs to be re-read from here, If we don't do so, then till in dhd_prot_process_edl_complete()
6499 DHD_RING_LOCK(ring->ring_lock, flags); in dhd_prot_process_edl_complete()
6500 if (wr != (volatile uint16)ring->wr) { in dhd_prot_process_edl_complete()
6501 wr = (volatile uint16)ring->wr; in dhd_prot_process_edl_complete()
6502 new_items = READ_AVAIL_SPACE(wr, ring->rd, depth); in dhd_prot_process_edl_complete()
6507 DHD_RING_UNLOCK(ring->ring_lock, flags); in dhd_prot_process_edl_complete()
6510 /* if # of items processed is less than num_items, need to re-schedule in dhd_prot_process_edl_complete()
6518 num_items - max_items_to_process)); in dhd_prot_process_edl_complete()
6519 return (num_items - max_items_to_process); in dhd_prot_process_edl_complete()
6536 prot = dhd->prot; in dhd_prot_edl_ring_tcm_rd_update()
6537 if (!prot || !prot->d2hring_edl) in dhd_prot_edl_ring_tcm_rd_update()
6540 ring = prot->d2hring_edl; in dhd_prot_edl_ring_tcm_rd_update()
6541 DHD_RING_LOCK(ring->ring_lock, flags); in dhd_prot_edl_ring_tcm_rd_update()
6543 DHD_RING_UNLOCK(ring->ring_lock, flags); in dhd_prot_edl_ring_tcm_rd_update()
6544 if (dhd->dma_h2d_ring_upd_support && in dhd_prot_edl_ring_tcm_rd_update()
6561 dhd_bus_rx_frame(dhd->bus, pkt, ifidx, pkt_count); in dhd_prot_rx_frame()
6567 if ((dhd->lb_rxp_stop_thr == 0) || (dhd->lb_rxp_strt_thr == 0)) { in dhd_prot_lb_rxp_flow_ctrl()
6572 if ((dhd_lb_rxp_process_qlen(dhd) >= dhd->lb_rxp_stop_thr) && in dhd_prot_lb_rxp_flow_ctrl()
6573 (!atomic_read(&dhd->lb_rxp_flow_ctrl))) { in dhd_prot_lb_rxp_flow_ctrl()
6574 atomic_set(&dhd->lb_rxp_flow_ctrl, TRUE); in dhd_prot_lb_rxp_flow_ctrl()
6576 dhd->lb_rxp_stop_thr_hitcnt++; in dhd_prot_lb_rxp_flow_ctrl()
6579 dhd_lb_rxp_process_qlen(dhd), dhd->lb_rxp_stop_thr)); in dhd_prot_lb_rxp_flow_ctrl()
6580 } else if ((dhd_lb_rxp_process_qlen(dhd) <= dhd->lb_rxp_strt_thr) && in dhd_prot_lb_rxp_flow_ctrl()
6581 (atomic_read(&dhd->lb_rxp_flow_ctrl))) { in dhd_prot_lb_rxp_flow_ctrl()
6582 atomic_set(&dhd->lb_rxp_flow_ctrl, FALSE); in dhd_prot_lb_rxp_flow_ctrl()
6584 dhd->lb_rxp_strt_thr_hitcnt++; in dhd_prot_lb_rxp_flow_ctrl()
6587 dhd_lb_rxp_process_qlen(dhd), dhd->lb_rxp_strt_thr)); in dhd_prot_lb_rxp_flow_ctrl()
6590 return atomic_read(&dhd->lb_rxp_flow_ctrl); in dhd_prot_lb_rxp_flow_ctrl()
6600 dhd_prot_t *prot = dhd->prot; in BCMFASTPATH()
6630 dhd->rx_pending_due_to_rpm = TRUE; in BCMFASTPATH()
6633 dhd->rx_pending_due_to_rpm = FALSE; in BCMFASTPATH()
6637 if (ringtype == DHD_HP2P_RING && prot->d2hring_hp2p_rxcpl) in BCMFASTPATH()
6638 ring = prot->d2hring_hp2p_rxcpl; in BCMFASTPATH()
6641 ring = &prot->d2hring_rx_cpln; in BCMFASTPATH()
6642 item_len = ring->item_len; in BCMFASTPATH()
6650 if (dhd->hang_was_sent) in BCMFASTPATH()
6653 if (dhd->smmu_fault_occurred) { in BCMFASTPATH()
6661 DHD_RING_LOCK(ring->ring_lock, flags); in BCMFASTPATH()
6666 DHD_RING_UNLOCK(ring->ring_lock, flags); in BCMFASTPATH()
6674 sync = prot->d2h_sync_cb(dhd, ring, &msg->cmn_hdr, item_len); in BCMFASTPATH()
6681 if ((ring->curr_rd + 1) >= ring->max_items) { in BCMFASTPATH()
6682 ring->curr_rd = 0; in BCMFASTPATH()
6684 ring->curr_rd += 1; in BCMFASTPATH()
6688 msg_len -= item_len; in BCMFASTPATH()
6693 pktid = ltoh32(msg->cmn_hdr.request_id); in BCMFASTPATH()
6696 DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_rx_map, pktid, in BCMFASTPATH()
6700 pkt = DHD_PKTID_TO_NATIVE(dhd, prot->pktid_rx_map, pktid, pa, in BCMFASTPATH()
6704 msg_len -= item_len; in BCMFASTPATH()
6708 dhd->prot->tot_rxcpl++; in BCMFASTPATH()
6710 DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah); in BCMFASTPATH()
6713 dhd->dma_stats.rxdata--; in BCMFASTPATH()
6714 dhd->dma_stats.rxdata_sz -= len; in BCMFASTPATH()
6717 if ((dhd->prot->hmaptest_rx_active == HMAPTEST_D11_RX_POSTED) && in BCMFASTPATH()
6718 (pktid == dhd->prot->hmaptest_rx_pktid)) { in BCMFASTPATH()
6721 ptr = PKTDATA(dhd->osh, pkt) - (prot->rx_metadata_offset); in BCMFASTPATH()
6722 DMA_UNMAP(dhd->osh, dhd->prot->hmap_rx_buf_pa, in BCMFASTPATH()
6723 (uint)dhd->prot->hmap_rx_buf_len, DMA_RX, 0, dmah); in BCMFASTPATH()
6727 msg->rx_status_0, msg->rx_status_1)); in BCMFASTPATH()
6729 dhd->prot->hmap_rx_buf_va, in BCMFASTPATH()
6730 (uint32)PHYSADDRLO(dhd->prot->hmap_rx_buf_pa))); in BCMFASTPATH()
6732 PKTDATA(dhd->osh, pkt), (uint32)PHYSADDRLO(pa))); in BCMFASTPATH()
6733 memcpy(ptr, dhd->prot->hmap_rx_buf_va, dhd->prot->hmap_rx_buf_len); in BCMFASTPATH()
6734 dhd->prot->hmaptest_rx_active = HMAPTEST_D11_RX_INACTIVE; in BCMFASTPATH()
6735 dhd->prot->hmap_rx_buf_va = NULL; in BCMFASTPATH()
6736 dhd->prot->hmap_rx_buf_len = 0; in BCMFASTPATH()
6737 PHYSADDRHISET(dhd->prot->hmap_rx_buf_pa, 0); in BCMFASTPATH()
6738 PHYSADDRLOSET(dhd->prot->hmap_rx_buf_pa, 0); in BCMFASTPATH()
6739 prot->hmaptest.in_progress = FALSE; in BCMFASTPATH()
6744 ltoh32(msg->cmn_hdr.request_id), in BCMFASTPATH()
6745 ltoh16(msg->data_offset), in BCMFASTPATH()
6746 ltoh16(msg->data_len), msg->cmn_hdr.if_id, in BCMFASTPATH()
6747 msg->cmn_hdr.flags, PKTDATA(dhd->osh, pkt), in BCMFASTPATH()
6748 ltoh16(msg->metadata_len))); in BCMFASTPATH()
6751 msg_len -= item_len; in BCMFASTPATH()
6756 if (prot->metadata_dbg && prot->rx_metadata_offset && in BCMFASTPATH()
6757 msg->metadata_len) { in BCMFASTPATH()
6759 ptr = PKTDATA(dhd->osh, pkt) - (prot->rx_metadata_offset); in BCMFASTPATH()
6761 bcm_print_bytes("rxmetadata", ptr, msg->metadata_len); in BCMFASTPATH()
6762 dhd_prot_print_metadata(dhd, ptr, msg->metadata_len); in BCMFASTPATH()
6768 if (ltoh16(msg->data_offset)) { in BCMFASTPATH()
6770 PKTPULL(dhd->osh, pkt, ltoh16(msg->data_offset)); in BCMFASTPATH()
6772 else if (prot->rx_dataoffset) { in BCMFASTPATH()
6774 PKTPULL(dhd->osh, pkt, prot->rx_dataoffset); in BCMFASTPATH()
6777 PKTSETLEN(dhd->osh, pkt, ltoh16(msg->data_len)); in BCMFASTPATH()
6783 if (ltoh32(msg->rx_pktts.tref) != 0xFFFFFFFF) { in BCMFASTPATH()
6784 fwr1 = (uint)htonl(ltoh32(msg->rx_pktts.tref)); in BCMFASTPATH()
6785 fwr2 = (uint)htonl(ltoh32(msg->rx_pktts.tref) + in BCMFASTPATH()
6786 ltoh16(msg->rx_pktts.d_t2)); in BCMFASTPATH()
6799 if (msg->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11) { in BCMFASTPATH()
6811 ifidx = msg->cmn_hdr.if_id; in BCMFASTPATH()
6813 if (ifidx != msg->cmn_hdr.if_id) { in BCMFASTPATH()
6815 if_newidx = msg->cmn_hdr.if_id; in BCMFASTPATH()
6816 pkt_cnt--; in BCMFASTPATH()
6820 PKTSETNEXT(dhd->osh, prevpkt, pkt); in BCMFASTPATH()
6826 if (dhd->hp2p_capable && ring == prot->d2hring_hp2p_rxcpl) { in BCMFASTPATH()
6835 if (dhd->prot->rx_ts_log_enabled) { in BCMFASTPATH()
6837 ts_timestamp_t *ts = (ts_timestamp_t *)&msg->ts; in BCMFASTPATH()
6840 dhd_parse_proto(PKTDATA(dhd->osh, pkt), &parse); in BCMFASTPATH()
6843 dhd_timesync_log_rx_timestamp(dhd->ts, ifidx, in BCMFASTPATH()
6844 ts->low, ts->high, &parse); in BCMFASTPATH()
6849 PKTAUDIT(dhd->osh, pkt); in BCMFASTPATH()
6855 if (ring->rd < msg_len / item_len) in BCMFASTPATH()
6856 ring->rd = ring->max_items - msg_len / item_len; in BCMFASTPATH()
6858 ring->rd -= msg_len / item_len; in BCMFASTPATH()
6864 DHD_RING_UNLOCK(ring->ring_lock, flags); in BCMFASTPATH()
6868 nextpkt = PKTNEXT(dhd->osh, pkt); in BCMFASTPATH()
6869 PKTSETNEXT(dhd->osh, pkt, NULL); in BCMFASTPATH()
6928 if (dhd->dma_d2h_ring_upd_support) { in dhd_prot_update_txflowring()
6929 ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx); in dhd_prot_update_txflowring()
6933 ring->idx, flowid, ring->wr, ring->rd)); in dhd_prot_update_txflowring()
6936 dhd_bus_schedule_queue(dhd->bus, flowid, TRUE); /* from queue to flowring */ in dhd_prot_update_txflowring()
6949 if (ringtype == DHD_HP2P_RING && dhd->prot->d2hring_hp2p_txcpl) in BCMFASTPATH()
6950 ring = dhd->prot->d2hring_hp2p_txcpl; in BCMFASTPATH()
6953 ring = &dhd->prot->d2hring_tx_cpln; in BCMFASTPATH()
6955 /* Process all the messages - DTOH direction */ in BCMFASTPATH()
6965 if (dhd->hang_was_sent) { in BCMFASTPATH()
6970 if (dhd->smmu_fault_occurred) { in BCMFASTPATH()
6975 DHD_RING_LOCK(ring->ring_lock, flags); in BCMFASTPATH()
6978 DHD_RING_UNLOCK(ring->ring_lock, flags); in BCMFASTPATH()
6990 __FUNCTION__, ring->name, msg_addr, msg_len)); in BCMFASTPATH()
6997 n += msg_len / ring->item_len; in BCMFASTPATH()
7007 if (dhd->dma_h2d_ring_upd_support && !IDMA_ACTIVE(dhd)) { in BCMFASTPATH()
7009 dhd->prot->txcpl_db_cnt++; in BCMFASTPATH()
7019 dhd_dma_buf_t *trap_addr = &dhd->prot->fw_trap_buf; in BCMFASTPATH()
7024 if (trap_addr->va == NULL) { in BCMFASTPATH()
7025 DHD_ERROR(("%s: trap_addr->va is NULL\n", __FUNCTION__)); in BCMFASTPATH()
7029 OSL_CACHE_INV((void *)trap_addr->va, sizeof(uint32)); in BCMFASTPATH()
7030 data = *(uint32 *)(trap_addr->va); in BCMFASTPATH()
7033 if (dhd->db7_trap.fw_db7w_trap_inprogress) { in BCMFASTPATH()
7034 DHD_ERROR(("DB7 FW responded 0x%04x\n", data)); in BCMFASTPATH()
7041 if (dhd->extended_trap_data) { in BCMFASTPATH()
7042 OSL_CACHE_INV((void *)trap_addr->va, in BCMFASTPATH()
7044 memcpy(dhd->extended_trap_data, (uint32 *)trap_addr->va, in BCMFASTPATH()
7047 if (dhd->db7_trap.fw_db7w_trap_inprogress == FALSE) { in BCMFASTPATH()
7054 dhd->dongle_trap_due_to_bt = TRUE; in BCMFASTPATH()
7066 dhd_prot_t *prot = dhd->prot; in BCMFASTPATH()
7067 msgbuf_ring_t *ring = &prot->d2hring_ctrl_cpln; in BCMFASTPATH()
7070 /* Process all the messages - DTOH direction */ in BCMFASTPATH()
7079 if (dhd->hang_was_sent) { in BCMFASTPATH()
7083 if (dhd->smmu_fault_occurred) { in BCMFASTPATH()
7087 DHD_RING_LOCK(ring->ring_lock, flags); in BCMFASTPATH()
7090 DHD_RING_UNLOCK(ring->ring_lock, flags); in BCMFASTPATH()
7100 __FUNCTION__, ring->name, msg_addr, msg_len)); in BCMFASTPATH()
7125 item_len = ring->item_len; in BCMFASTPATH()
7128 __FUNCTION__, ring->idx, item_len, buf_len)); in BCMFASTPATH()
7133 if (dhd->hang_was_sent) { in BCMFASTPATH()
7138 if (dhd->smmu_fault_occurred) { in BCMFASTPATH()
7146 msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, item_len); in BCMFASTPATH()
7154 if ((ring->curr_rd + 1) >= ring->max_items) { in BCMFASTPATH()
7155 ring->curr_rd = 0; in BCMFASTPATH()
7157 ring->curr_rd += 1; in BCMFASTPATH()
7181 if (ring == dhd->prot->d2hring_info_cpln) { in BCMFASTPATH()
7182 if (!dhd->prot->infobufpost) { in BCMFASTPATH()
7187 dhd->prot->infobufpost--; in BCMFASTPATH()
7188 dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn); in BCMFASTPATH()
7192 else if (ring == dhd->prot->d2hring_btlog_cpln) { in BCMFASTPATH()
7195 if (!dhd->prot->btlogbufpost) { in BCMFASTPATH()
7201 dhd->prot->btlogbufpost--; in BCMFASTPATH()
7202 if (resp->compl_hdr.status != BCMPCIE_PKT_FLUSH) { in BCMFASTPATH()
7203 dhd_prot_infobufpost(dhd, dhd->prot->h2dring_btlog_subn); in BCMFASTPATH()
7218 buf_len = buf_len - item_len; in BCMFASTPATH()
7242 uint32 request_id = ltoh32(ring_status->cmn_hdr.request_id); in dhd_prot_ringstatus_process()
7243 uint16 status = ltoh16(ring_status->compl_hdr.status); in dhd_prot_ringstatus_process()
7244 uint16 ring_id = ltoh16(ring_status->compl_hdr.flow_ring_id); in dhd_prot_ringstatus_process()
7247 request_id, status, ring_id, ltoh16(ring_status->write_idx))); in dhd_prot_ringstatus_process()
7249 if (ltoh16(ring_status->compl_hdr.ring_id) != BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) in dhd_prot_ringstatus_process()
7261 if (dhd->prot->h2dring_info_subn != NULL) { in dhd_prot_ringstatus_process()
7262 if (dhd->prot->h2dring_info_subn->create_pending == TRUE) { in dhd_prot_ringstatus_process()
7264 dhd->prot->h2dring_info_subn->create_pending = FALSE; in dhd_prot_ringstatus_process()
7274 if (dhd->prot->d2hring_info_cpln != NULL) { in dhd_prot_ringstatus_process()
7275 if (dhd->prot->d2hring_info_cpln->create_pending == TRUE) { in dhd_prot_ringstatus_process()
7277 dhd->prot->d2hring_info_cpln->create_pending = FALSE; in dhd_prot_ringstatus_process()
7288 if (dhd->prot->h2dring_btlog_subn != NULL) { in dhd_prot_ringstatus_process()
7289 if (dhd->prot->h2dring_btlog_subn->create_pending == TRUE) { in dhd_prot_ringstatus_process()
7291 dhd->prot->h2dring_btlog_subn->create_pending = FALSE; in dhd_prot_ringstatus_process()
7301 if (dhd->prot->d2hring_btlog_cpln != NULL) { in dhd_prot_ringstatus_process()
7302 if (dhd->prot->d2hring_btlog_cpln->create_pending == TRUE) { in dhd_prot_ringstatus_process()
7304 dhd->prot->d2hring_btlog_cpln->create_pending = FALSE; in dhd_prot_ringstatus_process()
7316 if (dhd->prot->d2hring_hp2p_txcpl != NULL) { in dhd_prot_ringstatus_process()
7317 if (dhd->prot->d2hring_hp2p_txcpl->create_pending == TRUE) { in dhd_prot_ringstatus_process()
7319 dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE; in dhd_prot_ringstatus_process()
7329 if (dhd->prot->d2hring_hp2p_rxcpl != NULL) { in dhd_prot_ringstatus_process()
7330 if (dhd->prot->d2hring_hp2p_rxcpl->create_pending == TRUE) { in dhd_prot_ringstatus_process()
7332 dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE; in dhd_prot_ringstatus_process()
7354 gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status, in dhd_prot_genstatus_process()
7355 gen_status->compl_hdr.flow_ring_id)); in dhd_prot_genstatus_process()
7371 uint32 pktid = ltoh32(ioct_ack->cmn_hdr.request_id); in dhd_prot_ioctack_process()
7378 DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid, in dhd_prot_ioctack_process()
7381 DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_map_handle_ioctl, pktid, in dhd_prot_ioctack_process()
7387 dhd->prot->ioctl_ack_time = OSL_LOCALTIME_NS(); in dhd_prot_ioctack_process()
7390 if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) && in dhd_prot_ioctack_process()
7391 (dhd->prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) { in dhd_prot_ioctack_process()
7392 dhd->prot->ioctl_state &= ~MSGBUF_IOCTL_ACK_PENDING; in dhd_prot_ioctack_process()
7395 __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id)); in dhd_prot_ioctack_process()
7402 ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status, in dhd_prot_ioctack_process()
7403 ioct_ack->compl_hdr.flow_ring_id)); in dhd_prot_ioctack_process()
7404 if (ioct_ack->compl_hdr.status != 0) { in dhd_prot_ioctack_process()
7419 dhd_prot_t *prot = dhd->prot; in dhd_prot_ioctcmplt_process()
7433 if (dhd->dhd_induce_error == DHD_INDUCE_IOCTL_TIMEOUT) { in dhd_prot_ioctcmplt_process()
7440 pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id); in dhd_prot_ioctcmplt_process()
7444 DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_ctrl_map, pkt_id, in dhd_prot_ioctcmplt_process()
7447 DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_map_handle_ioctl, pkt_id, in dhd_prot_ioctcmplt_process()
7453 if ((prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) || in dhd_prot_ioctcmplt_process()
7454 !(prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) { in dhd_prot_ioctcmplt_process()
7456 __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id)); in dhd_prot_ioctcmplt_process()
7463 dhd->prot->ioctl_cmplt_time = OSL_LOCALTIME_NS(); in dhd_prot_ioctcmplt_process()
7466 prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING; in dhd_prot_ioctcmplt_process()
7482 prot->ioctl_resplen = ltoh16(ioct_resp->resp_len); in dhd_prot_ioctcmplt_process()
7483 prot->ioctl_status = ltoh16(ioct_resp->compl_hdr.status); in dhd_prot_ioctcmplt_process()
7484 xt_id = ltoh16(ioct_resp->trans_id); in dhd_prot_ioctcmplt_process()
7486 if (xt_id != prot->ioctl_trans_id || prot->curr_ioctl_cmd != ioct_resp->cmd) { in dhd_prot_ioctcmplt_process()
7488 __FUNCTION__, xt_id, prot->ioctl_trans_id, in dhd_prot_ioctcmplt_process()
7489 prot->curr_ioctl_cmd, ioct_resp->cmd)); in dhd_prot_ioctcmplt_process()
7496 if (dhd->memdump_enabled) { in dhd_prot_ioctcmplt_process()
7498 dhd->memdump_type = DUMP_TYPE_TRANS_ID_MISMATCH; in dhd_prot_ioctcmplt_process()
7517 pkt_id, xt_id, prot->ioctl_status, prot->ioctl_resplen)); in dhd_prot_ioctcmplt_process()
7519 if (prot->ioctl_resplen > 0) { in dhd_prot_ioctcmplt_process()
7521 bcopy(PKTDATA(dhd->osh, pkt), prot->retbuf.va, prot->ioctl_resplen); in dhd_prot_ioctcmplt_process()
7523 bcopy(pkt, prot->retbuf.va, prot->ioctl_resplen); in dhd_prot_ioctcmplt_process()
7539 if (prot->cur_ioctlresp_bufs_posted > 0) { in dhd_prot_ioctcmplt_process()
7540 prot->cur_ioctlresp_bufs_posted--; in dhd_prot_ioctcmplt_process()
7549 return dhd->prot->no_tx_resource; in dhd_prot_check_tx_resource()
7554 * dhd_msgbuf_get_ip_info - this api finds following (ipv4 and ipv6 are supported)
7580 pdata = PKTDATA(dhdp->osh, pkt); in dhd_msgbuf_get_ip_info()
7581 plen = PKTLEN(dhdp->osh, pkt); in dhd_msgbuf_get_ip_info()
7587 type = ntoh16(((struct ether_header *)pdata)->ether_type); in dhd_msgbuf_get_ip_info()
7589 plen -= ETHER_HDR_LEN; in dhd_msgbuf_get_ip_info()
7610 plen -= len; in dhd_msgbuf_get_ip_info()
7612 checksum ^= bcm_compute_xor32((volatile uint32 *)iph->src_ip, in dhd_msgbuf_get_ip_info()
7613 sizeof(iph->src_ip) / sizeof(uint32)); in dhd_msgbuf_get_ip_info()
7614 checksum ^= bcm_compute_xor32((volatile uint32 *)iph->dst_ip, in dhd_msgbuf_get_ip_info()
7615 sizeof(iph->dst_ip) / sizeof(uint32)); in dhd_msgbuf_get_ip_info()
7625 plen -= IPV6_MIN_HLEN; in dhd_msgbuf_get_ip_info()
7629 if (exth_len < 0 || ((plen -= exth_len) <= 0)) { in dhd_msgbuf_get_ip_info()
7634 plen -= exth_len; in dhd_msgbuf_get_ip_info()
7637 checksum ^= bcm_compute_xor32((volatile uint32 *)&ip6h->saddr, in dhd_msgbuf_get_ip_info()
7638 sizeof(ip6h->saddr) / sizeof(uint32)); in dhd_msgbuf_get_ip_info()
7639 checksum ^= bcm_compute_xor32((volatile uint32 *)&ip6h->daddr, in dhd_msgbuf_get_ip_info()
7640 sizeof(ip6h->saddr) / sizeof(uint32)); in dhd_msgbuf_get_ip_info()
7665 plen -= len; in dhd_msgbuf_get_ip_info()
7678 *tcp_seqno = tcp->seq_num; in dhd_msgbuf_get_ip_info()
7679 *tcp_ackno = tcp->ack_num; in dhd_msgbuf_get_ip_info()
7686 * dhd_msgbuf_send_msg_tx_ts - send pktts tx timestamp to netlnik socket
7712 flow_pkt_offset = flow->pkt_offset; in dhd_msgbuf_send_msg_tx_ts()
7734 ((dlen - flow_pkt_offset) >= sizeof(to_tx_info.hdr.xbytes))) { in dhd_msgbuf_send_msg_tx_ts()
7747 to_tx_info.fwts[0] = ntohl(fwts->ts[0]); in dhd_msgbuf_send_msg_tx_ts()
7748 to_tx_info.fwts[1] = ntohl(fwts->ts[1]); in dhd_msgbuf_send_msg_tx_ts()
7749 to_tx_info.fwts[2] = ntohl(fwts->ts[2]); in dhd_msgbuf_send_msg_tx_ts()
7750 to_tx_info.fwts[3] = ntohl(fwts->ts[3]); in dhd_msgbuf_send_msg_tx_ts()
7758 to_tx_info.fwts[0] = ntohl(fwts->ts[0]); in dhd_msgbuf_send_msg_tx_ts()
7759 to_tx_info.fwts[1] = ntohl(fwts->ts[1]); in dhd_msgbuf_send_msg_tx_ts()
7760 to_tx_info.fwts[2] = ntohl(fwts->ts[2]); in dhd_msgbuf_send_msg_tx_ts()
7761 to_tx_info.fwts[3] = ntohl(fwts->ts[3]); in dhd_msgbuf_send_msg_tx_ts()
7763 to_tx_info.ucts[0] = ntohl(fwts->ut[0]); in dhd_msgbuf_send_msg_tx_ts()
7764 to_tx_info.ucts[1] = ntohl(fwts->ut[1]); in dhd_msgbuf_send_msg_tx_ts()
7765 to_tx_info.ucts[2] = ntohl(fwts->ut[2]); in dhd_msgbuf_send_msg_tx_ts()
7766 to_tx_info.ucts[3] = ntohl(fwts->ut[3]); in dhd_msgbuf_send_msg_tx_ts()
7767 to_tx_info.ucts[4] = ntohl(fwts->ut[4]); in dhd_msgbuf_send_msg_tx_ts()
7769 to_tx_info.uccnt[0] = ntohl(fwts->uc[0]); in dhd_msgbuf_send_msg_tx_ts()
7770 to_tx_info.uccnt[1] = ntohl(fwts->uc[1]); in dhd_msgbuf_send_msg_tx_ts()
7771 to_tx_info.uccnt[2] = ntohl(fwts->uc[2]); in dhd_msgbuf_send_msg_tx_ts()
7772 to_tx_info.uccnt[3] = ntohl(fwts->uc[3]); in dhd_msgbuf_send_msg_tx_ts()
7773 to_tx_info.uccnt[4] = ntohl(fwts->uc[4]); in dhd_msgbuf_send_msg_tx_ts()
7774 to_tx_info.uccnt[5] = ntohl(fwts->uc[5]); in dhd_msgbuf_send_msg_tx_ts()
7775 to_tx_info.uccnt[6] = ntohl(fwts->uc[6]); in dhd_msgbuf_send_msg_tx_ts()
7776 to_tx_info.uccnt[7] = ntohl(fwts->uc[7]); in dhd_msgbuf_send_msg_tx_ts()
7784 * dhd_msgbuf_send_msg_dx_ts - send pktts rx timestamp to netlnik socket
7810 flow_pkt_offset = flow->pkt_offset; in dhd_msgbuf_send_msg_rx_ts()
7833 ((dlen - flow_pkt_offset) >= sizeof(to_rx_info.hdr.xbytes))) { in dhd_msgbuf_send_msg_rx_ts()
7852 dhd_prot_t *prot = dhd->prot; in BCMFASTPATH()
7862 msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln; in BCMFASTPATH()
7890 if ((dhd->memdump_type == DUMP_TYPE_PKTID_AUDIT_FAILURE) || in BCMFASTPATH()
7891 (dhd->memdump_type == DUMP_TYPE_PKTID_INVALID)) { in BCMFASTPATH()
7902 flowid = txstatus->compl_hdr.flow_ring_id; in BCMFASTPATH()
7906 OSL_ATOMIC_DEC(dhd->osh, &flow_ring->inflight); in BCMFASTPATH()
7916 flow_ring_node->tx_cmpl = OSL_SYSUPTIME(); in BCMFASTPATH()
7919 if (dhd->prot->d2hring_hp2p_txcpl && in BCMFASTPATH()
7920 flow_ring_node->flow_info.tid == HP2P_PRIO) { in BCMFASTPATH()
7921 ring = dhd->prot->d2hring_hp2p_txcpl; in BCMFASTPATH()
7924 ring->curr_rd++; in BCMFASTPATH()
7925 if (ring->curr_rd >= ring->max_items) { in BCMFASTPATH()
7926 ring->curr_rd = 0; in BCMFASTPATH()
7931 DHD_RING_LOCK(ring->ring_lock, flags); in BCMFASTPATH()
7932 pktid = ltoh32(txstatus->cmn_hdr.request_id); in BCMFASTPATH()
7934 if (dhd->pcie_txs_metadata_enable > 1) { in BCMFASTPATH()
7936 * |<--- txstatus --->|<- metadatalen ->| in BCMFASTPATH()
7938 * | | | | |> total delay from fetch to report (8-bit 1 = 4ms) in BCMFASTPATH()
7939 * | | | |> ucode delay from enqueue to completion (8-bit 1 = 4ms) in BCMFASTPATH()
7940 * | | |> 8-bit reserved (pre-filled with original TX status by caller) in BCMFASTPATH()
7941 * | |> delay time first fetch to the last fetch (4-bit 1 = 32ms) in BCMFASTPATH()
7942 * |> fetch count (4-bit) in BCMFASTPATH()
7944 printf("TX status[%d] = %04x-%04x -> status = %d (%d/%dms + %d/%dms)\n", pktid, in BCMFASTPATH()
7945 ltoh16(txstatus->tx_status_ext), ltoh16(txstatus->tx_status), in BCMFASTPATH()
7946 (txstatus->tx_status & WLFC_CTL_PKTFLAG_MASK), in BCMFASTPATH()
7947 ((txstatus->tx_status >> 12) & 0xf), in BCMFASTPATH()
7948 ((txstatus->tx_status >> 8) & 0xf) * 32, in BCMFASTPATH()
7949 ((txstatus->tx_status_ext & 0xff) * 4), in BCMFASTPATH()
7950 ((txstatus->tx_status_ext >> 8) & 0xff) * 4); in BCMFASTPATH()
7955 DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_tx_map, pktid, in BCMFASTPATH()
7960 if (OSL_ATOMIC_DEC_RETURN(dhd->osh, &prot->active_tx_count) < 0) { in BCMFASTPATH()
7967 if ((dhd->prot->hmaptest_tx_active == HMAPTEST_D11_TX_POSTED) && in BCMFASTPATH()
7968 (pktid == dhd->prot->hmaptest_tx_pktid)) { in BCMFASTPATH()
7970 DHD_ERROR(("hmaptest: d11read txcpl txstatus=0x%08x\n", txstatus->tx_status)); in BCMFASTPATH()
7972 dhd->prot->hmap_tx_buf_va, (uint32)PHYSADDRLO(dhd->prot->hmap_tx_buf_pa))); in BCMFASTPATH()
7973 dhd->prot->hmaptest_tx_active = HMAPTEST_D11_TX_INACTIVE; in BCMFASTPATH()
7974 dhd->prot->hmap_tx_buf_va = NULL; in BCMFASTPATH()
7975 dhd->prot->hmap_tx_buf_len = 0; in BCMFASTPATH()
7976 PHYSADDRHISET(dhd->prot->hmap_tx_buf_pa, 0); in BCMFASTPATH()
7977 PHYSADDRLOSET(dhd->prot->hmap_tx_buf_pa, 0); in BCMFASTPATH()
7978 prot->hmaptest.in_progress = FALSE; in BCMFASTPATH()
7985 dhd->pkt_metadata_buflen) { in BCMFASTPATH()
7987 meta_data_buf.va = DHD_PKTID_RETREIVE_METADATA(dhd, dhd->prot->pktid_tx_map, in BCMFASTPATH()
7990 if (dhd->pkt_metadata_version == METADATA_VER_1) { in BCMFASTPATH()
7992 } else if (dhd->pkt_metadata_version == METADATA_VER_2) { in BCMFASTPATH()
8004 pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid, in BCMFASTPATH()
8007 DHD_RING_UNLOCK(ring->ring_lock, flags); in BCMFASTPATH()
8015 DMA_FREE_CONSISTENT(dhd->osh, meta_data_buf.va, meta_data_buf._alloced, in BCMFASTPATH()
8022 if (dhd->memdump_enabled) { in BCMFASTPATH()
8024 dhd->memdump_type = DUMP_TYPE_PKTID_INVALID; in BCMFASTPATH()
8033 if (DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map) == DHD_PKTID_MIN_AVAIL_COUNT) { in BCMFASTPATH()
8036 prot->pktid_txq_stop_cnt--; in BCMFASTPATH()
8037 dhd->prot->no_tx_resource = FALSE; in BCMFASTPATH()
8038 dhd_bus_start_queue(dhd->bus); in BCMFASTPATH()
8041 DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah); in BCMFASTPATH()
8045 flow_info = &flow_ring_node->flow_info; in BCMFASTPATH()
8046 tx_status_latency = OSL_SYSUPTIME_US() - DHD_PKT_GET_QTIME(pkt); in BCMFASTPATH()
8048 if (dhd->pkt_latency > 0 && in BCMFASTPATH()
8049 tx_status_latency > (dhd->pkt_latency)) { in BCMFASTPATH()
8051 tx_status_latency, dhd->pkt_latency, in BCMFASTPATH()
8052 dhd->awdl_aw_counter)); in BCMFASTPATH()
8055 flow_info->cum_tx_status_latency += tx_status_latency; in BCMFASTPATH()
8056 flow_info->num_tx_status++; in BCMFASTPATH()
8060 if_flow_lkup = (if_flow_lkup_t *)dhd->if_flow_lkup; in BCMFASTPATH()
8061 ifindex = flow_ring_node->flow_info.ifindex; in BCMFASTPATH()
8064 awdl_stats = &dhd->awdl_stats[dhd->awdl_tx_status_slot]; in BCMFASTPATH()
8065 DHD_AWDL_STATS_LOCK(dhd->awdl_stats_lock, awdl_stats_lock_flags); in BCMFASTPATH()
8066 awdl_stats->cum_tx_status_latency += tx_status_latency; in BCMFASTPATH()
8067 awdl_stats->num_tx_status++; in BCMFASTPATH()
8068 DHD_AWDL_STATS_UNLOCK(dhd->awdl_stats_lock, awdl_stats_lock_flags); in BCMFASTPATH()
8073 if (dhd->host_sfhllc_supported) { in BCMFASTPATH()
8076 PKTDATA(dhd->osh, pkt), sizeof(eth))) { in BCMFASTPATH()
8077 if (dhd_8023_llc_to_ether_hdr(dhd->osh, in BCMFASTPATH()
8088 dhd->dma_stats.txdata--; in BCMFASTPATH()
8089 dhd->dma_stats.txdata_sz -= len; in BCMFASTPATH()
8092 ltoh16(txstatus->compl_hdr.status) & WLFC_CTL_PKTFLAG_MASK); in BCMFASTPATH()
8094 if (dhd->d11_tx_status) { in BCMFASTPATH()
8095 uint16 status = ltoh16(txstatus->compl_hdr.status) & in BCMFASTPATH()
8097 dhd_handle_pktdata(dhd, ltoh32(txstatus->cmn_hdr.if_id), in BCMFASTPATH()
8098 pkt, (uint8 *)PKTDATA(dhd->osh, pkt), pktid, len, in BCMFASTPATH()
8105 dhd_eap_txcomplete(dhd, pkt, pkt_fate, txstatus->cmn_hdr.if_id); in BCMFASTPATH()
8111 if (dhd->pkt_metadata_buflen) { in BCMFASTPATH()
8113 if ((dhd->pkt_metadata_version == METADATA_VER_1) && in BCMFASTPATH()
8127 dhd->pkt_metadata_version); in BCMFASTPATH()
8129 } else if ((dhd->pkt_metadata_version == METADATA_VER_2) && in BCMFASTPATH()
8169 dhd->pkt_metadata_version); in BCMFASTPATH()
8174 if (ltoh32(txstatus->tx_pktts.tref) != 0xFFFFFFFF) { in BCMFASTPATH()
8177 fwts.ts[0] = (uint32)htonl(ltoh32(txstatus->tx_pktts.tref)); in BCMFASTPATH()
8178 fwts.ts[1] = (uint32)htonl(ltoh32(txstatus->tx_pktts.tref) + in BCMFASTPATH()
8179 ltoh16(txstatus->tx_pktts.d_t2)); in BCMFASTPATH()
8180 fwts.ts[2] = (uint32)htonl(ltoh32(txstatus->tx_pktts.tref) + in BCMFASTPATH()
8181 ltoh16(txstatus->tx_pktts.d_t3)); in BCMFASTPATH()
8182 fwts.ts[3] = (uint32)htonl(ltoh32(txstatus->tx_pktts.tref) + in BCMFASTPATH()
8183 ltoh16(txstatus->compl_hdr.tx_pktts.d_t4)); in BCMFASTPATH()
8196 if (dhd->prot->metadata_dbg && in BCMFASTPATH()
8197 dhd->prot->tx_metadata_offset && txstatus->metadata_len) { in BCMFASTPATH()
8202 PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN); in BCMFASTPATH()
8203 ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->tx_metadata_offset); in BCMFASTPATH()
8204 bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len); in BCMFASTPATH()
8205 dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len); in BCMFASTPATH()
8210 if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) { in BCMFASTPATH()
8219 if (dhd->prot->tx_ts_log_enabled) { in BCMFASTPATH()
8221 ts_timestamp_t *ts = (ts_timestamp_t *)&(txstatus->ts); in BCMFASTPATH()
8224 dhd_parse_proto(PKTDATA(dhd->osh, pkt), &parse); in BCMFASTPATH()
8227 dhd_timesync_log_tx_timestamp(dhd->ts, in BCMFASTPATH()
8228 txstatus->compl_hdr.flow_ring_id, in BCMFASTPATH()
8229 txstatus->cmn_hdr.if_id, in BCMFASTPATH()
8230 ts->low, ts->high, &parse); in BCMFASTPATH()
8235 PKTAUDIT(dhd->osh, pkt); in BCMFASTPATH()
8237 DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id, in BCMFASTPATH()
8238 txstatus->tx_status); in BCMFASTPATH()
8239 DHD_RING_UNLOCK(ring->ring_lock, flags); in BCMFASTPATH()
8242 DMA_FREE_CONSISTENT(dhd->osh, meta_data_buf.va, meta_data_buf._alloced, in BCMFASTPATH()
8247 DHD_MEM_STATS_LOCK(dhd->mem_stats_lock, flags); in BCMFASTPATH()
8249 __FUNCTION__, dhd->txpath_mem, PKTLEN(dhd->osh, pkt))); in BCMFASTPATH()
8250 dhd->txpath_mem -= PKTLEN(dhd->osh, pkt); in BCMFASTPATH()
8251 DHD_MEM_STATS_UNLOCK(dhd->mem_stats_lock, flags); in BCMFASTPATH()
8253 PKTFREE(dhd->osh, pkt, TRUE); in BCMFASTPATH()
8268 dhd_prot_t *prot = dhd->prot; in dhd_prot_event_process()
8272 bufid = ltoh32(evnt->cmn_hdr.request_id); in dhd_prot_event_process()
8275 DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, bufid, in dhd_prot_event_process()
8279 buflen = ltoh16(evnt->event_data_len); in dhd_prot_event_process()
8281 ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr); in dhd_prot_event_process()
8285 if (prot->cur_event_bufs_posted) in dhd_prot_event_process()
8286 prot->cur_event_bufs_posted--; in dhd_prot_event_process()
8299 if (dhd->prot->rx_dataoffset) in dhd_prot_event_process()
8300 PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset); in dhd_prot_event_process()
8303 PKTSETLEN(dhd->osh, pkt, buflen); in dhd_prot_event_process()
8305 PKTAUDIT(dhd->osh, pkt); in dhd_prot_event_process()
8307 dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1); in dhd_prot_event_process()
8321 pktid = ltoh32(resp->cmn_hdr.request_id); in BCMFASTPATH()
8322 buflen = ltoh16(resp->info_data_len); in BCMFASTPATH()
8325 DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid, in BCMFASTPATH()
8330 pktid, buflen, resp->cmn_hdr.flags, ltoh16(resp->seqnum), in BCMFASTPATH()
8331 dhd->prot->rx_dataoffset)); in BCMFASTPATH()
8333 if (dhd->debug_buf_dest_support) { in BCMFASTPATH()
8334 if (resp->dest < DEBUG_BUF_DEST_MAX) { in BCMFASTPATH()
8335 dhd->debug_buf_dest_stat[resp->dest]++; in BCMFASTPATH()
8346 if (dhd->prot->rx_dataoffset) in BCMFASTPATH()
8347 PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset); in BCMFASTPATH()
8350 PKTSETLEN(dhd->osh, pkt, buflen); in BCMFASTPATH()
8352 PKTAUDIT(dhd->osh, pkt); in BCMFASTPATH()
8355 * special ifidx of -1. This is just internal to dhd to get the data to in BCMFASTPATH()
8358 dhd_bus_rx_frame(dhd->bus, pkt, DHD_DUMMY_INFO_IF /* ifidx HACK */, 1); in BCMFASTPATH()
8367 dhd_prot_t *prot = dhd->prot; in BCMFASTPATH()
8374 status = resp->compl_hdr.status; in BCMFASTPATH()
8390 prot->snapshot_upload_len = ltoh32(resp->resp_len); in BCMFASTPATH()
8391 prot->snapshot_type = resp->type; in BCMFASTPATH()
8392 prot->snapshot_cmpl_pending = FALSE; in BCMFASTPATH()
8395 __FUNCTION__, ltoh32(resp->cmn_hdr.request_id), in BCMFASTPATH()
8396 resp->cmn_hdr.flags, in BCMFASTPATH()
8397 prot->snapshot_upload_len, prot->snapshot_type)); in BCMFASTPATH()
8412 pktid = ltoh32(resp->cmn_hdr.request_id); in BCMFASTPATH()
8413 buflen = ltoh16(resp->info_data_len); in BCMFASTPATH()
8416 if (resp->compl_hdr.status != BCMPCIE_SUCCESS) { in BCMFASTPATH()
8418 __FUNCTION__, resp->compl_hdr.status)); in BCMFASTPATH()
8423 DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid, in BCMFASTPATH()
8428 pktid, buflen, resp->cmn_hdr.flags, ltoh16(resp->seqnum), in BCMFASTPATH()
8429 dhd->prot->rx_dataoffset)); in BCMFASTPATH()
8439 if (dhd->prot->rx_dataoffset) in BCMFASTPATH()
8440 PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset); in BCMFASTPATH()
8443 PKTSETLEN(dhd->osh, pkt, buflen); in BCMFASTPATH()
8444 PKTSETNEXT(dhd->osh, pkt, NULL); in BCMFASTPATH()
8446 dhd_bus_rx_bt_log(dhd->bus, pkt); in BCMFASTPATH()
8457 if (dhd->prot) { in dhd_prot_stop()
8458 DHD_NATIVE_TO_PKTID_RESET(dhd, dhd->prot->pktid_ctrl_map); in dhd_prot_stop()
8459 DHD_NATIVE_TO_PKTID_RESET(dhd, dhd->prot->pktid_rx_map); in dhd_prot_stop()
8460 DHD_NATIVE_TO_PKTID_RESET(dhd, dhd->prot->pktid_tx_map); in dhd_prot_stop()
8462 DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, dhd->prot->pktid_map_handle_ioctl); in dhd_prot_stop()
8468 /* Add any protocol-specific data header.
8493 dhd_prot_t *prot = dhd->prot;
8511 uint16 meta_data_buf_len = dhd->pkt_metadata_buflen;
8516 bool host_sfh_llc_reqd = dhd->host_sfhllc_supported;
8521 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) {
8527 if (dhd->flow_ring_table == NULL) {
8533 if (!DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map)) {
8534 if (dhd->prot->pktid_depleted_cnt == DHD_PKTID_DEPLETED_MAX_COUNT) {
8537 prot->pktid_txq_stop_cnt++;
8538 dhd_bus_stop_queue(dhd->bus);
8539 dhd->prot->no_tx_resource = TRUE;
8541 dhd->prot->pktid_depleted_cnt++;
8544 dhd->prot->pktid_depleted_cnt = 0;
8548 if (dhd->dhd_induce_error == DHD_INDUCE_TX_BIG_PKT) {
8549 if ((big_pktbuf = PKTGET(dhd->osh, DHD_FLOWRING_TX_BIG_PKT_SIZE, TRUE)) == NULL) {
8554 memset(PKTDATA(dhd->osh, big_pktbuf), 0xff, DHD_FLOWRING_TX_BIG_PKT_SIZE);
8555 DHD_ERROR(("PKTBUF len = %d big_pktbuf len = %d\n", PKTLEN(dhd->osh, PKTBUF),
8556 PKTLEN(dhd->osh, big_pktbuf)));
8557 if (memcpy_s(PKTDATA(dhd->osh, big_pktbuf), DHD_FLOWRING_TX_BIG_PKT_SIZE,
8558 PKTDATA(dhd->osh, PKTBUF), PKTLEN(dhd->osh, PKTBUF)) != BCME_OK) {
8565 flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
8568 ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
8572 * JIRA SW4349-436:
8584 if (osl_is_flag_set(dhd->osh, OSL_PHYS_MEM_LESS_THAN_16MB)) {
8595 * the caller re-queues this packet
8606 * so the caller would re-queue the original SKB.
8613 if (dhd->dhd_induce_error == DHD_INDUCE_TX_BIG_PKT && big_pktbuf) {
8614 PKTFREE(dhd->osh, PKTBUF, TRUE);
8618 DHD_RING_LOCK(ring->ring_lock, flags);
8620 /* Create a unique 32-bit packet id */
8621 pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_tx_map,
8641 __FUNCTION__, __LINE__, OSL_ATOMIC_READ(dhd->osh, &prot->active_tx_count)));
8644 txdesc->flags = 0;
8647 pktdata = PKTDATA(dhd->osh, PKTBUF);
8648 pktlen = PKTLEN(dhd->osh, PKTBUF);
8650 /* TODO: XXX: re-look into dropped packets */
8661 if (osl_is_flag_set(dhd->osh, OSL_PHYS_MEM_LESS_THAN_16MB))
8662 PKTCFREE(dhd->osh, pkt_to_free, FALSE);
8665 /* Ethernet header - contains ethertype field
8668 bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN);
8671 /* the awdl ifidx will always have a non-zero value
8674 * Hence we can check for non-zero value of awdl ifidx to
8677 if (dhd->awdl_llc_enabled &&
8678 dhd->awdl_ifidx && ifidx == dhd->awdl_ifidx) {
8683 * in FW will not be exercised - which is the
8696 * re-copying the ether header
8698 memcpy_s(txdesc->txhdr, ETHER_HDR_LEN, PKTDATA(dhd->osh, PKTBUF),
8709 if (dhd_ether_to_8023_hdr(dhd->osh, (struct ether_header *)pktdata,
8712 pktdata = PKTDATA(dhd->osh, PKTBUF);
8713 pktlen = PKTLEN(dhd->osh, PKTBUF);
8714 txdesc->flags |= BCMPCIE_TXPOST_FLAGS_HOST_SFH_LLC;
8722 pktlen = PKTLEN(dhd->osh, PKTBUF) - ETHER_HDR_LEN;
8723 pktdata = PKTPULL(dhd->osh, PKTBUF, ETHER_HDR_LEN);
8726 /* Map the data pointer to a DMA-able address */
8727 pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0);
8740 dhd->dma_stats.txdata++;
8741 dhd->dma_stats.txdata_sz += pktlen;
8744 DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_tx_map, PKTBUF, pktid,
8745 pa, pktlen, DMA_TX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_TX);
8748 if (ring->pend_items_count == 0)
8749 ring->start_addr = (void *)txdesc;
8750 ring->pend_items_count++;
8753 if (dhd->prot->hmaptest_tx_active == HMAPTEST_D11_TX_ACTIVE) {
8755 dhd->prot->hmap_tx_buf_va = (char *)dhd->prot->hmaptest.mem.va
8756 + dhd->prot->hmaptest.offset;
8758 dhd->prot->hmap_tx_buf_len = pktlen;
8759 if ((dhd->prot->hmap_tx_buf_va + dhd->prot->hmap_tx_buf_len) >
8760 ((char *)dhd->prot->hmaptest.mem.va + dhd->prot->hmaptest.mem.len)) {
8763 dhd->prot->hmaptest_tx_active = HMAPTEST_D11_TX_INACTIVE;
8764 dhd->prot->hmaptest.in_progress = FALSE;
8767 memcpy(dhd->prot->hmap_tx_buf_va, PKTDATA(dhd->osh, PKTBUF), pktlen);
8768 pa = DMA_MAP(dhd->osh, dhd->prot->hmap_tx_buf_va,
8769 dhd->prot->hmap_tx_buf_len, DMA_TX, PKTBUF, 0);
8771 dhd->prot->hmap_tx_buf_pa = pa;
8773 dhd->prot->hmaptest_tx_pktid = pktid;
8774 dhd->prot->hmaptest_tx_active = HMAPTEST_D11_TX_POSTED;
8777 dhd->prot->hmap_tx_buf_va, (uint32)PHYSADDRLO(pa), pktlen));
8785 dhd->pkt_metadata_buflen) {
8787 meta_data_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh, meta_data_buf_len,
8795 DHD_PKTID_SAVE_METADATA(dhd, dhd->prot->pktid_tx_map,
8806 txdesc->metadata_buf_addr.low = addr & (0xFFFFFFFF);
8807 txdesc->metadata_buf_addr.high = (addr >> 32) & (0xFFFFFFFF);
8808 txdesc->metadata_buf_len = meta_data_buf_len;
8815 txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST;
8816 txdesc->cmn_hdr.if_id = ifidx;
8817 txdesc->cmn_hdr.flags = ring->current_phase;
8819 txdesc->flags |= BCMPCIE_PKT_FLAGS_FRAME_802_3;
8823 txdesc->flags &= ~BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_MASK <<
8825 txdesc->flags |= (WLPKTFLAG_EXEMPT_GET(WLPKTTAG(PKTBUF)) &
8830 txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT;
8831 txdesc->seg_cnt = 1;
8833 txdesc->data_len = htol16((uint16) pktlen);
8834 txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
8835 txdesc->data_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
8840 PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN);
8843 txdesc->ext_flags = 0;
8846 txdesc->rate = 0;
8848 if (!llc_inserted && dhd->prot->tx_ts_log_enabled) {
8851 dhd_parse_proto(PKTDATA(dhd->osh, PKTBUF), &parse);
8854 if (dhd->prot->no_retry)
8855 txdesc->ext_flags = BCMPCIE_PKT_FLAGS_FRAME_NORETRY;
8856 if (dhd->prot->no_aggr)
8857 txdesc->ext_flags |= BCMPCIE_PKT_FLAGS_FRAME_NOAGGR;
8858 if (dhd->prot->fixed_rate)
8859 txdesc->ext_flags |= BCMPCIE_PKT_FLAGS_FRAME_UDR;
8866 txdesc->ext_flags |= BCMPCIE_PKT_FLAGS_FRAME_UDR;
8872 dhd->tx_profile_enab && dhd->num_profiles > 0)
8876 for (offset = 0; offset < dhd->num_profiles; offset++) {
8877 if (dhd_protocol_matches_profile((uint8 *)PKTDATA(dhd->osh, PKTBUF),
8878 PKTLEN(dhd->osh, PKTBUF), &(dhd->protocol_filters[offset]),
8881 txdesc->rate |=
8882 (((uint8)dhd->protocol_filters[offset].profile_index) &
8886 txdesc->rate |= BCMPCIE_TXPOST_RATE_EXT_USAGE;
8895 headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF);
8896 if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset))
8898 prot->tx_metadata_offset, headroom));
8900 if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) {
8901 DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset));
8904 PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset);
8906 meta_pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
8907 prot->tx_metadata_offset, DMA_RX, PKTBUF, 0);
8910 /* Unmap the data pointer to a DMA-able address */
8911 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_TX, 0, DHD_DMAH_NULL);
8914 ring->pend_items_count--;
8927 PKTPULL(dhd->osh, PKTBUF, prot->tx_metadata_offset);
8929 txdesc->metadata_buf_len = prot->tx_metadata_offset;
8930 txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa));
8931 txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa));
8934 if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
8939 if (!dhd_get_pktts_enab(dhd) || !dhd->pkt_metadata_buflen) {
8943 txdesc->metadata_buf_len = htol16(0);
8944 txdesc->metadata_buf_addr.high_addr = 0;
8945 txdesc->metadata_buf_addr.low_addr = 0;
8950 OSL_ATOMIC_INC(dhd->osh, &ring->inflight);
8954 DHD_PKTID_AUDIT(dhd, prot->pktid_tx_map, pktid, DHD_DUPLICATE_ALLOC);
8957 txdesc->cmn_hdr.request_id = htol32(pktid);
8959 DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len,
8960 txdesc->cmn_hdr.request_id));
8963 PKTAUDIT(dhd->osh, PKTBUF);
8969 if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
8974 if ((ring->pend_items_count == prot->txp_threshold) ||
9005 DHD_RING_UNLOCK(ring->ring_lock, flags);
9007 OSL_ATOMIC_INC(dhd->osh, &prot->active_tx_count);
9016 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
9019 flow_ring_node->flow_info.num_tx_pkts++;
9025 if (ring->wr == 0) {
9026 ring->wr = ring->max_items - 1;
9028 ring->wr--;
9029 if (ring->wr == 0) {
9030 DHD_INFO(("%s: flipping the phase now\n", ring->name));
9031 ring->current_phase = ring->current_phase ?
9042 DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
9050 if (osl_is_flag_set(dhd->osh, OSL_PHYS_MEM_LESS_THAN_16MB))
9051 PKTCFREE(dhd->osh, PKTBUF, FALSE);
9054 DHD_RING_UNLOCK(ring->ring_lock, flags);
9058 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
9071 if (dhd->flow_ring_table == NULL) {
9075 flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
9077 ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
9079 if (ring->pend_items_count) {
9080 dhd_prot_agg_db_ring_write(dhd, ring, ring->start_addr,
9081 ring->pend_items_count);
9082 ring->pend_items_count = 0;
9083 ring->start_addr = NULL;
9099 if (dhd->flow_ring_table == NULL) {
9103 flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
9105 ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
9107 if (ring->pend_items_count) {
9109 dhd_prot_ring_write_complete(dhd, ring, ring->start_addr,
9110 ring->pend_items_count);
9111 ring->pend_items_count = 0;
9112 ring->start_addr = NULL;
9113 dhd->prot->tx_h2d_db_cnt++;
9132 dhd_prot_t *prot = dhd->prot;
9134 if (prot->rxbufpost >= rxcnt) {
9135 prot->rxbufpost -= (uint16)rxcnt;
9141 prot->rxbufpost = 0;
9144 if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
9146 } else if (dhd->dma_h2d_ring_upd_support && !IDMA_ACTIVE(dhd)) {
9161 dhd_prot_t *prot = dhd->prot;
9164 uint32 len = dhd->prot->hmaptest.len;
9168 end_usec -= prot->hmaptest.start_usec;
9172 prot->hmaptest.in_progress = FALSE;
9173 if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_M2M) {
9175 } else if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_ARM) {
9180 readbuf = (char *)dhd->prot->hmaptest.mem.va + dhd->prot->hmaptest.offset;
9181 OSL_CACHE_FLUSH(dhd->prot->hmaptest.mem.va,
9182 dhd->prot->hmaptest.mem.len);
9183 if (prot->hmaptest.is_write) {
9207 pcie_hmapwindow_t *hmapwindows; /* 8 windows 0-7 */
9208 dhd_prot_t *prot = dhd->prot;
9209 uint corerev = dhd->bus->sih->buscorerev;
9211 scratch_pa = prot->hmaptest.mem.pa;
9212 scratch_len = prot->hmaptest.mem.len;
9219 * window0 = 0 - sandbox_start
9220 * window1 = sandbox_end + 1 - 0xffffffff
9221 * window2 = 0x100000000 - 0x1fffffe00
9223 * window3 = sandbox_start - sandbox_end
9243 (uint64)(0x100000000 - w1_start)));
9247 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9249 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9251 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9256 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9259 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9262 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9264 (0x100000000 - w1_start));
9266 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9268 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9270 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9273 /* program only windows 0-2 with section1 +section2 */
9276 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9287 pcie_hmapwindow_t *hmapwindows; /* 8 windows 0-7 */
9288 uint corerev = dhd->bus->sih->buscorerev;
9291 dhd->prot->hmaptest.in_progress = FALSE;
9299 window_config = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9302 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9306 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9308 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9310 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9326 pcie_hmapwindow_t *hmapwindows; /* 8 windows 0-7 */
9328 dhd_prot_t *prot = dhd->prot;
9329 dhd_bus_t *bus = dhd->bus;
9330 uint corerev = bus->sih->buscorerev;
9331 scratch_pa = prot->hmaptest.mem.pa;
9332 scratch_len = prot->hmaptest.mem.len;
9343 if (hmap_params->enable) {
9350 OSL_CACHE_FLUSH(dhd->prot->hmaptest.mem.va,
9351 dhd->prot->hmaptest.mem.len);
9353 window_config = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9356 prot->hmap_enabled = nwindows ? TRUE : FALSE;
9360 DHD_ERROR(("hmap: hmap status = %s\n", (prot->hmap_enabled ? "Enabled" : "Disabled")));
9369 addr_lo = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9371 addr_hi = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9373 window_length = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9379 addr_hi = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9381 addr_lo = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9383 window_length = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9404 dhd_prot_t *prot = dhd->prot;
9408 dhd_bus_t *bus = dhd->bus;
9409 uint corerev = bus->sih->buscorerev;
9411 if (prot->hmaptest.in_progress) {
9416 prot->hmaptest.in_progress = TRUE;
9422 prot->hmaptest.accesstype = hmaptest_params->accesstype;
9423 prot->hmaptest.is_write = hmaptest_params->is_write;
9424 prot->hmaptest.len = hmaptest_params->xfer_len;
9425 prot->hmaptest.offset = hmaptest_params->host_offset;
9426 offset = prot->hmaptest.offset;
9429 prot->hmaptest.is_write, prot->hmaptest.accesstype,
9430 offset, prot->hmaptest.len, hmaptest_params->value));
9433 (uint32)PHYSADDRLO(prot->hmaptest.mem.pa),
9434 (uint32)PHYSADDRHI(prot->hmaptest.mem.pa)));
9436 if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_D11) {
9437 if (prot->hmaptest.is_write) {
9439 dhd->prot->hmaptest_rx_active = HMAPTEST_D11_RX_ACTIVE;
9442 dhd->prot->hmaptest_tx_active = HMAPTEST_D11_TX_ACTIVE;
9448 uint32 maxbuflen = MIN(prot->hmaptest.len, (PKTBUFSZ));
9449 char *fillbuf = (char *)dhd->prot->hmaptest.mem.va
9452 ((char *)dhd->prot->hmaptest.mem.va + dhd->prot->hmaptest.mem.len)) {
9454 dhd->prot->hmaptest.in_progress = FALSE;
9458 if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_M2M) {
9460 } else if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_ARM) {
9463 prot->hmaptest.in_progress = FALSE;
9478 OSL_CACHE_FLUSH(dhd->prot->hmaptest.mem.va,
9479 dhd->prot->hmaptest.mem.len);
9488 if (hmaptest_params->host_addr_lo || hmaptest_params->host_addr_hi) {
9489 if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_D11) {
9494 scratch_lin = (uint64)(PHYSADDRLO(prot->hmaptest.mem.pa) & 0xffffffff)
9495 | (((uint64)PHYSADDRHI(prot->hmaptest.mem.pa) & 0xffffffff) << 32);
9497 hmaptest_params->host_addr_lo = htol32((uint32)(scratch_lin & 0xffffffff));
9498 hmaptest_params->host_addr_hi = htol32((uint32)((scratch_lin >> 32) & 0xffffffff));
9502 prot->hmaptest.start_usec = OSL_SYSUPTIME_US();
9513 dhd_prot_t *prot = dhd->prot;
9516 if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "pcie_bus_tput")) {
9521 bcopy(&prot->host_bus_throughput_buf.pa, &tput_params->host_buf_addr,
9522 sizeof(tput_params->host_buf_addr));
9523 tput_params->host_buf_len = DHD_BUS_TPUT_BUF_LEN;
9531 dhdmsgbuf_hmap(dhd, hmap_params, (ioc->cmd == WLC_SET_VAR));
9534 if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "bus:hmaptest")) {
9551 if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "bus:hmaptest")) {
9556 if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) {
9564 dhd->wme_dp = (uint8) ltoh32(val);
9571 dhd_awdl_peer_op(dhd, (uint8)ifidx, ((char *)buf + slen), len - slen);
9612 if ((len - slen) >= sizeof(*extcnt)) {
9614 dhd->awdl_minext = extcnt->minExt;
9621 if ((len - slen) >= sizeof(uint8)) {
9622 dhd->awdl_presmode = *((uint8 *)((char *)buf + slen));
9637 int ret = -1;
9640 if (dhd->bus->is_linkdown) {
9650 if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
9651 DHD_ERROR_RLMT(("%s : bus is down. we have nothing to do -"
9653 dhd->busstate, dhd->hang_was_sent));
9657 if (dhd->busstate == DHD_BUS_SUSPEND) {
9670 if (dhd->bus->sih->buscorerev == 72) {
9679 if (ioc->cmd == WLC_SET_PM) {
9705 action = ioc->set;
9713 ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
9715 ret = dhd_msgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
9717 ioc->used = ret;
9727 dhd->dongle_error = ret;
9749 dhd_prot_t *prot = dhd->prot;
9756 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
9762 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
9766 DHD_RING_LOCK(ring->ring_lock, flags);
9772 DHD_RING_UNLOCK(ring->ring_lock, flags);
9774 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
9790 ioct_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
9791 ring->seqnum++;
9793 ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK;
9794 ioct_rqst->msg.if_id = 0;
9795 ioct_rqst->msg.flags = ring->current_phase;
9802 DHD_RING_UNLOCK(ring->ring_lock, flags);
9805 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
9817 dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
9818 dhd_dma_buf_free(dhd, &dmaxfer->dstmem);
9825 dhd_prot_t *prot = dhdp->prot;
9826 dhd_dmaxfer_t *dmaxfer = &prot->dmaxfer;
9829 dmap = MALLOCZ(dhdp->osh, sizeof(dmaxref_mem_map_t));
9834 dmap->srcmem = &(dmaxfer->srcmem);
9835 dmap->dstmem = &(dmaxfer->dstmem);
9842 MFREE(dhdp->osh, dmap, sizeof(dmaxref_mem_map_t));
9852 dhd_dma_buf_free(dhdp, dmmap->srcmem);
9853 dhd_dma_buf_free(dhdp, dmmap->dstmem);
9855 MFREE(dhdp->osh, dmmap, sizeof(dmaxref_mem_map_t));
9857 dhdp->bus->dmaxfer_complete = TRUE;
9872 if (dhd_dma_buf_alloc(dhd, &dmaxfer->srcmem, len)) {
9876 if (dhd_dma_buf_alloc(dhd, &dmaxfer->dstmem, len + 8)) {
9877 dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
9881 dmaxfer->len = len;
9893 while (i < dmaxfer->len) {
9894 ((uint8*)dmaxfer->srcmem.va)[i] = j % 256;
9901 OSL_CACHE_FLUSH(dmaxfer->srcmem.va, dmaxfer->len);
9903 dmaxfer->srcdelay = srcdelay;
9904 dmaxfer->destdelay = destdelay;
9912 dhd_prot_t *prot = dhd->prot;
9923 dhd_os_set_intr_poll_period(dhd->bus, dhd->cur_intr_poll_period);
9926 DHD_ERROR(("DMA loopback status: %d\n", cmplt->compl_hdr.status));
9927 prot->dmaxfer.status = cmplt->compl_hdr.status;
9928 OSL_CACHE_INV(prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
9929 if (prot->dmaxfer.d11_lpbk != M2M_WRITE_TO_RAM &&
9930 prot->dmaxfer.d11_lpbk != M2M_READ_FROM_RAM &&
9931 prot->dmaxfer.d11_lpbk != D11_WRITE_TO_RAM &&
9932 prot->dmaxfer.d11_lpbk != D11_READ_FROM_RAM) {
9933 err = memcmp(prot->dmaxfer.srcmem.va,
9934 prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
9936 if (prot->dmaxfer.srcmem.va && prot->dmaxfer.dstmem.va) {
9938 cmplt->compl_hdr.status != BCME_OK) {
9945 prot->dmaxfer.status = BCME_ERROR;
9947 prot->dmaxfer.srcmem.va, prot->dmaxfer.len);
9949 prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
9952 switch (prot->dmaxfer.d11_lpbk) {
9977 prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
9985 prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
9994 dhd_prhex("XFER SRC: ", prot->dmaxfer.srcmem.va,
9995 prot->dmaxfer.len, DHD_INFO_VAL);
9996 dhd_prhex("XFER DST: ", prot->dmaxfer.dstmem.va,
9997 prot->dmaxfer.len, DHD_INFO_VAL);
10003 end_usec -= prot->dmaxfer.start_usec;
10005 prot->dmaxfer.time_taken = end_usec;
10007 prot->dmaxfer.len, (unsigned long)end_usec,
10008 (prot->dmaxfer.len * (1000 * 1000 / 1024) / (uint32)end_usec)));
10010 dhd->prot->dmaxfer.in_progress = FALSE;
10013 dhd->bus->dmaxfer_complete = TRUE;
10020 * This function is not reentrant, as prot->dmaxfer.in_progress is not protected
10029 dhd_prot_t *prot = dhd->prot;
10033 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
10035 /* XXX: prot->dmaxfer.in_progress is not protected by lock */
10036 if (prot->dmaxfer.in_progress) {
10043 " 0-PCIE_M2M_DMA, 1-D11, 2-BMC or 3-PCIE_M2M_NonDMA\n"));
10048 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) {
10053 prot->dmaxfer.in_progress = TRUE;
10055 &prot->dmaxfer)) != BCME_OK) {
10056 prot->dmaxfer.in_progress = FALSE;
10058 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10062 DHD_RING_LOCK(ring->ring_lock, flags);
10067 dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
10068 prot->dmaxfer.in_progress = FALSE;
10069 DHD_RING_UNLOCK(ring->ring_lock, flags);
10071 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10077 dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER;
10078 dmap->cmn_hdr.request_id = htol32(DHD_FAKE_PKTID);
10079 dmap->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
10080 dmap->cmn_hdr.flags = ring->current_phase;
10081 ring->seqnum++;
10083 dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa));
10084 dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa));
10085 dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.dstmem.pa));
10086 dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.dstmem.pa));
10087 dmap->xfer_len = htol32(prot->dmaxfer.len);
10088 dmap->srcdelay = htol32(prot->dmaxfer.srcdelay);
10089 dmap->destdelay = htol32(prot->dmaxfer.destdelay);
10090 prot->dmaxfer.d11_lpbk = d11_lpbk;
10092 dmap->host_ouput_buf_addr.high = 0x0;
10093 dmap->host_ouput_buf_addr.low = mem_addr;
10095 dmap->host_input_buf_addr.high = 0x0;
10096 dmap->host_input_buf_addr.low = mem_addr;
10098 dmap->host_ouput_buf_addr.high = 0x0;
10099 dmap->host_ouput_buf_addr.low = mem_addr;
10101 dmap->host_input_buf_addr.high = 0x0;
10102 dmap->host_input_buf_addr.low = mem_addr;
10104 dmap->flags = (((core_num & PCIE_DMA_XFER_FLG_CORE_NUMBER_MASK)
10106 ((prot->dmaxfer.d11_lpbk & PCIE_DMA_XFER_FLG_D11_LPBK_MASK)
10108 prot->dmaxfer.start_usec = OSL_SYSUPTIME_US();
10113 DHD_RING_UNLOCK(ring->ring_lock, flags);
10117 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10126 dhd_prot_t *prot = dhd->prot;
10128 if (prot->dmaxfer.in_progress)
10129 result->status = DMA_XFER_IN_PROGRESS;
10130 else if (prot->dmaxfer.status == 0)
10131 result->status = DMA_XFER_SUCCESS;
10133 result->status = DMA_XFER_FAILED;
10135 result->type = prot->dmaxfer.d11_lpbk;
10136 result->error_code = prot->dmaxfer.status;
10137 result->num_bytes = prot->dmaxfer.len;
10138 result->time_taken = prot->dmaxfer.time_taken;
10139 if (prot->dmaxfer.time_taken) {
10141 result->tput =
10142 (prot->dmaxfer.len * (1000 * 1000 / 1024)) /
10143 (uint32)prot->dmaxfer.time_taken;
10158 if (dhd->bus->is_linkdown) {
10161 return -EIO;
10164 if (dhd->busstate == DHD_BUS_DOWN) {
10166 return -EIO;
10170 if (dhd->hang_was_sent) {
10173 return -EIO;
10189 strlcpy((char *)buf, bcmerrorstr(dhd->dongle_error), copylen);
10193 *(uint32 *)(uint32 *)buf = dhd->dongle_error;
10211 OSL_DISABLE_PREEMPTION(dhd->osh);
10212 dhd_set_request_id(dhd, dhd->prot->ioctl_trans_id+1, cmd);
10225 OSL_ENABLE_PREEMPTION(dhd->osh);
10228 OSL_ENABLE_PREEMPTION(dhd->osh);
10247 dhd_prot_t *prot = dhd->prot;
10248 dhd->rxcnt_timeout++;
10249 dhd->rx_ctlerrs++;
10252 dhd->is_sched_error ? " due to scheduling problem" : "",
10253 dhd->rxcnt_timeout, prot->curr_ioctl_cmd, prot->ioctl_trans_id,
10254 prot->ioctl_state, dhd->busstate, prot->ioctl_received));
10265 if (dhd->is_sched_error && dhd->memdump_enabled == DUMP_MEMFILE_BUGON) {
10273 if (prot->curr_ioctl_cmd == WLC_SET_VAR ||
10274 prot->curr_ioctl_cmd == WLC_GET_VAR) {
10277 uint8 *ioctl_buf = (uint8 *)prot->ioctbuf.va;
10279 strncpy(iovbuf, ioctl_buf, sizeof(iovbuf) - 1);
10280 iovbuf[sizeof(iovbuf) - 1] = '\0';
10282 prot->curr_ioctl_cmd == WLC_SET_VAR ?
10290 intstatus = si_corereg(dhd->bus->sih,
10291 dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
10292 if (intstatus == (uint32)-1) {
10294 dhd->bus->is_linkdown = TRUE;
10297 dhd_bus_dump_console_buffer(dhd->bus);
10308 dhd_prot_t *prot = dhd->prot;
10317 ret = -EIO;
10325 prev_stop_count = dhd->gdb_proxy_stop_count;
10326 timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
10327 } while ((timeleft == 0) && ((dhd->gdb_proxy_stop_count != prev_stop_count) ||
10328 (dhd->gdb_proxy_stop_count & GDB_PROXY_STOP_MASK)));
10331 timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
10335 if (prot->ioctl_received == 0) {
10336 uint32 intstatus = si_corereg(dhd->bus->sih,
10337 dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
10338 int host_irq_disbled = dhdpcie_irq_disabled(dhd->bus);
10339 if ((intstatus) && (intstatus != (uint32)-1) &&
10347 timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
10349 dhdpcie_bus_clear_intstatus(dhd->bus);
10354 if (dhd->conf->ctrl_resched > 0 && timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
10356 if (cnt <= dhd->conf->ctrl_resched) {
10357 uint buscorerev = dhd->bus->sih->buscorerev;
10359 … intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt(buscorerev), 0, 0);
10360 intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask(buscorerev), 0, 0);
10364 dhd->bus->intstatus = intstatus;
10365 dhd->bus->ipend = TRUE;
10366 dhd->bus->dpc_sched = TRUE;
10368 timeleft = dhd_os_ioctl_resp_wait(dhd, &prot->ioctl_received);
10376 if (dhd->check_trap_rot) {
10379 dhd_bus_checkdied(dhd->bus, NULL, 0);
10381 if (dhd->dongle_trap_occured) {
10384 dhd->bus->no_cfg_restore = 1;
10387 ret = -EREMOTEIO;
10392 dhd->is_sched_error = dhd_bus_query_dpc_sched_errors(dhd);
10394 dhd->iovar_timeout_occured = TRUE;
10399 if (dhd->memdump_enabled) {
10401 dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT;
10418 dhd->bus->no_cfg_restore = 1;
10421 ret = -ETIMEDOUT;
10424 if (prot->ioctl_received != IOCTL_RETURN_ON_SUCCESS) {
10426 __FUNCTION__, prot->ioctl_received));
10427 ret = -EINVAL;
10430 dhd->rxcnt_timeout = 0;
10431 dhd->rx_ctlpkts++;
10433 __FUNCTION__, prot->ioctl_resplen));
10436 if (dhd->prot->ioctl_resplen > len)
10437 dhd->prot->ioctl_resplen = (uint16)len;
10439 bcopy(dhd->prot->retbuf.va, buf, dhd->prot->ioctl_resplen);
10441 ret = (int)(dhd->prot->ioctl_status);
10445 dhd->prot->ioctl_state = 0;
10446 dhd->prot->ioctl_resplen = 0;
10447 dhd->prot->ioctl_received = IOCTL_WAIT;
10448 dhd->prot->curr_ioctl_cmd = 0;
10461 if (dhd->bus->is_linkdown) {
10464 return -EIO;
10467 if (dhd->busstate == DHD_BUS_DOWN) {
10469 return -EIO;
10473 if (dhd->hang_was_sent) {
10476 return -EIO;
10493 OSL_DISABLE_PREEMPTION(dhd->osh);
10494 dhd_set_request_id(dhd, dhd->prot->ioctl_trans_id+1, cmd);
10508 OSL_ENABLE_PREEMPTION(dhd->osh);
10512 OSL_ENABLE_PREEMPTION(dhd->osh);
10532 /** Called by upper DHD layer. Check for and handle local prot-specific iovar commands */
10549 if (!(dhd) || !(dhd->prot)) {
10552 prot = dhd->prot;
10555 ring = &prot->h2dring_ctrl_subn;
10559 ring = &prot->h2dring_rxp_subn;
10563 ring = &prot->d2hring_ctrl_cpln;
10567 ring = &prot->d2hring_tx_cpln;
10571 ring = &prot->d2hring_rx_cpln;
10583 if (dhd->dongle_edl_support) {
10584 ring = prot->d2hring_edl;
10588 else if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support)
10590 if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
10593 ring = prot->h2dring_info_subn;
10597 ring = prot->d2hring_info_cpln;
10619 ret = dhd_os_write_file_posn(file, file_posn, (char *)(ring->dma_buf.va),
10620 ((unsigned long)(ring->max_items) * (ring->item_len)));
10626 ret = dhd_export_debug_data((char *)(ring->dma_buf.va), NULL, user_buf,
10627 ((unsigned long)(ring->max_items) * (ring->item_len)), (int *)file_posn);
10650 buf = MALLOCZ(dhd->osh, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
10659 msg_addr = (uint8 *)ring->dma_buf.va + (rd * ring->item_len);
10678 MFREE(dhd->osh, buf, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
10692 if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM)
10694 else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM)
10699 dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot);
10702 dhd->dma_h2d_ring_upd_support,
10703 dhd->dma_d2h_ring_upd_support,
10704 dhd->prot->rw_index_sz);
10705 bcm_bprintf(b, "h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
10706 h2d_max_txpost, dhd->prot->h2d_max_txpost);
10708 bcm_bprintf(b, "h2d_htput_max_txpost: %d, prot->h2d_htput_max_txpost: %d\n",
10709 h2d_htput_max_txpost, dhd->prot->h2d_htput_max_txpost);
10711 bcm_bprintf(b, "pktid_txq_start_cnt: %d\n", dhd->prot->pktid_txq_start_cnt);
10712 bcm_bprintf(b, "pktid_txq_stop_cnt: %d\n", dhd->prot->pktid_txq_stop_cnt);
10713 bcm_bprintf(b, "pktid_depleted_cnt: %d\n", dhd->prot->pktid_depleted_cnt);
10714 bcm_bprintf(b, "txcpl_db_cnt: %d\n", dhd->prot->txcpl_db_cnt);
10719 bcm_bprintf(b, "tx_h2d_db_cnt:%llu\n", dhd->prot->tx_h2d_db_cnt);
10724 dhd->prot->agg_h2d_db_info.timer_db_cnt, dhd->prot->agg_h2d_db_info.direct_db_cnt);
10750 dhd_prot_t *prot = dhd->prot;
10751 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
10754 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
10757 DHD_RING_LOCK(ring->ring_lock, flags);
10763 DHD_RING_UNLOCK(ring->ring_lock, flags);
10765 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10767 return -1;
10771 hevent->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
10772 ring->seqnum++;
10773 hevent->msg.msg_type = MSG_TYPE_HOST_EVNT;
10774 hevent->msg.if_id = 0;
10775 hevent->msg.flags = ring->current_phase;
10778 hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD);
10785 DHD_RING_UNLOCK(ring->ring_lock, flags);
10788 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10805 DHD_ERROR(("%s: nitems is 0 - ring(%s)\n", __FUNCTION__, ring->name));
10814 if (dhd->dma_d2h_ring_upd_support) {
10815 ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
10817 dhd_bus_cmn_readshared(dhd->bus, &(ring->rd), RING_RD_UPD, ring->idx);
10819 /* Check if ring->rd is valid */
10820 if (ring->rd >= ring->max_items) {
10821 DHD_ERROR(("%s: Invalid rd idx=%d\n", ring->name, ring->rd));
10822 dhd->bus->read_shm_fail = TRUE;
10832 DHD_INFO(("%s: Ring space not available \n", ring->name));
10838 DHD_MSGBUF_INFO(("%s: setting the phase now\n", ring->name));
10839 ring->current_phase = ring->current_phase ? 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
10855 dhd_prot_t *prot = dhd->prot;
10861 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
10863 ulong addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10869 return -EIO;
10882 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
10888 R_REG(dhd->osh, (volatile uint16 *)(dhd->bus->tcm + addr));
10895 diff_ns, dhdpcie_bus_get_pcie_inband_dw_state(dhd->bus)));
10900 DHD_RING_LOCK(ring->ring_lock, flags);
10902 if (prot->ioctl_state) {
10903 DHD_ERROR(("%s: pending ioctl %02x\n", __FUNCTION__, prot->ioctl_state));
10904 DHD_RING_UNLOCK(ring->ring_lock, flags);
10906 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10910 prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING;
10918 prot->ioctl_state = 0;
10919 prot->curr_ioctl_cmd = 0;
10920 prot->ioctl_received = IOCTL_WAIT;
10921 DHD_RING_UNLOCK(ring->ring_lock, flags);
10923 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10925 return -1;
10929 ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ;
10930 ioct_rqst->cmn_hdr.if_id = (uint8)ifidx;
10931 ioct_rqst->cmn_hdr.flags = ring->current_phase;
10932 ioct_rqst->cmn_hdr.request_id = htol32(DHD_IOCTL_REQ_PKTID);
10933 ioct_rqst->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
10934 ring->seqnum++;
10936 ioct_rqst->cmd = htol32(cmd);
10937 prot->curr_ioctl_cmd = cmd;
10938 ioct_rqst->output_buf_len = htol16(resplen);
10939 prot->ioctl_trans_id++;
10940 ioct_rqst->trans_id = prot->ioctl_trans_id;
10943 ioct_rqst->input_buf_len = htol16(rqstlen);
10944 ioct_rqst->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->ioctbuf.pa));
10945 ioct_rqst->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->ioctbuf.pa));
10947 ioct_buf = (void *) prot->ioctbuf.va;
10949 prot->ioctl_fillup_time = OSL_LOCALTIME_NS();
10954 OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len);
10960 ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len,
10961 ioct_rqst->trans_id));
10970 DHD_RING_UNLOCK(ring->ring_lock, flags);
10973 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
/*
 * NOTE(review): fragment of dhd_prot_ring_attach (per the original header
 * comment below). Interior lines elided — verify against full source.
 * Visible behavior: records name/idx/max_items/item_len on the ring, then
 * attaches a DMA-able buffer either by (a) carving a slice out of the
 * pre-reserved contiguous flowring buffer, (b) reusing the preallocated EDL
 * ring memory, or (c) allocating a fresh DMA buffer; finally publishes the
 * base address and creates the per-ring spinlock.
 */
10980 * dhd_prot_ring_attach - Initialize the msgbuf_ring object and attach a
10981 * DMA-able buffer to it. The ring is NOT tagged as inited until all the ring
10988 * returns non-zero negative error value on failure.
10996 dhd_prot_t *prot = dhd->prot;
10997 uint16 max_flowrings = dhd->bus->max_tx_flowrings;
11005 strlcpy((char *)ring->name, name, sizeof(ring->name));
11007 ring->idx = ringid;
11013 max_items = prot->h2d_htput_max_txpost;
11019 ring->max_items = max_items;
11020 ring->item_len = item_len;
/* Case (a): flowrings are carved from one contiguous reserved DMA buffer. */
11023 if (DHD_IS_FLOWRING(ringid, max_flowrings) && (prot->flowrings_dma_buf.va)) {
11024 /* Carve out from the contiguous DMA-able flowring buffer */
11027 dhd_dma_buf_t *rsv_buf = &prot->flowrings_dma_buf;
11029 dma_buf = &ring->dma_buf;
11032 base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len;
11034 ASSERT(base_offset + dma_buf_len <= rsv_buf->len);
11036 dma_buf->len = dma_buf_len;
11037 dma_buf->va = (void *)((uintptr)rsv_buf->va + base_offset);
11038 PHYSADDRHISET(dma_buf->pa, PHYSADDRHI(rsv_buf->pa));
11039 PHYSADDRLOSET(dma_buf->pa, PHYSADDRLO(rsv_buf->pa) + base_offset);
11042 ASSERT(PHYSADDRLO(dma_buf->pa) >= PHYSADDRLO(rsv_buf->pa));
/* Carved slice shares the parent's DMA handle and secure-DMA context. */
11044 dma_buf->dmah = rsv_buf->dmah;
11045 dma_buf->secdma = rsv_buf->secdma;
11047 (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
/* Case (b): EDL ring reuses memory preallocated in dhd->edl_ring_mem. */
11050 if (ring == dhd->prot->d2hring_edl) {
11054 memcpy(&ring->dma_buf, &dhd->edl_ring_mem, sizeof(ring->dma_buf));
11055 dma_buf = &ring->dma_buf;
11056 if (dma_buf->va == NULL) {
/* Case (c): ordinary rings allocate their own DMA buffer. */
11063 dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len);
11071 dhd_base_addr_htolpa(&ring->base_addr, ring->dma_buf.pa);
11073 ring->ring_lock = osl_spin_lock_init(dhd->osh);
11077 ring->name, ring->max_items, ring->item_len,
11078 dma_buf_len, ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
11079 ltoh32(ring->base_addr.low_addr)));
/*
 * NOTE(review): fragment of dhd_prot_ring_init. Resets rd/wr/curr_rd to 0,
 * posts ring base address, item count/size, and both indices to the dongle
 * via shared memory, then marks the ring inited. Lines elided — confirm
 * ordering requirements (indices after geometry) against full source.
 */
11085 * dhd_prot_ring_init - Post the common ring information to dongle.
11096 ring->wr = 0;
11097 ring->rd = 0;
11098 ring->curr_rd = 0;
/* Publish ring geometry to the dongle through the shared area. */
11101 dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr,
11102 sizeof(sh_addr_t), RING_BUF_ADDR, ring->idx);
11103 dhd_bus_cmn_writeshared(dhd->bus, &ring->max_items,
11104 sizeof(uint16), RING_MAX_ITEMS, ring->idx);
11105 dhd_bus_cmn_writeshared(dhd->bus, &ring->item_len,
11106 sizeof(uint16), RING_ITEM_LEN, ring->idx);
/* Initial read/write indices are zero on both sides. */
11108 dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
11109 sizeof(uint16), RING_WR_UPD, ring->idx);
11110 dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
11111 sizeof(uint16), RING_RD_UPD, ring->idx);
11114 ring->inited = TRUE;
/*
 * NOTE(review): fragment of dhd_prot_ring_reset — zeroes the ring's DMA
 * buffer and resets soft state (indices, inited, create_pending) without
 * freeing the buffer.
 */
11119 * dhd_prot_ring_reset - bzero a ring's DMA-ble buffer and cache flush
11127 dhd_dma_buf_reset(dhd, &ring->dma_buf);
11129 ring->rd = ring->wr = 0;
11130 ring->curr_rd = 0;
11131 ring->inited = FALSE;
11132 ring->create_pending = FALSE;
/*
 * NOTE(review): fragment of dhd_prot_ring_detach. A carved flowring slice
 * and the preallocated EDL buffer are only forgotten (memset), never freed
 * here — only independently-allocated ring buffers go through
 * dhd_dma_buf_free. Lines elided; confirm branch structure in full source.
 */
11136 * dhd_prot_ring_detach - Detach the DMA-able buffer and any other objects
11142 dhd_prot_t *prot = dhd->prot;
11143 uint16 max_flowrings = dhd->bus->max_tx_flowrings;
11146 ring->inited = FALSE;
11147 /* rd = ~0, wr = ring->rd - 1, max_items = 0, len_item = ~0 */
11149 /* If the DMA-able buffer was carved out of a pre-reserved contiguous
11152 if (DHD_IS_FLOWRING(ring->idx, max_flowrings) && (prot->flowrings_dma_buf.va)) {
11153 (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
11154 memset(&ring->dma_buf, 0, sizeof(dhd_dma_buf_t));
/* EDL ring memory is owned by dhd->edl_ring_mem; just drop the reference. */
11157 if (ring == dhd->prot->d2hring_edl) {
11161 memset(&ring->dma_buf, 0, sizeof(ring->dma_buf));
11165 dhd_dma_buf_free(dhd, &ring->dma_buf);
11169 osl_spin_lock_deinit(dhd->osh, ring->ring_lock);
/*
 * NOTE(review): fragment of a max-flowring helper. For shared-struct rev 6+
 * firmware, max_tx_flowrings already excludes the common rings; older
 * firmware counts them, so they are subtracted. Confirm function name in
 * full source.
 */
11177 if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
11178 return dhd->bus->max_tx_flowrings;
11180 return (dhd->bus->max_tx_flowrings - BCMPCIE_H2D_COMMON_MSGRINGS);
/*
 * NOTE(review): fragment of dhd_prot_flowrings_pool_attach. Allocates the
 * msgbuf_ring_t pool array once (zeroed), then attaches a DMA buffer to each
 * flowring via dhd_prot_ring_attach. Error-path lines elided — confirm
 * cleanup ordering in full source.
 */
11184 * dhd_prot_flowrings_pool_attach - Initialize a pool of flowring msgbuf_ring_t.
11186 * Allocate a pool of msgbuf_ring along with DMA-able buffers for flowrings.
11189 * allocate the DMA-able buffer and initialize each msgbuf_ring_t object.
11192 * attaching the DMA-able buffer.
11198 * returns non-zero negative error value on failure.
11206 dhd_prot_t *prot = dhd->prot;
/* Idempotent: a second attach with an existing pool is a no-op. */
11209 if (prot->h2d_flowrings_pool != NULL)
11212 ASSERT(prot->h2d_rings_total == 0);
11215 prot->h2d_rings_total = (uint16)dhd_bus_max_h2d_queues(dhd->bus);
11217 if (prot->h2d_rings_total < BCMPCIE_H2D_COMMON_MSGRINGS) {
11219 __FUNCTION__, prot->h2d_rings_total));
11229 prot->h2d_flowrings_pool = (msgbuf_ring_t *)MALLOCZ(prot->osh,
11232 if (prot->h2d_flowrings_pool == NULL) {
11238 /* Setup & Attach a DMA-able buffer to each flowring in the flowring pool */
11243 prot->h2d_max_txpost, H2DRING_TXPOST_ITEMSIZE,
/* Failure path: reset the total so a later attach starts clean. */
11258 prot->h2d_rings_total = 0;
/*
 * NOTE(review): fragment of dhd_prot_flowrings_pool_reset — marks each
 * pooled ring un-inited without freeing DMA buffers, avoiding DMA memory
 * fragmentation across dongle reboots (per original comment).
 */
11264 * dhd_prot_flowrings_pool_reset - Reset all msgbuf_ring_t objects in the pool.
11267 * The DMA-able buffer is not freed during reset and neither is the flowring
11271 * the dhd_prot_reset, dhd_prot_init will be re-invoked, and the flowring pool
11274 * This will avoid a fragmented DMA-able memory condition, if multiple
11283 dhd_prot_t *prot = dhd->prot;
11285 if (prot->h2d_flowrings_pool == NULL) {
11286 ASSERT(prot->h2d_rings_total == 0);
11293 ring->inited = FALSE;
11296 /* Flowring pool state must be as-if dhd_prot_flowrings_pool_attach */
/*
 * NOTE(review): fragment of dhd_prot_flowrings_pool_detach — detaches every
 * flowring's DMA buffer (dhd_prot_ring_detach, per original comment) and
 * frees the pool array itself.
 */
11300 * dhd_prot_flowrings_pool_detach - Free pool of msgbuf_ring along with
11301 * DMA-able buffers for flowrings.
11302 * dhd_prot_ring_detach is invoked to free the DMA-able buffer and perform any
11303 * de-initialization of each msgbuf_ring_t.
11311 dhd_prot_t *prot = dhd->prot;
11313 if (prot->h2d_flowrings_pool == NULL) {
11314 ASSERT(prot->h2d_rings_total == 0);
11319 /* Detach the DMA-able buffer for each flowring in the flowring pool */
11324 MFREE(prot->osh, prot->h2d_flowrings_pool,
11327 prot->h2d_rings_total = 0;
/*
 * NOTE(review): fragments of dhd_prot_flowrings_pool_fetch and (from 11368)
 * dhd_prot_flowrings_pool_release. Fetch resets indices and marks the ring
 * inited; release audits the DMA buffer and returns the ring to the
 * un-inited state. Interior lines elided.
 */
11332 * dhd_prot_flowrings_pool_fetch - Fetch a preallocated and initialized
11345 dhd_prot_t *prot = dhd->prot;
11348 ASSERT(flowid < prot->h2d_rings_total);
11349 ASSERT(prot->h2d_flowrings_pool != NULL);
11353 /* ASSERT flow_ring->inited == FALSE */
11355 ring->wr = 0;
11356 ring->rd = 0;
11357 ring->curr_rd = 0;
11358 ring->inited = TRUE;
11363 ring->current_phase = 0;
/* --- release path below --- */
11368 * dhd_prot_flowrings_pool_release - release a previously fetched flowring's
11375 dhd_prot_t *prot = dhd->prot;
11378 ASSERT(flowid < prot->h2d_rings_total);
11379 ASSERT(prot->h2d_flowrings_pool != NULL);
11384 /* ASSERT flow_ring->inited == TRUE */
11386 (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
11388 ring->wr = 0;
11389 ring->rd = 0;
11390 ring->inited = FALSE;
11392 ring->curr_rd = 0;
/*
 * NOTE(review): fragment of a ring-space allocation routine (likely
 * dhd_prot_get_ring_space / alloc_ring_space). Computes free slots between
 * rd and wr, returns a pointer at the current write offset, and advances wr
 * with wrap-around. Lines elided — confirm the *alloced contract upstream.
 */
11399 dhd_prot_t *prot = dhd->prot;
11406 flush = !!ring->pend_items_count;
11409 inflight = OSL_ATOMIC_READ(dhd->osh, &ring->inflight);
11432 ASSERT(nitems <= ring->max_items);
11434 ring_avail_cnt = CHECK_WRITE_SPACE(ring->rd, ring->wr, ring->max_items);
/* Also require the request to fit contiguously before the wrap point. */
11438 ((ring->max_items - ring->wr) >= nitems))) {
11440 ring->name, nitems, ring->wr, ring->rd));
11446 ret_ptr = (char *)DHD_RING_BGN_VA(ring) + (ring->wr * ring->item_len);
/* Advance write index, wrapping to 0 exactly at max_items. */
11449 if ((ring->wr + *alloced) == ring->max_items)
11450 ring->wr = 0;
11451 else if ((ring->wr + *alloced) < ring->max_items)
11452 ring->wr += *alloced;
/*
 * NOTE(review): two fragments. First (11468-11497): a flowring write-flush
 * path — cache-flush the produced items, then publish the WR index via iDMA,
 * IFRM, or the shared area, under the bus low-power-state lock. Second
 * (11503-11528): a doorbell-ring path that uses a per-ring doorbell register
 * on newer bus core revisions (mb_2_ring_fn) or the legacy magic-value
 * doorbell (mb_ring_fn). Interior lines elided.
 */
11468 uint16 max_flowrings = dhd->bus->max_tx_flowrings;
11476 if (DHD_IS_FLOWRING(ring->idx, max_flowrings)) {
11481 DHD_BUS_LP_STATE_LOCK(dhd->bus->bus_lp_state_lock, flags_bus);
11484 OSL_CACHE_FLUSH(p, ring->item_len * nitems);
/* Publish WR index: iDMA > IFRM (flowrings only) > shared-memory write. */
11486 if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
11487 dhd_prot_dma_indx_set(dhd, ring->wr,
11488 H2D_DMA_INDX_WR_UPD, ring->idx);
11489 } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings)) {
11490 dhd_prot_dma_indx_set(dhd, ring->wr,
11491 H2D_IFRM_INDX_WR_UPD, ring->idx);
11493 dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
11494 sizeof(uint16), RING_WR_UPD, ring->idx);
11497 DHD_BUS_LP_STATE_UNLOCK(dhd->bus->bus_lp_state_lock, flags_bus);
/* --- doorbell fragment below --- */
11503 dhd_prot_t *prot = dhd->prot;
11504 flow_ring_table_t *flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
11506 msgbuf_ring_t *ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
11512 prot->agg_h2d_db_info.direct_db_cnt++;
11517 if (dhd->bus->sih) {
11518 corerev = dhd->bus->sih->buscorerev;
11526 prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
11528 prot->mb_ring_fn(dhd->bus, DHD_WRPTR_UPDATE_H2D_DB_MAGIC(ring));
/*
 * NOTE(review): fragment of dhd_prot_ring_write_complete — flush produced
 * items, publish the new WR index (iDMA / IFRM / shared memory), then ring
 * the H2D doorbell. Original comment notes it is non-atomic; callers must
 * hold the appropriate lock. Interior lines elided.
 */
11538 * dhd_prot_ring_write_complete - Host updates the new WR index on producing
11543 * This is a non-atomic function, make sure the callers
11550 dhd_prot_t *prot = dhd->prot;
11552 uint16 max_flowrings = dhd->bus->max_tx_flowrings;
11556 OSL_CACHE_FLUSH(p, ring->item_len * nitems);
11558 if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
11559 dhd_prot_dma_indx_set(dhd, ring->wr,
11560 H2D_DMA_INDX_WR_UPD, ring->idx);
11561 } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings)) {
11562 dhd_prot_dma_indx_set(dhd, ring->wr,
11563 H2D_IFRM_INDX_WR_UPD, ring->idx);
11565 dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
11566 sizeof(uint16), RING_WR_UPD, ring->idx);
/* Doorbell: per-ring register on supporting core revs, else magic value. */
11571 (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings))) {
11574 if (dhd->bus->sih) {
11575 corerev = dhd->bus->sih->buscorerev;
11581 prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
11583 prot->mb_ring_fn(dhd->bus, DHD_WRPTR_UPDATE_H2D_DB_MAGIC(ring));
/*
 * NOTE(review): fragments of locked doorbell wrappers and
 * dhd_prot_ring_write_complete_mbdata. All three take the bus low-power
 * state lock around the doorbell; the mbdata variant additionally marks the
 * bus D3-informed when sending H2D_HOST_D3_INFORM (presumably — interior
 * lines elided, confirm the mb_data condition in full source).
 */
11592 DHD_BUS_LP_STATE_LOCK(dhd->bus->bus_lp_state_lock, flags_bus);
11594 DHD_BUS_LP_STATE_UNLOCK(dhd->bus->bus_lp_state_lock, flags_bus);
/* Raw doorbell write with an arbitrary value, under the LP-state lock. */
11601 DHD_BUS_LP_STATE_LOCK(dhd->bus->bus_lp_state_lock, flags_bus);
11602 dhd->prot->mb_ring_fn(dhd->bus, value);
11603 DHD_BUS_LP_STATE_UNLOCK(dhd->bus->bus_lp_state_lock, flags_bus);
11607 * dhd_prot_ring_write_complete_mbdata - will be called from dhd_prot_h2d_mbdata_send_ctrlmsg,
11617 DHD_BUS_LP_STATE_LOCK(dhd->bus->bus_lp_state_lock, flags_bus);
11623 __DHD_SET_BUS_LPS_D3_INFORMED(dhd->bus);
11626 DHD_BUS_LP_STATE_UNLOCK(dhd->bus->bus_lp_state_lock, flags_bus);
/*
 * NOTE(review): fragment of dhd_prot_upd_read_idx — publishes the host's new
 * RD index for a D2H ring. Paths visible: iDMA (index set + d2h doorbell),
 * plain DMA index update, or shared-memory write. Interior lines elided.
 */
11630 * dhd_prot_upd_read_idx - Host updates the new RD index on consuming messages
11637 dhd_prot_t *prot = dhd->prot;
11643 * update the r -indices in the
11647 dhd_prot_dma_indx_set(dhd, ring->rd,
11648 D2H_DMA_INDX_RD_UPD, ring->idx);
11650 if (dhd->bus->sih) {
11651 corerev = dhd->bus->sih->buscorerev;
11657 prot->mb_2_ring_fn(dhd->bus, db_index, FALSE);
11658 } else if (dhd->dma_h2d_ring_upd_support) {
11659 dhd_prot_dma_indx_set(dhd, ring->rd,
11660 D2H_DMA_INDX_RD_UPD, ring->idx);
11662 dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
11663 sizeof(uint16), RING_RD_UPD, ring->idx);
/*
 * NOTE(review): fragment of a D2H ring-create request sender (likely
 * dhd_send_d2h_ringcreate). Under the control-ring lock it fills a
 * d2h_ring_create work item (request id, ring id offset, type, geometry,
 * base address) and submits it; both success and failure paths drop the
 * host-active/device-wake reference. Interior lines elided.
 */
11675 uint16 max_h2d_rings = dhd->bus->max_submission_rings;
11676 msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
11679 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
11682 DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
/* Remember the request id so the completion can be matched to this ring. */
11703 ring_to_create->create_req_id = (uint16)req_id;
11704 ring_to_create->create_pending = TRUE;
11707 d2h_ring->msg.msg_type = MSG_TYPE_D2H_RING_CREATE;
11708 d2h_ring->msg.if_id = 0;
11709 d2h_ring->msg.flags = ctrl_ring->current_phase;
11710 d2h_ring->msg.request_id = htol32(ring_to_create->create_req_id);
11711 d2h_ring->ring_id = htol16(DHD_D2H_RING_OFFSET(ring_to_create->idx, max_h2d_rings));
11712 DHD_ERROR(("%s ringid: %d idx: %d max_h2d: %d\n", __FUNCTION__, d2h_ring->ring_id,
11713 ring_to_create->idx, max_h2d_rings));
11715 d2h_ring->ring_type = ring_type;
11716 d2h_ring->max_items = htol16(ring_to_create->max_items);
11717 d2h_ring->len_item = htol16(ring_to_create->item_len);
11718 d2h_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
11719 d2h_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
11721 d2h_ring->flags = 0;
11722 d2h_ring->msg.epoch =
11723 ctrl_ring->seqnum % H2D_EPOCH_MODULO;
11724 ctrl_ring->seqnum++;
11730 __FUNCTION__, ltoh16(d2h_ring->max_items),
11731 ltoh16(d2h_ring->len_item),
11732 ltoh16(d2h_ring->ring_id),
11733 d2h_ring->ring_ptr.low_addr,
11734 d2h_ring->ring_ptr.high_addr));
11742 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
11745 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
11750 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
11753 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
/*
 * NOTE(review): fragment of an H2D ring-create request sender (likely
 * dhd_send_h2d_ringcreate). Mirrors the D2H variant but also carries the
 * list of completion-ring ids. Interior lines elided. Note the original
 * field name "compeltion_ring_ids" (sic) at 11810 — that typo is in the
 * struct definition elsewhere; do not "fix" it here without changing the
 * declaration.
 */
11766 msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
11769 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
11772 DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
11793 ring_to_create->create_req_id = (uint16)id;
11794 ring_to_create->create_pending = TRUE;
11797 h2d_ring->msg.msg_type = MSG_TYPE_H2D_RING_CREATE;
11798 h2d_ring->msg.if_id = 0;
11799 h2d_ring->msg.request_id = htol32(ring_to_create->create_req_id);
11800 h2d_ring->msg.flags = ctrl_ring->current_phase;
11801 h2d_ring->ring_id = htol16(DHD_H2D_RING_OFFSET(ring_to_create->idx));
11802 h2d_ring->ring_type = ring_type;
11803 h2d_ring->max_items = htol16(H2DRING_DYNAMIC_INFO_MAX_ITEM);
11804 h2d_ring->n_completion_ids = ring_to_create->n_completion_ids;
11805 h2d_ring->len_item = htol16(H2DRING_INFO_BUFPOST_ITEMSIZE);
11806 h2d_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
11807 h2d_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
11809 for (i = 0; i < ring_to_create->n_completion_ids; i++) {
11810 h2d_ring->completion_ring_ids[i] = htol16(ring_to_create->compeltion_ring_ids[i]);
11813 h2d_ring->flags = 0;
11814 h2d_ring->msg.epoch =
11815 ctrl_ring->seqnum % H2D_EPOCH_MODULO;
11816 ctrl_ring->seqnum++;
11822 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
11825 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
11829 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
11832 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
/*
 * NOTE(review): fragments of dhd_prot_dma_indx_set (11838-11878) and
 * dhd_prot_dma_indx_get (11886-11944). Both select the appropriate host
 * index array (WR/RD, H2D/D2H, IFRM) and step into it by
 * offset * rw_index_sz; set flushes the cache line after writing, get
 * invalidates before reading. The _get path prefers host-side copy buffers
 * when present (index-copy/seqnum-sync feature). Interior lines elided.
 */
11838 * dhd_prot_dma_indx_set - set a new WR or RD index in the DMA index array.
11847 dhd_prot_t *prot = dhd->prot;
11848 uint16 max_h2d_rings = dhd->bus->max_submission_rings;
11852 ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
11857 ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
11862 ptr = (uint8 *)(prot->h2d_ifrm_indx_wr_buf.va);
11872 ASSERT(prot->rw_index_sz != 0);
11873 ptr += offset * prot->rw_index_sz;
11878 OSL_CACHE_FLUSH((void *)ptr, prot->rw_index_sz);
/* --- dhd_prot_dma_indx_get fragment below --- */
11886 * dhd_prot_dma_indx_get - Fetch a WR or RD index from the dongle DMA-ed index
11897 dhd_prot_t *prot = dhd->prot;
11898 uint16 max_h2d_rings = dhd->bus->max_submission_rings;
11902 ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
11908 if (prot->h2d_dma_indx_rd_copy_buf) {
11909 ptr = (uint8 *)(prot->h2d_dma_indx_rd_copy_buf);
11913 ptr = (uint8 *)(prot->h2d_dma_indx_rd_buf.va);
11920 if (prot->d2h_dma_indx_wr_copy_buf) {
11921 ptr = (uint8 *)(prot->d2h_dma_indx_wr_copy_buf);
11925 ptr = (uint8 *)(prot->d2h_dma_indx_wr_buf.va);
11931 ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
11941 ASSERT(prot->rw_index_sz != 0);
11942 ptr += offset * prot->rw_index_sz;
/* Invalidate before reading: the dongle DMAs into this array. */
11944 OSL_CACHE_INV((void *)ptr, prot->rw_index_sz);
/*
 * NOTE(review): three fragments. 11961-11966: write to the D2H scratch
 * buffer followed by a cache flush. 11976-11988: read back from the scratch
 * buffer (second word at +sizeof(uint32)). 11998-12015: index-copy seqnum
 * sync — when the host sequence number matches the dongle's, snapshot the
 * live DMA index arrays into the host copy buffers, then advance the host
 * seqnum modulo D2H_EPOCH_MODULO. Interior lines elided; confirm function
 * names in full source.
 */
11961 dhd_prot_t *prot = dhd->prot;
11964 ptr = (uint8 *)(prot->d2h_dma_scratch_buf.va);
11966 OSL_CACHE_FLUSH((void *)ptr, prot->d2h_dma_scratch_buf.len);
11976 dhd_prot_t *prot = dhd->prot;
11985 ptr = (uint8 *)(prot->d2h_dma_scratch_buf.va);
11988 ptr = ((uint8 *)(prot->d2h_dma_scratch_buf.va) + sizeof(uint32));
11998 dhd_prot_t *prot = dhd->prot;
12004 prot->host_seqnum, dngl_seqnum));
12005 if (prot->d2h_dma_indx_wr_copy_buf && prot->h2d_dma_indx_rd_copy_buf) {
12006 if (prot->host_seqnum == dngl_seqnum) {
/* Snapshot live index arrays into stable host-side copies. */
12007 memcpy_s(prot->d2h_dma_indx_wr_copy_buf, prot->d2h_dma_indx_wr_copy_bufsz,
12008 prot->d2h_dma_indx_wr_buf.va, prot->d2h_dma_indx_wr_copy_bufsz);
12009 memcpy_s(prot->h2d_dma_indx_rd_copy_buf, prot->h2d_dma_indx_rd_copy_bufsz,
12010 prot->h2d_dma_indx_rd_buf.va, prot->h2d_dma_indx_rd_copy_bufsz);
12011 dhd_prot_write_host_seqnum(dhd, prot->host_seqnum);
12014 prot->host_seqnum++;
12015 prot->host_seqnum %= D2H_EPOCH_MODULO;
/*
 * NOTE(review): fragments of (a) index copy-buffer allocation — MALLOCZ of
 * d2h WR and h2d RD copy buffers with their sizes recorded — and (b) the
 * DMA index buffer init routine that sizes each WR/RD index array as
 * ring-count * rw_index_sz and allocates via a helper that skips
 * already-allocated buffers (12071 guard). Interior lines elided.
 */
12023 dhd_prot_t *prot = dhd->prot;
12027 prot->d2h_dma_indx_wr_copy_buf = MALLOCZ(dhd->osh, buf_sz);
12028 if (prot->d2h_dma_indx_wr_copy_buf == NULL) {
12033 prot->d2h_dma_indx_wr_copy_bufsz = buf_sz;
12037 prot->h2d_dma_indx_rd_copy_buf = MALLOCZ(dhd->osh, buf_sz);
12038 if (prot->h2d_dma_indx_rd_copy_buf == NULL) {
12043 prot->h2d_dma_indx_rd_copy_bufsz = buf_sz;
/* Helper guard: reuse a buffer that is already the right size/allocated. */
12071 if ((dma_buf->len == bufsz) || (dma_buf->va != NULL))
12083 dhd_prot_t *prot = dhd->prot;
12093 prot->rw_index_sz = rw_index_sz;
12099 dma_buf = &prot->h2d_dma_indx_wr_buf;
12103 dma_buf->len, rw_index_sz, length));
12107 dma_buf = &prot->h2d_dma_indx_rd_buf;
12111 dma_buf->len, rw_index_sz, length));
12115 dma_buf = &prot->d2h_dma_indx_wr_buf;
12119 dma_buf->len, rw_index_sz, length));
12123 dma_buf = &prot->d2h_dma_indx_rd_buf;
12127 dma_buf->len, rw_index_sz, length));
12131 dma_buf = &prot->h2d_ifrm_indx_wr_buf;
12135 dma_buf->len, rw_index_sz, length));
/*
 * NOTE(review): fragment of the D2H read-address fetch (likely
 * dhd_prot_get_read_addr). Refreshes ring->wr (via DMA index or shared
 * memory), computes available items between rd and wr, validates them
 * against max_items (triggering read_shm_fail / memdump on corruption), and
 * returns the read VA plus available length while advancing ring->rd with
 * wrap. Interior lines elided.
 */
12167 __FUNCTION__, (uint32 *)(dhd->prot->d2h_dma_indx_rd_buf.va),
12168 (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va)));
12171 * This is becuase ring->rd gets updated in the end of this function
12175 ring->curr_rd = ring->rd;
12178 if (dhd->dma_d2h_ring_upd_support) {
12180 d2h_wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
12181 ring->wr = d2h_wr;
12183 dhd_bus_cmn_readshared(dhd->bus, &(ring->wr), RING_WR_UPD, ring->idx);
12186 wr = ring->wr;
12187 rd = ring->rd;
12188 depth = ring->max_items;
/* Sanity: an item count beyond ring depth indicates index corruption. */
12201 if (items > ring->max_items) {
12203 DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
12204 __FUNCTION__, ring, ring->name, ring->max_items, items));
12206 DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
12207 dhd->busstate, dhd->bus->wait_for_d3_ack));
12210 if (wr >= ring->max_items) {
12211 dhd->bus->read_shm_fail = TRUE;
12215 if (dhd->memdump_enabled) {
12217 dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
12231 read_addr = (char*)ring->dma_buf.va + (rd * ring->item_len);
12234 if ((ring->rd + items) >= ring->max_items)
12235 ring->rd = 0;
12237 ring->rd += items;
12239 ASSERT(ring->rd < ring->max_items);
12242 *available_len = (uint32)(items * ring->item_len);
/*
 * NOTE(review): fragment of dhd_prot_h2d_mbdata_send_ctrlmsg — sends host
 * mailbox data to the dongle as an H2D_MAILBOX_DATA control message; bails
 * if the control ring is not inited, and appears to substitute a
 * DEVICE_WAKE value in the inband-deep-sleep path. Interior lines elided —
 * confirm the branch conditions in full source.
 */
12253 * dhd_prot_h2d_mbdata_send_ctrlmsg is a non-atomic function,
12260 msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
12267 if (!ctrl_ring->inited) {
12273 if ((INBAND_DW_ENAB(dhd->bus)) &&
12274 (dhdpcie_bus_get_pcie_inband_dw_state(dhd->bus) ==
12284 DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
12293 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
12299 h2d_mb_data->msg.msg_type = MSG_TYPE_H2D_MAILBOX_DATA;
12300 h2d_mb_data->msg.flags = ctrl_ring->current_phase;
12302 h2d_mb_data->msg.epoch =
12303 ctrl_ring->seqnum % H2D_EPOCH_MODULO;
12304 ctrl_ring->seqnum++;
12307 h2d_mb_data->mail_box_data = htol32(mb_data);
12311 h2d_mb_data->mail_box_data = htol32(H2DMB_DS_DEVICE_WAKE);
12315 h2d_mb_data->mail_box_data = htol32(mb_data);
12324 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
/*
 * NOTE(review): fragment of dhd_prot_flow_ring_create. Fetches a
 * preallocated msgbuf_ring from the pool, fills a FLOW_RING_CREATE request
 * (tid, flowid, SA/DA, ring base/geometry) on the control ring, optionally
 * tags HP2P / IFRM flows, publishes the new flowring's initial WR index,
 * and submits. On ring-space failure the fetched flowring is released back
 * to the pool. Interior lines elided.
 */
12342 dhd_prot_t *prot = dhd->prot;
12345 msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
12346 uint16 max_flowrings = dhd->bus->max_tx_flowrings;
12348 /* Fetch a pre-initialized msgbuf_ring from the flowring pool */
12349 flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
12352 __FUNCTION__, flow_ring_node->flowid));
12357 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
12360 DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
/* No ctrl-ring space: return the flowring to the pool before failing. */
12367 dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
12368 DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n",
12369 __FUNCTION__, flow_ring_node->flowid));
12370 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
12372 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
12377 flow_ring_node->prot_info = (void *)flow_ring;
12380 flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE;
12381 flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
12382 flow_create_rqst->msg.request_id = htol32(0); /* TBD */
12383 flow_create_rqst->msg.flags = ctrl_ring->current_phase;
12385 flow_create_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
12386 ctrl_ring->seqnum++;
12389 flow_create_rqst->tid = flow_ring_node->flow_info.tid;
12390 flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
12391 memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa, sizeof(flow_create_rqst->sa));
12392 memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da, sizeof(flow_create_rqst->da));
12394 flow_create_rqst->flow_ring_ptr.low_addr = flow_ring->base_addr.low_addr;
12395 flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr;
12396 flow_create_rqst->max_items = htol16(flow_ring->max_items);
12397 flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE);
12398 flow_create_rqst->if_flags = 0;
/* HP2P tagging: unicast, HP2P priority, subject to the ring-budget flag. */
12404 if (dhd->hp2p_capable && dhd->hp2p_ring_more &&
12405 flow_ring_node->flow_info.tid == HP2P_PRIO &&
12406 (dhd->hp2p_infra_enable || flow_create_rqst->msg.if_id) &&
12407 !ETHER_ISMULTI(flow_create_rqst->da)) {
12408 flow_create_rqst->if_flags |= BCMPCIE_FLOW_RING_INTF_HP2P;
12409 flow_ring_node->hp2p_ring = TRUE;
12411 if (!dhd->hp2p_mf_enable) {
12412 dhd->hp2p_ring_more = FALSE;
12416 __FUNCTION__, flow_ring_node->flow_info.tid,
12417 flow_ring_node->flowid));
12425 flow_create_rqst->priority_ifrmmask = (1 << IFRM_DEV_0);
12428 " prio %d ifindex %d items %d\n", __FUNCTION__, flow_ring_node->flowid,
12429 MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
12430 flow_ring_node->flow_info.ifindex, flow_ring->max_items));
/* Publish the new flowring's initial WR index before first use. */
12433 if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
12434 dhd_prot_dma_indx_set(dhd, flow_ring->wr,
12435 H2D_DMA_INDX_WR_UPD, flow_ring->idx);
12436 } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(flow_ring->idx, max_flowrings)) {
12437 dhd_prot_dma_indx_set(dhd, flow_ring->wr,
12438 H2D_IFRM_INDX_WR_UPD, flow_ring->idx);
12440 dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
12441 sizeof(uint16), RING_WR_UPD, flow_ring->idx);
12447 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
12450 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
/*
 * NOTE(review): fragments of the flow-ring-create response handler
 * (12462-12467) and the H2D ring-create-complete handler (12476-12513).
 * The latter matches the response's request_id against the info/btlog
 * submission rings' create_req_id, rejects stale or failed completions,
 * then marks the matched ring inited and posts info buffers to it.
 * Interior lines elided.
 */
12462 ltoh16(flow_create_resp->cmplt.status),
12463 ltoh16(flow_create_resp->cmplt.flow_ring_id)));
12465 dhd_bus_flow_ring_create_response(dhd->bus,
12466 ltoh16(flow_create_resp->cmplt.flow_ring_id),
12467 ltoh16(flow_create_resp->cmplt.status));
/* --- H2D ring create complete below --- */
12476 ltoh16(resp->cmplt.status),
12477 ltoh16(resp->cmplt.ring_id),
12478 ltoh32(resp->cmn_hdr.request_id)));
12479 if ((ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_DBGRING_REQ_PKTID) &&
12480 (ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_BTLOGRING_REQ_PKTID)) {
/* Reject a completion for a ring with no create outstanding. */
12484 if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id) &&
12485 !dhd->prot->h2dring_info_subn->create_pending) {
12489 if (dhd->prot->h2dring_btlog_subn &&
12490 dhd->prot->h2dring_btlog_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id) &&
12491 !dhd->prot->h2dring_btlog_subn->create_pending) {
12496 if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
12498 ltoh16(resp->cmplt.status)));
12501 if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id)) {
12502 dhd->prot->h2dring_info_subn->create_pending = FALSE;
12503 dhd->prot->h2dring_info_subn->inited = TRUE;
12505 dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
12508 if (dhd->prot->h2dring_btlog_subn &&
12509 dhd->prot->h2dring_btlog_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id)) {
12510 dhd->prot->h2dring_btlog_subn->create_pending = FALSE;
12511 dhd->prot->h2dring_btlog_subn->inited = TRUE;
12513 dhd_prot_infobufpost(dhd, dhd->prot->h2dring_btlog_subn);
/*
 * NOTE(review): fragment of the D2H ring-create-complete handler. Dispatches
 * on request_id: info completion ring (or EDL when dongle_edl_support),
 * btlog completion ring, HP2P tx/rx completion rings. Each arm rejects a
 * completion with no create pending or a non-success status, then marks the
 * ring inited. Interior lines elided.
 */
12524 ltoh16(resp->cmplt.status),
12525 ltoh16(resp->cmplt.ring_id),
12526 ltoh32(resp->cmn_hdr.request_id)));
12527 if ((ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_DBGRING_REQ_PKTID) &&
12528 (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_BTLOGRING_REQ_PKTID) &&
12530 (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_TXREQ_PKTID) &&
12531 (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_RXREQ_PKTID) &&
12537 if (ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_DBGRING_REQ_PKTID) {
12539 if (!dhd->dongle_edl_support)
12543 if (!dhd->prot->d2hring_info_cpln->create_pending) {
12548 if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
12550 ltoh16(resp->cmplt.status)));
12553 dhd->prot->d2hring_info_cpln->create_pending = FALSE;
12554 dhd->prot->d2hring_info_cpln->inited = TRUE;
/* EDL variant of the same pattern. */
12558 if (!dhd->prot->d2hring_edl->create_pending) {
12563 if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
12565 ltoh16(resp->cmplt.status)));
12568 dhd->prot->d2hring_edl->create_pending = FALSE;
12569 dhd->prot->d2hring_edl->inited = TRUE;
12575 if (ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_BTLOGRING_REQ_PKTID) {
12576 if (!dhd->prot->d2hring_btlog_cpln->create_pending) {
12581 if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
12583 ltoh16(resp->cmplt.status)));
12586 dhd->prot->d2hring_btlog_cpln->create_pending = FALSE;
12587 dhd->prot->d2hring_btlog_cpln->inited = TRUE;
12591 if (dhd->prot->d2hring_hp2p_txcpl &&
12592 ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_TXREQ_PKTID) {
12593 if (!dhd->prot->d2hring_hp2p_txcpl->create_pending) {
12598 if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
12600 ltoh16(resp->cmplt.status)));
12603 dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
12604 dhd->prot->d2hring_hp2p_txcpl->inited = TRUE;
12606 if (dhd->prot->d2hring_hp2p_rxcpl &&
12607 ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_RXREQ_PKTID) {
12608 if (!dhd->prot->d2hring_hp2p_rxcpl->create_pending) {
12613 if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
12615 ltoh16(resp->cmplt.status)));
12618 dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
12619 dhd->prot->d2hring_hp2p_rxcpl->inited = TRUE;
/*
 * NOTE(review): fragments of the D2H mailbox-data handler (forwards the
 * dongle's mailbox word to dhd_bus_handle_mb_data) and the host timestamp
 * completion handler (frees/validates the host-TS request packet id and
 * forwards xt_id/status to the timesync module). Interior lines elided —
 * note 12648's `== FALSE` check and 12652's assignment both appear; the
 * in-between logic is not visible here.
 */
12631 d2h_data->d2h_mailbox_data));
12632 dhd_bus_handle_mb_data(dhd->bus, d2h_data->d2h_mailbox_data);
12641 dhd_prot_t *prot = dhd->prot;
12645 host_ts_cpl->cmplt.status, host_ts_cpl->msg.request_id, host_ts_cpl->xt_id));
12647 pktid = ltoh32(host_ts_cpl->msg.request_id);
12648 if (prot->hostts_req_buf_inuse == FALSE) {
12652 prot->hostts_req_buf_inuse = FALSE;
12658 dhd_timesync_handle_host_ts_complete(dhd->ts, host_ts_cpl->xt_id,
12659 host_ts_cpl->cmplt.status);
/*
 * NOTE(review): fragment of dhd_prot_print_flow_ring — dumps host-side and
 * DMA-array rd/wr indices plus ring geometry into a bcm_bprintf buffer.
 * Skips shared-memory reads when the PCIe link is down. The H2D vs D2H
 * index-source selection (12695) keys off dma_d2h_ring_upd_support — the
 * missing condition lines are elided; confirm in full source.
 */
12682 uint32 dma_buf_len = flow_ring->max_items * flow_ring->item_len;
12688 if (dhd->bus->is_linkdown) {
12693 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
12694 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
12695 if (dhd->dma_d2h_ring_upd_support) {
12697 drd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, flow_ring->idx);
12698 dwr = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, flow_ring->idx);
12700 drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, flow_ring->idx);
12701 dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, flow_ring->idx);
12704 bcm_bprintf(strbuf, fmt, rd, flow_ring->rd, drd, wr, flow_ring->wr, dwr,
12705 flow_ring->dma_buf.va,
12706 ltoh32(flow_ring->base_addr.high_addr),
12707 ltoh32(flow_ring->base_addr.low_addr),
12708 flow_ring->item_len, flow_ring->max_items,
/*
 * NOTE(review): fragment of the protocol debug-dump routine. Prints IPC
 * versions, buffer-post counters, each common ring (via
 * dhd_prot_print_flow_ring), optional info/EDL rings, active tx count,
 * pktid map availability, ioctl state, and bus traces. Interior lines
 * elided — purely diagnostic output, no state mutation visible.
 */
12714 dhd_prot_t *prot = dhd->prot;
12716 dhd->prot->device_ipc_version,
12717 dhd->prot->host_ipc_version,
12718 dhd->prot->active_ipc_version);
12721 dhd->prot->max_tsbufpost, dhd->prot->cur_ts_bufs_posted;
12723 dhd->prot->max_infobufpost, dhd->prot->infobufpost);
12726 dhd->prot->max_btlogbufpost, dhd->prot->btlogbufpost);
12729 dhd->prot->max_eventbufpost, dhd->prot->cur_event_bufs_posted);
12731 dhd->prot->max_ioctlrespbufpost, dhd->prot->cur_ioctlresp_bufs_posted);
12733 dhd->prot->max_rxbufpost, dhd->prot->rxbufpost);
12736 dhd->prot->tot_rxbufpost, dhd->prot->tot_rxcpl);
12739 dhd->actual_tx_pkts, dhd->tot_txcpl);
12746 dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, TRUE, strbuf,
12749 dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, FALSE, strbuf,
12752 dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, TRUE, strbuf,
12755 dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, FALSE, strbuf,
12758 dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, FALSE, strbuf,
12760 if (dhd->prot->h2dring_info_subn != NULL && dhd->prot->d2hring_info_cpln != NULL) {
12762 dhd_prot_print_flow_ring(dhd, prot->h2dring_info_subn, TRUE, strbuf,
12765 dhd_prot_print_flow_ring(dhd, prot->d2hring_info_cpln, FALSE, strbuf,
12768 if (dhd->prot->d2hring_edl != NULL) {
12770 dhd_prot_print_flow_ring(dhd, prot->d2hring_edl, FALSE, strbuf,
12775 OSL_ATOMIC_READ(dhd->osh, &dhd->prot->active_tx_count),
12776 DHD_PKTID_AVAIL(dhd->prot->pktid_ctrl_map),
12777 DHD_PKTID_AVAIL(dhd->prot->pktid_rx_map),
12778 DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map));
12781 dhd_prot_ioctl_dump(dhd->prot, strbuf);
12784 dhd_dump_bus_mmio_trace(dhd->bus, strbuf);
12786 dhd_dump_bus_ds_trace(dhd->bus, strbuf);
12788 dhd_dump_bus_flow_ring_status_isr_trace(dhd->bus, strbuf);
12789 dhd_dump_bus_flow_ring_status_dpc_trace(dhd->bus, strbuf);
/*
 * NOTE(review): fragment of dhd_prot_flow_ring_delete — submits a
 * FLOW_RING_DELETE request on the control ring for the given flowid,
 * following the standard pattern: devwake assert, ring lock, space check,
 * fill work item, submit, unlock, devwake release. Interior lines elided.
 */
12797 dhd_prot_t *prot = dhd->prot;
12800 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
12803 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
12807 DHD_RING_LOCK(ring->ring_lock, flags);
12814 DHD_RING_UNLOCK(ring->ring_lock, flags);
12815 DHD_ERROR(("%s: Flow Delete Req - failure ring space\n", __FUNCTION__));
12817 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
12823 flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE;
12824 flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
12825 flow_delete_rqst->msg.request_id = htol32(0); /* TBD */
12826 flow_delete_rqst->msg.flags = ring->current_phase;
12828 flow_delete_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
12829 ring->seqnum++;
12832 flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
12833 flow_delete_rqst->reason = htol16(BCME_OK);
12836 " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
12837 flow_ring_node->flow_info.da, flow_ring_node->flow_info.tid,
12838 flow_ring_node->flow_info.ifindex));
12843 DHD_RING_UNLOCK(ring->ring_lock, flags);
12846 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
/*
 * NOTE(review): fragments of flow-ring fast-delete (synthesizes tx statuses
 * for still-posted descriptors by walking back from the write index, with
 * wrap at max_items) and the flow delete/resume response handlers that
 * forward status to the bus layer. Interior lines elided.
 */
12855 msgbuf_ring_t *ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
12861 __FUNCTION__, flowid, rd_idx, ring->wr));
12865 txstatus.cmn_hdr.if_id = flow_ring_node->flow_info.ifindex;
12866 wr_idx = ring->wr;
/* Walk backwards through unconsumed descriptors, wrapping below zero. */
12870 wr_idx--;
12872 wr_idx = ring->max_items - 1;
12874 (wr_idx * ring->item_len));
12875 txstatus.cmn_hdr.request_id = txdesc->cmn_hdr.request_id;
/* --- flow delete response --- */
12886 flow_delete_resp->cmplt.status, flow_delete_resp->cmplt.flow_ring_id));
12888 if (dhd->fast_delete_ring_support) {
12889 dhd_prot_flow_ring_fastdelete(dhd, flow_delete_resp->cmplt.flow_ring_id,
12890 flow_delete_resp->read_idx);
12892 dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id,
12893 flow_delete_resp->cmplt.status);
/* --- flow resume response --- */
12904 flow_resume_resp->cmplt.status, flow_resume_resp->cmplt.flow_ring_id));
12906 dhd_bus_flow_ring_resume_response(dhd->bus, flow_resume_resp->cmplt.flow_ring_id,
12907 flow_resume_resp->cmplt.status);
/*
 * NOTE(review): fragments of the flow-suspend response handler and
 * dhd_prot_flow_ring_flush (FLOW_RING_FLUSH request on the control ring,
 * same devwake/lock/space/fill/submit pattern as delete) plus the flush
 * response forwarder. Interior lines elided.
 */
12918 status = flow_suspend_resp->cmplt.status;
12921 __FUNCTION__, flow_suspend_resp->cmplt.flow_ring_id,
/* --- flow ring flush request --- */
12937 dhd_prot_t *prot = dhd->prot;
12940 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
12943 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
12947 DHD_RING_LOCK(ring->ring_lock, flags);
12953 DHD_RING_UNLOCK(ring->ring_lock, flags);
12954 DHD_ERROR(("%s: Flow Flush Req - failure ring space\n", __FUNCTION__));
12956 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
12962 flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH;
12963 flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
12964 flow_flush_rqst->msg.request_id = htol32(0); /* TBD */
12965 flow_flush_rqst->msg.flags = ring->current_phase;
12966 flow_flush_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
12967 ring->seqnum++;
12969 flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
12970 flow_flush_rqst->reason = htol16(BCME_OK);
12977 DHD_RING_UNLOCK(ring->ring_lock, flags);
12980 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
/* --- flow flush response --- */
12991 flow_flush_resp->cmplt.status));
12993 dhd_bus_flow_ring_flush_response(dhd->bus, flow_flush_resp->cmplt.flow_ring_id,
12994 flow_flush_resp->cmplt.status);
/*
 * NOTE(review): fragment of the D2H ring-config request path — programs a
 * "soft doorbell" (host address + value the dongle writes instead of
 * raising an interrupt) for each D2H ring via D2H_RING_CONFIG control
 * messages, batching one work item per ring (msg_next stride at 13072).
 * The response handler fragment (13089-13091) just logs status. Interior
 * lines elided.
 */
13011 dhd_prot_t *prot = dhd->prot;
13014 msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
13018 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
13022 DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
13028 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
13030 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
13043 ring_config_req->msg.msg_type = MSG_TYPE_D2H_RING_CONFIG;
13044 ring_config_req->msg.if_id = 0;
13045 ring_config_req->msg.flags = 0;
13047 ring_config_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
13048 ctrl_ring->seqnum++;
13050 ring_config_req->msg.request_id = htol32(DHD_FAKE_PKTID); /* unused */
13053 ring_config_req->subtype = htol16(D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL);
13054 ring_config_req->ring_id = htol16(DHD_D2H_RINGID(ring_idx));
13057 soft_doorbell = &prot->soft_doorbell[ring_idx];
13059 ring_config_req->soft_doorbell.value = htol32(soft_doorbell->value);
13060 ring_config_req->soft_doorbell.haddr.high =
13061 htol32(soft_doorbell->haddr.high);
13062 ring_config_req->soft_doorbell.haddr.low =
13063 htol32(soft_doorbell->haddr.low);
13064 ring_config_req->soft_doorbell.items = htol16(soft_doorbell->items);
13065 ring_config_req->soft_doorbell.msecs = htol16(soft_doorbell->msecs);
13068 __FUNCTION__, ring_config_req->soft_doorbell.haddr.high,
13069 ring_config_req->soft_doorbell.haddr.low,
13070 ring_config_req->soft_doorbell.value));
/* Advance to the next work item slot in the batched submission. */
13072 msg_next = msg_next + ctrl_ring->item_len;
13078 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
13081 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
/* --- ring config response (log-only fragment) --- */
13089 DHD_INFO(("%s: Ring Config Response - status %d ringid %d\n",
13090 __FUNCTION__, ltoh16(((ring_config_resp_t *)msg)->compl_hdr.status),
13091 ltoh16(((ring_config_resp_t *)msg)->compl_hdr.flow_ring_id)));
13098 uint32 *ext_data = dhd->extended_trap_data;
13111 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
13113 memcpy(tr, &tlv->data, sizeof(struct _trap_struct));
13181 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13187 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13191 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13199 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13203 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13211 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13216 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13225 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13230 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13242 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13247 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13265 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13271 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13275 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13287 uint32 *ext_data = dhd->extended_trap_data;
13303 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
13305 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13309 stack = (const uint32 *)tlv->data;
13316 for (i = 1; i < (uint32)(tlv->len / sizeof(uint32)); i++, bigdata_key_stack_cnt++) {
13317 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13332 remain_stack_cnt = HANG_FIELD_TRAP_T_STACK_CNT_MAX - i;
13335 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13353 uint32 *ext_data = dhd->extended_trap_data;
13369 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
13372 remain_trap_data = (hdr->len - tlv->len - sizeof(uint16));
13375 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
13378 remain_trap_data -= (tlv->len + sizeof(uint16));
13381 data = (const uint32 *)(hdr->data + (hdr->len - remain_trap_data));
13383 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13387 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13398 remain_trap_data -= (sizeof(uint32) * i);
13411 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13423 uint32 *ext_data = dhd->extended_trap_data;
13435 tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
13449 uint32 *ext_data = dhd->extended_trap_data;
13464 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13477 base64_out = MALLOCZ(dhd->osh, HANG_INFO_BASE64_BUFFER_SIZE);
13484 if (hdr->len > 0) {
13485 base64_cnt = dhd_base64_encode(hdr->data, hdr->len, base64_out, max_base64_len);
13494 MFREE(dhd->osh, base64_out, HANG_INFO_BASE64_BUFFER_SIZE);
13505 if (!dhd || !dhd->hang_info) {
13507 dhd, (dhd ? dhd->hang_info : NULL)));
13511 if (!dhd->dongle_trap_occured) {
13525 dhd->hang_info_cnt = 0;
13526 get_debug_dump_time(dhd->debug_dump_time_hang_str);
13527 copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str);
13529 copy_hang_info_head(dhd->hang_info, &tr, VENDOR_SEND_HANG_EXT_INFO_LEN, FALSE,
13530 &bytes_written, &dhd->hang_info_cnt, dhd->debug_dump_time_hang_str);
13533 dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
13535 clear_debug_dump_time(dhd->debug_dump_time_hang_str);
13541 if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
13542 copy_hang_info_etd_base64(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
13544 dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
13547 if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
13548 copy_hang_info_stack(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
13550 dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
13553 if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
13554 copy_hang_info_trap_t(dhd->hang_info, &tr, VENDOR_SEND_HANG_EXT_INFO_LEN, FALSE,
13555 &bytes_written, &dhd->hang_info_cnt, dhd->debug_dump_time_hang_str);
13557 dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
13560 if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
13561 copy_hang_info_specific(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
13563 dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
13574 if (!dhd || !dhd->hang_info) {
13576 dhd, (dhd ? dhd->hang_info : NULL)));
13580 if (!dhd->bus->is_linkdown) {
13585 dhd->hang_info_cnt = 0;
13587 get_debug_dump_time(dhd->debug_dump_time_hang_str);
13588 copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str);
13591 if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
13592 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
13593 bytes_written += scnprintf(&dhd->hang_info[bytes_written], remain_len, "%d%c",
13595 dhd->hang_info_cnt++;
13599 if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
13600 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
13601 bytes_written += scnprintf(&dhd->hang_info[bytes_written], remain_len, "%d%c",
13603 dhd->hang_info_cnt++;
13606 /* cookie - dump time stamp */
13607 if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
13608 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
13609 bytes_written += scnprintf(&dhd->hang_info[bytes_written], remain_len, "%s%c",
13610 dhd->debug_dump_time_hang_str, HANG_KEY_DEL);
13611 dhd->hang_info_cnt++;
13614 clear_debug_dump_time(dhd->debug_dump_time_hang_str);
13620 dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
13628 dhd_prot_t *prot = dhd->prot;
13636 DHD_ERROR(("\n ------- DUMPING VERSION INFORMATION ------- \r\n"));
13641 DHD_ERROR(("\n ------- DUMPING CONFIGURATION INFORMATION ------ \r\n"));
13642 DHD_ERROR(("memdump mode: %d\n", dhd->memdump_enabled));
13645 DHD_ERROR(("\n ------- DUMPING PROTOCOL INFORMATION ------- \r\n"));
13647 prot->device_ipc_version,
13648 prot->host_ipc_version,
13649 prot->active_ipc_version));
13650 DHD_ERROR(("d2h_intr_method -> %s d2h_intr_control -> %s\n",
13651 dhd->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX",
13652 dhd->bus->d2h_intr_control ? "HOST_IRQ" : "D2H_INTMASK"));
13654 prot->max_tsbufpost, prot->cur_ts_bufs_posted));
13656 prot->max_infobufpost, prot->infobufpost));
13658 prot->max_eventbufpost, prot->cur_event_bufs_posted));
13660 prot->max_ioctlrespbufpost, prot->cur_ioctlresp_bufs_posted));
13662 prot->max_rxbufpost, prot->rxbufpost));
13663 DHD_ERROR(("h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
13664 h2d_max_txpost, prot->h2d_max_txpost));
13666 DHD_ERROR(("h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
13667 h2d_htput_max_txpost, prot->h2d_htput_max_txpost));
13675 GET_SEC_USEC(prot->ioctl_fillup_time),
13676 GET_SEC_USEC(prot->ioctl_ack_time),
13677 GET_SEC_USEC(prot->ioctl_cmplt_time)));
13682 dhd->bus->is_linkdown = TRUE;
13685 DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n"));
13687 ring = &prot->h2dring_ctrl_subn;
13688 dma_buf_len = ring->max_items * ring->item_len;
13689 ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13690 ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13693 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13694 ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len));
13695 DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13696 if (dhd->dma_d2h_ring_upd_support) {
13697 drd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
13698 dwr = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
13701 if (dhd->bus->is_linkdown) {
13705 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13706 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13709 DHD_ERROR(("CtrlPost: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
13711 ring = &prot->d2hring_ctrl_cpln;
13712 dma_buf_len = ring->max_items * ring->item_len;
13713 ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13714 ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13717 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13718 ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len));
13719 DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13720 if (dhd->dma_d2h_ring_upd_support) {
13721 drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
13722 dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
13725 if (dhd->bus->is_linkdown) {
13729 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13730 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13733 DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
13735 ring = prot->h2dring_info_subn;
13737 dma_buf_len = ring->max_items * ring->item_len;
13738 ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13739 ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13742 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13743 ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
13745 DHD_ERROR(("InfoSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13746 if (dhd->dma_d2h_ring_upd_support) {
13747 drd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
13748 dwr = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
13751 if (dhd->bus->is_linkdown) {
13755 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13756 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13759 DHD_ERROR(("InfoSub: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
13761 ring = prot->d2hring_info_cpln;
13763 dma_buf_len = ring->max_items * ring->item_len;
13764 ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13765 ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13768 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13769 ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
13771 DHD_ERROR(("InfoCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13772 if (dhd->dma_d2h_ring_upd_support) {
13773 drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
13774 dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
13777 if (dhd->bus->is_linkdown) {
13781 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13782 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13785 DHD_ERROR(("InfoCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
13788 ring = prot->d2hring_edl;
13790 ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13791 ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13792 dma_buf_len = ring->max_items * ring->item_len;
13795 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13796 ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
13798 DHD_ERROR(("EdlRing: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13799 if (dhd->dma_d2h_ring_upd_support) {
13800 drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
13801 dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
13804 if (dhd->bus->is_linkdown) {
13808 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13809 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13813 ring->seqnum % D2H_EPOCH_MODULO));
13817 ring = &prot->d2hring_tx_cpln;
13819 ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13820 ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13821 dma_buf_len = ring->max_items * ring->item_len;
13824 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13825 ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
13827 DHD_ERROR(("TxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13828 if (dhd->dma_d2h_ring_upd_support) {
13829 drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
13830 dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
13833 if (dhd->bus->is_linkdown) {
13837 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13838 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13841 DHD_ERROR(("TxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
13844 ring = &prot->d2hring_rx_cpln;
13846 ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13847 ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13848 dma_buf_len = ring->max_items * ring->item_len;
13851 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13852 ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
13854 DHD_ERROR(("RxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13855 if (dhd->dma_d2h_ring_upd_support) {
13856 drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
13857 dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
13860 if (dhd->bus->is_linkdown) {
13864 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13865 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13868 DHD_ERROR(("RxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
13871 ring = &prot->h2dring_rxp_subn;
13873 ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13874 ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13875 dma_buf_len = ring->max_items * ring->item_len;
13878 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13879 ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
13881 DHD_ERROR(("RxSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13882 if (dhd->dma_d2h_ring_upd_support) {
13883 drd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
13884 dwr = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
13887 if (dhd->bus->is_linkdown) {
13891 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13892 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13895 DHD_ERROR(("RxSub: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
13899 __FUNCTION__, prot->cur_ioctlresp_bufs_posted, prot->cur_event_bufs_posted));
13902 __FUNCTION__, dhd->multi_client_flow_rings, dhd->max_multi_client_flow_rings));
13905 DHD_ERROR(("pktid_txq_start_cnt: %d\n", prot->pktid_txq_start_cnt));
13906 DHD_ERROR(("pktid_txq_stop_cnt: %d\n", prot->pktid_txq_stop_cnt));
13907 DHD_ERROR(("pktid_depleted_cnt: %d\n", prot->pktid_depleted_cnt));
13911 dhd->lb_rxp_stop_thr_hitcnt, dhd->lb_rxp_strt_thr_hitcnt));
13913 dhd->lb_rxp_napi_sched_cnt, dhd->lb_rxp_napi_complete_cnt));
13927 if (dhd->prot->d2h_dma_indx_wr_buf.va) {
13929 uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus);
13931 OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va,
13932 dhd->prot->d2h_dma_indx_wr_buf.len);
13934 ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va);
13954 if (dhd->prot->h2d_dma_indx_rd_buf.va) {
13955 OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va,
13956 dhd->prot->h2d_dma_indx_rd_buf.len);
13958 ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va);
13977 dhd_prot_t *prot = dhd->prot;
13979 prot->metadata_dbg = val;
13981 return (uint32)prot->metadata_dbg;
13987 dhd_prot_t *prot = dhd->prot;
13988 return (uint32)prot->metadata_dbg;
13995 dhd_prot_t *prot = dhd->prot;
13997 prot->rx_metadata_offset = (uint16)val;
13999 prot->tx_metadata_offset = (uint16)val;
14007 dhd_prot_t *prot = dhd->prot;
14009 return prot->rx_metadata_offset;
14011 return prot->tx_metadata_offset;
14018 dhd_prot_t *prot = dhd->prot;
14020 prot->txp_threshold = (uint16)val;
14021 val = prot->txp_threshold;
14030 rxchain->pkt_count = 0;
14038 dhd_prot_t *prot = dhd->prot;
14039 rxchain_info_t *rxchain = &prot->rxchain;
14045 eh = PKTDATA(dhd->osh, pkt);
14048 if (rxchain->pkt_count && !(PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa,
14049 rxchain->h_da, rxchain->h_prio))) {
14050 /* Different flow - First release the existing chain */
14056 if (rxchain->pkt_count == 0) {
14058 rxchain->pkthead = rxchain->pkttail = pkt;
14061 rxchain->h_da = ((struct ether_header *)eh)->ether_dhost;
14062 rxchain->h_sa = ((struct ether_header *)eh)->ether_shost;
14063 rxchain->h_prio = prio;
14064 rxchain->ifidx = ifidx;
14065 rxchain->pkt_count++;
14067 /* Same flow - keep chaining */
14068 PKTSETCLINK(rxchain->pkttail, pkt);
14069 rxchain->pkttail = pkt;
14070 rxchain->pkt_count++;
14073 if ((dhd_rx_pkt_chainable(dhd, ifidx)) && (!ETHER_ISMULTI(rxchain->h_da)) &&
14074 ((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) ||
14075 (((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IPV6)))) {
14076 PKTSETCHAINED(dhd->osh, pkt);
14077 PKTCINCRCNT(rxchain->pkthead);
14078 PKTCADDLEN(rxchain->pkthead, PKTLEN(dhd->osh, pkt));
14085 if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN) {
14093 dhd_prot_t *prot = dhd->prot;
14094 rxchain_info_t *rxchain = &prot->rxchain;
14096 if (rxchain->pkt_count == 0)
14100 dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx, rxchain->pkt_count);
14114 dhd_prot_t *prot = dhd->prot;
14117 msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
14119 /* Fetch a pre-initialized msgbuf_ring from the flowring pool */
14120 flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
14123 __FUNCTION__, flow_ring_node->flowid));
14128 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
14132 DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
14139 dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
14140 DHD_ERROR(("%s: Flow resume Req flowid %d - failure ring space\n",
14141 __FUNCTION__, flow_ring_node->flowid));
14142 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
14144 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
14149 flow_ring_node->prot_info = (void *)flow_ring;
14152 flow_resume_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_RESUME;
14153 flow_resume_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
14154 flow_resume_rqst->msg.request_id = htol32(0); /* TBD */
14156 flow_resume_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
14157 ctrl_ring->seqnum++;
14159 flow_resume_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
14161 __FUNCTION__, flow_ring_node->flowid));
14164 if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
14165 dhd_prot_dma_indx_set(dhd, flow_ring->wr,
14166 H2D_DMA_INDX_WR_UPD, flow_ring->idx);
14167 } else if (IFRM_ACTIVE(dhd) && (flow_ring->idx >= BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START)) {
14168 dhd_prot_dma_indx_set(dhd, flow_ring->wr,
14170 (flow_ring->idx - BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START));
14172 dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
14173 sizeof(uint16), RING_WR_UPD, flow_ring->idx);
14179 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
14182 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
14191 dhd_prot_t *prot = dhd->prot;
14195 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
14198 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
14202 DHD_RING_LOCK(ring->ring_lock, flags);
14209 DHD_RING_UNLOCK(ring->ring_lock, flags);
14210 DHD_ERROR(("%s: Flow suspend Req - failure ring space\n", __FUNCTION__));
14212 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
14218 flow_suspend_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_SUSPEND;
14219 /* flow_suspend_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; */
14220 flow_suspend_rqst->msg.request_id = htol32(0); /* TBD */
14222 flow_suspend_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
14223 ring->seqnum++;
14228 flow_suspend_rqst->ring_id[index] = ringid[index];
14230 flow_suspend_rqst->num = count;
14237 DHD_RING_UNLOCK(ring->ring_lock, flags);
14240 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
14251 struct dhd_prot *prot = dhd->prot;
14252 uint32 cnt = prot->ioctl_trace_count % MAX_IOCTL_TRACE_SIZE;
14254 prot->ioctl_trace[cnt].cmd = ioct_rqst->cmd;
14255 prot->ioctl_trace[cnt].transid = ioct_rqst->trans_id;
14256 if ((ioct_rqst->cmd == 262 || ioct_rqst->cmd == 263) && buf)
14257 memcpy(prot->ioctl_trace[cnt].ioctl_buf, buf,
14260 memset(prot->ioctl_trace[cnt].ioctl_buf, 0, MAX_IOCTL_BUF_SIZE);
14261 prot->ioctl_trace[cnt].timestamp = OSL_SYSUPTIME_US();
14262 prot->ioctl_trace_count ++;
14271 dumpsz = prot->ioctl_trace_count < MAX_IOCTL_TRACE_SIZE ?
14272 prot->ioctl_trace_count : MAX_IOCTL_TRACE_SIZE;
14277 bcm_bprintf(strbuf, "----------- IOCTL TRACE --------------\n");
14281 prot->ioctl_trace[i].timestamp,
14282 prot->ioctl_trace[i].cmd,
14283 prot->ioctl_trace[i].transid,
14284 prot->ioctl_trace[i].ioctl_buf);
14295 bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, psmwd->i32_psmdebug[i]);
14297 bcm_bprintf(b, " gated clock en: 0x%x\n", psmwd->i16_0x1a8);
14298 bcm_bprintf(b, " Rcv Fifo Ctrl: 0x%x\n", psmwd->i16_0x406);
14299 bcm_bprintf(b, " Rx ctrl 1: 0x%x\n", psmwd->i16_0x408);
14300 bcm_bprintf(b, " Rxe Status 1: 0x%x\n", psmwd->i16_0x41a);
14301 bcm_bprintf(b, " Rxe Status 2: 0x%x\n", psmwd->i16_0x41c);
14302 bcm_bprintf(b, " rcv wrd count 0: 0x%x\n", psmwd->i16_0x424);
14303 bcm_bprintf(b, " rcv wrd count 1: 0x%x\n", psmwd->i16_0x426);
14304 bcm_bprintf(b, " RCV_LFIFO_STS: 0x%x\n", psmwd->i16_0x456);
14305 bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", psmwd->i16_0x480);
14306 bcm_bprintf(b, " TXE CTRL: 0x%x\n", psmwd->i16_0x500);
14307 bcm_bprintf(b, " TXE Status: 0x%x\n", psmwd->i16_0x50e);
14308 bcm_bprintf(b, " TXE_xmtdmabusy: 0x%x\n", psmwd->i16_0x55e);
14309 bcm_bprintf(b, " TXE_XMTfifosuspflush: 0x%x\n", psmwd->i16_0x566);
14310 bcm_bprintf(b, " IFS Stat: 0x%x\n", psmwd->i16_0x690);
14311 bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", psmwd->i16_0x692);
14312 bcm_bprintf(b, " IFS_TX_DUR: 0x%x\n", psmwd->i16_0x694);
14313 bcm_bprintf(b, " SLow_CTL: 0x%x\n", psmwd->i16_0x6a0);
14314 bcm_bprintf(b, " PSM BRC: 0x%x\n", psmwd->i16_0x490);
14315 bcm_bprintf(b, " TXE_AQM fifo Ready: 0x%x\n", psmwd->i16_0x838);
14316 bcm_bprintf(b, " Dagg ctrl: 0x%x\n", psmwd->i16_0x8c0);
14317 bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", psmwd->shm_prewds_cnt);
14318 bcm_bprintf(b, " shm_txtplufl_cnt: 0x%x\n", psmwd->shm_txtplufl_cnt);
14319 bcm_bprintf(b, " shm_txphyerr_cnt: 0x%x\n", psmwd->shm_txphyerr_cnt);
14329 bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, psmwd->i32_psmdebug[i]);
14332 bcm_bprintf(b, " psm_brwk0: 0x%x\n", psmwd->i16_0x4b8);
14333 bcm_bprintf(b, " psm_brwk1: 0x%x\n", psmwd->i16_0x4ba);
14334 bcm_bprintf(b, " psm_brwk2: 0x%x\n", psmwd->i16_0x4bc);
14335 bcm_bprintf(b, " psm_brwk3: 0x%x\n", psmwd->i16_0x4be);
14336 bcm_bprintf(b, " PSM BRC_1: 0x%x\n", psmwd->i16_0x4da);
14337 bcm_bprintf(b, " gated clock en: 0x%x\n", psmwd->i16_0x1a8);
14338 bcm_bprintf(b, " Rcv Fifo Ctrl: 0x%x\n", psmwd->i16_0x406);
14339 bcm_bprintf(b, " Rx ctrl 1: 0x%x\n", psmwd->i16_0x408);
14340 bcm_bprintf(b, " Rxe Status 1: 0x%x\n", psmwd->i16_0x41a);
14341 bcm_bprintf(b, " Rxe Status 2: 0x%x\n", psmwd->i16_0x41c);
14342 bcm_bprintf(b, " rcv wrd count 0: 0x%x\n", psmwd->i16_0x424);
14343 bcm_bprintf(b, " rcv wrd count 1: 0x%x\n", psmwd->i16_0x426);
14344 bcm_bprintf(b, " RCV_LFIFO_STS: 0x%x\n", psmwd->i16_0x456);
14345 bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", psmwd->i16_0x480);
14346 bcm_bprintf(b, " TXE CTRL: 0x%x\n", psmwd->i16_0x500);
14347 bcm_bprintf(b, " TXE Status: 0x%x\n", psmwd->i16_0x50e);
14348 bcm_bprintf(b, " TXE_xmtdmabusy: 0x%x\n", psmwd->i16_0x55e);
14349 bcm_bprintf(b, " TXE_XMTfifosuspflush: 0x%x\n", psmwd->i16_0x566);
14350 bcm_bprintf(b, " IFS Stat: 0x%x\n", psmwd->i16_0x690);
14351 bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", psmwd->i16_0x692);
14352 bcm_bprintf(b, " IFS_TX_DUR: 0x%x\n", psmwd->i16_0x694);
14353 bcm_bprintf(b, " SLow_CTL: 0x%x\n", psmwd->i16_0x6a0);
14354 bcm_bprintf(b, " PSM BRC: 0x%x\n", psmwd->i16_0x490);
14355 bcm_bprintf(b, " TXE_AQM fifo Ready: 0x%x\n", psmwd->i16_0x838);
14356 bcm_bprintf(b, " Dagg ctrl: 0x%x\n", psmwd->i16_0x8c0);
14357 bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", psmwd->shm_prewds_cnt);
14358 bcm_bprintf(b, " shm_txtplufl_cnt: 0x%x\n", psmwd->shm_txtplufl_cnt);
14359 bcm_bprintf(b, " shm_txphyerr_cnt: 0x%x\n", psmwd->shm_txphyerr_cnt);
14401 ext_data = dhdp->extended_trap_data;
14404 if (!ext_data || !(dhdp->dongle_trap_data & D2H_DEV_EXT_TRAP_DATA)) {
14405 bcm_bprintf(b, "%d (0x%x)", dhdp->dongle_trap_data, dhdp->dongle_trap_data);
14417 bcm_bprintf(b, "version: %d, len: %d\n", hdr->version, hdr->len);
14422 tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
14424 bcm_bprintf(b, "Tag: %d (%s), Length: %d\n", i, etd_trap_name(i), tlv->len);
14429 raw_len = sizeof(hnd_ext_trap_hdr_t) + (hdr->len / 4) + (hdr->len % 4 ? 1 : 0);
14440 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_CODE);
14442 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_CODE), tlv->len);
14443 bcm_bprintf(b, "ETD TYPE: %d\n", tlv->data[0]);
14446 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
14448 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_SIGNATURE), tlv->len);
14449 tr = (const trap_t *)tlv->data;
14452 tr->type, tr->pc, tr->r14, tr->r13, tr->cpsr, tr->spsr);
14454 tr->r0, tr->r1, tr->r2, tr->r3, tr->r4, tr->r5, tr->r6);
14456 tr->r7, tr->r8, tr->r9, tr->r10, tr->r11, tr->r12);
14459 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
14461 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_STACK), tlv->len);
14462 stack = (const uint32 *)tlv->data;
14463 for (i = 0; i < (uint32)(tlv->len / 4); i++)
14470 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BACKPLANE);
14472 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BACKPLANE), tlv->len);
14473 bpe = (const hnd_ext_trap_bp_err_t *)tlv->data;
14474 bcm_bprintf(b, " error: %x\n", bpe->error);
14475 bcm_bprintf(b, " coreid: %x\n", bpe->coreid);
14476 bcm_bprintf(b, " baseaddr: %x\n", bpe->baseaddr);
14477 bcm_bprintf(b, " ioctrl: %x\n", bpe->ioctrl);
14478 bcm_bprintf(b, " iostatus: %x\n", bpe->iostatus);
14479 bcm_bprintf(b, " resetctrl: %x\n", bpe->resetctrl);
14480 bcm_bprintf(b, " resetstatus: %x\n", bpe->resetstatus);
14481 bcm_bprintf(b, " errlogctrl: %x\n", bpe->errlogctrl);
14482 bcm_bprintf(b, " errlogdone: %x\n", bpe->errlogdone);
14483 bcm_bprintf(b, " errlogstatus: %x\n", bpe->errlogstatus);
14484 bcm_bprintf(b, " errlogaddrlo: %x\n", bpe->errlogaddrlo);
14485 bcm_bprintf(b, " errlogaddrhi: %x\n", bpe->errlogaddrhi);
14486 bcm_bprintf(b, " errlogid: %x\n", bpe->errlogid);
14487 bcm_bprintf(b, " errloguser: %x\n", bpe->errloguser);
14488 bcm_bprintf(b, " errlogflags: %x\n", bpe->errlogflags);
14491 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MEMORY);
14495 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MEMORY), tlv->len);
14496 hme = (const hnd_ext_trap_heap_err_t *)tlv->data;
14497 bcm_bprintf(b, " arena total: %d\n", hme->arena_total);
14498 bcm_bprintf(b, " heap free: %d\n", hme->heap_free);
14499 bcm_bprintf(b, " heap in use: %d\n", hme->heap_inuse);
14500 bcm_bprintf(b, " mf count: %d\n", hme->mf_count);
14501 bcm_bprintf(b, " stack LWM: %x\n", hme->stack_lwm);
14505 if (hme->heap_histogm[i] == 0xfffe)
14506 bcm_bprintf(b, " Others\t%d\t?\n", hme->heap_histogm[i + 1]);
14507 else if (hme->heap_histogm[i] == 0xffff)
14508 bcm_bprintf(b, " >= 256K\t%d\t?\n", hme->heap_histogm[i + 1]);
14510 bcm_bprintf(b, " %d\t%d\t%d\n", hme->heap_histogm[i] << 2,
14511 hme->heap_histogm[i + 1], (hme->heap_histogm[i] << 2)
14512 * hme->heap_histogm[i + 1]);
14515 bcm_bprintf(b, " Max free block: %d\n", hme->max_sz_free_blk[0] << 2);
14517 bcm_bprintf(b, " Next lgst free block: %d\n", hme->max_sz_free_blk[i] << 2);
14521 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PCIE_Q);
14525 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PCIE_Q), tlv->len);
14526 pqme = (const hnd_ext_trap_pcie_mem_err_t *)tlv->data;
14527 bcm_bprintf(b, " d2h queue len: %x\n", pqme->d2h_queue_len);
14528 bcm_bprintf(b, " d2h req queue len: %x\n", pqme->d2h_req_queue_len);
14531 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_WLC_STATE);
14535 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_WLC_STATE), tlv->len);
14536 wsme = (const hnd_ext_trap_wlc_mem_err_t *)tlv->data;
14537 bcm_bprintf(b, " instance: %d\n", wsme->instance);
14538 bcm_bprintf(b, " associated: %d\n", wsme->associated);
14539 bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
14540 bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
14541 bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
14542 bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
14543 bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]);
14544 bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]);
14546 if (tlv->len >= (sizeof(*wsme) * 2)) {
14548 bcm_bprintf(b, "\n instance: %d\n", wsme->instance);
14549 bcm_bprintf(b, " associated: %d\n", wsme->associated);
14550 bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
14551 bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
14552 bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
14553 bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
14554 bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]);
14555 bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]);
14559 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PHY);
14562 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PHY), tlv->len);
14563 phydbg = (const hnd_ext_trap_phydbg_t *)tlv->data;
14564 bcm_bprintf(b, " err: 0x%x\n", phydbg->err);
14565 bcm_bprintf(b, " RxFeStatus: 0x%x\n", phydbg->RxFeStatus);
14566 bcm_bprintf(b, " TxFIFOStatus0: 0x%x\n", phydbg->TxFIFOStatus0);
14567 bcm_bprintf(b, " TxFIFOStatus1: 0x%x\n", phydbg->TxFIFOStatus1);
14568 bcm_bprintf(b, " RfseqMode: 0x%x\n", phydbg->RfseqMode);
14569 bcm_bprintf(b, " RfseqStatus0: 0x%x\n", phydbg->RfseqStatus0);
14570 bcm_bprintf(b, " RfseqStatus1: 0x%x\n", phydbg->RfseqStatus1);
14571 bcm_bprintf(b, " RfseqStatus_Ocl: 0x%x\n", phydbg->RfseqStatus_Ocl);
14572 bcm_bprintf(b, " RfseqStatus_Ocl1: 0x%x\n", phydbg->RfseqStatus_Ocl1);
14573 bcm_bprintf(b, " OCLControl1: 0x%x\n", phydbg->OCLControl1);
14574 bcm_bprintf(b, " TxError: 0x%x\n", phydbg->TxError);
14575 bcm_bprintf(b, " bphyTxError: 0x%x\n", phydbg->bphyTxError);
14576 bcm_bprintf(b, " TxCCKError: 0x%x\n", phydbg->TxCCKError);
14577 bcm_bprintf(b, " TxCtrlWrd0: 0x%x\n", phydbg->TxCtrlWrd0);
14578 bcm_bprintf(b, " TxCtrlWrd1: 0x%x\n", phydbg->TxCtrlWrd1);
14579 bcm_bprintf(b, " TxCtrlWrd2: 0x%x\n", phydbg->TxCtrlWrd2);
14580 bcm_bprintf(b, " TxLsig0: 0x%x\n", phydbg->TxLsig0);
14581 bcm_bprintf(b, " TxLsig1: 0x%x\n", phydbg->TxLsig1);
14582 bcm_bprintf(b, " TxVhtSigA10: 0x%x\n", phydbg->TxVhtSigA10);
14583 bcm_bprintf(b, " TxVhtSigA11: 0x%x\n", phydbg->TxVhtSigA11);
14584 bcm_bprintf(b, " TxVhtSigA20: 0x%x\n", phydbg->TxVhtSigA20);
14585 bcm_bprintf(b, " TxVhtSigA21: 0x%x\n", phydbg->TxVhtSigA21);
14586 bcm_bprintf(b, " txPktLength: 0x%x\n", phydbg->txPktLength);
14587 bcm_bprintf(b, " txPsdulengthCtr: 0x%x\n", phydbg->txPsdulengthCtr);
14588 bcm_bprintf(b, " gpioClkControl: 0x%x\n", phydbg->gpioClkControl);
14589 bcm_bprintf(b, " gpioSel: 0x%x\n", phydbg->gpioSel);
14590 bcm_bprintf(b, " pktprocdebug: 0x%x\n", phydbg->pktprocdebug);
14592 bcm_bprintf(b, " gpioOut[%d]: 0x%x\n", i, phydbg->gpioOut[i]);
14595 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PSM_WD);
14599 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PSM_WD), tlv->len);
14600 psmwd = (const hnd_ext_trap_psmwd_t *)tlv->data;
14601 bcm_bprintf(b, " version: 0x%x\n", psmwd->version);
14602 bcm_bprintf(b, " maccontrol: 0x%x\n", psmwd->i32_maccontrol);
14603 bcm_bprintf(b, " maccommand: 0x%x\n", psmwd->i32_maccommand);
14604 bcm_bprintf(b, " macintstatus: 0x%x\n", psmwd->i32_macintstatus);
14605 bcm_bprintf(b, " phydebug: 0x%x\n", psmwd->i32_phydebug);
14606 bcm_bprintf(b, " clk_ctl_st: 0x%x\n", psmwd->i32_clk_ctl_st);
14607 if (psmwd->version == 1) {
14610 if (psmwd->version == 2) {
14615 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PHYTXERR_THRESH);
14618 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PHYTXERR_THRESH), tlv->len);
14619 phytxerr = (const hnd_ext_trap_macphytxerr_t *)tlv->data;
14620 bcm_bprintf(b, " version: 0x%x\n", phytxerr->version);
14621 bcm_bprintf(b, " trap_reason: %d\n", phytxerr->trap_reason);
14622 bcm_bprintf(b, " Tsf_rx_ts_0x63E: 0x%x\n", phytxerr->i16_0x63E);
14623 bcm_bprintf(b, " Tsf_tx_ts_0x640: 0x%x\n", phytxerr->i16_0x640);
14624 bcm_bprintf(b, " tsf_tmr_rx_end_ts_0x642: 0x%x\n", phytxerr->i16_0x642);
14625 bcm_bprintf(b, " TDC_FrmLen0_0x846: 0x%x\n", phytxerr->i16_0x846);
14626 bcm_bprintf(b, " TDC_FrmLen1_0x848: 0x%x\n", phytxerr->i16_0x848);
14627 bcm_bprintf(b, " TDC_Txtime_0x84a: 0x%x\n", phytxerr->i16_0x84a);
14628 bcm_bprintf(b, " TXE_BytCntInTxFrmLo_0xa5a: 0x%x\n", phytxerr->i16_0xa5a);
14629 bcm_bprintf(b, " TXE_BytCntInTxFrmHi_0xa5c: 0x%x\n", phytxerr->i16_0xa5c);
14630 bcm_bprintf(b, " TDC_VhtPsduLen0_0x856: 0x%x\n", phytxerr->i16_0x856);
14631 bcm_bprintf(b, " TDC_VhtPsduLen1_0x858: 0x%x\n", phytxerr->i16_0x858);
14632 bcm_bprintf(b, " PSM_BRC: 0x%x\n", phytxerr->i16_0x490);
14633 bcm_bprintf(b, " PSM_BRC_1: 0x%x\n", phytxerr->i16_0x4d8);
14634 bcm_bprintf(b, " shm_txerr_reason: 0x%x\n", phytxerr->shm_txerr_reason);
14635 bcm_bprintf(b, " shm_pctl0: 0x%x\n", phytxerr->shm_pctl0);
14636 bcm_bprintf(b, " shm_pctl1: 0x%x\n", phytxerr->shm_pctl1);
14637 bcm_bprintf(b, " shm_pctl2: 0x%x\n", phytxerr->shm_pctl2);
14638 bcm_bprintf(b, " shm_lsig0: 0x%x\n", phytxerr->shm_lsig0);
14639 bcm_bprintf(b, " shm_lsig1: 0x%x\n", phytxerr->shm_lsig1);
14640 bcm_bprintf(b, " shm_plcp0: 0x%x\n", phytxerr->shm_plcp0);
14641 bcm_bprintf(b, " shm_plcp1: 0x%x\n", phytxerr->shm_plcp1);
14642 bcm_bprintf(b, " shm_plcp2: 0x%x\n", phytxerr->shm_plcp2);
14643 bcm_bprintf(b, " shm_vht_sigb0: 0x%x\n", phytxerr->shm_vht_sigb0);
14644 bcm_bprintf(b, " shm_vht_sigb1: 0x%x\n", phytxerr->shm_vht_sigb1);
14645 bcm_bprintf(b, " shm_tx_tst: 0x%x\n", phytxerr->shm_tx_tst);
14646 bcm_bprintf(b, " shm_txerr_tm: 0x%x\n", phytxerr->shm_txerr_tm);
14647 bcm_bprintf(b, " shm_curchannel: 0x%x\n", phytxerr->shm_curchannel);
14648 bcm_bprintf(b, " shm_blk_crx_rxtsf_pos: 0x%x\n", phytxerr->shm_crx_rxtsf_pos);
14649 bcm_bprintf(b, " shm_lasttx_tsf: 0x%x\n", phytxerr->shm_lasttx_tsf);
14650 bcm_bprintf(b, " shm_s_rxtsftmrval: 0x%x\n", phytxerr->shm_s_rxtsftmrval);
14651 bcm_bprintf(b, " Phy_0x29: 0x%x\n", phytxerr->i16_0x29);
14652 bcm_bprintf(b, " Phy_0x2a: 0x%x\n", phytxerr->i16_0x2a);
14654 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_SUSP);
14657 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_SUSP), tlv->len);
14658 macsusp = (const hnd_ext_trap_macsusp_t *)tlv->data;
14659 bcm_bprintf(b, " version: %d\n", macsusp->version);
14660 bcm_bprintf(b, " trap_reason: %d\n", macsusp->trap_reason);
14661 bcm_bprintf(b, " maccontrol: 0x%x\n", macsusp->i32_maccontrol);
14662 bcm_bprintf(b, " maccommand: 0x%x\n", macsusp->i32_maccommand);
14663 bcm_bprintf(b, " macintstatus: 0x%x\n", macsusp->i32_macintstatus);
14665 bcm_bprintf(b, " phydebug[%d]: 0x%x\n", i, macsusp->i32_phydebug[i]);
14667 bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macsusp->i32_psmdebug[i]);
14668 bcm_bprintf(b, " Rxe Status_1: 0x%x\n", macsusp->i16_0x41a);
14669 bcm_bprintf(b, " Rxe Status_2: 0x%x\n", macsusp->i16_0x41c);
14670 bcm_bprintf(b, " PSM BRC: 0x%x\n", macsusp->i16_0x490);
14671 bcm_bprintf(b, " TXE Status: 0x%x\n", macsusp->i16_0x50e);
14672 bcm_bprintf(b, " TXE xmtdmabusy: 0x%x\n", macsusp->i16_0x55e);
14673 bcm_bprintf(b, " TXE XMTfifosuspflush: 0x%x\n", macsusp->i16_0x566);
14674 bcm_bprintf(b, " IFS Stat: 0x%x\n", macsusp->i16_0x690);
14675 bcm_bprintf(b, " IFS MEDBUSY CTR: 0x%x\n", macsusp->i16_0x692);
14676 bcm_bprintf(b, " IFS TX DUR: 0x%x\n", macsusp->i16_0x694);
14677 bcm_bprintf(b, " WEP CTL: 0x%x\n", macsusp->i16_0x7c0);
14678 bcm_bprintf(b, " TXE AQM fifo Ready: 0x%x\n", macsusp->i16_0x838);
14679 bcm_bprintf(b, " MHP status: 0x%x\n", macsusp->i16_0x880);
14680 bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", macsusp->shm_prewds_cnt);
14681 bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macsusp->shm_ucode_dbgst);
14684 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_WAKE);
14687 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_WAKE), tlv->len);
14688 macwake = (const hnd_ext_trap_macenab_t *)tlv->data;
14689 bcm_bprintf(b, " version: 0x%x\n", macwake->version);
14690 bcm_bprintf(b, " trap_reason: 0x%x\n", macwake->trap_reason);
14691 bcm_bprintf(b, " maccontrol: 0x%x\n", macwake->i32_maccontrol);
14692 bcm_bprintf(b, " maccommand: 0x%x\n", macwake->i32_maccommand);
14693 bcm_bprintf(b, " macintstatus: 0x%x\n", macwake->i32_macintstatus);
14695 bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macwake->i32_psmdebug[i]);
14696 bcm_bprintf(b, " clk_ctl_st: 0x%x\n", macwake->i32_clk_ctl_st);
14697 bcm_bprintf(b, " powerctl: 0x%x\n", macwake->i32_powerctl);
14698 bcm_bprintf(b, " gated clock en: 0x%x\n", macwake->i16_0x1a8);
14699 bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", macwake->i16_0x480);
14700 bcm_bprintf(b, " PSM BRC: 0x%x\n", macwake->i16_0x490);
14701 bcm_bprintf(b, " TSF CTL: 0x%x\n", macwake->i16_0x600);
14702 bcm_bprintf(b, " IFS Stat: 0x%x\n", macwake->i16_0x690);
14703 bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", macwake->i16_0x692);
14704 bcm_bprintf(b, " Slow_CTL: 0x%x\n", macwake->i16_0x6a0);
14705 bcm_bprintf(b, " Slow_FRAC: 0x%x\n", macwake->i16_0x6a6);
14706 bcm_bprintf(b, " fast power up delay: 0x%x\n", macwake->i16_0x6a8);
14707 bcm_bprintf(b, " Slow_PER: 0x%x\n", macwake->i16_0x6aa);
14708 bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macwake->shm_ucode_dbgst);
14711 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BUS);
14714 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BUS), tlv->len);
14715 hc = (const bcm_dngl_pcie_hc_t *)tlv->data;
14716 bcm_bprintf(b, " version: 0x%x\n", hc->version);
14717 bcm_bprintf(b, " reserved: 0x%x\n", hc->reserved);
14718 bcm_bprintf(b, " pcie_err_ind_type: 0x%x\n", hc->pcie_err_ind_type);
14719 bcm_bprintf(b, " pcie_flag: 0x%x\n", hc->pcie_flag);
14720 bcm_bprintf(b, " pcie_control_reg: 0x%x\n", hc->pcie_control_reg);
14722 bcm_bprintf(b, " pcie_config_regs[%d]: 0x%x\n", i, hc->pcie_config_regs[i]);
14725 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_HMAP);
14728 hmap = (const pcie_hmapviolation_t *)tlv->data;
14729 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_HMAP), tlv->len);
14730 bcm_bprintf(b, " HMAP Vio Addr Low: 0x%x\n", hmap->hmap_violationaddr_lo);
14731 bcm_bprintf(b, " HMAP Vio Addr Hi: 0x%x\n", hmap->hmap_violationaddr_hi);
14732 bcm_bprintf(b, " HMAP Vio Info: 0x%x\n", hmap->hmap_violation_info);
14735 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MEM_BIT_FLIP);
14738 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MEM_BIT_FLIP), tlv->len);
14739 fbit = (const hnd_ext_trap_fb_mem_err_t *)tlv->data;
14740 bcm_bprintf(b, " version: %d\n", fbit->version);
14741 bcm_bprintf(b, " flip_bit_err_time: %d\n", fbit->flip_bit_err_time);
14752 dhd_prot_t *prot = dhdp->prot;
14757 msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
14762 return -1;
14766 if (dhd_prot_inc_hostactive_devwake_assert(dhdp->bus) != BCME_OK)
14770 DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
14773 if (prot->hostts_req_buf_inuse == TRUE) {
14775 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
14777 dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
14779 return -1;
14787 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
14789 dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
14791 return -1;
14795 ts_req->msg.msg_type = MSG_TYPE_HOSTTIMSTAMP;
14796 ts_req->msg.if_id = 0;
14797 ts_req->msg.flags = ctrl_ring->current_phase;
14798 ts_req->msg.request_id = DHD_H2D_HOSTTS_REQ_PKTID;
14800 ts_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
14801 ctrl_ring->seqnum++;
14803 ts_req->xt_id = xt_id;
14804 ts_req->seqnum = seqnum;
14806 ts_req->input_data_len = htol16(tlv_len);
14807 ts_req->host_buf_addr.high = htol32(PHYSADDRHI(prot->hostts_req_buf.pa));
14808 ts_req->host_buf_addr.low = htol32(PHYSADDRLO(prot->hostts_req_buf.pa));
14810 ts_tlv_buf = (void *) prot->hostts_req_buf.va;
14811 prot->hostts_req_buf_inuse = TRUE;
14814 OSL_CACHE_FLUSH((void *) prot->hostts_req_buf.va, tlv_len);
14821 ts_req->msg.request_id, ts_req->input_data_len,
14822 ts_req->xt_id, ts_req->seqnum));
14828 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
14831 dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
14840 dhd->prot->tx_ts_log_enabled = enable;
14842 return dhd->prot->tx_ts_log_enabled;
14849 dhd->prot->rx_ts_log_enabled = enable;
14851 return dhd->prot->rx_ts_log_enabled;
14858 dhd->prot->no_retry = enable;
14860 return dhd->prot->no_retry;
14867 dhd->prot->no_aggr = enable;
14869 return dhd->prot->no_aggr;
14876 dhd->prot->fixed_rate = enable;
14878 return dhd->prot->fixed_rate;
14885 dhd_prot_t *prot = dhd->prot;
14887 dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
14888 dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
14894 if (dhd->prot->max_tsbufpost > 0)
14908 pktid = ltoh32(resp->msg.request_id);
14909 buflen = ltoh16(resp->buf_len);
14910 seqnum = ltoh16(resp->seqnum);
14913 DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_ctrl_map, pktid,
14918 pktid, buflen, resp->msg.flags, ltoh16(resp->seqnum)));
14920 if (!dhd->prot->cur_ts_bufs_posted) {
14925 dhd->prot->cur_ts_bufs_posted--;
14928 if (dhd->prot->max_tsbufpost > 0) {
14940 PKTSETLEN(dhd->osh, pkt, buflen);
14941 dhd_timesync_handle_fw_timestamp(dhd->ts, PKTDATA(dhd->osh, pkt), buflen, seqnum);
14943 PKTFREE_STATIC(dhd->osh, pkt, TRUE);
14945 PKTFREE(dhd->osh, pkt, TRUE);
14956 return dhdp->prot->ioctl_trans_id;
14964 dhd_prot_t *prot = dhdp->prot;
14965 dhd_dma_buf_t *dma_buf = &prot->snapshot_upload_buf;
14969 msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
14972 if (dhd_prot_inc_hostactive_devwake_assert(dhdp->bus) != BCME_OK)
14976 DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
14984 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
14986 dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
14992 snap_req->cmn_hdr.msg_type = MSG_TYPE_SNAPSHOT_UPLOAD;
14993 snap_req->cmn_hdr.if_id = 0;
14994 snap_req->cmn_hdr.flags = ctrl_ring->current_phase;
14995 snap_req->cmn_hdr.request_id = DHD_H2D_SNAPSHOT_UPLOAD_REQ_PKTID;
14996 snap_req->cmn_hdr.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
14997 ctrl_ring->seqnum++;
15000 snap_req->snapshot_buf_len = htol32(dma_buf->len);
15001 snap_req->snapshot_type = snapshot_type;
15002 snap_req->snapshot_param = snapshot_param;
15003 snap_req->host_buf_addr.high = htol32(PHYSADDRHI(dma_buf->pa));
15004 snap_req->host_buf_addr.low = htol32(PHYSADDRLO(dma_buf->pa));
15006 if (ISALIGNED(dma_buf->va, DMA_ALIGN_LEN) == FALSE) {
15011 memset(dma_buf->va, 0, dma_buf->len);
15012 prot->snapshot_upload_len = 0;
15013 prot->snapshot_type = snapshot_type;
15014 prot->snapshot_cmpl_pending = TRUE;
15017 snap_req->cmn_hdr.request_id, snap_req->snapshot_buf_len,
15018 snap_req->snapshot_type, snap_req->snapshot_param));
15024 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
15027 dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
15038 dhd_prot_t *prot = dhdp->prot;
15039 uint8 *buf = prot->snapshot_upload_buf.va;
15040 uint8 *buf_end = buf + prot->snapshot_upload_len;
15044 if (prot->snapshot_type != snapshot_type) {
15049 if (prot->snapshot_cmpl_pending) {
15059 copy_size = MIN(dst_buf_size, buf_end - (buf + offset));
15064 *is_more = (offset + copy_size < prot->snapshot_upload_len) ?
15073 if (!dhd->hscb_enable) {
15083 *va = dhd->prot->host_scb_buf.va;
15086 *len = dhd->prot->host_scb_buf.len;
15095 if (!dhd->hscb_enable) {
15099 if (dhd->prot->host_scb_buf.va == NULL ||
15100 ((uint64)offset + length > (uint64)dhd->prot->host_scb_buf.len)) {
15104 memcpy(buff, (char*)dhd->prot->host_scb_buf.va + offset, length);
15115 dhd->pkt_thresh = (uint16)val;
15117 val = dhd->pkt_thresh;
15126 dhd->time_thresh = (uint16)val;
15128 val = dhd->time_thresh;
15137 dhd->pkt_expiry = (uint16)val;
15139 val = dhd->pkt_expiry;
15149 dhd->hp2p_enable = (enable & 0xf) ? TRUE : FALSE;
15150 dhd->hp2p_infra_enable = ((enable >> 4) & 0xf) ? TRUE : FALSE;
15158 ret = dhd->hp2p_infra_enable ? 0x1:0x0;
15160 ret |= dhd->hp2p_enable ? 0x1:0x0;
15168 ts_timestamp_t *ts = (ts_timestamp_t *)&rxstatus->ts;
15172 hp2p_info = &dhd->hp2p_info[0];
15173 dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 100;
15175 if (dur1 > (MAX_RX_HIST_BIN - 1)) {
15176 dur1 = MAX_RX_HIST_BIN - 1;
15178 __FUNCTION__, ts->low, ts->high));
15181 hp2p_info->rx_t0[dur1 % MAX_RX_HIST_BIN]++;
15188 ts_timestamp_t *ts = (ts_timestamp_t *)&txstatus->ts;
15189 uint16 flowid = txstatus->compl_hdr.flow_ring_id;
15193 hp2p_flowid = dhd->bus->max_submission_rings -
15194 dhd->bus->max_cmn_rings - flowid + 1;
15195 hp2p_info = &dhd->hp2p_info[hp2p_flowid];
15196 ts = (ts_timestamp_t *)&(txstatus->ts);
15198 dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 1000;
15199 if (dur1 > (MAX_TX_HIST_BIN - 1)) {
15200 dur1 = MAX_TX_HIST_BIN - 1;
15201 DHD_INFO(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
15203 hp2p_info->tx_t0[dur1 % MAX_TX_HIST_BIN]++;
15205 dur2 = (((ts->high >> 10) & 0x3FF) * HP2P_TIME_SCALE) / 1000;
15206 if (dur2 > (MAX_TX_HIST_BIN - 1)) {
15207 dur2 = MAX_TX_HIST_BIN - 1;
15208 DHD_INFO(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
15211 hp2p_info->tx_t1[dur2 % MAX_TX_HIST_BIN]++;
15225 dhdp = hp2p_info->dhd_pub;
15231 __FUNCTION__, ((msgbuf_ring_t *)hp2p_info->ring)->pend_items_count,
15232 hp2p_info->flowid));
15236 dhd_prot_txdata_write_flush(dhdp, hp2p_info->flowid);
15237 hp2p_info->hrtimer_init = FALSE;
15238 hp2p_info->num_timer_limit++;
15251 hp2p_flowid = dhd->bus->max_submission_rings -
15252 dhd->bus->max_cmn_rings - flowid + 1;
15253 hp2p_info = &dhd->hp2p_info[hp2p_flowid];
15255 if (ring->pend_items_count == dhd->pkt_thresh) {
15258 hp2p_info->hrtimer_init = FALSE;
15259 hp2p_info->ring = NULL;
15260 hp2p_info->num_pkt_limit++;
15261 hrtimer_cancel(&hp2p_info->timer);
15265 __FUNCTION__, flowid, hp2p_flowid, dhd->pkt_thresh));
15267 if (hp2p_info->hrtimer_init == FALSE) {
15268 hp2p_info->hrtimer_init = TRUE;
15269 hp2p_info->flowid = flowid;
15270 hp2p_info->dhd_pub = dhd;
15271 hp2p_info->ring = ring;
15272 hp2p_info->num_timer_start++;
15274 hrtimer_start(&hp2p_info->timer,
15275 ktime_set(0, dhd->time_thresh * 1000), HRTIMER_MODE_REL);
15292 txdesc->metadata_buf_len = 0;
15293 txdesc->metadata_buf_addr.high_addr = htol32((ts >> 32) & 0xFFFFFFFF);
15294 txdesc->metadata_buf_addr.low_addr = htol32(ts & 0xFFFFFFFF);
15295 txdesc->exp_time = dhd->pkt_expiry;
15298 __FUNCTION__, txdesc->metadata_buf_addr.high_addr,
15299 txdesc->metadata_buf_addr.low_addr,
15300 txdesc->exp_time));
15311 OSL_DMA_MAP_DUMP(dhdp->osh);
15316 dhdp->memdump_type = DUMP_TYPE_SMMU_FAULT;
15318 dhdp->memdump_enabled = DUMP_MEMFILE;
15321 dhdp->memdump_enabled = DUMP_MEMONLY;
15334 dhd_prot_t *prot = bus->dhd->prot;
15335 uint32 isr_cnt = bus->frs_isr_count % FRS_TRACE_SIZE;
15336 uint32 dpc_cnt = bus->frs_dpc_count % FRS_TRACE_SIZE;
15338 bcm_bprintf(strbuf, "---- %s ------ isr_cnt: %d dpc_cnt %d\n",
15343 if (prot->h2dring_info_subn != NULL && prot->d2hring_info_cpln != NULL) {
15346 if (prot->d2hring_edl != NULL) {
15351 bcm_bprintf(strbuf, "%llu\t%6u-%u\t%6u-%u\t%6u-%u\t%6u-%u\t%6u-%u\t",
15363 if (prot->h2dring_info_subn != NULL && prot->d2hring_info_cpln != NULL) {
15364 bcm_bprintf(strbuf, "%6u-%u\t%6u-%u\t",
15370 if (prot->d2hring_edl != NULL) {
15371 bcm_bprintf(strbuf, "%6u-%u",
15378 bcm_bprintf(strbuf, "--------------------------\n");
15386 dumpsz = bus->frs_isr_count < FRS_TRACE_SIZE ?
15387 bus->frs_isr_count : FRS_TRACE_SIZE;
15392 dhd_dump_bus_flow_ring_status_trace(bus, strbuf, bus->frs_isr_trace,
15393 dumpsz, "ISR FLOW RING TRACE DRD-DWR");
15401 dumpsz = bus->frs_dpc_count < FRS_TRACE_SIZE ?
15402 bus->frs_dpc_count : FRS_TRACE_SIZE;
15407 dhd_dump_bus_flow_ring_status_trace(bus, strbuf, bus->frs_dpc_trace,
15408 dumpsz, "DPC FLOW RING TRACE DRD-DWR");
15413 dhd_prot_t *prot = dhd->prot;
15416 ring = &prot->h2dring_ctrl_subn;
15417 frs_trace->h2d_ctrl_post_drd =
15418 dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
15419 frs_trace->h2d_ctrl_post_dwr =
15420 dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
15422 ring = &prot->d2hring_ctrl_cpln;
15423 frs_trace->d2h_ctrl_cpln_drd =
15424 dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
15425 frs_trace->d2h_ctrl_cpln_dwr =
15426 dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
15428 ring = &prot->h2dring_rxp_subn;
15429 frs_trace->h2d_rx_post_drd =
15430 dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
15431 frs_trace->h2d_rx_post_dwr =
15432 dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
15434 ring = &prot->d2hring_rx_cpln;
15435 frs_trace->d2h_rx_cpln_drd =
15436 dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
15437 frs_trace->d2h_rx_cpln_dwr =
15438 dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
15440 ring = &prot->d2hring_tx_cpln;
15441 frs_trace->d2h_tx_cpln_drd =
15442 dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
15443 frs_trace->d2h_tx_cpln_dwr =
15444 dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
15446 if (dhd->prot->h2dring_info_subn != NULL && dhd->prot->d2hring_info_cpln != NULL) {
15447 ring = prot->h2dring_info_subn;
15448 frs_trace->h2d_info_post_drd =
15449 dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
15450 frs_trace->h2d_info_post_dwr =
15451 dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
15453 ring = prot->d2hring_info_cpln;
15454 frs_trace->d2h_info_cpln_drd =
15455 dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
15456 frs_trace->d2h_info_cpln_dwr =
15457 dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
15459 if (prot->d2hring_edl != NULL) {
15460 ring = prot->d2hring_edl;
15461 frs_trace->d2h_ring_edl_drd =
15462 dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
15463 frs_trace->d2h_ring_edl_dwr =
15464 dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
15472 uint32 cnt = dhd->bus->frs_isr_count % FRS_TRACE_SIZE;
15473 dhd_frs_trace_t *frs_isr_trace = &dhd->bus->frs_isr_trace[cnt];
15477 if ((time_ns_now - time_ns_prev) < 250000) { /* delta less than 250us */
15483 frs_isr_trace->timestamp = OSL_LOCALTIME_NS();
15484 dhd->bus->frs_isr_count ++;
15490 uint32 cnt = dhd->bus->frs_dpc_count % FRS_TRACE_SIZE;
15491 dhd_frs_trace_t *frs_dpc_trace = &dhd->bus->frs_dpc_trace[cnt];
15495 if ((time_ns_now - time_ns_prev) < 250000) { /* delta less than 250us */
15501 frs_dpc_trace->timestamp = OSL_LOCALTIME_NS();
15502 dhd->bus->frs_dpc_count ++;