Lines Matching refs:sw_ring
110 struct igb_rx_entry *sw_ring; /**< address of RX software ring. */ member
182 struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */ member
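The two entry types behind these members are a per-descriptor shadow of the hardware ring: the hardware descriptors carry only DMA addresses and status bits, so sw_ring is where the driver remembers which rte_mbuf backs which slot. A minimal sketch of the layouts, matching the comments above and the usage in the rest of this listing:

#include <stdint.h>
#include <rte_mbuf.h>

struct igb_rx_entry {
        struct rte_mbuf *mbuf;  /* mbuf associated with the RX descriptor */
};

struct igb_tx_entry {
        struct rte_mbuf *mbuf;  /* mbuf associated with the TX descriptor, if any */
        uint16_t next_id;       /* index of the next descriptor in the ring */
        uint16_t last_id;       /* index of the last descriptor of a packet */
};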
396 struct igb_tx_entry *sw_ring; in eth_igb_xmit_pkts() local
419 sw_ring = txq->sw_ring; in eth_igb_xmit_pkts()
422 txe = &sw_ring[tx_id]; in eth_igb_xmit_pkts()
499 tx_end = sw_ring[tx_last].last_id; in eth_igb_xmit_pkts()
505 tx_end = sw_ring[tx_end].next_id; in eth_igb_xmit_pkts()
510 tx_end = sw_ring[tx_end].last_id; in eth_igb_xmit_pkts()
561 txn = &sw_ring[txe->next_id]; in eth_igb_xmit_pkts()
584 txn = &sw_ring[txe->next_id]; in eth_igb_xmit_pkts()
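The eth_igb_xmit_pkts() hits above are the transmit walk: txe is the sw_ring slot being filled, txn is the slot the walk moves to next, and next_id/last_id thread a multi-segment packet through the ring. A hypothetical helper condensing that walk (place_segments is not a function in the driver; credit checks, context descriptors, and the hardware descriptor writes are omitted):

static uint16_t
place_segments(struct igb_tx_entry *sw_ring, uint16_t tx_id,
               uint16_t tx_last, struct rte_mbuf *pkt)
{
        struct igb_tx_entry *txe = &sw_ring[tx_id];
        struct rte_mbuf *m_seg;

        for (m_seg = pkt; m_seg != NULL; m_seg = m_seg->next) {
                struct igb_tx_entry *txn = &sw_ring[txe->next_id];

                if (txe->mbuf != NULL)
                        rte_pktmbuf_free_seg(txe->mbuf); /* recycle old mbuf */
                txe->mbuf = m_seg;      /* slot now owns this segment */
                txe->last_id = tx_last; /* where this packet ends */
                tx_id = txe->next_id;
                txe = txn;
        }
        return tx_id;                   /* first slot after the packet */
}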
830 struct igb_rx_entry *sw_ring; in eth_igb_recv_pkts() local
849 sw_ring = rxq->sw_ring; in eth_igb_recv_pkts()
907 rxe = &sw_ring[rx_id]; in eth_igb_recv_pkts()
913 rte_igb_prefetch(sw_ring[rx_id].mbuf); in eth_igb_recv_pkts()
922 rte_igb_prefetch(&sw_ring[rx_id]); in eth_igb_recv_pkts()
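The eth_igb_recv_pkts() hits show the per-descriptor mbuf swap: a freshly allocated mbuf replaces the one the NIC just filled, so the ring never runs empty, and the two prefetches warm both the packet data and the next sw_ring entry. A fragment sketching the swap (descriptor re-arming and status parsing elided; rte_igb_prefetch() in the listing is a thin wrapper around rte_prefetch0()):

struct igb_rx_entry *rxe = &sw_ring[rx_id];
struct rte_mbuf *nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
struct rte_mbuf *rxm;

if (nmb == NULL)
        break;                  /* no mbufs left: end the burst early */
rxm = rxe->mbuf;                /* the mbuf the NIC just filled */
rxe->mbuf = nmb;                /* ring slot keeps the fresh mbuf */
rx_pkts[nb_rx++] = rxm;         /* hand the filled mbuf to the caller */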
1015 struct igb_rx_entry *sw_ring; in eth_igb_recv_scattered_pkts() local
1036 sw_ring = rxq->sw_ring; in eth_igb_recv_scattered_pkts()
1098 rxe = &sw_ring[rx_id]; in eth_igb_recv_scattered_pkts()
1104 rte_igb_prefetch(sw_ring[rx_id].mbuf); in eth_igb_recv_scattered_pkts()
1113 rte_igb_prefetch(&sw_ring[rx_id]); in eth_igb_recv_scattered_pkts()
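eth_igb_recv_scattered_pkts() performs the same swap, but links the mbufs of a multi-descriptor packet into one chain before returning it. A fragment sketching the chaining (end_of_packet stands in for the descriptor's EOP status bit and is not a real variable in the driver):

if (first_seg == NULL)
        first_seg = rxm;        /* packet starts at this segment */
else
        last_seg->next = rxm;   /* append the segment to the chain */
last_seg = rxm;
if (end_of_packet) {
        rx_pkts[nb_rx++] = first_seg;
        first_seg = NULL;       /* next descriptor starts a new packet */
}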
1283 if (txq->sw_ring != NULL) { in igb_tx_queue_release_mbufs()
1285 if (txq->sw_ring[i].mbuf != NULL) { in igb_tx_queue_release_mbufs()
1286 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); in igb_tx_queue_release_mbufs()
1287 txq->sw_ring[i].mbuf = NULL; in igb_tx_queue_release_mbufs()
1298 rte_free(txq->sw_ring); in igb_tx_queue_release()
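The release pattern above (mirrored for RX by the igb_rx_queue_release_mbufs() hits further down) drains any mbufs still parked in sw_ring before the ring memory itself is handed back with rte_free(). Reassembled from the lines above into one sketch:

static void
igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
{
        uint16_t i;

        if (txq != NULL && txq->sw_ring != NULL) {
                for (i = 0; i < txq->nb_tx_desc; i++) {
                        if (txq->sw_ring[i].mbuf != NULL) {
                                rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
                                txq->sw_ring[i].mbuf = NULL;
                        }
                }
        }
}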
1313 struct igb_tx_entry *sw_ring; in igb_tx_done_cleanup() local
1324 sw_ring = txq->sw_ring; in igb_tx_done_cleanup()
1335 tx_first = sw_ring[txq->tx_tail].last_id; in igb_tx_done_cleanup()
1338 tx_first = sw_ring[tx_first].next_id; in igb_tx_done_cleanup()
1348 tx_last = sw_ring[tx_id].last_id; in igb_tx_done_cleanup()
1350 if (sw_ring[tx_last].mbuf) { in igb_tx_done_cleanup()
1359 tx_next = sw_ring[tx_last].next_id; in igb_tx_done_cleanup()
1365 if (sw_ring[tx_id].mbuf) { in igb_tx_done_cleanup()
1367 sw_ring[tx_id].mbuf); in igb_tx_done_cleanup()
1368 sw_ring[tx_id].mbuf = NULL; in igb_tx_done_cleanup()
1369 sw_ring[tx_id].last_id = tx_id; in igb_tx_done_cleanup()
1373 tx_id = sw_ring[tx_id].next_id; in igb_tx_done_cleanup()
1406 tx_id = sw_ring[tx_id].next_id; in igb_tx_done_cleanup()
1408 if (sw_ring[tx_id].mbuf) in igb_tx_done_cleanup()
1416 if (!sw_ring[tx_id].mbuf) in igb_tx_done_cleanup()
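igb_tx_done_cleanup() frees completed packets a whole packet at a time: last_id locates the packet's final descriptor, and only once that descriptor is done may every slot from first to last be reclaimed. A hypothetical helper condensing the reclaim step (free_done_packet is not a function in the driver; the descriptor-done check is assumed to have already passed):

static uint16_t
free_done_packet(struct igb_tx_entry *sw_ring, uint16_t tx_id)
{
        uint16_t tx_last = sw_ring[tx_id].last_id;

        for (;;) {
                if (sw_ring[tx_id].mbuf != NULL) {
                        rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);
                        sw_ring[tx_id].mbuf = NULL;
                        sw_ring[tx_id].last_id = tx_id; /* slot stands alone again */
                }
                if (tx_id == tx_last)
                        break;
                tx_id = sw_ring[tx_id].next_id;
        }
        return sw_ring[tx_last].next_id; /* first slot of the next packet */
}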
1444 struct igb_tx_entry *txe = txq->sw_ring; in igb_reset_tx_queue()
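igb_reset_tx_queue() is where the next_id/last_id invariants are established: the entries are linked into a circle, and each initially forms a one-descriptor "packet". A minimal sketch of that loop, assuming the init pattern common to this driver family (hardware descriptor init omitted):

uint16_t i, prev = (uint16_t)(txq->nb_tx_desc - 1);
struct igb_tx_entry *txe = txq->sw_ring;

for (i = 0; i < txq->nb_tx_desc; i++) {
        txe[i].mbuf = NULL;
        txe[i].last_id = i;     /* each slot starts as its own packet */
        txe[prev].next_id = i;  /* previous slot points here: a circle */
        prev = i;
}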
1597 txq->sw_ring = rte_zmalloc("txq->sw_ring", in eth_igb_tx_queue_setup()
1600 if (txq->sw_ring == NULL) { in eth_igb_tx_queue_setup()
1605 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); in eth_igb_tx_queue_setup()
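The setup lines show the sw_ring allocation itself (eth_igb_rx_queue_setup() further down mirrors it with igb_rx_entry): one zeroed, cache-aligned software entry per hardware descriptor. Expanded into a sketch around the rte_zmalloc() call visible above:

txq->sw_ring = rte_zmalloc("txq->sw_ring",
                           sizeof(struct igb_tx_entry) * nb_desc,
                           RTE_CACHE_LINE_SIZE);
if (txq->sw_ring == NULL) {
        igb_tx_queue_release(txq);      /* undo the partial setup */
        return -ENOMEM;
}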
1621 if (rxq->sw_ring != NULL) { in igb_rx_queue_release_mbufs()
1623 if (rxq->sw_ring[i].mbuf != NULL) { in igb_rx_queue_release_mbufs()
1624 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); in igb_rx_queue_release_mbufs()
1625 rxq->sw_ring[i].mbuf = NULL; in igb_rx_queue_release_mbufs()
1636 rte_free(rxq->sw_ring); in igb_rx_queue_release()
1800 rxq->sw_ring = rte_zmalloc("rxq->sw_ring", in eth_igb_rx_queue_setup()
1803 if (rxq->sw_ring == NULL) { in eth_igb_rx_queue_setup()
1808 rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr); in eth_igb_rx_queue_setup()
2277 struct igb_rx_entry *rxe = rxq->sw_ring; in igb_alloc_rx_queue_mbufs()
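Finally, igb_alloc_rx_queue_mbufs() populates the freshly reset ring: every descriptor gets an mbuf from the queue's mempool, its DMA address is written into the hardware descriptor, and sw_ring records the pairing. A sketch of that loop, assuming the advanced RX descriptor layout of the igb base code and the rte_mbuf_data_iova_default() accessor of newer DPDK releases:

for (i = 0; i < rxq->nb_rx_desc; i++) {
        struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
        uint64_t dma_addr;

        if (mbuf == NULL)
                return -ENOMEM;         /* ring could not be filled */
        dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
        rxq->rx_ring[i].read.hdr_addr = 0;
        rxq->rx_ring[i].read.pkt_addr = dma_addr;
        rxe[i].mbuf = mbuf;             /* sw_ring remembers the pairing */
}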