Lines Matching refs:txq

258 igbe_set_xmit_ctx(struct igb_tx_queue* txq,  in igbe_set_xmit_ctx()  argument
268 ctx_curr = txq->ctx_curr; in igbe_set_xmit_ctx()
269 ctx_idx = ctx_curr + txq->ctx_start; in igbe_set_xmit_ctx()
328 txq->ctx_cache[ctx_curr].flags = ol_flags; in igbe_set_xmit_ctx()
329 txq->ctx_cache[ctx_curr].tx_offload.data = in igbe_set_xmit_ctx()
331 txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask; in igbe_set_xmit_ctx()
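These hits walk the igb TX path from context-descriptor handling through queue setup, teardown, and hardware init. igbe_set_xmit_ctx() programs an advanced context descriptor and then records which offloads it describes: lines 328-331 cache the ol_flags, the masked offload fields, and the mask itself in txq->ctx_cache[ctx_curr], so later packets with identical offloads can skip building a new context. A minimal sketch of that caching step, using simplified stand-in types (the real union igb_tx_offload also breaks out l2_len/l3_len/vlan fields):

    #include <stdint.h>

    /* Stand-ins for the driver's context-cache types (abbreviated). */
    union igb_tx_offload {
        uint64_t data;
    };

    struct igb_advctx_info {
        uint64_t flags;                       /* ol_flags this context serves  */
        union igb_tx_offload tx_offload;      /* cached, already-masked fields */
        union igb_tx_offload tx_offload_mask; /* which offload bits must match */
    };

    static void
    cache_ctx(struct igb_advctx_info *slot, uint64_t ol_flags,
              union igb_tx_offload tx_offload, union igb_tx_offload mask)
    {
        slot->flags = ol_flags;
        /* Store only the bits the mask cares about, mirroring line 329. */
        slot->tx_offload.data = mask.data & tx_offload.data;
        slot->tx_offload_mask = mask;
    }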
345 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags, in what_advctx_update() argument
349 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) && in what_advctx_update()
350 (txq->ctx_cache[txq->ctx_curr].tx_offload.data == in what_advctx_update()
351 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) { in what_advctx_update()
352 return txq->ctx_curr; in what_advctx_update()
356 txq->ctx_curr ^= 1; in what_advctx_update()
357 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) && in what_advctx_update()
358 (txq->ctx_cache[txq->ctx_curr].tx_offload.data == in what_advctx_update()
359 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) { in what_advctx_update()
360 return txq->ctx_curr; in what_advctx_update()
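what_advctx_update() is the matching lookup: probe the active cache slot, and on a miss flip ctx_curr with an XOR (line 356) to probe the only other slot before giving up. A hedged reconstruction of the control flow, reusing the stand-in types from the previous sketch (returning IGB_CTX_NUM as a "no match, rebuild" sentinel is an assumption based on these fragments):

    #define IGB_CTX_NUM 2   /* two hardware context slots per queue (assumed) */

    struct igb_txq_sketch {
        struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
        uint32_t ctx_curr;
    };

    static uint32_t
    ctx_lookup(struct igb_txq_sketch *txq, uint64_t flags,
               union igb_tx_offload tx_offload)
    {
        struct igb_advctx_info *c = &txq->ctx_cache[txq->ctx_curr];

        /* Fast path: the currently active slot still matches. */
        if (c->flags == flags &&
            c->tx_offload.data == (c->tx_offload_mask.data & tx_offload.data))
            return txq->ctx_curr;

        /* Miss: toggle 0 <-> 1 and probe the other slot. */
        txq->ctx_curr ^= 1;
        c = &txq->ctx_cache[txq->ctx_curr];
        if (c->flags == flags &&
            c->tx_offload.data == (c->tx_offload_mask.data & tx_offload.data))
            return txq->ctx_curr;

        /* Double miss: the caller must emit a fresh context descriptor;
         * ctx_curr now names the slot it will overwrite. */
        return IGB_CTX_NUM;
    }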
395 struct igb_tx_queue *txq; in eth_igb_xmit_pkts() local
418 txq = tx_queue; in eth_igb_xmit_pkts()
419 sw_ring = txq->sw_ring; in eth_igb_xmit_pkts()
420 txr = txq->tx_ring; in eth_igb_xmit_pkts()
421 tx_id = txq->tx_tail; in eth_igb_xmit_pkts()
452 ctx = what_advctx_update(txq, tx_ol_req, tx_offload); in eth_igb_xmit_pkts()
455 ctx = txq->ctx_curr + txq->ctx_start; in eth_igb_xmit_pkts()
458 if (tx_last >= txq->nb_tx_desc) in eth_igb_xmit_pkts()
459 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc); in eth_igb_xmit_pkts()
463 (unsigned) txq->port_id, in eth_igb_xmit_pkts()
464 (unsigned) txq->queue_id, in eth_igb_xmit_pkts()
542 cmd_type_len = txq->txd_type | in eth_igb_xmit_pkts()
569 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload); in eth_igb_xmit_pkts()
626 E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id); in eth_igb_xmit_pkts()
628 (unsigned) txq->port_id, (unsigned) txq->queue_id, in eth_igb_xmit_pkts()
630 txq->tx_tail = tx_id; in eth_igb_xmit_pkts()
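eth_igb_xmit_pkts() ties the pieces together: line 452 asks the cache which context to reuse, line 455 turns the per-queue slot into a global context index by adding ctx_start (set per queue at line 1469), lines 458-459 wrap the last-descriptor index around the ring without a modulo, and line 626 publishes the new tail to the TDT doorbell before mirroring it into txq->tx_tail. The wraparound needs only one subtraction because a packet never spans more than one full ring; a standalone illustration:

    #include <stdint.h>
    #include <assert.h>

    /* Wrap a ring index with one compare-and-subtract instead of '%'. */
    static uint16_t
    ring_wrap(uint16_t idx, uint16_t nb_desc)
    {
        if (idx >= nb_desc)
            idx = (uint16_t)(idx - nb_desc);
        return idx;
    }

    int main(void)
    {
        assert(ring_wrap(5, 512) == 5);    /* in range: unchanged */
        assert(ring_wrap(514, 512) == 2);  /* one wrap: 514 - 512 */
        return 0;
    }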
1279 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq) in igb_tx_queue_release_mbufs() argument
1283 if (txq->sw_ring != NULL) { in igb_tx_queue_release_mbufs()
1284 for (i = 0; i < txq->nb_tx_desc; i++) { in igb_tx_queue_release_mbufs()
1285 if (txq->sw_ring[i].mbuf != NULL) { in igb_tx_queue_release_mbufs()
1286 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); in igb_tx_queue_release_mbufs()
1287 txq->sw_ring[i].mbuf = NULL; in igb_tx_queue_release_mbufs()
1294 igb_tx_queue_release(struct igb_tx_queue *txq) in igb_tx_queue_release() argument
1296 if (txq != NULL) { in igb_tx_queue_release()
1297 igb_tx_queue_release_mbufs(txq); in igb_tx_queue_release()
1298 rte_free(txq->sw_ring); in igb_tx_queue_release()
1299 rte_memzone_free(txq->mz); in igb_tx_queue_release()
1300 rte_free(txq); in igb_tx_queue_release()
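The two release helpers split teardown into reusable halves: igb_tx_queue_release_mbufs() frees one mbuf segment per software-ring slot and NULLs the pointer (each slot owns a single segment, hence rte_pktmbuf_free_seg() rather than a whole-chain free), and igb_tx_queue_release() then drops the ring array, the descriptor memzone, and the queue itself. Because every step tolerates an empty or NULL resource, the same path can unwind a partially constructed queue. A generic, runnable model of that idempotent-teardown pattern (libc calls stand in for rte_free/rte_memzone_free):

    #include <stdlib.h>

    struct queue {
        void *desc_ring;  /* stands in for the descriptor memzone */
        void *sw_ring;    /* stands in for txq->sw_ring           */
    };

    static void
    queue_release(struct queue *q)
    {
        if (q == NULL)
            return;             /* mirrors the NULL check at line 1296 */
        free(q->sw_ring);       /* free(NULL) is a no-op, so a partial */
        free(q->desc_ring);     /* setup unwinds through the same path */
        free(q);
    }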
1311 igb_tx_done_cleanup(struct igb_tx_queue *txq, uint32_t free_cnt) in igb_tx_done_cleanup() argument
1321 if (!txq) in igb_tx_done_cleanup()
1324 sw_ring = txq->sw_ring; in igb_tx_done_cleanup()
1325 txr = txq->tx_ring; in igb_tx_done_cleanup()
1335 tx_first = sw_ring[txq->tx_tail].last_id; in igb_tx_done_cleanup()
1425 eth_igb_tx_done_cleanup(void *txq, uint32_t free_cnt) in eth_igb_tx_done_cleanup() argument
1427 return igb_tx_done_cleanup(txq, free_cnt); in eth_igb_tx_done_cleanup()
1431 igb_reset_tx_queue_stat(struct igb_tx_queue *txq) in igb_reset_tx_queue_stat() argument
1433 txq->tx_head = 0; in igb_reset_tx_queue_stat()
1434 txq->tx_tail = 0; in igb_reset_tx_queue_stat()
1435 txq->ctx_curr = 0; in igb_reset_tx_queue_stat()
1436 memset((void*)&txq->ctx_cache, 0, in igb_reset_tx_queue_stat()
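igb_reset_tx_queue_stat() rewinds the queue's software state: head and tail back to descriptor 0, and the context cache cleared so no stale offload context can match after a restart. A self-contained restatement with the field meanings spelled out (the stand-in struct abbreviates the driver's igb_tx_queue):

    #include <stdint.h>
    #include <string.h>

    struct txq_state {                 /* minimal stand-in fields */
        uint16_t tx_head, tx_tail;
        uint32_t ctx_curr;
        struct { uint64_t flags, data, mask; } ctx_cache[2];
    };

    static void
    reset_tx_state(struct txq_state *txq)
    {
        txq->tx_head = 0;   /* next descriptor hardware will complete    */
        txq->tx_tail = 0;   /* next descriptor software will fill        */
        txq->ctx_curr = 0;  /* start probing the context cache at slot 0 */
        /* Zero both cache slots so the first packet always rebuilds. */
        memset(&txq->ctx_cache, 0, sizeof(txq->ctx_cache));
    }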
1441 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev) in igb_reset_tx_queue() argument
1444 struct igb_tx_entry *txe = txq->sw_ring; in igb_reset_tx_queue()
1450 for (i = 0; i < txq->nb_tx_desc; i++) { in igb_reset_tx_queue()
1451 txq->tx_ring[i] = zeroed_desc; in igb_reset_tx_queue()
1455 prev = (uint16_t)(txq->nb_tx_desc - 1); in igb_reset_tx_queue()
1456 for (i = 0; i < txq->nb_tx_desc; i++) { in igb_reset_tx_queue()
1457 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]); in igb_reset_tx_queue()
1466 txq->txd_type = E1000_ADVTXD_DTYP_DATA; in igb_reset_tx_queue()
1469 txq->ctx_start = txq->queue_id * IGB_CTX_NUM; in igb_reset_tx_queue()
1471 igb_reset_tx_queue_stat(txq); in igb_reset_tx_queue()
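igb_reset_tx_queue() then rebuilds the rings: every descriptor is zeroed (line 1451) and, in the real driver, stamped "done" so transmission can start immediately, and the software entries are chained into a circular list by starting prev at the last slot, so entry nb_tx_desc - 1 points forward to entry 0. A runnable sketch of that circular linking:

    #include <stdint.h>
    #include <assert.h>

    struct tx_entry {         /* simplified stand-in for igb_tx_entry */
        uint16_t next_id;     /* forward link around the ring         */
        uint16_t last_id;     /* index of the chain's last descriptor */
    };

    static void
    link_ring(struct tx_entry *txe, uint16_t nb_desc)
    {
        uint16_t prev = (uint16_t)(nb_desc - 1);
        for (uint16_t i = 0; i < nb_desc; i++) {
            txe[i].last_id = i;     /* each entry initially ends itself */
            txe[prev].next_id = i;  /* previous slot links forward to i */
            prev = i;
        }
    }

    int main(void)
    {
        struct tx_entry ring[4];
        link_ring(ring, 4);
        assert(ring[3].next_id == 0);  /* the list wraps back to slot 0 */
        assert(ring[0].next_id == 1);
        return 0;
    }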
1509 struct igb_tx_queue *txq; in eth_igb_tx_queue_setup() local
1552 txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue), in eth_igb_tx_queue_setup()
1554 if (txq == NULL) in eth_igb_tx_queue_setup()
1566 igb_tx_queue_release(txq); in eth_igb_tx_queue_setup()
1570 txq->mz = tz; in eth_igb_tx_queue_setup()
1571 txq->nb_tx_desc = nb_desc; in eth_igb_tx_queue_setup()
1572 txq->pthresh = tx_conf->tx_thresh.pthresh; in eth_igb_tx_queue_setup()
1573 txq->hthresh = tx_conf->tx_thresh.hthresh; in eth_igb_tx_queue_setup()
1574 txq->wthresh = tx_conf->tx_thresh.wthresh; in eth_igb_tx_queue_setup()
1575 if (txq->wthresh > 0 && hw->mac.type == e1000_82576) in eth_igb_tx_queue_setup()
1576 txq->wthresh = 1; in eth_igb_tx_queue_setup()
1577 txq->queue_id = queue_idx; in eth_igb_tx_queue_setup()
1578 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ? in eth_igb_tx_queue_setup()
1580 txq->port_id = dev->data->port_id; in eth_igb_tx_queue_setup()
1582 txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx)); in eth_igb_tx_queue_setup()
1583 txq->tx_ring_phys_addr = tz->iova; in eth_igb_tx_queue_setup()
1585 txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr; in eth_igb_tx_queue_setup()
1588 txq->tx_ring_phys_addr = igb_gbd_addr_t_p[index]; in eth_igb_tx_queue_setup()
1589 txq->tx_ring = (union e1000_adv_tx_desc *)igb_gbd_addr_t_v[index]; in eth_igb_tx_queue_setup()
1593 txq->tx_ring_phys_addr, in eth_igb_tx_queue_setup()
1594 txq->tx_ring); in eth_igb_tx_queue_setup()
1597 txq->sw_ring = rte_zmalloc("txq->sw_ring", in eth_igb_tx_queue_setup()
1600 if (txq->sw_ring == NULL) { in eth_igb_tx_queue_setup()
1601 igb_tx_queue_release(txq); in eth_igb_tx_queue_setup()
1605 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); in eth_igb_tx_queue_setup()
1607 igb_reset_tx_queue(txq, dev); in eth_igb_tx_queue_setup()
1610 dev->data->tx_queues[queue_idx] = txq; in eth_igb_tx_queue_setup()
1611 txq->offloads = offloads; in eth_igb_tx_queue_setup()
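eth_igb_tx_queue_setup() allocates the queue structure, reserves the descriptor memzone, copies thresholds from tx_conf (clamping a nonzero wthresh to 1 on the 82576, lines 1575-1576, which reads like a hardware-specific workaround), resolves reg_idx for the SR-IOV case, and allocates the software ring, releasing everything already attached if any step fails (lines 1566 and 1601). From the application side those fields arrive through the generic ethdev call; a hedged usage sketch (the threshold values are arbitrary examples):

    #include <rte_ethdev.h>

    static int
    setup_one_txq(uint16_t port_id, uint16_t queue_id)
    {
        struct rte_eth_txconf txconf = {
            .tx_thresh = {
                .pthresh = 8,   /* prefetch threshold   -> txq->pthresh */
                .hthresh = 1,   /* host threshold       -> txq->hthresh */
                .wthresh = 16,  /* write-back threshold -> txq->wthresh */
            },
        };

        /* 512 descriptors, allocated on the port's NUMA socket. */
        return rte_eth_tx_queue_setup(port_id, queue_id, 512,
                                      rte_eth_dev_socket_id(port_id),
                                      &txconf);
    }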
1866 struct igb_tx_queue *txq = tx_queue; in eth_igb_tx_descriptor_status() local
1870 if (unlikely(offset >= txq->nb_tx_desc)) in eth_igb_tx_descriptor_status()
1873 desc = txq->tx_tail + offset; in eth_igb_tx_descriptor_status()
1874 if (desc >= txq->nb_tx_desc) in eth_igb_tx_descriptor_status()
1875 desc -= txq->nb_tx_desc; in eth_igb_tx_descriptor_status()
1877 status = &txq->tx_ring[desc].wb.status; in eth_igb_tx_descriptor_status()
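eth_igb_tx_descriptor_status() maps a caller-supplied offset (relative to the current tail) onto a physical descriptor with the same single-subtraction wrap, then inspects the write-back status word to see whether hardware has completed it. A standalone model (the DD bit value is an assumption standing in for E1000_TXD_STAT_DD):

    #include <stdint.h>

    #define TXD_STAT_DD 0x1u   /* assumed 'descriptor done' bit */

    /* Returns 1 if hw finished the descriptor, 0 if it is pending,
     * -1 if the offset falls outside the ring. */
    static int
    tx_descriptor_done(const volatile uint32_t *status_ring, uint16_t nb_desc,
                       uint16_t tx_tail, uint16_t offset)
    {
        if (offset >= nb_desc)
            return -1;
        uint16_t desc = (uint16_t)(tx_tail + offset);
        if (desc >= nb_desc)
            desc = (uint16_t)(desc - nb_desc);  /* wrap once, no division */
        return (status_ring[desc] & TXD_STAT_DD) != 0;
    }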
1888 struct igb_tx_queue *txq; in igb_dev_clear_queues() local
1892 txq = dev->data->tx_queues[i]; in igb_dev_clear_queues()
1893 if (txq != NULL) { in igb_dev_clear_queues()
1894 igb_tx_queue_release_mbufs(txq); in igb_dev_clear_queues()
1895 igb_reset_tx_queue(txq, dev); in igb_dev_clear_queues()
2617 struct igb_tx_queue *txq; in eth_igb_tx_init() local
2627 txq = dev->data->tx_queues[i]; in eth_igb_tx_init()
2628 bus_addr = txq->tx_ring_phys_addr; in eth_igb_tx_init()
2630 E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx), in eth_igb_tx_init()
2631 txq->nb_tx_desc * in eth_igb_tx_init()
2633 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx), in eth_igb_tx_init()
2635 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr); in eth_igb_tx_init()
2638 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0); in eth_igb_tx_init()
2639 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0); in eth_igb_tx_init()
2642 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx)); in eth_igb_tx_init()
2643 txdctl |= txq->pthresh & 0x1F; in eth_igb_tx_init()
2644 txdctl |= ((txq->hthresh & 0x1F) << 8); in eth_igb_tx_init()
2645 txdctl |= ((txq->wthresh & 0x1F) << 16); in eth_igb_tx_init()
2647 E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl); in eth_igb_tx_init()
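eth_igb_tx_init() programs each queue's ring address and length (TDBAL/TDBAH/TDLEN), zeroes head and tail (TDH/TDT), then packs the three thresholds into TXDCTL: pthresh in bits 0-4, hthresh in bits 8-12, wthresh in bits 16-20, each masked to 5 bits. The VF path at lines 2839-2851 packs the same fields. A runnable check of that bit layout (the queue-enable bit position is an assumption standing in for E1000_TXDCTL_QUEUE_ENABLE):

    #include <stdint.h>
    #include <assert.h>

    #define TXDCTL_QUEUE_ENABLE (1u << 25)  /* assumed enable-bit position */

    static uint32_t
    pack_txdctl(uint8_t pthresh, uint8_t hthresh, uint8_t wthresh)
    {
        uint32_t txdctl = 0;
        txdctl |= pthresh & 0x1F;                   /* bits 0-4   */
        txdctl |= (uint32_t)(hthresh & 0x1F) << 8;  /* bits 8-12  */
        txdctl |= (uint32_t)(wthresh & 0x1F) << 16; /* bits 16-20 */
        txdctl |= TXDCTL_QUEUE_ENABLE;
        return txdctl;
    }

    int main(void)
    {
        assert(pack_txdctl(8, 1, 16) ==
               (8u | (1u << 8) | (16u << 16) | TXDCTL_QUEUE_ENABLE));
        return 0;
    }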
2814 struct igb_tx_queue *txq; in eth_igbvf_tx_init() local
2824 txq = dev->data->tx_queues[i]; in eth_igbvf_tx_init()
2825 bus_addr = txq->tx_ring_phys_addr; in eth_igbvf_tx_init()
2827 txq->nb_tx_desc * in eth_igbvf_tx_init()
2839 txdctl |= txq->pthresh & 0x1F; in eth_igbvf_tx_init()
2840 txdctl |= ((txq->hthresh & 0x1F) << 8); in eth_igbvf_tx_init()
2851 txdctl |= ((txq->wthresh & 0x1F) << 16); in eth_igbvf_tx_init()
2879 struct igb_tx_queue *txq; in igb_txq_info_get() local
2881 txq = dev->data->tx_queues[queue_id]; in igb_txq_info_get()
2883 qinfo->nb_desc = txq->nb_tx_desc; in igb_txq_info_get()
2885 qinfo->conf.tx_thresh.pthresh = txq->pthresh; in igb_txq_info_get()
2886 qinfo->conf.tx_thresh.hthresh = txq->hthresh; in igb_txq_info_get()
2887 qinfo->conf.tx_thresh.wthresh = txq->wthresh; in igb_txq_info_get()
2888 qinfo->conf.offloads = txq->offloads; in igb_txq_info_get()
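igb_txq_info_get() is the read-back side of queue setup: it copies the live per-queue values into the generic rte_eth_txq_info, so callers see the configuration actually in effect (including the 82576 wthresh clamp) rather than what they originally requested. A hedged usage sketch of querying those values through the public API:

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    print_txq_config(uint16_t port_id, uint16_t queue_id)
    {
        struct rte_eth_txq_info qinfo;

        if (rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo) != 0)
            return;  /* port or queue not available */
        printf("txq %u: %u descriptors, offloads 0x%" PRIx64 "\n",
               queue_id, qinfo.nb_desc, qinfo.conf.offloads);
    }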