Lines matching refs: txq (hfi1 IPoIB TX path)
38 struct hfi1_ipoib_txq *txq; member
46 struct hfi1_ipoib_txq *txq; member
58 static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_used() argument
60 return hfi1_ipoib_txreqs(txq->sent_txreqs, in hfi1_ipoib_used()
61 atomic64_read(&txq->complete_txreqs)); in hfi1_ipoib_used()
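
Note: the in-flight count is the difference of two monotonically increasing counters. sent_txreqs is written only on the send path and complete_txreqs only on the completion path, so just the completion counter needs atomicity. A minimal userspace sketch of the accounting (stdatomic; the struct and names are illustrative, not the driver's):

    #include <stdatomic.h>
    #include <stdint.h>

    struct txq_model {
        uint64_t sent;                  /* send path only, no atomics needed */
        atomic_uint_fast64_t completed; /* completion path runs concurrently */
    };

    /* outstanding = sent - completed; both counters only grow, so unsigned
     * wraparound keeps the difference correct */
    static uint64_t txq_used(struct txq_model *q)
    {
        return q->sent - (uint64_t)atomic_load(&q->completed);
    }
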
64 static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_stop_txq() argument
66 if (atomic_inc_return(&txq->stops) == 1) in hfi1_ipoib_stop_txq()
67 netif_stop_subqueue(txq->priv->netdev, txq->q_idx); in hfi1_ipoib_stop_txq()
70 static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_wake_txq() argument
72 if (atomic_dec_and_test(&txq->stops)) in hfi1_ipoib_wake_txq()
73 netif_wake_subqueue(txq->priv->netdev, txq->q_idx); in hfi1_ipoib_wake_txq()
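
Note: stops is a counted stop. Every stop reason increments it, and only the first increment parks the netdev subqueue; every cleared reason decrements it, and only the last wakes the queue. Two reasons feed it in this file: ring_full and no_desc. Sketch of the pattern (stand-in stubs for the netif calls):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int stops;  /* one count per active stop reason */

    static void subqueue_stop(void) { puts("stopped"); } /* netif_stop_subqueue() stand-in */
    static void subqueue_wake(void) { puts("woken"); }   /* netif_wake_subqueue() stand-in */

    static void txq_stop(void)
    {
        /* atomic_fetch_add returns the old value: only 0 -> 1 stops */
        if (atomic_fetch_add(&stops, 1) == 0)
            subqueue_stop();
    }

    static void txq_wake(void)
    {
        /* only the final 1 -> 0 transition wakes the queue */
        if (atomic_fetch_sub(&stops, 1) == 1)
            subqueue_wake();
    }
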
76 static uint hfi1_ipoib_ring_hwat(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_ring_hwat() argument
78 return min_t(uint, txq->priv->netdev->tx_queue_len, in hfi1_ipoib_ring_hwat()
79 txq->tx_ring.max_items - 1); in hfi1_ipoib_ring_hwat()
82 static uint hfi1_ipoib_ring_lwat(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_ring_lwat() argument
84 return min_t(uint, txq->priv->netdev->tx_queue_len, in hfi1_ipoib_ring_lwat()
85 txq->tx_ring.max_items) >> 1; in hfi1_ipoib_ring_lwat()
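
Note: both watermarks derive from the smaller of the netdev tx_queue_len and the ring size. The high watermark reserves one slot (max_items - 1, the usual full-versus-empty disambiguation for a circular buffer), and the low watermark sits at half depth so the queue does not bounce between stopped and running on every completion. Sketch:

    static unsigned int umin(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    /* stop threshold: outstanding sends may not reach the ring's capacity */
    static unsigned int ring_hwat(unsigned int tx_queue_len, unsigned int max_items)
    {
        return umin(tx_queue_len, max_items - 1);
    }

    /* wake threshold: half the effective depth, for stop/wake hysteresis */
    static unsigned int ring_lwat(unsigned int tx_queue_len, unsigned int max_items)
    {
        return umin(tx_queue_len, max_items) >> 1;
    }
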
88 static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_check_queue_depth() argument
90 ++txq->sent_txreqs; in hfi1_ipoib_check_queue_depth()
91 if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) && in hfi1_ipoib_check_queue_depth()
92 !atomic_xchg(&txq->ring_full, 1)) in hfi1_ipoib_check_queue_depth()
93 hfi1_ipoib_stop_txq(txq); in hfi1_ipoib_check_queue_depth()
96 static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_check_queue_stopped() argument
98 struct net_device *dev = txq->priv->netdev; in hfi1_ipoib_check_queue_stopped()
114 if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) && in hfi1_ipoib_check_queue_stopped()
115 atomic_xchg(&txq->ring_full, 0)) in hfi1_ipoib_check_queue_stopped()
116 hfi1_ipoib_wake_txq(txq); in hfi1_ipoib_check_queue_stopped()
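
Note: both threshold checks wrap the counted stop/wake in atomic_xchg on ring_full, so the 0 -> 1 and 1 -> 0 transitions each happen exactly once even with concurrent senders and completions. Sketch, reusing txq_stop()/txq_wake() from above:

    #include <stdatomic.h>
    #include <stdint.h>

    static atomic_int ring_full;  /* 0 = flowing, 1 = stopped as "ring full" */

    /* send path: runs after every submission */
    static void check_queue_depth(uint64_t used, unsigned int hwat)
    {
        /* atomic_exchange returns the old value, so exactly one caller
         * makes the 0 -> 1 transition and issues the counted stop */
        if (used >= hwat && !atomic_exchange(&ring_full, 1))
            txq_stop();
    }

    /* completion path: runs after reaping finished descriptors */
    static void check_queue_stopped(uint64_t used, unsigned int lwat)
    {
        if (used < lwat && atomic_exchange(&ring_full, 0))
            txq_wake();
    }
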
130 le64_to_cpu(tx->sdma_hdr.pbc), tx->txq->q_idx, in hfi1_ipoib_free_tx()
131 tx->txq->sde->this_idx); in hfi1_ipoib_free_tx()
139 static int hfi1_ipoib_drain_tx_ring(struct hfi1_ipoib_txq *txq, int budget) in hfi1_ipoib_drain_tx_ring() argument
141 struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring; in hfi1_ipoib_drain_tx_ring()
162 atomic64_add(work_done, &txq->complete_txreqs); in hfi1_ipoib_drain_tx_ring()
169 hfi1_ipoib_check_queue_stopped(txq); in hfi1_ipoib_drain_tx_ring()
177 struct hfi1_ipoib_txq *txq = &priv->txqs[napi - priv->tx_napis]; in hfi1_ipoib_process_tx_ring() local
179 int work_done = hfi1_ipoib_drain_tx_ring(txq, budget); in hfi1_ipoib_process_tx_ring()
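
Note: completions are reaped under a NAPI budget. hfi1_ipoib_drain_tx_ring() frees up to budget entries, credits them to complete_txreqs in a single atomic add, and re-evaluates the stopped state; hfi1_ipoib_process_tx_ring() recovers its txq from the napi pointer by index arithmetic (napi - priv->tx_napis). Budgeted-drain sketch reusing the model above (ring_pop_completed() is a hypothetical stand-in for consuming one finished descriptor):

    #include <stdbool.h>

    static bool ring_pop_completed(struct txq_model *q) { (void)q; return false; } /* stub */

    static int drain_tx_ring(struct txq_model *q, int budget)
    {
        int work_done = 0;

        while (work_done < budget && ring_pop_completed(q))
            work_done++;

        /* one atomic add for the whole batch, then maybe wake the queue */
        atomic_fetch_add(&q->completed, work_done);
        check_queue_stopped(txq_used(q), ring_lwat(1024, 256)); /* example sizes */
        return work_done;  /* work_done < budget means the ring is empty */
    }
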
189 struct hfi1_ipoib_circ_buf *tx_ring = &tx->txq->tx_ring; in hfi1_ipoib_add_tx()
205 napi_schedule(tx->txq->napi); in hfi1_ipoib_add_tx()
207 struct hfi1_ipoib_txq *txq = tx->txq; in hfi1_ipoib_add_tx() local
212 atomic64_inc(&txq->complete_txreqs); in hfi1_ipoib_add_tx()
213 dd_dev_dbg(priv->dd, "txq %d full.\n", txq->q_idx); in hfi1_ipoib_add_tx()
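
Note: on the completion side, hfi1_ipoib_add_tx() publishes the finished tx into the per-queue circular buffer and schedules NAPI; if the ring is unexpectedly full, it accounts the completion inline (atomic64_inc of complete_txreqs) and logs "txq %d full". A single-producer push sketch with that fallback in mind (layout, locking, and memory ordering are assumed, not the driver's):

    #include <stdbool.h>

    struct ring_model {
        void **items;
        unsigned long head;       /* producer index */
        unsigned long tail;       /* consumer index */
        unsigned long max_items;
    };

    /* returns false when full so the caller can account the completion
     * inline instead of losing it */
    static bool ring_push(struct ring_model *r, void *item)
    {
        unsigned long next = (r->head + 1) % r->max_items;

        if (next == r->tail)
            return false;
        r->items[r->head] = item;
        r->head = next;  /* a release store in the real, lockless case */
        return true;
    }
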
358 ohdr->bth[2] = cpu_to_be32(mask_psn((u32)txp->txq->sent_txreqs)); in hfi1_ipoib_build_ib_tx_headers()
393 tx->txq = txp->txq; in hfi1_ipoib_send_dma_common()
401 if (txp->txq->flow.as_int != txp->flow.as_int) { in hfi1_ipoib_send_dma_common()
402 txp->txq->flow.tx_queue = txp->flow.tx_queue; in hfi1_ipoib_send_dma_common()
403 txp->txq->flow.sc5 = txp->flow.sc5; in hfi1_ipoib_send_dma_common()
404 txp->txq->sde = in hfi1_ipoib_send_dma_common()
420 struct hfi1_ipoib_txq *txq) in hfi1_ipoib_submit_tx_list() argument
425 ret = sdma_send_txlist(txq->sde, in hfi1_ipoib_submit_tx_list()
426 iowait_get_ib_work(&txq->wait), in hfi1_ipoib_submit_tx_list()
427 &txq->tx_list, in hfi1_ipoib_submit_tx_list()
432 dd_dev_warn(txq->priv->dd, "cannot send skb tx list, err %d.\n", ret); in hfi1_ipoib_submit_tx_list()
438 struct hfi1_ipoib_txq *txq) in hfi1_ipoib_flush_tx_list() argument
442 if (!list_empty(&txq->tx_list)) { in hfi1_ipoib_flush_tx_list()
444 ret = hfi1_ipoib_submit_tx_list(dev, txq); in hfi1_ipoib_flush_tx_list()
454 static int hfi1_ipoib_submit_tx(struct hfi1_ipoib_txq *txq, in hfi1_ipoib_submit_tx() argument
459 ret = sdma_send_txreq(txq->sde, in hfi1_ipoib_submit_tx()
460 iowait_get_ib_work(&txq->wait), in hfi1_ipoib_submit_tx()
462 txq->pkts_sent); in hfi1_ipoib_submit_tx()
464 txq->pkts_sent = true; in hfi1_ipoib_submit_tx()
465 iowait_starve_clear(txq->pkts_sent, &txq->wait); in hfi1_ipoib_submit_tx()
476 struct hfi1_ipoib_txq *txq = txp->txq; in hfi1_ipoib_send_dma_single() local
494 ret = hfi1_ipoib_submit_tx(txq, tx); in hfi1_ipoib_send_dma_single()
500 hfi1_ipoib_check_queue_depth(txq); in hfi1_ipoib_send_dma_single()
504 txq->pkts_sent = false; in hfi1_ipoib_send_dma_single()
521 struct hfi1_ipoib_txq *txq = txp->txq; in hfi1_ipoib_send_dma_list() local
525 if (txq->flow.as_int != txp->flow.as_int) { in hfi1_ipoib_send_dma_list()
528 ret = hfi1_ipoib_flush_tx_list(dev, txq); in hfi1_ipoib_send_dma_list()
550 list_add_tail(&tx->txreq.list, &txq->tx_list); in hfi1_ipoib_send_dma_list()
552 hfi1_ipoib_check_queue_depth(txq); in hfi1_ipoib_send_dma_list()
559 (void)hfi1_ipoib_flush_tx_list(dev, txq); in hfi1_ipoib_send_dma_list()
597 txp.txq = &priv->txqs[skb_get_queue_mapping(skb)]; in hfi1_ipoib_send_dma()
603 if (netdev_xmit_more() || !list_empty(&txp.txq->tx_list)) in hfi1_ipoib_send_dma()
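
Note: hfi1_ipoib_send_dma() picks the txq from the skb's queue mapping, then chooses between the single-descriptor path and the batched list path. Batching applies when netdev_xmit_more() reports more packets on the way, or when tx_list already holds a backlog that must stay ordered. Dispatch sketch (the two helpers are hypothetical stubs):

    #include <stdbool.h>

    struct pkt;  /* opaque packet, stands in for struct sk_buff */

    static int send_dma_list(struct txq_model *q, struct pkt *p)   { (void)q; (void)p; return 0; }
    static int send_dma_single(struct txq_model *q, struct pkt *p) { (void)q; (void)p; return 0; }

    static int send_dma(struct txq_model *q, struct pkt *p,
                        bool xmit_more, bool list_nonempty)
    {
        /* keep ordering: once a backlog exists, everything goes via the list */
        if (xmit_more || list_nonempty)
            return send_dma_list(q, p);
        return send_dma_single(q, p);
    }
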
623 struct hfi1_ipoib_txq *txq = in hfi1_ipoib_sdma_sleep() local
628 if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED)) { in hfi1_ipoib_sdma_sleep()
636 list_add_tail(&txreq->list, &txq->tx_list); in hfi1_ipoib_sdma_sleep()
637 if (list_empty(&txq->wait.list)) { in hfi1_ipoib_sdma_sleep()
638 if (!atomic_xchg(&txq->no_desc, 1)) in hfi1_ipoib_sdma_sleep()
639 hfi1_ipoib_stop_txq(txq); in hfi1_ipoib_sdma_sleep()
659 struct hfi1_ipoib_txq *txq = in hfi1_ipoib_sdma_wakeup() local
662 if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED)) in hfi1_ipoib_sdma_wakeup()
671 struct hfi1_ipoib_txq *txq = in hfi1_ipoib_flush_txq() local
673 struct net_device *dev = txq->priv->netdev; in hfi1_ipoib_flush_txq()
676 likely(!hfi1_ipoib_flush_tx_list(dev, txq))) in hfi1_ipoib_flush_txq()
677 if (atomic_xchg(&txq->no_desc, 0)) in hfi1_ipoib_flush_txq()
678 hfi1_ipoib_wake_txq(txq); in hfi1_ipoib_flush_txq()
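
Note: descriptor exhaustion is the second stop reason. The sleep callback parks the txreq on tx_list and sets no_desc (again via atomic_xchg, so the stop is counted once); the wakeup callback schedules the flush work, and hfi1_ipoib_flush_txq() resubmits the list and, on success, clears no_desc and drops the matching stop count. Sketch:

    #include <stdatomic.h>

    static atomic_int no_desc;  /* independent of ring_full */

    /* sleep-callback model: engine out of descriptors, stop once */
    static void on_descriptors_exhausted(void)
    {
        if (!atomic_exchange(&no_desc, 1))
            txq_stop();
    }

    /* flush-worker model: list resubmitted, drop the matching stop count */
    static void on_descriptors_available(void)
    {
        if (atomic_exchange(&no_desc, 0))
            txq_wake();
    }
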
718 struct hfi1_ipoib_txq *txq = &priv->txqs[i]; in hfi1_ipoib_txreq_init() local
720 iowait_init(&txq->wait, in hfi1_ipoib_txreq_init()
728 txq->priv = priv; in hfi1_ipoib_txreq_init()
729 txq->sde = NULL; in hfi1_ipoib_txreq_init()
730 INIT_LIST_HEAD(&txq->tx_list); in hfi1_ipoib_txreq_init()
731 atomic64_set(&txq->complete_txreqs, 0); in hfi1_ipoib_txreq_init()
732 atomic_set(&txq->stops, 0); in hfi1_ipoib_txreq_init()
733 atomic_set(&txq->ring_full, 0); in hfi1_ipoib_txreq_init()
734 atomic_set(&txq->no_desc, 0); in hfi1_ipoib_txreq_init()
735 txq->q_idx = i; in hfi1_ipoib_txreq_init()
736 txq->flow.tx_queue = 0xff; in hfi1_ipoib_txreq_init()
737 txq->flow.sc5 = 0xff; in hfi1_ipoib_txreq_init()
738 txq->pkts_sent = false; in hfi1_ipoib_txreq_init()
743 txq->tx_ring.items = in hfi1_ipoib_txreq_init()
747 if (!txq->tx_ring.items) in hfi1_ipoib_txreq_init()
750 spin_lock_init(&txq->tx_ring.producer_lock); in hfi1_ipoib_txreq_init()
751 spin_lock_init(&txq->tx_ring.consumer_lock); in hfi1_ipoib_txreq_init()
752 txq->tx_ring.max_items = tx_ring_size; in hfi1_ipoib_txreq_init()
754 txq->napi = &priv->tx_napis[i]; in hfi1_ipoib_txreq_init()
755 netif_tx_napi_add(dev, txq->napi, in hfi1_ipoib_txreq_init()
764 struct hfi1_ipoib_txq *txq = &priv->txqs[i]; in hfi1_ipoib_txreq_init() local
766 netif_napi_del(txq->napi); in hfi1_ipoib_txreq_init()
767 kfree(txq->tx_ring.items); in hfi1_ipoib_txreq_init()
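
Note: init walks every queue, setting up the iowait, zeroed counters and flags, the 0xff flow sentinels (presumably so the first packet always misses the flow cache and triggers engine selection), the ring allocation with its producer/consumer locks, and a per-queue NAPI context. If any ring allocation fails, queues already initialized are unwound in reverse before returning. Unwind sketch, reusing ring_model from above:

    #include <stdlib.h>

    static int txqs_init(struct ring_model *rings, int nqueues, size_t nitems)
    {
        int i;

        for (i = 0; i < nqueues; i++) {
            rings[i].items = calloc(nitems, sizeof(void *));
            if (!rings[i].items)
                goto unwind;
            rings[i].head = rings[i].tail = 0;
            rings[i].max_items = nitems;
        }
        return 0;

    unwind:
        /* free only what was successfully allocated, newest first */
        while (--i >= 0)
            free(rings[i].items);
        return -1;  /* -ENOMEM in the kernel */
    }
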
783 static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_drain_tx_list() argument
787 atomic64_t *complete_txreqs = &txq->complete_txreqs; in hfi1_ipoib_drain_tx_list()
789 list_for_each_entry_safe(txreq, txreq_tmp, &txq->tx_list, list) { in hfi1_ipoib_drain_tx_list()
794 sdma_txclean(txq->priv->dd, &tx->txreq); in hfi1_ipoib_drain_tx_list()
796 kmem_cache_free(txq->priv->txreq_cache, tx); in hfi1_ipoib_drain_tx_list()
800 if (hfi1_ipoib_used(txq)) in hfi1_ipoib_drain_tx_list()
801 dd_dev_warn(txq->priv->dd, in hfi1_ipoib_drain_tx_list()
803 txq->q_idx, in hfi1_ipoib_drain_tx_list()
804 hfi1_ipoib_txreqs(txq->sent_txreqs, in hfi1_ipoib_drain_tx_list()
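
Note: hfi1_ipoib_drain_tx_list() frees txreqs that were parked on tx_list but never reached the hardware, counting each as completed; afterwards, a nonzero used count means requests leaked rather than completed, hence the dd_dev_warn. Sketch of the final consistency check:

    #include <inttypes.h>
    #include <stdio.h>

    static void warn_on_leaked_txreqs(struct txq_model *q, int q_idx)
    {
        uint64_t leaked = txq_used(q);

        if (leaked)
            fprintf(stderr, "txq %d: %" PRIu64 " tx requests never completed\n",
                    q_idx, leaked);
    }
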
813 struct hfi1_ipoib_txq *txq = &priv->txqs[i]; in hfi1_ipoib_txreq_deinit() local
815 iowait_cancel_work(&txq->wait); in hfi1_ipoib_txreq_deinit()
816 iowait_sdma_drain(&txq->wait); in hfi1_ipoib_txreq_deinit()
817 hfi1_ipoib_drain_tx_list(txq); in hfi1_ipoib_txreq_deinit()
818 netif_napi_del(txq->napi); in hfi1_ipoib_txreq_deinit()
819 (void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items); in hfi1_ipoib_txreq_deinit()
820 kfree(txq->tx_ring.items); in hfi1_ipoib_txreq_deinit()
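
Note: teardown order in hfi1_ipoib_txreq_deinit() matters. Cancel the flush work and drain the iowait first so nothing new is produced, free whatever is still parked on tx_list, delete NAPI, reap the ring with budget = max_items (i.e. everything), and only then free the ring memory. Sketch building on the models above (quiesce helpers are hypothetical stubs):

    #include <stdlib.h>

    static void cancel_pending_work(struct txq_model *q) { (void)q; } /* iowait_cancel_work() stand-in */
    static void wait_for_sdma_drain(struct txq_model *q) { (void)q; } /* iowait_sdma_drain() stand-in */

    static void txq_deinit(struct txq_model *q, struct ring_model *r)
    {
        cancel_pending_work(q);               /* no new submissions */
        wait_for_sdma_drain(q);               /* in-flight DMA finishes */
        drain_tx_ring(q, (int)r->max_items);  /* budget = whole ring */
        free(r->items);
        r->items = NULL;
    }
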
839 struct hfi1_ipoib_txq *txq = &priv->txqs[i]; in hfi1_ipoib_napi_tx_enable() local
841 napi_enable(txq->napi); in hfi1_ipoib_napi_tx_enable()
851 struct hfi1_ipoib_txq *txq = &priv->txqs[i]; in hfi1_ipoib_napi_tx_disable() local
853 napi_disable(txq->napi); in hfi1_ipoib_napi_tx_disable()
854 (void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items); in hfi1_ipoib_napi_tx_disable()