// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright(c) 2020 Intel Corporation.
 *
 */

/*
 * This file contains HFI1 support for IPOIB SDMA functionality
 */

#include <linux/log2.h>
#include <linux/circ_buf.h>

#include "sdma.h"
#include "verbs.h"
#include "trace_ibhdrs.h"
#include "ipoib.h"

/* Convenience helpers for power-of-two circular buffer index arithmetic */
#define CIRC_ADD(val, add, size) (((val) + (add)) & ((size) - 1))
#define CIRC_NEXT(val, size) CIRC_ADD(val, 1, size)
#define CIRC_PREV(val, size) CIRC_ADD(val, -1, size)
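/*
 * For illustration: with size = 8, CIRC_ADD(7, 1, 8) == 0 and
 * CIRC_PREV(0, 8) == 7, i.e. the mask wraps indices in both directions.
 * This only works because the ring size is a power of two (see
 * roundup_pow_of_two() in hfi1_ipoib_txreq_init()).
 */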
/**
 * struct ipoib_txreq - IPOIB transmit descriptor
 * @txreq: sdma transmit request
 * @sdma_hdr: 9B IB headers
 * @sdma_status: status returned by sdma engine
 * @priv: ipoib netdev private data
 * @txq: txq on which skb was output
 * @skb: skb to send
 */
struct ipoib_txreq {
	struct sdma_txreq           txreq;
	struct hfi1_sdma_header     sdma_hdr;
	int                         sdma_status;
	struct hfi1_ipoib_dev_priv *priv;
	struct hfi1_ipoib_txq      *txq;
	struct sk_buff             *skb;
};

struct ipoib_txparms {
	struct hfi1_devdata        *dd;
	struct rdma_ah_attr        *ah_attr;
	struct hfi1_ibport         *ibp;
	struct hfi1_ipoib_txq      *txq;
	union hfi1_ipoib_flow       flow;
	u32                         dqpn;
	u8                          hdr_dwords;
	u8                          entropy;
};

static u64 hfi1_ipoib_txreqs(const u64 sent, const u64 completed)
{
	return sent - completed;
}
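/*
 * Note: sent and completed are free-running counters, so the unsigned
 * subtraction above stays correct even across u64 wraparound (e.g.
 * sent == 1, completed == ULLONG_MAX yields 2 outstanding requests).
 */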

static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq)
{
	return hfi1_ipoib_txreqs(txq->sent_txreqs,
				 atomic64_read(&txq->complete_txreqs));
}

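/*
 * txq->stops counts the independent reasons the queue is currently
 * stopped (ring full in hfi1_ipoib_check_queue_depth(), no sdma
 * descriptors in hfi1_ipoib_sdma_sleep()), so the subqueue is only
 * woken once every outstanding stop reason has been cleared.
 */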
static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq)
{
	if (atomic_inc_return(&txq->stops) == 1)
		netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
}

static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq)
{
	if (atomic_dec_and_test(&txq->stops))
		netif_wake_subqueue(txq->priv->netdev, txq->q_idx);
}

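/*
 * Example with illustrative values: tx_queue_len = 1000 and a ring of
 * max_items = 1024 give a high watermark of min(1000, 1023) = 1000 used
 * txreqs and a low watermark of min(1000, 1024) / 2 = 500, providing the
 * stop/wake hysteresis used below.
 */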
static uint hfi1_ipoib_ring_hwat(struct hfi1_ipoib_txq *txq)
{
	return min_t(uint, txq->priv->netdev->tx_queue_len,
		     txq->tx_ring.max_items - 1);
}

static uint hfi1_ipoib_ring_lwat(struct hfi1_ipoib_txq *txq)
{
	return min_t(uint, txq->priv->netdev->tx_queue_len,
		     txq->tx_ring.max_items) >> 1;
}

static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
{
	++txq->sent_txreqs;
	if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) &&
	    !atomic_xchg(&txq->ring_full, 1))
		hfi1_ipoib_stop_txq(txq);
}

static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
{
	struct net_device *dev = txq->priv->netdev;

	/* If shutting down, just return as queue state is irrelevant */
	if (unlikely(dev->reg_state != NETREG_REGISTERED))
		return;

	/*
	 * When the queue has been drained to less than half full it will be
	 * restarted.
	 * The size of the txreq ring is fixed at initialization.
	 * The tx queue len can be adjusted upward while the interface is
	 * running.
	 * The tx queue len can be large enough to overflow the txreq_ring.
	 * Use the minimum of the current tx_queue_len or the ring's max txreqs
	 * to protect against ring overflow.
	 */
	if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) &&
	    atomic_xchg(&txq->ring_full, 0))
		hfi1_ipoib_wake_txq(txq);
}

static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
{
	struct hfi1_ipoib_dev_priv *priv = tx->priv;

	if (likely(!tx->sdma_status)) {
		hfi1_ipoib_update_tx_netstats(priv, 1, tx->skb->len);
	} else {
		++priv->netdev->stats.tx_errors;
		dd_dev_warn(priv->dd,
			    "%s: Status = 0x%x pbc 0x%llx txq = %d sde = %d\n",
			    __func__, tx->sdma_status,
			    le64_to_cpu(tx->sdma_hdr.pbc), tx->txq->q_idx,
			    tx->txq->sde->this_idx);
	}

	napi_consume_skb(tx->skb, budget);
	sdma_txclean(priv->dd, &tx->txreq);
	kmem_cache_free(priv->txreq_cache, tx);
}

static int hfi1_ipoib_drain_tx_ring(struct hfi1_ipoib_txq *txq, int budget)
{
	struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
	unsigned long head;
	unsigned long tail;
	unsigned int max_tx;
	int work_done;
	int tx_count;

	spin_lock_bh(&tx_ring->consumer_lock);

	/* Read index before reading contents at that index. */
	head = smp_load_acquire(&tx_ring->head);
	tail = tx_ring->tail;
	max_tx = tx_ring->max_items;

	work_done = min_t(int, CIRC_CNT(head, tail, max_tx), budget);

	for (tx_count = work_done; tx_count; tx_count--) {
		hfi1_ipoib_free_tx(tx_ring->items[tail], budget);
		tail = CIRC_NEXT(tail, max_tx);
	}

	atomic64_add(work_done, &txq->complete_txreqs);

	/* Finished freeing tx items so store the tail value. */
	smp_store_release(&tx_ring->tail, tail);

	spin_unlock_bh(&tx_ring->consumer_lock);

	hfi1_ipoib_check_queue_stopped(txq);

	return work_done;
}

static int hfi1_ipoib_process_tx_ring(struct napi_struct *napi, int budget)
{
	struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(napi->dev);
	struct hfi1_ipoib_txq *txq = &priv->txqs[napi - priv->tx_napis];

	int work_done = hfi1_ipoib_drain_tx_ring(txq, budget);

	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}

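/*
 * Producer side of the single-producer/single-consumer tx ring.  The
 * smp_store_release() on head below pairs with the smp_load_acquire()
 * in hfi1_ipoib_drain_tx_ring(), and the READ_ONCE() on tail pairs with
 * the smp_store_release() there, so each side sees a consistent view of
 * the other's index without taking the other's lock.
 */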
static void hfi1_ipoib_add_tx(struct ipoib_txreq *tx)
{
	struct hfi1_ipoib_circ_buf *tx_ring = &tx->txq->tx_ring;
	unsigned long head;
	unsigned long tail;
	size_t max_tx;

	spin_lock(&tx_ring->producer_lock);

	head = tx_ring->head;
	tail = READ_ONCE(tx_ring->tail);
	max_tx = tx_ring->max_items;

	if (likely(CIRC_SPACE(head, tail, max_tx))) {
		tx_ring->items[head] = tx;

		/* Finish storing txreq before incrementing head. */
		smp_store_release(&tx_ring->head, CIRC_ADD(head, 1, max_tx));
		napi_schedule(tx->txq->napi);
	} else {
		struct hfi1_ipoib_txq *txq = tx->txq;
		struct hfi1_ipoib_dev_priv *priv = tx->priv;

		/* Ring was full */
		hfi1_ipoib_free_tx(tx, 0);
		atomic64_inc(&txq->complete_txreqs);
		dd_dev_dbg(priv->dd, "txq %d full.\n", txq->q_idx);
	}

	spin_unlock(&tx_ring->producer_lock);
}

static void hfi1_ipoib_sdma_complete(struct sdma_txreq *txreq, int status)
{
	struct ipoib_txreq *tx = container_of(txreq, struct ipoib_txreq, txreq);

	tx->sdma_status = status;

	hfi1_ipoib_add_tx(tx);
}

static int hfi1_ipoib_build_ulp_payload(struct ipoib_txreq *tx,
					struct ipoib_txparms *txp)
{
	struct hfi1_devdata *dd = txp->dd;
	struct sdma_txreq *txreq = &tx->txreq;
	struct sk_buff *skb = tx->skb;
	int ret = 0;
	int i;

	if (skb_headlen(skb)) {
		ret = sdma_txadd_kvaddr(dd, txreq, skb->data, skb_headlen(skb));
		if (unlikely(ret))
			return ret;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		ret = sdma_txadd_page(dd,
				      txreq,
				      skb_frag_page(frag),
				      frag->bv_offset,
				      skb_frag_size(frag));
		if (unlikely(ret))
			break;
	}

	return ret;
}

static int hfi1_ipoib_build_tx_desc(struct ipoib_txreq *tx,
				    struct ipoib_txparms *txp)
{
	struct hfi1_devdata *dd = txp->dd;
	struct sdma_txreq *txreq = &tx->txreq;
	struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr;
	u16 pkt_bytes =
		sizeof(sdma_hdr->pbc) + (txp->hdr_dwords << 2) + tx->skb->len;
	int ret;

	ret = sdma_txinit(txreq, 0, pkt_bytes, hfi1_ipoib_sdma_complete);
	if (unlikely(ret))
		return ret;

	/* add pbc + headers */
	ret = sdma_txadd_kvaddr(dd,
				txreq,
				sdma_hdr,
				sizeof(sdma_hdr->pbc) + (txp->hdr_dwords << 2));
	if (unlikely(ret))
		return ret;

	/* add the ulp payload */
	return hfi1_ipoib_build_ulp_payload(tx, txp);
}

static void hfi1_ipoib_build_ib_tx_headers(struct ipoib_txreq *tx,
					   struct ipoib_txparms *txp)
{
	struct hfi1_ipoib_dev_priv *priv = tx->priv;
	struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr;
	struct sk_buff *skb = tx->skb;
	struct hfi1_pportdata *ppd = ppd_from_ibp(txp->ibp);
	struct rdma_ah_attr *ah_attr = txp->ah_attr;
	struct ib_other_headers *ohdr;
	struct ib_grh *grh;
	u16 dwords;
	u16 slid;
	u16 dlid;
	u16 lrh0;
	u32 bth0;
	u32 sqpn = (u32)(priv->netdev->dev_addr[1] << 16 |
			 priv->netdev->dev_addr[2] << 8 |
			 priv->netdev->dev_addr[3]);
	u16 payload_dwords;
	u8 pad_cnt;

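	/*
	 * -skb->len & 3 is the number of bytes needed to pad the payload
	 * up to a 4-byte boundary, e.g. skb->len = 57 gives pad_cnt = 3,
	 * so the padded payload is 60 bytes (15 dwords).
	 */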
	pad_cnt = -skb->len & 3;

	/* Includes ICRC */
	payload_dwords = ((skb->len + pad_cnt) >> 2) + SIZE_OF_CRC;

	/* header size in dwords LRH+BTH+DETH = (8+12+8)/4. */
	txp->hdr_dwords = 7;

	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		grh = &sdma_hdr->hdr.ibh.u.l.grh;
		txp->hdr_dwords +=
			hfi1_make_grh(txp->ibp,
				      grh,
				      rdma_ah_read_grh(ah_attr),
				      txp->hdr_dwords - LRH_9B_DWORDS,
				      payload_dwords);
		lrh0 = HFI1_LRH_GRH;
		ohdr = &sdma_hdr->hdr.ibh.u.l.oth;
	} else {
		lrh0 = HFI1_LRH_BTH;
		ohdr = &sdma_hdr->hdr.ibh.u.oth;
	}

	lrh0 |= (rdma_ah_get_sl(ah_attr) & 0xf) << 4;
	lrh0 |= (txp->flow.sc5 & 0xf) << 12;

	dlid = opa_get_lid(rdma_ah_get_dlid(ah_attr), 9B);
	if (dlid == be16_to_cpu(IB_LID_PERMISSIVE)) {
		slid = be16_to_cpu(IB_LID_PERMISSIVE);
	} else {
		u16 lid = (u16)ppd->lid;

		if (lid) {
			lid |= rdma_ah_get_path_bits(ah_attr) &
				((1 << ppd->lmc) - 1);
			slid = lid;
		} else {
			slid = be16_to_cpu(IB_LID_PERMISSIVE);
		}
	}

	/* Includes ICRC */
	dwords = txp->hdr_dwords + payload_dwords;

	/* Build the lrh */
	sdma_hdr->hdr.hdr_type = HFI1_PKT_TYPE_9B;
	hfi1_make_ib_hdr(&sdma_hdr->hdr.ibh, lrh0, dwords, dlid, slid);

	/* Build the bth */
	bth0 = (IB_OPCODE_UD_SEND_ONLY << 24) | (pad_cnt << 20) | priv->pkey;

	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(txp->dqpn);
	ohdr->bth[2] = cpu_to_be32(mask_psn((u32)txp->txq->sent_txreqs));

	/* Build the deth */
	ohdr->u.ud.deth[0] = cpu_to_be32(priv->qkey);
	ohdr->u.ud.deth[1] = cpu_to_be32((txp->entropy <<
					  HFI1_IPOIB_ENTROPY_SHIFT) | sqpn);

	/* Construct the pbc. */
	sdma_hdr->pbc =
		cpu_to_le64(create_pbc(ppd,
				       ib_is_sc5(txp->flow.sc5) <<
							      PBC_DC_INFO_SHIFT,
				       0,
				       sc_to_vlt(priv->dd, txp->flow.sc5),
				       dwords - SIZE_OF_CRC +
						(sizeof(sdma_hdr->pbc) >> 2)));
}

static struct ipoib_txreq *hfi1_ipoib_send_dma_common(struct net_device *dev,
						      struct sk_buff *skb,
						      struct ipoib_txparms *txp)
{
	struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
	struct ipoib_txreq *tx;
	int ret;

	tx = kmem_cache_alloc_node(priv->txreq_cache,
				   GFP_ATOMIC,
				   priv->dd->node);
	if (unlikely(!tx))
		return ERR_PTR(-ENOMEM);

	/* so that we can test if the sdma descriptors are there */
	tx->txreq.num_desc = 0;
	tx->priv = priv;
	tx->txq = txp->txq;
	tx->skb = skb;
	INIT_LIST_HEAD(&tx->txreq.list);

	hfi1_ipoib_build_ib_tx_headers(tx, txp);

	ret = hfi1_ipoib_build_tx_desc(tx, txp);
	if (likely(!ret)) {
		if (txp->txq->flow.as_int != txp->flow.as_int) {
			txp->txq->flow.tx_queue = txp->flow.tx_queue;
			txp->txq->flow.sc5 = txp->flow.sc5;
			txp->txq->sde =
				sdma_select_engine_sc(priv->dd,
						      txp->flow.tx_queue,
						      txp->flow.sc5);
		}

		return tx;
	}

	sdma_txclean(priv->dd, &tx->txreq);
	kmem_cache_free(priv->txreq_cache, tx);

	return ERR_PTR(ret);
}

static int hfi1_ipoib_submit_tx_list(struct net_device *dev,
				     struct hfi1_ipoib_txq *txq)
{
	int ret;
	u16 count_out;

	ret = sdma_send_txlist(txq->sde,
			       iowait_get_ib_work(&txq->wait),
			       &txq->tx_list,
			       &count_out);
	if (likely(!ret) || ret == -EBUSY || ret == -ECOMM)
		return ret;

	dd_dev_warn(txq->priv->dd, "cannot send skb tx list, err %d.\n", ret);

	return ret;
}

static int hfi1_ipoib_flush_tx_list(struct net_device *dev,
				    struct hfi1_ipoib_txq *txq)
{
	int ret = 0;

	if (!list_empty(&txq->tx_list)) {
		/* Flush the current list */
		ret = hfi1_ipoib_submit_tx_list(dev, txq);

		if (unlikely(ret))
			if (ret != -EBUSY)
				++dev->stats.tx_carrier_errors;
	}

	return ret;
}

static int hfi1_ipoib_submit_tx(struct hfi1_ipoib_txq *txq,
				struct ipoib_txreq *tx)
{
	int ret;

	ret = sdma_send_txreq(txq->sde,
			      iowait_get_ib_work(&txq->wait),
			      &tx->txreq,
			      txq->pkts_sent);
	if (likely(!ret)) {
		txq->pkts_sent = true;
		iowait_starve_clear(txq->pkts_sent, &txq->wait);
	}

	return ret;
}

static int hfi1_ipoib_send_dma_single(struct net_device *dev,
				      struct sk_buff *skb,
				      struct ipoib_txparms *txp)
{
	struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
	struct hfi1_ipoib_txq *txq = txp->txq;
	struct ipoib_txreq *tx;
	int ret;

	tx = hfi1_ipoib_send_dma_common(dev, skb, txp);
	if (IS_ERR(tx)) {
		int ret = PTR_ERR(tx);

		dev_kfree_skb_any(skb);

		if (ret == -ENOMEM)
			++dev->stats.tx_errors;
		else
			++dev->stats.tx_carrier_errors;

		return NETDEV_TX_OK;
	}

	ret = hfi1_ipoib_submit_tx(txq, tx);
	if (likely(!ret)) {
tx_ok:
		trace_sdma_output_ibhdr(tx->priv->dd,
					&tx->sdma_hdr.hdr,
					ib_is_sc5(txp->flow.sc5));
		hfi1_ipoib_check_queue_depth(txq);
		return NETDEV_TX_OK;
	}

	txq->pkts_sent = false;

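	/*
	 * -EBUSY: the sleep callback queued this txreq on txq->tx_list for
	 * a deferred retry (see hfi1_ipoib_sdma_sleep()).  -ECOMM is
	 * treated the same way here rather than as a drop, so both fall
	 * through to the normal accounting above.
	 */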
	if (ret == -EBUSY || ret == -ECOMM)
		goto tx_ok;

	sdma_txclean(priv->dd, &tx->txreq);
	dev_kfree_skb_any(skb);
	kmem_cache_free(priv->txreq_cache, tx);
	++dev->stats.tx_carrier_errors;

	return NETDEV_TX_OK;
}

static int hfi1_ipoib_send_dma_list(struct net_device *dev,
				    struct sk_buff *skb,
				    struct ipoib_txparms *txp)
{
	struct hfi1_ipoib_txq *txq = txp->txq;
	struct ipoib_txreq *tx;

	/* Has the flow changed? */
	if (txq->flow.as_int != txp->flow.as_int) {
		int ret;

		ret = hfi1_ipoib_flush_tx_list(dev, txq);
		if (unlikely(ret)) {
			if (ret == -EBUSY)
				++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
	tx = hfi1_ipoib_send_dma_common(dev, skb, txp);
	if (IS_ERR(tx)) {
		int ret = PTR_ERR(tx);

		dev_kfree_skb_any(skb);

		if (ret == -ENOMEM)
			++dev->stats.tx_errors;
		else
			++dev->stats.tx_carrier_errors;

		return NETDEV_TX_OK;
	}

	list_add_tail(&tx->txreq.list, &txq->tx_list);

	hfi1_ipoib_check_queue_depth(txq);

	trace_sdma_output_ibhdr(tx->priv->dd,
				&tx->sdma_hdr.hdr,
				ib_is_sc5(txp->flow.sc5));

	if (!netdev_xmit_more())
		(void)hfi1_ipoib_flush_tx_list(dev, txq);

	return NETDEV_TX_OK;
}

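/*
 * For TCP and UDP the first four bytes of the transport header are the
 * source and destination ports, so the XOR below folds both ports into
 * a single entropy byte used to spread flows across queues.
 */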
static u8 hfi1_ipoib_calc_entropy(struct sk_buff *skb)
{
	if (skb_transport_header_was_set(skb)) {
		u8 *hdr = (u8 *)skb_transport_header(skb);

		return (hdr[0] ^ hdr[1] ^ hdr[2] ^ hdr[3]);
	}

	return (u8)skb_get_queue_mapping(skb);
}

int hfi1_ipoib_send_dma(struct net_device *dev,
			struct sk_buff *skb,
			struct ib_ah *address,
			u32 dqpn)
{
	struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
	struct ipoib_txparms txp;
	struct rdma_netdev *rn = netdev_priv(dev);

	if (unlikely(skb->len > rn->mtu + HFI1_IPOIB_ENCAP_LEN)) {
		dd_dev_warn(priv->dd, "packet len %d (> %d) too long to send, dropping\n",
			    skb->len,
			    rn->mtu + HFI1_IPOIB_ENCAP_LEN);
		++dev->stats.tx_dropped;
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txp.dd = priv->dd;
	txp.ah_attr = &ibah_to_rvtah(address)->attr;
	txp.ibp = to_iport(priv->device, priv->port_num);
	txp.txq = &priv->txqs[skb_get_queue_mapping(skb)];
	txp.dqpn = dqpn;
	txp.flow.sc5 = txp.ibp->sl_to_sc[rdma_ah_get_sl(txp.ah_attr)];
	txp.flow.tx_queue = (u8)skb_get_queue_mapping(skb);
	txp.entropy = hfi1_ipoib_calc_entropy(skb);

	if (netdev_xmit_more() || !list_empty(&txp.txq->tx_list))
		return hfi1_ipoib_send_dma_list(dev, skb, &txp);

	return hfi1_ipoib_send_dma_single(dev, skb, &txp);
}

/*
 * hfi1_ipoib_sdma_sleep - ipoib sdma sleep function
 *
 * This function gets called from sdma_send_txreq() when there are not enough
 * sdma descriptors available to send the packet. It adds the Tx queue's wait
 * structure to the sdma engine's dmawait list to be woken up when descriptors
 * become available.
 */
static int hfi1_ipoib_sdma_sleep(struct sdma_engine *sde,
				 struct iowait_work *wait,
				 struct sdma_txreq *txreq,
				 uint seq,
				 bool pkts_sent)
{
	struct hfi1_ipoib_txq *txq =
		container_of(wait->iow, struct hfi1_ipoib_txq, wait);

	write_seqlock(&sde->waitlock);

	if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED)) {
		if (sdma_progress(sde, seq, txreq)) {
			write_sequnlock(&sde->waitlock);
			return -EAGAIN;
		}

		if (list_empty(&txreq->list))
			/* came from non-list submit */
			list_add_tail(&txreq->list, &txq->tx_list);
		if (list_empty(&txq->wait.list)) {
			if (!atomic_xchg(&txq->no_desc, 1))
				hfi1_ipoib_stop_txq(txq);
			iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
		}

		write_sequnlock(&sde->waitlock);
		return -EBUSY;
	}

	write_sequnlock(&sde->waitlock);
	return -EINVAL;
}

/*
 * hfi1_ipoib_sdma_wakeup - ipoib sdma wakeup function
 *
 * This function gets called when SDMA descriptors become available and the
 * Tx queue's wait structure was previously added to the sdma engine's
 * dmawait list.
 */
static void hfi1_ipoib_sdma_wakeup(struct iowait *wait, int reason)
{
	struct hfi1_ipoib_txq *txq =
		container_of(wait, struct hfi1_ipoib_txq, wait);

	if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED))
		iowait_schedule(wait, system_highpri_wq, WORK_CPU_UNBOUND);
}

static void hfi1_ipoib_flush_txq(struct work_struct *work)
{
	struct iowait_work *ioww =
		container_of(work, struct iowait_work, iowork);
	struct iowait *wait = iowait_ioww_to_iow(ioww);
	struct hfi1_ipoib_txq *txq =
		container_of(wait, struct hfi1_ipoib_txq, wait);
	struct net_device *dev = txq->priv->netdev;

	if (likely(dev->reg_state == NETREG_REGISTERED) &&
	    likely(!hfi1_ipoib_flush_tx_list(dev, txq)))
		if (atomic_xchg(&txq->no_desc, 0))
			hfi1_ipoib_wake_txq(txq);
}

int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
{
	struct net_device *dev = priv->netdev;
	char buf[HFI1_IPOIB_TXREQ_NAME_LEN];
	unsigned long tx_ring_size;
	int i;

	/*
	 * Ring holds 1 less than tx_ring_size
	 * Round up to next power of 2 in order to hold at least tx_queue_len
	 */
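	/* e.g. tx_queue_len = 1000: roundup_pow_of_two(1001) = 1024, holding up to 1023 txreqs */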
	tx_ring_size = roundup_pow_of_two((unsigned long)dev->tx_queue_len + 1);

	snprintf(buf, sizeof(buf), "hfi1_%u_ipoib_txreq_cache", priv->dd->unit);
	priv->txreq_cache = kmem_cache_create(buf,
					      sizeof(struct ipoib_txreq),
					      0,
					      0,
					      NULL);
	if (!priv->txreq_cache)
		return -ENOMEM;

	priv->tx_napis = kcalloc_node(dev->num_tx_queues,
				      sizeof(struct napi_struct),
				      GFP_KERNEL,
				      priv->dd->node);
	if (!priv->tx_napis)
		goto free_txreq_cache;

	priv->txqs = kcalloc_node(dev->num_tx_queues,
				  sizeof(struct hfi1_ipoib_txq),
				  GFP_KERNEL,
				  priv->dd->node);
	if (!priv->txqs)
		goto free_tx_napis;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct hfi1_ipoib_txq *txq = &priv->txqs[i];

		iowait_init(&txq->wait,
			    0,
			    hfi1_ipoib_flush_txq,
			    NULL,
			    hfi1_ipoib_sdma_sleep,
			    hfi1_ipoib_sdma_wakeup,
			    NULL,
			    NULL);
		txq->priv = priv;
		txq->sde = NULL;
		INIT_LIST_HEAD(&txq->tx_list);
		atomic64_set(&txq->complete_txreqs, 0);
		atomic_set(&txq->stops, 0);
		atomic_set(&txq->ring_full, 0);
		atomic_set(&txq->no_desc, 0);
		txq->q_idx = i;
		txq->flow.tx_queue = 0xff;
		txq->flow.sc5 = 0xff;
		txq->pkts_sent = false;

		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
					     priv->dd->node);

		txq->tx_ring.items =
			kcalloc_node(tx_ring_size,
				     sizeof(struct ipoib_txreq *),
				     GFP_KERNEL, priv->dd->node);
		if (!txq->tx_ring.items)
			goto free_txqs;

		spin_lock_init(&txq->tx_ring.producer_lock);
		spin_lock_init(&txq->tx_ring.consumer_lock);
		txq->tx_ring.max_items = tx_ring_size;

		txq->napi = &priv->tx_napis[i];
		netif_tx_napi_add(dev, txq->napi,
				  hfi1_ipoib_process_tx_ring,
				  NAPI_POLL_WEIGHT);
	}

	return 0;

free_txqs:
	for (i--; i >= 0; i--) {
		struct hfi1_ipoib_txq *txq = &priv->txqs[i];

		netif_napi_del(txq->napi);
		kfree(txq->tx_ring.items);
	}

	kfree(priv->txqs);
	priv->txqs = NULL;

free_tx_napis:
	kfree(priv->tx_napis);
	priv->tx_napis = NULL;

free_txreq_cache:
	kmem_cache_destroy(priv->txreq_cache);
	priv->txreq_cache = NULL;
	return -ENOMEM;
}

static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq)
{
	struct sdma_txreq *txreq;
	struct sdma_txreq *txreq_tmp;
	atomic64_t *complete_txreqs = &txq->complete_txreqs;

	list_for_each_entry_safe(txreq, txreq_tmp, &txq->tx_list, list) {
		struct ipoib_txreq *tx =
			container_of(txreq, struct ipoib_txreq, txreq);

		list_del(&txreq->list);
		sdma_txclean(txq->priv->dd, &tx->txreq);
		dev_kfree_skb_any(tx->skb);
		kmem_cache_free(txq->priv->txreq_cache, tx);
		atomic64_inc(complete_txreqs);
	}

	if (hfi1_ipoib_used(txq))
		dd_dev_warn(txq->priv->dd,
			    "txq %d not empty found %llu requests\n",
			    txq->q_idx,
			    hfi1_ipoib_txreqs(txq->sent_txreqs,
					      atomic64_read(complete_txreqs)));
}

void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv)
{
	int i;

	for (i = 0; i < priv->netdev->num_tx_queues; i++) {
		struct hfi1_ipoib_txq *txq = &priv->txqs[i];

		iowait_cancel_work(&txq->wait);
		iowait_sdma_drain(&txq->wait);
		hfi1_ipoib_drain_tx_list(txq);
		netif_napi_del(txq->napi);
		(void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);
		kfree(txq->tx_ring.items);
	}

	kfree(priv->txqs);
	priv->txqs = NULL;

	kfree(priv->tx_napis);
	priv->tx_napis = NULL;

	kmem_cache_destroy(priv->txreq_cache);
	priv->txreq_cache = NULL;
}

void hfi1_ipoib_napi_tx_enable(struct net_device *dev)
{
	struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct hfi1_ipoib_txq *txq = &priv->txqs[i];

		napi_enable(txq->napi);
	}
}

void hfi1_ipoib_napi_tx_disable(struct net_device *dev)
{
	struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct hfi1_ipoib_txq *txq = &priv->txqs[i];

		napi_disable(txq->napi);
		(void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);
	}
}