// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "tx.h"
#include "workarounds.h"

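/* Return a pointer into the per-queue copy buffer area for the current
 * insert position, allocating the backing DMA-coherent page on first use.
 * Fills in the buffer's dma_addr and clears unmap_len, since copy buffers
 * are never unmapped individually.  Returns NULL if the page allocation
 * fails.
 */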
static inline u8 *ef4_tx_get_copy_buffer(struct ef4_tx_queue *tx_queue,
					 struct ef4_tx_buffer *buffer)
{
	unsigned int index = ef4_tx_queue_get_insert_index(tx_queue);
	struct ef4_buffer *page_buf =
		&tx_queue->cb_page[index >> (PAGE_SHIFT - EF4_TX_CB_ORDER)];
	unsigned int offset =
		((index << EF4_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);

	if (unlikely(!page_buf->addr) &&
	    ef4_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
				 GFP_ATOMIC))
		return NULL;
	buffer->dma_addr = page_buf->dma_addr + offset;
	buffer->unmap_len = 0;
	return (u8 *)page_buf->addr + offset;
}

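/* As ef4_tx_get_copy_buffer(), but fail (return NULL) for any request
 * larger than a single copy-buffer slot.
 */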
u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue,
				   struct ef4_tx_buffer *buffer, size_t len)
{
	if (len > EF4_TX_CB_SIZE)
		return NULL;
	return ef4_tx_get_copy_buffer(tx_queue, buffer);
}

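/* Release a TX buffer: DMA-unmap it if this buffer owns the mapping, and
 * if it carries an skb, free the skb and credit the completion counters.
 * Callers on the unwind path pass NULL counters and must guarantee that
 * no skb is attached.  The buffer is then cleared for reuse.
 */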
static void ef4_dequeue_buffer(struct ef4_tx_queue *tx_queue,
			       struct ef4_tx_buffer *buffer,
			       unsigned int *pkts_compl,
			       unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;

		if (buffer->flags & EF4_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EF4_TX_BUF_SKB) {
		(*pkts_compl)++;
		(*bytes_compl) += buffer->skb->len;
		dev_consume_skb_any((struct sk_buff *)buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	}

	buffer->len = 0;
	buffer->flags = 0;
}

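/* Return a conservative upper bound on the number of TX descriptors any
 * single skb can require on this NIC.
 */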
unsigned int ef4_tx_max_skb_descs(struct ef4_nic *efx)
{
	/* This is probably too much since we don't have any TSO support;
	 * it's a left-over from when we had Software TSO.  But it's safer
	 * to leave it as-is than try to determine a new bound.
	 */
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EF4_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for the alignment workaround,
	 * or for option descriptors
	 */
	if (EF4_WORKAROUND_5391(efx))
		max_descs += EF4_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EF4_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EF4_PAGE_SIZE));

	return max_descs;
}

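/* Stop the core netdev queue if either of the paired hardware queues is
 * close to full.  See the comment in the body for the ordering that
 * avoids a race against the completion path.
 */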
static void ef4_tx_maybe_stop_queue(struct ef4_tx_queue *txq1)
{
	/* We need to consider both queues that the net core sees as one */
	struct ef4_tx_queue *txq2 = ef4_tx_queue_partner(txq1);
	struct ef4_nic *efx = txq1->efx;
	unsigned int fill_level;

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	if (likely(fill_level < efx->txq_stop_thresh))
		return;

	/* We used the stale old_read_count above, which gives us a
	 * pessimistic estimate of the fill level (which may even
	 * validly be >= efx->txq_entries).  Now try again using
	 * read_count (more likely to be a cache miss).
	 *
	 * If we read read_count and then conditionally stop the
	 * queue, it is possible for the completion path to race with
	 * us and complete all outstanding descriptors in the middle,
	 * after which there will be no more completions to wake it.
	 * Therefore we stop the queue first, then read read_count
	 * (with a memory barrier to ensure the ordering), then
	 * restart the queue if the fill level turns out to be low
	 * enough.
	 */
	netif_tx_stop_queue(txq1->core_txq);
	smp_mb();
	txq1->old_read_count = READ_ONCE(txq1->read_count);
	txq2->old_read_count = READ_ONCE(txq2->read_count);

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	EF4_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
	if (likely(fill_level < efx->txq_stop_thresh)) {
		smp_mb();
		if (likely(!efx->loopback_selftest))
			netif_tx_start_queue(txq1->core_txq);
	}
}

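/* Transmit a packet by copying it whole into a copy buffer, padding it
 * up to the queue's minimum length if necessary.  Used for short packets
 * and for short fragmented packets, so that a single descriptor covers
 * the whole frame.
 */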
static int ef4_enqueue_skb_copy(struct ef4_tx_queue *tx_queue,
				struct sk_buff *skb)
{
	unsigned int min_len = tx_queue->tx_min_size;
	unsigned int copy_len = skb->len;
	struct ef4_tx_buffer *buffer;
	u8 *copy_buffer;
	int rc;

	EF4_BUG_ON_PARANOID(copy_len > EF4_TX_CB_SIZE);

	buffer = ef4_tx_queue_get_insert_buffer(tx_queue);

	copy_buffer = ef4_tx_get_copy_buffer(tx_queue, buffer);
	if (unlikely(!copy_buffer))
		return -ENOMEM;

	rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
	EF4_WARN_ON_PARANOID(rc);
	if (unlikely(copy_len < min_len)) {
		memset(copy_buffer + copy_len, 0, min_len - copy_len);
		buffer->len = min_len;
	} else {
		buffer->len = copy_len;
	}

	buffer->skb = skb;
	buffer->flags = EF4_TX_BUF_SKB;

	++tx_queue->insert_count;
	return rc;
}

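/* Add descriptors for a single DMA-contiguous region, splitting it as
 * required by NIC-specific DMA length limits.  Returns the last buffer
 * used, so the caller can attach unmap and skb state to it.
 */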
static struct ef4_tx_buffer *ef4_tx_map_chunk(struct ef4_tx_queue *tx_queue,
					      dma_addr_t dma_addr,
					      size_t len)
{
	const struct ef4_nic_type *nic_type = tx_queue->efx->type;
	struct ef4_tx_buffer *buffer;
	unsigned int dma_len;

	/* Map the fragment taking account of NIC-dependent DMA limits. */
	do {
		buffer = ef4_tx_queue_get_insert_buffer(tx_queue);
		dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);

		buffer->len = dma_len;
		buffer->dma_addr = dma_addr;
		buffer->flags = EF4_TX_BUF_CONT;
		len -= dma_len;
		dma_addr += dma_len;
		++tx_queue->insert_count;
	} while (len);

	return buffer;
}

/* Map all data from an SKB for DMA and create descriptors on the queue. */
static int ef4_tx_map_data(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct ef4_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int frag_index, nr_frags;
	dma_addr_t dma_addr, unmap_addr;
	unsigned short dma_flags;
	size_t len, unmap_len;

	nr_frags = skb_shinfo(skb)->nr_frags;
	frag_index = 0;

	/* Map header data. */
	len = skb_headlen(skb);
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
	dma_flags = EF4_TX_BUF_MAP_SINGLE;
	unmap_len = len;
	unmap_addr = dma_addr;

	if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
		return -EIO;

	/* Add descriptors for each fragment. */
	do {
		struct ef4_tx_buffer *buffer;
		skb_frag_t *fragment;

		buffer = ef4_tx_map_chunk(tx_queue, dma_addr, len);

		/* The final descriptor for a fragment is responsible for
		 * unmapping the whole fragment.
		 */
		buffer->flags = EF4_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		buffer->dma_offset = buffer->dma_addr - unmap_addr;

		if (frag_index >= nr_frags) {
			/* Store SKB details with the final buffer for
			 * the completion.
			 */
			buffer->skb = skb;
			buffer->flags = EF4_TX_BUF_SKB | dma_flags;
			return 0;
		}

		/* Move on to the next fragment. */
		fragment = &skb_shinfo(skb)->frags[frag_index++];
		len = skb_frag_size(fragment);
		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
					    DMA_TO_DEVICE);
		dma_flags = 0;
		unmap_len = len;
		unmap_addr = dma_addr;

		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			return -EIO;
	} while (1);
}

/* Remove buffers put into a tx_queue.  None of the buffers must have
 * an skb attached.
 */
static void ef4_enqueue_unwind(struct ef4_tx_queue *tx_queue)
{
	struct ef4_tx_buffer *buffer;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = __ef4_tx_queue_get_insert_buffer(tx_queue);
		ef4_dequeue_buffer(tx_queue, buffer, NULL, NULL);
	}
}

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from ef4_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
{
	bool data_mapped = false;
	unsigned int skb_len;

	skb_len = skb->len;
	EF4_WARN_ON_PARANOID(skb_is_gso(skb));

	if (skb_len < tx_queue->tx_min_size ||
	    (skb->data_len && skb_len <= EF4_TX_CB_SIZE)) {
		/* Pad short packets or coalesce short fragmented packets. */
		if (ef4_enqueue_skb_copy(tx_queue, skb))
			goto err;
		tx_queue->cb_packets++;
		data_mapped = true;
	}

	/* Map for DMA and create descriptors if we haven't done so already. */
	if (!data_mapped && (ef4_tx_map_data(tx_queue, skb)))
		goto err;

	/* Update BQL */
	netdev_tx_sent_queue(tx_queue->core_txq, skb_len);

	/* Pass off to hardware */
	if (!netdev_xmit_more() || netif_xmit_stopped(tx_queue->core_txq)) {
		struct ef4_tx_queue *txq2 = ef4_tx_queue_partner(tx_queue);

		/* There could be packets left on the partner queue if those
		 * SKBs had skb->xmit_more set. If we do not push those they
		 * could be left for a long time and cause a netdev watchdog.
		 */
		if (txq2->xmit_more_available)
			ef4_nic_push_buffers(txq2);

		ef4_nic_push_buffers(tx_queue);
	} else {
		tx_queue->xmit_more_available = netdev_xmit_more();
	}

	tx_queue->tx_packets++;

	ef4_tx_maybe_stop_queue(tx_queue);

	return NETDEV_TX_OK;

err:
	ef4_enqueue_unwind(tx_queue);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void ef4_dequeue_buffers(struct ef4_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct ef4_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct ef4_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!(buffer->flags & EF4_TX_BUF_OPTION) &&
		    unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			ef4_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		ef4_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t ef4_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct ef4_tx_queue *tx_queue;
	unsigned int index, type;

	EF4_WARN_ON_PARANOID(!netif_device_present(net_dev));

	index = skb_get_queue_mapping(skb);
	type = skb->ip_summed == CHECKSUM_PARTIAL ? EF4_TXQ_TYPE_OFFLOAD : 0;
	if (index >= efx->n_tx_channels) {
		index -= efx->n_tx_channels;
		type |= EF4_TXQ_TYPE_HIGHPRI;
	}
	tx_queue = ef4_get_tx_queue(efx, index, type);

	return ef4_enqueue_skb(tx_queue, skb);
}

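/* Bind a hardware TX queue to its core netdev TX queue. */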
void ef4_init_tx_queue_core_txq(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in ef4_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EF4_TXQ_TYPES +
				    ((tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI) ?
				     efx->n_tx_channels : 0));
}

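/* Handle mqprio traffic-class configuration from the tc subsystem: map
 * each class onto a block of this device's TX channels, initialising the
 * high-priority hardware queues when classes are added.  Queues that
 * become unused are left for ef4_fini_channels() to tear down.
 */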
int ef4_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
		 void *type_data)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct tc_mqprio_qopt *mqprio = type_data;
	struct ef4_channel *channel;
	struct ef4_tx_queue *tx_queue;
	unsigned int tc, num_tc;
	int rc;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	num_tc = mqprio->num_tc;

	if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0 || num_tc > EF4_MAX_TX_TC)
		return -EINVAL;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	if (num_tc == net_dev->num_tc)
		return 0;

	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	if (num_tc > net_dev->num_tc) {
		/* Initialise high-priority queues as necessary */
		ef4_for_each_channel(channel, efx) {
			ef4_for_each_possible_channel_tx_queue(tx_queue,
							       channel) {
				if (!(tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI))
					continue;
				if (!tx_queue->buffer) {
					rc = ef4_probe_tx_queue(tx_queue);
					if (rc)
						return rc;
				}
				if (!tx_queue->initialised)
					ef4_init_tx_queue(tx_queue);
				ef4_init_tx_queue_core_txq(tx_queue);
			}
		}
	} else {
		/* Reduce number of classes before number of queues */
		net_dev->num_tc = num_tc;
	}

	rc = netif_set_real_num_tx_queues(net_dev,
					  max_t(int, num_tc, 1) *
					  efx->n_tx_channels);
	if (rc)
		return rc;

	/* Do not destroy high-priority queues when they become
	 * unused.  We would have to flush them first, and it is
	 * fairly difficult to flush a subset of TX queues.  Leave
	 * it to ef4_fini_channels().
	 */

	net_dev->num_tc = num_tc;
	return 0;
}

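/* Process TX completions up to and including the descriptor at @index:
 * release the completed buffers, wake the core queue if it was stopped
 * and enough space has been freed, and record when the hardware queue
 * drains empty.
 */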
void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index)
{
	unsigned int fill_level;
	struct ef4_nic *efx = tx_queue->efx;
	struct ef4_tx_queue *txq2;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	EF4_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

	ef4_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	tx_queue->pkts_compl += pkts_compl;
	tx_queue->bytes_compl += bytes_compl;

	if (pkts_compl > 1)
		++tx_queue->merge_events;

	/* See if we need to restart the netif queue.  This memory
	 * barrier ensures that we write read_count (inside
	 * ef4_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		txq2 = ef4_tx_queue_partner(tx_queue);
		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
				 txq2->insert_count - txq2->read_count);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	/* Check whether the hardware queue is now empty */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EF4_EMPTY_COUNT_VALID;
		}
	}
}

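/* Number of pages needed for the copy buffers of a whole TX ring. */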
static unsigned int ef4_tx_cb_page_count(struct ef4_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1, PAGE_SIZE >> EF4_TX_CB_ORDER);
}

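/* Allocate the software ring, the copy-buffer page array and the
 * hardware ring for a TX queue.
 */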
int ef4_probe_tx_queue(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EF4_MIN_DMAQ_SIZE);
	EF4_BUG_ON_PARANOID(entries > EF4_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	tx_queue->cb_page = kcalloc(ef4_tx_cb_page_count(tx_queue),
				    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
	if (!tx_queue->cb_page) {
		rc = -ENOMEM;
		goto fail1;
	}

	/* Allocate hardware ring */
	rc = ef4_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	return 0;

fail2:
	kfree(tx_queue->cb_page);
	tx_queue->cb_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

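/* Reset a TX queue's counters and initialise its hardware descriptor ring. */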
void ef4_init_tx_queue(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;

	netif_dbg(efx, drv, efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EF4_EMPTY_COUNT_VALID;
	tx_queue->xmit_more_available = false;

	/* Some older hardware requires Tx writes larger than 32. */
	tx_queue->tx_min_size = EF4_WORKAROUND_15592(efx) ? 33 : 0;

	/* Set up TX descriptor ring */
	ef4_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

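/* Release any buffers left in a TX queue and reset its BQL state. */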
void ef4_fini_tx_queue(struct ef4_tx_queue *tx_queue)
{
	struct ef4_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;

		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		ef4_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
	}
	tx_queue->xmit_more_available = false;
	netdev_tx_reset_queue(tx_queue->core_txq);
}

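/* Free all resources allocated by ef4_probe_tx_queue(). */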
void ef4_remove_tx_queue(struct ef4_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	ef4_nic_remove_tx(tx_queue);

	if (tx_queue->cb_page) {
		for (i = 0; i < ef4_tx_cb_page_count(tx_queue); i++)
			ef4_nic_free_buffer(tx_queue->efx,
					    &tx_queue->cb_page[i]);
		kfree(tx_queue->cb_page);
		tx_queue->cb_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}