// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "efx.h"
#include "nic_common.h"
#include "tx_common.h"

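/* Number of pages needed to hold the TX copy buffers: one buffer of size
 * (1 << EFX_TX_CB_ORDER) per ring entry, packed
 * (PAGE_SIZE >> EFX_TX_CB_ORDER) buffers to a page.
 */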
static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
			    PAGE_SIZE >> EFX_TX_CB_ORDER);
}

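/* Allocate the software descriptor ring and the copy-buffer pages for a
 * TX queue, then probe the hardware ring.  On failure everything that was
 * allocated is freed again before returning.
 */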
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
				    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
	if (!tx_queue->cb_page) {
		rc = -ENOMEM;
		goto fail1;
	}

	/* Allocate hardware ring, determine TXQ type */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	tx_queue->channel->tx_queue_by_type[tx_queue->type] = tx_queue;
	return 0;

fail2:
	kfree(tx_queue->cb_page);
	tx_queue->cb_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

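/* Reset all software state for a TX queue and (re)initialise its hardware
 * descriptor ring.
 */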
void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	netif_dbg(efx, drv, efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->notify_count = 0;
	tx_queue->write_count = 0;
	tx_queue->packet_write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
	tx_queue->xmit_pending = false;
	tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
				  tx_queue->channel == efx_ptp_channel(efx));
	tx_queue->completed_timestamp_major = 0;
	tx_queue->completed_timestamp_minor = 0;

	tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
	tx_queue->tso_version = 0;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

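/* Release any buffers still queued in the ring and reset the state of the
 * corresponding core netdev TX queue.
 */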
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;

		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
	}
	tx_queue->xmit_pending = false;
	netdev_tx_reset_queue(tx_queue->core_txq);
}

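/* Free the hardware ring, the copy-buffer pages and the software ring, and
 * unhook the queue from its channel.
 */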
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	if (tx_queue->cb_page) {
		for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
			efx_nic_free_buffer(tx_queue->efx,
					    &tx_queue->cb_page[i]);
		kfree(tx_queue->cb_page);
		tx_queue->cb_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	tx_queue->channel->tx_queue_by_type[tx_queue->type] = NULL;
}

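/* Release the resources held by a single TX buffer: unmap its DMA mapping,
 * complete an attached skb (reporting a hardware timestamp if one was
 * captured) or return an attached XDP frame, then clear the buffer for
 * reuse.
 */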
void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			struct efx_tx_buffer *buffer,
			unsigned int *pkts_compl,
			unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;

		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		struct sk_buff *skb = (struct sk_buff *)buffer->skb;

		EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
		if (tx_queue->timestamping &&
		    (tx_queue->completed_timestamp_major ||
		     tx_queue->completed_timestamp_minor)) {
			struct skb_shared_hwtstamps hwtstamp;

			hwtstamp.hwtstamp =
				efx_ptp_nic_to_kernel_time(tx_queue);
			skb_tstamp_tx(skb, &hwtstamp);

			tx_queue->completed_timestamp_major = 0;
			tx_queue->completed_timestamp_minor = 0;
		}
		dev_consume_skb_any((struct sk_buff *)buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	} else if (buffer->flags & EFX_TX_BUF_XDP) {
		xdp_return_frame_rx_napi(buffer->xdpf);
	}

	buffer->len = 0;
	buffer->flags = 0;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!efx_tx_buffer_in_use(buffer)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %d\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

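/* If completions have caught up with the last descriptor written, record
 * the empty state in empty_read_count, tagged with EFX_EMPTY_COUNT_VALID
 * so that the transmit path can tell the queue is empty.
 */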
void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
{
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			/* Ensure that read_count is flushed. */
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}

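/* Handle a TX completion event: dequeue all buffers up to and including
 * @index, update the completion statistics, and wake the core netdev queue
 * if it was stopped and enough descriptors have been freed.
 */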
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
	struct efx_nic *efx = tx_queue->efx;

	EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	tx_queue->pkts_compl += pkts_compl;
	tx_queue->bytes_compl += bytes_compl;

	if (pkts_compl > 1)
		++tx_queue->merge_events;

	/* See if we need to restart the netif queue.  This memory
	 * barrier ensures that we write read_count (inside
	 * efx_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		fill_level = efx_channel_tx_fill_level(tx_queue->channel);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	efx_xmit_done_check_empty(tx_queue);
}

/* Remove buffers put into a tx_queue for the current packet.
 * None of the buffers must have an skb attached.
 */
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
			unsigned int insert_count)
{
	struct efx_tx_buffer *buffer;
	unsigned int bytes_compl = 0;
	unsigned int pkts_compl = 0;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != insert_count) {
		--tx_queue->insert_count;
		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
	}
}

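/* Create descriptors for a DMA-contiguous chunk of data, splitting it as
 * required by the NIC's DMA length limits.  Returns the buffer holding the
 * final descriptor created.
 */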
struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
				       dma_addr_t dma_addr, size_t len)
{
	const struct efx_nic_type *nic_type = tx_queue->efx->type;
	struct efx_tx_buffer *buffer;
	unsigned int dma_len;

	/* Map the fragment taking account of NIC-dependent DMA limits. */
	do {
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);

		if (nic_type->tx_limit_len)
			dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
		else
			dma_len = len;

		buffer->len = dma_len;
		buffer->dma_addr = dma_addr;
		buffer->flags = EFX_TX_BUF_CONT;
		len -= dma_len;
		dma_addr += dma_len;
		++tx_queue->insert_count;
	} while (len);

	return buffer;
}

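/* Length of the packet headers for TSO purposes: everything up to and
 * including the (inner, for encapsulated packets) TCP header.
 */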
int efx_tx_tso_header_length(struct sk_buff *skb)
{
	size_t header_len;

	if (skb->encapsulation)
		header_len = skb_inner_transport_header(skb) -
				skb->data +
				(inner_tcp_hdr(skb)->doff << 2u);
	else
		header_len = skb_transport_header(skb) - skb->data +
				(tcp_hdr(skb)->doff << 2u);
	return header_len;
}

/* Map all data from an SKB for DMA and create descriptors on the queue. */
int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
		    unsigned int segment_count)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int frag_index, nr_frags;
	dma_addr_t dma_addr, unmap_addr;
	unsigned short dma_flags;
	size_t len, unmap_len;

	nr_frags = skb_shinfo(skb)->nr_frags;
	frag_index = 0;

	/* Map header data. */
	len = skb_headlen(skb);
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
	dma_flags = EFX_TX_BUF_MAP_SINGLE;
	unmap_len = len;
	unmap_addr = dma_addr;

	if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
		return -EIO;

	if (segment_count) {
		/* For TSO we need to put the header in to a separate
		 * descriptor. Map this separately if necessary.
		 */
		size_t header_len = efx_tx_tso_header_length(skb);

		if (header_len != len) {
			tx_queue->tso_long_headers++;
			efx_tx_map_chunk(tx_queue, dma_addr, header_len);
			len -= header_len;
			dma_addr += header_len;
		}
	}

	/* Add descriptors for each fragment. */
	do {
		struct efx_tx_buffer *buffer;
		skb_frag_t *fragment;

		buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);

		/* The final descriptor for a fragment is responsible for
		 * unmapping the whole fragment.
		 */
		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		buffer->dma_offset = buffer->dma_addr - unmap_addr;

		if (frag_index >= nr_frags) {
			/* Store SKB details with the final buffer for
			 * the completion.
			 */
			buffer->skb = skb;
			buffer->flags = EFX_TX_BUF_SKB | dma_flags;
			return 0;
		}

		/* Move on to the next fragment. */
		fragment = &skb_shinfo(skb)->frags[frag_index++];
		len = skb_frag_size(fragment);
		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
					    DMA_TO_DEVICE);
		dma_flags = 0;
		unmap_len = len;
		unmap_addr = dma_addr;

		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			return -EIO;
	} while (1);
}

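/* Worst-case number of descriptors a single TSO skb may need on this NIC. */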
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for option descriptors */
	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
		max_descs += EFX_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EFX_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

	return max_descs;
}

/*
 * Fallback to software TSO.
 *
 * This is used if we are unable to send a GSO packet through hardware TSO.
 * This should only ever happen due to per-queue restrictions - unsupported
 * packets should first be filtered by the feature flags.
 *
 * Returns 0 on success, error code otherwise.
 */
int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct sk_buff *segments, *next;

	segments = skb_gso_segment(skb, 0);
	if (IS_ERR(segments))
		return PTR_ERR(segments);

	dev_consume_skb_any(skb);

	skb_list_walk_safe(segments, skb, next) {
		skb_mark_not_on_list(skb);
		efx_enqueue_skb(tx_queue, skb);
	}

	return 0;
}