// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2015 Solarflare Communications Inc.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "tx.h"
#include "workarounds.h"
#include "ef10_regs.h"

/* Efx legacy TCP segmentation acceleration.
 *
 * Utilises firmware support to go faster than GSO (but not as fast as TSOv2).
 *
 * Requires TX checksum offload support.
 */

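/* Byte offset of p1 from p2, used for header offset/length arithmetic. */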
#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2))

/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @protocol: Network protocol (after any VLAN header)
 * @ip_off: Offset of IP header
 * @tcp_off: Offset of TCP header
 * @header_len: Number of bytes of header
 * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
 * @header_dma_addr: Header DMA address
 * @header_unmap_len: Header DMA mapped length
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned int out_len;
	unsigned int seqnum;
	u16 ipv4_id;
	unsigned int packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned int in_len;
	unsigned int unmap_len;
	dma_addr_t unmap_addr;

	__be16 protocol;
	unsigned int ip_off;
	unsigned int tcp_off;
	unsigned int header_len;
	unsigned int ip_base_len;
	dma_addr_t header_dma_addr;
	unsigned int header_unmap_len;
};

static inline void prefetch_ptr(struct efx_tx_queue *tx_queue)
{
	unsigned int insert_ptr = efx_tx_queue_get_insert_index(tx_queue);
	char *ptr;

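	/* Warm up the cache lines holding both the software buffer state
	 * and the matching hardware descriptor for the insertion point
	 * (the +0x80 covers a second cache line on common configurations).
	 */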
	ptr = (char *) (tx_queue->buffer + insert_ptr);
	prefetch(ptr);
	prefetch(ptr + 0x80);

	ptr = (char *) (((efx_qword_t *)tx_queue->txd.buf.addr) + insert_ptr);
	prefetch(ptr);
	prefetch(ptr + 0x80);
}

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue: Efx TX queue
 * @dma_addr: DMA address of fragment
 * @len: Length of fragment
 * @final_buffer: The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.
 */
static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
				dma_addr_t dma_addr, unsigned int len,
				struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	unsigned int dma_len;

	EFX_WARN_ON_ONCE_PARANOID(len <= 0);

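	/* Keep emitting descriptors until the remaining length fits within
	 * the hardware's per-descriptor DMA length limit.
	 */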
	while (1) {
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
		++tx_queue->insert_count;

		EFX_WARN_ON_ONCE_PARANOID(tx_queue->insert_count -
					  tx_queue->read_count >=
					  tx_queue->efx->txq_entries);

		buffer->dma_addr = dma_addr;

		dma_len = tx_queue->efx->type->tx_limit_len(tx_queue,
							    dma_addr, len);

		/* If there's space for everything this is our last buffer. */
		if (dma_len >= len)
			break;

		buffer->len = dma_len;
		buffer->flags = EFX_TX_BUF_CONT;
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_WARN_ON_ONCE_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
}

/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_WARN_ON_ONCE_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
				  protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;

		protocol = veh->h_vlan_encapsulated_proto;
	}

	if (protocol == htons(ETH_P_IP)) {
		EFX_WARN_ON_ONCE_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	} else {
		EFX_WARN_ON_ONCE_PARANOID(protocol != htons(ETH_P_IPV6));
		EFX_WARN_ON_ONCE_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
	}
	EFX_WARN_ON_ONCE_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data) +
				   (tcp_hdr(skb)->doff << 2u)) >
				  skb_headlen(skb));

	return protocol;
}

/* Parse the SKB header and initialise state. */
static int tso_start(struct tso_state *st, struct efx_nic *efx,
		     struct efx_tx_queue *tx_queue,
		     const struct sk_buff *skb)
{
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int header_len, in_len;
	dma_addr_t dma_addr;

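	/* The replicated header covers everything up to and including the
	 * TCP header; any remaining linear data is payload.
	 */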
	st->ip_off = skb_network_header(skb) - skb->data;
	st->tcp_off = skb_transport_header(skb) - skb->data;
	header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
	in_len = skb_headlen(skb) - header_len;
	st->header_len = header_len;
	st->in_len = in_len;
	if (st->protocol == htons(ETH_P_IP)) {
		st->ip_base_len = st->header_len - st->ip_off;
		st->ipv4_id = ntohs(ip_hdr(skb)->id);
	} else {
		st->ip_base_len = st->header_len - st->tcp_off;
		st->ipv4_id = 0;
	}
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_WARN_ON_ONCE_PARANOID(tcp_hdr(skb)->urg);
	EFX_WARN_ON_ONCE_PARANOID(tcp_hdr(skb)->syn);
	EFX_WARN_ON_ONCE_PARANOID(tcp_hdr(skb)->rst);

	st->out_len = skb->len - header_len;

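	/* Map the whole linear area in one go: the headers are re-sent for
	 * every segment from header_dma_addr, while any linear payload is
	 * consumed starting at dma_addr.
	 */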
	dma_addr = dma_map_single(dma_dev, skb->data,
				  skb_headlen(skb), DMA_TO_DEVICE);
	st->header_dma_addr = dma_addr;
	st->header_unmap_len = skb_headlen(skb);
	st->dma_addr = dma_addr + header_len;
	st->unmap_len = 0;

	return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0;
}

static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
					  skb_frag_size(frag), DMA_TO_DEVICE);
	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
		st->unmap_len = skb_frag_size(frag);
		st->in_len = skb_frag_size(frag);
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}


/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.
 */
static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					  const struct sk_buff *skb,
					  struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n;

	if (st->in_len == 0)
		return;
	if (st->packet_space == 0)
		return;

	EFX_WARN_ON_ONCE_PARANOID(st->in_len <= 0);
	EFX_WARN_ON_ONCE_PARANOID(st->packet_space <= 0);

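	/* Consume whichever runs out first: the current fragment or the
	 * space left in this segment's packet.
	 */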
	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);

	if (st->out_len == 0) {
		/* Transfer ownership of the skb */
		buffer->skb = skb;
		buffer->flags = EFX_TX_BUF_SKB;
	} else if (st->packet_space != 0) {
		buffer->flags = EFX_TX_BUF_CONT;
	}
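	/* Otherwise this descriptor ends the current packet but not the
	 * skb, so the continuation flag is deliberately left clear.
	 */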

	if (st->in_len == 0) {
		/* Transfer ownership of the DMA mapping */
		buffer->unmap_len = st->unmap_len;
		buffer->dma_offset = buffer->unmap_len - buffer->len;
		st->unmap_len = 0;
	}

	st->dma_addr += n;
}


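/* Byte offset of the flags field within the TCP header. */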
#define TCP_FLAGS_OFFSET 13

/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -%ENOMEM if failed to alloc header, or other negative error.
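 *
 * Each segment is emitted as a TSO option descriptor (carrying the
 * per-segment TCP flags, IP ID and sequence number) followed by a
 * descriptor for the original headers; payload descriptors follow.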
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tx_buffer *buffer =
		efx_tx_queue_get_insert_buffer(tx_queue);
	bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
	u8 tcp_flags_mask, tcp_flags;

	if (!is_last) {
		st->packet_space = skb_shinfo(skb)->gso_size;
		tcp_flags_mask = 0x09; /* mask out FIN (0x01) and PSH (0x08) */
	} else {
		st->packet_space = st->out_len;
		tcp_flags_mask = 0x00;
	}

	if (WARN_ON(!st->header_unmap_len))
		return -EINVAL;
	/* Send the original headers with a TSO option descriptor
	 * in front
	 */
	tcp_flags = ((u8 *)tcp_hdr(skb))[TCP_FLAGS_OFFSET] & ~tcp_flags_mask;

	buffer->flags = EFX_TX_BUF_OPTION;
	buffer->len = 0;
	buffer->unmap_len = 0;
	EFX_POPULATE_QWORD_5(buffer->option,
			     ESF_DZ_TX_DESC_IS_OPT, 1,
			     ESF_DZ_TX_OPTION_TYPE,
			     ESE_DZ_TX_OPTION_DESC_TSO,
			     ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
			     ESF_DZ_TX_TSO_IP_ID, st->ipv4_id,
			     ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum);
	++tx_queue->insert_count;

	/* We mapped the headers in tso_start().  Unmap them
	 * when the last segment is completed.
	 */
	buffer = efx_tx_queue_get_insert_buffer(tx_queue);
	buffer->dma_addr = st->header_dma_addr;
	buffer->len = st->header_len;
	if (is_last) {
		buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
		buffer->unmap_len = st->header_unmap_len;
		buffer->dma_offset = 0;
		/* Ensure we only unmap them once in case of a
		 * later DMA mapping error and rollback
		 */
		st->header_unmap_len = 0;
	} else {
		buffer->flags = EFX_TX_BUF_CONT;
		buffer->unmap_len = 0;
	}
	++tx_queue->insert_count;

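	/* Advance to the next segment: one MSS further into the TCP stream. */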
	st->seqnum += skb_shinfo(skb)->gso_size;

	/* Linux leaves suitable gaps in the IP ID space for us to fill. */
	++st->ipv4_id;

	return 0;
}

/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @data_mapped: Did we map the data? Always set to true
 *	by this on success.
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
 * @skb was not enqueued.  @skb is consumed unless return value is
 * %EINVAL.
 */
int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			struct sk_buff *skb,
			bool *data_mapped)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc;
	struct tso_state state;

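	/* Only the legacy firmware-assisted TSO (v1) is handled here;
	 * other queue types must take a different transmit path.
	 */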
	if (tx_queue->tso_version != 1)
		return -EINVAL;

	prefetch(skb->data);

	/* Find the packet protocol and sanity-check it */
	state.protocol = efx_tso_check_protocol(skb);

	EFX_WARN_ON_ONCE_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	rc = tso_start(&state, efx, tx_queue, skb);
	if (rc)
		goto fail;

	if (likely(state.in_len == 0)) {
		/* Grab the first payload fragment. */
		EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto fail;
	} else {
		/* Payload starts in the header area. */
		frag_i = -1;
	}

	rc = tso_start_new_packet(tx_queue, skb, &state);
	if (rc)
		goto fail;

	prefetch_ptr(tx_queue);

	while (1) {
		tso_fill_packet_with_fragment(tx_queue, skb, &state);

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto fail;
		}

		/* Start at new packet? */
		if (state.packet_space == 0) {
			rc = tso_start_new_packet(tx_queue, skb, &state);
			if (rc)
				goto fail;
		}
	}

	*data_mapped = true;

	return 0;

fail:
	if (rc == -ENOMEM)
		netif_err(efx, tx_err, efx->net_dev,
			  "Out of memory for TSO headers, or DMA mapping error\n");
	else
		netif_err(efx, tx_err, efx->net_dev, "TSO failed, rc = %d\n", rc);

	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
			       state.unmap_len, DMA_TO_DEVICE);
	}

	/* Free the header DMA mapping */
	if (state.header_unmap_len)
		dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
				 state.header_unmap_len, DMA_TO_DEVICE);

	return rc;
}