// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2019 Google, Inc.
 */

#include "gve.h"
#include "gve_adminq.h"
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/vmalloc.h>
#include <linux/skbuff.h>

static inline void gve_tx_put_doorbell(struct gve_priv *priv,
				       struct gve_queue_resources *q_resources,
				       u32 val)
{
	iowrite32be(val, &priv->db_bar2[be32_to_cpu(q_resources->db_index)]);
}

/* gvnic can only transmit from a Registered Segment.
 * We copy skb payloads into the registered segment before writing Tx
 * descriptors and ringing the Tx doorbell.
 *
 * gve_tx_fifo_* manages the Registered Segment as a FIFO - clients must
 * free allocations in the order they were allocated.
 */

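/* Note on the ordering requirement above: the NIC completes Tx descriptors in
 * ring order, and gve_clean_tx_done() walks completions in that same order, so
 * FIFO space is naturally returned in the order it was allocated.
 */
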
static int gve_tx_fifo_init(struct gve_priv *priv, struct gve_tx_fifo *fifo)
{
	fifo->base = vmap(fifo->qpl->pages, fifo->qpl->num_entries, VM_MAP,
			  PAGE_KERNEL);
	if (unlikely(!fifo->base)) {
		netif_err(priv, drv, priv->dev, "Failed to vmap fifo, qpl_id = %d\n",
			  fifo->qpl->id);
		return -ENOMEM;
	}

	fifo->size = fifo->qpl->num_entries * PAGE_SIZE;
	atomic_set(&fifo->available, fifo->size);
	fifo->head = 0;
	return 0;
}

static void gve_tx_fifo_release(struct gve_priv *priv, struct gve_tx_fifo *fifo)
{
	WARN(atomic_read(&fifo->available) != fifo->size,
	     "Releasing non-empty fifo");

	vunmap(fifo->base);
}

static int gve_tx_fifo_pad_alloc_one_frag(struct gve_tx_fifo *fifo,
					   size_t bytes)
{
	return (fifo->head + bytes < fifo->size) ? 0 : fifo->size - fifo->head;
}

static bool gve_tx_fifo_can_alloc(struct gve_tx_fifo *fifo, size_t bytes)
{
	return (atomic_read(&fifo->available) <= bytes) ? false : true;
}

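/* Worked example (illustrative values, assuming a one-page, 4096-byte FIFO):
 * with fifo->head == 4000, a 200-byte fragment does not fit in the tail, so
 * gve_tx_fifo_pad_alloc_one_frag() returns 96 pad bytes (4096 - 4000), letting
 * the caller skip the tail and place the fragment at offset 0 instead.
 * gve_tx_fifo_can_alloc() only checks the atomic byte count, so that padding
 * is charged against fifo->available when the allocation is actually made.
 */
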
/* gve_tx_alloc_fifo - Allocate fragment(s) from Tx FIFO
 * @fifo: FIFO to allocate from
 * @bytes: Allocation size
 * @iov: Scatter-gather elements to fill with allocation fragment base/len
 *
 * Returns number of valid elements in iov[] or negative on error.
 *
 * Allocations from a given FIFO must be externally synchronized, but
 * concurrent allocations and frees are allowed.
 */
static int gve_tx_alloc_fifo(struct gve_tx_fifo *fifo, size_t bytes,
			     struct gve_tx_iovec iov[2])
{
	size_t overflow, padding;
	u32 aligned_head;
	int nfrags = 0;

	if (!bytes)
		return 0;

	/* This check happens before we know how much padding is needed to
	 * align to a cacheline boundary for the payload, but that is fine,
	 * because the FIFO head always starts aligned, and the FIFO's
	 * boundaries are aligned, so if there is space for the data, there is
	 * space for the padding to the next alignment.
	 */
	WARN(!gve_tx_fifo_can_alloc(fifo, bytes),
	     "Reached %s when there's not enough space in the fifo", __func__);

	nfrags++;

	iov[0].iov_offset = fifo->head;
	iov[0].iov_len = bytes;
	fifo->head += bytes;

	if (fifo->head > fifo->size) {
		/* If the allocation did not fit in the tail fragment of the
		 * FIFO, also use the head fragment.
		 */
		nfrags++;
		overflow = fifo->head - fifo->size;
		iov[0].iov_len -= overflow;
		iov[1].iov_offset = 0;	/* Start of fifo */
		iov[1].iov_len = overflow;

		fifo->head = overflow;
	}

	/* Re-align to a cacheline boundary */
	aligned_head = L1_CACHE_ALIGN(fifo->head);
	padding = aligned_head - fifo->head;
	iov[nfrags - 1].iov_padding = padding;
	atomic_sub(bytes + padding, &fifo->available);
	fifo->head = aligned_head;

	if (fifo->head == fifo->size)
		fifo->head = 0;

	return nfrags;
}

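/* Worked example (illustrative values, assuming L1_CACHE_BYTES == 64 and a
 * two-page, 8192-byte FIFO): a 400-byte allocation with fifo->head == 8000
 * wraps, so gve_tx_alloc_fifo() returns 2 fragments:
 *   iov[0] = { .iov_offset = 8000, .iov_len = 192 }
 *   iov[1] = { .iov_offset = 0,    .iov_len = 208, .iov_padding = 48 }
 * head becomes L1_CACHE_ALIGN(208) == 256 and 448 bytes (400 + 48 padding)
 * are subtracted from fifo->available.
 */
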
/* gve_tx_free_fifo - Return space to Tx FIFO
 * @fifo: FIFO to return fragments to
 * @bytes: Bytes to free
 */
static void gve_tx_free_fifo(struct gve_tx_fifo *fifo, size_t bytes)
{
	atomic_add(bytes, &fifo->available);
}

static void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx)
{
	struct gve_notify_block *block =
			&priv->ntfy_blocks[gve_tx_idx_to_ntfy(priv, queue_idx)];

	block->tx = NULL;
}

static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
			     u32 to_do, bool try_to_wake);

static void gve_tx_free_ring(struct gve_priv *priv, int idx)
{
	struct gve_tx_ring *tx = &priv->tx[idx];
	struct device *hdev = &priv->pdev->dev;
	size_t bytes;
	u32 slots;

	gve_tx_remove_from_block(priv, idx);
	slots = tx->mask + 1;
	gve_clean_tx_done(priv, tx, tx->req, false);
	netdev_tx_reset_queue(tx->netdev_txq);

	dma_free_coherent(hdev, sizeof(*tx->q_resources),
			  tx->q_resources, tx->q_resources_bus);
	tx->q_resources = NULL;

	gve_tx_fifo_release(priv, &tx->tx_fifo);
	gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
	tx->tx_fifo.qpl = NULL;

	bytes = sizeof(*tx->desc) * slots;
	dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
	tx->desc = NULL;

	vfree(tx->info);
	tx->info = NULL;

	netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
}

static void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx)
{
	int ntfy_idx = gve_tx_idx_to_ntfy(priv, queue_idx);
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
	struct gve_tx_ring *tx = &priv->tx[queue_idx];

	block->tx = tx;
	tx->ntfy_id = ntfy_idx;
}

static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
{
	struct gve_tx_ring *tx = &priv->tx[idx];
	struct device *hdev = &priv->pdev->dev;
	u32 slots = priv->tx_desc_cnt;
	size_t bytes;

	/* Make sure everything is zeroed to start */
	memset(tx, 0, sizeof(*tx));
	tx->q_num = idx;

	tx->mask = slots - 1;

	/* alloc metadata */
	tx->info = vzalloc(sizeof(*tx->info) * slots);
	if (!tx->info)
		return -ENOMEM;

	/* alloc tx queue */
	bytes = sizeof(*tx->desc) * slots;
	tx->desc = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL);
	if (!tx->desc)
		goto abort_with_info;

	tx->tx_fifo.qpl = gve_assign_tx_qpl(priv);
	if (!tx->tx_fifo.qpl)
		goto abort_with_desc;

	/* map Tx FIFO */
	if (gve_tx_fifo_init(priv, &tx->tx_fifo))
		goto abort_with_qpl;

	tx->q_resources =
		dma_alloc_coherent(hdev,
				   sizeof(*tx->q_resources),
				   &tx->q_resources_bus,
				   GFP_KERNEL);
	if (!tx->q_resources)
		goto abort_with_fifo;

	netif_dbg(priv, drv, priv->dev, "tx[%d]->bus=%lx\n", idx,
		  (unsigned long)tx->bus);
	tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
	gve_tx_add_to_block(priv, idx);

	return 0;

abort_with_fifo:
	gve_tx_fifo_release(priv, &tx->tx_fifo);
abort_with_qpl:
	gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
abort_with_desc:
	dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
	tx->desc = NULL;
abort_with_info:
	vfree(tx->info);
	tx->info = NULL;
	return -ENOMEM;
}

int gve_tx_alloc_rings(struct gve_priv *priv)
{
	int err = 0;
	int i;

	for (i = 0; i < priv->tx_cfg.num_queues; i++) {
		err = gve_tx_alloc_ring(priv, i);
		if (err) {
			netif_err(priv, drv, priv->dev,
				  "Failed to alloc tx ring=%d: err=%d\n",
				  i, err);
			break;
		}
	}
	/* Free any rings allocated before the error */
	if (err) {
		int j;

		for (j = 0; j < i; j++)
			gve_tx_free_ring(priv, j);
	}
	return err;
}

void gve_tx_free_rings(struct gve_priv *priv)
{
	int i;

	for (i = 0; i < priv->tx_cfg.num_queues; i++)
		gve_tx_free_ring(priv, i);
}

/* gve_tx_avail - Calculates the number of slots available in the ring
 * @tx: tx ring to check
 *
 * Returns the number of slots available
 *
 * The capacity of the queue is mask + 1. We don't need to reserve an entry.
 **/
static inline u32 gve_tx_avail(struct gve_tx_ring *tx)
{
	return tx->mask + 1 - (tx->req - tx->done);
}

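/* Worked example (illustrative values): with a 256-slot ring (mask == 255),
 * req == 300 and done == 260, there are 40 descriptors outstanding and
 * gve_tx_avail() returns 256 - 40 == 216. The unsigned subtraction keeps the
 * outstanding count correct even after the counters wrap past U32_MAX.
 */
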
static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
					      struct sk_buff *skb)
{
	int pad_bytes, align_hdr_pad;
	int bytes;
	int hlen;

	hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) +
				 tcp_hdrlen(skb) : skb_headlen(skb);

	pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo,
						   hlen);
	/* We need to take into account the header alignment padding. */
	align_hdr_pad = L1_CACHE_ALIGN(hlen) - hlen;
	bytes = align_hdr_pad + pad_bytes + skb->len;

	return bytes;
}

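/* Worked example (illustrative values, assuming L1_CACHE_BYTES == 64): for a
 * non-GSO skb with skb_headlen() == 142 and skb->len == 1514, and a FIFO head
 * far enough from the end that no tail padding is needed (pad_bytes == 0),
 * align_hdr_pad == L1_CACHE_ALIGN(142) - 142 == 50, so the FIFO must have at
 * least 50 + 0 + 1514 == 1564 bytes available.
 */
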
/* The most descriptors we could need are 3 - 1 for the headers, 1 for
 * the beginning of the payload at the end of the FIFO, and 1 if the
 * payload wraps to the beginning of the FIFO.
 */
#define MAX_TX_DESC_NEEDED	3

/* Check if sufficient resources (descriptor ring space, FIFO space) are
 * available to transmit the given number of bytes.
 */
static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required)
{
	return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED &&
		gve_tx_fifo_can_alloc(&tx->tx_fifo, bytes_required));
}

/* Stops the queue if the skb cannot be transmitted. */
static int gve_maybe_stop_tx(struct gve_tx_ring *tx, struct sk_buff *skb)
{
	int bytes_required;

	bytes_required = gve_skb_fifo_bytes_required(tx, skb);
	if (likely(gve_can_tx(tx, bytes_required)))
		return 0;

	/* No space, so stop the queue */
	tx->stop_queue++;
	netif_tx_stop_queue(tx->netdev_txq);
	smp_mb();	/* sync with restarting queue in gve_clean_tx_done() */

	/* Now check for resources again, in case gve_clean_tx_done() freed
	 * resources after we checked and we stopped the queue after
	 * gve_clean_tx_done() checked.
	 *
	 * gve_maybe_stop_tx()			gve_clean_tx_done()
	 *   nsegs/can_alloc test failed
	 *					  gve_tx_free_fifo()
	 *					  if (tx queue stopped)
	 *					    netif_tx_queue_wake()
	 *   netif_tx_stop_queue()
	 *   Need to check again for space here!
	 */
	if (likely(!gve_can_tx(tx, bytes_required)))
		return -EBUSY;

	netif_tx_start_queue(tx->netdev_txq);
	tx->wake_queue++;
	return 0;
}

static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc,
				 struct sk_buff *skb, bool is_gso,
				 int l4_hdr_offset, u32 desc_cnt,
				 u16 hlen, u64 addr)
{
	/* l4_hdr_offset and csum_offset are in units of 16-bit words */
	if (is_gso) {
		pkt_desc->pkt.type_flags = GVE_TXD_TSO | GVE_TXF_L4CSUM;
		pkt_desc->pkt.l4_csum_offset = skb->csum_offset >> 1;
		pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
	} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		pkt_desc->pkt.type_flags = GVE_TXD_STD | GVE_TXF_L4CSUM;
		pkt_desc->pkt.l4_csum_offset = skb->csum_offset >> 1;
		pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
	} else {
		pkt_desc->pkt.type_flags = GVE_TXD_STD;
		pkt_desc->pkt.l4_csum_offset = 0;
		pkt_desc->pkt.l4_hdr_offset = 0;
	}
	pkt_desc->pkt.desc_cnt = desc_cnt;
	pkt_desc->pkt.len = cpu_to_be16(skb->len);
	pkt_desc->pkt.seg_len = cpu_to_be16(hlen);
	pkt_desc->pkt.seg_addr = cpu_to_be64(addr);
}

static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
				 struct sk_buff *skb, bool is_gso,
				 u16 len, u64 addr)
{
	seg_desc->seg.type_flags = GVE_TXD_SEG;
	if (is_gso) {
		if (skb_is_gso_v6(skb))
			seg_desc->seg.type_flags |= GVE_TXSF_IPV6;
		seg_desc->seg.l3_offset = skb_network_offset(skb) >> 1;
		seg_desc->seg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
	}
	seg_desc->seg.seg_len = cpu_to_be16(len);
	seg_desc->seg.seg_addr = cpu_to_be64(addr);
}

static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
				    u64 iov_offset, u64 iov_len)
{
	u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE;
	u64 first_page = iov_offset / PAGE_SIZE;
	dma_addr_t dma;
	u64 page;

	for (page = first_page; page <= last_page; page++) {
		dma = page_buses[page];
		dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
	}
}

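/* Worked example (illustrative values, assuming 4096-byte pages): an iovec
 * with iov_offset == 4000 and iov_len == 500 straddles a page boundary, so
 * first_page == 0 and last_page == 1, and both QPL pages are synced for the
 * device before the descriptors are posted.
 */
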
static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb,
			  struct device *dev)
{
	int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset;
	union gve_tx_desc *pkt_desc, *seg_desc;
	struct gve_tx_buffer_state *info;
	bool is_gso = skb_is_gso(skb);
	u32 idx = tx->req & tx->mask;
	int payload_iov = 2;
	int copy_offset;
	u32 next_idx;
	int i;

	info = &tx->info[idx];
	pkt_desc = &tx->desc[idx];

	l4_hdr_offset = skb_checksum_start_offset(skb);
	/* If the skb is gso, then we want the tcp header in the first segment
	 * otherwise we want the linear portion of the skb (which will contain
	 * the checksum because skb->csum_start and skb->csum_offset are given
	 * relative to skb->head) in the first segment.
	 */
	hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) :
			skb_headlen(skb);

	info->skb = skb;
	/* We don't want to split the header, so if necessary, pad to the end
	 * of the fifo and then put the header at the beginning of the fifo.
	 */
	pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, hlen);
	hdr_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, hlen + pad_bytes,
				       &info->iov[0]);
	WARN(!hdr_nfrags, "hdr_nfrags should never be 0!");
	payload_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, skb->len - hlen,
					   &info->iov[payload_iov]);

	gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
			     1 + payload_nfrags, hlen,
			     info->iov[hdr_nfrags - 1].iov_offset);

	skb_copy_bits(skb, 0,
		      tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,
		      hlen);
	gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses,
				info->iov[hdr_nfrags - 1].iov_offset,
				info->iov[hdr_nfrags - 1].iov_len);
	copy_offset = hlen;

	for (i = payload_iov; i < payload_nfrags + payload_iov; i++) {
		next_idx = (tx->req + 1 + i - payload_iov) & tx->mask;
		seg_desc = &tx->desc[next_idx];

		gve_tx_fill_seg_desc(seg_desc, skb, is_gso,
				     info->iov[i].iov_len,
				     info->iov[i].iov_offset);

		skb_copy_bits(skb, copy_offset,
			      tx->tx_fifo.base + info->iov[i].iov_offset,
			      info->iov[i].iov_len);
		gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses,
					info->iov[i].iov_offset,
					info->iov[i].iov_len);
		copy_offset += info->iov[i].iov_len;
	}

	return 1 + payload_nfrags;
}

netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct gve_priv *priv = netdev_priv(dev);
	struct gve_tx_ring *tx;
	int nsegs;

	WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues,
	     "skb queue index out of range");
	tx = &priv->tx[skb_get_queue_mapping(skb)];
	if (unlikely(gve_maybe_stop_tx(tx, skb))) {
		/* We need to ring the txq doorbell -- we have stopped the Tx
		 * queue for want of resources, but prior calls to gve_tx()
		 * may have added descriptors without ringing the doorbell.
		 */

		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
		return NETDEV_TX_BUSY;
	}
	nsegs = gve_tx_add_skb(tx, skb, &priv->pdev->dev);

	netdev_tx_sent_queue(tx->netdev_txq, skb->len);
	skb_tx_timestamp(skb);

	/* give packets to NIC */
	tx->req += nsegs;

	if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
		return NETDEV_TX_OK;

	gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
	return NETDEV_TX_OK;
}

#define GVE_TX_START_THRESH	PAGE_SIZE

static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
			     u32 to_do, bool try_to_wake)
{
	struct gve_tx_buffer_state *info;
	u64 pkts = 0, bytes = 0;
	size_t space_freed = 0;
	struct sk_buff *skb;
	int i, j;
	u32 idx;

	for (j = 0; j < to_do; j++) {
		idx = tx->done & tx->mask;
		netif_info(priv, tx_done, priv->dev,
			   "[%d] %s: idx=%d (req=%u done=%u)\n",
			   tx->q_num, __func__, idx, tx->req, tx->done);
		info = &tx->info[idx];
		skb = info->skb;

		/* Mark as free */
		if (skb) {
			info->skb = NULL;
			bytes += skb->len;
			pkts++;
			dev_consume_skb_any(skb);
			/* FIFO free */
			for (i = 0; i < ARRAY_SIZE(info->iov); i++) {
				space_freed += info->iov[i].iov_len +
					       info->iov[i].iov_padding;
				info->iov[i].iov_len = 0;
				info->iov[i].iov_padding = 0;
			}
		}
		tx->done++;
	}

	gve_tx_free_fifo(&tx->tx_fifo, space_freed);
	u64_stats_update_begin(&tx->statss);
	tx->bytes_done += bytes;
	tx->pkt_done += pkts;
	u64_stats_update_end(&tx->statss);
	netdev_tx_completed_queue(tx->netdev_txq, pkts, bytes);

	/* start the queue if we've stopped it */
#ifndef CONFIG_BQL
	/* Make sure that the doorbells are synced */
	smp_mb();
#endif
	if (try_to_wake && netif_tx_queue_stopped(tx->netdev_txq) &&
	    likely(gve_can_tx(tx, GVE_TX_START_THRESH))) {
		tx->wake_queue++;
		netif_tx_wake_queue(tx->netdev_txq);
	}

	return pkts;
}

__be32 gve_tx_load_event_counter(struct gve_priv *priv,
				 struct gve_tx_ring *tx)
{
	u32 counter_index = be32_to_cpu(tx->q_resources->counter_index);

	return READ_ONCE(priv->counter_array[counter_index]);
}

bool gve_tx_poll(struct gve_notify_block *block, int budget)
{
	struct gve_priv *priv = block->priv;
	struct gve_tx_ring *tx = block->tx;
	bool repoll = false;
	u32 nic_done;
	u32 to_do;

	/* If budget is 0, do all the work */
	if (budget == 0)
		budget = INT_MAX;

	/* Find out how much work there is to be done */
	tx->last_nic_done = gve_tx_load_event_counter(priv, tx);
	nic_done = be32_to_cpu(tx->last_nic_done);
	if (budget > 0) {
		/* Do as much work as we have that the budget will
		 * allow
		 */
		to_do = min_t(u32, (nic_done - tx->done), budget);
		gve_clean_tx_done(priv, tx, to_do, true);
	}
	/* If we still have work we want to repoll */
	repoll |= (nic_done != tx->done);
	return repoll;
}